Merge remote-tracking branch 'hikey-aosp/mirror-android13-5.10-lts' into meson-5.10

Change-Id: I1e34ffefd069e7f78cf243a4fb0967fd173e0718
diff --git a/Documentation/devicetree/bindings/staging/nanohub.txt b/Documentation/devicetree/bindings/staging/nanohub.txt
new file mode 100644
index 0000000..aaa2b09
--- /dev/null
+++ b/Documentation/devicetree/bindings/staging/nanohub.txt
@@ -0,0 +1,38 @@
+GOOGLE nanohub sensorhub
+
+Required properties:
+- compatible : should be "nanohub"
+
+Example:
+
+&spi_7 {
+	sensorhub@0 {
+		compatible = "nanohub";
+		reg = <0>;
+		spi-max-frequency = <25000000>;
+		spi-cpol;
+		spi-cpha;
+
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <66 0x2>;
+		sensorhub,nreset-gpio = <&msm_gpio 59 0>;
+		sensorhub,boot0-gpio = <&msm_gpio 60 0>;
+		sensorhub,wakeup-gpio = <&msm_gpio 65 0>;
+		sensorhub,irq1-gpio = <&msm_gpio 66 0>;
+		sensorhub,spi-cs-gpio = <&msm_gpio 43 0>;
+		sensorhub,bl-max-frequency = <1000000>;
+		sensorhub,bl-addr = <0x08000000>;
+		sensorhub,kernel-addr = <0x0800C000>;
+		sensorhub,shared-addr = <0x08020000>;
+		sensorhub,flash-banks = <0 0x08000000 0x04000>,
+			<3 0x0800C000 0x04000>,
+			<4 0x08010000 0x10000>,
+			<5 0x08020000 0x20000>;
+		sensorhub,num-flash-banks = <4>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&sensorhub_ctrl_active
+				&sensorhub_int1_active
+				&sensorhub_int2_active>;
+	};
+};
diff --git a/OWNERS b/OWNERS
index 414594e..77bac51 100644
--- a/OWNERS
+++ b/OWNERS
@@ -10,3 +10,6 @@
 # https://android.googlesource.com/kernel/common/+/android-mainline/OWNERS_DrNo
 
 include kernel/common:android-mainline:/OWNERS_DrNo
+dimitrysh@google.com
+aldend@google.com
+zhangmax@google.com
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 91af076..2b4dfb9 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -52,3 +52,4 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l-android.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-c4.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l-android.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 075153a..1ef0f2b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -17,6 +17,12 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		mmc0 = &sd_emmc_b; /* SD card */
+		mmc1 = &sd_emmc_c; /* eMMC */
+		mmc2 = &sd_emmc_a; /* SDIO */
+	};
+
 	chosen {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -2035,6 +2041,7 @@
 				interrupts = <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>;
 				clocks = <&clkc_AO CLKID_AO_CTS_OSCIN>;
 				clock-names = "oscin";
+				amlogic,ao-sysctrl = <&rti>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dts
index 2f89a01..391375b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dts
@@ -38,6 +38,17 @@
                         };
                 };
 
+		gpio-keys-polled {
+			compatible = "gpio-keys-polled";
+			poll-interval = <100>;
+
+			button-btn0 {
+				label = "btn0";
+				linux,code = <BTN_0>;
+				gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
+			};
+		};
+
                 cvbs-connector {
                         status = "disabled";
                 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index fb0ab27..a6a7587 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -85,6 +85,7 @@
 		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <731000>;
+			opp-suspend;
 		};
 
 		opp-1398000000 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 47cbb0a..77f62a0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -20,6 +20,12 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		mmc0 = &sd_emmc_b; /* SD card */
+		mmc1 = &sd_emmc_c; /* eMMC */
+		mmc2 = &sd_emmc_a; /* SDIO */
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dts
index 4b06bf2..39597a3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dts
@@ -38,14 +38,6 @@
     gpio-open-drain;
 };
 
-&uart_A {
-	bluetooth {
-        interrupt-parent = <&gpio_intc>;
-        interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-names = "host-wakeup";
-    };
-};
-
 &uart_C {
         status = "disabled";
         pinctrl-0 = <&uart_c_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index defe0b8..1dac89b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -103,6 +103,7 @@
 		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <780000>;
+			opp-suspend;
 		};
 
 		opp-1404000000 {
diff --git a/arch/arm64/configs/meson_defconfig b/arch/arm64/configs/meson_defconfig
new file mode 100644
index 0000000..3b51bb1
--- /dev/null
+++ b/arch/arm64/configs/meson_defconfig
@@ -0,0 +1,785 @@
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_LOG_BUF_SHIFT=19
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+CONFIG_USER_NS=y
+# CONFIG_NET_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+# CONFIG_ZONE_DMA is not set
+CONFIG_ARCH_MESON=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_COMPAT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=y
+CONFIG_CRYPTO_SHA3_ARM64=y
+CONFIG_CRYPTO_SM3_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_CHACHA20_NEON=y
+CONFIG_CRYPTO_AES_ARM64_BS=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=15
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_L2TP=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_LEDS=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+# CONFIG_FW_CACHE is not set
+CONFIG_SIMPLE_PM_BUS=y
+CONFIG_VEXPRESS_CONFIG=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_UBI=y
+CONFIG_OF_OVERLAY=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_SRAM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+# CONFIG_ATA_SFF is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_IPVLAN=y
+CONFIG_IPVTAP=y
+CONFIG_GENEVE=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HISILICON is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MESON_GXL_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MARVELL_10G_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_BUS_MUX_MESON_G12A=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SR9800=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_USB_NET_MCS7830=y
+CONFIG_BCMDHD=y
+CONFIG_BCMDHD_FW_PATH="/vendor/firmware/brcm/fw_bcmdhd.bin"
+CONFIG_BCMDHD_NVRAM_PATH="/vendor/firmware/brcm/nvram.txt"
+CONFIG_BCMDHD_SDIO_IRQ=y
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_DEVMEM is not set
+CONFIG_TCG_TPM=y
+CONFIG_TCG_TIS_I2C_INFINEON=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MESON=y
+CONFIG_SPI=y
+CONFIG_SPI_MESON_SPICC=y
+CONFIG_SPI_MESON_SPIFC=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_VEXPRESS=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_SBS=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_INA2XX=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_KHADAS_MCU_FAN_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_MESON_GXBB_WATCHDOG=y
+CONFIG_MESON_WATCHDOG=y
+CONFIG_MFD_BD9571MWV=y
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_KHADAS_MCU=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_BD9571MWV=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_REGULATOR_VCTRL=y
+CONFIG_RC_CORE=y
+# CONFIG_RC_MAP is not set
+CONFIG_LIRC=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_NEC_DECODER=y
+CONFIG_IR_RC5_DECODER=y
+CONFIG_IR_RC6_DECODER=y
+CONFIG_IR_JVC_DECODER=y
+CONFIG_IR_SONY_DECODER=y
+CONFIG_IR_SANYO_DECODER=y
+CONFIG_IR_SHARP_DECODER=y
+CONFIG_IR_MCE_KBD_DECODER=y
+CONFIG_IR_XMP_DECODER=y
+CONFIG_IR_IMON_DECODER=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_MESON=y
+CONFIG_CEC_MESON_AO=y
+CONFIG_CEC_MESON_G12A_AO=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_MALI_MIDGARD=y
+# CONFIG_MALI_GATOR_SUPPORT is not set
+CONFIG_MALI_DMA_FENCE=y
+CONFIG_MALI_PLATFORM_NAME="meson"
+CONFIG_MALI_PLATFORM_POWER_DOWN_ONLY=y
+# CONFIG_MALI_KUTF is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_I2C_CH7006=y
+CONFIG_DRM_I2C_SIL164=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_I2C_ADV7511=y
+CONFIG_DRM_MESON=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_LP855X=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_MESON_AXG_SOUND_CARD=y
+CONFIG_SND_SOC_AK4613=y
+CONFIG_SND_SOC_MAX98357A=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_SND_AUDIO_GRAPH_CARD=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_STEAM=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB_HIDDEV=y
+CONFIG_I2C_HID=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_HOST=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ISP1301=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_GADGETFS=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_CQHCI=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=y
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_MESON_VRTC=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMADEVICES=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_SYSFS_STATS=y
+CONFIG_DMABUF_HEAPS_DEFERRED_FREE=y
+CONFIG_DMABUF_HEAPS_PAGE_POOL=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DMABUF_HEAPS_CMA=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=y
+CONFIG_VFIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VHOST_VSOCK=y
+CONFIG_STAGING=y
+CONFIG_STAGING_MEDIA=y
+CONFIG_VIDEO_MESON_VDEC=y
+CONFIG_ASHMEM=y
+CONFIG_NANOHUB=y
+CONFIG_NANOHUB_SPI=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_HWSPINLOCK=y
+CONFIG_MAILBOX=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_SOC_TI=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_MEMORY=y
+CONFIG_PWM=y
+CONFIG_PWM_MESON=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MESON_EFUSE=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_FUSE_BPF=y
+CONFIG_OVERLAY_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_UBIFS_FS=y
+CONFIG_CRAMFS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
+CONFIG_EROFS_FS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_UNICODE=y
+CONFIG_SECURITY_DMESG_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_INDIRECT_PIO=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_MEMTEST=y
diff --git a/build.config.yukawa.aarch64 b/build.config.yukawa.aarch64
new file mode 100644
index 0000000..db20f1d
--- /dev/null
+++ b/build.config.yukawa.aarch64
@@ -0,0 +1,45 @@
+. ${ROOT_DIR}/hikey-linaro/build.config.common
+. ${ROOT_DIR}/hikey-linaro/build.config.aarch64
+
+BRANCH=android-amlogic-bmeson-5.10
+DEFCONFIG=meson_defconfig
+KERNEL_DIR=hikey-linaro
+POST_DEFCONFIG_CMDS="check_defconfig"
+STOP_SHIP_TRACEPRINTK=1
+SKIP_CP_KERNEL_HDR=1
+# this is needed to generate __symbols__ for u-boot
+DTC_FLAGS="-@"
+
+MAKE_GOALS="${MAKE_GOALS}
+dtbs
+"
+
+FILES="
+arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dtb
+arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dtb
+arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dtb
+arch/arm64/boot/dts/amlogic/meson-sm1-sei610-android.dtb
+arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb
+arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3-android.dtb
+arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dtb
+arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dtb
+arch/arm64/boot/Image.lz4
+"
+
+EXTRA_CMDS="lz4_compress"
+DIST_CMDS="rename_file"
+
+function lz4_compress() {
+        lz4 -f -12 --favor-decSpeed ${OUT_DIR}/arch/arm64/boot/Image ${OUT_DIR}/arch/arm64/boot/Image.lz4
+}
+
+function rename_file() {
+        KERN_VER=$(echo "$BRANCH" | cut -f 4 -d "-")
+        for FILE in ${FILES}; do
+            if [ -f ${DIST_DIR}/${FILE##*/} ]; then
+                  cp -p ${DIST_DIR}/${FILE##*/} ${DIST_DIR}/${FILE##*/}-${KERN_VER}
+            else
+                  echo "  $FILE is not a file, skipping"
+            fi
+        done
+}
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 835c883..b3924fe 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1,8 +1,8 @@
-# SPDX-License-Identifier: GPL-2.0-only
 # drm/tegra depends on host1x, so if both drivers are built-in care must be
 # taken to initialize them in the correct order. Link order is the only way
 # to ensure this currently.
 obj-$(CONFIG_TEGRA_HOST1X)	+= host1x/
 obj-y			+= drm/ vga/
+obj-y			+= arm/
 obj-$(CONFIG_IMX_IPUV3_CORE)	+= ipu-v3/
 obj-$(CONFIG_TRACE_GPU_MEM)		+= trace/
diff --git a/drivers/gpu/arm/Kbuild b/drivers/gpu/arm/Kbuild
new file mode 100644
index 0000000..1a6fa3c9
--- /dev/null
+++ b/drivers/gpu/arm/Kbuild
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+obj-$(CONFIG_MALI_MIDGARD) += midgard/
diff --git a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig
new file mode 100644
index 0000000..693b86f
--- /dev/null
+++ b/drivers/gpu/arm/Kconfig
@@ -0,0 +1,25 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+menu "ARM GPU Configuration"
+source "drivers/gpu/arm/midgard/Kconfig"
+endmenu
diff --git a/drivers/gpu/arm/midgard/Kbuild b/drivers/gpu/arm/midgard/Kbuild
new file mode 100644
index 0000000..456eee4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Kbuild
@@ -0,0 +1,168 @@
+#
+# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+# Driver version string which is returned to userspace via an ioctl
+MALI_RELEASE_NAME ?= "r16p0-01rel0"
+
+# Paths required for build
+KBASE_PATH = $(src)
+KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy
+UMP_PATH = $(src)/../../../base
+
+# Set up defaults if not defined by build system
+MALI_CUSTOMER_RELEASE ?= 1
+MALI_USE_CSF ?= 0
+MALI_UNIT_TEST ?= 0
+MALI_KERNEL_TEST_API ?= 0
+MALI_COVERAGE ?= 0
+CONFIG_MALI_PLATFORM_NAME ?= "devicetree"
+
+# Set up our defines, which will be passed to gcc
+DEFINES = \
+	-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+	-DMALI_USE_CSF=$(MALI_USE_CSF) \
+	-DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
+	-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+	-DMALI_COVERAGE=$(MALI_COVERAGE) \
+	-DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\"
+
+ifeq ($(KBUILD_EXTMOD),)
+# in-tree
+DEFINES += -DMALI_KBASE_PLATFORM_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
+else
+# out-of-tree
+DEFINES += -DMALI_KBASE_PLATFORM_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
+endif
+
+DEFINES += -I$(srctree)/drivers/staging/android
+
+DEFINES += -DMALI_KBASE_BUILD
+
+# Use our defines when compiling
+ccflags-y += $(DEFINES) -I$(KBASE_PATH)   -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH)   -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+
+SRC := \
+	mali_kbase_device.c \
+	mali_kbase_cache_policy.c \
+	mali_kbase_mem.c \
+	mali_kbase_mmu.c \
+	mali_kbase_ctx_sched.c \
+	mali_kbase_jd.c \
+	mali_kbase_jd_debugfs.c \
+	mali_kbase_jm.c \
+	mali_kbase_gpuprops.c \
+	mali_kbase_js.c \
+	mali_kbase_js_ctx_attr.c \
+	mali_kbase_event.c \
+	mali_kbase_context.c \
+	mali_kbase_pm.c \
+	mali_kbase_config.c \
+	mali_kbase_vinstr.c \
+	mali_kbase_hwcnt.c \
+	mali_kbase_hwcnt_backend_gpu.c \
+	mali_kbase_hwcnt_gpu.c \
+	mali_kbase_hwcnt_legacy.c \
+	mali_kbase_hwcnt_types.c \
+	mali_kbase_hwcnt_virtualizer.c \
+	mali_kbase_softjobs.c \
+	mali_kbase_10969_workaround.c \
+	mali_kbase_hw.c \
+	mali_kbase_debug.c \
+	mali_kbase_gpu_memory_debugfs.c \
+	mali_kbase_mem_linux.c \
+	mali_kbase_core_linux.c \
+	mali_kbase_replay.c \
+	mali_kbase_mem_profile_debugfs.c \
+	mali_kbase_mmu_mode_lpae.c \
+	mali_kbase_mmu_mode_aarch64.c \
+	mali_kbase_disjoint_events.c \
+	mali_kbase_gator_api.c \
+	mali_kbase_debug_mem_view.c \
+	mali_kbase_debug_job_fault.c \
+	mali_kbase_smc.c \
+	mali_kbase_mem_pool.c \
+	mali_kbase_mem_pool_debugfs.c \
+	mali_kbase_tlstream.c \
+	mali_kbase_strings.c \
+	mali_kbase_as_fault_debugfs.c \
+	mali_kbase_regs_history_debugfs.c \
+	thirdparty/mali_kbase_mmap.c
+
+
+ifeq ($(CONFIG_MALI_CINSTR_GWT),y)
+	SRC += mali_kbase_gwt.c
+endif
+
+ifeq ($(MALI_UNIT_TEST),1)
+	SRC += mali_kbase_tlstream_test.c
+endif
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+	SRC += mali_kbase_regs_dump_debugfs.c
+endif
+
+
+ccflags-y += -I$(KBASE_PATH)
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
+
+# Tell the Linux build system to enable building of our .c files
+mali_kbase-y := $(SRC:.c=.o)
+
+# Kconfig passes in the name with quotes for in-tree builds - remove them.
+platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_NAME))
+MALI_PLATFORM_DIR := platform/$(platform_name)
+ccflags-y += -I$(srctree)/$(src)/$(MALI_PLATFORM_DIR)
+include $(src)/$(MALI_PLATFORM_DIR)/Kbuild
+
+ifeq ($(CONFIG_MALI_DEVFREQ),y)
+  ifeq ($(CONFIG_DEVFREQ_THERMAL),y)
+    include $(src)/ipa/Kbuild
+  endif
+endif
+
+ifeq ($(MALI_USE_CSF),1)
+	include $(src)/csf/Kbuild
+endif
+
+mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
+	mali_kbase_dma_fence.o \
+	mali_kbase_fence.o
+mali_kbase-$(CONFIG_SYNC) += \
+	mali_kbase_sync_android.o \
+	mali_kbase_sync_common.o
+mali_kbase-$(CONFIG_SYNC_FILE) += \
+	mali_kbase_sync_file.o \
+	mali_kbase_sync_common.o \
+	mali_kbase_fence.o
+
+include  $(src)/backend/gpu/Kbuild
+mali_kbase-y += $(BACKEND:.c=.o)
+
+
+ccflags-y += -I$(srctree)/$(src)/backend/gpu
+subdir-ccflags-y += -I$(srctree)/$(src)/backend/gpu
+
+# For kutf and mali_kutf_irq_latency_test
+obj-$(CONFIG_MALI_KUTF) += tests/
diff --git a/drivers/gpu/arm/midgard/Kconfig b/drivers/gpu/arm/midgard/Kconfig
new file mode 100644
index 0000000..328f0f6
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Kconfig
@@ -0,0 +1,223 @@
+#
+# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+menuconfig MALI_MIDGARD
+	tristate "Mali Midgard series support"
+	select GPU_TRACEPOINTS if ANDROID
+	default n
+	help
+	  Enable this option to build support for an ARM Mali Midgard GPU.
+
+	  To compile this driver as a module, choose M here:
+	  this will generate a single module, called mali_kbase.
+
+config MALI_GATOR_SUPPORT
+	bool "Enable Streamline tracing support"
+	depends on MALI_MIDGARD
+	default y
+	help
+	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
+	  The tracepoints are used to derive GPU activity charts in Streamline.
+
+config MALI_MIDGARD_DVFS
+	bool "Enable legacy DVFS"
+	depends on MALI_MIDGARD && !MALI_DEVFREQ
+	default n
+	help
+	  Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+
+config MALI_MIDGARD_ENABLE_TRACE
+	bool "Enable kbase tracing"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Enables tracing in kbase. The trace log is available through
+	  the "mali_trace" debugfs file when CONFIG_DEBUG_FS is enabled.
+
+config MALI_DEVFREQ
+	bool "devfreq support for Mali"
+	depends on MALI_MIDGARD && PM_DEVFREQ
+	default y
+	help
+	  Support devfreq for Mali.
+
+	  Using the devfreq framework and, by default, the simpleondemand
+	  governor, the frequency of Mali will be dynamically selected from the
+	  available OPPs.
+
+config MALI_DMA_FENCE
+	bool "DMA_BUF fence support for Mali"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Support DMA_BUF fences for Mali.
+
+	  This option should only be enabled if the Linux Kernel has built-in
+	  support for DMA_BUF fences.
+
+config MALI_PLATFORM_NAME
+	depends on MALI_MIDGARD
+	string "Platform name"
+	default "devicetree"
+	help
+	  Enter the name of the desired platform configuration directory to
+	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
+	  exist.
+
+config MALI_PLATFORM_POWER_DOWN_ONLY
+	depends on MALI_MIDGARD
+	bool "Perform GPU power down using only platform specific code"
+	default n
+	help
+	  If this is enabled, kbase will avoid powering down shader cores, the
+	  tiler, and the L2 cache, and will instead power down the entire GPU through
+	  platform-specific code. This may be required for certain platform
+	  integrations.
+	  Note that because this prevents kbase from powering down shader cores, it limits
+	  the available power policies to coarse_demand and always_on.
+
+# MALI_EXPERT configuration options
+
+menuconfig MALI_EXPERT
+	depends on MALI_MIDGARD
+	bool "Enable Expert Settings"
+	default n
+	help
+	  Enabling this option and modifying the default settings may produce a driver with performance or
+	  other limitations.
+
+config MALI_CORESTACK
+	bool "Support controlling power to the GPU core stack"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Enabling this feature on supported GPUs will let the driver power
+	  the GPU core stack on and off independently, without involving the Power
+	  Domain Controller. This should only be enabled on platforms where
+	  integration of the PDC with the Mali GPU is known to be problematic.
+	  This feature is currently only supported on t-Six and t-HEx GPUs.
+
+	  If unsure, say N.
+
+config MALI_DEBUG
+	bool "Debug build"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Select this option for increased checking and reporting of errors.
+
+config MALI_FENCE_DEBUG
+	bool "Debug sync fence usage"
+	depends on MALI_MIDGARD && MALI_EXPERT && (SYNC || SYNC_FILE)
+	default y if MALI_DEBUG
+	help
+	  Select this option to enable additional checking and reporting on the
+	  use of sync fences in the Mali driver.
+
+	  This will add a 3s timeout to all sync fence waits in the Mali
+	  driver, so that when work for Mali has been waiting on a sync fence
+	  for a long time a debug message will be printed, detailing what fence
+	  is causing the block, and which dependent Mali atoms are blocked as a
+	  result of this.
+
+	  The timeout can be changed at runtime through the js_soft_timeout
+	  device attribute, where the timeout is specified in milliseconds.
+
+config MALI_NO_MALI
+	bool "No Mali"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  This can be used to test the driver in a simulated environment
+	  whereby the hardware is not physically present. If the hardware is physically
+	  present it will not be used. This can be used to test the majority of the
+	  driver without needing actual hardware or for software benchmarking.
+	  All calls to the simulated hardware will complete immediately as if the hardware
+	  completed the task.
+
+config MALI_ERROR_INJECT
+	bool "Error injection"
+	depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI
+	default n
+	help
+	  Enables insertion of errors to test module failure and recovery mechanisms.
+
+config MALI_SYSTEM_TRACE
+	bool "Enable system event tracing support"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Choose this option to enable system trace events for each
+	  kbase event. This is typically used for debugging but has
+	  minimal overhead when not in use. Enable only if you know what
+	  you are doing.
+
+config MALI_2MB_ALLOC
+	bool "Attempt to allocate 2MB pages"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Rather than allocating all GPU memory page-by-page, attempt to
+	  allocate 2MB pages from the kernel. This reduces TLB pressure and
+	  helps to prevent memory fragmentation.
+
+	  If in doubt, say N
+
+config MALI_PWRSOFT_765
+	bool "PWRSOFT-765 ticket"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  PWRSOFT-765 fixes devfreq cooling device issues. The fix was merged
+	  in kernel v4.10; however, if it has been backported into an older
+	  kernel, this option must be selected manually.
+
+	  If using kernel >= v4.10, say N; otherwise, if the devfreq cooling
+	  changes have been backported, say Y to avoid compilation errors.
+
+# Instrumentation options.
+
+config MALI_JOB_DUMP
+	bool "Enable system level support needed for job dumping"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Choose this option to enable system level support needed for
+	  job dumping. This is typically used for instrumentation but has
+	  minimal overhead when not in use. Enable only if you know what
+	  you are doing.
+
+config MALI_PRFCNT_SET_SECONDARY
+	bool "Use secondary set of performance counters"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Select this option to use the secondary set of performance counters. Kernel
+	  features that depend on access to the primary set of counters may
+	  become unavailable. Enabling this option will prevent power management
+	  from working optimally and may cause instrumentation tools to return
+	  bogus results.
+
+	  If unsure, say N.
+
+source "drivers/gpu/arm/midgard/platform/Kconfig"
+source "drivers/gpu/arm/midgard/tests/Kconfig"
diff --git a/drivers/gpu/arm/midgard/Makefile b/drivers/gpu/arm/midgard/Makefile
new file mode 100644
index 0000000..9e7756b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Makefile
@@ -0,0 +1,39 @@
+#
+# (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+include $(PANFROST_CFG_DIR)/config.mk
+
+KDIR ?= /lib/modules/$(shell uname -r)/build
+
+BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
+KBASE_PATH_RELATIVE = $(CURDIR)
+
+ifeq ($(CONFIG_MALI_FPGA_BUS_LOGGER),y)
+#Add bus logger symbols
+EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
+endif
+
+# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
+all:
+	$(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS) -include $(PANFROST_CFG_DIR)/config.h -include $(PANFROST_INC_DIR)/upstream-workarounds.h" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
+
+clean:
+	$(MAKE) -C $(KDIR) M=$(CURDIR) clean
diff --git a/drivers/gpu/arm/midgard/Makefile.kbase b/drivers/gpu/arm/midgard/Makefile.kbase
new file mode 100644
index 0000000..6b0f81e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Makefile.kbase
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2010, 2013, 2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+EXTRA_CFLAGS += -I$(ROOT) -I$(KBASE_PATH) -I$(KBASE_PATH)/platform_$(PLATFORM)
+
diff --git a/drivers/gpu/arm/midgard/Mconfig b/drivers/gpu/arm/midgard/Mconfig
new file mode 100644
index 0000000..46dca14
--- /dev/null
+++ b/drivers/gpu/arm/midgard/Mconfig
@@ -0,0 +1,190 @@
+#
+# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA  02110-1301, USA.
+#
+#
+
+
+menuconfig MALI_MIDGARD
+	bool "Mali Midgard series support"
+	default y
+	help
+	  Enable this option to build support for an ARM Mali Midgard GPU.
+
+	  To compile this driver as a module, choose M here:
+	  this will generate a single module, called mali_kbase.
+
+config MALI_GATOR_SUPPORT
+	bool "Enable Streamline tracing support"
+	depends on MALI_MIDGARD && !BACKEND_USER
+	default y
+	help
+	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
+	  The tracepoints are used to derive GPU activity charts in Streamline.
+
+config MALI_MIDGARD_DVFS
+	bool "Enable legacy DVFS"
+	depends on MALI_MIDGARD && !MALI_DEVFREQ
+	default n
+	help
+	  Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+
+config MALI_MIDGARD_ENABLE_TRACE
+	bool "Enable kbase tracing"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Enables tracing in kbase. The trace log is available through
+	  the "mali_trace" debugfs file when CONFIG_DEBUG_FS is enabled.
+
+config MALI_DEVFREQ
+	bool "devfreq support for Mali"
+	depends on MALI_MIDGARD
+	default y if PLATFORM_JUNO
+	default y if PLATFORM_CUSTOM
+	help
+	  Support devfreq for Mali.
+
+	  Using the devfreq framework and, by default, the simpleondemand
+	  governor, the frequency of Mali will be dynamically selected from the
+	  available OPPs.
+
+config MALI_DMA_FENCE
+	bool "DMA_BUF fence support for Mali"
+	depends on MALI_MIDGARD
+	default n
+	help
+	  Support DMA_BUF fences for Mali.
+
+	  This option should only be enabled if the Linux Kernel has built-in
+	  support for DMA_BUF fences.
+
+config MALI_PLATFORM_NAME
+	depends on MALI_MIDGARD
+	string "Platform name"
+	default "arndale" if PLATFORM_ARNDALE
+	default "arndale_octa" if PLATFORM_ARNDALE_OCTA
+	default "rk" if PLATFORM_FIREFLY
+	default "hisilicon" if PLATFORM_HIKEY960
+	default "hisilicon" if PLATFORM_HIKEY970
+	default "vexpress" if PLATFORM_VEXPRESS
+	default "devicetree"
+	help
+	  Enter the name of the desired platform configuration directory to
+	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
+	  exist.
+
+	  When PLATFORM_CUSTOM is set, this needs to be set manually to
+	  pick up the desired platform files.
+
+# MALI_EXPERT configuration options
+
+menuconfig MALI_EXPERT
+	depends on MALI_MIDGARD
+	bool "Enable Expert Settings"
+	default y
+	help
+	  Enabling this option and modifying the default settings may produce a driver with performance or
+	  other limitations.
+
+config MALI_CORESTACK
+	bool "Support controlling power to the GPU core stack"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Enabling this feature on supported GPUs will let the driver power
+	  the GPU core stack on and off independently, without involving the Power
+	  Domain Controller. This should only be enabled on platforms where
+	  integration of the PDC with the Mali GPU is known to be problematic.
+	  This feature is currently only supported on t-Six and t-HEx GPUs.
+
+	  If unsure, say N.
+
+config MALI_DEBUG
+	bool "Debug build"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default y if DEBUG
+	default n
+	help
+	  Select this option for increased checking and reporting of errors.
+
+config MALI_FENCE_DEBUG
+	bool "Debug sync fence usage"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default y if MALI_DEBUG
+	help
+	  Select this option to enable additional checking and reporting on the
+	  use of sync fences in the Mali driver.
+
+	  This will add a 3s timeout to all sync fence waits in the Mali
+	  driver, so that when work for Mali has been waiting on a sync fence
+	  for a long time a debug message will be printed, detailing what fence
+	  is causing the block, and which dependent Mali atoms are blocked as a
+	  result of this.
+
+	  The timeout can be changed at runtime through the js_soft_timeout
+	  device attribute, where the timeout is specified in milliseconds.
+
+config MALI_ERROR_INJECT
+	bool "Error injection"
+	depends on MALI_MIDGARD && MALI_EXPERT && NO_MALI
+	default n
+	help
+	  Enables insertion of errors to test module failure and recovery mechanisms.
+
+config MALI_ERROR_INJECT_RANDOM
+	bool "Random error injection"
+	depends on MALI_MIDGARD && MALI_EXPERT && NO_MALI && MALI_ERROR_INJECT
+	default n
+	help
+	  Injected errors are random, rather than user-driven.
+
+config MALI_SYSTEM_TRACE
+	bool "Enable system event tracing support"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Choose this option to enable system trace events for each
+	  kbase event. This is typically used for debugging but has
+	  minimal overhead when not in use. Enable only if you know what
+	  you are doing.
+
+config MALI_2MB_ALLOC
+	bool "Attempt to allocate 2MB pages"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  Rather than allocating all GPU memory page-by-page, attempt to
+	  allocate 2MB pages from the kernel. This reduces TLB pressure and
+	  helps to prevent memory fragmentation.
+
+	  If in doubt, say N
+
+config MALI_FPGA_BUS_LOGGER
+	bool "Enable bus log integration"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+
+config MALI_PWRSOFT_765
+	bool "PWRSOFT-765 ticket"
+	depends on MALI_MIDGARD && MALI_EXPERT
+	default n
+	help
+	  PWRSOFT-765 fixes devfreq cooling device issues. However, these fixes
+	  are not yet merged in the mainline kernel, so this option guards the
+	  affected parts of the code.
+
+# Instrumentation options.
+
+# config MALI_JOB_DUMP exists in the Kernel Kconfig but is configured using CINSTR_JOB_DUMP in Mconfig.
+# config MALI_PRFCNT_SET_SECONDARY exists in the Kernel Kconfig but is configured using CINSTR_SECONDARY_HWC in Mconfig.
+
+source "kernel/drivers/gpu/arm/midgard/tests/Mconfig"
diff --git a/drivers/gpu/arm/midgard/backend/gpu/Kbuild b/drivers/gpu/arm/midgard/backend/gpu/Kbuild
new file mode 100644
index 0000000..2dc1455
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/Kbuild
@@ -0,0 +1,60 @@
+#
+# (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+BACKEND += \
+	backend/gpu/mali_kbase_cache_policy_backend.c \
+	backend/gpu/mali_kbase_device_hw.c \
+	backend/gpu/mali_kbase_gpu.c \
+	backend/gpu/mali_kbase_gpuprops_backend.c \
+	backend/gpu/mali_kbase_debug_job_fault_backend.c \
+	backend/gpu/mali_kbase_irq_linux.c \
+	backend/gpu/mali_kbase_instr_backend.c \
+	backend/gpu/mali_kbase_jm_as.c \
+	backend/gpu/mali_kbase_jm_hw.c \
+	backend/gpu/mali_kbase_jm_rb.c \
+	backend/gpu/mali_kbase_js_backend.c \
+	backend/gpu/mali_kbase_mmu_hw_direct.c \
+	backend/gpu/mali_kbase_pm_backend.c \
+	backend/gpu/mali_kbase_pm_driver.c \
+	backend/gpu/mali_kbase_pm_metrics.c \
+	backend/gpu/mali_kbase_pm_ca.c \
+	backend/gpu/mali_kbase_pm_always_on.c \
+	backend/gpu/mali_kbase_pm_coarse_demand.c \
+	backend/gpu/mali_kbase_pm_policy.c \
+	backend/gpu/mali_kbase_time.c
+
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+BACKEND += \
+	backend/gpu/mali_kbase_pm_always_on_demand.c
+endif
+
+ifeq ($(CONFIG_MALI_DEVFREQ),y)
+BACKEND += \
+	backend/gpu/mali_kbase_devfreq.c
+endif
+
+ifeq ($(CONFIG_MALI_NO_MALI),y)
+	# Dummy model
+	BACKEND += backend/gpu/mali_kbase_model_dummy.c
+	BACKEND += backend/gpu/mali_kbase_model_linux.c
+	# HW error simulation
+	BACKEND += backend/gpu/mali_kbase_model_error_generator.c
+endif
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h
new file mode 100644
index 0000000..4a61f96
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend specific configuration
+ */
+
+#ifndef _KBASE_BACKEND_CONFIG_H_
+#define _KBASE_BACKEND_CONFIG_H_
+
+#endif /* _KBASE_BACKEND_CONFIG_H_ */
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
new file mode 100644
index 0000000..7378bfd
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
@@ -0,0 +1,34 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "backend/gpu/mali_kbase_cache_policy_backend.h"
+#include <backend/gpu/mali_kbase_device_internal.h>
+
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+		u32 mode)
+{
+	kbdev->current_gpu_coherency_mode = mode;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG))
+		kbase_reg_write(kbdev, COHERENCY_ENABLE, mode);
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
new file mode 100644
index 0000000..f78ada7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#ifndef _KBASE_CACHE_POLICY_BACKEND_H_
+#define _KBASE_CACHE_POLICY_BACKEND_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_set_coherency_mode() - Set the system coherency mode in the GPU
+ * @kbdev:	Device pointer
+ * @mode:	Coherency mode. COHERENCY_ACE/ACE_LITE
+ */
+void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+		u32 mode);
+
+#endif				/* _KBASE_CACHE_POLICY_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
new file mode 100644
index 0000000..450f6e7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
@@ -0,0 +1,162 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include "mali_kbase_debug_job_fault.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*GPU_CONTROL_REG(r)*/
+static int gpu_control_reg_snapshot[] = {
+	GPU_ID,
+	SHADER_READY_LO,
+	SHADER_READY_HI,
+	TILER_READY_LO,
+	TILER_READY_HI,
+	L2_READY_LO,
+	L2_READY_HI
+};
+
+/* JOB_CONTROL_REG(r) */
+static int job_control_reg_snapshot[] = {
+	JOB_IRQ_MASK,
+	JOB_IRQ_STATUS
+};
+
+/* JOB_SLOT_REG(n,r) */
+static int job_slot_reg_snapshot[] = {
+	JS_HEAD_LO,
+	JS_HEAD_HI,
+	JS_TAIL_LO,
+	JS_TAIL_HI,
+	JS_AFFINITY_LO,
+	JS_AFFINITY_HI,
+	JS_CONFIG,
+	JS_STATUS,
+	JS_HEAD_NEXT_LO,
+	JS_HEAD_NEXT_HI,
+	JS_AFFINITY_NEXT_LO,
+	JS_AFFINITY_NEXT_HI,
+	JS_CONFIG_NEXT
+};
+
+/*MMU_REG(r)*/
+static int mmu_reg_snapshot[] = {
+	MMU_IRQ_MASK,
+	MMU_IRQ_STATUS
+};
+
+/* MMU_AS_REG(n,r) */
+static int as_reg_snapshot[] = {
+	AS_TRANSTAB_LO,
+	AS_TRANSTAB_HI,
+	AS_MEMATTR_LO,
+	AS_MEMATTR_HI,
+	AS_FAULTSTATUS,
+	AS_FAULTADDRESS_LO,
+	AS_FAULTADDRESS_HI,
+	AS_STATUS
+};
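+
+/*
+ * Layout of kctx->reg_dump as built by the functions below: each register
+ * is stored as an (address, value) pair. The init function writes the
+ * register address at an even offset and leaves the following odd offset
+ * for the value that kbase_job_fault_get_reg_snapshot() fills in later.
+ * The dump is terminated by a pair of REGISTER_DUMP_TERMINATION_FLAG
+ * entries.
+ */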
+
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+		int reg_range)
+{
+	int i, j;
+	int offset = 0;
+	int slot_number;
+	int as_number;
+
+	if (kctx->reg_dump == NULL)
+		return false;
+
+	slot_number = kctx->kbdev->gpu_props.num_job_slots;
+	as_number = kctx->kbdev->gpu_props.num_address_spaces;
+
+	/* get the GPU control registers*/
+	for (i = 0; i < ARRAY_SIZE(gpu_control_reg_snapshot); i++) {
+		kctx->reg_dump[offset] =
+				GPU_CONTROL_REG(gpu_control_reg_snapshot[i]);
+		offset += 2;
+	}
+
+	/* get the Job control registers*/
+	for (i = 0; i < ARRAY_SIZE(job_control_reg_snapshot); i++) {
+		kctx->reg_dump[offset] =
+				JOB_CONTROL_REG(job_control_reg_snapshot[i]);
+		offset += 2;
+	}
+
+	/* get the Job Slot registers*/
+	for (j = 0; j < slot_number; j++) {
+		for (i = 0; i < ARRAY_SIZE(job_slot_reg_snapshot); i++) {
+			kctx->reg_dump[offset] =
+			JOB_SLOT_REG(j, job_slot_reg_snapshot[i]);
+			offset += 2;
+		}
+	}
+
+	/* get the MMU registers*/
+	for (i = 0; i < ARRAY_SIZE(mmu_reg_snapshot); i++) {
+		kctx->reg_dump[offset] = MMU_REG(mmu_reg_snapshot[i]);
+		offset += 2;
+	}
+
+	/* get the Address space registers*/
+	for (j = 0; j < as_number; j++) {
+		for (i = 0; i < ARRAY_SIZE(as_reg_snapshot); i++) {
+			kctx->reg_dump[offset] =
+					MMU_AS_REG(j, as_reg_snapshot[i]);
+			offset += 2;
+		}
+	}
+
+	WARN_ON(offset >= (reg_range*2/4));
+
+	/* set the termination flag*/
+	kctx->reg_dump[offset] = REGISTER_DUMP_TERMINATION_FLAG;
+	kctx->reg_dump[offset + 1] = REGISTER_DUMP_TERMINATION_FLAG;
+
+	dev_dbg(kctx->kbdev->dev, "kbase_debug_job_fault_reg_snapshot_init:%d\n",
+			offset);
+
+	return true;
+}
+
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx)
+{
+	int offset = 0;
+
+	if (kctx->reg_dump == NULL)
+		return false;
+
+	while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) {
+		kctx->reg_dump[offset+1] =
+				kbase_reg_read(kctx->kbdev,
+						kctx->reg_dump[offset]);
+		offset += 2;
+	}
+	return true;
+}
+
+
+#endif
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
new file mode 100644
index 0000000..5ade012
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
@@ -0,0 +1,447 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux >= 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp opp
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp_get_opp_count opp_get_opp_count
+#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+#define dev_pm_opp_find_freq_floor opp_find_freq_floor
+#endif /* Linux >= 3.13 */
+
+/**
+ * opp_translate - Translate nominal OPP frequency from devicetree into real
+ *                 frequency and core mask
+ * @kbdev:     Device pointer
+ * @freq:      Nominal frequency
+ * @core_mask: Pointer to u64 to store core mask to
+ *
+ * Return: Real target frequency
+ *
+ * This function will only perform translation if an operating-points-v2-mali
+ * table is present in devicetree. If one is not present then it will return an
+ * untranslated frequency and all cores enabled.
+ */
+static unsigned long opp_translate(struct kbase_device *kbdev,
+		unsigned long freq, u64 *core_mask)
+{
+	int i;
+
+	for (i = 0; i < kbdev->num_opps; i++) {
+		if (kbdev->opp_table[i].opp_freq == freq) {
+			*core_mask = kbdev->opp_table[i].core_mask;
+			return kbdev->opp_table[i].real_freq;
+		}
+	}
+
+	/* Failed to find OPP - return all cores enabled & nominal frequency */
+	*core_mask = kbdev->gpu_props.props.raw_props.shader_present;
+
+	return freq;
+}
+
+static int
+kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
+	unsigned long nominal_freq;
+	unsigned long freq = 0;
+	unsigned long voltage;
+	int err;
+	u64 core_mask;
+
+	freq = *target_freq;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_lock();
+#endif
+	opp = devfreq_recommended_opp(dev, &freq, flags);
+	if (IS_ERR_OR_NULL(opp)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+		rcu_read_unlock();
+#endif
+		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
+		return PTR_ERR(opp);
+	}
+	voltage = dev_pm_opp_get_voltage(opp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_unlock();
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+	dev_pm_opp_put(opp);
+#endif
+
+	nominal_freq = freq;
+
+	/*
+	 * Only update if there is a change of frequency
+	 */
+	if (kbdev->current_nominal_freq == nominal_freq) {
+		*target_freq = nominal_freq;
+		return 0;
+	}
+
+	freq = opp_translate(kbdev, nominal_freq, &core_mask);
+#ifdef CONFIG_REGULATOR
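+	/* Scaling up: raise the voltage before increasing the clock rate */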
+	if (kbdev->regulator && kbdev->current_voltage != voltage
+			&& kbdev->current_freq < freq) {
+		err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
+		if (err) {
+			dev_err(dev, "Failed to increase voltage (%d)\n", err);
+			return err;
+		}
+	}
+#endif
+
+	err = clk_set_rate(kbdev->clock, freq);
+	if (err) {
+		dev_err(dev, "Failed to set clock %lu (target %lu)\n",
+				freq, *target_freq);
+		return err;
+	}
+
+#ifdef CONFIG_REGULATOR
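+	/* Scaling down: lower the voltage only after the clock rate has dropped */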
+	if (kbdev->regulator && kbdev->current_voltage != voltage
+			&& kbdev->current_freq > freq) {
+		err = regulator_set_voltage(kbdev->regulator, voltage, voltage);
+		if (err) {
+			dev_err(dev, "Failed to decrease voltage (%d)\n", err);
+			return err;
+		}
+	}
+#endif
+
+	kbase_devfreq_set_core_mask(kbdev, core_mask);
+
+	*target_freq = nominal_freq;
+	kbdev->current_voltage = voltage;
+	kbdev->current_nominal_freq = nominal_freq;
+	kbdev->current_freq = freq;
+	kbdev->current_core_mask = core_mask;
+
+	KBASE_TLSTREAM_AUX_DEVFREQ_TARGET((u64)nominal_freq);
+
+	return err;
+}
+
+static int
+kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+	*freq = kbdev->current_nominal_freq;
+
+	return 0;
+}
+
+static int
+kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+	struct kbasep_pm_metrics diff;
+
+	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->last_devfreq_metrics, &diff);
+
+	stat->busy_time = diff.time_busy;
+	stat->total_time = diff.time_busy + diff.time_idle;
+	stat->current_frequency = kbdev->current_nominal_freq;
+	stat->private_data = NULL;
+
+	return 0;
+}
+
+static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
+		struct devfreq_dev_profile *dp)
+{
+	int count;
+	int i = 0;
+	unsigned long freq;
+	struct dev_pm_opp *opp;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_lock();
+#endif
+	count = dev_pm_opp_get_opp_count(kbdev->dev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_unlock();
+#endif
+	if (count < 0)
+		return count;
+
+	dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
+				GFP_KERNEL);
+	if (!dp->freq_table)
+		return -ENOMEM;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_lock();
+#endif
+	for (i = 0, freq = ULONG_MAX; i < count; i++, freq--) {
+		opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
+		if (IS_ERR(opp))
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+		dev_pm_opp_put(opp);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+
+		dp->freq_table[i] = freq;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	rcu_read_unlock();
+#endif
+
+	if (count != i)
+		dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d!=%d\n",
+				count, i);
+
+	dp->max_state = i;
+
+	return 0;
+}
+
+static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev)
+{
+	struct devfreq_dev_profile *dp = kbdev->devfreq->profile;
+
+	kfree(dp->freq_table);
+}
+
+static void kbase_devfreq_exit(struct device *dev)
+{
+	struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+	kbase_devfreq_term_freq_table(kbdev);
+}
+
+static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+{
+	struct device_node *opp_node = of_parse_phandle(kbdev->dev->of_node,
+			"operating-points-v2", 0);
+	struct device_node *node;
+	int i = 0;
+	int count;
+	u64 shader_present = kbdev->gpu_props.props.raw_props.shader_present;
+
+	if (!opp_node)
+		return 0;
+	if (!of_device_is_compatible(opp_node, "operating-points-v2-mali"))
+		return 0;
+
+	count = dev_pm_opp_get_opp_count(kbdev->dev);
+	kbdev->opp_table = kmalloc_array(count,
+			sizeof(struct kbase_devfreq_opp), GFP_KERNEL);
+	if (!kbdev->opp_table)
+		return -ENOMEM;
+
+	for_each_available_child_of_node(opp_node, node) {
+		u64 core_mask;
+		u64 opp_freq, real_freq;
+		const void *core_count_p;
+
+		if (of_property_read_u64(node, "opp-hz", &opp_freq)) {
+			dev_warn(kbdev->dev, "OPP is missing required opp-hz property\n");
+			continue;
+		}
+		if (of_property_read_u64(node, "opp-hz-real", &real_freq))
+			real_freq = opp_freq;
+		if (of_property_read_u64(node, "opp-core-mask", &core_mask))
+			core_mask = shader_present;
+		if (core_mask != shader_present &&
+				(kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11056) ||
+				 corestack_driver_control ||
+				 platform_power_down_only)) {
+
+			dev_warn(kbdev->dev, "Ignoring OPP %llu - Dynamic Core Scaling not supported on this GPU\n",
+					opp_freq);
+			continue;
+		}
+
+		core_count_p = of_get_property(node, "opp-core-count", NULL);
+		if (core_count_p) {
+			u64 remaining_core_mask =
+				kbdev->gpu_props.props.raw_props.shader_present;
+			int core_count = be32_to_cpup(core_count_p);
+
+			core_mask = 0;
+
+			for (; core_count > 0; core_count--) {
+				int core = ffs(remaining_core_mask);
+
+				if (!core) {
+					dev_err(kbdev->dev, "OPP has more cores than GPU\n");
+					return -ENODEV;
+				}
+
+				core_mask |= (1ull << (core-1));
+				remaining_core_mask &= ~(1ull << (core-1));
+			}
+		}
+
+		if (!core_mask) {
+			dev_err(kbdev->dev, "OPP has invalid core mask of 0\n");
+			return -ENODEV;
+		}
+
+		kbdev->opp_table[i].opp_freq = opp_freq;
+		kbdev->opp_table[i].real_freq = real_freq;
+		kbdev->opp_table[i].core_mask = core_mask;
+
+		dev_info(kbdev->dev, "OPP %d : opp_freq=%llu real_freq=%llu core_mask=%llx\n",
+				i, opp_freq, real_freq, core_mask);
+
+		i++;
+	}
+
+	kbdev->num_opps = i;
+
+	return 0;
+}
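+
+/*
+ * Illustrative example of the devicetree layout parsed by
+ * kbase_devfreq_init_core_mask_table() above. The frequencies and core
+ * masks are placeholders, not values from any real platform:
+ *
+ *	gpu_opp_table: opp-table {
+ *		compatible = "operating-points-v2-mali";
+ *
+ *		opp-500000000 {
+ *			opp-hz = /bits/ 64 <500000000>;
+ *			opp-hz-real = /bits/ 64 <500000000>;
+ *			opp-core-mask = /bits/ 64 <0xf>;
+ *		};
+ *		opp-250000000 {
+ *			opp-hz = /bits/ 64 <250000000>;
+ *			opp-core-count = <2>;
+ *		};
+ *	};
+ */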
+
+int kbase_devfreq_init(struct kbase_device *kbdev)
+{
+	struct devfreq_dev_profile *dp;
+	int err;
+
+	if (!kbdev->clock) {
+		dev_err(kbdev->dev, "Clock not available for devfreq\n");
+		return -ENODEV;
+	}
+
+	kbdev->current_freq = clk_get_rate(kbdev->clock);
+	kbdev->current_nominal_freq = kbdev->current_freq;
+
+	dp = &kbdev->devfreq_profile;
+
+	dp->initial_freq = kbdev->current_freq;
+	dp->polling_ms = 100;
+	dp->target = kbase_devfreq_target;
+	dp->get_dev_status = kbase_devfreq_status;
+	dp->get_cur_freq = kbase_devfreq_cur_freq;
+	dp->exit = kbase_devfreq_exit;
+
+	if (kbase_devfreq_init_freq_table(kbdev, dp))
+		return -EFAULT;
+
+	if (dp->max_state > 0) {
+		/* Record the maximum frequency possible */
+		kbdev->gpu_props.props.core_props.gpu_freq_khz_max =
+			dp->freq_table[0] / 1000;
+	}
+
+	err = kbase_devfreq_init_core_mask_table(kbdev);
+	if (err)
+		return err;
+
+	kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
+				"simple_ondemand", NULL);
+	if (IS_ERR(kbdev->devfreq)) {
+		kfree(dp->freq_table);
+		return PTR_ERR(kbdev->devfreq);
+	}
+
+	/* devfreq_add_device only copies a few of kbdev->dev's fields, so
+	 * set drvdata explicitly so IPA models can access kbdev. */
+	dev_set_drvdata(&kbdev->devfreq->dev, kbdev);
+
+	err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
+	if (err) {
+		dev_err(kbdev->dev,
+			"Failed to register OPP notifier (%d)\n", err);
+		goto opp_notifier_failed;
+	}
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+	err = kbase_ipa_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "IPA initialization failed\n");
+		goto cooling_failed;
+	}
+
+	kbdev->devfreq_cooling = of_devfreq_cooling_register_power(
+			kbdev->dev->of_node,
+			kbdev->devfreq,
+			&kbase_ipa_power_model_ops);
+	if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
+		err = PTR_ERR(kbdev->devfreq_cooling);
+		dev_err(kbdev->dev,
+			"Failed to register cooling device (%d)\n",
+			err);
+		goto cooling_failed;
+	}
+#endif
+
+	return 0;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+cooling_failed:
+	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+opp_notifier_failed:
+	if (devfreq_remove_device(kbdev->devfreq))
+		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+	else
+		kbdev->devfreq = NULL;
+
+	return err;
+}
+
+void kbase_devfreq_term(struct kbase_device *kbdev)
+{
+	int err;
+
+	dev_dbg(kbdev->dev, "Term Mali devfreq\n");
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+	if (kbdev->devfreq_cooling)
+		devfreq_cooling_unregister(kbdev->devfreq_cooling);
+
+	kbase_ipa_term(kbdev);
+#endif
+
+	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+
+	err = devfreq_remove_device(kbdev->devfreq);
+	if (err)
+		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+	else
+		kbdev->devfreq = NULL;
+
+	kfree(kbdev->opp_table);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h
new file mode 100644
index 0000000..0634038
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h
@@ -0,0 +1,29 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _BASE_DEVFREQ_H_
+#define _BASE_DEVFREQ_H_
+
+int kbase_devfreq_init(struct kbase_device *kbdev);
+void kbase_devfreq_term(struct kbase_device *kbdev);
+
+#endif /* _BASE_DEVFREQ_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
new file mode 100644
index 0000000..5dd059f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
@@ -0,0 +1,340 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ *
+ */
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <mali_kbase_config_defaults.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+
+#ifdef CONFIG_DEBUG_FS
+
+
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
+{
+	struct kbase_io_access *old_buf;
+	struct kbase_io_access *new_buf;
+	unsigned long flags;
+
+	if (!new_size)
+		goto out_err; /* The new size must not be 0 */
+
+	new_buf = vmalloc(new_size * sizeof(*h->buf));
+	if (!new_buf)
+		goto out_err;
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	old_buf = h->buf;
+
+	/* Note: we won't bother with copying the old data over. The dumping
+	 * logic wouldn't work properly as it relies on 'count' both as a
+	 * counter and as an index to the buffer which would have changed with
+	 * the new array. This is a corner case that we don't need to support.
+	 */
+	h->count = 0;
+	h->size = new_size;
+	h->buf = new_buf;
+
+	spin_unlock_irqrestore(&h->lock, flags);
+
+	vfree(old_buf);
+
+	return 0;
+
+out_err:
+	return -1;
+}
+
+
+int kbase_io_history_init(struct kbase_io_history *h, u16 n)
+{
+	h->enabled = false;
+	spin_lock_init(&h->lock);
+	h->count = 0;
+	h->size = 0;
+	h->buf = NULL;
+	if (kbase_io_history_resize(h, n))
+		return -1;
+
+	return 0;
+}
+
+
+void kbase_io_history_term(struct kbase_io_history *h)
+{
+	vfree(h->buf);
+	h->buf = NULL;
+}
+
+
+/* kbase_io_history_add - add new entry to the register access history
+ *
+ * @h: Pointer to the history data structure
+ * @addr: Register address
+ * @value: The value that is either read from or written to the register
+ * @write: 1 if it's a register write, 0 if it's a read
+ */
+static void kbase_io_history_add(struct kbase_io_history *h,
+		void __iomem const *addr, u32 value, u8 write)
+{
+	struct kbase_io_access *io;
+	unsigned long flags;
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	io = &h->buf[h->count % h->size];
+	io->addr = (uintptr_t)addr | write;
+	io->value = value;
+	++h->count;
+	/* If count overflows, move the index by the buffer size so the entire
+	 * buffer will still be dumped later */
+	if (unlikely(!h->count))
+		h->count = h->size;
+
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+
+void kbase_io_history_dump(struct kbase_device *kbdev)
+{
+	struct kbase_io_history *const h = &kbdev->io_history;
+	u16 i;
+	size_t iters;
+	unsigned long flags;
+
+	if (!unlikely(h->enabled))
+		return;
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	dev_err(kbdev->dev, "Register IO History:");
+	iters = (h->size > h->count) ? h->count : h->size;
+	dev_err(kbdev->dev, "Last %zu register accesses of %zu total:\n", iters,
+			h->count);
+	for (i = 0; i < iters; ++i) {
+		struct kbase_io_access *io =
+			&h->buf[(h->count - iters + i) % h->size];
+		char const access = (io->addr & 1) ? 'w' : 'r';
+
+		dev_err(kbdev->dev, "%6i: %c: reg 0x%p val %08x\n", i, access,
+				(void *)(io->addr & ~0x1), io->value);
+	}
+
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
+{
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+	writel(value, kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+	if (unlikely(kbdev->io_history.enabled))
+		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+				value, 1);
+#endif /* CONFIG_DEBUG_FS */
+	dev_dbg(kbdev->dev, "w: reg %08x val %08x", offset, value);
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_write);
+
+u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
+{
+	u32 val;
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+
+	val = readl(kbdev->reg + offset);
+
+#ifdef CONFIG_DEBUG_FS
+	if (unlikely(kbdev->io_history.enabled))
+		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+				val, 0);
+#endif /* CONFIG_DEBUG_FS */
+	dev_dbg(kbdev->dev, "r: reg %08x val %08x", offset, val);
+
+	return val;
+}
+
+KBASE_EXPORT_TEST_API(kbase_reg_read);
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
+
+/**
+ * kbase_report_gpu_fault - Report a GPU fault.
+ * @kbdev:    Kbase device pointer
+ * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS
+ *            was also set
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using dev_warn().
+ */
+static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
+{
+	u32 status;
+	u64 address;
+
+	status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS));
+	address = (u64) kbase_reg_read(kbdev,
+			GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
+	address |= kbase_reg_read(kbdev,
+			GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
+
+	dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
+			status & 0xFF,
+			kbase_exception_name(kbdev, status),
+			address);
+	if (multiple)
+		dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
+}
+
+void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev)
+{
+	u32 irq_mask;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbdev->cache_clean_in_progress) {
+		/* If this is called while another clean is in progress, we
+		 * can't rely on the current one to flush any new changes in
+		 * the cache. Instead, trigger another cache clean immediately
+		 * after this one finishes.
+		 */
+		kbdev->cache_clean_queued = true;
+		return;
+	}
+
+	/* Enable interrupt */
+	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask | CLEAN_CACHES_COMPLETED);
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CLEAN_INV_CACHES);
+
+	kbdev->cache_clean_in_progress = true;
+}
+
+void kbase_gpu_start_cache_clean(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_gpu_start_cache_clean_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+static void kbase_clean_caches_done(struct kbase_device *kbdev)
+{
+	u32 irq_mask;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (kbdev->cache_clean_queued) {
+		kbdev->cache_clean_queued = false;
+
+		KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+				GPU_COMMAND_CLEAN_INV_CACHES);
+	} else {
+		/* Disable interrupt */
+		irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask & ~CLEAN_CACHES_COMPLETED);
+
+		kbdev->cache_clean_in_progress = false;
+
+		wake_up(&kbdev->cache_clean_wait);
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	while (kbdev->cache_clean_in_progress) {
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		wait_event_interruptible(kbdev->cache_clean_wait,
+				!kbdev->cache_clean_in_progress);
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
+{
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
+	if (val & GPU_FAULT)
+		kbase_report_gpu_fault(kbdev, val & MULTIPLE_GPU_FAULTS);
+
+	if (val & RESET_COMPLETED)
+		kbase_pm_reset_done(kbdev);
+
+	if (val & PRFCNT_SAMPLE_COMPLETED)
+		kbase_instr_hwcnt_sample_done(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
+
+	/* kbase_pm_check_transitions (called by kbase_pm_power_changed) must
+	 * be called after the IRQ has been cleared. This is because it might
+	 * trigger further power transitions and we don't want to miss the
+	 * interrupt raised to notify us that these further transitions have
+	 * finished. The same applies to kbase_clean_caches_done() - if another
+	 * clean was queued, it might trigger another clean, which might
+	 * generate another interrupt which shouldn't be missed.
+	 */
+
+	if (val & CLEAN_CACHES_COMPLETED)
+		kbase_clean_caches_done(kbdev);
+
+	/* When 'platform_power_down_only' is enabled, the L2 cache is not
+	 * powered down, but flushed before the GPU power down (which is done
+	 * by the platform code). So the L2 state machine requests a cache
+	 * flush. And when that flush completes, the L2 state machine needs to
+	 * be re-invoked to proceed with the GPU power down.
+	 */
+	if (val & POWER_CHANGED_ALL ||
+			(platform_power_down_only && (val & CLEAN_CACHES_COMPLETED)))
+		kbase_pm_power_changed(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
new file mode 100644
index 0000000..7886e96
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
@@ -0,0 +1,89 @@
+/*
+ *
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Backend-specific HW access device APIs
+ */
+
+#ifndef _KBASE_DEVICE_INTERNAL_H_
+#define _KBASE_DEVICE_INTERNAL_H_
+
+/**
+ * kbase_reg_write - write to GPU register
+ * @kbdev:  Kbase device pointer
+ * @offset: Offset of register
+ * @value:  Value to write
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
+ */
+void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value);
+
+/**
+ * kbase_reg_read - read from GPU register
+ * @kbdev:  Kbase device pointer
+ * @offset: Offset of register
+ *
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
+ *
+ * Return: Value in desired register
+ */
+u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset);
+
+/**
+ * kbase_gpu_start_cache_clean - Start a cache clean
+ * @kbdev: Kbase device
+ *
+ * Issue a cache clean and invalidate command to hardware. This function will
+ * take hwaccess_lock.
+ */
+void kbase_gpu_start_cache_clean(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_start_cache_clean_nolock - Start a cache clean
+ * @kbdev: Kbase device
+ *
+ * Issue a cache clean and invalidate command to hardware. hwaccess_lock
+ * must be held by the caller.
+ */
+void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_wait_cache_clean - Wait for cache cleaning to finish
+ * @kbdev: Kbase device
+ *
+ * This function will take hwaccess_lock, and may sleep.
+ */
+void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_interrupt - GPU interrupt handler
+ * @kbdev: Kbase device pointer
+ * @val:   The value of the GPU IRQ status register which triggered the call
+ *
+ * This function is called from the interrupt handler when a GPU irq is to be
+ * handled.
+ */
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
+
+#endif /* _KBASE_DEVICE_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c
new file mode 100644
index 0000000..995d34d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpu.c
@@ -0,0 +1,141 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend APIs
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_backend.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+int kbase_backend_early_init(struct kbase_device *kbdev)
+{
+	int err;
+
+	err = kbasep_platform_device_init(kbdev);
+	if (err)
+		return err;
+
+	err = kbase_pm_runtime_init(kbdev);
+	if (err)
+		goto fail_runtime_pm;
+
+	/* Ensure we can access the GPU registers */
+	kbase_pm_register_access_enable(kbdev);
+
+	/* Find out GPU properties based on the GPU feature registers */
+	kbase_gpuprops_set(kbdev);
+
+	/* We're done accessing the GPU registers for now. */
+	kbase_pm_register_access_disable(kbdev);
+
+	err = kbase_install_interrupts(kbdev);
+	if (err)
+		goto fail_interrupts;
+
+	err = kbase_hwaccess_pm_early_init(kbdev);
+	if (err)
+		goto fail_pm;
+
+	return 0;
+
+fail_pm:
+	kbase_release_interrupts(kbdev);
+fail_interrupts:
+	kbase_pm_runtime_term(kbdev);
+fail_runtime_pm:
+	kbasep_platform_device_term(kbdev);
+
+	return err;
+}
+
+void kbase_backend_early_term(struct kbase_device *kbdev)
+{
+	kbase_hwaccess_pm_early_term(kbdev);
+	kbase_release_interrupts(kbdev);
+	kbase_pm_runtime_term(kbdev);
+	kbasep_platform_device_term(kbdev);
+}
+
+int kbase_backend_late_init(struct kbase_device *kbdev)
+{
+	int err;
+
+	err = kbase_hwaccess_pm_late_init(kbdev);
+	if (err)
+		return err;
+
+	err = kbase_hwaccess_pm_powerup(kbdev, PM_HW_ISSUES_DETECT);
+	if (err)
+		goto fail_pm_powerup;
+
+	err = kbase_backend_timer_init(kbdev);
+	if (err)
+		goto fail_timer;
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+	if (kbasep_common_test_interrupt_handlers(kbdev) != 0) {
+		dev_err(kbdev->dev, "Interrupt assigment check failed.\n");
+		err = -EINVAL;
+		goto fail_interrupt_test;
+	}
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+	err = kbase_job_slot_init(kbdev);
+	if (err)
+		goto fail_job_slot;
+
+	init_waitqueue_head(&kbdev->hwaccess.backend.reset_wait);
+
+	return 0;
+
+fail_job_slot:
+
+#ifdef CONFIG_MALI_DEBUG
+#ifndef CONFIG_MALI_NO_MALI
+fail_interrupt_test:
+#endif /* !CONFIG_MALI_NO_MALI */
+#endif /* CONFIG_MALI_DEBUG */
+
+	kbase_backend_timer_term(kbdev);
+fail_timer:
+	kbase_hwaccess_pm_halt(kbdev);
+fail_pm_powerup:
+	kbase_hwaccess_pm_late_term(kbdev);
+
+	return err;
+}
+
+void kbase_backend_late_term(struct kbase_device *kbdev)
+{
+	kbase_job_slot_halt(kbdev);
+	kbase_job_slot_term(kbdev);
+	kbase_backend_timer_term(kbdev);
+	kbase_hwaccess_pm_halt(kbdev);
+	kbase_hwaccess_pm_late_term(kbdev);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
new file mode 100644
index 0000000..39773e6
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel property query backend APIs
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump)
+{
+	int i;
+
+	/* Fill regdump with the content of the relevant registers */
+	regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
+
+	regdump->l2_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_FEATURES));
+	regdump->core_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(CORE_FEATURES));
+	regdump->tiler_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_FEATURES));
+	regdump->mem_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(MEM_FEATURES));
+	regdump->mmu_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(MMU_FEATURES));
+	regdump->as_present = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(AS_PRESENT));
+	regdump->js_present = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(JS_PRESENT));
+
+	for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+		regdump->js_features[i] = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(JS_FEATURES_REG(i)));
+
+	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+		regdump->texture_features[i] = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)));
+
+	regdump->thread_max_threads = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_MAX_THREADS));
+	regdump->thread_max_workgroup_size = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE));
+	regdump->thread_max_barrier_size = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE));
+	regdump->thread_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_FEATURES));
+	regdump->thread_tls_alloc = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(THREAD_TLS_ALLOC));
+
+	regdump->shader_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_PRESENT_LO));
+	regdump->shader_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_PRESENT_HI));
+
+	regdump->tiler_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_PRESENT_LO));
+	regdump->tiler_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_PRESENT_HI));
+
+	regdump->l2_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_PRESENT_LO));
+	regdump->l2_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_PRESENT_HI));
+
+	regdump->stack_present_lo = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(STACK_PRESENT_LO));
+	regdump->stack_present_hi = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(STACK_PRESENT_HI));
+}
+
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump)
+{
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG)) {
+		/* Ensure we can access the GPU registers */
+		kbase_pm_register_access_enable(kbdev);
+
+		regdump->coherency_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(COHERENCY_FEATURES));
+
+		/* We're done accessing the GPU registers for now. */
+		kbase_pm_register_access_disable(kbdev);
+	} else {
+		/* Pre COHERENCY_FEATURES we only supported ACE_LITE */
+		regdump->coherency_features =
+				COHERENCY_FEATURE_BIT(COHERENCY_NONE) |
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+	}
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
new file mode 100644
index 0000000..79c04d9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
@@ -0,0 +1,409 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * GPU backend instrumentation APIs.
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_instr_internal.h>
+
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					struct kbase_instr_hwcnt_enable *enable)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+	u32 irq_mask;
+	u32 prfcnt_config;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* alignment failure */
+	if ((enable->dump_buffer == 0ULL) || (enable->dump_buffer & (2048 - 1)))
+		goto out_err;
+
+	/* Override core availability policy to ensure all cores are available
+	 */
+	kbase_pm_ca_instr_enable(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
+		/* Instrumentation is already enabled */
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		goto out_err;
+	}
+
+	/* Enable interrupt */
+	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask |
+						PRFCNT_SAMPLE_COMPLETED);
+
+	/* In use, this context is the owner */
+	kbdev->hwcnt.kctx = kctx;
+	/* Remember the dump address so we can reprogram it later */
+	kbdev->hwcnt.addr = enable->dump_buffer;
+	kbdev->hwcnt.addr_bytes = enable->dump_buffer_bytes;
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	/* Configure */
+	prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
+	if (enable->use_secondary) {
+		u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+		u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID)
+			>> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+		int arch_v6 = GPU_ID_IS_NEW_FORMAT(product_id);
+
+		if (arch_v6)
+			prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
+	}
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+			prfcnt_config | PRFCNT_CONFIG_MODE_OFF);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+					enable->dump_buffer & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+					enable->dump_buffer >> 32);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
+					enable->jm_bm);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
+					enable->shader_bm);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
+					enable->mmu_l2_bm);
+	/* Due to PRLAM-8186 we need to disable the Tiler before we enable the
+	 * HW counter dump. */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0);
+	else
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+							enable->tiler_bm);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+			prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL);
+
+	/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
+							enable->tiler_bm);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	kbdev->hwcnt.backend.triggered = 1;
+	wake_up(&kbdev->hwcnt.backend.wait);
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	err = 0;
+
+	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
+	return err;
+ out_err:
+	return err;
+}
+
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
+{
+	unsigned long flags, pm_flags;
+	int err = -EINVAL;
+	u32 irq_mask;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	while (1) {
+		spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) {
+			/* Instrumentation is not enabled */
+			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+			goto out;
+		}
+
+		if (kbdev->hwcnt.kctx != kctx) {
+			/* Instrumentation has been set up for another context */
+			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+			goto out;
+		}
+
+		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE)
+			break;
+
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+		/* Ongoing dump/setup - wait for its completion */
+		wait_event(kbdev->hwcnt.backend.wait,
+					kbdev->hwcnt.backend.triggered != 0);
+	}
+
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+	kbdev->hwcnt.backend.triggered = 0;
+
+	/* Disable interrupt */
+	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask & ~PRFCNT_SAMPLE_COMPLETED);
+
+	/* Disable the counters */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0);
+
+	kbdev->hwcnt.kctx = NULL;
+	kbdev->hwcnt.addr = 0ULL;
+	kbdev->hwcnt.addr_bytes = 0ULL;
+
+	kbase_pm_ca_instr_disable(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p",
+									kctx);
+
+	err = 0;
+
+ out:
+	return err;
+}
+
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.kctx != kctx) {
+		/* The instrumentation has been set up for another context */
+		goto unlock;
+	}
+
+	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) {
+		/* HW counters are disabled or another dump is ongoing, or we're
+		 * resetting */
+		goto unlock;
+	}
+
+	kbdev->hwcnt.backend.triggered = 0;
+
+	/* Mark that we're dumping - the PF handler can signal that we faulted
+	 */
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;
+
+	/* Reconfigure the dump address */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+					kbdev->hwcnt.addr & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
+					kbdev->hwcnt.addr >> 32);
+
+	/* Start dumping */
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL,
+					kbdev->hwcnt.addr, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_PRFCNT_SAMPLE);
+
+	dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx);
+
+	err = 0;
+
+ unlock:
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
+
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+						bool * const success)
+{
+	unsigned long flags;
+	bool complete = false;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE) {
+		*success = true;
+		complete = true;
+	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+		*success = false;
+		complete = true;
+		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	return complete;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete);
+
+void kbasep_cache_clean_worker(struct work_struct *data)
+{
+	struct kbase_device *kbdev;
+	unsigned long flags, pm_flags;
+
+	kbdev = container_of(data, struct kbase_device,
+						hwcnt.backend.cache_clean_work);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	/* Clean and invalidate the caches so we're sure the mmu tables for the
+	 * dump buffer is valid.
+	 */
+	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+					KBASE_INSTR_STATE_REQUEST_CLEAN);
+	kbase_gpu_start_cache_clean_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+
+	kbase_gpu_wait_cache_clean(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+					KBASE_INSTR_STATE_REQUEST_CLEAN);
+	/* All finished and idle */
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	kbdev->hwcnt.backend.triggered = 1;
+	wake_up(&kbdev->hwcnt.backend.wait);
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+		kbdev->hwcnt.backend.triggered = 1;
+		wake_up(&kbdev->hwcnt.backend.wait);
+	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) {
+		if (kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+			/* All finished and idle */
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+			kbdev->hwcnt.backend.triggered = 1;
+			wake_up(&kbdev->hwcnt.backend.wait);
+		} else {
+			int ret;
+			/* Always clean and invalidate the cache after a successful dump
+			 */
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+			ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+						&kbdev->hwcnt.backend.cache_clean_work);
+			KBASE_DEBUG_ASSERT(ret);
+		}
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	unsigned long flags;
+	int err;
+
+	/* Wait for dump & cache clean to complete */
+	wait_event(kbdev->hwcnt.backend.wait,
+					kbdev->hwcnt.backend.triggered != 0);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+		err = -EINVAL;
+		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	} else {
+		/* Dump done */
+		KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+							KBASE_INSTR_STATE_IDLE);
+		err = 0;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+
+	return err;
+}
+
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	/* Check it's the context previously set up and we're not already
+	 * dumping */
+	if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
+							KBASE_INSTR_STATE_IDLE)
+		goto out;
+
+	/* Clear the counters */
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+						GPU_COMMAND_PRFCNT_CLEAR);
+
+	err = 0;
+
+out:
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+	return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear);
+
+int kbase_instr_backend_init(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+
+	init_waitqueue_head(&kbdev->hwcnt.backend.wait);
+	INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work,
+						kbasep_cache_clean_worker);
+	kbdev->hwcnt.backend.triggered = 0;
+
+	kbdev->hwcnt.backend.cache_clean_wq =
+			alloc_workqueue("Mali cache cleaning workqueue", 0, 1);
+	if (NULL == kbdev->hwcnt.backend.cache_clean_wq)
+		ret = -EINVAL;
+
+	return ret;
+}
+
+void kbase_instr_backend_term(struct kbase_device *kbdev)
+{
+	destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
new file mode 100644
index 0000000..c9fb759
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend-specific instrumentation definitions
+ */
+
+#ifndef _KBASE_INSTR_DEFS_H_
+#define _KBASE_INSTR_DEFS_H_
+
+/*
+ * Instrumentation State Machine States
+ */
+enum kbase_instr_state {
+	/* State where instrumentation is not active */
+	KBASE_INSTR_STATE_DISABLED = 0,
+	/* State machine is active and ready for a command. */
+	KBASE_INSTR_STATE_IDLE,
+	/* Hardware is currently dumping a frame. */
+	KBASE_INSTR_STATE_DUMPING,
+	/* We've requested a clean to occur on a workqueue */
+	KBASE_INSTR_STATE_REQUEST_CLEAN,
+	/* An error has occurred during DUMPING (page fault). */
+	KBASE_INSTR_STATE_FAULT
+};
+
+/* Structure used for instrumentation and HW counters dumping */
+struct kbase_instr_backend {
+	wait_queue_head_t wait;
+	int triggered;
+
+	enum kbase_instr_state state;
+	struct workqueue_struct *cache_clean_wq;
+	struct work_struct  cache_clean_work;
+};
+
+#endif /* _KBASE_INSTR_DEFS_H_ */
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
new file mode 100644
index 0000000..2254b9f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Backend-specific HW access instrumentation APIs
+ */
+
+#ifndef _KBASE_INSTR_INTERNAL_H_
+#define _KBASE_INSTR_INTERNAL_H_
+
+/**
+ * kbasep_cache_clean_worker() - Workqueue for handling cache cleaning
+ * @data: a &struct work_struct
+ */
+void kbasep_cache_clean_worker(struct work_struct *data);
+
+/**
+ * kbase_instr_hwcnt_sample_done() - Dump complete interrupt received
+ * @kbdev: Kbase device
+ */
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev);
+
+#endif /* _KBASE_INSTR_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
new file mode 100644
index 0000000..ca3c048
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend specific IRQ APIs
+ */
+
+#ifndef _KBASE_IRQ_INTERNAL_H_
+#define _KBASE_IRQ_INTERNAL_H_
+
+int kbase_install_interrupts(struct kbase_device *kbdev);
+
+void kbase_release_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_synchronize_irqs - Ensure that all IRQ handlers have completed
+ *                          execution
+ * @kbdev: The kbase device
+ */
+void kbase_synchronize_irqs(struct kbase_device *kbdev);
+
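+/**
+ * kbasep_common_test_interrupt_handlers - Check that the GPU interrupt lines
+ *                                         reach the CPU
+ * @kbdev: The kbase device
+ *
+ * Return: 0 on success, error code otherwise
+ */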
+int kbasep_common_test_interrupt_handlers(
+					struct kbase_device * const kbdev);
+
+#endif /* _KBASE_IRQ_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
new file mode 100644
index 0000000..dd0279a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
@@ -0,0 +1,474 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+
+#include <linux/interrupt.h>
+
+#if !defined(CONFIG_MALI_NO_MALI)
+
+/* GPU IRQ Tags */
+#define	JOB_IRQ_TAG	0
+#define MMU_IRQ_TAG	1
+#define GPU_IRQ_TAG	2
+
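+/*
+ * The IRQ tag is stored in the low bits of the kbase_device pointer passed to
+ * request_irq() as the handler's data, which is safe because the structure is
+ * at least 4-byte aligned. kbase_untag() recovers the original pointer by
+ * masking those bits off again.
+ */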
+static void *kbase_tag(void *ptr, u32 tag)
+{
+	return (void *)(((uintptr_t) ptr) | tag);
+}
+
+static void *kbase_untag(void *ptr)
+{
+	return (void *)(((uintptr_t) ptr) & ~3);
+}
+
+static irqreturn_t kbase_job_irq_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+									flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS));
+
+#ifdef CONFIG_MALI_DEBUG
+	if (!kbdev->pm.backend.driver_ready_for_irqs)
+		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+				__func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!val)
+		return IRQ_NONE;
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbase_job_done(kbdev, val);
+
+	return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
+
+static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+									flags);
+		return IRQ_NONE;
+	}
+
+	atomic_inc(&kbdev->faults_pending);
+
+	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS));
+
+#ifdef CONFIG_MALI_DEBUG
+	if (!kbdev->pm.backend.driver_ready_for_irqs)
+		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+				__func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!val) {
+		atomic_dec(&kbdev->faults_pending);
+		return IRQ_NONE;
+	}
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbase_mmu_interrupt(kbdev, val);
+
+	atomic_dec(&kbdev->faults_pending);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+									flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
+
+#ifdef CONFIG_MALI_DEBUG
+	if (!kbdev->pm.backend.driver_ready_for_irqs)
+		dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+				__func__, irq, val);
+#endif /* CONFIG_MALI_DEBUG */
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!val)
+		return IRQ_NONE;
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbase_gpu_interrupt(kbdev, val);
+
+	return IRQ_HANDLED;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_irq_handler);
+
+static irq_handler_t kbase_handler_table[] = {
+	[JOB_IRQ_TAG] = kbase_job_irq_handler,
+	[MMU_IRQ_TAG] = kbase_mmu_irq_handler,
+	[GPU_IRQ_TAG] = kbase_gpu_irq_handler,
+};
+
+#ifdef CONFIG_MALI_DEBUG
+#define  JOB_IRQ_HANDLER JOB_IRQ_TAG
+#define  MMU_IRQ_HANDLER MMU_IRQ_TAG
+#define  GPU_IRQ_HANDLER GPU_IRQ_TAG
+
+/**
+ * kbase_set_custom_irq_handler - Set a custom IRQ handler
+ * @kbdev: Device for which the handler is to be registered
+ * @custom_handler: Handler to be registered
+ * @irq_type: Interrupt type
+ *
+ * Registers the given interrupt handler for the requested interrupt type.
+ * If no custom handler is specified, the default handler is registered
+ * instead.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
+					irq_handler_t custom_handler,
+					int irq_type)
+{
+	int result = 0;
+	irq_handler_t requested_irq_handler = NULL;
+
+	KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
+						(GPU_IRQ_HANDLER >= irq_type));
+
+	/* Release previous handler */
+	if (kbdev->irqs[irq_type].irq)
+		free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));
+
+	requested_irq_handler = (NULL != custom_handler) ? custom_handler :
+						kbase_handler_table[irq_type];
+
+	if (0 != request_irq(kbdev->irqs[irq_type].irq,
+			requested_irq_handler,
+			kbdev->irqs[irq_type].flags | IRQF_SHARED,
+			dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
+		result = -EINVAL;
+		dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+					kbdev->irqs[irq_type].irq, irq_type);
+#ifdef CONFIG_SPARSE_IRQ
+		dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+	}
+
+	return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
+
+/* Test correct interrupt assignment and reception by the CPU */
+struct kbasep_irq_test {
+	struct hrtimer timer;
+	wait_queue_head_t wait;
+	int triggered;
+	u32 timeout;
+};
+
+static struct kbasep_irq_test kbasep_irq_test_data;
+
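+/* Timeout, in milliseconds, for the interrupt self-test below */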
+#define IRQ_TEST_TIMEOUT    500
+
+static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+									flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS));
+
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!val)
+		return IRQ_NONE;
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbasep_irq_test_data.triggered = 1;
+	wake_up(&kbasep_irq_test_data.wait);
+
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* GPU is turned off - IRQ is not for us */
+		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+									flags);
+		return IRQ_NONE;
+	}
+
+	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS));
+
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (!val)
+		return IRQ_NONE;
+
+	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+
+	kbasep_irq_test_data.triggered = 1;
+	wake_up(&kbasep_irq_test_data.wait);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val);
+
+	return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
+{
+	struct kbasep_irq_test *test_data = container_of(timer,
+						struct kbasep_irq_test, timer);
+
+	test_data->timeout = 1;
+	test_data->triggered = 1;
+	wake_up(&test_data->wait);
+	return HRTIMER_NORESTART;
+}
+
+static int kbasep_common_test_interrupt(
+				struct kbase_device * const kbdev, u32 tag)
+{
+	int err = 0;
+	irq_handler_t test_handler;
+
+	u32 old_mask_val;
+	u16 mask_offset;
+	u16 rawstat_offset;
+
+	switch (tag) {
+	case JOB_IRQ_TAG:
+		test_handler = kbase_job_irq_test_handler;
+		rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
+		mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
+		break;
+	case MMU_IRQ_TAG:
+		test_handler = kbase_mmu_irq_test_handler;
+		rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
+		mask_offset = MMU_REG(MMU_IRQ_MASK);
+		break;
+	case GPU_IRQ_TAG:
+		/* already tested by pm_driver - bail out */
+	default:
+		return 0;
+	}
+
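+	/*
+	 * Test sequence: mask the interrupt, swap in the test handler, raise
+	 * the interrupt by writing to the RAWSTAT register, then wait for
+	 * either the test handler or the hrtimer timeout to fire.
+	 */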
+	/* store old mask */
+	old_mask_val = kbase_reg_read(kbdev, mask_offset);
+	/* mask interrupts */
+	kbase_reg_write(kbdev, mask_offset, 0x0);
+
+	if (kbdev->irqs[tag].irq) {
+		/* release original handler and install test handler */
+		if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) {
+			err = -EINVAL;
+		} else {
+			kbasep_irq_test_data.timeout = 0;
+			hrtimer_init(&kbasep_irq_test_data.timer,
+					CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+			kbasep_irq_test_data.timer.function =
+						kbasep_test_interrupt_timeout;
+
+			/* trigger interrupt */
+			kbase_reg_write(kbdev, mask_offset, 0x1);
+			kbase_reg_write(kbdev, rawstat_offset, 0x1);
+
+			hrtimer_start(&kbasep_irq_test_data.timer,
+					HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
+					HRTIMER_MODE_REL);
+
+			wait_event(kbasep_irq_test_data.wait,
+					kbasep_irq_test_data.triggered != 0);
+
+			if (kbasep_irq_test_data.timeout != 0) {
+				dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n",
+						kbdev->irqs[tag].irq, tag);
+				err = -EINVAL;
+			} else {
+				dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n",
+						kbdev->irqs[tag].irq, tag);
+			}
+
+			hrtimer_cancel(&kbasep_irq_test_data.timer);
+			kbasep_irq_test_data.triggered = 0;
+
+			/* mask interrupts */
+			kbase_reg_write(kbdev, mask_offset, 0x0);
+
+			/* release test handler */
+			free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
+		}
+
+		/* restore original interrupt */
+		if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag],
+				kbdev->irqs[tag].flags | IRQF_SHARED,
+				dev_name(kbdev->dev), kbase_tag(kbdev, tag))) {
+			dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n",
+						kbdev->irqs[tag].irq, tag);
+			err = -EINVAL;
+		}
+	}
+	/* restore old mask */
+	kbase_reg_write(kbdev, mask_offset, old_mask_val);
+
+	return err;
+}
+
+int kbasep_common_test_interrupt_handlers(
+					struct kbase_device * const kbdev)
+{
+	int err;
+
+	init_waitqueue_head(&kbasep_irq_test_data.wait);
+	kbasep_irq_test_data.triggered = 0;
+
+	/* A suspend won't happen during startup/insmod */
+	kbase_pm_context_active(kbdev);
+
+	err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
+	if (err) {
+		dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
+		goto out;
+	}
+
+	err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
+	if (err) {
+		dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
+		goto out;
+	}
+
+	dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");
+
+ out:
+	kbase_pm_context_idle(kbdev);
+
+	return err;
+}
+#endif /* CONFIG_MALI_DEBUG */
+
+int kbase_install_interrupts(struct kbase_device *kbdev)
+{
+	u32 nr = ARRAY_SIZE(kbase_handler_table);
+	int err;
+	u32 i;
+
+	for (i = 0; i < nr; i++) {
+		err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i],
+				kbdev->irqs[i].flags | IRQF_SHARED,
+				dev_name(kbdev->dev),
+				kbase_tag(kbdev, i));
+		if (err) {
+			dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+							kbdev->irqs[i].irq, i);
+#ifdef CONFIG_SPARSE_IRQ
+			dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+#endif /* CONFIG_SPARSE_IRQ */
+			goto release;
+		}
+	}
+
+	return 0;
+
+ release:
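+	/* Unwind the IRQ requests that succeeded before the failure */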
+	while (i-- > 0)
+		free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+
+	return err;
+}
+
+void kbase_release_interrupts(struct kbase_device *kbdev)
+{
+	u32 nr = ARRAY_SIZE(kbase_handler_table);
+	u32 i;
+
+	for (i = 0; i < nr; i++) {
+		if (kbdev->irqs[i].irq)
+			free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
+	}
+}
+
+void kbase_synchronize_irqs(struct kbase_device *kbdev)
+{
+	u32 nr = ARRAY_SIZE(kbase_handler_table);
+	u32 i;
+
+	for (i = 0; i < nr; i++) {
+		if (kbdev->irqs[i].irq)
+			synchronize_irq(kbdev->irqs[i].irq);
+	}
+}
+
+#endif /* !defined(CONFIG_MALI_NO_MALI) */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
new file mode 100644
index 0000000..c8153ba
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
@@ -0,0 +1,244 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register backend context / address space management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+
+/**
+ * assign_and_activate_kctx_addr_space - Assign an AS to a context
+ * @kbdev: Kbase device
+ * @kctx: Kbase context
+ * @current_as: Address Space to assign
+ *
+ * Assign an Address Space (AS) to a context, and add the context to the Policy.
+ *
+ * This includes:
+ *   setting up the global runpool_irq structure and the context on the AS,
+ *   activating the MMU on the AS,
+ *   allowing jobs to be submitted on the AS.
+ *
+ * Context:
+ *   kbasep_js_kctx_info.jsctx_mutex held,
+ *   kbasep_js_device_data.runpool_mutex held,
+ *   AS transaction mutex held,
+ *   Runpool IRQ lock held
+ */
+static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						struct kbase_as *current_as)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Attribute handling */
+	kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
+
+	/* Allow it to run jobs */
+	kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+	kbase_js_runpool_inc_context_count(kbdev, kctx);
+}
+
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	int i;
+
+	if (kbdev->hwaccess.active_kctx[js] == kctx) {
+		/* Context is already active */
+		return true;
+	}
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		if (kbdev->as_to_kctx[i] == kctx) {
+			/* Context already has ASID - mark as active */
+			return true;
+		}
+	}
+
+	/* Context does not have address space assigned */
+	return false;
+}
+
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	int as_nr = kctx->as_nr;
+
+	if (as_nr == KBASEP_AS_NR_INVALID) {
+		WARN(1, "Attempting to release context without ASID\n");
+		return;
+	}
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (atomic_read(&kctx->refcount) != 1) {
+		WARN(1, "Attempting to release active ASID\n");
+		return;
+	}
+
+	kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
+
+	kbase_ctx_sched_release_ctx(kctx);
+	kbase_js_runpool_dec_context_count(kbdev, kctx);
+}
+
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+}
+
+int kbase_backend_find_and_release_free_address_space(
+		struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	unsigned long flags;
+	int i;
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		struct kbasep_js_kctx_info *as_js_kctx_info;
+		struct kbase_context *as_kctx;
+
+		as_kctx = kbdev->as_to_kctx[i];
+		as_js_kctx_info = &as_kctx->jctx.sched_info;
+
+		/* Don't release privileged or active contexts, or contexts with
+		 * jobs running.
+		 * Note that a context will have at least 1 reference (which
+		 * was previously taken by kbasep_js_schedule_ctx()) until
+		 * descheduled.
+		 */
+		if (as_kctx && !kbase_ctx_flag(as_kctx, KCTX_PRIVILEGED) &&
+			atomic_read(&as_kctx->refcount) == 1) {
+			if (!kbasep_js_runpool_retain_ctx_nolock(kbdev,
+								as_kctx)) {
+				WARN(1, "Failed to retain active context\n");
+
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+						flags);
+				mutex_unlock(&js_devdata->runpool_mutex);
+				mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+				return KBASEP_AS_NR_INVALID;
+			}
+
+			kbasep_js_clear_submit_allowed(js_devdata, as_kctx);
+
+			/* Drop and retake locks to take the jsctx_mutex on the
+			 * context we're about to release without violating lock
+			 * ordering
+			 */
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			mutex_unlock(&js_devdata->runpool_mutex);
+			mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+
+			/* Release context from address space */
+			mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
+			mutex_lock(&js_devdata->runpool_mutex);
+
+			kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx);
+
+			if (!kbase_ctx_flag(as_kctx, KCTX_SCHEDULED)) {
+				kbasep_js_runpool_requeue_or_kill_ctx(kbdev,
+								as_kctx,
+								true);
+
+				mutex_unlock(&js_devdata->runpool_mutex);
+				mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+				return i;
+			}
+
+			/* Context was retained while locks were dropped,
+			 * continue looking for a free AS */
+
+			mutex_unlock(&js_devdata->runpool_mutex);
+			mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+
+			mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+			mutex_lock(&js_devdata->runpool_mutex);
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		}
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	mutex_unlock(&js_devdata->runpool_mutex);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	return KBASEP_AS_NR_INVALID;
+}
+
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				int as_nr)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_as *new_address_space = NULL;
+	int js;
+
+	js_devdata = &kbdev->js_data;
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		if (kbdev->hwaccess.active_kctx[js] == kctx) {
+			WARN(1, "Context is already scheduled in\n");
+			return false;
+		}
+	}
+
+	new_address_space = &kbdev->as[as_nr];
+
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space);
+
+	if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+		/* We need to retain it to keep the corresponding address space
+		 */
+		kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+	}
+
+	return true;
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
new file mode 100644
index 0000000..b4d2ae1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific definitions
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_DEFS_H_
+#define _KBASE_HWACCESS_GPU_DEFS_H_
+
+/* SLOT_RB_SIZE must be < 256 */
+#define SLOT_RB_SIZE 2
+#define SLOT_RB_MASK (SLOT_RB_SIZE - 1)
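+/* The (SLOT_RB_SIZE - 1) mask form assumes SLOT_RB_SIZE is a power of two */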
+
+/**
+ * struct rb_entry - Ringbuffer entry
+ * @katom:	Atom associated with this entry
+ */
+struct rb_entry {
+	struct kbase_jd_atom *katom;
+};
+
+/**
+ * struct slot_rb - Slot ringbuffer
+ * @entries:		Ringbuffer entries
+ * @last_context:	The last context to submit a job on this slot
+ * @read_idx:		Current read index of buffer
+ * @write_idx:		Current write index of buffer
+ * @job_chain_flag:	Flag used to implement jobchain disambiguation
+ */
+struct slot_rb {
+	struct rb_entry entries[SLOT_RB_SIZE];
+
+	struct kbase_context *last_context;
+
+	u8 read_idx;
+	u8 write_idx;
+
+	u8 job_chain_flag;
+};
+
+/**
+ * struct kbase_backend_data - GPU backend specific data for HW access layer
+ * @slot_rb:			Slot ringbuffers
+ * @rmu_workaround_flag:	When PRLAM-8987 is present, this flag determines
+ *				whether slots 0/1 or slot 2 are currently being
+ *				pulled from
+ * @scheduling_timer:		The timer tick used for rescheduling jobs
+ * @timer_running:		Is the timer running? The runpool_mutex must be
+ *				held whilst modifying this.
+ * @suspend_timer:              Is the timer suspended? Set when a suspend
+ *                              occurs and cleared on resume. The runpool_mutex
+ *                              must be held whilst modifying this.
+ * @reset_gpu:			Set to a KBASE_RESET_xxx value (see comments)
+ * @reset_workq:		Work queue for performing the reset
+ * @reset_work:			Work item for performing the reset
+ * @reset_wait:			Wait event signalled when the reset is complete
+ * @reset_timer:		Timeout for soft-stops before the reset
+ * @timeouts_updated:           Have timeout values just been updated?
+ *
+ * The hwaccess_lock (a spinlock) must be held when accessing this structure
+ */
+struct kbase_backend_data {
+	struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS];
+
+	bool rmu_workaround_flag;
+
+	struct hrtimer scheduling_timer;
+
+	bool timer_running;
+	bool suspend_timer;
+
+	atomic_t reset_gpu;
+
+/* The GPU reset isn't pending */
+#define KBASE_RESET_GPU_NOT_PENDING     0
+/* kbase_prepare_to_reset_gpu has been called */
+#define KBASE_RESET_GPU_PREPARED        1
+/* kbase_reset_gpu has been called - the reset will now definitely happen
+ * within the timeout period */
+#define KBASE_RESET_GPU_COMMITTED       2
+/* The GPU reset process is currently occurring (timeout has expired or
+ * kbasep_try_reset_gpu_early was called) */
+#define KBASE_RESET_GPU_HAPPENING       3
+/* Reset the GPU silently, used when resetting the GPU as part of normal
+ * behavior (e.g. when exiting protected mode). */
+#define KBASE_RESET_GPU_SILENT          4
+	struct workqueue_struct *reset_workq;
+	struct work_struct reset_work;
+	wait_queue_head_t reset_wait;
+	struct hrtimer reset_timer;
+
+	bool timeouts_updated;
+};
+
+#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
new file mode 100644
index 0000000..acd4a5a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
@@ -0,0 +1,1465 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel job manager APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+
+#define beenthere(kctx, f, a...) \
+			dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev);
+static void kbasep_reset_timeout_worker(struct work_struct *data);
+static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer);
+
+static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
+						struct kbase_context *kctx)
+{
+	return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT));
+}
+
+static u64 kbase_job_write_affinity(struct kbase_device *kbdev,
+				base_jd_core_req core_req,
+				int js)
+{
+	u64 affinity;
+
+	if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
+			BASE_JD_REQ_T) {
+		/* Tiler-only atom */
+		/* If the hardware supports XAFFINITY then we'll only enable
+		 * the tiler (which is the default so this is a no-op),
+		 * otherwise enable shader core 0.
+		 */
+		if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+			affinity = 1;
+		else
+			affinity = 0;
+	} else if ((core_req & (BASE_JD_REQ_COHERENT_GROUP |
+			BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
+		unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+		struct mali_base_gpu_coherent_group_info *coherency_info =
+			&kbdev->gpu_props.props.coherency_info;
+
+		affinity = kbdev->pm.backend.shaders_avail &
+				kbdev->pm.debug_core_mask[js];
+
+		/* JS2 on a dual core group system targets core group 1. All
+		 * other cases target core group 0.
+		 */
+		if (js == 2 && num_core_groups > 1)
+			affinity &= coherency_info->group[1].core_mask;
+		else
+			affinity &= coherency_info->group[0].core_mask;
+	} else {
+		/* Use all cores */
+		affinity = kbdev->pm.backend.shaders_avail &
+				kbdev->pm.debug_core_mask[js];
+	}
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
+					affinity & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI),
+					affinity >> 32);
+
+	return affinity;
+}
+
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom,
+				int js)
+{
+	struct kbase_context *kctx;
+	u32 cfg;
+	u64 jc_head = katom->jc;
+	u64 affinity;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(katom);
+
+	kctx = katom->kctx;
+
+	/* Command register must be available */
+	KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
+						jc_head & 0xFFFFFFFF);
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI),
+						jc_head >> 32);
+
+	affinity = kbase_job_write_affinity(kbdev, katom->core_req, js);
+
+	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
+	 * start */
+	cfg = kctx->as_nr;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION) &&
+			!(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
+		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
+
+	if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START))
+		cfg |= JS_CONFIG_START_FLUSH_NO_ACTION;
+	else
+		cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
+
+	if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END) &&
+			!(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
+		cfg |= JS_CONFIG_END_FLUSH_NO_ACTION;
+	else if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_CLEAN_ONLY_SAFE))
+		cfg |= JS_CONFIG_END_FLUSH_CLEAN;
+	else
+		cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10649))
+		cfg |= JS_CONFIG_START_MMU;
+
+	cfg |= JS_CONFIG_THREAD_PRI(8);
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE) &&
+		(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED))
+		cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;
+
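+	/*
+	 * On GPUs with jobchain disambiguation, the JOB_CHAIN_FLAG alternates
+	 * between consecutive submissions on a slot; the chosen value is also
+	 * recorded in the atom's flags so that a later soft/hard-stop can
+	 * target the matching chain.
+	 */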
+	if (kbase_hw_has_feature(kbdev,
+				BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+		if (!kbdev->hwaccess.backend.slot_rb[js].job_chain_flag) {
+			cfg |= JS_CONFIG_JOB_CHAIN_FLAG;
+			katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN;
+			kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+								true;
+		} else {
+			katom->atom_flags &= ~KBASE_KATOM_FLAGS_JOBCHAIN;
+			kbdev->hwaccess.backend.slot_rb[js].job_chain_flag =
+								false;
+		}
+	}
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg);
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
+		kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT),
+				katom->flush_id);
+
+	/* Write an approximate start timestamp.
+	 * It's approximate because there might be a job in the HEAD register.
+	 */
+	katom->start_timestamp = ktime_get();
+
+	/* GO ! */
+	dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx",
+				katom, kctx, js, jc_head);
+
+	KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js,
+							(u32)affinity);
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+	kbase_trace_mali_job_slots_event(
+				GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, js),
+				kctx, kbase_jd_atom_id(kctx, katom));
+#endif
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(katom, jc_head,
+			affinity, cfg);
+	KBASE_TLSTREAM_TL_RET_CTX_LPU(
+		kctx,
+		&kbdev->gpu_props.props.raw_props.js_features[
+			katom->slot_nr]);
+	KBASE_TLSTREAM_TL_RET_ATOM_AS(katom, &kbdev->as[kctx->as_nr]);
+	KBASE_TLSTREAM_TL_RET_ATOM_LPU(
+			katom,
+			&kbdev->gpu_props.props.raw_props.js_features[js],
+			"ctx_nr,atom_nr");
+#ifdef CONFIG_GPU_TRACEPOINTS
+	if (!kbase_backend_nr_atoms_submitted(kbdev, js)) {
+		/* If this is the only job on the slot, trace it as starting */
+		char js_string[16];
+
+		trace_gpu_sched_switch(
+				kbasep_make_job_slot_string(js, js_string,
+						sizeof(js_string)),
+				ktime_to_ns(katom->start_timestamp),
+				(u32)katom->kctx->id, 0, katom->work_id);
+		kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx;
+	}
+#endif
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+						JS_COMMAND_START);
+}
+
+/**
+ * kbasep_job_slot_update_head_start_timestamp - Update timestamp
+ * @kbdev: kbase device
+ * @js: job slot
+ * @end_timestamp: timestamp
+ *
+ * Update the start_timestamp of the job currently in the HEAD, based on the
+ * fact that we got an IRQ for the previous set of completed jobs.
+ *
+ * The estimate also takes into account the time the job was submitted, to
+ * work out the best estimate (which might still result in an over-estimate of
+ * the calculated time spent)
+ */
+static void kbasep_job_slot_update_head_start_timestamp(
+						struct kbase_device *kbdev,
+						int js,
+						ktime_t end_timestamp)
+{
+	if (kbase_backend_nr_atoms_on_slot(kbdev, js) > 0) {
+		struct kbase_jd_atom *katom;
+		ktime_t timestamp_diff;
+		/* The atom in the HEAD */
+		katom = kbase_gpu_inspect(kbdev, js, 0);
+
+		KBASE_DEBUG_ASSERT(katom != NULL);
+
+		timestamp_diff = ktime_sub(end_timestamp,
+				katom->start_timestamp);
+		if (ktime_to_ns(timestamp_diff) >= 0) {
+			/* Only update the timestamp if it's a better estimate
+			 * than what's currently stored. This is because our
+			 * estimate that accounts for the throttle time may be
+			 * too much of an overestimate */
+			katom->start_timestamp = end_timestamp;
+		}
+	}
+}
+
+/**
+ * kbasep_trace_tl_event_lpu_softstop - Call event_lpu_softstop timeline
+ * tracepoint
+ * @kbdev: kbase device
+ * @js: job slot
+ *
+ * Make a tracepoint call to the instrumentation module informing that
+ * softstop happened on given lpu (job slot).
+ */
+static void kbasep_trace_tl_event_lpu_softstop(struct kbase_device *kbdev,
+					int js)
+{
+	KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(
+		&kbdev->gpu_props.props.raw_props.js_features[js]);
+}
+
+void kbase_job_done(struct kbase_device *kbdev, u32 done)
+{
+	unsigned long flags;
+	int i;
+	u32 count = 0;
+	ktime_t end_timestamp = ktime_get();
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
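+	/*
+	 * The loop below treats bits 0-15 of "done" as per-slot completion
+	 * flags and bits 16-31 as the corresponding per-slot failure flags.
+	 */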
+	while (done) {
+		u32 failed = done >> 16;
+
+		/* treat failed slots as finished slots */
+		u32 finished = (done & 0xFFFF) | failed;
+
+		/* Note: This is inherently unfair, as we always check
+		 * for lower numbered interrupts before the higher
+		 * numbered ones. */
+		i = ffs(finished) - 1;
+		KBASE_DEBUG_ASSERT(i >= 0);
+
+		do {
+			int nr_done;
+			u32 active;
+			u32 completion_code = BASE_JD_EVENT_DONE;/* assume OK */
+			u64 job_tail = 0;
+
+			if (failed & (1u << i)) {
+				/* read out the job slot status code if the job
+				 * slot reported failure */
+				completion_code = kbase_reg_read(kbdev,
+					JOB_SLOT_REG(i, JS_STATUS));
+
+				if (completion_code == BASE_JD_EVENT_STOPPED) {
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+					kbase_trace_mali_job_slots_event(
+						GATOR_MAKE_EVENT(
+						GATOR_JOB_SLOT_SOFT_STOPPED, i),
+								NULL, 0);
+#endif
+
+					kbasep_trace_tl_event_lpu_softstop(
+						kbdev, i);
+
+					/* Soft-stopped job - read the value of
+					 * JS<n>_TAIL so that the job chain can
+					 * be resumed */
+					job_tail = (u64)kbase_reg_read(kbdev,
+						JOB_SLOT_REG(i, JS_TAIL_LO)) |
+						((u64)kbase_reg_read(kbdev,
+						JOB_SLOT_REG(i, JS_TAIL_HI))
+						 << 32);
+				} else if (completion_code ==
+						BASE_JD_EVENT_NOT_STARTED) {
+					/* PRLAM-10673 can cause a TERMINATED
+					 * job to come back as NOT_STARTED, but
+					 * the error interrupt helps us detect
+					 * it */
+					completion_code =
+						BASE_JD_EVENT_TERMINATED;
+				}
+
+				kbase_gpu_irq_evict(kbdev, i, completion_code);
+			}
+
+			kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR),
+					done & ((1 << i) | (1 << (i + 16))));
+			active = kbase_reg_read(kbdev,
+					JOB_CONTROL_REG(JOB_IRQ_JS_STATE));
+
+			if (((active >> i) & 1) == 0 &&
+					(((done >> (i + 16)) & 1) == 0)) {
+				/* There is a potential race we must work
+				 * around:
+				 *
+				 *  1. A job slot has a job in both current and
+				 *     next registers
+				 *  2. The job in current completes
+				 *     successfully, the IRQ handler reads
+				 *     RAWSTAT and calls this function with the
+				 *     relevant bit set in "done"
+				 *  3. The job in the next registers becomes the
+				 *     current job on the GPU
+				 *  4. Sometime before the JOB_IRQ_CLEAR line
+				 *     above the job on the GPU _fails_
+				 *  5. The IRQ_CLEAR clears the done bit but not
+				 *     the failed bit. This atomically sets
+				 *     JOB_IRQ_JS_STATE. However since both jobs
+				 *     have now completed the relevant bits for
+				 *     the slot are set to 0.
+				 *
+				 * If we now did nothing then we'd incorrectly
+				 * assume that _both_ jobs had completed
+				 * successfully (since we haven't yet observed
+				 * the fail bit being set in RAWSTAT).
+				 *
+				 * So at this point if there are no active jobs
+				 * left we check to see if RAWSTAT has a failure
+				 * bit set for the job slot. If it does we know
+				 * that there has been a new failure that we
+				 * didn't previously know about, so we make sure
+				 * that we record this in active (but we wait
+				 * for the next loop to deal with it).
+				 *
+				 * If we were handling a job failure (i.e. done
+				 * has the relevant high bit set) then we know
+				 * that the value read back from
+				 * JOB_IRQ_JS_STATE is the correct number of
+				 * remaining jobs because the failed job will
+				 * have prevented any further jobs from starting
+				 * execution.
+				 */
+				u32 rawstat = kbase_reg_read(kbdev,
+					JOB_CONTROL_REG(JOB_IRQ_RAWSTAT));
+
+				if ((rawstat >> (i + 16)) & 1) {
+					/* There is a failed job that we've
+					 * missed - add it back to active */
+					active |= (1u << i);
+				}
+			}
+
+			dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n",
+							completion_code);
+
+			nr_done = kbase_backend_nr_atoms_submitted(kbdev, i);
+			nr_done -= (active >> i) & 1;
+			nr_done -= (active >> (i + 16)) & 1;
+
+			if (nr_done <= 0) {
+				dev_warn(kbdev->dev, "Spurious interrupt on slot %d",
+									i);
+
+				goto spurious;
+			}
+
+			count += nr_done;
+
+			while (nr_done) {
+				if (nr_done == 1) {
+					kbase_gpu_complete_hw(kbdev, i,
+								completion_code,
+								job_tail,
+								&end_timestamp);
+					kbase_jm_try_kick_all(kbdev);
+				} else {
+					/* More than one job has completed.
+					 * Since this is not the last job being
+					 * reported this time it must have
+					 * passed. This is because the hardware
+					 * will not allow further jobs in a job
+					 * slot to complete until the failed job
+					 * is cleared from the IRQ status.
+					 */
+					kbase_gpu_complete_hw(kbdev, i,
+							BASE_JD_EVENT_DONE,
+							0,
+							&end_timestamp);
+				}
+				nr_done--;
+			}
+ spurious:
+			done = kbase_reg_read(kbdev,
+					JOB_CONTROL_REG(JOB_IRQ_RAWSTAT));
+
+			if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
+				/* Workaround for missing interrupt caused by
+				 * PRLAM-10883 */
+				if (((active >> i) & 1) && (0 ==
+						kbase_reg_read(kbdev,
+							JOB_SLOT_REG(i,
+							JS_STATUS)))) {
+					/* Force job slot to be processed again
+					 */
+					done |= (1u << i);
+				}
+			}
+
+			failed = done >> 16;
+			finished = (done & 0xFFFF) | failed;
+			if (done)
+				end_timestamp = ktime_get();
+		} while (finished & (1 << i));
+
+		kbasep_job_slot_update_head_start_timestamp(kbdev, i,
+								end_timestamp);
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+						KBASE_RESET_GPU_COMMITTED) {
+		/* If we're trying to reset the GPU then we might be able to do
+		 * it early (without waiting for a timeout) because some jobs
+		 * have completed
+		 */
+		kbasep_try_reset_gpu_early(kbdev);
+	}
+	KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count);
+}
+KBASE_EXPORT_TEST_API(kbase_job_done);
+
+static bool kbasep_soft_stop_allowed(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom)
+{
+	bool soft_stops_allowed = true;
+
+	if (kbase_jd_katom_is_protected(katom)) {
+		soft_stops_allowed = false;
+	} else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408)) {
+		if ((katom->core_req & BASE_JD_REQ_T) != 0)
+			soft_stops_allowed = false;
+	}
+	return soft_stops_allowed;
+}
+
+static bool kbasep_hard_stop_allowed(struct kbase_device *kbdev,
+						base_jd_core_req core_reqs)
+{
+	bool hard_stops_allowed = true;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8394)) {
+		if ((core_reqs & BASE_JD_REQ_T) != 0)
+			hard_stops_allowed = false;
+	}
+	return hard_stops_allowed;
+}
+
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+					int js,
+					u32 action,
+					base_jd_core_req core_reqs,
+					struct kbase_jd_atom *target_katom)
+{
+#if KBASE_TRACE_ENABLE
+	u32 status_reg_before;
+	u64 job_in_head_before;
+	u32 status_reg_after;
+
+	KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK)));
+
+	/* Check the head pointer */
+	job_in_head_before = ((u64) kbase_reg_read(kbdev,
+					JOB_SLOT_REG(js, JS_HEAD_LO)))
+			| (((u64) kbase_reg_read(kbdev,
+					JOB_SLOT_REG(js, JS_HEAD_HI)))
+									<< 32);
+	status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS));
+#endif
+
+	if (action == JS_COMMAND_SOFT_STOP) {
+		bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev,
+								target_katom);
+
+		if (!soft_stop_allowed) {
+#ifdef CONFIG_MALI_DEBUG
+			dev_dbg(kbdev->dev,
+					"Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X",
+					(unsigned int)core_reqs);
+#endif				/* CONFIG_MALI_DEBUG */
+			return;
+		}
+
+		/* We are about to issue a soft stop, so mark the atom as having
+		 * been soft stopped */
+		target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
+
+		/* Mark the point where we issue the soft-stop command */
+		KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(target_katom);
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+			int i;
+
+			for (i = 0;
+			     i < kbase_backend_nr_atoms_submitted(kbdev, js);
+			     i++) {
+				struct kbase_jd_atom *katom;
+
+				katom = kbase_gpu_inspect(kbdev, js, i);
+
+				KBASE_DEBUG_ASSERT(katom);
+
+				/* For HW_ISSUE_8316, only 'bad' jobs attacking
+				 * the system can cause this issue: normally,
+				 * all memory should be allocated in multiples
+				 * of 4 pages, and growable memory should be
+				 * changed size in multiples of 4 pages.
+				 *
+				 * Whilst such 'bad' jobs can be cleared by a
+				 * GPU reset, the locking up of a uTLB entry
+				 * caused by the bad job could also stall other
+				 * ASs, meaning that other ASs' jobs don't
+				 * complete in the 'grace' period before the
+				 * reset. We don't want to lose other ASs' jobs
+				 * when they would normally complete fine, so we
+				 * must 'poke' the MMU regularly to help other
+				 * ASs complete */
+				kbase_as_poking_timer_retain_atom(
+						kbdev, katom->kctx, katom);
+			}
+		}
+
+		if (kbase_hw_has_feature(
+				kbdev,
+				BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+			action = (target_katom->atom_flags &
+					KBASE_KATOM_FLAGS_JOBCHAIN) ?
+				JS_COMMAND_SOFT_STOP_1 :
+				JS_COMMAND_SOFT_STOP_0;
+		}
+	} else if (action == JS_COMMAND_HARD_STOP) {
+		bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev,
+								core_reqs);
+
+		if (!hard_stop_allowed) {
+			/* Jobs can be hard-stopped for the following reasons:
+			 *  * CFS decides the job has been running too long (and
+			 *    soft-stop has not occurred). In this case the GPU
+			 *    will be reset by CFS if the job remains on the
+			 *    GPU.
+			 *
+			 *  * The context is destroyed, kbase_jd_zap_context
+			 *    will attempt to hard-stop the job. However it also
+			 *    has a watchdog which will cause the GPU to be
+			 *    reset if the job remains on the GPU.
+			 *
+			 *  * An (unhandled) MMU fault occurred. As long as
+			 *    BASE_HW_ISSUE_8245 is defined then the GPU will be
+			 *    reset.
+			 *
+			 * All three cases result in the GPU being reset if the
+			 * hard-stop fails, so it is safe to just return and
+			 * ignore the hard-stop request.
+			 */
+			dev_warn(kbdev->dev,
+					"Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X",
+					(unsigned int)core_reqs);
+			return;
+		}
+		target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED;
+
+		if (kbase_hw_has_feature(
+				kbdev,
+				BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
+			action = (target_katom->atom_flags &
+					KBASE_KATOM_FLAGS_JOBCHAIN) ?
+				JS_COMMAND_HARD_STOP_1 :
+				JS_COMMAND_HARD_STOP_0;
+		}
+	}
+
+	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action);
+
+#if KBASE_TRACE_ENABLE
+	status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS));
+	if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
+		struct kbase_jd_atom *head;
+		struct kbase_context *head_kctx;
+
+		head = kbase_gpu_inspect(kbdev, js, 0);
+		head_kctx = head->kctx;
+
+		if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, head_kctx,
+						head, job_in_head_before, js);
+		else
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+						0, js);
+
+		switch (action) {
+		case JS_COMMAND_SOFT_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_SOFT_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_SOFT_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_HARD_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_HARD_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx,
+							head, head->jc, js);
+			break;
+		case JS_COMMAND_HARD_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx,
+							head, head->jc, js);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	} else {
+		if (status_reg_before == BASE_JD_EVENT_ACTIVE)
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+							job_in_head_before, js);
+		else
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL,
+							0, js);
+
+		switch (action) {
+		case JS_COMMAND_SOFT_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0,
+							js);
+			break;
+		case JS_COMMAND_SOFT_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL,
+							0, js);
+			break;
+		case JS_COMMAND_SOFT_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL,
+							0, js);
+			break;
+		case JS_COMMAND_HARD_STOP:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0,
+							js);
+			break;
+		case JS_COMMAND_HARD_STOP_0:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL,
+							0, js);
+			break;
+		case JS_COMMAND_HARD_STOP_1:
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL,
+							0, js);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+#endif
+}
+
+void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev;
+	int i;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	/* Cancel any remaining running jobs for this kctx  */
+	mutex_lock(&kctx->jctx.lock);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* Invalidate all jobs in context, to prevent re-submitting */
+	for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+		if (!work_pending(&kctx->jctx.atoms[i].work))
+			kctx->jctx.atoms[i].event_code =
+						BASE_JD_EVENT_JOB_CANCELLED;
+	}
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+		kbase_job_slot_hardstop(kctx, i, NULL);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kctx->jctx.lock);
+}
+
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+				struct kbase_jd_atom *target_katom)
+{
+	struct kbase_device *kbdev;
+	int js = target_katom->slot_nr;
+	int priority = target_katom->sched_priority;
+	int i;
+	bool stop_sent = false;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+		struct kbase_jd_atom *katom;
+
+		katom = kbase_gpu_inspect(kbdev, js, i);
+		if (!katom)
+			continue;
+
+		if ((kbdev->js_ctx_scheduling_mode ==
+			KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE) &&
+				(katom->kctx != kctx))
+			continue;
+
+		if (katom->sched_priority > priority) {
+			if (!stop_sent)
+				KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED(
+						target_katom);
+
+			kbase_job_slot_softstop(kbdev, js, katom);
+			stop_sent = true;
+		}
+	}
+}
+
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	unsigned long timeout = msecs_to_jiffies(ZAP_TIMEOUT);
+
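+	/*
+	 * First wait for the context's job count to reach zero, then for the
+	 * context to be descheduled; if either wait times out, fall through
+	 * and reset the GPU so the remaining jobs are forced off it.
+	 */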
+	timeout = wait_event_timeout(kctx->jctx.zero_jobs_wait,
+			kctx->jctx.job_nr == 0, timeout);
+
+	if (timeout != 0)
+		timeout = wait_event_timeout(
+			kctx->jctx.sched_info.ctx.is_scheduled_wait,
+			!kbase_ctx_flag(kctx, KCTX_SCHEDULED),
+			timeout);
+
+	/* Neither wait timed out; all done! */
+	if (timeout != 0)
+		goto exit;
+
+	if (kbase_prepare_to_reset_gpu(kbdev)) {
+		dev_err(kbdev->dev,
+			"Issueing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
+			ZAP_TIMEOUT);
+		kbase_reset_gpu(kbdev);
+	}
+
+	/* Wait for the reset to complete */
+	wait_event(kbdev->hwaccess.backend.reset_wait,
+			atomic_read(&kbdev->hwaccess.backend.reset_gpu)
+			== KBASE_RESET_GPU_NOT_PENDING);
+exit:
+	dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
+
+	/* Ensure that the signallers of the waitqs have finished */
+	mutex_lock(&kctx->jctx.lock);
+	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	mutex_unlock(&kctx->jctx.lock);
+}
+
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
+{
+	u32 flush_id = 0;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) {
+		mutex_lock(&kbdev->pm.lock);
+		if (kbdev->pm.backend.gpu_powered)
+			flush_id = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(LATEST_FLUSH));
+		mutex_unlock(&kbdev->pm.lock);
+	}
+
+	return flush_id;
+}
+
+int kbase_job_slot_init(struct kbase_device *kbdev)
+{
+	kbdev->hwaccess.backend.reset_workq = alloc_workqueue(
+						"Mali reset workqueue", 0, 1);
+	if (NULL == kbdev->hwaccess.backend.reset_workq)
+		return -EINVAL;
+
+	INIT_WORK(&kbdev->hwaccess.backend.reset_work,
+						kbasep_reset_timeout_worker);
+
+	hrtimer_init(&kbdev->hwaccess.backend.reset_timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	kbdev->hwaccess.backend.reset_timer.function =
+						kbasep_reset_timer_callback;
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_init);
+
+void kbase_job_slot_halt(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+void kbase_job_slot_term(struct kbase_device *kbdev)
+{
+	destroy_workqueue(kbdev->hwaccess.backend.reset_workq);
+}
+KBASE_EXPORT_TEST_API(kbase_job_slot_term);
+
+/**
+ * kbasep_check_for_afbc_on_slot() - Check whether AFBC is in use on this slot
+ * @kbdev: kbase device pointer
+ * @kctx:  context to check against
+ * @js:	   slot to check
+ * @target_katom: An atom to check, or NULL if all atoms from @kctx on
+ *                slot @js should be checked
+ *
+ * These checks are based upon the parameters that would normally be passed to
+ * kbase_job_slot_hardstop().
+ *
+ * In the event of @target_katom being NULL, this will check the last jobs that
+ * are likely to be running on the slot to see a) whether they belong to @kctx
+ * (and so would be stopped), and b) whether they use AFBC.
+ *
+ * In that case, it is guaranteed that a job currently executing on the HW with
+ * AFBC will be detected. However, this is a conservative check because it also
+ * detects jobs that have only just completed.
+ *
+ * Return: true when hard-stop _might_ stop an afbc atom, else false.
+ */
+static bool kbasep_check_for_afbc_on_slot(struct kbase_device *kbdev,
+		struct kbase_context *kctx, int js,
+		struct kbase_jd_atom *target_katom)
+{
+	bool ret = false;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* When we have an atom the decision can be made straight away. */
+	if (target_katom)
+		return !!(target_katom->core_req & BASE_JD_REQ_FS_AFBC);
+
+	/* Otherwise, we must check the hardware to see if it has atoms from
+	 * this context with AFBC. */
+	for (i = 0; i < kbase_backend_nr_atoms_on_slot(kbdev, js); i++) {
+		struct kbase_jd_atom *katom;
+
+		katom = kbase_gpu_inspect(kbdev, js, i);
+		if (!katom)
+			continue;
+
+		/* Ignore atoms from other contexts, they won't be stopped when
+		 * we use this for checking if we should hard-stop them */
+		if (katom->kctx != kctx)
+			continue;
+
+		/* An atom on this slot and this context: check for AFBC */
+		if (katom->core_req & BASE_JD_REQ_FS_AFBC) {
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * kbase_job_slot_softstop_swflags - Soft-stop a job with flags
+ * @kbdev:         The kbase device
+ * @js:            The job slot to soft-stop
+ * @target_katom:  The job that should be soft-stopped (or NULL for any job)
+ * @sw_flags:      Flags to pass in about the soft-stop
+ *
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ *   The job slot must not already be in the process of being soft-stopped.
+ *
+ * Soft-stop the specified job slot, with extra information about the stop
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+			struct kbase_jd_atom *target_katom, u32 sw_flags)
+{
+	KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
+	kbase_backend_soft_hard_stop_slot(kbdev, NULL, js, target_katom,
+			JS_COMMAND_SOFT_STOP | sw_flags);
+}
+
+/**
+ * kbase_job_slot_softstop - Soft-stop the specified job slot
+ * @kbdev:         The kbase device
+ * @js:            The job slot to soft-stop
+ * @target_katom:  The job that should be soft-stopped (or NULL for any job)
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ *   The job slot must not already be in the process of being soft-stopped.
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ */
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+				struct kbase_jd_atom *target_katom)
+{
+	kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);
+}
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx:         The kbase context that contains the job(s) that should
+ *                be hard-stopped
+ * @js:           The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ *                jobs from the context)
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+				struct kbase_jd_atom *target_katom)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	bool stopped;
+	/* We make the check for AFBC before evicting/stopping atoms.  Note
+	 * that no other thread can modify the slots whilst we have the
+	 * hwaccess_lock. */
+	int needs_workaround_for_afbc =
+			kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3542)
+			&& kbasep_check_for_afbc_on_slot(kbdev, kctx, js,
+					 target_katom);
+
+	stopped = kbase_backend_soft_hard_stop_slot(kbdev, kctx, js,
+							target_katom,
+							JS_COMMAND_HARD_STOP);
+	if (stopped && (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) ||
+			kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) ||
+			needs_workaround_for_afbc)) {
+		/* MIDBASE-2916: if a fragment job with AFBC encoding is
+		 * hard-stopped, also perform a soft reset in order to clear
+		 * the GPU status.
+		 * The workaround for HW issue 8401 has an issue of its own, so
+		 * after hard-stopping just reset the GPU. This will ensure
+		 * that the jobs leave the GPU. */
+		if (kbase_prepare_to_reset_gpu_locked(kbdev)) {
+			dev_err(kbdev->dev, "Issuing GPU soft-reset after hard stopping due to hardware issue");
+			kbase_reset_gpu_locked(kbdev);
+		}
+	}
+}
+
+/**
+ * kbase_job_check_enter_disjoint - potentially enter disjoint mode
+ * @kbdev: kbase device
+ * @action: the event which has occurred
+ * @core_reqs: core requirements of the atom
+ * @target_katom: the atom which is being affected
+ *
+ * For a certain soft/hard-stop action, work out whether to enter disjoint
+ * state.
+ *
+ * This does not register multiple disjoint events if the atom has already
+ * started a disjoint period
+ *
+ * @core_reqs can be supplied as 0 if the atom had not started on the hardware
+ * (and so a 'real' soft/hard-stop was not required, but it still interrupted
+ * flow, perhaps on another context)
+ *
+ * kbase_job_check_leave_disjoint() should be used to end the disjoint
+ * state when the soft/hard-stop action is complete
+ */
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom)
+{
+	u32 hw_action = action & JS_COMMAND_MASK;
+
+	/* For hard-stop, don't enter if hard-stop not allowed */
+	if (hw_action == JS_COMMAND_HARD_STOP &&
+			!kbasep_hard_stop_allowed(kbdev, core_reqs))
+		return;
+
+	/* For soft-stop, don't enter if soft-stop not allowed, or isn't
+	 * causing disjoint */
+	if (hw_action == JS_COMMAND_SOFT_STOP &&
+			!(kbasep_soft_stop_allowed(kbdev, target_katom) &&
+			  (action & JS_COMMAND_SW_CAUSES_DISJOINT)))
+		return;
+
+	/* Nothing to do if already logged disjoint state on this atom */
+	if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT)
+		return;
+
+	target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT;
+	kbase_disjoint_state_up(kbdev);
+}
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+		struct kbase_jd_atom *target_katom)
+{
+	if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) {
+		target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT;
+		kbase_disjoint_state_down(kbdev);
+	}
+}
+
+static void kbase_debug_dump_registers(struct kbase_device *kbdev)
+{
+	int i;
+
+	kbase_io_history_dump(kbdev);
+
+	dev_err(kbdev->dev, "Register state:");
+	dev_err(kbdev->dev, "  GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)));
+	dev_err(kbdev->dev, "  JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x",
+		kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT)),
+		kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE)));
+	for (i = 0; i < 3; i++) {
+		dev_err(kbdev->dev, "  JS%d_STATUS=0x%08x      JS%d_HEAD_LO=0x%08x",
+			i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS)),
+			i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO)));
+	}
+	dev_err(kbdev->dev, "  MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x",
+		kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS)));
+	dev_err(kbdev->dev, "  GPU_IRQ_MASK=0x%08x    JOB_IRQ_MASK=0x%08x     MMU_IRQ_MASK=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK)),
+		kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK)),
+		kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)));
+	dev_err(kbdev->dev, "  PWR_OVERRIDE0=0x%08x   PWR_OVERRIDE1=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1)));
+	dev_err(kbdev->dev, "  SHADER_CONFIG=0x%08x   L2_MMU_CONFIG=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG)));
+	dev_err(kbdev->dev, "  TILER_CONFIG=0x%08x    JM_CONFIG=0x%08x",
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(TILER_CONFIG)),
+		kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG)));
+}
+
+static void kbasep_reset_timeout_worker(struct work_struct *data)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev;
+	ktime_t end_timestamp = ktime_get();
+	struct kbasep_js_device_data *js_devdata;
+	bool silent = false;
+	u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+	KBASE_DEBUG_ASSERT(data);
+
+	kbdev = container_of(data, struct kbase_device,
+						hwaccess.backend.reset_work);
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	js_devdata = &kbdev->js_data;
+
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+			KBASE_RESET_GPU_SILENT)
+		silent = true;
+
+	KBASE_TRACE_ADD(kbdev, JM_BEGIN_RESET_WORKER, NULL, NULL, 0u, 0);
+
+	/* Disable GPU hardware counters.
+	 * This call will block until counters are disabled.
+	 */
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	/* Make sure the timer has completed - this cannot be done from
+	 * interrupt context, so this cannot be done within
+	 * kbasep_try_reset_gpu_early. */
+	hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
+
+	if (kbase_pm_context_active_handle_suspend(kbdev,
+				KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+		/* This would re-activate the GPU. Since it's already idle,
+		 * there's no need to reset it */
+		atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING);
+		kbase_disjoint_state_down(kbdev);
+		wake_up(&kbdev->hwaccess.backend.reset_wait);
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return;
+	}
+
+	KBASE_DEBUG_ASSERT(kbdev->irq_reset_flush == false);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	spin_lock(&kbdev->mmu_mask_change);
+	kbase_pm_reset_start_locked(kbdev);
+
+	/* We're about to flush out the IRQs and their bottom halves */
+	kbdev->irq_reset_flush = true;
+
+	/* Disable IRQs to prevent IRQ handlers from kicking in after releasing
+	 * the spinlock; this also clears any outstanding interrupts */
+	kbase_pm_disable_interrupts_nolock(kbdev);
+
+	spin_unlock(&kbdev->mmu_mask_change);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Ensure that any IRQ handlers have finished.
+	 * This must be done without holding any locks the IRQ handlers will
+	 * take */
+	kbase_synchronize_irqs(kbdev);
+
+	/* Flush out any in-flight work items */
+	kbase_flush_mmu_wqs(kbdev);
+
+	/* The flush has completed so reset the active indicator */
+	kbdev->irq_reset_flush = false;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8463)) {
+		/* Ensure that L2 is not transitioning when we send the reset
+		 * command */
+		while (--max_loops && kbase_pm_get_trans_cores(kbdev,
+				KBASE_PM_CORE_L2))
+			;
+
+		WARN(!max_loops, "L2 power transition timed out while trying to reset\n");
+	}
+
+	mutex_lock(&kbdev->pm.lock);
+	/* We hold the pm lock, so there ought to be a current policy */
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.pm_current_policy);
+
+	/* All slots have been soft-stopped and we've waited
+	 * SOFT_STOP_RESET_TIMEOUT for the slots to clear. At this point we
+	 * assume that anything still left on the GPU is stuck there and will
+	 * be killed when we reset the GPU */
+
+	if (!silent)
+		dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
+								RESET_TIMEOUT);
+
+	/* Output the state of some interesting registers to help in the
+	 * debugging of GPU resets */
+	if (!silent)
+		kbase_debug_dump_registers(kbdev);
+
+	/* Complete any jobs that were still on the GPU */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->protected_mode = false;
+	kbase_backend_reset(kbdev, &end_timestamp);
+	kbase_pm_metrics_update(kbdev, NULL);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Reset the GPU */
+	kbase_pm_init_hw(kbdev, 0);
+
+	mutex_unlock(&kbdev->pm.lock);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_ctx_sched_restore_all_as(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	kbase_pm_enable_interrupts(kbdev);
+
+	kbase_disjoint_state_down(kbdev);
+
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	mutex_lock(&kbdev->pm.lock);
+
+	kbase_pm_reset_complete(kbdev);
+
+	/* Find out what cores are required now */
+	kbase_pm_update_cores_state(kbdev);
+
+	/* Synchronously request and wait for those cores, because if
+	 * instrumentation is enabled it would need them immediately. */
+	kbase_pm_wait_for_desired_state(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+
+	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING);
+
+	wake_up(&kbdev->hwaccess.backend.reset_wait);
+	if (!silent)
+		dev_err(kbdev->dev, "Reset complete");
+
+	/* Try submitting some jobs to restart processing */
+	KBASE_TRACE_ADD(kbdev, JM_SUBMIT_AFTER_RESET, NULL, NULL, 0u, 0);
+	kbase_js_sched_all(kbdev);
+
+	/* Process any pending slot updates */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_backend_slot_update(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	kbase_pm_context_idle(kbdev);
+
+	/* Re-enable GPU hardware counters */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	KBASE_TRACE_ADD(kbdev, JM_END_RESET_WORKER, NULL, NULL, 0u, 0);
+}
+
+static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
+{
+	struct kbase_device *kbdev = container_of(timer, struct kbase_device,
+						hwaccess.backend.reset_timer);
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Reset still pending? */
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+			KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) ==
+						KBASE_RESET_GPU_COMMITTED)
+		queue_work(kbdev->hwaccess.backend.reset_workq,
+					&kbdev->hwaccess.backend.reset_work);
+
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * If all jobs are evicted from the GPU then we can reset the GPU
+ * immediately instead of waiting for the timeout to elapse
+ */
+
+static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
+{
+	int i;
+	int pending_jobs = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Count the number of jobs */
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+		pending_jobs += kbase_backend_nr_atoms_submitted(kbdev, i);
+
+	if (pending_jobs > 0) {
+		/* There are still jobs on the GPU - wait */
+		return;
+	}
+
+	/* To prevent reading incorrect register values when dumping a failed
+	 * job, skip the early reset.
+	 */
+	if (kbdev->job_fault_debug != false)
+		return;
+
+	/* Check that the reset has been committed to (i.e. kbase_reset_gpu has
+	 * been called), and that no other thread beat this thread to starting
+	 * the reset */
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+			KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) !=
+						KBASE_RESET_GPU_COMMITTED) {
+		/* Reset has already occurred */
+		return;
+	}
+
+	queue_work(kbdev->hwaccess.backend.reset_workq,
+					&kbdev->hwaccess.backend.reset_work);
+}
+
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbasep_try_reset_gpu_early_locked(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU
+ * @kbdev: kbase device
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return:
+ *   The function returns a boolean which should be interpreted as follows:
+ *   true - Prepared for reset, kbase_reset_gpu_locked should be called.
+ *   false - Another thread is performing a reset, kbase_reset_gpu_locked
+ *   should not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
+{
+	int i;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING,
+						KBASE_RESET_GPU_PREPARED) !=
+						KBASE_RESET_GPU_NOT_PENDING) {
+		/* Some other thread is already resetting the GPU */
+		return false;
+	}
+
+	kbase_disjoint_state_up(kbdev);
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
+		kbase_job_slot_softstop(kbdev, i, NULL);
+
+	return true;
+}
+
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	ret = kbase_prepare_to_reset_gpu_locked(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu);
+
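+/*
+ * Typical usage of the prepare/reset pair (a sketch of the calling convention
+ * described below, not taken from an actual caller):
+ *
+ *	if (kbase_prepare_to_reset_gpu(kbdev))
+ *		kbase_reset_gpu(kbdev);
+ */
+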
+/*
+ * This function should be called after kbase_prepare_to_reset_gpu if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for
+ * kbdev->hwaccess.backend.reset_wait to be signalled to know when the reset
+ * has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Note this is an assert/atomic_set because it would be a software
+	 * issue for a race to be occurring here */
+	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+						KBASE_RESET_GPU_PREPARED);
+	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_COMMITTED);
+
+	dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
+			kbdev->reset_timeout_ms);
+
+	hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+			HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+			HRTIMER_MODE_REL);
+
+	/* Try resetting early */
+	kbasep_try_reset_gpu_early(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_reset_gpu);
+
+void kbase_reset_gpu_locked(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Note this is an assert/atomic_set because it would be a software
+	 * issue for a race to be occurring here */
+	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+						KBASE_RESET_GPU_PREPARED);
+	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_COMMITTED);
+
+	dev_err(kbdev->dev, "Preparing to soft-reset GPU: Waiting (up to %d ms) for all jobs to complete soft-stop\n",
+			kbdev->reset_timeout_ms);
+	hrtimer_start(&kbdev->hwaccess.backend.reset_timer,
+			HR_TIMER_DELAY_MSEC(kbdev->reset_timeout_ms),
+			HRTIMER_MODE_REL);
+
+	/* Try resetting early */
+	kbasep_try_reset_gpu_early_locked(kbdev);
+}
+
+int kbase_reset_gpu_silent(struct kbase_device *kbdev)
+{
+	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+						KBASE_RESET_GPU_NOT_PENDING,
+						KBASE_RESET_GPU_SILENT) !=
+						KBASE_RESET_GPU_NOT_PENDING) {
+		/* Some other thread is already resetting the GPU */
+		return -EAGAIN;
+	}
+
+	kbase_disjoint_state_up(kbdev);
+
+	queue_work(kbdev->hwaccess.backend.reset_workq,
+			&kbdev->hwaccess.backend.reset_work);
+
+	return 0;
+}
+
+bool kbase_reset_gpu_active(struct kbase_device *kbdev)
+{
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+			KBASE_RESET_GPU_NOT_PENDING)
+		return false;
+
+	return true;
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
new file mode 100644
index 0000000..452ddee
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
@@ -0,0 +1,169 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Job Manager backend-specific low-level APIs.
+ */
+
+#ifndef _KBASE_JM_HWACCESS_H_
+#define _KBASE_JM_HWACCESS_H_
+
+#include <mali_kbase_hw.h>
+#include <mali_kbase_debug.h>
+#include <linux/atomic.h>
+
+#include <backend/gpu/mali_kbase_jm_rb.h>
+
+/**
+ * kbase_job_submit_nolock() - Submit a job to a certain job-slot
+ * @kbdev:	Device pointer
+ * @katom:	Atom to submit
+ * @js:		Job slot to submit on
+ *
+ * The caller must check kbasep_jm_is_submit_slots_free() != false before
+ * calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_submit_nolock(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom, int js);
+
+/**
+ * kbase_job_done_slot() - Complete the head job on a particular job-slot
+ * @kbdev:		Device pointer
+ * @s:			Job slot
+ * @completion_code:	Completion code of job reported by GPU
+ * @job_tail:		Job tail address reported by GPU
+ * @end_timestamp:	Timestamp of job completion
+ */
+void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code,
+					u64 job_tail, ktime_t *end_timestamp);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+static inline char *kbasep_make_job_slot_string(int js, char *js_string,
+						size_t js_size)
+{
+	snprintf(js_string, js_size, "job_slot_%i", js);
+	return js_string;
+}
+#endif
+
+/**
+ * kbase_job_hw_submit() - Submit a job to the GPU
+ * @kbdev:	Device pointer
+ * @katom:	Atom to submit
+ * @js:		Job slot to submit on
+ *
+ * The caller must check kbasep_jm_is_submit_slots_free() != false before
+ * calling this.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbase_job_hw_submit(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom,
+				int js);
+
+/**
+ * kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop
+ *						   on the specified atom
+ * @kbdev:		Device pointer
+ * @js:			Job slot to stop on
+ * @action:		The action to perform, either JSn_COMMAND_HARD_STOP or
+ *			JSn_COMMAND_SOFT_STOP
+ * @core_reqs:		Core requirements of atom to stop
+ * @target_katom:	Atom to stop
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold the hwaccess_lock
+ */
+void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+					int js,
+					u32 action,
+					base_jd_core_req core_reqs,
+					struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job
+ *					 slot belonging to a given context.
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer. May be NULL
+ * @js:		Job slot to stop
+ * @katom:	Specific atom to stop. May be NULL
+ * @action:	The action to perform, either JSn_COMMAND_HARD_STOP or
+ *		JSn_COMMAND_SOFT_STOP
+ *
+ * If no context is provided then all jobs on the slot will be soft or hard
+ * stopped.
+ *
+ * If a katom is provided then only that specific atom will be stopped. In this
+ * case the kctx parameter is ignored.
+ *
+ * Jobs that are on the slot but are not yet on the GPU will be unpulled and
+ * returned to the job scheduler.
+ *
+ * Return: true if an atom was stopped, false otherwise
+ */
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js,
+					struct kbase_jd_atom *katom,
+					u32 action);
+
+/**
+ * kbase_job_slot_init - Initialise job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_job_slot_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_halt - Halt the job slot framework
+ * @kbdev: Device pointer
+ *
+ * Should prevent any further job slot processing
+ */
+void kbase_job_slot_halt(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_term - Terminate job slot framework
+ * @kbdev: Device pointer
+ *
+ * Called on driver termination
+ */
+void kbase_job_slot_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpu_cache_clean - Cause a GPU cache clean & flush
+ * @kbdev: Device pointer
+ *
+ * Caller must not be in IRQ context
+ */
+void kbase_gpu_cache_clean(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JM_HWACCESS_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
new file mode 100644
index 0000000..c714582
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
@@ -0,0 +1,1722 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_js.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <mali_kbase_10969_workaround.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/* Return whether the specified ringbuffer is empty. HW access lock must be
+ * held */
+#define SLOT_RB_EMPTY(rb)   (rb->write_idx == rb->read_idx)
+/* Return number of atoms currently in the specified ringbuffer. HW access lock
+ * must be held */
+#define SLOT_RB_ENTRIES(rb) (int)(s8)(rb->write_idx - rb->read_idx)
+
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom,
+					ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_enqueue_atom - Enqueue an atom in the HW access ringbuffer
+ * @kbdev: Device pointer
+ * @katom: Atom to enqueue
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_gpu_enqueue_atom(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[katom->slot_nr];
+
+	WARN_ON(SLOT_RB_ENTRIES(rb) >= SLOT_RB_SIZE);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
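+	/* Indices run freely; masking with SLOT_RB_MASK selects the physical
+	 * ringbuffer entry */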
+	rb->entries[rb->write_idx & SLOT_RB_MASK].katom = katom;
+	rb->write_idx++;
+
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+}
+
+/**
+ * kbase_gpu_dequeue_atom - Remove an atom from the HW access ringbuffer, once
+ * it has been completed
+ * @kbdev:         Device pointer
+ * @js:            Job slot to remove atom from
+ * @end_timestamp: Pointer to timestamp of atom completion. May be NULL, in
+ *                 which case current time will be used.
+ *
+ * Context: Caller must hold the HW access lock
+ *
+ * Return: Atom removed from ringbuffer
+ */
+static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev,
+						int js,
+						ktime_t *end_timestamp)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+	struct kbase_jd_atom *katom;
+
+	if (SLOT_RB_EMPTY(rb)) {
+		WARN(1, "GPU ringbuffer unexpectedly empty\n");
+		return NULL;
+	}
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom = rb->entries[rb->read_idx & SLOT_RB_MASK].katom;
+
+	kbase_gpu_release_atom(kbdev, katom, end_timestamp);
+
+	rb->read_idx++;
+
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
+
+	return katom;
+}
+
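+/**
+ * kbase_gpu_inspect - Inspect the atom at a given position in a slot
+ *                     ringbuffer without removing it
+ * @kbdev: Device pointer
+ * @js:    Job slot to inspect
+ * @idx:   Index into the ringbuffer, 0 being the oldest (head) atom
+ *
+ * Return: The atom at position @idx, or NULL if @idx is beyond the number of
+ *         atoms currently in the ringbuffer
+ */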
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+					int idx)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if ((SLOT_RB_ENTRIES(rb) - 1) < idx)
+		return NULL; /* idx out of range */
+
+	return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom;
+}
+
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+					int js)
+{
+	struct slot_rb *rb = &kbdev->hwaccess.backend.slot_rb[js];
+
+	if (SLOT_RB_EMPTY(rb))
+		return NULL;
+
+	return rb->entries[(rb->write_idx - 1) & SLOT_RB_MASK].katom;
+}
+
+/**
+ * kbase_gpu_atoms_submitted - Inspect whether a slot has any atoms currently
+ * on the GPU
+ * @kbdev:  Device pointer
+ * @js:     Job slot to inspect
+ *
+ * Return: true if there are atoms on the GPU for slot js,
+ *         false otherwise
+ */
+static bool kbase_gpu_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+		if (!katom)
+			return false;
+		if (katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED ||
+				katom->gpu_rb_state == KBASE_ATOM_GPU_RB_READY)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * kbase_gpu_atoms_submitted_any() - Inspect whether there are any atoms
+ * currently on the GPU
+ * @kbdev:  Device pointer
+ *
+ * Return: true if there are any atoms on the GPU, false otherwise
+ */
+static bool kbase_gpu_atoms_submitted_any(struct kbase_device *kbdev)
+{
+	int js;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		for (i = 0; i < SLOT_RB_SIZE; i++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+			if (katom && katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED)
+				return true;
+		}
+	}
+	return false;
+}
+
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js)
+{
+	int nr = 0;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+		if (katom && (katom->gpu_rb_state ==
+						KBASE_ATOM_GPU_RB_SUBMITTED))
+			nr++;
+	}
+
+	return nr;
+}
+
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js)
+{
+	int nr = 0;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		if (kbase_gpu_inspect(kbdev, js, i))
+			nr++;
+	}
+
+	return nr;
+}
+
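+/**
+ * kbase_gpu_nr_atoms_on_slot_min - Count the atoms on a slot that have
+ *                                  reached at least a given ringbuffer state
+ * @kbdev:        Device pointer
+ * @js:           Job slot to inspect
+ * @min_rb_state: Minimum ringbuffer state to count
+ *
+ * Return: Number of atoms on slot @js with a gpu_rb_state of at least
+ *         @min_rb_state
+ */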
+static int kbase_gpu_nr_atoms_on_slot_min(struct kbase_device *kbdev, int js,
+				enum kbase_atom_gpu_rb_state min_rb_state)
+{
+	int nr = 0;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (i = 0; i < SLOT_RB_SIZE; i++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, i);
+
+		if (katom && (katom->gpu_rb_state >= min_rb_state))
+			nr++;
+	}
+
+	return nr;
+}
+
+/**
+ * check_secure_atom - Check if the given atom is in the given secure state and
+ *                     has a ringbuffer state of at least
+ *                     KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @katom:  Atom pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if atom is in the given state, false otherwise
+ */
+static bool check_secure_atom(struct kbase_jd_atom *katom, bool secure)
+{
+	if (katom->gpu_rb_state >=
+			KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION &&
+			((kbase_jd_katom_is_protected(katom) && secure) ||
+			(!kbase_jd_katom_is_protected(katom) && !secure)))
+		return true;
+
+	return false;
+}
+
+/**
+ * kbase_gpu_check_secure_atoms - Check if there are any atoms in the given
+ *                                secure state in the ringbuffers of at least
+ *                                state
+ *                                KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @kbdev:  Device pointer
+ * @secure: Desired secure state
+ *
+ * Return: true if any atoms are in the given state, false otherwise
+ */
+static bool kbase_gpu_check_secure_atoms(struct kbase_device *kbdev,
+		bool secure)
+{
+	int js, i;
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		for (i = 0; i < SLOT_RB_SIZE; i++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+					js, i);
+
+			if (katom) {
+				if (check_secure_atom(katom, secure))
+					return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js)
+{
+	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) !=
+						KBASE_RESET_GPU_NOT_PENDING) {
+		/* The GPU is being reset - so prevent submission */
+		return 0;
+	}
+
+	return SLOT_RB_SIZE - kbase_backend_nr_atoms_on_slot(kbdev, js);
+}
+
+
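+/**
+ * kbase_gpu_release_atom - Release the resources an atom acquired while
+ *                          progressing through the slot ringbuffer
+ * @kbdev:         Device pointer
+ * @katom:         Atom being released
+ * @end_timestamp: Completion timestamp, or NULL if not applicable
+ *
+ * Depending on how far @katom had progressed, this releases what it held
+ * (GPU cycle counter, protected mode overrides, extra L2 references) and
+ * returns it to the KBASE_ATOM_GPU_RB_WAITING_BLOCKED state.
+ */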
+static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+					struct kbase_jd_atom *katom,
+					ktime_t *end_timestamp)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	switch (katom->gpu_rb_state) {
+	case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+		/* Should be impossible */
+		WARN(1, "Attempting to release atom not in ringbuffer\n");
+		break;
+
+	case KBASE_ATOM_GPU_RB_SUBMITTED:
+		/* Inform power management at start/finish of atom so it can
+		 * update its GPU utilisation metrics. Mark atom as not
+		 * submitted beforehand. */
+		katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+		kbase_pm_metrics_update(kbdev, end_timestamp);
+
+		if (katom->core_req & BASE_JD_REQ_PERMON)
+			kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+		KBASE_TLSTREAM_TL_NRET_ATOM_LPU(katom,
+			&kbdev->gpu_props.props.raw_props.js_features
+				[katom->slot_nr]);
+		KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, &kbdev->as[kctx->as_nr]);
+		KBASE_TLSTREAM_TL_NRET_CTX_LPU(kctx,
+			&kbdev->gpu_props.props.raw_props.js_features
+				[katom->slot_nr]);
+
+	case KBASE_ATOM_GPU_RB_READY:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+		break;
+
+	case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+		if (kbase_jd_katom_is_protected(katom) &&
+				(katom->protected_state.enter !=
+				KBASE_ATOM_ENTER_PROTECTED_CHECK) &&
+				(katom->protected_state.enter !=
+				KBASE_ATOM_ENTER_PROTECTED_HWCNT))
+			kbase_pm_protected_override_disable(kbdev);
+		if (!kbase_jd_katom_is_protected(katom) &&
+				(katom->protected_state.exit !=
+				KBASE_ATOM_EXIT_PROTECTED_CHECK) &&
+				(katom->protected_state.exit !=
+				KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT))
+			kbase_pm_protected_override_disable(kbdev);
+
+		if (katom->protected_state.enter !=
+				KBASE_ATOM_ENTER_PROTECTED_CHECK ||
+				katom->protected_state.exit !=
+				KBASE_ATOM_EXIT_PROTECTED_CHECK)
+			kbdev->protected_mode_transition = false;
+		/* If the atom has suspended hwcnt but has not yet entered
+		 * protected mode, then resume hwcnt now. If the GPU is now in
+		 * protected mode then hwcnt will be resumed by GPU reset so
+		 * don't resume it here.
+		 */
+		if (kbase_jd_katom_is_protected(katom) &&
+				((katom->protected_state.enter ==
+				KBASE_ATOM_ENTER_PROTECTED_IDLE_L2) ||
+				 (katom->protected_state.enter ==
+				KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY))) {
+			WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+			kbdev->protected_mode_hwcnt_desired = true;
+			if (kbdev->protected_mode_hwcnt_disabled) {
+				kbase_hwcnt_context_enable(
+					kbdev->hwcnt_gpu_ctx);
+				kbdev->protected_mode_hwcnt_disabled = false;
+			}
+		}
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+			if (katom->atom_flags &
+					KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+				kbase_pm_protected_l2_override(kbdev, false);
+				katom->atom_flags &=
+					~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+			}
+		}
+
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+
+	case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+		break;
+	}
+
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_WAITING_BLOCKED;
+	katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+}
+
+static void kbase_gpu_mark_atom_for_return(struct kbase_device *kbdev,
+						struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_gpu_release_atom(kbdev, katom, NULL);
+	katom->gpu_rb_state = KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+}
+
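+/**
+ * kbase_gpu_rmu_workaround - Apply the BASE_HW_ISSUE_8987 submission rule
+ * @kbdev: Device pointer
+ * @js:    Job slot an atom is about to be submitted to
+ *
+ * On GPUs affected by issue 8987, atoms on slot 2 must not run concurrently
+ * with atoms on slots 0/1; backend->rmu_workaround_flag is used to alternate
+ * which group may submit next when both are contending.
+ *
+ * Return: true if submission to @js may proceed, false if it must wait
+ */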
+static inline bool kbase_gpu_rmu_workaround(struct kbase_device *kbdev, int js)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+	bool slot_busy[3];
+
+	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+		return true;
+	slot_busy[0] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 0,
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+	slot_busy[1] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 1,
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+	slot_busy[2] = kbase_gpu_nr_atoms_on_slot_min(kbdev, 2,
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY);
+
+	if ((js == 2 && !(slot_busy[0] || slot_busy[1])) ||
+		(js != 2 && !slot_busy[2]))
+		return true;
+
+	/* Don't submit slot 2 atom while GPU has jobs on slots 0/1 */
+	if (js == 2 && (kbase_gpu_atoms_submitted(kbdev, 0) ||
+			kbase_gpu_atoms_submitted(kbdev, 1) ||
+			backend->rmu_workaround_flag))
+		return false;
+
+	/* Don't submit slot 0/1 atom while GPU has jobs on slot 2 */
+	if (js != 2 && (kbase_gpu_atoms_submitted(kbdev, 2) ||
+			!backend->rmu_workaround_flag))
+		return false;
+
+	backend->rmu_workaround_flag = !backend->rmu_workaround_flag;
+
+	return true;
+}
+
+/**
+ * other_slots_busy - Determine if any job slots other than @js are currently
+ *                    running atoms
+ * @kbdev: Device pointer
+ * @js:    Job slot
+ *
+ * Return: true if any slots other than @js are busy, false otherwise
+ */
+static inline bool other_slots_busy(struct kbase_device *kbdev, int js)
+{
+	int slot;
+
+	for (slot = 0; slot < kbdev->gpu_props.num_job_slots; slot++) {
+		if (slot == js)
+			continue;
+
+		if (kbase_gpu_nr_atoms_on_slot_min(kbdev, slot,
+				KBASE_ATOM_GPU_RB_SUBMITTED))
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
+{
+	return kbdev->protected_mode;
+}
+
+static void kbase_gpu_disable_coherent(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/*
+	 * When entering into protected mode, we must ensure that the
+	 * GPU is not operating in coherent mode as well. This is to
+	 * ensure that no protected memory can be leaked.
+	 */
+	if (kbdev->system_coherency == COHERENCY_ACE)
+		kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE);
+}
+
+static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+{
+	int err = -EINVAL;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ONCE(!kbdev->protected_ops,
+			"Cannot enter protected mode: protected callbacks not specified.\n");
+
+	if (kbdev->protected_ops) {
+		/* Switch GPU to protected mode */
+		err = kbdev->protected_ops->protected_mode_enable(
+				kbdev->protected_dev);
+
+		if (err) {
+			dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
+					err);
+		} else {
+			kbdev->protected_mode = true;
+			kbase_ipa_protection_mode_switch_event(kbdev);
+		}
+	}
+
+	return err;
+}
+
+static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ONCE(!kbdev->protected_ops,
+			"Cannot exit protected mode: protected callbacks not specified.\n");
+
+	if (!kbdev->protected_ops)
+		return -EINVAL;
+
+	/* The protected mode disable callback will be called as part of reset
+	 */
+	return kbase_reset_gpu_silent(kbdev);
+}
+
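+/**
+ * kbase_jm_protected_entry - Perform the final switch into protected mode
+ * @kbdev: Device pointer
+ * @katom: Array of atoms at the head of slot @js
+ * @idx:   Index within @katom of the atom entering protected mode
+ * @js:    Job slot the atom is on
+ *
+ * Attempts the actual protected mode switch. On failure the GPU hardware
+ * counters are re-enabled, the atom is marked invalid and, where ordering
+ * allows, returned to the job scheduler.
+ *
+ * Return: 0 on success, or a negative error code if the switch failed
+ */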
+static int kbase_jm_protected_entry(struct kbase_device *kbdev,
+				struct kbase_jd_atom **katom, int idx, int js)
+{
+	int err = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	err = kbase_gpu_protected_mode_enter(kbdev);
+
+	/*
+	 * Regardless of result before this call, we are no longer
+	 * transitioning the GPU.
+	 */
+
+	kbdev->protected_mode_transition = false;
+	kbase_pm_protected_override_disable(kbdev);
+	kbase_pm_update_cores_state_nolock(kbdev);
+
+	KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev);
+	if (err) {
+		/*
+		 * Failed to switch into protected mode, resume
+		 * GPU hwcnt and fail atom.
+		 */
+		WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+		kbdev->protected_mode_hwcnt_desired = true;
+		if (kbdev->protected_mode_hwcnt_disabled) {
+			kbase_hwcnt_context_enable(
+				kbdev->hwcnt_gpu_ctx);
+			kbdev->protected_mode_hwcnt_disabled = false;
+		}
+
+		katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+		kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+		/*
+		 * Only return if head atom or previous atom
+		 * already removed - as atoms must be returned
+		 * in order.
+		 */
+		if (idx == 0 || katom[0]->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+			kbase_gpu_dequeue_atom(kbdev, js, NULL);
+			kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+		}
+
+		return -EINVAL;
+	}
+
+	/*
+	 * Protected mode sanity checks.
+	 */
+	KBASE_DEBUG_ASSERT_MSG(
+			kbase_jd_katom_is_protected(katom[idx]) ==
+			kbase_gpu_in_protected_mode(kbdev),
+			"Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+			kbase_jd_katom_is_protected(katom[idx]),
+			kbase_gpu_in_protected_mode(kbdev));
+	katom[idx]->gpu_rb_state =
+			KBASE_ATOM_GPU_RB_READY;
+
+	return err;
+}
+
+static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
+		struct kbase_jd_atom **katom, int idx, int js)
+{
+	int err = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	switch (katom[idx]->protected_state.enter) {
+	case KBASE_ATOM_ENTER_PROTECTED_CHECK:
+		KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(kbdev);
+		/* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+		 * should ensure that we are not already transitioning, and that
+		 * there are no atoms currently on the GPU. */
+		WARN_ON(kbdev->protected_mode_transition);
+		WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+		/* If hwcnt is disabled, it means we didn't clean up correctly
+		 * during last exit from protected mode.
+		 */
+		WARN_ON(kbdev->protected_mode_hwcnt_disabled);
+
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_HWCNT;
+
+		kbdev->protected_mode_transition = true;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_HWCNT:
+		/* See if we can get away with disabling hwcnt atomically */
+		kbdev->protected_mode_hwcnt_desired = false;
+		if (!kbdev->protected_mode_hwcnt_disabled) {
+			if (kbase_hwcnt_context_disable_atomic(
+				kbdev->hwcnt_gpu_ctx))
+				kbdev->protected_mode_hwcnt_disabled = true;
+		}
+
+		/* We couldn't disable atomically, so kick off a worker */
+		if (!kbdev->protected_mode_hwcnt_disabled) {
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+			queue_work(system_wq,
+				&kbdev->protected_mode_hwcnt_disable_work);
+#else
+			queue_work(system_highpri_wq,
+				&kbdev->protected_mode_hwcnt_disable_work);
+#endif
+			return -EAGAIN;
+		}
+
+		/* Once this point is reached, the GPU must either be switched
+		 * to protected mode or hwcnt must be re-enabled. */
+
+		/*
+		 * Not in correct mode, begin protected mode switch.
+		 * Entering protected mode requires us to power down the L2,
+		 * and drop out of fully coherent mode.
+		 */
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
+
+		kbase_pm_protected_override_enable(kbdev);
+		kbase_pm_update_cores_state_nolock(kbdev);
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
+		/* Avoid unnecessary waiting on non-ACE platforms. */
+		if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE) {
+			if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
+				kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+				/*
+				 * The L2 is still powered, wait for all the users to
+				 * finish with it before doing the actual reset.
+				 */
+				return -EAGAIN;
+			}
+		}
+
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY:
+		/*
+		 * When entering into protected mode, we must ensure that the
+		 * GPU is not operating in coherent mode as well. This is to
+		 * ensure that no protected memory can be leaked.
+		 */
+		kbase_gpu_disable_coherent(kbdev);
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+			/*
+			 * Power on L2 caches; this will also result in the
+			 * correct value written to coherency enable register.
+			 */
+			kbase_pm_protected_l2_override(kbdev, true);
+
+			/*
+			 * Set the flag on the atom that additional
+			 * L2 references are taken.
+			 */
+			katom[idx]->atom_flags |=
+					KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+		}
+
+		katom[idx]->protected_state.enter =
+			KBASE_ATOM_ENTER_PROTECTED_FINISHED;
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234))
+			return -EAGAIN;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+			/*
+			 * Check that L2 caches are powered and, if so,
+			 * enter protected mode.
+			 */
+			if (kbdev->pm.backend.l2_state == KBASE_L2_ON) {
+				/*
+				 * Remove additional L2 reference and reset
+				 * the atom flag which denotes it.
+				 */
+				if (katom[idx]->atom_flags &
+					KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+					kbase_pm_protected_l2_override(kbdev,
+							false);
+					katom[idx]->atom_flags &=
+						~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+				}
+
+				err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+				if (err)
+					return err;
+			} else {
+				/*
+				 * still waiting for L2 caches to power up
+				 */
+				return -EAGAIN;
+			}
+		} else {
+			err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
+		struct kbase_jd_atom **katom, int idx, int js)
+{
+	int err = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	switch (katom[idx]->protected_state.exit) {
+	case KBASE_ATOM_EXIT_PROTECTED_CHECK:
+		KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(kbdev);
+		/* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+		 * should ensure that we are not already transitioning, and that
+		 * there are no atoms currently on the GPU. */
+		WARN_ON(kbdev->protected_mode_transition);
+		WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+
+		/*
+		 * Exiting protected mode requires a reset, but first the L2
+		 * needs to be powered down to ensure it's not active when the
+		 * reset is issued.
+		 */
+		katom[idx]->protected_state.exit =
+				KBASE_ATOM_EXIT_PROTECTED_IDLE_L2;
+
+		kbdev->protected_mode_transition = true;
+		kbase_pm_protected_override_enable(kbdev);
+		kbase_pm_update_cores_state_nolock(kbdev);
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+	case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
+		if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
+				kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
+			/*
+			 * The L2 is still powered, wait for all the users to
+			 * finish with it before doing the actual reset.
+			 */
+			return -EAGAIN;
+		}
+		katom[idx]->protected_state.exit =
+				KBASE_ATOM_EXIT_PROTECTED_RESET;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_EXIT_PROTECTED_RESET:
+		/* Issue the reset to the GPU */
+		err = kbase_gpu_protected_mode_reset(kbdev);
+
+		if (err == -EAGAIN)
+			return -EAGAIN;
+
+		if (err) {
+			kbdev->protected_mode_transition = false;
+			kbase_pm_protected_override_disable(kbdev);
+
+			/* Failed to exit protected mode, fail atom */
+			katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+			kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+			/* Only return if head atom or previous atom
+			 * already removed - as atoms must be returned
+			 * in order */
+			if (idx == 0 || katom[0]->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+				kbase_gpu_dequeue_atom(kbdev, js, NULL);
+				kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+			}
+
+			/* If we're exiting from protected mode, hwcnt must have
+			 * been disabled during entry.
+			 */
+			WARN_ON(!kbdev->protected_mode_hwcnt_disabled);
+			kbdev->protected_mode_hwcnt_desired = true;
+			if (kbdev->protected_mode_hwcnt_disabled) {
+				kbase_hwcnt_context_enable(
+					kbdev->hwcnt_gpu_ctx);
+				kbdev->protected_mode_hwcnt_disabled = false;
+			}
+
+			return -EINVAL;
+		}
+
+		katom[idx]->protected_state.exit =
+				KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT;
+
+		/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+	case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
+		/* A GPU reset is issued when exiting protected mode. Once the
+		 * reset is done all atoms' state will also be reset. For this
+		 * reason, if the atom is still in this state we can safely
+		 * say that the reset has not completed i.e., we have not
+		 * finished exiting protected mode yet.
+		 */
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+void kbase_backend_slot_update(struct kbase_device *kbdev)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbase_reset_gpu_active(kbdev))
+		return;
+
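+	/* For each slot, walk the (at most two) queued atoms forward through
+	 * the gpu_rb_state machine as far as their current conditions allow;
+	 * each case below either advances the atom and falls through to the
+	 * next state, or breaks to leave it waiting. */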
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		struct kbase_jd_atom *katom[2];
+		int idx;
+
+		katom[0] = kbase_gpu_inspect(kbdev, js, 0);
+		katom[1] = kbase_gpu_inspect(kbdev, js, 1);
+		WARN_ON(katom[1] && !katom[0]);
+
+		for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+			bool cores_ready;
+			int ret;
+
+			if (!katom[idx])
+				continue;
+
+			switch (katom[idx]->gpu_rb_state) {
+			case KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB:
+				/* Should be impossible */
+				WARN(1, "Attempting to update atom not in ringbuffer\n");
+				break;
+
+			case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
+				if (katom[idx]->atom_flags &
+						KBASE_KATOM_FLAG_X_DEP_BLOCKED)
+					break;
+
+				katom[idx]->gpu_rb_state =
+				KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
+				if (kbase_gpu_check_secure_atoms(kbdev,
+						!kbase_jd_katom_is_protected(
+						katom[idx])))
+					break;
+
+				if ((idx == 1) && (kbase_jd_katom_is_protected(
+								katom[0]) !=
+						kbase_jd_katom_is_protected(
+								katom[1])))
+					break;
+
+				if (kbdev->protected_mode_transition)
+					break;
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
+
+				/*
+				 * Exiting protected mode must be done before
+				 * the references on the cores are taken, as
+				 * a power-down of the L2 is required, which
+				 * can't happen after the references for this
+				 * atom are taken.
+				 */
+
+				if (!kbase_gpu_in_protected_mode(kbdev) &&
+					kbase_jd_katom_is_protected(katom[idx])) {
+					/* Atom needs to transition into protected mode. */
+					ret = kbase_jm_enter_protected_mode(kbdev,
+							katom, idx, js);
+					if (ret)
+						break;
+				} else if (kbase_gpu_in_protected_mode(kbdev) &&
+					!kbase_jd_katom_is_protected(katom[idx])) {
+					/* Atom needs to transition out of protected mode. */
+					ret = kbase_jm_exit_protected_mode(kbdev,
+							katom, idx, js);
+					if (ret)
+						break;
+				}
+				katom[idx]->protected_state.exit =
+						KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+				/* Atom needs no protected mode transition. */
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
+				if (katom[idx]->will_fail_event_code) {
+					kbase_gpu_mark_atom_for_return(kbdev,
+							katom[idx]);
+					/* Set EVENT_DONE so this atom will be
+					   completed, not unpulled. */
+					katom[idx]->event_code =
+						BASE_JD_EVENT_DONE;
+					/* Only return if head atom or previous
+					 * atom already removed - as atoms must
+					 * be returned in order. */
+					if (idx == 0 ||	katom[0]->gpu_rb_state ==
+							KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+						kbase_gpu_dequeue_atom(kbdev, js, NULL);
+						kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+					}
+					break;
+				}
+
+				cores_ready = kbase_pm_cores_requested(kbdev,
+						true);
+
+				if (katom[idx]->event_code ==
+						BASE_JD_EVENT_PM_EVENT) {
+					katom[idx]->gpu_rb_state =
+						KBASE_ATOM_GPU_RB_RETURN_TO_JS;
+					break;
+				}
+
+				if (!cores_ready)
+					break;
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
+				if (!kbase_gpu_rmu_workaround(kbdev, js))
+					break;
+
+				katom[idx]->gpu_rb_state =
+					KBASE_ATOM_GPU_RB_READY;
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_READY:
+
+				if (idx == 1) {
+					/* Only submit if head atom or previous
+					 * atom already submitted */
+					if ((katom[0]->gpu_rb_state !=
+						KBASE_ATOM_GPU_RB_SUBMITTED &&
+						katom[0]->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB))
+						break;
+
+					/* If intra-slot serialization in use
+					 * then don't submit atom to NEXT slot
+					 */
+					if (kbdev->serialize_jobs &
+						KBASE_SERIALIZE_INTRA_SLOT)
+						break;
+				}
+
+				/* If inter-slot serialization in use then don't
+				 * submit atom if any other slots are in use */
+				if ((kbdev->serialize_jobs &
+						KBASE_SERIALIZE_INTER_SLOT) &&
+						other_slots_busy(kbdev, js))
+					break;
+
+				if ((kbdev->serialize_jobs &
+						KBASE_SERIALIZE_RESET) &&
+						kbase_reset_gpu_active(kbdev))
+					break;
+
+				/* Check if this job needs the cycle counter
+				 * enabled before submission */
+				if (katom[idx]->core_req & BASE_JD_REQ_PERMON)
+					kbase_pm_request_gpu_cycle_counter_l2_is_on(
+									kbdev);
+
+				kbase_job_hw_submit(kbdev, katom[idx], js);
+				katom[idx]->gpu_rb_state =
+						KBASE_ATOM_GPU_RB_SUBMITTED;
+
+				/* Inform power management at start/finish of
+				 * atom so it can update its GPU utilisation
+				 * metrics. */
+				kbase_pm_metrics_update(kbdev,
+						&katom[idx]->start_timestamp);
+
+			/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+			case KBASE_ATOM_GPU_RB_SUBMITTED:
+				/* Atom submitted to HW, nothing else to do */
+				break;
+
+			case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+				/* Only return if head atom or previous atom
+				 * already removed - as atoms must be returned
+				 * in order */
+				if (idx == 0 || katom[0]->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+					kbase_gpu_dequeue_atom(kbdev, js, NULL);
+					kbase_jm_return_atom_to_js(kbdev,
+								katom[idx]);
+				}
+				break;
+			}
+		}
+	}
+
+	/* Warn if PRLAM-8987 affinity restrictions are violated */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+		WARN_ON((kbase_gpu_atoms_submitted(kbdev, 0) ||
+			kbase_gpu_atoms_submitted(kbdev, 1)) &&
+			kbase_gpu_atoms_submitted(kbdev, 2));
+}
+
+
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	kbase_gpu_enqueue_atom(kbdev, katom);
+	kbase_backend_slot_update(kbdev);
+}
+
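+/* True if @katom has a pre-dependency or carries a cross-slot / fail-blocker
+ * dependency flag */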
+#define HAS_DEP(katom) (katom->pre_dep || katom->atom_flags & \
+	(KBASE_KATOM_FLAG_X_DEP_BLOCKED | KBASE_KATOM_FLAG_FAIL_BLOCKER))
+
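+/**
+ * kbase_gpu_irq_evict - Evict the atom in the NEXT registers of a job slot
+ * @kbdev:           Device pointer
+ * @js:              Job slot to evict from
+ * @completion_code: Completion code of the job that has just completed
+ *
+ * If the atom in the NEXT registers of slot @js belongs to the same context
+ * as the completing head atom, and either depends on it or shares its
+ * priority, it is removed from the hardware (a NOP is written to
+ * JS_COMMAND_NEXT) and returned to the READY state so it can be re-examined
+ * after the completing job has been handled.
+ *
+ * Return: true if an atom was evicted from the NEXT registers, false
+ *         otherwise
+ */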
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js,
+				u32 completion_code)
+{
+	struct kbase_jd_atom *katom;
+	struct kbase_jd_atom *next_katom;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom = kbase_gpu_inspect(kbdev, js, 0);
+	next_katom = kbase_gpu_inspect(kbdev, js, 1);
+
+	if (next_katom && katom->kctx == next_katom->kctx &&
+		next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED &&
+		(HAS_DEP(next_katom) || next_katom->sched_priority ==
+				katom->sched_priority) &&
+		(kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO))
+									!= 0 ||
+		kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI))
+									!= 0)) {
+		kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
+				JS_COMMAND_NOP);
+		next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+
+		if (completion_code == BASE_JD_EVENT_STOPPED) {
+			KBASE_TLSTREAM_TL_NRET_ATOM_LPU(next_katom,
+				&kbdev->gpu_props.props.raw_props.js_features
+					[next_katom->slot_nr]);
+			KBASE_TLSTREAM_TL_NRET_ATOM_AS(next_katom, &kbdev->as
+					[next_katom->kctx->as_nr]);
+			KBASE_TLSTREAM_TL_NRET_CTX_LPU(next_katom->kctx,
+				&kbdev->gpu_props.props.raw_props.js_features
+					[next_katom->slot_nr]);
+		}
+
+		if (next_katom->core_req & BASE_JD_REQ_PERMON)
+			kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+		return true;
+	}
+
+	return false;
+}
+
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+				u32 completion_code,
+				u64 job_tail,
+				ktime_t *end_timestamp)
+{
+	struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+	struct kbase_context *kctx = katom->kctx;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/*
+	 * When a hard-stop follows closely after a soft-stop, the completion
+	 * code may be set to STOPPED even though the job was terminated
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8438)) {
+		if (completion_code == BASE_JD_EVENT_STOPPED &&
+				(katom->atom_flags &
+				KBASE_KATOM_FLAG_BEEN_HARD_STOPPED)) {
+			completion_code = BASE_JD_EVENT_TERMINATED;
+		}
+	}
+
+	if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) || (katom->core_req &
+					BASE_JD_REQ_SKIP_CACHE_END)) &&
+			completion_code != BASE_JD_EVENT_DONE &&
+			!(completion_code & BASE_JD_SW_EVENT)) {
+		/* When a job chain fails on a T60x, or when
+		 * BASE_JD_REQ_SKIP_CACHE_END is set, the GPU cache is not
+		 * flushed. To prevent future evictions causing possible memory
+		 * corruption, we need to flush the cache manually before any
+		 * affected memory gets reused. */
+		katom->need_cache_flush_cores_retained = true;
+	} else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
+		if (kbdev->gpu_props.num_core_groups > 1 &&
+				katom->device_nr >= 1) {
+			dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
+			katom->need_cache_flush_cores_retained = true;
+		}
+	}
+
+	katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+
+	if (completion_code == BASE_JD_EVENT_STOPPED) {
+		struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+									0);
+
+		/*
+		 * Dequeue next atom from ringbuffers on same slot if required.
+		 * This atom will already have been removed from the NEXT
+		 * registers by kbase_gpu_soft_hard_stop_slot(), to ensure that
+		 * the atoms on this slot are returned in the correct order.
+		 */
+		if (next_katom && katom->kctx == next_katom->kctx &&
+				next_katom->sched_priority ==
+				katom->sched_priority) {
+			WARN_ON(next_katom->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_SUBMITTED);
+			kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
+			kbase_jm_return_atom_to_js(kbdev, next_katom);
+		}
+	} else if (completion_code != BASE_JD_EVENT_DONE) {
+		struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+		int i;
+
+		if (!kbase_ctx_flag(katom->kctx, KCTX_DYING))
+			dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
+					js, completion_code,
+					kbase_exception_name
+					(kbdev,
+					completion_code));
+
+#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
+		KBASE_TRACE_DUMP(kbdev);
+#endif
+		kbasep_js_clear_submit_allowed(js_devdata, katom->kctx);
+
+		/*
+		 * Remove all atoms on the same context from ringbuffers. This
+		 * will not remove atoms that are already on the GPU, as these
+		 * are guaranteed not to have fail dependencies on the failed
+		 * atom.
+		 */
+		for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
+			struct kbase_jd_atom *katom_idx0 =
+						kbase_gpu_inspect(kbdev, i, 0);
+			struct kbase_jd_atom *katom_idx1 =
+						kbase_gpu_inspect(kbdev, i, 1);
+
+			if (katom_idx0 && katom_idx0->kctx == katom->kctx &&
+					HAS_DEP(katom_idx0) &&
+					katom_idx0->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_SUBMITTED) {
+				/* Dequeue katom_idx0 from ringbuffer */
+				kbase_gpu_dequeue_atom(kbdev, i, end_timestamp);
+
+				if (katom_idx1 &&
+						katom_idx1->kctx == katom->kctx
+						&& HAS_DEP(katom_idx1) &&
+						katom_idx0->gpu_rb_state !=
+						KBASE_ATOM_GPU_RB_SUBMITTED) {
+					/* Dequeue katom_idx1 from ringbuffer */
+					kbase_gpu_dequeue_atom(kbdev, i,
+							end_timestamp);
+
+					katom_idx1->event_code =
+							BASE_JD_EVENT_STOPPED;
+					kbase_jm_return_atom_to_js(kbdev,
+								katom_idx1);
+				}
+				katom_idx0->event_code = BASE_JD_EVENT_STOPPED;
+				kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+
+			} else if (katom_idx1 &&
+					katom_idx1->kctx == katom->kctx &&
+					HAS_DEP(katom_idx1) &&
+					katom_idx1->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_SUBMITTED) {
+				/* Cannot dequeue this atom yet - it will be
+				 * dequeued when the atom at idx0 completes */
+				katom_idx1->event_code = BASE_JD_EVENT_STOPPED;
+				kbase_gpu_mark_atom_for_return(kbdev,
+								katom_idx1);
+			}
+		}
+	}
+
+	KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_JOB_DONE, kctx, katom, katom->jc,
+					js, completion_code);
+
+	if (job_tail != 0 && job_tail != katom->jc) {
+		bool was_updated = (job_tail != katom->jc);
+
+		/* Some of the job has been executed, so we update the job chain
+		 * address to where we should resume from */
+		katom->jc = job_tail;
+		if (was_updated)
+			KBASE_TRACE_ADD_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx,
+						katom, job_tail, js);
+	}
+
+	/* Only update the event code for jobs that weren't cancelled */
+	if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
+		katom->event_code = (base_jd_event_code)completion_code;
+
+	/* Complete the job, and start new ones
+	 *
+	 * Also defer remaining work onto the workqueue:
+	 * - Re-queue Soft-stopped jobs
+	 * - For any other jobs, queue the job back into the dependency system
+	 * - Schedule out the parent context if necessary, and schedule a new
+	 *   one in.
+	 */
+#ifdef CONFIG_GPU_TRACEPOINTS
+	{
+		/* The atom in the HEAD */
+		struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+									0);
+
+		if (next_katom && next_katom->gpu_rb_state ==
+						KBASE_ATOM_GPU_RB_SUBMITTED) {
+			char js_string[16];
+
+			trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+							js_string,
+							sizeof(js_string)),
+						ktime_to_ns(*end_timestamp),
+						(u32)next_katom->kctx->id, 0,
+						next_katom->work_id);
+			kbdev->hwaccess.backend.slot_rb[js].last_context =
+							next_katom->kctx;
+		} else {
+			char js_string[16];
+
+			trace_gpu_sched_switch(kbasep_make_job_slot_string(js,
+							js_string,
+							sizeof(js_string)),
+						ktime_to_ns(ktime_get()), 0, 0,
+						0);
+			kbdev->hwaccess.backend.slot_rb[js].last_context = 0;
+		}
+	}
+#endif
+
+	if (kbdev->serialize_jobs & KBASE_SERIALIZE_RESET)
+		kbase_reset_gpu_silent(kbdev);
+
+	if (completion_code == BASE_JD_EVENT_STOPPED)
+		katom = kbase_jm_return_atom_to_js(kbdev, katom);
+	else
+		katom = kbase_jm_complete(kbdev, katom, end_timestamp);
+
+	if (katom) {
+		/* Cross-slot dependency has now become runnable. Try to submit
+		 * it. */
+
+		/* Check if there are lower priority jobs to soft stop */
+		kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+		kbase_jm_try_kick(kbdev, 1 << katom->slot_nr);
+	}
+
+	/* Job completion may have unblocked other atoms. Try to update all job
+	 * slots */
+	kbase_backend_slot_update(kbdev);
+}
+
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Reset should always take the GPU out of protected mode */
+	WARN_ON(kbase_gpu_in_protected_mode(kbdev));
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		int atom_idx = 0;
+		int idx;
+
+		for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+					js, atom_idx);
+			bool keep_in_jm_rb = false;
+
+			if (!katom)
+				break;
+			if (katom->protected_state.exit ==
+			    KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT) {
+				/* protected mode sanity checks */
+				KBASE_DEBUG_ASSERT_MSG(
+					kbase_jd_katom_is_protected(katom) == kbase_gpu_in_protected_mode(kbdev),
+					"Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+					kbase_jd_katom_is_protected(katom), kbase_gpu_in_protected_mode(kbdev));
+				KBASE_DEBUG_ASSERT_MSG(
+					(kbase_jd_katom_is_protected(katom) && js == 0) ||
+					!kbase_jd_katom_is_protected(katom),
+					"Protected atom on JS%d not supported", js);
+			}
+			if ((katom->gpu_rb_state < KBASE_ATOM_GPU_RB_SUBMITTED) &&
+			    !kbase_ctx_flag(katom->kctx, KCTX_DYING))
+				keep_in_jm_rb = true;
+
+			kbase_gpu_release_atom(kbdev, katom, NULL);
+
+			/*
+			 * If the atom wasn't on HW when the reset was issued
+			 * then leave it in the RB and next time we're kicked
+			 * it will be processed again from the starting state.
+			 */
+			if (keep_in_jm_rb) {
+				katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+				/* As the atom was not removed, increment the
+				 * index so that we read the correct atom in the
+				 * next iteration. */
+				atom_idx++;
+				continue;
+			}
+
+			/*
+			 * The atom was on the HW when the reset was issued
+			 * all we can do is fail the atom.
+			 */
+			kbase_gpu_dequeue_atom(kbdev, js, NULL);
+			katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+			kbase_jm_complete(kbdev, katom, end_timestamp);
+		}
+	}
+
+	/* Re-enable GPU hardware counters if we're resetting from protected
+	 * mode.
+	 */
+	kbdev->protected_mode_hwcnt_desired = true;
+	if (kbdev->protected_mode_hwcnt_disabled) {
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		kbdev->protected_mode_hwcnt_disabled = false;
+
+		KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev);
+	}
+
+	kbdev->protected_mode_transition = false;
+	kbase_pm_protected_override_disable(kbdev);
+}
+
+static inline void kbase_gpu_stop_atom(struct kbase_device *kbdev,
+					int js,
+					struct kbase_jd_atom *katom,
+					u32 action)
+{
+	u32 hw_action = action & JS_COMMAND_MASK;
+
+	kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom);
+	kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, hw_action,
+							katom->core_req, katom);
+	katom->kctx->blocked_js[js][katom->sched_priority] = true;
+}
+
+static inline void kbase_gpu_remove_atom(struct kbase_device *kbdev,
+						struct kbase_jd_atom *katom,
+						u32 action,
+						bool disjoint)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+	kbase_gpu_mark_atom_for_return(kbdev, katom);
+	katom->kctx->blocked_js[katom->slot_nr][katom->sched_priority] = true;
+
+	if (disjoint)
+		kbase_job_check_enter_disjoint(kbdev, action, katom->core_req,
+									katom);
+}
+
+static int should_stop_x_dep_slot(struct kbase_jd_atom *katom)
+{
+	if (katom->x_post_dep) {
+		struct kbase_jd_atom *dep_atom = katom->x_post_dep;
+
+		if (dep_atom->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB &&
+			dep_atom->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_RETURN_TO_JS)
+			return dep_atom->slot_nr;
+	}
+	return -1;
+}
+
+bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js,
+					struct kbase_jd_atom *katom,
+					u32 action)
+{
+	struct kbase_jd_atom *katom_idx0;
+	struct kbase_jd_atom *katom_idx1;
+
+	bool katom_idx0_valid, katom_idx1_valid;
+
+	bool ret = false;
+
+	int stop_x_dep_idx0 = -1, stop_x_dep_idx1 = -1;
+	int prio_idx0 = 0, prio_idx1 = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	katom_idx0 = kbase_gpu_inspect(kbdev, js, 0);
+	katom_idx1 = kbase_gpu_inspect(kbdev, js, 1);
+
+	if (katom_idx0)
+		prio_idx0 = katom_idx0->sched_priority;
+	if (katom_idx1)
+		prio_idx1 = katom_idx1->sched_priority;
+
+	if (katom) {
+		katom_idx0_valid = (katom_idx0 == katom);
+		/* If idx0 is to be removed and idx1 is on the same context,
+		 * then idx1 must also be removed otherwise the atoms might be
+		 * returned out of order */
+		if (katom_idx1)
+			katom_idx1_valid = (katom_idx1 == katom) ||
+						(katom_idx0_valid &&
+							(katom_idx0->kctx ==
+							katom_idx1->kctx));
+		else
+			katom_idx1_valid = false;
+	} else {
+		katom_idx0_valid = (katom_idx0 &&
+				(!kctx || katom_idx0->kctx == kctx));
+		katom_idx1_valid = (katom_idx1 &&
+				(!kctx || katom_idx1->kctx == kctx) &&
+				prio_idx0 == prio_idx1);
+	}
+
+	if (katom_idx0_valid)
+		stop_x_dep_idx0 = should_stop_x_dep_slot(katom_idx0);
+	if (katom_idx1_valid)
+		stop_x_dep_idx1 = should_stop_x_dep_slot(katom_idx1);
+
+	if (katom_idx0_valid) {
+		if (katom_idx0->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+			/* Simple case - just dequeue and return */
+			kbase_gpu_dequeue_atom(kbdev, js, NULL);
+			if (katom_idx1_valid) {
+				kbase_gpu_dequeue_atom(kbdev, js, NULL);
+				katom_idx1->event_code =
+						BASE_JD_EVENT_REMOVED_FROM_NEXT;
+				kbase_jm_return_atom_to_js(kbdev, katom_idx1);
+				katom_idx1->kctx->blocked_js[js][prio_idx1] =
+						true;
+			}
+
+			katom_idx0->event_code =
+						BASE_JD_EVENT_REMOVED_FROM_NEXT;
+			kbase_jm_return_atom_to_js(kbdev, katom_idx0);
+			katom_idx0->kctx->blocked_js[js][prio_idx0] = true;
+		} else {
+			/* katom_idx0 is on GPU */
+			if (katom_idx1_valid && katom_idx1->gpu_rb_state ==
+						KBASE_ATOM_GPU_RB_SUBMITTED) {
+				/* katom_idx0 and katom_idx1 are on GPU */
+
+				if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_COMMAND_NEXT)) == 0) {
+					/* idx0 has already completed - stop
+					 * idx1 if needed */
+					if (katom_idx1_valid) {
+						kbase_gpu_stop_atom(kbdev, js,
+								katom_idx1,
+								action);
+						ret = true;
+					}
+				} else {
+					/* idx1 is in NEXT registers - attempt
+					 * to remove */
+					kbase_reg_write(kbdev,
+							JOB_SLOT_REG(js,
+							JS_COMMAND_NEXT),
+							JS_COMMAND_NOP);
+
+					if (kbase_reg_read(kbdev,
+							JOB_SLOT_REG(js,
+							JS_HEAD_NEXT_LO))
+									!= 0 ||
+						kbase_reg_read(kbdev,
+							JOB_SLOT_REG(js,
+							JS_HEAD_NEXT_HI))
+									!= 0) {
+						/* idx1 removed successfully,
+						 * will be handled in IRQ */
+						kbase_gpu_remove_atom(kbdev,
+								katom_idx1,
+								action, true);
+						stop_x_dep_idx1 =
+					should_stop_x_dep_slot(katom_idx1);
+
+						/* stop idx0 if still on GPU */
+						kbase_gpu_stop_atom(kbdev, js,
+								katom_idx0,
+								action);
+						ret = true;
+					} else if (katom_idx1_valid) {
+						/* idx0 has already completed,
+						 * stop idx1 if needed */
+						kbase_gpu_stop_atom(kbdev, js,
+								katom_idx1,
+								action);
+						ret = true;
+					}
+				}
+			} else if (katom_idx1_valid) {
+				/* idx1 not on GPU but must be dequeued */
+
+				/* idx1 will be handled in IRQ */
+				kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+									false);
+				/* stop idx0 */
+				/* This will be repeated for anything removed
+				 * from the next registers, since their normal
+				 * flow was also interrupted, and this function
+				 * might not enter disjoint state e.g. if we
+				 * don't actually do a hard stop on the head
+				 * atom */
+				kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+									action);
+				ret = true;
+			} else {
+				/* no atom in idx1 */
+				/* just stop idx0 */
+				kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+									action);
+				ret = true;
+			}
+		}
+	} else if (katom_idx1_valid) {
+		if (katom_idx1->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED) {
+			/* Mark for return */
+			/* idx1 will be returned once idx0 completes */
+			kbase_gpu_remove_atom(kbdev, katom_idx1, action,
+									false);
+		} else {
+			/* idx1 is on GPU */
+			if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_COMMAND_NEXT)) == 0) {
+				/* idx0 has already completed - stop idx1 */
+				kbase_gpu_stop_atom(kbdev, js, katom_idx1,
+									action);
+				ret = true;
+			} else {
+				/* idx1 is in NEXT registers - attempt to
+				 * remove */
+				kbase_reg_write(kbdev, JOB_SLOT_REG(js,
+							JS_COMMAND_NEXT),
+							JS_COMMAND_NOP);
+
+				if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_HEAD_NEXT_LO)) != 0 ||
+				    kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+						JS_HEAD_NEXT_HI)) != 0) {
+					/* idx1 removed successfully, will be
+					 * handled in IRQ once idx0 completes */
+					kbase_gpu_remove_atom(kbdev, katom_idx1,
+									action,
+									false);
+				} else {
+					/* idx0 has already completed - stop
+					 * idx1 */
+					kbase_gpu_stop_atom(kbdev, js,
+								katom_idx1,
+								action);
+					ret = true;
+				}
+			}
+		}
+	}
+
+
+	if (stop_x_dep_idx0 != -1)
+		kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx0,
+								NULL, action);
+
+	if (stop_x_dep_idx1 != -1)
+		kbase_backend_soft_hard_stop_slot(kbdev, kctx, stop_x_dep_idx1,
+								NULL, action);
+
+	return ret;
+}
+
+void kbase_backend_cache_clean(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom)
+{
+	if (katom->need_cache_flush_cores_retained) {
+		kbase_gpu_start_cache_clean(kbdev);
+		kbase_gpu_wait_cache_clean(kbdev);
+
+		katom->need_cache_flush_cores_retained = false;
+	}
+}
+
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+						struct kbase_jd_atom *katom)
+{
+	/*
+	 * If cache flush required due to HW workaround then perform the flush
+	 * now
+	 */
+	kbase_backend_cache_clean(kbdev, katom);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969)            &&
+	    (katom->core_req & BASE_JD_REQ_FS)                        &&
+	    katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT       &&
+	    (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
+	    !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
+		dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
+		if (kbasep_10969_workaround_clamp_coordinates(katom)) {
+			/* The job had a TILE_RANGE_FAULT after it was
+			 * soft-stopped. Due to a HW issue we try to execute
+			 * the job again.
+			 */
+			dev_dbg(kbdev->dev,
+				"Clamping has been executed, try to rerun the job\n"
+			);
+			katom->event_code = BASE_JD_EVENT_STOPPED;
+			katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
+		}
+	}
+}
+
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+		base_jd_core_req core_req)
+{
+	if (!kbdev->pm.active_count) {
+		mutex_lock(&kbdev->js_data.runpool_mutex);
+		mutex_lock(&kbdev->pm.lock);
+		kbase_pm_update_active(kbdev);
+		mutex_unlock(&kbdev->pm.lock);
+		mutex_unlock(&kbdev->js_data.runpool_mutex);
+	}
+}
+
+void kbase_gpu_dump_slots(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	int js;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	dev_info(kbdev->dev, "kbase_gpu_dump_slots:\n");
+
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		int idx;
+
+		for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+			struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev,
+									js,
+									idx);
+
+			if (katom)
+				dev_info(kbdev->dev,
+				"  js%d idx%d : katom=%p gpu_rb_state=%d\n",
+				js, idx, katom, katom->gpu_rb_state);
+			else
+				dev_info(kbdev->dev, "  js%d idx%d : empty\n",
+								js, idx);
+		}
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
new file mode 100644
index 0000000..c3b9f2d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
@@ -0,0 +1,83 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPU_H_
+#define _KBASE_HWACCESS_GPU_H_
+
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+/**
+ * kbase_gpu_irq_evict - Evict an atom from a NEXT slot
+ *
+ * @kbdev:           Device pointer
+ * @js:              Job slot to evict from
+ * @completion_code: Event code from job that was run.
+ *
+ * Evict the atom in the NEXT slot for the specified job slot. This function is
+ * called from the job complete IRQ handler when the previous job has failed.
+ *
+ * Return: true if job evicted from NEXT registers, false otherwise
+ */
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js,
+				u32 completion_code);
+
+/**
+ * kbase_gpu_complete_hw - Complete an atom on job slot js
+ *
+ * @kbdev:           Device pointer
+ * @js:              Job slot that has completed
+ * @completion_code: Event code from job that has completed
+ * @job_tail:        The tail address from the hardware if the job has partially
+ *                   completed
+ * @end_timestamp:   Time of completion
+ */
+void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+				u32 completion_code,
+				u64 job_tail,
+				ktime_t *end_timestamp);
+
+/**
+ * kbase_gpu_inspect - Inspect the contents of the HW access ringbuffer
+ *
+ * @kbdev:  Device pointer
+ * @js:     Job slot to inspect
+ * @idx:    Index into ringbuffer. 0 is the job currently running on
+ *          the slot, 1 is the job waiting, all other values are invalid.
+ * Return:  The atom at that position in the ringbuffer
+ *          or NULL if no atom present
+ */
+struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
+					int idx);
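+
+/*
+ * Illustrative usage sketch (not part of the driver): callers walk the
+ * per-slot ringbuffer entries under hwaccess_lock, as kbase_gpu_dump_slots()
+ * does, e.g.
+ *
+ *   for (idx = 0; idx < SLOT_RB_SIZE; idx++) {
+ *           struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, idx);
+ *
+ *           if (!katom)
+ *                   break;
+ *           // inspect katom->gpu_rb_state, etc.
+ *   }
+ */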
+
+/**
+ * kbase_gpu_dump_slots - Print the contents of the slot ringbuffers
+ *
+ * @kbdev:  Device pointer
+ */
+void kbase_gpu_dump_slots(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_GPU_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
new file mode 100644
index 0000000..7307be4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
@@ -0,0 +1,352 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+
+/*
+ * Hold the runpool_mutex for this
+ */
+static inline bool timer_callback_should_run(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+	s8 nr_running_ctxs;
+
+	lockdep_assert_held(&kbdev->js_data.runpool_mutex);
+
+	/* Timer must stop if we are suspending */
+	if (backend->suspend_timer)
+		return false;
+
+	/* nr_contexts_runnable is updated with the runpool_mutex. However, the
+	 * locking in the caller gives us a barrier that ensures it is
+	 * up-to-date for reading */
+	nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable);
+
+#ifdef CONFIG_MALI_DEBUG
+	if (kbdev->js_data.softstop_always) {
+		/* Debug support for allowing soft-stop on a single context */
+		return true;
+	}
+#endif				/* CONFIG_MALI_DEBUG */
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
+		/* Timeouts would have to be 4x longer (due to micro-
+		 * architectural design) to support OpenCL conformance tests, so
+		 * only run the timer when there are:
+		 * - 2 or more CL contexts, or
+		 * - 1 or more GLES contexts
+		 *
+		 * NOTE: A context that has both Compute and Non-Compute jobs
+		 * is treated as an OpenCL context (hence, we don't check
+		 * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
+		 */
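+		/* For example: a single CL-only context does not keep the
+		 * timer running, whereas one GLES context or two CL contexts
+		 * do. */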
+		{
+			s8 nr_compute_ctxs =
+				kbasep_js_ctx_attr_count_on_runpool(kbdev,
+						KBASEP_JS_CTX_ATTR_COMPUTE);
+			s8 nr_noncompute_ctxs = nr_running_ctxs -
+							nr_compute_ctxs;
+
+			return (bool) (nr_compute_ctxs >= 2 ||
+							nr_noncompute_ctxs > 0);
+		}
+	} else {
+		/* Run the timer callback whenever you have at least 1 context
+		 */
+		return (bool) (nr_running_ctxs > 0);
+	}
+}
+
+static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+{
+	unsigned long flags;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_backend_data *backend;
+	int s;
+	bool reset_needed = false;
+
+	KBASE_DEBUG_ASSERT(timer != NULL);
+
+	backend = container_of(timer, struct kbase_backend_data,
+							scheduling_timer);
+	kbdev = container_of(backend, struct kbase_device, hwaccess.backend);
+	js_devdata = &kbdev->js_data;
+
+	/* Loop through the slots */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
+		struct kbase_jd_atom *atom = NULL;
+
+		if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) {
+			atom = kbase_gpu_inspect(kbdev, s, 0);
+			KBASE_DEBUG_ASSERT(atom != NULL);
+		}
+
+		if (atom != NULL) {
+			/* The current version of the model doesn't support
+			 * Soft-Stop */
+			if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
+				u32 ticks = atom->ticks++;
+
+#if !defined(CONFIG_MALI_JOB_DUMP) && !defined(CONFIG_MALI_VECTOR_DUMP)
+				u32 soft_stop_ticks, hard_stop_ticks,
+								gpu_reset_ticks;
+				if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+					soft_stop_ticks =
+						js_devdata->soft_stop_ticks_cl;
+					hard_stop_ticks =
+						js_devdata->hard_stop_ticks_cl;
+					gpu_reset_ticks =
+						js_devdata->gpu_reset_ticks_cl;
+				} else {
+					soft_stop_ticks =
+						js_devdata->soft_stop_ticks;
+					hard_stop_ticks =
+						js_devdata->hard_stop_ticks_ss;
+					gpu_reset_ticks =
+						js_devdata->gpu_reset_ticks_ss;
+				}
+
+				/* If timeouts have been changed then ensure
+				 * that atom tick count is not greater than the
+				 * new soft_stop timeout. This ensures that
+				 * atoms do not miss any of the timeouts due to
+				 * races between this worker and the thread
+				 * changing the timeouts. */
+				if (backend->timeouts_updated &&
+						ticks > soft_stop_ticks)
+					ticks = atom->ticks = soft_stop_ticks;
+
+				/* Job is Soft-Stoppable */
+				if (ticks == soft_stop_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->soft_stop_ticks ticks.
+					 * Soft stop the slot so we can run
+					 * other jobs.
+					 */
+#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+					int disjoint_threshold =
+		KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
+					u32 softstop_flags = 0u;
+
+					dev_dbg(kbdev->dev, "Soft-stop");
+					/* nr_user_contexts_running is updated
+					 * with the runpool_mutex, but we can't
+					 * take that here.
+					 *
+					 * However, if it's about to be
+					 * increased then the new context can't
+					 * run any jobs until they take the
+					 * hwaccess_lock, so it's OK to observe
+					 * the older value.
+					 *
+					 * Similarly, if it's about to be
+					 * decreased, the last job from another
+					 * context has already finished, so it's
+					 * not too bad that we observe the older
+					 * value and register a disjoint event
+					 * when we try soft-stopping */
+					if (js_devdata->nr_user_contexts_running
+							>= disjoint_threshold)
+						softstop_flags |=
+						JS_COMMAND_SW_CAUSES_DISJOINT;
+
+					kbase_job_slot_softstop_swflags(kbdev,
+						s, atom, softstop_flags);
+#endif
+				} else if (ticks == hard_stop_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->hard_stop_ticks_ss ticks.
+					 * It should have been soft-stopped by
+					 * now. Hard stop the slot.
+					 */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+					int ms =
+						js_devdata->scheduling_period_ns
+								/ 1000000u;
+					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+							(unsigned long)ticks,
+							(unsigned long)ms);
+					kbase_job_slot_hardstop(atom->kctx, s,
+									atom);
+#endif
+				} else if (ticks == gpu_reset_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->gpu_reset_ticks_ss ticks.
+					 * It should have left the GPU by now.
+					 * Signal that the GPU needs to be
+					 * reset.
+					 */
+					reset_needed = true;
+				}
+#else				/* !CONFIG_MALI_JOB_DUMP */
+				/* NOTE: During CONFIG_MALI_JOB_DUMP, we use
+				 * the alternate timeouts, which makes the hard-
+				 * stop and GPU reset timeout much longer. We
+				 * also ensure that we don't soft-stop at all.
+				 */
+				if (ticks == js_devdata->soft_stop_ticks) {
+					/* Job has been scheduled for at least
+					 * js_devdata->soft_stop_ticks. We do
+					 * not soft-stop during
+					 * CONFIG_MALI_JOB_DUMP, however.
+					 */
+					dev_dbg(kbdev->dev, "Soft-stop");
+				} else if (ticks ==
+					js_devdata->hard_stop_ticks_dumping) {
+					/* Job has been scheduled for at least
+					 * js_devdata->hard_stop_ticks_dumping
+					 * ticks. Hard stop the slot.
+					 */
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
+					int ms =
+						js_devdata->scheduling_period_ns
+								/ 1000000u;
+					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
+							(unsigned long)ticks,
+							(unsigned long)ms);
+					kbase_job_slot_hardstop(atom->kctx, s,
+									atom);
+#endif
+				} else if (ticks ==
+					js_devdata->gpu_reset_ticks_dumping) {
+					/* Job has been scheduled for at least
+					 * js_devdata->gpu_reset_ticks_dumping
+					 * ticks. It should have left the GPU by
+					 * now. Signal that the GPU needs to be
+					 * reset.
+					 */
+					reset_needed = true;
+				}
+#endif				/* !CONFIG_MALI_JOB_DUMP */
+			}
+		}
+	}
+	if (reset_needed) {
+		dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issuing GPU soft-reset to resolve.");
+
+		if (kbase_prepare_to_reset_gpu_locked(kbdev))
+			kbase_reset_gpu_locked(kbdev);
+	}
+	/* the timer is re-issued if there are contexts in the run-pool */
+
+	if (backend->timer_running)
+		hrtimer_start(&backend->scheduling_timer,
+			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+			HRTIMER_MODE_REL);
+
+	backend->timeouts_updated = false;
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+	unsigned long flags;
+
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+
+	if (!timer_callback_should_run(kbdev)) {
+		/* Take spinlock to force synchronisation with timer */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		backend->timer_running = false;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		/* From now on, the return value of timer_callback_should_run()
+		 * will also cause the timer to not requeue itself. Its return
+		 * value cannot change, because it depends on variables updated
+		 * with the runpool_mutex held, which the caller of this must
+		 * also hold */
+		hrtimer_cancel(&backend->scheduling_timer);
+	}
+
+	if (timer_callback_should_run(kbdev) && !backend->timer_running) {
+		/* Take spinlock to force synchronisation with timer */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		backend->timer_running = true;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		hrtimer_start(&backend->scheduling_timer,
+			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
+							HRTIMER_MODE_REL);
+
+		KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u,
+									0u);
+	}
+}
+
+int kbase_backend_timer_init(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	backend->scheduling_timer.function = timer_callback;
+
+	backend->timer_running = false;
+
+	return 0;
+}
+
+void kbase_backend_timer_term(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	hrtimer_cancel(&backend->scheduling_timer);
+}
+
+void kbase_backend_timer_suspend(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	backend->suspend_timer = true;
+
+	kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timer_resume(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	backend->suspend_timer = false;
+
+	kbase_backend_ctx_count_changed(kbdev);
+}
+
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev)
+{
+	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+
+	backend->timeouts_updated = true;
+}
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h
new file mode 100644
index 0000000..6576e55
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Register-based HW access backend specific job scheduler APIs
+ */
+
+#ifndef _KBASE_JS_BACKEND_H_
+#define _KBASE_JS_BACKEND_H_
+
+/**
+ * kbase_backend_timer_init() - Initialise the JS scheduling timer
+ * @kbdev:	Device pointer
+ *
+ * This function should be called at driver initialisation
+ *
+ * Return: 0 on success
+ */
+int kbase_backend_timer_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_term() - Terminate the JS scheduling timer
+ * @kbdev:	Device pointer
+ *
+ * This function should be called at driver termination
+ */
+void kbase_backend_timer_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_suspend - Suspend is happening, stop the JS scheduling
+ *                               timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on suspend, after the active count has reached
+ * zero. This is required as the timer may have been started on job submission
+ * to the job scheduler, but before jobs are submitted to the GPU.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_suspend(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timer_resume - Resume is happening, re-evaluate the JS
+ *                              scheduling timer
+ * @kbdev: Device pointer
+ *
+ * This function should be called on resume. Note that it is not guaranteed to
+ * re-start the timer, only to evaluate whether it should be re-started.
+ *
+ * Caller must hold runpool_mutex.
+ */
+void kbase_backend_timer_resume(struct kbase_device *kbdev);
+
+#endif /* _KBASE_JS_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
new file mode 100644
index 0000000..ba5bf72
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -0,0 +1,400 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/bitops.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_tlstream.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
+		u32 num_pages)
+{
+	u64 region;
+
+	/* can't lock a zero sized range */
+	KBASE_DEBUG_ASSERT(num_pages);
+
+	region = pfn << PAGE_SHIFT;
+	/*
+	 * fls returns (given the ASSERT above):
+	 * 1 .. 32
+	 *
+	 * 10 + fls(num_pages)
+	 * results in the range (11 .. 42)
+	 */
+
+	/* defensively handle num_pages being zero (the assert above should
+	 * catch this in debug builds) */
+	if (0 == num_pages) {
+		region |= 11;
+	} else {
+		u8 region_width;
+
+		region_width = 10 + fls(num_pages);
+		if (num_pages != (1ul << (region_width - 11))) {
+			/* not pow2, so must go up to the next pow2 */
+			region_width += 1;
+		}
+		KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
+		KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
+		region |= region_width;
+	}
+
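+	/* Illustrative example (values assumed for the sake of the comment):
+	 * with num_pages = 96, fls(96) = 7, so region_width starts at 17;
+	 * 96 is not a power of two, so region_width is rounded up to 18 and
+	 * the low bits of the returned value encode 18. */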
+	return region;
+}
+
+static int wait_ready(struct kbase_device *kbdev,
+		unsigned int as_nr)
+{
+	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+
+	/* Wait for the MMU status to indicate there is no active command, in
+	 * case one is pending. Do not log remaining register accesses. */
+	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
+		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+
+	if (max_loops == 0) {
+		dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
+		return -1;
+	}
+
+	/* If waiting in loop was performed, log last read value. */
+	if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
+		kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+
+	return 0;
+}
+
+static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
+{
+	int status;
+
+	/* write AS_COMMAND when MMU is ready to accept another command */
+	status = wait_ready(kbdev, as_nr);
+	if (status == 0)
+		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd);
+
+	return status;
+}
+
+static void validate_protected_page_fault(struct kbase_device *kbdev)
+{
+	/* GPUs which support (native) protected mode shall not report page
+	 * fault addresses unless they have protected debug mode and protected
+	 * debug mode is turned on */
+	u32 protected_debug_mode = 0;
+
+	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE))
+		return;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+		protected_debug_mode = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(GPU_STATUS)) & GPU_DBGEN;
+	}
+
+	if (!protected_debug_mode) {
+		/* fault_addr should never be reported in protected mode.
+		 * However, we just continue by printing an error message */
+		dev_err(kbdev->dev, "Fault address reported in protected mode\n");
+	}
+}
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
+{
+	const int num_as = 16;
+	const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
+	const int pf_shift = 0;
+	const unsigned long as_bit_mask = (1UL << num_as) - 1;
+	unsigned long flags;
+	u32 new_mask;
+	u32 tmp;
+
+	/* bus faults */
+	u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
+	/* page faults (note: Ignore ASes with both pf and bf) */
+	u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+	/* remember current mask */
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+	new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
+	/* mask interrupts for now */
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+
+	while (bf_bits | pf_bits) {
+		struct kbase_as *as;
+		int as_no;
+		struct kbase_context *kctx;
+		struct kbase_fault *fault;
+
+		/*
+		 * the while logic ensures we have a bit set, no need to check
+		 * for not-found here
+		 */
+		as_no = ffs(bf_bits | pf_bits) - 1;
+		as = &kbdev->as[as_no];
+
+		/* find the fault type */
+		as->fault_type = (bf_bits & (1 << as_no)) ?
+				KBASE_MMU_FAULT_TYPE_BUS :
+				KBASE_MMU_FAULT_TYPE_PAGE;
+
+		if (kbase_as_has_bus_fault(as))
+			fault = &as->bf_data;
+		else
+			fault = &as->pf_data;
+
+		/*
+		 * Refcount the kctx ASAP - it shouldn't disappear anyway, since
+		 * Bus/Page faults _should_ only occur whilst jobs are running,
+		 * and a job causing the Bus/Page fault shouldn't complete until
+		 * the MMU is updated
+		 */
+		kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
+
+		/* find faulting address */
+		fault->addr = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+				AS_FAULTADDRESS_HI));
+		fault->addr <<= 32;
+		fault->addr |= kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+				AS_FAULTADDRESS_LO));
+
+		/* Mark the fault protected or not */
+		fault->protected_mode = kbdev->protected_mode;
+
+		if (kbdev->protected_mode && fault->addr) {
+			/* check if address reporting is allowed */
+			validate_protected_page_fault(kbdev);
+		}
+
+		/* report the fault to debugfs */
+		kbase_as_fault_debugfs_new(kbdev, as_no);
+
+		/* record the fault status */
+		fault->status = kbase_reg_read(kbdev, MMU_AS_REG(as_no,
+				AS_FAULTSTATUS));
+
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+			fault->extra_addr = kbase_reg_read(kbdev,
+					MMU_AS_REG(as_no, AS_FAULTEXTRA_HI));
+			fault->extra_addr <<= 32;
+			fault->extra_addr |= kbase_reg_read(kbdev,
+					MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));
+		}
+
+		if (kbase_as_has_bus_fault(as)) {
+			/* Mark bus fault as handled.
+			 * Note that a bus fault is processed first in case
+			 * where both a bus fault and page fault occur.
+			 */
+			bf_bits &= ~(1UL << as_no);
+
+			/* remove the queued BF (and PF) from the mask */
+			new_mask &= ~(MMU_BUS_ERROR(as_no) |
+					MMU_PAGE_FAULT(as_no));
+		} else {
+			/* Mark page fault as handled */
+			pf_bits &= ~(1UL << as_no);
+
+			/* remove the queued PF from the mask */
+			new_mask &= ~MMU_PAGE_FAULT(as_no);
+		}
+
+		/* Process the interrupt for this address space */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_mmu_interrupt_process(kbdev, kctx, as, fault);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+
+	/* reenable interrupts */
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+	tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
+	new_mask |= tmp;
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask);
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+
+void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
+{
+	struct kbase_mmu_setup *current_setup = &as->current_setup;
+	u64 transcfg = 0;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
+		transcfg = current_setup->transcfg;
+
+		/* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
+		/* Clear PTW_MEMATTR bits */
+		transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
+		/* Enable correct PTW_MEMATTR bits */
+		transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
+		/* Ensure page-tables reads use read-allocate cache-policy in
+		 * the L2
+		 */
+		transcfg |= AS_TRANSCFG_R_ALLOCATE;
+
+		if (kbdev->system_coherency == COHERENCY_ACE) {
+			/* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable) */
+			/* Clear PTW_SH bits */
+			transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
+			/* Enable correct PTW_SH bits */
+			transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
+		}
+
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
+				transcfg);
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
+				(transcfg >> 32) & 0xFFFFFFFFUL);
+	} else {
+		if (kbdev->system_coherency == COHERENCY_ACE)
+			current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
+	}
+
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
+			current_setup->transtab & 0xFFFFFFFFUL);
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
+			(current_setup->transtab >> 32) & 0xFFFFFFFFUL);
+
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
+			current_setup->memattr & 0xFFFFFFFFUL);
+	kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
+			(current_setup->memattr >> 32) & 0xFFFFFFFFUL);
+
+	KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as,
+			current_setup->transtab,
+			current_setup->memattr,
+			transcfg);
+
+	write_cmd(kbdev, as->number, AS_COMMAND_UPDATE);
+}
+
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+		u64 vpfn, u32 nr, u32 op,
+		unsigned int handling_irq)
+{
+	int ret;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+	if (op == AS_COMMAND_UNLOCK) {
+		/* Unlock doesn't require a lock first */
+		ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+	} else {
+		u64 lock_addr = lock_region(kbdev, vpfn, nr);
+
+		/* Lock the region that needs to be updated */
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
+				lock_addr & 0xFFFFFFFFUL);
+		kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
+				(lock_addr >> 32) & 0xFFFFFFFFUL);
+		write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
+
+		/* Run the MMU operation */
+		write_cmd(kbdev, as->number, op);
+
+		/* Wait for the flush to complete */
+		ret = wait_ready(kbdev, as->number);
+
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
+			/* Issue an UNLOCK command to ensure that valid page
+			 * tables are re-read by the GPU after an update.
+			 * Note that the FLUSH command should perform all the
+			 * actions necessary; however, the bus logs show that if
+			 * multiple page faults occur within an 8-page region
+			 * the MMU does not always re-read the updated page
+			 * table entries for later faults, or only partially
+			 * re-reads them, and then raises the page fault IRQ
+			 * for the same addresses again. The UNLOCK ensures
+			 * that the MMU cache is flushed, so the updates can be
+			 * re-read. As the region is now unlocked we need to
+			 * issue 2 UNLOCK commands in order to flush the
+			 * MMU/uTLB, see PRLAM-8812.
+			 */
+			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+			write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+		}
+	}
+
+	return ret;
+}
+
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type)
+{
+	unsigned long flags;
+	u32 pf_bf_mask;
+
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+	/*
+	 * A reset is in-flight and we're flushing the IRQ + bottom half
+	 * so don't update anything as it could race with the reset code.
+	 */
+	if (kbdev->irq_reset_flush)
+		goto unlock;
+
+	/* Clear the page (and bus fault IRQ as well in case one occurred) */
+	pf_bf_mask = MMU_PAGE_FAULT(as->number);
+	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+		pf_bf_mask |= MMU_BUS_ERROR(as->number);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask);
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type)
+{
+	unsigned long flags;
+	u32 irq_mask;
+
+	/* Enable the page fault IRQ (and bus fault IRQ as well in case one
+	 * occurred) */
+	spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+
+	/*
+	 * A reset is in-flight and we're flushing the IRQ + bottom half
+	 * so don't update anything as it could race with the reset code.
+	 */
+	if (kbdev->irq_reset_flush)
+		goto unlock;
+
+	irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)) |
+			MMU_PAGE_FAULT(as->number);
+
+	if (type == KBASE_MMU_FAULT_TYPE_BUS ||
+			type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+		irq_mask |= MMU_BUS_ERROR(as->number);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask);
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h
new file mode 100644
index 0000000..1f76eed
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Interface file for the direct implementation for MMU hardware access
+ *
+ * Direct MMU hardware interface
+ *
+ * This module provides the interface(s) that are required by the direct
+ * register access implementation of the MMU hardware interface
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_
+#define _MALI_KBASE_MMU_HW_DIRECT_H_
+
+#include <mali_kbase_defs.h>
+
+/**
+ * kbase_mmu_interrupt - Process an MMU interrupt.
+ *
+ * Process the MMU interrupt that was reported by the &kbase_device.
+ *
+ * @kbdev:          kbase device the interrupt is associated with
+ * @irq_stat:       Value of the MMU_IRQ_STATUS register
+ */
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+#endif	/* _MALI_KBASE_MMU_HW_DIRECT_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
new file mode 100644
index 0000000..51a10a2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static bool always_on_shaders_needed(struct kbase_device *kbdev)
+{
+	return true;
+}
+
+static bool always_on_get_core_active(struct kbase_device *kbdev)
+{
+	return true;
+}
+
+static void always_on_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+static void always_on_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+/*
+ * The struct kbase_pm_policy structure for the "always on" power policy.
+ *
+ * This is the static structure that defines the "always on" power policy's
+ * callback and name.
+ */
+const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
+	"always_on",			/* name */
+	always_on_init,			/* init */
+	always_on_term,			/* term */
+	always_on_shaders_needed,	/* shaders_needed */
+	always_on_get_core_active,	/* get_core_active */
+	0u,				/* flags */
+	KBASE_PM_POLICY_ID_ALWAYS_ON,	/* id */
+};
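+
+/* Note: the initialisers above are positional, so they must remain in the
+ * same order as the corresponding members of struct kbase_pm_policy
+ * (name, init, term, shaders_needed, get_core_active, flags, id). */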
+
+KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
new file mode 100644
index 0000000..e7927cf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
@@ -0,0 +1,81 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Always on" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_ALWAYS_ON_H
+#define MALI_KBASE_PM_ALWAYS_ON_H
+
+/**
+ * DOC:
+ * The "Always on" power management policy has the following
+ * characteristics:
+ *
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ *   know which Job Chains are to be run:
+ *    Shader Cores are powered up, regardless of whether or not they will be
+ *    needed later.
+ *
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ *   queued Job Chains:
+ *    Shader Cores are kept powered, regardless of whether or not they will be
+ *    needed
+ *
+ * - When KBase indicates that the GPU need not be powered:
+ *    The Shader Cores are kept powered, regardless of whether or not they will
+ *    be needed. The GPU itself is also kept powered, even though it is not
+ *    needed.
+ *
+ * This policy is automatically overridden during system suspend: the desired
+ * core state is ignored, and the cores are forced off regardless of what the
+ * policy requests. After resuming from suspend, new changes to the desired
+ * core state made by the policy are honored.
+ *
+ * Note:
+ *
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ *   has just started to submit Job Chains.
+ *
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ *   User Processes have finished, and it is waiting for a User Process to
+ *   submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_always_on - Private struct for policy instance data
+ * @dummy: unused dummy variable
+ *
+ * This contains data that is private to the particular power policy that is
+ * active.
+ */
+struct kbasep_pm_policy_always_on {
+	int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops;
+
+#endif /* MALI_KBASE_PM_ALWAYS_ON_H */
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
new file mode 100644
index 0000000..c19a0d1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
@@ -0,0 +1,557 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * GPU backend implementation of base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_config_defaults.h>
+
+#include <mali_kbase_pm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <backend/gpu/mali_kbase_js_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_jm_internal.h>
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
+static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
+
+int kbase_pm_runtime_init(struct kbase_device *kbdev)
+{
+	struct kbase_pm_callback_conf *callbacks;
+
+	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+	if (callbacks) {
+		kbdev->pm.backend.callback_power_on =
+					callbacks->power_on_callback;
+		kbdev->pm.backend.callback_power_off =
+					callbacks->power_off_callback;
+		kbdev->pm.backend.callback_power_suspend =
+					callbacks->power_suspend_callback;
+		kbdev->pm.backend.callback_power_resume =
+					callbacks->power_resume_callback;
+		kbdev->pm.callback_power_runtime_init =
+					callbacks->power_runtime_init_callback;
+		kbdev->pm.callback_power_runtime_term =
+					callbacks->power_runtime_term_callback;
+		kbdev->pm.backend.callback_power_runtime_on =
+					callbacks->power_runtime_on_callback;
+		kbdev->pm.backend.callback_power_runtime_off =
+					callbacks->power_runtime_off_callback;
+		kbdev->pm.backend.callback_power_runtime_idle =
+					callbacks->power_runtime_idle_callback;
+
+		if (callbacks->power_runtime_init_callback)
+			return callbacks->power_runtime_init_callback(kbdev);
+		else
+			return 0;
+	}
+
+	kbdev->pm.backend.callback_power_on = NULL;
+	kbdev->pm.backend.callback_power_off = NULL;
+	kbdev->pm.backend.callback_power_suspend = NULL;
+	kbdev->pm.backend.callback_power_resume = NULL;
+	kbdev->pm.callback_power_runtime_init = NULL;
+	kbdev->pm.callback_power_runtime_term = NULL;
+	kbdev->pm.backend.callback_power_runtime_on = NULL;
+	kbdev->pm.backend.callback_power_runtime_off = NULL;
+	kbdev->pm.backend.callback_power_runtime_idle = NULL;
+
+	return 0;
+}
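+
+/*
+ * Illustrative sketch, not part of this change: a platform that manages GPU
+ * power typically defines POWER_MANAGEMENT_CALLBACKS in its platform
+ * configuration header as a pointer to a struct kbase_pm_callback_conf,
+ * roughly like the following (the callback function names are hypothetical):
+ *
+ *	static struct kbase_pm_callback_conf pm_callbacks = {
+ *		.power_on_callback = example_power_on,
+ *		.power_off_callback = example_power_off,
+ *		.power_suspend_callback = NULL,
+ *		.power_resume_callback = NULL,
+ *		.power_runtime_init_callback = NULL,
+ *		.power_runtime_term_callback = NULL,
+ *	};
+ *	#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+ *
+ * If POWER_MANAGEMENT_CALLBACKS is NULL, every backend callback above is left
+ * NULL and the driver assumes GPU power is controlled externally.
+ */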
+
+void kbase_pm_runtime_term(struct kbase_device *kbdev)
+{
+	if (kbdev->pm.callback_power_runtime_term)
+		kbdev->pm.callback_power_runtime_term(kbdev);
+}
+
+void kbase_pm_register_access_enable(struct kbase_device *kbdev)
+{
+	struct kbase_pm_callback_conf *callbacks;
+
+	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+	if (callbacks)
+		callbacks->power_on_callback(kbdev);
+
+	kbdev->pm.backend.gpu_powered = true;
+}
+
+void kbase_pm_register_access_disable(struct kbase_device *kbdev)
+{
+	struct kbase_pm_callback_conf *callbacks;
+
+	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+	if (callbacks)
+		callbacks->power_off_callback(kbdev);
+
+	kbdev->pm.backend.gpu_powered = false;
+}
+
+int kbase_hwaccess_pm_early_init(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_init(&kbdev->pm.lock);
+
+	kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
+			WQ_HIGHPRI | WQ_UNBOUND, 1);
+	if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
+			kbase_pm_gpu_poweroff_wait_wq);
+
+	kbdev->pm.backend.ca_cores_enabled = ~0ull;
+	kbdev->pm.backend.gpu_powered = false;
+	kbdev->pm.suspending = false;
+#ifdef CONFIG_MALI_DEBUG
+	kbdev->pm.backend.driver_ready_for_irqs = false;
+#endif /* CONFIG_MALI_DEBUG */
+	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
+
+	/* Initialise the metrics subsystem */
+	ret = kbasep_pm_metrics_init(kbdev);
+	if (ret)
+		return ret;
+
+	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
+	kbdev->pm.backend.reset_done = false;
+
+	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
+	kbdev->pm.active_count = 0;
+
+	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
+	spin_lock_init(&kbdev->pm.backend.gpu_powered_lock);
+
+	init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);
+
+	if (kbase_pm_ca_init(kbdev) != 0)
+		goto workq_fail;
+
+	if (kbase_pm_policy_init(kbdev) != 0)
+		goto pm_policy_fail;
+
+	if (kbase_pm_state_machine_init(kbdev) != 0)
+		goto pm_state_machine_fail;
+
+	return 0;
+
+pm_state_machine_fail:
+	kbase_pm_policy_term(kbdev);
+pm_policy_fail:
+	kbase_pm_ca_term(kbdev);
+workq_fail:
+	kbasep_pm_metrics_term(kbdev);
+	return -EINVAL;
+}
+
+int kbase_hwaccess_pm_late_init(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	kbdev->pm.backend.hwcnt_desired = false;
+	kbdev->pm.backend.hwcnt_disabled = true;
+	INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
+		kbase_pm_hwcnt_disable_worker);
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	return 0;
+}
+
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
+{
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	/* Turn clocks and interrupts on - no-op if we haven't done a previous
+	 * kbase_pm_clock_off() */
+	kbase_pm_clock_on(kbdev, is_resume);
+
+	if (!is_resume) {
+		unsigned long flags;
+
+		/* Force update of L2 state - if we have abandoned a power off
+		 * then this may be required to power the L2 back on.
+		 */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_pm_update_state(kbdev);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+
+	/* Update core status as required by the policy */
+	kbase_pm_update_cores_state(kbdev);
+
+	/* NOTE: We don't wait to reach the desired state, since running atoms
+	 * will wait for that state to be reached anyway */
+}
+
+static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+{
+	struct kbase_device *kbdev = container_of(data, struct kbase_device,
+			pm.backend.gpu_poweroff_wait_work);
+	struct kbase_pm_device_data *pm = &kbdev->pm;
+	struct kbase_pm_backend_data *backend = &pm->backend;
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	unsigned long flags;
+
+	if (!platform_power_down_only)
+		/* Wait for power transitions to complete. We do this with no locks held
+		 * so that we don't deadlock with any pending workqueues.
+		 */
+		kbase_pm_wait_for_desired_state(kbdev);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	if (!backend->poweron_required) {
+		if (!platform_power_down_only) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+			WARN_ON(backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF ||
+					backend->l2_state != KBASE_L2_OFF);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		}
+
+		/* Disable interrupts and turn the clock off */
+		if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
+			/*
+			 * Page/bus faults are pending, must drop locks to
+			 * process.  Interrupts are disabled so no more faults
+			 * should be generated at this point.
+			 */
+			mutex_unlock(&kbdev->pm.lock);
+			mutex_unlock(&js_devdata->runpool_mutex);
+			kbase_flush_mmu_wqs(kbdev);
+			mutex_lock(&js_devdata->runpool_mutex);
+			mutex_lock(&kbdev->pm.lock);
+
+			/* Turn off clock now that faults have been handled. We
+			 * dropped locks so poweron_required may have changed -
+			 * power back on if this is the case (effectively only
+			 * re-enabling of the interrupts would be done in this
+			 * case, as the clocks to GPU were not withdrawn yet).
+			 */
+			if (backend->poweron_required)
+				kbase_pm_clock_on(kbdev, false);
+			else
+				WARN_ON(!kbase_pm_clock_off(kbdev,
+						backend->poweroff_is_suspend));
+		}
+	}
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	backend->poweroff_wait_in_progress = false;
+	if (backend->poweron_required) {
+		backend->poweron_required = false;
+		kbdev->pm.backend.l2_desired = true;
+		kbase_pm_update_state(kbdev);
+		kbase_pm_update_cores_state_nolock(kbdev);
+		kbase_backend_slot_update(kbdev);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	wake_up(&kbdev->pm.backend.poweroff_wait);
+}
+
+static void kbase_pm_hwcnt_disable_worker(struct work_struct *data)
+{
+	struct kbase_device *kbdev = container_of(data, struct kbase_device,
+			pm.backend.hwcnt_disable_work);
+	struct kbase_pm_device_data *pm = &kbdev->pm;
+	struct kbase_pm_backend_data *backend = &pm->backend;
+	unsigned long flags;
+
+	bool do_disable;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!do_disable)
+		return;
+
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;
+
+	if (do_disable) {
+		/* PM state did not change while we were doing the disable,
+		 * so commit the work we just performed and continue the state
+		 * machine.
+		 */
+		backend->hwcnt_disabled = true;
+		kbase_pm_update_state(kbdev);
+	} else {
+		/* PM state was updated while we were doing the disable,
+		 * so we need to undo the disable we just performed.
+		 */
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend)
+{
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	spin_lock(&kbdev->pm.backend.gpu_powered_lock);
+	if (!kbdev->pm.backend.gpu_powered) {
+		spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+		goto unlock_hwaccess;
+	} else {
+		spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
+	}
+
+	if (kbdev->pm.backend.poweroff_wait_in_progress)
+		goto unlock_hwaccess;
+
+	/* Force all cores off */
+	kbdev->pm.backend.shaders_desired = false;
+	kbdev->pm.backend.l2_desired = false;
+
+	kbdev->pm.backend.poweroff_wait_in_progress = true;
+	kbdev->pm.backend.poweroff_is_suspend = is_suspend;
+	kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off = true;
+
+	/* l2_desired being false should cause the state machine to
+	 * start powering off the L2. When it actually is powered off,
+	 * the interrupt handler will call kbase_pm_l2_update_state()
+	 * again, which will trigger the kbase_pm_gpu_poweroff_wait_wq.
+	 * Callers of this function will need to wait on poweroff_wait.
+	 */
+	kbase_pm_update_state(kbdev);
+
+unlock_hwaccess:
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
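+/*
+ * Note: despite the name, this helper returns true once the power off has
+ * completed (i.e. once no poweroff wait is in progress); it is the wake-up
+ * condition used by kbase_pm_wait_for_poweroff_complete() below.
+ */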
+static bool is_poweroff_in_progress(struct kbase_device *kbdev)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	ret = !kbdev->pm.backend.poweroff_wait_in_progress;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
+{
+	wait_event_killable(kbdev->pm.backend.poweroff_wait,
+			is_poweroff_in_progress(kbdev));
+}
+
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+		unsigned int flags)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	unsigned long irq_flags;
+	int ret;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	/* A suspend won't happen during startup/insmod */
+	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+	/* Power up the GPU; don't enable IRQs yet, as we are not ready to
+	 * receive them. */
+	ret = kbase_pm_init_hw(kbdev, flags);
+	if (ret) {
+		mutex_unlock(&kbdev->pm.lock);
+		mutex_unlock(&js_devdata->runpool_mutex);
+		return ret;
+	}
+
+	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
+			kbdev->pm.debug_core_mask[1] =
+			kbdev->pm.debug_core_mask[2] =
+			kbdev->gpu_props.props.raw_props.shader_present;
+
+	/* Pretend the GPU is active to prevent a power policy turning the GPU
+	 * cores off */
+	kbdev->pm.active_count = 1;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+	/* Ensure cycle counter is off */
+	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
+	spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+
+	/* We are ready to receive IRQs now that the power policy is set up,
+	 * so enable them. */
+#ifdef CONFIG_MALI_DEBUG
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
+	kbdev->pm.backend.driver_ready_for_irqs = true;
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, irq_flags);
+#endif
+	kbase_pm_enable_interrupts(kbdev);
+
+	/* Turn on the GPU and any cores needed by the policy */
+	kbase_pm_do_poweron(kbdev, false);
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	/* Idle the GPU and/or cores, if the policy wants it to */
+	kbase_pm_context_idle(kbdev);
+
+	return 0;
+}
+
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&kbdev->pm.lock);
+	kbase_pm_do_poweroff(kbdev, false);
+	mutex_unlock(&kbdev->pm.lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
+
+void kbase_hwaccess_pm_early_term(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);
+
+	/* Free any resources the policy allocated */
+	kbase_pm_state_machine_term(kbdev);
+	kbase_pm_policy_term(kbdev);
+	kbase_pm_ca_term(kbdev);
+
+	/* Shut down the metrics subsystem */
+	kbasep_pm_metrics_term(kbdev);
+
+	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
+}
+
+void kbase_hwaccess_pm_late_term(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	cancel_work_sync(&kbdev->pm.backend.hwcnt_disable_work);
+
+	if (kbdev->pm.backend.hwcnt_disabled) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+}
+
+void kbase_pm_power_changed(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_update_state(kbdev);
+
+	kbase_backend_slot_update(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+		u64 new_core_mask_js0, u64 new_core_mask_js1,
+		u64 new_core_mask_js2)
+{
+	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
+	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
+	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
+	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
+			new_core_mask_js2;
+
+	kbase_pm_update_cores_state_nolock(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
+{
+	kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
+{
+	kbase_pm_update_active(kbdev);
+}
+
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	/* Force power off the GPU and all cores (regardless of policy), only
+	 * after the PM active count reaches zero (otherwise, we risk turning it
+	 * off prematurely) */
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	kbase_pm_do_poweroff(kbdev, true);
+
+	kbase_backend_timer_suspend(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	kbase_pm_wait_for_poweroff_complete(kbdev);
+}
+
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	kbdev->pm.suspending = false;
+	kbase_pm_do_poweron(kbdev, true);
+
+	kbase_backend_timer_resume(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
new file mode 100644
index 0000000..2cb9452
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
@@ -0,0 +1,107 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+int kbase_pm_ca_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+
+	if (kbdev->current_core_mask)
+		pm_backend->ca_cores_enabled = kbdev->current_core_mask;
+	else
+		pm_backend->ca_cores_enabled =
+				kbdev->gpu_props.props.raw_props.shader_present;
+#endif
+
+	return 0;
+}
+
+void kbase_pm_ca_term(struct kbase_device *kbdev)
+{
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
+{
+	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (!(core_mask & kbdev->pm.debug_core_mask_all)) {
+		dev_err(kbdev->dev, "OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
+				core_mask, kbdev->pm.debug_core_mask_all);
+		goto unlock;
+	}
+
+	pm_backend->ca_cores_enabled = core_mask;
+
+	kbase_pm_update_state(kbdev);
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX\n",
+			pm_backend->ca_cores_enabled);
+}
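+
+/*
+ * Illustrative usage sketch, not part of this change: a devfreq ->target()
+ * handler that associates a shader core mask with each OPP could apply the
+ * mask of the newly selected OPP roughly as follows (the opp_core_mask
+ * lookup table is hypothetical):
+ *
+ *	u64 core_mask = opp_core_mask[new_opp_index];
+ *
+ *	kbase_devfreq_set_core_mask(kbdev, core_mask);
+ *
+ * A mask that does not intersect kbdev->pm.debug_core_mask_all is rejected
+ * with a dev_err() above, so callers need not pre-validate against it.
+ */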
+#endif
+
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* All cores must be enabled when instrumentation is in use */
+	if (pm_backend->instr_enabled)
+		return kbdev->gpu_props.props.raw_props.shader_present &
+				kbdev->pm.debug_core_mask_all;
+
+#ifdef CONFIG_MALI_DEVFREQ
+	return pm_backend->ca_cores_enabled & kbdev->pm.debug_core_mask_all;
+#else
+	return kbdev->gpu_props.props.raw_props.shader_present &
+			kbdev->pm.debug_core_mask_all;
+#endif
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);
+
+void kbase_pm_ca_instr_enable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	kbdev->pm.backend.instr_enabled = true;
+}
+
+void kbase_pm_ca_instr_disable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	kbdev->pm.backend.instr_enabled = false;
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h
new file mode 100644
index 0000000..274581d0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h
@@ -0,0 +1,97 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel core availability APIs
+ */
+
+#ifndef _KBASE_PM_CA_H_
+#define _KBASE_PM_CA_H_
+
+/**
+ * kbase_pm_ca_init - Initialize core availability framework
+ *
+ * Must be called before calling any other core availability function
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 if the core availability framework was successfully initialized,
+ *         -errno otherwise
+ */
+int kbase_pm_ca_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_term - Terminate core availability framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_ca_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_get_core_mask - Get currently available shaders core mask
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Returns a mask of the currently available shader cores.
+ * Calls into the core availability policy
+ *
+ * Return: The bit mask of available cores
+ */
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_update_core_status - Update core status
+ *
+ * @kbdev:               The kbase device structure for the device (must be
+ *                       a valid pointer)
+ * @cores_ready:         The bit mask of cores ready for job submission
+ * @cores_transitioning: The bit mask of cores that are transitioning power
+ *                       state
+ *
+ * Update core availability policy with current core power status
+ *
+ * Calls into the core availability policy
+ */
+void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
+						u64 cores_transitioning);
+
+/**
+ * kbase_pm_ca_instr_enable - Enable override for instrumentation
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This overrides the output of the core availability policy, ensuring that all
+ * cores are available
+ */
+void kbase_pm_ca_instr_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_ca_instr_disable - Disable override for instrumentation
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This disables any previously enabled override, and resumes normal policy
+ * functionality
+ */
+void kbase_pm_ca_instr_disable(struct kbase_device *kbdev);
+
+#endif /* _KBASE_PM_CA_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h
new file mode 100644
index 0000000..f67ec65
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * A core availability policy for use with devfreq, where core masks are
+ * associated with OPPs.
+ */
+
+#ifndef MALI_KBASE_PM_CA_DEVFREQ_H
+#define MALI_KBASE_PM_CA_DEVFREQ_H
+
+/**
+ * struct kbasep_pm_ca_policy_devfreq - Private structure for devfreq ca policy
+ *
+ * This contains data that is private to the devfreq core availability
+ * policy.
+ *
+ * @cores_desired: Cores that the policy wants to be available
+ * @cores_enabled: Cores that the policy is currently returning as available
+ * @cores_used: Cores currently powered or transitioning
+ */
+struct kbasep_pm_ca_policy_devfreq {
+	u64 cores_desired;
+	u64 cores_enabled;
+	u64 cores_used;
+};
+
+extern const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops;
+
+/**
+ * kbase_devfreq_set_core_mask - Set core mask for policy to use
+ * @kbdev: Device pointer
+ * @core_mask: New core mask
+ *
+ * The new core mask will have immediate effect if the GPU is powered, or will
+ * take effect when it is next powered on.
+ */
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
+
+#endif /* MALI_KBASE_PM_CA_DEVFREQ_H */
+
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
new file mode 100644
index 0000000..e90c44d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+
+static bool coarse_demand_shaders_needed(struct kbase_device *kbdev)
+{
+	return kbase_pm_is_active(kbdev);
+}
+
+static bool coarse_demand_get_core_active(struct kbase_device *kbdev)
+{
+	return kbase_pm_is_active(kbdev);
+}
+
+static void coarse_demand_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+static void coarse_demand_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+/* The struct kbase_pm_policy structure for the coarse demand power policy.
+ *
+ * This is the static structure that defines the coarse demand power policy's
+ * callbacks and name.
+ */
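+/*
+ * Note: the initializers below are positional and must remain in the order
+ * of the fields of struct kbase_pm_policy (name, init, term, shaders_needed,
+ * get_core_active, flags, id) declared in backend/gpu/mali_kbase_pm_defs.h.
+ */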
+const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
+	"coarse_demand",			/* name */
+	coarse_demand_init,			/* init */
+	coarse_demand_term,			/* term */
+	coarse_demand_shaders_needed,		/* shaders_needed */
+	coarse_demand_get_core_active,		/* get_core_active */
+	0u,					/* flags */
+	KBASE_PM_POLICY_ID_COARSE_DEMAND,	/* id */
+};
+
+KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
new file mode 100644
index 0000000..304e5d7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * "Coarse Demand" power management policy
+ */
+
+#ifndef MALI_KBASE_PM_COARSE_DEMAND_H
+#define MALI_KBASE_PM_COARSE_DEMAND_H
+
+/**
+ * DOC:
+ * The "Coarse" demand power management policy has the following
+ * characteristics:
+ * - When KBase indicates that the GPU will be powered up, but we don't yet
+ *   know which Job Chains are to be run:
+ *  - Shader Cores are powered up, regardless of whether or not they will be
+ *    needed later.
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ *   queued Job Chains:
+ *  - Shader Cores are kept powered, regardless of whether or not they will
+ *    be needed
+ * - When KBase indicates that the GPU need not be powered:
+ *  - The Shader Cores are powered off, and the GPU itself is powered off too.
+ *
+ * @note:
+ * - KBase indicates the GPU will be powered up when it has a User Process that
+ *   has just started to submit Job Chains.
+ * - KBase indicates the GPU need not be powered when all the Job Chains from
+ *   User Processes have finished, and it is waiting for a User Process to
+ *   submit some more Job Chains.
+ */
+
+/**
+ * struct kbasep_pm_policy_coarse_demand - Private structure for coarse demand
+ *                                         policy
+ *
+ * This contains data that is private to the coarse demand power policy.
+ *
+ * @dummy: Dummy member - no state needed
+ */
+struct kbasep_pm_policy_coarse_demand {
+	int dummy;
+};
+
+extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops;
+
+#endif /* MALI_KBASE_PM_COARSE_DEMAND_H */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
new file mode 100644
index 0000000..0cff22e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
@@ -0,0 +1,472 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Backend-specific Power Manager definitions
+ */
+
+#ifndef _KBASE_PM_HWACCESS_DEFS_H_
+#define _KBASE_PM_HWACCESS_DEFS_H_
+
+#include "mali_kbase_pm_always_on.h"
+#include "mali_kbase_pm_coarse_demand.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_pm_always_on_demand.h"
+#endif
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+struct kbase_jd_atom;
+
+/**
+ * enum kbase_pm_core_type - The types of core in a GPU.
+ *
+ * These enumerated values are used in calls to
+ * - kbase_pm_get_present_cores()
+ * - kbase_pm_get_active_cores()
+ * - kbase_pm_get_trans_cores()
+ * - kbase_pm_get_ready_cores().
+ *
+ * They specify which type of core should be acted on. These values are set in
+ * a manner that allows the core_type_to_reg() function to be simpler and more
+ * efficient.
+ *
+ * @KBASE_PM_CORE_L2: The L2 cache
+ * @KBASE_PM_CORE_SHADER: Shader cores
+ * @KBASE_PM_CORE_TILER: Tiler cores
+ * @KBASE_PM_CORE_STACK: Core stacks
+ */
+enum kbase_pm_core_type {
+	KBASE_PM_CORE_L2 = L2_PRESENT_LO,
+	KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO,
+	KBASE_PM_CORE_TILER = TILER_PRESENT_LO,
+	KBASE_PM_CORE_STACK = STACK_PRESENT_LO
+};
+
+/**
+ * enum kbase_l2_core_state - The states used for the L2 cache & tiler power
+ *                            state machine.
+ *
+ * @KBASE_L2_OFF: The L2 cache and tiler are off
+ * @KBASE_L2_PEND_ON: The L2 cache and tiler are powering on
+ * @KBASE_L2_ON_HWCNT_ENABLE: The L2 cache and tiler are on, and hwcnt is being
+ *                            enabled
+ * @KBASE_L2_ON: The L2 cache and tiler are on, and hwcnt is enabled
+ * @KBASE_L2_ON_HWCNT_DISABLE: The L2 cache and tiler are on, and hwcnt is being
+ *                             disabled
+ * @KBASE_L2_POWER_DOWN: The L2 cache and tiler are about to be powered off
+ * @KBASE_L2_PEND_OFF: The L2 cache and tiler are powering off
+ * @KBASE_L2_RESET_WAIT: The GPU is resetting, L2 cache and tiler power state
+ *                       are unknown
+ */
+enum kbase_l2_core_state {
+	KBASE_L2_OFF = 0,
+	KBASE_L2_PEND_ON,
+	KBASE_L2_ON_HWCNT_ENABLE,
+	KBASE_L2_ON,
+	KBASE_L2_ON_HWCNT_DISABLE,
+	KBASE_L2_POWER_DOWN,
+	KBASE_L2_PEND_OFF,
+	KBASE_L2_RESET_WAIT
+};
+
+/**
+ * enum kbase_shader_core_state - The states used for the shaders' state machine.
+ *
+ * @KBASE_SHADERS_OFF_CORESTACK_OFF: The shaders and core stacks are off
+ * @KBASE_SHADERS_OFF_CORESTACK_PEND_ON: The shaders are off, core stacks have
+ *                                       been requested to power on
+ * @KBASE_SHADERS_PEND_ON_CORESTACK_ON: Core stacks are on, shaders have been
+ *                                      requested to power on
+ * @KBASE_SHADERS_ON_CORESTACK_ON: The shaders and core stacks are on
+ * @KBASE_SHADERS_WAIT_OFF_CORESTACK_ON: The shaders have been requested to
+ *                                       power off, but they remain on for the
+ *                                       duration of the hysteresis timer
+ * @KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON: The hysteresis timer has expired
+ * @KBASE_SHADERS_PEND_OFF_CORESTACK_ON: The core stacks are on, and the shaders
+ *                                       have been requested to power off
+ * @KBASE_SHADERS_OFF_CORESTACK_PEND_OFF: The shaders are off, and the core stacks
+ *                                        have been requested to power off
+ * @KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF: Shaders and corestacks are
+ *                                                  off, but the tick timer
+ *                                                  cancellation is still
+ *                                                  pending.
+ * @KBASE_SHADERS_RESET_WAIT: The GPU is resetting, shader and core stack power
+ *                            states are unknown
+ */
+enum kbase_shader_core_state {
+	KBASE_SHADERS_OFF_CORESTACK_OFF = 0,
+	KBASE_SHADERS_OFF_CORESTACK_PEND_ON,
+	KBASE_SHADERS_PEND_ON_CORESTACK_ON,
+	KBASE_SHADERS_ON_CORESTACK_ON,
+	KBASE_SHADERS_WAIT_OFF_CORESTACK_ON,
+	KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON,
+	KBASE_SHADERS_PEND_OFF_CORESTACK_ON,
+	KBASE_SHADERS_OFF_CORESTACK_PEND_OFF,
+	KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF,
+	KBASE_SHADERS_RESET_WAIT
+};
+
+/**
+ * struct kbasep_pm_metrics - Metrics data collected for use by the power
+ *                            management framework.
+ *
+ *  @time_busy: number of ns the GPU was busy executing jobs since the
+ *          @time_period_start timestamp.
+ *  @time_idle: number of ns the GPU was not executing jobs since the
+ *          @time_period_start timestamp.
+ *  @busy_cl: number of ns the GPU was busy executing CL jobs. Note that
+ *           if two CL jobs were active for 400ns, this value would be updated
+ *           with 800.
+ *  @busy_gl: number of ns the GPU was busy executing GL jobs. Note that
+ *           if two GL jobs were active for 400ns, this value would be updated
+ *           with 800.
+ */
+struct kbasep_pm_metrics {
+	u32 time_busy;
+	u32 time_idle;
+	u32 busy_cl[2];
+	u32 busy_gl;
+};
+
+/**
+ * struct kbasep_pm_metrics_state - State required to collect the metrics in
+ *                                  struct kbasep_pm_metrics
+ *  @time_period_start: time at which busy/idle measurements started
+ *  @gpu_active: true when the GPU is executing jobs, false when it is not.
+ *           Updated when the job scheduler informs us that a job is submitted
+ *           to or removed from a GPU slot.
+ *  @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device.
+ *  @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As
+ *           GL jobs never run on slot 2 this slot is not recorded.
+ *  @lock: spinlock protecting the kbasep_pm_metrics_data structure
+ *  @platform_data: pointer to data controlled by platform specific code
+ *  @kbdev: pointer to kbase device for which metrics are collected
+ *  @values: The current values of the power management metrics. The
+ *           kbase_pm_get_dvfs_metrics() function is used to compare these
+ *           current values with the saved values from a previous invocation.
+ *  @timer: timer to regularly make DVFS decisions based on the power
+ *           management metrics.
+ *  @timer_active: boolean indicating @timer is running
+ *  @dvfs_last: values of the PM metrics from the last DVFS tick
+ *  @dvfs_diff: difference between the current and previous PM metrics.
+ */
+struct kbasep_pm_metrics_state {
+	ktime_t time_period_start;
+	bool gpu_active;
+	u32 active_cl_ctx[2];
+	u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */
+	spinlock_t lock;
+
+	void *platform_data;
+	struct kbase_device *kbdev;
+
+	struct kbasep_pm_metrics values;
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	struct hrtimer timer;
+	bool timer_active;
+	struct kbasep_pm_metrics dvfs_last;
+	struct kbasep_pm_metrics dvfs_diff;
+#endif
+};
+
+/**
+ * struct kbasep_pm_tick_timer_state - State for the shader hysteresis timer
+ * @wq: Work queue used to wait for the timer to stop
+ * @work: Work item which cancels the timer
+ * @timer: Timer for powering off the shader cores
+ * @configured_interval: Period of GPU poweroff timer
+ * @configured_ticks: User-configured number of ticks to wait after the shader
+ *                    power down request is received before turning off the cores
+ * @remaining_ticks: Number of remaining timer ticks until shaders are powered off
+ * @cancel_queued: True if the cancellation work item has been queued. This is
+ *                 required to ensure that it is not queued twice, e.g. after
+ *                 a reset, which could cause the timer to be incorrectly
+ *                 cancelled later by a delayed workitem.
+ * @needed: Whether the timer should restart itself
+ */
+struct kbasep_pm_tick_timer_state {
+	struct workqueue_struct *wq;
+	struct work_struct work;
+	struct hrtimer timer;
+
+	ktime_t configured_interval;
+	unsigned int configured_ticks;
+	unsigned int remaining_ticks;
+
+	bool cancel_queued;
+	bool needed;
+};
+
+union kbase_pm_policy_data {
+	struct kbasep_pm_policy_always_on always_on;
+	struct kbasep_pm_policy_coarse_demand coarse_demand;
+#if !MALI_CUSTOMER_RELEASE
+	struct kbasep_pm_policy_always_on_demand always_on_demand;
+#endif
+};
+
+/**
+ * struct kbase_pm_backend_data - Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ *
+ * @pm_current_policy: The policy that is currently actively controlling the
+ *                     power state.
+ * @pm_policy_data:    Private data for current PM policy
+ * @reset_done:        Flag when a reset is complete
+ * @reset_done_wait:   Wait queue to wait for changes to @reset_done
+ * @gpu_cycle_counter_requests: The reference count of active gpu cycle counter
+ *                              users
+ * @gpu_cycle_counter_requests_lock: Lock to protect @gpu_cycle_counter_requests
+ * @gpu_in_desired_state_wait: Wait queue set when the GPU is in the desired
+ *                             state according to the L2 and shader power state
+ *                             machines
+ * @gpu_powered:       Set to true when the GPU is powered and register
+ *                     accesses are possible, false otherwise
+ * @instr_enabled:     Set to true when instrumentation is enabled,
+ *                     false otherwise
+ * @cg1_disabled:      Set if the policy wants to keep the second core group
+ *                     powered off
+ * @driver_ready_for_irqs: Debug state indicating whether sufficient
+ *                         initialization of the driver has occurred to handle
+ *                         IRQs
+ * @gpu_powered_lock:  Spinlock that must be held when writing @gpu_powered or
+ *                     accessing @driver_ready_for_irqs
+ * @metrics:           Structure to hold metrics for the GPU
+ * @shader_tick_timer: Structure to hold the shader poweroff tick timer state
+ * @poweroff_wait_in_progress: true if a wait for GPU power off is in progress.
+ *                             hwaccess_lock must be held when accessing
+ * @invoke_poweroff_wait_wq_when_l2_off: flag indicating that the L2 power state
+ *                                       machine should invoke the poweroff
+ *                                       worker after the L2 has turned off.
+ * @poweron_required: true if a GPU power on is required. Should only be set
+ *                    when poweroff_wait_in_progress is true, and therefore the
+ *                    GPU can not immediately be powered on. pm.lock must be
+ *                    held when accessing
+ * @poweroff_is_suspend: true if the GPU is being powered off due to a suspend
+ *                       request. pm.lock must be held when accessing
+ * @gpu_poweroff_wait_wq: workqueue for waiting for GPU to power off
+ * @gpu_poweroff_wait_work: work item for use with @gpu_poweroff_wait_wq
+ * @poweroff_wait: waitqueue for waiting for @gpu_poweroff_wait_work to complete
+ * @callback_power_on: Callback when the GPU needs to be turned on. See
+ *                     &struct kbase_pm_callback_conf
+ * @callback_power_off: Callback when the GPU may be turned off. See
+ *                     &struct kbase_pm_callback_conf
+ * @callback_power_suspend: Callback when a suspend occurs and the GPU needs to
+ *                          be turned off. See &struct kbase_pm_callback_conf
+ * @callback_power_resume: Callback when a resume occurs and the GPU needs to
+ *                          be turned on. See &struct kbase_pm_callback_conf
+ * @callback_power_runtime_on: Callback when the GPU needs to be turned on. See
+ *                             &struct kbase_pm_callback_conf
+ * @callback_power_runtime_off: Callback when the GPU may be turned off. See
+ *                              &struct kbase_pm_callback_conf
+ * @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
+ *                              &struct kbase_pm_callback_conf
+ * @ca_cores_enabled: Cores that are currently available
+ * @l2_state: The current state of the L2 cache state machine. See
+ *            &enum kbase_l2_core_state
+ * @l2_desired: True if the L2 cache should be powered on by the L2 cache state
+ *              machine
+ * @shaders_state: The current state of the shader state machine.
+ * @shaders_avail: This is updated by the state machine when it is in a state
+ *                 where it can handle changes to the core availability. This
+ *                 is internal to the shader state machine and should *not* be
+ *                 modified elsewhere.
+ * @shaders_desired: True if the PM active count or power policy requires the
+ *                   shader cores to be on. This is used as an input to the
+ *                   shader power state machine.  The current state of the
+ *                   cores may be different, but there should be transitions in
+ *                   progress that will eventually achieve this state (assuming
+ *                   that the policy doesn't change its mind in the mean time).
+ * @in_reset: True if a GPU is resetting and normal power manager operation is
+ *            suspended
+ * @protected_transition_override: True if a protected mode transition is in
+ *                                 progress and is overriding power manager
+ *                                 behaviour.
+ * @protected_l2_override: Non-zero if the L2 cache is required during a
+ *                         protected mode transition. Has no effect if not
+ *                         transitioning.
+ * @hwcnt_desired: True if we want GPU hardware counters to be enabled.
+ * @hwcnt_disabled: True if GPU hardware counters are not enabled.
+ * @hwcnt_disable_work: Work item to disable GPU hardware counters, used if
+ *                      atomic disable is not possible.
+ *
+ * Note:
+ * During an IRQ, @pm_current_policy can be NULL when the policy is being
+ * changed with kbase_pm_set_policy(). The change is protected under
+ * kbase_device.pm.power_change_lock. Direct access to this from IRQ context
+ * must therefore check for NULL. If NULL, then kbase_pm_set_policy() will
+ * re-issue the policy functions that would have been done under IRQ.
+ */
+struct kbase_pm_backend_data {
+	const struct kbase_pm_policy *pm_current_policy;
+	union kbase_pm_policy_data pm_policy_data;
+	bool reset_done;
+	wait_queue_head_t reset_done_wait;
+	int gpu_cycle_counter_requests;
+	spinlock_t gpu_cycle_counter_requests_lock;
+
+	wait_queue_head_t gpu_in_desired_state_wait;
+
+	bool gpu_powered;
+
+	bool instr_enabled;
+
+	bool cg1_disabled;
+
+#ifdef CONFIG_MALI_DEBUG
+	bool driver_ready_for_irqs;
+#endif /* CONFIG_MALI_DEBUG */
+
+	spinlock_t gpu_powered_lock;
+
+	struct kbasep_pm_metrics_state metrics;
+
+	struct kbasep_pm_tick_timer_state shader_tick_timer;
+
+	bool poweroff_wait_in_progress;
+	bool invoke_poweroff_wait_wq_when_l2_off;
+	bool poweron_required;
+	bool poweroff_is_suspend;
+
+	struct workqueue_struct *gpu_poweroff_wait_wq;
+	struct work_struct gpu_poweroff_wait_work;
+
+	wait_queue_head_t poweroff_wait;
+
+	int (*callback_power_on)(struct kbase_device *kbdev);
+	void (*callback_power_off)(struct kbase_device *kbdev);
+	void (*callback_power_suspend)(struct kbase_device *kbdev);
+	void (*callback_power_resume)(struct kbase_device *kbdev);
+	int (*callback_power_runtime_on)(struct kbase_device *kbdev);
+	void (*callback_power_runtime_off)(struct kbase_device *kbdev);
+	int (*callback_power_runtime_idle)(struct kbase_device *kbdev);
+
+	u64 ca_cores_enabled;
+
+	enum kbase_l2_core_state l2_state;
+	enum kbase_shader_core_state shaders_state;
+	u64 shaders_avail;
+	bool l2_desired;
+	bool shaders_desired;
+
+	bool in_reset;
+
+	bool protected_transition_override;
+	int protected_l2_override;
+
+	bool hwcnt_desired;
+	bool hwcnt_disabled;
+	struct work_struct hwcnt_disable_work;
+};
+
+
+/* List of policy IDs */
+enum kbase_pm_policy_id {
+	KBASE_PM_POLICY_ID_COARSE_DEMAND,
+#if !MALI_CUSTOMER_RELEASE
+	KBASE_PM_POLICY_ID_ALWAYS_ON_DEMAND,
+#endif
+	KBASE_PM_POLICY_ID_ALWAYS_ON
+};
+
+typedef u32 kbase_pm_policy_flags;
+
+#define KBASE_PM_POLICY_FLAG_DISABLED_WITH_POWER_DOWN_ONLY (1u)
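+/*
+ * A policy carrying KBASE_PM_POLICY_FLAG_DISABLED_WITH_POWER_DOWN_ONLY in its
+ * @flags field is presumably excluded from selection while the
+ * platform_power_down_only module parameter is enabled; the policy selection
+ * code itself lives outside this file.
+ */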
+
+/**
+ * struct kbase_pm_policy - Power policy structure.
+ *
+ * Each power policy exposes a (static) instance of this structure which
+ * contains function pointers to the policy's methods.
+ *
+ * @name:               The name of this policy
+ * @init:               Function called when the policy is selected
+ * @term:               Function called when the policy is unselected
+ * @shaders_needed:     Function called to find out if shader cores are needed
+ * @get_core_active:    Function called to get the current overall GPU power
+ *                      state
+ * @flags:              Field indicating flags for this policy
+ * @id:                 Field indicating an ID for this policy. This is not
+ *                      necessarily the same as its index in the list returned
+ *                      by kbase_pm_list_policies().
+ *                      It is used purely for debugging.
+ */
+struct kbase_pm_policy {
+	char *name;
+
+	/**
+	 * Function called when the policy is selected
+	 *
+	 * This should initialize the kbdev->pm.pm_policy_data structure. It
+	 * should not attempt to make any changes to hardware state.
+	 *
+	 * It is undefined what state the cores are in when the function is
+	 * called.
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 */
+	void (*init)(struct kbase_device *kbdev);
+
+	/**
+	 * Function called when the policy is unselected.
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 */
+	void (*term)(struct kbase_device *kbdev);
+
+	/**
+	 * Function called to find out if shader cores are needed
+	 *
+	 * This needs to at least satisfy kbdev->pm.backend.shaders_desired,
+	 * and so must never return false when shaders_desired is true.
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 *
+	 * Return: true if shader cores are needed, false otherwise
+	 */
+	bool (*shaders_needed)(struct kbase_device *kbdev);
+
+	/**
+	 * Function called to get the current overall GPU power state
+	 *
+	 * This function must meet or exceed the requirements for power
+	 * indicated by kbase_pm_is_active().
+	 *
+	 * @kbdev: The kbase device structure for the device (must be a
+	 *         valid pointer)
+	 *
+	 * Return: true if the GPU should be powered, false otherwise
+	 */
+	bool (*get_core_active)(struct kbase_device *kbdev);
+
+	kbase_pm_policy_flags flags;
+	enum kbase_pm_policy_id id;
+};
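+
+/*
+ * Illustrative sketch, not part of this change: the simplest conforming
+ * policy keeps the GPU and shaders powered whenever possible. The always_on
+ * policy behaves roughly like:
+ *
+ *	static bool always_on_shaders_needed(struct kbase_device *kbdev)
+ *	{
+ *		return true;
+ *	}
+ *
+ *	static bool always_on_get_core_active(struct kbase_device *kbdev)
+ *	{
+ *		return true;
+ *	}
+ *
+ * which trivially satisfies both requirements documented above (never return
+ * false from shaders_needed while shaders_desired is true, and meet or
+ * exceed kbase_pm_is_active()).
+ */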
+
+#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
new file mode 100644
index 0000000..767be06
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
@@ -0,0 +1,1936 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel Power Management hardware control
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gator.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_smc.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+#include <mali_kbase_hwcnt_context.h>
+#include <backend/gpu/mali_kbase_cache_policy_backend.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_irq_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+#include <linux/of.h>
+
+#ifdef CONFIG_MALI_CORESTACK
+bool corestack_driver_control = true;
+#else
+bool corestack_driver_control; /* Default value of 0/false */
+#endif
+module_param(corestack_driver_control, bool, 0000);
+MODULE_PARM_DESC(corestack_driver_control,
+		"Let the driver power on/off the GPU core stack independently "
+		"without involving the Power Domain Controller. This should "
+		"only be enabled on platforms for which integration of the PDC "
+		"to the Mali GPU is known to be problematic.");
+KBASE_EXPORT_TEST_API(corestack_driver_control);
+
+#ifdef CONFIG_MALI_PLATFORM_POWER_DOWN_ONLY
+bool platform_power_down_only = true;
+#else
+bool platform_power_down_only = false;
+#endif
+module_param(platform_power_down_only, bool, 0000);
+MODULE_PARM_DESC(platform_power_down_only,
+		"Disable power down of individual cores.");
+
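+/*
+ * Both module parameters above use permissions 0000, so they are not exposed
+ * via sysfs at runtime and would typically be set on the kernel command line
+ * or at module load time, e.g. (assuming the usual mali_kbase module name):
+ *
+ *	modprobe mali_kbase corestack_driver_control=1
+ */
+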
+/**
+ * enum kbasep_pm_action - Actions that can be performed on a core.
+ *
+ * This enumeration is private to the file. Its values are set to allow the
+ * core_type_to_reg() function, which decodes this enumeration, to be simpler
+ * and more efficient.
+ *
+ * @ACTION_PRESENT: The cores that are present
+ * @ACTION_READY: The cores that are ready
+ * @ACTION_PWRON: Power on the cores specified
+ * @ACTION_PWROFF: Power off the cores specified
+ * @ACTION_PWRTRANS: The cores that are transitioning
+ * @ACTION_PWRACTIVE: The cores that are active
+ */
+enum kbasep_pm_action {
+	ACTION_PRESENT = 0,
+	ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
+	ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
+	ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
+	ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
+	ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
+};
+
+static u64 kbase_pm_get_state(
+		struct kbase_device *kbdev,
+		enum kbase_pm_core_type core_type,
+		enum kbasep_pm_action action);
+
+static bool kbase_pm_is_l2_desired(struct kbase_device *kbdev)
+{
+	if (kbdev->pm.backend.protected_transition_override &&
+			kbdev->pm.backend.protected_l2_override)
+		return true;
+
+	if (kbdev->pm.backend.protected_transition_override &&
+			!kbdev->pm.backend.shaders_desired)
+		return false;
+
+	return kbdev->pm.backend.l2_desired;
+}
+
+void kbase_pm_protected_override_enable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbdev->pm.backend.protected_transition_override = true;
+}
+void kbase_pm_protected_override_disable(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbdev->pm.backend.protected_transition_override = false;
+}
+
+void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (override) {
+		kbdev->pm.backend.protected_l2_override++;
+		WARN_ON(kbdev->pm.backend.protected_l2_override <= 0);
+	} else {
+		kbdev->pm.backend.protected_l2_override--;
+		WARN_ON(kbdev->pm.backend.protected_l2_override < 0);
+	}
+
+	kbase_pm_update_state(kbdev);
+}
+
+/**
+ * core_type_to_reg - Decode a core type and action to a register.
+ *
+ * Given a core type (defined by kbase_pm_core_type) and an action (defined
+ * by kbasep_pm_action) this function will return the register offset that
+ * will perform the action on the core type. The register returned is the _LO
+ * register and an offset must be applied to use the _HI register.
+ *
+ * @core_type: The type of core
+ * @action:    The type of action
+ *
+ * Return: The register offset of the _LO register that performs an action of
+ * type @action on a core of type @core_type.
+ */
+static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
+						enum kbasep_pm_action action)
+{
+	if (corestack_driver_control) {
+		if (core_type == KBASE_PM_CORE_STACK) {
+			switch (action) {
+			case ACTION_PRESENT:
+				return STACK_PRESENT_LO;
+			case ACTION_READY:
+				return STACK_READY_LO;
+			case ACTION_PWRON:
+				return STACK_PWRON_LO;
+			case ACTION_PWROFF:
+				return STACK_PWROFF_LO;
+			case ACTION_PWRTRANS:
+				return STACK_PWRTRANS_LO;
+			default:
+				WARN(1, "Invalid action for core type\n");
+			}
+		}
+	}
+
+	return (u32)core_type + (u32)action;
+}
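+
+/*
+ * Worked example of the encoding above: for shader cores,
+ * core_type_to_reg(KBASE_PM_CORE_SHADER, ACTION_PWRON) evaluates to
+ * SHADER_PRESENT_LO + (SHADER_PWRON_LO - SHADER_PRESENT_LO), i.e.
+ * SHADER_PWRON_LO, so a caller only has to add 4 to reach the corresponding
+ * _HI register.
+ */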
+
+#ifdef CONFIG_ARM64
+static void mali_cci_flush_l2(struct kbase_device *kbdev)
+{
+	const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
+	u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+	u32 raw;
+
+	/*
+	 * Note that we don't take the cache flush mutex here, since we
+	 * expect to be the last user of the L2: all other L2 users must
+	 * have dropped their references to initiate the L2 power down,
+	 * and L2 power down is the only valid place for this to be
+	 * called from.
+	 */
+
+	kbase_reg_write(kbdev,
+			GPU_CONTROL_REG(GPU_COMMAND),
+			GPU_COMMAND_CLEAN_INV_CACHES);
+
+	raw = kbase_reg_read(kbdev,
+		GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
+
+	/* Wait for the cache flush to complete before continuing; exit on
+	 * GPU reset or loop expiry. */
+	while (((raw & mask) == 0) && --loops) {
+		raw = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
+	}
+}
+#endif
+
+/**
+ * kbase_pm_invoke - Invokes an action on a core set
+ *
+ * This function performs the action given by @action on a set of cores of a
+ * type given by @core_type. It is a static function used by
+ * kbase_pm_transition_core_type()
+ *
+ * @kbdev:     The kbase device structure of the device
+ * @core_type: The type of core that the action should be performed on
+ * @cores:     A bit mask of cores to perform the action on (low 32 bits)
+ * @action:    The action to perform on the cores
+ */
+static void kbase_pm_invoke(struct kbase_device *kbdev,
+					enum kbase_pm_core_type core_type,
+					u64 cores,
+					enum kbasep_pm_action action)
+{
+	u32 reg;
+	u32 lo = cores & 0xFFFFFFFF;
+	u32 hi = (cores >> 32) & 0xFFFFFFFF;
+
+	/* When 'platform_power_down_only' is enabled, no core type should be
+	 * turned off individually.
+	 */
+	KBASE_DEBUG_ASSERT(!(action == ACTION_PWROFF &&
+			platform_power_down_only));
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	reg = core_type_to_reg(core_type, action);
+
+	KBASE_DEBUG_ASSERT(reg);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+	if (cores) {
+		if (action == ACTION_PWRON)
+			kbase_trace_mali_pm_power_on(core_type, cores);
+		else if (action == ACTION_PWROFF)
+			kbase_trace_mali_pm_power_off(core_type, cores);
+	}
+#endif
+
+	if (cores) {
+		u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
+
+		if (action == ACTION_PWRON)
+			state |= cores;
+		else if (action == ACTION_PWROFF)
+			state &= ~cores;
+		KBASE_TLSTREAM_AUX_PM_STATE(core_type, state);
+	}
+
+	/* Tracing */
+	if (cores) {
+		if (action == ACTION_PWRON)
+			switch (core_type) {
+			case KBASE_PM_CORE_SHADER:
+				KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u,
+									lo);
+				break;
+			case KBASE_PM_CORE_TILER:
+				KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL,
+								NULL, 0u, lo);
+				break;
+			case KBASE_PM_CORE_L2:
+				KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL,
+									0u, lo);
+				break;
+			default:
+				break;
+			}
+		else if (action == ACTION_PWROFF)
+			switch (core_type) {
+			case KBASE_PM_CORE_SHADER:
+				KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL,
+									0u, lo);
+				break;
+			case KBASE_PM_CORE_TILER:
+				KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL,
+								NULL, 0u, lo);
+				break;
+			case KBASE_PM_CORE_L2:
+				KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL,
+									0u, lo);
+				/* disable snoops before L2 is turned off */
+				kbase_pm_cache_snoop_disable(kbdev);
+				break;
+			default:
+				break;
+			}
+	}
+
+	if (lo != 0)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo);
+
+	if (hi != 0)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi);
+}
+
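+/*
+ * Illustrative sketch of how the state machines below drive this helper:
+ * powering on every present tiler core, with hwaccess_lock held as asserted
+ * above, would look like
+ *
+ *   kbase_pm_invoke(kbdev, KBASE_PM_CORE_TILER,
+ *                   kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_TILER),
+ *                   ACTION_PWRON);
+ */
+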
+/**
+ * kbase_pm_get_state - Get information about a core set
+ *
+ * This function gets information (chosen by @action) about a set of cores of
+ * a type given by @core_type. It is a static function used by
+ * kbase_pm_get_active_cores(), kbase_pm_get_trans_cores() and
+ * kbase_pm_get_ready_cores().
+ *
+ * @kbdev:     The kbase device structure of the device
+ * @core_type: The type of core that should be queried
+ * @action:    The property of the cores to query
+ *
+ * Return: A bit mask specifying the state of the cores
+ */
+static u64 kbase_pm_get_state(struct kbase_device *kbdev,
+					enum kbase_pm_core_type core_type,
+					enum kbasep_pm_action action)
+{
+	u32 reg;
+	u32 lo, hi;
+
+	reg = core_type_to_reg(core_type, action);
+
+	KBASE_DEBUG_ASSERT(reg);
+
+	lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg));
+	hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4));
+
+	return (((u64) hi) << 32) | ((u64) lo);
+}
+
+/**
+ * kbase_pm_get_present_cores - Get the cores that are present
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of the cores that are present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	switch (type) {
+	case KBASE_PM_CORE_L2:
+		return kbdev->gpu_props.props.raw_props.l2_present;
+	case KBASE_PM_CORE_SHADER:
+		return kbdev->gpu_props.props.raw_props.shader_present;
+	case KBASE_PM_CORE_TILER:
+		return kbdev->gpu_props.props.raw_props.tiler_present;
+	case KBASE_PM_CORE_STACK:
+		return kbdev->gpu_props.props.raw_props.stack_present;
+	default:
+		break;
+	}
+	KBASE_DEBUG_ASSERT(0);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores);
+
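+/*
+ * Usage note (illustrative only): callers treat the returned value as a
+ * bitmask, so for example the number of shader cores physically present is
+ *
+ *   hweight64(kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));
+ */
+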
+/**
+ * kbase_pm_get_active_cores - Get the cores that are "active"
+ *                             (busy processing work)
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are active
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores);
+
+/**
+ * kbase_pm_get_trans_cores - Get the cores that are transitioning between
+ *                            power states
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are transitioning
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores);
+
+/**
+ * kbase_pm_get_ready_cores - Get the cores that are powered on
+ *
+ * @kbdev: Kbase device
+ * @type: The type of cores to query
+ *
+ * Return: Bitmask of cores that are ready (powered on)
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type)
+{
+	u64 result;
+
+	result = kbase_pm_get_state(kbdev, type, ACTION_READY);
+
+	switch (type) {
+	case KBASE_PM_CORE_SHADER:
+		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u,
+								(u32) result);
+		break;
+	case KBASE_PM_CORE_TILER:
+		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u,
+								(u32) result);
+		break;
+	case KBASE_PM_CORE_L2:
+		KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u,
+								(u32) result);
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores);
+
+static u64 kbase_pm_l2_update_state(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	u64 l2_present = kbdev->gpu_props.props.raw_props.l2_present;
+	u64 tiler_present = kbdev->gpu_props.props.raw_props.tiler_present;
+	enum kbase_l2_core_state prev_state;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	do {
+		/* Get current state */
+		u64 l2_trans = kbase_pm_get_trans_cores(kbdev,
+				KBASE_PM_CORE_L2);
+		u64 l2_ready = kbase_pm_get_ready_cores(kbdev,
+				KBASE_PM_CORE_L2);
+		u64 tiler_trans = kbase_pm_get_trans_cores(kbdev,
+				KBASE_PM_CORE_TILER);
+		u64 tiler_ready = kbase_pm_get_ready_cores(kbdev,
+				KBASE_PM_CORE_TILER);
+
+		/* mask off ready from trans in case transitions finished
+		 * between the register reads
+		 */
+		l2_trans &= ~l2_ready;
+		tiler_trans &= ~tiler_ready;
+
+		prev_state = backend->l2_state;
+
+		switch (backend->l2_state) {
+		case KBASE_L2_OFF:
+			if (kbase_pm_is_l2_desired(kbdev)) {
+				/* L2 is required, power on.  Powering on the
+				 * tiler will also power the first L2 cache.
+				 */
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_TILER,
+						tiler_present, ACTION_PWRON);
+
+				/* If we have more than one L2 cache then we
+				 * must power them on explicitly.
+				 */
+				if (l2_present != 1)
+					kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
+							l2_present & ~1,
+							ACTION_PWRON);
+				backend->l2_state = KBASE_L2_PEND_ON;
+			}
+			break;
+
+		case KBASE_L2_PEND_ON:
+			if (!l2_trans && l2_ready == l2_present && !tiler_trans
+					&& tiler_ready == tiler_present) {
+				KBASE_TRACE_ADD(kbdev,
+						PM_CORES_CHANGE_AVAILABLE_TILER,
+						NULL, NULL, 0u,
+						(u32)tiler_ready);
+				/*
+				 * Ensure snoops are enabled after L2 is powered
+				 * up. Note that kbase keeps track of the snoop
+				 * state, so safe to repeatedly call.
+				 */
+				kbase_pm_cache_snoop_enable(kbdev);
+
+				/* With the L2 enabled, we can now enable
+				 * hardware counters.
+				 */
+				backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
+
+				/* Now that the L2 is on, the shaders can start
+				 * powering on if they're required. The obvious
+				 * way to do this would be to call
+				 * kbase_pm_shaders_update_state() here.
+				 * However, that would make the two state
+				 * machines mutually recursive, as the opposite
+				 * would be needed for powering down. Instead,
+				 * callers of this function should use the
+				 * kbase_pm_update_state() wrapper, which will
+				 * call the shader state machine immediately
+				 * after the L2 (for power up), or
+				 * automatically re-invoke the L2 state machine
+				 * when the shaders power down.
+				 */
+			}
+			break;
+
+		case KBASE_L2_ON_HWCNT_ENABLE:
+			backend->hwcnt_desired = true;
+			if (backend->hwcnt_disabled) {
+				kbase_hwcnt_context_enable(
+					kbdev->hwcnt_gpu_ctx);
+				backend->hwcnt_disabled = false;
+			}
+			backend->l2_state = KBASE_L2_ON;
+			break;
+
+		case KBASE_L2_ON:
+			if (!kbase_pm_is_l2_desired(kbdev)) {
+				/* Do not power off L2 until the shaders and
+				 * core stacks are off.
+				 */
+				if (backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+					break;
+
+				/* We need to make sure hardware counters are
+				 * disabled before powering down the L2, to
+				 * prevent loss of data.
+				 *
+				 * We waited until after the cores were powered
+				 * down to prevent ping-ponging between hwcnt
+				 * enabled and disabled, which would have
+				 * happened if userspace submitted more work
+				 * while we were trying to power down.
+				 */
+				backend->l2_state = KBASE_L2_ON_HWCNT_DISABLE;
+			}
+			break;
+
+		case KBASE_L2_ON_HWCNT_DISABLE:
+			/* If the L2 became desired while we were waiting on the
+			 * worker to do the actual hwcnt disable (which might
+			 * happen if some work was submitted immediately after
+			 * the shaders powered off), then we need to early-out
+			 * of this state and re-enable hwcnt.
+			 *
+			 * If we get lucky, the hwcnt disable might not have
+			 * actually started yet, and the logic in the hwcnt
+			 * enable state will prevent the worker from
+			 * performing the disable entirely, preventing loss of
+			 * any hardware counter data.
+			 *
+			 * If the hwcnt disable has started, then we'll lose
+			 * a tiny amount of hardware counter data between the
+			 * disable and the re-enable occurring.
+			 *
+			 * This loss of data is preferable to the alternative,
+			 * which is to block the shader cores from doing any
+			 * work until we're sure hwcnt has been re-enabled.
+			 */
+			if (kbase_pm_is_l2_desired(kbdev)) {
+				backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
+				break;
+			}
+
+			/* See if we can get away with disabling hwcnt
+			 * atomically, otherwise kick off a worker.
+			 */
+			backend->hwcnt_desired = false;
+			if (!backend->hwcnt_disabled) {
+				if (kbase_hwcnt_context_disable_atomic(
+					kbdev->hwcnt_gpu_ctx))
+					backend->hwcnt_disabled = true;
+				else
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+					queue_work(system_wq,
+						&backend->hwcnt_disable_work);
+#else
+					queue_work(system_highpri_wq,
+						&backend->hwcnt_disable_work);
+#endif
+			}
+
+			if (backend->hwcnt_disabled)
+				backend->l2_state = KBASE_L2_POWER_DOWN;
+			break;
+
+		case KBASE_L2_POWER_DOWN:
+			if (!platform_power_down_only)
+				/* Powering off the L2 will also power off the
+				 * tiler.
+				 */
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
+						l2_present,
+						ACTION_PWROFF);
+			else
+				/* If L2 cache is powered then we must flush it
+				 * before we power off the GPU. Normally this
+				 * would have been handled when the L2 was
+				 * powered off.
+				 */
+				kbase_gpu_start_cache_clean_nolock(
+						kbdev);
+
+			KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
+					NULL, NULL, 0u, 0u);
+
+			backend->l2_state = KBASE_L2_PEND_OFF;
+			break;
+
+		case KBASE_L2_PEND_OFF:
+			if (!platform_power_down_only) {
+				/* We only need to check the L2 here - if the L2
+				 * is off then the tiler is definitely also off.
+				 */
+				if (!l2_trans && !l2_ready)
+					/* L2 is now powered off */
+					backend->l2_state = KBASE_L2_OFF;
+			} else {
+				if (!kbdev->cache_clean_in_progress)
+					backend->l2_state = KBASE_L2_OFF;
+			}
+			break;
+
+		case KBASE_L2_RESET_WAIT:
+			if (!backend->in_reset) {
+				/* Reset complete */
+				backend->l2_state = KBASE_L2_OFF;
+			}
+			break;
+
+		default:
+			WARN(1, "Invalid state in l2_state: %d",
+					backend->l2_state);
+		}
+	} while (backend->l2_state != prev_state);
+
+	if (kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off &&
+			backend->l2_state == KBASE_L2_OFF) {
+		kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off = false;
+		queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
+				&kbdev->pm.backend.gpu_poweroff_wait_work);
+	}
+
+	if (backend->l2_state == KBASE_L2_ON)
+		return l2_present;
+	return 0;
+}
+
+static void shader_poweroff_timer_stop_callback(struct work_struct *data)
+{
+	unsigned long flags;
+	struct kbasep_pm_tick_timer_state *stt = container_of(data,
+			struct kbasep_pm_tick_timer_state, work);
+	struct kbase_device *kbdev = container_of(stt, struct kbase_device,
+			pm.backend.shader_tick_timer);
+
+	hrtimer_cancel(&stt->timer);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	stt->cancel_queued = false;
+	if (kbdev->pm.backend.gpu_powered)
+		kbase_pm_update_state(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/**
+ * shader_poweroff_timer_queue_cancel - cancel the shader poweroff tick timer
+ * @kbdev:      pointer to kbase device
+ *
+ * Synchronization between the shader state machine and the timer thread is
+ * difficult. This is because situations may arise where the state machine
+ * wants to start the timer, but the callback is already running, and has
+ * already passed the point at which it checks whether it is required, and so
+ * cancels itself, even though the state machine may have just tried to call
+ * hrtimer_start.
+ *
+ * This cannot be stopped by holding hwaccess_lock in the timer thread,
+ * because there are still infinitesimally small sections at the start and end
+ * of the callback where the lock is not held.
+ *
+ * Instead, a new state is added to the shader state machine,
+ * KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF. This is used to guarantee
+ * that when the shaders are switched off, the timer has definitely been
+ * cancelled. As a result, when KBASE_SHADERS_ON_CORESTACK_ON is left and the
+ * timer is started, it is guaranteed that either the timer is already running
+ * (from an availability change or cancelled timer), or hrtimer_start will
+ * succeed. It is critical to avoid ending up in
+ * KBASE_SHADERS_WAIT_OFF_CORESTACK_ON without the timer running, or it could
+ * hang there forever.
+ */
+static void shader_poweroff_timer_queue_cancel(struct kbase_device *kbdev)
+{
+	struct kbasep_pm_tick_timer_state *stt =
+			&kbdev->pm.backend.shader_tick_timer;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	stt->needed = false;
+
+	if (hrtimer_active(&stt->timer) && !stt->cancel_queued) {
+		stt->cancel_queued = true;
+		queue_work(stt->wq, &stt->work);
+	}
+}
+
+static void kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	struct kbasep_pm_tick_timer_state *stt =
+			&kbdev->pm.backend.shader_tick_timer;
+	enum kbase_shader_core_state prev_state;
+	u64 stacks_avail = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (corestack_driver_control)
+		/* Always power on all the corestacks. Disabling certain
+		 * corestacks when their respective shaders are not in the
+		 * available bitmap is not currently supported.
+		 */
+		stacks_avail = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_STACK);
+
+	do {
+		u64 shaders_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);
+		u64 shaders_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
+		u64 stacks_trans = 0;
+		u64 stacks_ready = 0;
+
+		if (corestack_driver_control) {
+			stacks_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_STACK);
+			stacks_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_STACK);
+		}
+
+		/* mask off ready from trans in case transitions finished
+		 * between the register reads
+		 */
+		shaders_trans &= ~shaders_ready;
+		stacks_trans &= ~stacks_ready;
+
+		prev_state = backend->shaders_state;
+
+		switch (backend->shaders_state) {
+		case KBASE_SHADERS_OFF_CORESTACK_OFF:
+			/* Ignore changes to the shader core availability
+			 * except at certain points where we can handle it,
+			 * i.e. off and SHADERS_ON_CORESTACK_ON.
+			 */
+			backend->shaders_avail = kbase_pm_ca_get_core_mask(kbdev);
+
+			if (backend->shaders_desired && backend->l2_state == KBASE_L2_ON) {
+				if (corestack_driver_control)
+					kbase_pm_invoke(kbdev, KBASE_PM_CORE_STACK,
+							stacks_avail, ACTION_PWRON);
+
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_PEND_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_OFF_CORESTACK_PEND_ON:
+			if (!stacks_trans && stacks_ready == stacks_avail) {
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
+						backend->shaders_avail, ACTION_PWRON);
+
+				backend->shaders_state = KBASE_SHADERS_PEND_ON_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_PEND_ON_CORESTACK_ON:
+			if (!shaders_trans && shaders_ready == backend->shaders_avail) {
+				KBASE_TRACE_ADD(kbdev,
+						PM_CORES_CHANGE_AVAILABLE,
+						NULL, NULL, 0u, (u32)shaders_ready);
+				backend->shaders_state = KBASE_SHADERS_ON_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_ON_CORESTACK_ON:
+			backend->shaders_avail = kbase_pm_ca_get_core_mask(kbdev);
+
+			if (!backend->shaders_desired) {
+				if (kbdev->pm.backend.protected_transition_override ||
+						!stt->configured_ticks ||
+						WARN_ON(stt->cancel_queued)) {
+					backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
+				} else {
+					stt->remaining_ticks = stt->configured_ticks;
+					stt->needed = true;
+
+					/* The shader hysteresis timer is not
+					 * done the obvious way, which would be
+					 * to start an hrtimer when the shader
+					 * power off is requested. Instead,
+					 * use a 'tick' timer, and set the
+					 * remaining number of ticks on a power
+					 * off request.  This avoids the
+					 * latency of starting, then
+					 * immediately cancelling an hrtimer
+					 * when the shaders are re-requested
+					 * before the timeout expires.
+					 */
+					if (!hrtimer_active(&stt->timer))
+						hrtimer_start(&stt->timer,
+								stt->configured_interval,
+								HRTIMER_MODE_REL);
+
+					backend->shaders_state = KBASE_SHADERS_WAIT_OFF_CORESTACK_ON;
+				}
+			} else if (!platform_power_down_only) {
+				if (backend->shaders_avail & ~shaders_ready) {
+					backend->shaders_avail |= shaders_ready;
+
+					kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
+							backend->shaders_avail & ~shaders_ready,
+							ACTION_PWRON);
+					backend->shaders_state = KBASE_SHADERS_PEND_ON_CORESTACK_ON;
+				}
+			}
+			break;
+
+		case KBASE_SHADERS_WAIT_OFF_CORESTACK_ON:
+			if (WARN_ON(!hrtimer_active(&stt->timer))) {
+				stt->remaining_ticks = 0;
+				backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
+			}
+
+			if (backend->shaders_desired) {
+				stt->remaining_ticks = 0;
+				backend->shaders_state = KBASE_SHADERS_ON_CORESTACK_ON;
+			} else if (stt->remaining_ticks == 0) {
+				backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
+			}
+			break;
+
+		case KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON:
+			shader_poweroff_timer_queue_cancel(kbdev);
+
+			if (!platform_power_down_only)
+				kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
+						shaders_ready, ACTION_PWROFF);
+
+			KBASE_TRACE_ADD(kbdev,
+					PM_CORES_CHANGE_AVAILABLE,
+					NULL, NULL, 0u, 0u);
+
+			backend->shaders_state = KBASE_SHADERS_PEND_OFF_CORESTACK_ON;
+			break;
+
+		case KBASE_SHADERS_PEND_OFF_CORESTACK_ON:
+			if ((!shaders_trans && !shaders_ready) || platform_power_down_only) {
+				if (corestack_driver_control && !platform_power_down_only)
+					kbase_pm_invoke(kbdev, KBASE_PM_CORE_STACK,
+							stacks_avail, ACTION_PWROFF);
+
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_PEND_OFF;
+			}
+			break;
+
+		case KBASE_SHADERS_OFF_CORESTACK_PEND_OFF:
+			if ((!stacks_trans && !stacks_ready) || platform_power_down_only)
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF;
+			break;
+
+		case KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF:
+			if (!hrtimer_active(&stt->timer) && !stt->cancel_queued)
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF;
+			break;
+
+		case KBASE_SHADERS_RESET_WAIT:
+			/* Reset complete */
+			if (!backend->in_reset)
+				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF;
+			break;
+		}
+	} while (backend->shaders_state != prev_state);
+}
+
+static bool kbase_pm_is_in_desired_state_nolock(struct kbase_device *kbdev)
+{
+	bool in_desired_state = true;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbase_pm_is_l2_desired(kbdev) &&
+			kbdev->pm.backend.l2_state != KBASE_L2_ON)
+		in_desired_state = false;
+	else if (!kbase_pm_is_l2_desired(kbdev) &&
+			kbdev->pm.backend.l2_state != KBASE_L2_OFF)
+		in_desired_state = false;
+
+	if (kbdev->pm.backend.shaders_desired &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_ON_CORESTACK_ON)
+		in_desired_state = false;
+	else if (!kbdev->pm.backend.shaders_desired &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+		in_desired_state = false;
+
+	return in_desired_state;
+}
+
+static bool kbase_pm_is_in_desired_state(struct kbase_device *kbdev)
+{
+	bool in_desired_state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	in_desired_state = kbase_pm_is_in_desired_state_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return in_desired_state;
+}
+
+static bool kbase_pm_is_in_desired_state_with_l2_powered(
+		struct kbase_device *kbdev)
+{
+	bool in_desired_state = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (kbase_pm_is_in_desired_state_nolock(kbdev) &&
+			(kbdev->pm.backend.l2_state == KBASE_L2_ON))
+		in_desired_state = true;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return in_desired_state;
+}
+
+static void kbase_pm_trace_power_state(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+	kbase_trace_mali_pm_status(KBASE_PM_CORE_L2,
+					kbase_pm_get_ready_cores(kbdev,
+						KBASE_PM_CORE_L2));
+	kbase_trace_mali_pm_status(KBASE_PM_CORE_SHADER,
+					kbase_pm_get_ready_cores(kbdev,
+						KBASE_PM_CORE_SHADER));
+	kbase_trace_mali_pm_status(KBASE_PM_CORE_TILER,
+					kbase_pm_get_ready_cores(kbdev,
+						KBASE_PM_CORE_TILER));
+	if (corestack_driver_control)
+		kbase_trace_mali_pm_status(KBASE_PM_CORE_STACK,
+						kbase_pm_get_ready_cores(kbdev,
+							KBASE_PM_CORE_STACK));
+#endif
+
+	KBASE_TLSTREAM_AUX_PM_STATE(
+			KBASE_PM_CORE_L2,
+			kbase_pm_get_ready_cores(
+				kbdev, KBASE_PM_CORE_L2));
+	KBASE_TLSTREAM_AUX_PM_STATE(
+			KBASE_PM_CORE_SHADER,
+			kbase_pm_get_ready_cores(
+				kbdev, KBASE_PM_CORE_SHADER));
+	KBASE_TLSTREAM_AUX_PM_STATE(
+			KBASE_PM_CORE_TILER,
+			kbase_pm_get_ready_cores(
+				kbdev,
+				KBASE_PM_CORE_TILER));
+
+	if (corestack_driver_control)
+		KBASE_TLSTREAM_AUX_PM_STATE(
+				KBASE_PM_CORE_STACK,
+				kbase_pm_get_ready_cores(
+					kbdev,
+					KBASE_PM_CORE_STACK));
+}
+
+void kbase_pm_update_state(struct kbase_device *kbdev)
+{
+	enum kbase_shader_core_state prev_shaders_state =
+			kbdev->pm.backend.shaders_state;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!kbdev->pm.backend.gpu_powered)
+		return; /* Do nothing if the GPU is off */
+
+	kbase_pm_l2_update_state(kbdev);
+	kbase_pm_shaders_update_state(kbdev);
+
+	/* If the shaders just turned off, re-invoke the L2 state machine, in
+	 * case it was waiting for the shaders to turn off before powering down
+	 * the L2.
+	 */
+	if (prev_shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF &&
+			kbdev->pm.backend.shaders_state == KBASE_SHADERS_OFF_CORESTACK_OFF)
+		kbase_pm_l2_update_state(kbdev);
+
+	if (kbase_pm_is_in_desired_state_nolock(kbdev)) {
+		KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, NULL,
+				true, kbdev->pm.backend.shaders_avail);
+
+		kbase_pm_trace_power_state(kbdev);
+
+		KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
+		wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
+	}
+}
+
+static enum hrtimer_restart
+shader_tick_timer_callback(struct hrtimer *timer)
+{
+	struct kbasep_pm_tick_timer_state *stt = container_of(timer,
+			struct kbasep_pm_tick_timer_state, timer);
+	struct kbase_device *kbdev = container_of(stt, struct kbase_device,
+			pm.backend.shader_tick_timer);
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	unsigned long flags;
+	enum hrtimer_restart restart = HRTIMER_NORESTART;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (stt->remaining_ticks &&
+			backend->shaders_state == KBASE_SHADERS_WAIT_OFF_CORESTACK_ON) {
+		stt->remaining_ticks--;
+
+		/* If the remaining ticks just changed from 1 to 0, invoke the
+		 * PM state machine to power off the shader cores.
+		 */
+		if (!stt->remaining_ticks && !backend->shaders_desired)
+			kbase_pm_update_state(kbdev);
+	}
+
+	if (stt->needed) {
+		hrtimer_forward_now(timer, stt->configured_interval);
+		restart = HRTIMER_RESTART;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return restart;
+}
+
+int kbase_pm_state_machine_init(struct kbase_device *kbdev)
+{
+	struct kbasep_pm_tick_timer_state *stt = &kbdev->pm.backend.shader_tick_timer;
+
+	stt->wq = alloc_workqueue("kbase_pm_shader_poweroff", WQ_HIGHPRI | WQ_UNBOUND, 1);
+	if (!stt->wq)
+		return -ENOMEM;
+
+	INIT_WORK(&stt->work, shader_poweroff_timer_stop_callback);
+
+	stt->needed = false;
+	hrtimer_init(&stt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	stt->timer.function = shader_tick_timer_callback;
+	stt->configured_interval = HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
+	stt->configured_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
+
+	return 0;
+}
+
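+/*
+ * Note (illustrative): with the defaults above, the shader power-off
+ * hysteresis works out to roughly
+ *
+ *   DEFAULT_PM_POWEROFF_TICK_SHADER * DEFAULT_PM_GPU_POWEROFF_TICK_NS
+ *
+ * nanoseconds, i.e. configured_ticks tick periods of configured_interval
+ * each, counted down by shader_tick_timer_callback().
+ */
+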
+void kbase_pm_state_machine_term(struct kbase_device *kbdev)
+{
+	hrtimer_cancel(&kbdev->pm.backend.shader_tick_timer.timer);
+	destroy_workqueue(kbdev->pm.backend.shader_tick_timer.wq);
+}
+
+void kbase_pm_reset_start_locked(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	backend->in_reset = true;
+	backend->l2_state = KBASE_L2_RESET_WAIT;
+	backend->shaders_state = KBASE_SHADERS_RESET_WAIT;
+
+	/* We're in a reset, so hwcnt will have been synchronously disabled by
+	 * this function's caller as part of the reset process. We therefore
+	 * know that any call to kbase_hwcnt_context_disable_atomic, if
+	 * required to sync the hwcnt refcount with our internal state, is
+	 * guaranteed to succeed.
+	 */
+	backend->hwcnt_desired = false;
+	if (!backend->hwcnt_disabled) {
+		WARN_ON(!kbase_hwcnt_context_disable_atomic(
+			kbdev->hwcnt_gpu_ctx));
+		backend->hwcnt_disabled = true;
+	}
+
+	shader_poweroff_timer_queue_cancel(kbdev);
+}
+
+void kbase_pm_reset_complete(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	backend->in_reset = false;
+	kbase_pm_update_state(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+/* Timeout for kbase_pm_wait_for_desired_state when wait_event_killable has
+ * aborted due to a fatal signal. If the time spent waiting has exceeded this
+ * threshold then there is most likely a hardware issue. */
+#define PM_TIMEOUT (5*HZ) /* 5s */
+
+static void kbase_pm_timed_out(struct kbase_device *kbdev)
+{
+	dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
+	dev_err(kbdev->dev, "Desired state :\n");
+	dev_err(kbdev->dev, "\tShader=%016llx\n",
+			kbdev->pm.backend.shaders_desired ? kbdev->pm.backend.shaders_avail : 0);
+	dev_err(kbdev->dev, "Current state :\n");
+	dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_READY_HI)),
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_READY_LO)));
+	dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_READY_HI)),
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(TILER_READY_LO)));
+	dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_READY_HI)),
+			kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(L2_READY_LO)));
+	dev_err(kbdev->dev, "Cores transitioning :\n");
+	dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					SHADER_PWRTRANS_HI)),
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					SHADER_PWRTRANS_LO)));
+	dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					TILER_PWRTRANS_HI)),
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					TILER_PWRTRANS_LO)));
+	dev_err(kbdev->dev, "\tL2    =%08x%08x\n",
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					L2_PWRTRANS_HI)),
+			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+					L2_PWRTRANS_LO)));
+
+	dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
+	if (kbase_prepare_to_reset_gpu(kbdev))
+		kbase_reset_gpu(kbdev);
+}
+
+void kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	unsigned long timeout;
+	int err;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	timeout = jiffies + PM_TIMEOUT;
+
+	/* Wait for cores */
+	err = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+			kbase_pm_is_in_desired_state_with_l2_powered(kbdev));
+
+	if (err < 0 && time_after(jiffies, timeout))
+		kbase_pm_timed_out(kbdev);
+}
+
+void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	unsigned long timeout;
+	int err;
+
+	/* Let the state machine latch the most recent desired state. */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	timeout = jiffies + PM_TIMEOUT;
+
+	/* Wait for cores */
+	err = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+			kbase_pm_is_in_desired_state(kbdev));
+
+	if (err < 0 && time_after(jiffies, timeout))
+		kbase_pm_timed_out(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_desired_state);
+
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	/*
+	 * Clear all interrupts,
+	 * and unmask them all.
+	 */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
+
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	/*
+	 * Mask all interrupts,
+	 * and clear them all.
+	 */
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0);
+	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
+
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
+	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
+}
+
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_pm_disable_interrupts_nolock(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
+
+/*
+ * pmu layout:
+ * 0x0000: PMU TAG (RO) (0xCAFECAFE)
+ * 0x0004: PMU VERSION ID (RO) (0x00000000)
+ * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
+{
+	bool reset_required = is_resume;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	lockdep_assert_held(&kbdev->js_data.runpool_mutex);
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	if (kbdev->pm.backend.gpu_powered) {
+		/* Already turned on */
+		if (kbdev->poweroff_pending)
+			kbase_pm_enable_interrupts(kbdev);
+		kbdev->poweroff_pending = false;
+		KBASE_DEBUG_ASSERT(!is_resume);
+		return;
+	}
+
+	kbdev->poweroff_pending = false;
+
+	KBASE_TRACE_ADD(kbdev, PM_GPU_ON, NULL, NULL, 0u, 0u);
+
+	if (is_resume && kbdev->pm.backend.callback_power_resume) {
+		kbdev->pm.backend.callback_power_resume(kbdev);
+		return;
+	} else if (kbdev->pm.backend.callback_power_on) {
+		reset_required = kbdev->pm.backend.callback_power_on(kbdev);
+	}
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+	kbdev->pm.backend.gpu_powered = true;
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (reset_required) {
+		/* GPU state was lost, reset GPU to ensure it is in a
+		 * consistent state */
+		kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
+	}
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_ctx_sched_restore_all_as(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	/* Enable the interrupts */
+	kbase_pm_enable_interrupts(kbdev);
+
+	/* Turn on the L2 caches */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->pm.backend.l2_desired = true;
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_on);
+
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
+{
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	/* ASSERT that the cores should now be unavailable. No lock needed. */
+	WARN_ON(kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF);
+
+	kbdev->poweroff_pending = true;
+
+	if (!kbdev->pm.backend.gpu_powered) {
+		/* Already turned off */
+		if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+			kbdev->pm.backend.callback_power_suspend(kbdev);
+		return true;
+	}
+
+	KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
+
+	/* Disable interrupts. This also clears any outstanding interrupts */
+	kbase_pm_disable_interrupts(kbdev);
+	/* Ensure that any IRQ handlers have finished */
+	kbase_synchronize_irqs(kbdev);
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (atomic_read(&kbdev->faults_pending)) {
+		/* Page/bus faults are still being processed. The GPU cannot
+		 * be powered off until they have completed. */
+		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+									flags);
+		return false;
+	}
+
+	kbase_pm_cache_snoop_disable(kbdev);
+
+	/* The GPU power may be turned off from this point */
+	kbdev->pm.backend.gpu_powered = false;
+	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
+
+	if (is_suspend && kbdev->pm.backend.callback_power_suspend)
+		kbdev->pm.backend.callback_power_suspend(kbdev);
+	else if (kbdev->pm.backend.callback_power_off)
+		kbdev->pm.backend.callback_power_off(kbdev);
+	return true;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_clock_off);
+
+struct kbasep_reset_timeout_data {
+	struct hrtimer timer;
+	bool timed_out;
+	struct kbase_device *kbdev;
+};
+
+void kbase_pm_reset_done(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	kbdev->pm.backend.reset_done = true;
+	wake_up(&kbdev->pm.backend.reset_done_wait);
+}
+
+/**
+ * kbase_pm_wait_for_reset - Wait for a reset to happen
+ *
+ * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state.
+ *
+ * @kbdev: Kbase device
+ */
+static void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	wait_event(kbdev->pm.backend.reset_done_wait,
+						(kbdev->pm.backend.reset_done));
+	kbdev->pm.backend.reset_done = false;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_reset_done);
+
+static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
+{
+	struct kbasep_reset_timeout_data *rtdata =
+		container_of(timer, struct kbasep_reset_timeout_data, timer);
+
+	rtdata->timed_out = 1;
+
+	/* Set the wait queue to wake up kbase_pm_init_hw even though the reset
+	 * hasn't completed */
+	kbase_pm_reset_done(rtdata->kbdev);
+
+	return HRTIMER_NORESTART;
+}
+
+static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
+{
+	struct device_node *np = kbdev->dev->of_node;
+	u32 jm_values[4];
+	const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+		GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+	const u32 major = (gpu_id & GPU_ID_VERSION_MAJOR) >>
+		GPU_ID_VERSION_MAJOR_SHIFT;
+
+	kbdev->hw_quirks_sc = 0;
+
+	/* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE (see PRLAM-8443)
+	 * and due to MIDGLES-3539 (see PRLAM-11035). */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
+			kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
+		kbdev->hw_quirks_sc |= SC_LS_PAUSEBUFFER_DISABLE;
+
+	/* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327.
+	 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10327))
+		kbdev->hw_quirks_sc |= SC_SDC_DISABLE_OQ_DISCARD;
+
+#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
+	/* Enable alternative hardware counter selection if configured. */
+	if (!GPU_ID_IS_NEW_FORMAT(prod_id))
+		kbdev->hw_quirks_sc |= SC_ALT_COUNTERS;
+#endif
+
+	/* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
+		kbdev->hw_quirks_sc |= SC_ENABLE_TEXGRD_FLAGS;
+
+	if (!kbase_hw_has_issue(kbdev, GPUCORE_1619)) {
+		if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
+			kbdev->hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
+		else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */
+			kbdev->hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
+	}
+
+	if (!kbdev->hw_quirks_sc)
+		kbdev->hw_quirks_sc = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(SHADER_CONFIG));
+
+	kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
+			GPU_CONTROL_REG(TILER_CONFIG));
+
+	/* Set tiler clock gate override if required */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
+		kbdev->hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
+
+	/* Limit the GPU bus bandwidth if the platform needs this. */
+	kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
+			GPU_CONTROL_REG(L2_MMU_CONFIG));
+
+
+	/* Limit read & write ID width for AXI */
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG)) {
+		kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS);
+		kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_ARID_LIMIT & 0x7) <<
+				L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT;
+
+		kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
+		kbdev->hw_quirks_mmu |= (DEFAULT_3BIT_AWID_LIMIT & 0x7) <<
+				L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT;
+	} else {
+		kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
+		kbdev->hw_quirks_mmu |= (DEFAULT_ARID_LIMIT & 0x3) <<
+				L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
+
+		kbdev->hw_quirks_mmu &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
+		kbdev->hw_quirks_mmu |= (DEFAULT_AWID_LIMIT & 0x3) <<
+				L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
+	}
+
+	if (kbdev->system_coherency == COHERENCY_ACE) {
+		/* Allow memory configuration disparity to be ignored: we
+		 * optimize the use of shared memory and thus expect some
+		 * disparity in the memory configuration. */
+		kbdev->hw_quirks_mmu |= L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY;
+	}
+
+	kbdev->hw_quirks_jm = 0;
+	/* Only for T86x/T88x-based products after r2p0 */
+	if (prod_id >= 0x860 && prod_id <= 0x880 && major >= 2) {
+
+		if (of_property_read_u32_array(np,
+					"jm_config",
+					&jm_values[0],
+					ARRAY_SIZE(jm_values))) {
+			/* Entry not in device tree, use defaults  */
+			jm_values[0] = 0;
+			jm_values[1] = 0;
+			jm_values[2] = 0;
+			jm_values[3] = JM_MAX_JOB_THROTTLE_LIMIT;
+		}
+
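+		/*
+		 * Illustrative example (property name taken from the lookup
+		 * above; the values shown are hypothetical): a device tree
+		 * entry of the form
+		 *
+		 *   jm_config = <0 0 1 63>;
+		 *
+		 * would leave the timestamp and clock gate overrides clear and
+		 * set JM_JOB_THROTTLE_ENABLE with a throttle limit of 63, via
+		 * the aggregation below.
+		 */
+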
+		/* Limit the job throttle limit to 6 bits */
+		if (jm_values[3] > JM_MAX_JOB_THROTTLE_LIMIT) {
+			dev_dbg(kbdev->dev, "JOB_THROTTLE_LIMIT supplied in device tree is too large. Limiting to MAX (63).");
+			jm_values[3] = JM_MAX_JOB_THROTTLE_LIMIT;
+		}
+
+		/* Aggregate to one integer. */
+		kbdev->hw_quirks_jm |= (jm_values[0] ?
+				JM_TIMESTAMP_OVERRIDE : 0);
+		kbdev->hw_quirks_jm |= (jm_values[1] ?
+				JM_CLOCK_GATE_OVERRIDE : 0);
+		kbdev->hw_quirks_jm |= (jm_values[2] ?
+				JM_JOB_THROTTLE_ENABLE : 0);
+		kbdev->hw_quirks_jm |= (jm_values[3] <<
+				JM_JOB_THROTTLE_LIMIT_SHIFT);
+
+	} else if (GPU_ID_IS_NEW_FORMAT(prod_id) &&
+			   (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
+					   GPU_ID2_PRODUCT_TMIX)) {
+		/* Only for tMIx */
+		u32 coherency_features;
+
+		coherency_features = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(COHERENCY_FEATURES));
+
+		/* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
+		 * documented for tMIx so force correct value here.
+		 */
+		if (coherency_features ==
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
+			kbdev->hw_quirks_jm |=
+				(COHERENCY_ACE_LITE | COHERENCY_ACE) <<
+				JM_FORCE_COHERENCY_FEATURES_SHIFT;
+		}
+	}
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING))
+		kbdev->hw_quirks_sc |= SC_TLS_HASH_ENABLE;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
+		int default_idvs_group_size = 0xF;
+		u32 tmp;
+
+		if (of_property_read_u32(kbdev->dev->of_node,
+					  "idvs-group-size", &tmp))
+			tmp = default_idvs_group_size;
+
+		if (tmp > JM_MAX_IDVS_GROUP_SIZE) {
+			dev_err(kbdev->dev,
+				"idvs-group-size of %d is too large. Maximum value is %d",
+				tmp, JM_MAX_IDVS_GROUP_SIZE);
+			tmp = default_idvs_group_size;
+		}
+
+		kbdev->hw_quirks_jm |= tmp << JM_IDVS_GROUP_SIZE_SHIFT;
+	}
+
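+	/*
+	 * Illustrative example (value hypothetical): a platform could set
+	 *
+	 *   idvs-group-size = <0x8>;
+	 *
+	 * in its device tree node to override the 0xF default used above.
+	 */
+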
+	if (!kbdev->hw_quirks_jm)
+		kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(JM_CONFIG));
+
+#define MANUAL_POWER_CONTROL ((u32)(1 << 8))
+	if (corestack_driver_control)
+		kbdev->hw_quirks_jm |= MANUAL_POWER_CONTROL;
+}
+
+static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
+{
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
+			kbdev->hw_quirks_sc);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
+			kbdev->hw_quirks_tiler);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
+			kbdev->hw_quirks_mmu);
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
+			kbdev->hw_quirks_jm);
+
+}
+
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
+{
+	if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) &&
+		!kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+		if (kbdev->snoop_enable_smc != 0)
+			kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
+#endif /* CONFIG_ARM64 */
+		dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n");
+		kbdev->cci_snoop_enabled = true;
+	}
+}
+
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
+{
+	if (kbdev->cci_snoop_enabled) {
+#ifdef CONFIG_ARM64
+		if (kbdev->snoop_disable_smc != 0) {
+			mali_cci_flush_l2(kbdev);
+			kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
+		}
+#endif /* CONFIG_ARM64 */
+		dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
+		kbdev->cci_snoop_enabled = false;
+	}
+}
+
+static int kbase_pm_do_reset(struct kbase_device *kbdev)
+{
+	struct kbasep_reset_timeout_data rtdata;
+	struct kbase_pm_callback_conf *callbacks;
+	int ret;
+
+	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, NULL, 0u, 0);
+
+	KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev);
+
+	if (callbacks->soft_reset_callback) {
+		ret = callbacks->soft_reset_callback(kbdev);
+		if (ret)
+			return ret;
+	} else
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+				GPU_COMMAND_SOFT_RESET);
+
+	/* Unmask the reset complete interrupt only */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED);
+
+	/* Initialize a structure for tracking the status of the reset */
+	rtdata.kbdev = kbdev;
+	rtdata.timed_out = 0;
+
+	/* Create a timer to use as a timeout on the reset */
+	hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rtdata.timer.function = kbasep_reset_timeout;
+
+	hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+							HRTIMER_MODE_REL);
+
+	/* Wait for the RESET_COMPLETED interrupt to be raised */
+	kbase_pm_wait_for_reset(kbdev);
+
+	if (rtdata.timed_out == 0) {
+		/* GPU has been reset */
+		hrtimer_cancel(&rtdata.timer);
+		destroy_hrtimer_on_stack(&rtdata.timer);
+		return 0;
+	}
+
+	/* No interrupt has been received - check if the RAWSTAT register says
+	 * the reset has completed */
+	if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) &
+							RESET_COMPLETED) {
+		/* The interrupt is set in the RAWSTAT; this suggests that the
+		 * interrupts are not getting to the CPU */
+		dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
+		/* If interrupts aren't working we can't continue. */
+		destroy_hrtimer_on_stack(&rtdata.timer);
+		return -EINVAL;
+	}
+
+	/* The GPU doesn't seem to be responding to the reset so try a hard
+	 * reset */
+	dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
+								RESET_TIMEOUT);
+	KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+						GPU_COMMAND_HARD_RESET);
+
+	/* Restart the timer to wait for the hard reset to complete */
+	rtdata.timed_out = 0;
+
+	hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+							HRTIMER_MODE_REL);
+
+	/* Wait for the RESET_COMPLETED interrupt to be raised */
+	kbase_pm_wait_for_reset(kbdev);
+
+	if (rtdata.timed_out == 0) {
+		/* GPU has been reset */
+		hrtimer_cancel(&rtdata.timer);
+		destroy_hrtimer_on_stack(&rtdata.timer);
+		return 0;
+	}
+
+	destroy_hrtimer_on_stack(&rtdata.timer);
+
+	dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
+								RESET_TIMEOUT);
+
+	return -EINVAL;
+}
+
+static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
+{
+	struct kbase_device *kbdev = pdev->data;
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+		GPU_COMMAND_SET_PROTECTED_MODE);
+	return 0;
+}
+
+static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
+{
+	struct kbase_device *kbdev = pdev->data;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	return kbase_pm_do_reset(kbdev);
+}
+
+struct protected_mode_ops kbase_native_protected_ops = {
+	.protected_mode_enable = kbasep_protected_mode_enable,
+	.protected_mode_disable = kbasep_protected_mode_disable
+};
+
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+{
+	unsigned long irq_flags;
+	int err;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	/* Ensure the clock is on before attempting to access the hardware */
+	if (!kbdev->pm.backend.gpu_powered) {
+		if (kbdev->pm.backend.callback_power_on)
+			kbdev->pm.backend.callback_power_on(kbdev);
+
+		spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock,
+								irq_flags);
+		kbdev->pm.backend.gpu_powered = true;
+		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
+								irq_flags);
+	}
+
+	/* Ensure interrupts are off to begin with, this also clears any
+	 * outstanding interrupts */
+	kbase_pm_disable_interrupts(kbdev);
+	/* Ensure cache snoops are disabled before reset. */
+	kbase_pm_cache_snoop_disable(kbdev);
+	/* Prepare for the soft-reset */
+	kbdev->pm.backend.reset_done = false;
+
+	/* The cores should be made unavailable due to the reset */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+	if (kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
+				NULL, 0u, (u32)0u);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+	/* Soft reset the GPU */
+	if (kbdev->protected_mode_support)
+		err = kbdev->protected_ops->protected_mode_disable(
+				kbdev->protected_dev);
+	else
+		err = kbase_pm_do_reset(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+	kbdev->protected_mode = false;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
+	if (err)
+		goto exit;
+
+	if (flags & PM_HW_ISSUES_DETECT)
+		kbase_pm_hw_issues_detect(kbdev);
+
+	kbase_pm_hw_issues_apply(kbdev);
+	kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
+
+	/* Sanity check protected mode was left after reset */
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+		u32 gpu_status = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(GPU_STATUS));
+
+		WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
+	}
+
+	/* If the cycle counter was in use, re-enable it; enable_irqs will only
+	 * be false when called from kbase_pm_powerup. */
+	if (kbdev->pm.backend.gpu_cycle_counter_requests &&
+						(flags & PM_ENABLE_IRQS)) {
+		kbase_pm_enable_interrupts(kbdev);
+
+		/* Re-enable the counters if we need to */
+		spin_lock_irqsave(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+		if (kbdev->pm.backend.gpu_cycle_counter_requests)
+			kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CYCLE_COUNT_START);
+		spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+								irq_flags);
+
+		kbase_pm_disable_interrupts(kbdev);
+	}
+
+	if (flags & PM_ENABLE_IRQS)
+		kbase_pm_enable_interrupts(kbdev);
+
+exit:
+	/* Re-enable GPU hardware counters if we're resetting from protected
+	 * mode.
+	 */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
+	kbdev->protected_mode_hwcnt_desired = true;
+	if (kbdev->protected_mode_hwcnt_disabled) {
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+		kbdev->protected_mode_hwcnt_disabled = false;
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+	return err;
+}
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters
+ *
+ * Increase the count of cycle counter users and turn the cycle counters on if
+ * they were previously off
+ *
+ * This function is designed to be called by
+ * kbase_pm_request_gpu_cycle_counter() or
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on() only
+ *
+ * When this function is called the L2 cache must be on - i.e., the GPU must be
+ * on.
+ *
+ * @kbdev:     The kbase device structure of the device
+ */
+static void
+kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+
+	++kbdev->pm.backend.gpu_cycle_counter_requests;
+
+	if (1 == kbdev->pm.backend.gpu_cycle_counter_requests)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CYCLE_COUNT_START);
+
+	spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+}
+
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+								INT_MAX);
+
+	kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter);
+
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+								INT_MAX);
+
+	kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
+
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+
+	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0);
+
+	--kbdev->pm.backend.gpu_cycle_counter_requests;
+
+	if (0 == kbdev->pm.backend.gpu_cycle_counter_requests)
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+					GPU_COMMAND_CYCLE_COUNT_STOP);
+
+	spin_unlock_irqrestore(
+			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+									flags);
+}
+
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
new file mode 100644
index 0000000..e88b3a8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
@@ -0,0 +1,624 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Power management API definitions used internally by GPU backend
+ */
+
+#ifndef _KBASE_BACKEND_PM_INTERNAL_H_
+#define _KBASE_BACKEND_PM_INTERNAL_H_
+
+#include <mali_kbase_hwaccess_pm.h>
+
+#include "mali_kbase_pm_ca.h"
+#include "mali_kbase_pm_policy.h"
+
+
+/**
+ * kbase_pm_dev_idle - The GPU is idle.
+ *
+ * The OS may choose to turn off idle devices
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_idle(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_dev_activate - The GPU is active.
+ *
+ * The OS should avoid opportunistically turning off the GPU while it is active
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_dev_activate(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_get_present_cores - Get details of the cores that are present in
+ *                              the device.
+ *
+ * This function can be called by the active power policy to return a bitmask
+ * of the cores (of a specified type) present in the GPU device.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid
+ *         pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of cores present
+ */
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_active_cores - Get details of the cores that are currently
+ *                             active in the device.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are actively processing work (i.e.
+ * turned on *and* busy).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of active cores
+ */
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_trans_cores - Get details of the cores that are currently
+ *                            transitioning between power states.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are currently transitioning between
+ * power states.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of transitioning cores
+ */
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
+/**
+ * kbase_pm_get_ready_cores - Get details of the cores that are currently
+ *                            powered and ready for jobs.
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) that are powered and ready for jobs (they may
+ * or may not be currently executing jobs).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * Return: The bit mask of ready cores
+ */
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
+						enum kbase_pm_core_type type);
+
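+/*
+ * Illustrative sketch (not an additional API): a power policy could combine
+ * the queries above to find cores that are present but neither ready nor
+ * transitioning, e.g.
+ *
+ *   u64 present = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
+ *   u64 ready   = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
+ *   u64 trans   = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);
+ *   u64 off     = present & ~(ready | trans);
+ */
+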
+/**
+ * kbase_pm_clock_on - Turn the clock for the device on, and enable device
+ *                     interrupts.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU on.
+ * It should be modified during integration to perform the necessary actions to
+ * ensure that the GPU is fully powered and clocked.
+ *
+ * @kbdev:     The kbase device structure for the device (must be a valid
+ *             pointer)
+ * @is_resume: true if clock on due to resume after suspend, false otherwise
+ */
+void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
+ *                      device off.
+ *
+ * This function can be used by a power policy to turn the clock for the GPU
+ * off. It should be modified during integration to perform the necessary
+ * actions to turn the clock off (if this is possible in the integration).
+ *
+ * @kbdev:      The kbase device structure for the device (must be a valid
+ *              pointer)
+ * @is_suspend: true if clock off due to suspend, false otherwise
+ *
+ * Return: true  if clock was turned off, or
+ *         false if clock can not be turned off due to pending page/bus fault
+ *               workers. Caller must flush MMU workqueues and retry
+ */
+bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend);
+
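+/*
+ * Illustrative sketch (not part of the driver): the retry protocol described
+ * above. flush_fault_workers() is a hypothetical stand-in for whatever the
+ * integration uses to flush the pending page/bus fault workqueues.
+ *
+ *	while (!kbase_pm_clock_off(kbdev, false))
+ *		flush_fault_workers(kbdev);
+ */
+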
+/**
+ * kbase_pm_enable_interrupts - Enable interrupts on the device.
+ *
+ * Interrupts are also enabled after a call to kbase_pm_clock_on().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts - Disable interrupts on the device.
+ *
+ * This prevents delivery of Power Management interrupts to the CPU so that
+ * kbase_pm_update_state() will not be called from the IRQ handler
+ * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
+ *
+ * Interrupts are also disabled after a call to kbase_pm_clock_off().
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
+ *                                      that does not take the hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_init_hw - Initialize the hardware.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags specifying the type of PM init
+ *
+ * This function checks the GPU ID register to ensure that the GPU is supported
+ * by the driver and performs a reset on the device so that it is in a known
+ * state before the device is used.
+ *
+ * Return: 0 if the device is supported and successfully reset.
+ */
+int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * kbase_pm_reset_done - The GPU has been reset successfully.
+ *
+ * This function must be called by the GPU interrupt handler when the
+ * RESET_COMPLETED bit is set. It signals to the power management initialization
+ * code that the GPU has been successfully reset.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_reset_done(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
+ *                                   reached
+ *
+ * Wait for the L2 and shader power state machines to reach the states
+ * corresponding to the values of 'l2_desired' and 'shaders_desired'.
+ *
+ * The usual use-case for this is to ensure cores are 'READY' after performing
+ * a GPU Reset.
+ *
+ * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
+ * because this function will take that lock itself.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on
+ *
+ * Wait for the L2 to be powered on, and for the L2 and shader state machines to
+ * stabilise by reaching the states corresponding to the values of 'l2_desired'
+ * and 'shaders_desired'.
+ *
+ * kbdev->pm.active_count must be non-zero when calling this function.
+ *
+ * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
+ * because this function will take that lock itself.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
+ *                                      where the caller must hold
+ *                                      kbase_device.pm.power_change_lock
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_state - Update the L2 and shader power state machines
+ * @kbdev: Device pointer
+ */
+void kbase_pm_update_state(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_state_machine_init - Initialize the state machines, primarily the
+ *                               shader poweroff timer
+ * @kbdev: Device pointer
+ */
+int kbase_pm_state_machine_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_state_machine_term - Clean up the PM state machines' data
+ * @kbdev: Device pointer
+ */
+void kbase_pm_state_machine_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores_state - Update the desired state of shader cores from
+ *                               the Power Policy, and begin any power
+ *                               transitions.
+ *
+ * This function will update the desired_xx_state members of
+ * struct kbase_pm_device_data by calling into the current Power Policy. It will
+ * then begin power transitions to make the hardware achieve the desired shader
+ * core state.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_update_cores_state(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
+ *
+ * This must be called before other metric gathering APIs are called.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 on success, error code on error
+ */
+int kbasep_pm_metrics_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
+ *
+ * This must be called when metric gathering is no longer required. It is an
+ * error to call any metrics gathering function (other than
+ * kbasep_pm_metrics_init()) after calling this function.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbasep_pm_metrics_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
+ *                         update the vsync metric.
+ *
+ * This function should be called by the frame buffer driver to update whether
+ * the system is hitting the vsync target or not. buffer_updated should be true
+ * if the vsync corresponded with a new frame being displayed, otherwise it
+ * should be false. This function does not need to be called every vsync, but
+ * only when the value of @buffer_updated differs from a previous call.
+ *
+ * @kbdev:          The kbase device structure for the device (must be a
+ *                  valid pointer)
+ * @buffer_updated: True if the buffer has been updated on this VSync,
+ *                  false otherwise
+ */
+void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
+
+/**
+ * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
+ *                            the clock speed of the GPU.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function should be called regularly by the DVFS system to check whether
+ * the clock speed of the GPU needs updating.
+ */
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
+ *                                      needed
+ *
+ * If the caller is the first caller then the GPU cycle counters will be enabled
+ * along with the l2 cache.
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called).
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
+ *                                               needed (l2 cache already on)
+ *
+ * This is a version of kbase_pm_request_gpu_cycle_counter() that may be called
+ * when the l2 cache is known to be on and guaranteed to remain on until the
+ * subsequent call of kbase_pm_release_gpu_cycle_counter(), such as when a job
+ * is submitted. It does not sleep and can be called from atomic context.
+ *
+ * The GPU must be powered when calling this function (i.e.
+ * kbase_pm_context_active() must have been called) and the l2 cache must be
+ * powered on.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
+ *                                      longer in use
+ *
+ * If the caller is the last caller then the GPU cycle counters will be
+ * disabled. A request must have been made before a call to this.
+ *
+ * Caller must not hold the hwaccess_lock, as it will be taken in this function.
+ * If the caller is already holding this lock then
+ * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
+ *                                             that does not take hwaccess_lock
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
+
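+/*
+ * Illustrative sketch (not part of the driver): the expected request/release
+ * pairing, mirroring how kbase_backend_get_gpu_time() in
+ * backend/gpu/mali_kbase_time.c uses these calls. The GPU must already be
+ * powered (kbase_pm_context_active() held) across the pair.
+ *
+ *	kbase_pm_request_gpu_cycle_counter(kbdev);
+ *	cycles = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO));
+ *	kbase_pm_release_gpu_cycle_counter(kbdev);
+ */
+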
+/**
+ * kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to
+ *                                       complete
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device
+ *
+ * Setup the power management callbacks and initialize/enable the runtime-pm
+ * for the Mali GPU platform device, using the callback function. This must be
+ * called before the kbase_pm_register_access_enable() function.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+int kbase_pm_runtime_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_runtime_term - Disable runtime-pm for Mali GPU platform device
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_runtime_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_enable - Enable access to GPU registers
+ *
+ * Enables access to the GPU registers before power management has powered up
+ * the GPU with kbase_pm_powerup().
+ *
+ * This causes the power management callbacks provided in the driver
+ * configuration to be called to turn on power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf.
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_register_access_disable - Disable early register access
+ *
+ * Disables access to the GPU registers enabled earlier by a call to
+ * kbase_pm_register_access_enable().
+ *
+ * This causes the power management callbacks provided in the driver
+ * configuration to be called to turn off power and/or clocks to the GPU. See
+ * kbase_pm_callback_conf.
+ *
+ * This should only be used before power management is powered up with
+ * kbase_pm_powerup()
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_register_access_disable(struct kbase_device *kbdev);
+
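+/*
+ * Illustrative sketch (not part of the driver): probing a register before
+ * kbase_pm_powerup(), bracketed by the enable/disable pair above. GPU_ID is
+ * assumed here purely as an example register offset.
+ *
+ *	kbase_pm_register_access_enable(kbdev);
+ *	gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
+ *	kbase_pm_register_access_disable(kbdev);
+ */
+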
+/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
+ * function */
+
+/**
+ * kbase_pm_metrics_is_active - Check if the power management metrics
+ *                              collection is active.
+ *
+ * Note that this returns whether the power management metrics collection was
+ * active at the time of calling; it is possible that the metrics collection
+ * enable state has changed since the call was made.
+ *
+ * The caller must handle the consequence that the state may have changed.
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * Return: true if metrics collection was active else false.
+ */
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
+ *
+ * @kbdev:     The kbase device structure for the device (must be a valid
+ *             pointer)
+ * @is_resume: true if power on due to resume after suspend,
+ *             false otherwise
+ */
+void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);
+
+/**
+ * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
+ *                        requested.
+ *
+ * @kbdev:      The kbase device structure for the device (must be a valid
+ *              pointer)
+ * @is_suspend: true if power off due to suspend,
+ *              false otherwise
+ */
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
+
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+			       struct kbasep_pm_metrics *last,
+			       struct kbasep_pm_metrics *diff);
+#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+
+/**
+ * kbase_platform_dvfs_event - Report utilisation to DVFS code
+ *
+ * Function provided by platform specific code when DVFS is enabled to allow
+ * the power management metrics system to report utilisation.
+ *
+ * @kbdev:         The kbase device structure for the device (must be a
+ *                 valid pointer)
+ * @utilisation:   The current calculated utilisation by the metrics system.
+ * @util_gl_share: The current calculated gl share of utilisation.
+ * @util_cl_share: The current calculated cl share of utilisation per core
+ *                 group.
+ * Return:         0 on failure and non-zero on success.
+ */
+
+int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
+	u32 util_gl_share, u32 util_cl_share[2]);
+#endif
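+
+/*
+ * Illustrative sketch (not part of the driver): a minimal platform
+ * implementation of kbase_platform_dvfs_event() above, assuming a hypothetical
+ * helper platform_set_gpu_freq() that picks between two operating points.
+ *
+ *	int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
+ *		u32 util_gl_share, u32 util_cl_share[2])
+ *	{
+ *		platform_set_gpu_freq(kbdev, utilisation > 75);
+ *		return 1;
+ *	}
+ */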
+
+void kbase_pm_power_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_metrics_update - Inform the metrics system that an atom is either
+ *                           about to be run or has just completed.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @now:   Pointer to the timestamp of the change, or NULL to use current time
+ *
+ * Caller must hold hwaccess_lock
+ */
+void kbase_pm_metrics_update(struct kbase_device *kbdev,
+				ktime_t *now);
+
+/**
+ * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
+ * If the GPU does not have coherency this is a no-op
+ * @kbdev:	Device pointer
+ *
+ * This function should be called after L2 power up.
+ */
+
+void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
+ * If the GPU does not have coherency this is a no-op
+ * @kbdev:	Device pointer
+ *
+ * This function should be called before L2 power off.
+ */
+void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
+
+#ifdef CONFIG_MALI_DEVFREQ
+/**
+ * kbase_devfreq_set_core_mask - Set devfreq core mask
+ * @kbdev:     Device pointer
+ * @core_mask: New core mask
+ *
+ * This function is used by devfreq to change the available core mask as
+ * required by Dynamic Core Scaling.
+ */
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
+#endif
+
+/**
+ * kbase_pm_reset_start_locked - Signal that GPU reset has started
+ * @kbdev: Device pointer
+ *
+ * Normal power management operation will be suspended until the reset has
+ * completed.
+ *
+ * Caller must hold hwaccess_lock.
+ */
+void kbase_pm_reset_start_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_reset_complete - Signal that GPU reset has completed
+ * @kbdev: Device pointer
+ *
+ * Normal power management operation will be resumed. The power manager will
+ * re-evaluate what cores are needed and power on or off as required.
+ */
+void kbase_pm_reset_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_override_enable - Enable the protected mode override
+ * @kbdev: Device pointer
+ *
+ * When the protected mode override is enabled, all shader cores are requested
+ * to power down, and the L2 power state can be controlled by
+ * kbase_pm_protected_l2_override().
+ *
+ * Caller must hold hwaccess_lock.
+ */
+void kbase_pm_protected_override_enable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_override_disable - Disable the protected mode override
+ * @kbdev: Device pointer
+ *
+ * Caller must hold hwaccess_lock.
+ */
+void kbase_pm_protected_override_disable(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_protected_l2_override - Control the protected mode L2 override
+ * @kbdev: Device pointer
+ * @override: true to enable the override, false to disable
+ *
+ * When the driver is transitioning in or out of protected mode, the L2 cache is
+ * forced to power off. This can be overridden to force the L2 cache to power
+ * on. This is required to change coherency settings on some GPUs.
+ */
+void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override);
+
+/* If true, the driver should explicitly control corestack power management,
+ * instead of relying on the Power Domain Controller.
+ */
+extern bool corestack_driver_control;
+
+/* If true, disable powering-down of individual cores, and just power-down at
+ * the top-level using platform-specific code.
+ * If false, use the expected behaviour of controlling the individual cores
+ * from within the driver.
+ */
+extern bool platform_power_down_only;
+
+#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
new file mode 100644
index 0000000..6b9b686
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
@@ -0,0 +1,295 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Metrics for power management
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_pm.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include <backend/gpu/mali_kbase_jm_rb.h>
+#include <backend/gpu/mali_kbase_pm_defs.h>
+
+/* When VSync is being hit aim for utilisation between 70-90% */
+#define KBASE_PM_VSYNC_MIN_UTILISATION          70
+#define KBASE_PM_VSYNC_MAX_UTILISATION          90
+/* Otherwise aim for 10-40% */
+#define KBASE_PM_NO_VSYNC_MIN_UTILISATION       10
+#define KBASE_PM_NO_VSYNC_MAX_UTILISATION       40
+
+/* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
+ * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
+ * under 11s. Exceeding this will cause overflow */
+#define KBASE_PM_TIME_SHIFT			8
+
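+/*
+ * Worked example (illustrative): with KBASE_PM_TIME_SHIFT = 8, a busy interval
+ * of 1 ms adds 1000000 >> 8 = 3906 units to time_busy. Since
+ * kbase_pm_get_dvfs_action() below multiplies time_busy by 100 in u32
+ * arithmetic, time_busy must stay below 2^32 / 100 units, i.e. the sampling
+ * period must stay below (2^32 / 100) << 8 ns - the "slightly under 11s"
+ * quoted above.
+ */
+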
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
+{
+	unsigned long flags;
+	struct kbasep_pm_metrics_state *metrics;
+
+	KBASE_DEBUG_ASSERT(timer != NULL);
+
+	metrics = container_of(timer, struct kbasep_pm_metrics_state, timer);
+	kbase_pm_get_dvfs_action(metrics->kbdev);
+
+	spin_lock_irqsave(&metrics->lock, flags);
+
+	if (metrics->timer_active)
+		hrtimer_start(timer,
+			HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period),
+			HRTIMER_MODE_REL);
+
+	spin_unlock_irqrestore(&metrics->lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+int kbasep_pm_metrics_init(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	kbdev->pm.backend.metrics.kbdev = kbdev;
+
+	kbdev->pm.backend.metrics.time_period_start = ktime_get();
+	kbdev->pm.backend.metrics.gpu_active = false;
+	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+
+	kbdev->pm.backend.metrics.values.time_busy = 0;
+	kbdev->pm.backend.metrics.values.time_idle = 0;
+	kbdev->pm.backend.metrics.values.busy_cl[0] = 0;
+	kbdev->pm.backend.metrics.values.busy_cl[1] = 0;
+	kbdev->pm.backend.metrics.values.busy_gl = 0;
+
+	spin_lock_init(&kbdev->pm.backend.metrics.lock);
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	kbdev->pm.backend.metrics.timer_active = true;
+	hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	kbdev->pm.backend.metrics.timer.function = dvfs_callback;
+
+	hrtimer_start(&kbdev->pm.backend.metrics.timer,
+			HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
+			HRTIMER_MODE_REL);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
+
+void kbasep_pm_metrics_term(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	kbdev->pm.backend.metrics.timer_active = false;
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+	hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+}
+
+KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
+
+/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+ * function
+ */
+static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
+								ktime_t now)
+{
+	ktime_t diff;
+
+	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+	diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start);
+	if (ktime_to_ns(diff) < 0)
+		return;
+
+	if (kbdev->pm.backend.metrics.gpu_active) {
+		u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
+
+		kbdev->pm.backend.metrics.values.time_busy += ns_time;
+		if (kbdev->pm.backend.metrics.active_cl_ctx[0])
+			kbdev->pm.backend.metrics.values.busy_cl[0] += ns_time;
+		if (kbdev->pm.backend.metrics.active_cl_ctx[1])
+			kbdev->pm.backend.metrics.values.busy_cl[1] += ns_time;
+		if (kbdev->pm.backend.metrics.active_gl_ctx[0])
+			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
+		if (kbdev->pm.backend.metrics.active_gl_ctx[1])
+			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
+	} else {
+		kbdev->pm.backend.metrics.values.time_idle += (u32) (ktime_to_ns(diff)
+							>> KBASE_PM_TIME_SHIFT);
+	}
+
+	kbdev->pm.backend.metrics.time_period_start = now;
+}
+
+#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+			       struct kbasep_pm_metrics *last,
+			       struct kbasep_pm_metrics *diff)
+{
+	struct kbasep_pm_metrics *cur = &kbdev->pm.backend.metrics.values;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	kbase_pm_get_dvfs_utilisation_calc(kbdev, ktime_get());
+
+	memset(diff, 0, sizeof(*diff));
+	diff->time_busy = cur->time_busy - last->time_busy;
+	diff->time_idle = cur->time_idle - last->time_idle;
+	diff->busy_cl[0] = cur->busy_cl[0] - last->busy_cl[0];
+	diff->busy_cl[1] = cur->busy_cl[1] - last->busy_cl[1];
+	diff->busy_gl = cur->busy_gl - last->busy_gl;
+
+	*last = *cur;
+
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
+KBASE_EXPORT_TEST_API(kbase_pm_get_dvfs_metrics);
+#endif
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
+{
+	int utilisation, util_gl_share;
+	int util_cl_share[2];
+	int busy;
+	struct kbasep_pm_metrics *diff;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	diff = &kbdev->pm.backend.metrics.dvfs_diff;
+
+	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->pm.backend.metrics.dvfs_last, diff);
+
+	utilisation = (100 * diff->time_busy) /
+			max(diff->time_busy + diff->time_idle, 1u);
+
+	busy = max(diff->busy_gl + diff->busy_cl[0] + diff->busy_cl[1], 1u);
+	util_gl_share = (100 * diff->busy_gl) / busy;
+	util_cl_share[0] = (100 * diff->busy_cl[0]) / busy;
+	util_cl_share[1] = (100 * diff->busy_cl[1]) / busy;
+
+	kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share, util_cl_share);
+}
+
+bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
+{
+	bool isactive;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+	isactive = kbdev->pm.backend.metrics.timer_active;
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+
+	return isactive;
+}
+KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+/**
+ * kbase_pm_metrics_active_calc - Update PM active counts based on currently
+ *                                running atoms
+ * @kbdev: Device pointer
+ *
+ * The caller must hold kbdev->pm.backend.metrics.lock
+ */
+static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
+
+	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
+	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+	kbdev->pm.backend.metrics.gpu_active = false;
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);
+
+		/* Head atom may have just completed, so if it isn't running
+		 * then try the next atom */
+		if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED)
+			katom = kbase_gpu_inspect(kbdev, js, 1);
+
+		if (katom && katom->gpu_rb_state ==
+				KBASE_ATOM_GPU_RB_SUBMITTED) {
+			if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+				int device_nr = (katom->core_req &
+					BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)
+						? katom->device_nr : 0;
+				if (!WARN_ON(device_nr >= 2))
+					kbdev->pm.backend.metrics.
+						active_cl_ctx[device_nr] = 1;
+			} else {
+				/* Slot 2 should not be running non-compute
+				 * atoms */
+				if (!WARN_ON(js >= 2))
+					kbdev->pm.backend.metrics.
+						active_gl_ctx[js] = 1;
+			}
+			kbdev->pm.backend.metrics.gpu_active = true;
+		}
+	}
+}
+
+/* called when job is submitted to or removed from a GPU slot */
+void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp)
+{
+	unsigned long flags;
+	ktime_t now;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
+
+	if (!timestamp) {
+		now = ktime_get();
+		timestamp = &now;
+	}
+
+	/* Track how long CL and/or GL jobs have been busy for */
+	kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp);
+
+	kbase_pm_metrics_active_calc(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
new file mode 100644
index 0000000..2f06a0a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
@@ -0,0 +1,257 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Power policy API implementations
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_config_defaults.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+static const struct kbase_pm_policy *const all_policy_list[] = {
+#ifdef CONFIG_MALI_NO_MALI
+	&kbase_pm_always_on_policy_ops,
+	&kbase_pm_coarse_demand_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+	&kbase_pm_always_on_demand_policy_ops,
+#endif
+#else				/* CONFIG_MALI_NO_MALI */
+	&kbase_pm_coarse_demand_policy_ops,
+#if !MALI_CUSTOMER_RELEASE
+	&kbase_pm_always_on_demand_policy_ops,
+#endif
+	&kbase_pm_always_on_policy_ops
+#endif /* CONFIG_MALI_NO_MALI */
+};
+
+/* A filtered list of policies available in the system, calculated by filtering
+ * all_policy_list based on the flags provided by each policy.
+ */
+static const struct kbase_pm_policy *enabled_policy_list[ARRAY_SIZE(all_policy_list)];
+static size_t enabled_policy_count;
+
+static void generate_filtered_policy_list(void)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(all_policy_list); ++i) {
+		const struct kbase_pm_policy *pol = all_policy_list[i];
+
+		if (platform_power_down_only &&
+				(pol->flags & KBASE_PM_POLICY_FLAG_DISABLED_WITH_POWER_DOWN_ONLY))
+			continue;
+
+		enabled_policy_list[enabled_policy_count++] = pol;
+	}
+}
+
+int kbase_pm_policy_init(struct kbase_device *kbdev)
+{
+	generate_filtered_policy_list();
+	if (enabled_policy_count == 0)
+		return -EINVAL;
+
+	kbdev->pm.backend.pm_current_policy = enabled_policy_list[0];
+	kbdev->pm.backend.pm_current_policy->init(kbdev);
+
+	return 0;
+}
+
+void kbase_pm_policy_term(struct kbase_device *kbdev)
+{
+	kbdev->pm.backend.pm_current_policy->term(kbdev);
+}
+
+void kbase_pm_update_active(struct kbase_device *kbdev)
+{
+	struct kbase_pm_device_data *pm = &kbdev->pm;
+	struct kbase_pm_backend_data *backend = &pm->backend;
+	unsigned long flags;
+	bool active;
+
+	lockdep_assert_held(&pm->lock);
+
+	/* pm_current_policy will never be NULL while pm.lock is held */
+	KBASE_DEBUG_ASSERT(backend->pm_current_policy);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	active = backend->pm_current_policy->get_core_active(kbdev);
+	WARN((kbase_pm_is_active(kbdev) && !active),
+		"GPU is active but policy '%s' is indicating that it can be powered off",
+		kbdev->pm.backend.pm_current_policy->name);
+
+	if (active) {
+		/* Power on the GPU and any cores requested by the policy */
+		if (!pm->backend.invoke_poweroff_wait_wq_when_l2_off &&
+				pm->backend.poweroff_wait_in_progress) {
+			KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+			pm->backend.poweron_required = true;
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		} else {
+			/* Cancel the invocation of
+			 * kbase_pm_gpu_poweroff_wait_wq() from the L2 state
+			 * machine. This is safe - if
+			 * invoke_poweroff_wait_wq_when_l2_off is true, then
+			 * the poweroff work hasn't even been queued yet,
+			 * meaning we can go straight to powering on.
+			 */
+			pm->backend.invoke_poweroff_wait_wq_when_l2_off = false;
+			pm->backend.poweroff_wait_in_progress = false;
+			pm->backend.l2_desired = true;
+
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			kbase_pm_do_poweron(kbdev, false);
+		}
+	} else {
+		/* It is an error for the power policy to power off the GPU
+		 * when there are contexts active */
+		KBASE_DEBUG_ASSERT(pm->active_count == 0);
+
+		/* Request power off */
+		if (pm->backend.gpu_powered) {
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+			/* Power off the GPU immediately */
+			kbase_pm_do_poweroff(kbdev, false);
+		} else {
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		}
+	}
+}
+
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
+{
+	bool shaders_desired;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbdev->pm.backend.pm_current_policy == NULL)
+		return;
+	if (kbdev->pm.backend.poweroff_wait_in_progress)
+		return;
+
+	if (kbdev->pm.backend.protected_transition_override)
+		/* We are trying to change in/out of protected mode - force all
+		 * cores off so that the L2 powers down */
+		shaders_desired = false;
+	else
+		shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev);
+
+	if (kbdev->pm.backend.shaders_desired != shaders_desired) {
+		KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u,
+				(u32)kbdev->pm.backend.shaders_desired);
+
+		kbdev->pm.backend.shaders_desired = shaders_desired;
+		kbase_pm_update_state(kbdev);
+	}
+}
+
+void kbase_pm_update_cores_state(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	kbase_pm_update_cores_state_nolock(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
+{
+	WARN_ON(enabled_policy_count == 0);
+	if (list)
+		*list = enabled_policy_list;
+
+	return enabled_policy_count;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_list_policies);
+
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	return kbdev->pm.backend.pm_current_policy;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_get_policy);
+
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+				const struct kbase_pm_policy *new_policy)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	const struct kbase_pm_policy *old_policy;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(new_policy != NULL);
+
+	KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);
+
+	/* During a policy change we pretend the GPU is active */
+	/* A suspend won't happen here, because we're in a syscall from a
+	 * userspace thread */
+	kbase_pm_context_active(kbdev);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	/* Remove the policy to prevent IRQ handlers from working on it */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	old_policy = kbdev->pm.backend.pm_current_policy;
+	kbdev->pm.backend.pm_current_policy = NULL;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
+								old_policy->id);
+	if (old_policy->term)
+		old_policy->term(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u,
+								new_policy->id);
+	if (new_policy->init)
+		new_policy->init(kbdev);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->pm.backend.pm_current_policy = new_policy;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* If any core power state changes were previously attempted, but
+	 * couldn't be made because the policy was changing (current_policy was
+	 * NULL), then re-try them here. */
+	kbase_pm_update_active(kbdev);
+	kbase_pm_update_cores_state(kbdev);
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	/* Now the policy change is finished, we release our fake context active
+	 * reference */
+	kbase_pm_context_idle(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_set_policy);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h
new file mode 100644
index 0000000..28d258f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h
@@ -0,0 +1,107 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Power policy API definitions
+ */
+
+#ifndef _KBASE_PM_POLICY_H_
+#define _KBASE_PM_POLICY_H_
+
+/**
+ * kbase_pm_policy_init - Initialize power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Must be called before calling any other policy function
+ *
+ * Return: 0 if the power policy framework was successfully
+ *         initialized, -errno otherwise.
+ */
+int kbase_pm_policy_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_policy_term - Terminate power policy framework
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_policy_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_active - Update the active power state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_update_cores - Update the desired core state of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Calls into the current power policy
+ */
+void kbase_pm_update_cores(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_cores_requested - Check that a power request has been locked into
+ *                            the HW.
+ * @kbdev:           Kbase device
+ * @shader_required: true if shaders are required
+ *
+ * Called by the scheduler to check if a power on request has been locked into
+ * the HW.
+ *
+ * Note that there is no guarantee that the cores are actually ready; however,
+ * once the request has been locked into the HW it is safe to submit work,
+ * since the HW will wait for the transition to ready.
+ *
+ * A reference must first be taken prior to making this call.
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * Return: true if the request to the HW was successfully made, else false if
+ *         the request is still pending.
+ */
+static inline bool kbase_pm_cores_requested(struct kbase_device *kbdev,
+		bool shader_required)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* If the L2 & tiler are not on or pending, then the tiler is not yet
+	 * available, and shaders are definitely not powered.
+	 */
+	if (kbdev->pm.backend.l2_state != KBASE_L2_PEND_ON &&
+			kbdev->pm.backend.l2_state != KBASE_L2_ON)
+		return false;
+
+	if (shader_required &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_PEND_ON_CORESTACK_ON &&
+			kbdev->pm.backend.shaders_state != KBASE_SHADERS_ON_CORESTACK_ON)
+		return false;
+
+	return true;
+}
+
+#endif /* _KBASE_PM_POLICY_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
new file mode 100644
index 0000000..16b2100
--- /dev/null
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
@@ -0,0 +1,104 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2016,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+				u64 *system_time, struct timespec64 *ts)
+{
+	u32 hi1, hi2;
+
+	kbase_pm_request_gpu_cycle_counter(kbdev);
+
+	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled
+	 * correctly */
+	do {
+		hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI));
+		*cycle_counter = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(CYCLE_COUNT_LO));
+		hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI));
+		*cycle_counter |= (((u64) hi1) << 32);
+	} while (hi1 != hi2);
+
+	/* Read hi, lo, hi to ensure that overflow from lo to hi is handled
+	 * correctly */
+	do {
+		hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI));
+		*system_time = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(TIMESTAMP_LO));
+		hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI));
+		*system_time |= (((u64) hi1) << 32);
+	} while (hi1 != hi2);
+
+	/* Record the CPU's idea of current time */
+	ktime_get_raw_ts64(ts);
+
+	kbase_pm_release_gpu_cycle_counter(kbdev);
+}
+
+/**
+ * kbase_wait_write_flush -  Wait for GPU write flush
+ * @kbdev: Kbase device
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * Only in use for BASE_HW_ISSUE_6367
+ *
+ * Note: If GPU resets occur then the counters are reset to zero, so the delay
+ * may not be as expected.
+ */
+#ifndef CONFIG_MALI_NO_MALI
+void kbase_wait_write_flush(struct kbase_device *kbdev)
+{
+	u32 base_count = 0;
+
+	/*
+	 * The caller must be holding onto the kctx or the call is from
+	 * userspace.
+	 */
+	kbase_pm_context_active(kbdev);
+	kbase_pm_request_gpu_cycle_counter(kbdev);
+
+	while (true) {
+		u32 new_count;
+
+		new_count = kbase_reg_read(kbdev,
+					GPU_CONTROL_REG(CYCLE_COUNT_LO));
+		/* First time around, just store the count. */
+		if (base_count == 0) {
+			base_count = new_count;
+			continue;
+		}
+
+		/* No need to handle wrapping, unsigned maths works for this. */
+		if ((new_count - base_count) > 1000)
+			break;
+	}
+
+	kbase_pm_release_gpu_cycle_counter(kbdev);
+	kbase_pm_context_idle(kbdev);
+}
+#endif				/* CONFIG_MALI_NO_MALI */
diff --git a/drivers/gpu/arm/midgard/build.bp b/drivers/gpu/arm/midgard/build.bp
new file mode 100644
index 0000000..2cf685c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/build.bp
@@ -0,0 +1,113 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2017-2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+/* Kernel-side tests may include mali_kbase's headers. Therefore any config
+ * options which affect the sizes of any structs (e.g. adding extra members)
+ * must be included in these defaults, so that the structs are consistent in
+ * both mali_kbase and the test modules. */
+bob_defaults {
+    name: "mali_kbase_shared_config_defaults",
+    no_mali: {
+        kbuild_options: ["CONFIG_MALI_NO_MALI=y"],
+    },
+    mali_devfreq: {
+        kbuild_options: ["CONFIG_MALI_DEVFREQ=y"],
+    },
+    mali_midgard_dvfs: {
+        kbuild_options: ["CONFIG_MALI_MIDGARD_DVFS=y"],
+    },
+    mali_debug: {
+        kbuild_options: ["CONFIG_MALI_DEBUG=y"],
+    },
+    mali_fpga_bus_logger: {
+        kbuild_options: ["CONFIG_MALI_FPGA_BUS_LOGGER=y"],
+    },
+    cinstr_job_dump: {
+        kbuild_options: ["CONFIG_MALI_JOB_DUMP=y"],
+    },
+    cinstr_vector_dump: {
+        kbuild_options: ["CONFIG_MALI_VECTOR_DUMP=y"],
+    },
+    cinstr_gwt: {
+        kbuild_options: ["CONFIG_MALI_CINSTR_GWT=y"],
+    },
+    mali_gator_support: {
+        kbuild_options: ["CONFIG_MALI_GATOR_SUPPORT=y"],
+    },
+    mali_system_trace: {
+        kbuild_options: ["CONFIG_MALI_SYSTEM_TRACE=y"],
+    },
+    mali_pwrsoft_765: {
+        kbuild_options: ["CONFIG_MALI_PWRSOFT_765=y"],
+    },
+    kbuild_options: [
+        "MALI_UNIT_TEST={{.unit_test_code}}",
+        "MALI_CUSTOMER_RELEASE={{.release}}",
+        "MALI_USE_CSF={{.gpu_has_csf}}",
+        "MALI_KERNEL_TEST_API={{.debug}}",
+    ],
+    defaults: ["kernel_defaults"],
+}
+
+bob_kernel_module {
+    name: "mali_kbase",
+    srcs: [
+        "*.c",
+        "*.h",
+        "Kbuild",
+        "backend/gpu/*.c",
+        "backend/gpu/*.h",
+        "backend/gpu/Kbuild",
+        "ipa/*.c",
+        "ipa/*.h",
+        "ipa/Kbuild",
+        "platform/*.h",
+        "platform/*/*.c",
+        "platform/*/*.h",
+        "platform/*/Kbuild",
+        "thirdparty/*.c",
+    ],
+    kbuild_options: [
+        "CONFIG_MALI_KUTF=n",
+        "CONFIG_MALI_MIDGARD=m",
+        "CONFIG_MALI_NO_MALI_DEFAULT_GPU={{.gpu}}",
+        "CONFIG_MALI_PLATFORM_NAME={{.mali_platform_name}}",
+    ],
+    mali_fpga_bus_logger: {
+        extra_symbols: [
+            "bus_logger",
+        ],
+    },
+    mali_corestack: {
+        kbuild_options: ["CONFIG_MALI_CORESTACK=y"],
+    },
+    mali_error_inject: {
+        kbuild_options: ["CONFIG_MALI_ERROR_INJECT=y"],
+    },
+    mali_error_inject_random: {
+        kbuild_options: ["CONFIG_MALI_ERROR_INJECT_RANDOM=y"],
+    },
+    cinstr_secondary_hwc: {
+        kbuild_options: ["CONFIG_MALI_PRFCNT_SET_SECONDARY=y"],
+    },
+    mali_2mb_alloc: {
+        kbuild_options: ["CONFIG_MALI_2MB_ALLOC=y"],
+    },
+    gpu_has_csf: {
+        srcs: [
+            "csf/*.c",
+            "csf/*.h",
+            "csf/Kbuild",
+        ],
+    },
+    defaults: ["mali_kbase_shared_config_defaults"],
+}
diff --git a/drivers/gpu/arm/midgard/docs/Doxyfile b/drivers/gpu/arm/midgard/docs/Doxyfile
new file mode 100644
index 0000000..6498dcb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/docs/Doxyfile
@@ -0,0 +1,132 @@
+#
+# (C) COPYRIGHT 2011-2013, 2015, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+##############################################################################
+
+# This file contains per-module Doxygen configuration. Please do not add
+# extra settings to this file without consulting all stakeholders, as they
+# may override project-wide settings.
+#
+# Additionally, when defining aliases, macros, sections etc, use the module
+# name as a prefix e.g. gles_my_alias.
+
+##############################################################################
+
+@INCLUDE = ../../bldsys/Doxyfile_common
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT                  += ../../kernel/drivers/gpu/arm/midgard/
+
+##############################################################################
+# Everything below here is optional, and in most cases not required
+##############################################################################
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                +=
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS          +=
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+EXCLUDE                += ../../kernel/drivers/gpu/arm/midgard/platform ../../kernel/drivers/gpu/arm/midgard/platform_dummy ../../kernel/drivers/gpu/arm/midgard/scripts ../../kernel/drivers/gpu/arm/midgard/tests ../../kernel/drivers/gpu/arm/midgard/Makefile ../../kernel/drivers/gpu/arm/midgard/Makefile.kbase ../../kernel/drivers/gpu/arm/midgard/Kbuild ../../kernel/drivers/gpu/arm/midgard/Kconfig ../../kernel/drivers/gpu/arm/midgard/sconscript ../../kernel/drivers/gpu/arm/midgard/docs ../../kernel/drivers/gpu/arm/midgard/mali_uk.h ../../kernel/drivers/gpu/arm/midgard/Makefile
+
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       +=
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS        +=
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH           +=
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH             +=
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH           +=
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED             +=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      +=
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS           += ../../kernel/drivers/gpu/arm/midgard/docs
+
diff --git a/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot b/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot
new file mode 100644
index 0000000..a15b558
--- /dev/null
+++ b/drivers/gpu/arm/midgard/docs/policy_operation_diagram.dot
@@ -0,0 +1,117 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+digraph policy_objects_diagram {
+	rankdir=LR;
+	size="12,8";
+	compound=true;
+
+	node [ shape = box ];
+
+	subgraph cluster_policy_queues {
+		low_queue [ shape=record label = "LowP | {<ql>ctx_lo | ... | <qm>ctx_i | ... | <qr>ctx_hi}" ];
+		queues_middle_sep [ label="" shape=plaintext width=0 height=0 ];
+
+		rt_queue [ shape=record label = "RT | {<ql>ctx_lo | ... | <qm>ctx_j | ... | <qr>ctx_hi}" ];
+
+		label = "Policy's Queue(s)";
+	}
+
+	call_enqueue [ shape=plaintext label="enqueue_ctx()" ];
+
+	{
+		rank=same;
+		ordering=out;
+		call_dequeue [ shape=plaintext label="dequeue_head_ctx()\n+ runpool_add_ctx()" ];
+		call_ctxfinish [ shape=plaintext label="runpool_remove_ctx()" ];
+
+		call_ctxdone [ shape=plaintext label="don't requeue;\n/* ctx has no more jobs */" ];
+	}
+
+	subgraph cluster_runpool {
+
+		as0 [ width=2 height = 0.25 label="AS0: Job_1, ..., Job_n" ];
+		as1 [ width=2 height = 0.25 label="AS1: Job_1, ..., Job_m" ];
+		as2 [ width=2 height = 0.25 label="AS2: Job_1, ..., Job_p" ];
+		as3 [ width=2 height = 0.25 label="AS3: Job_1, ..., Job_q" ];
+
+		label = "Policy's Run Pool";
+	}
+
+	{
+		rank=same;
+		call_jdequeue [ shape=plaintext label="dequeue_job()" ];
+		sstop_dotfixup [ shape=plaintext label="" width=0 height=0 ];
+	}
+
+	{
+		rank=same;
+		ordering=out;
+		sstop [ shape=ellipse label="SS-Timer expires" ]
+		jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+		irq [ label="IRQ" shape=ellipse ];
+
+		job_finish [ shape=plaintext label="don't requeue;\n/* job done */" ];
+	}
+
+	hstop [ shape=ellipse label="HS-Timer expires" ]
+
+	/*
+	 * Edges
+	 */
+
+	call_enqueue -> queues_middle_sep [ lhead=cluster_policy_queues ];
+
+	low_queue:qr -> call_dequeue:w;
+	rt_queue:qr -> call_dequeue:w;
+
+	call_dequeue -> as1 [lhead=cluster_runpool];
+
+	as1->call_jdequeue         [ltail=cluster_runpool];
+	call_jdequeue->jobslots:0;
+	call_jdequeue->sstop_dotfixup [ arrowhead=none];
+	sstop_dotfixup->sstop      [label="Spawn SS-Timer"];
+	sstop->jobslots            [label="SoftStop"];
+	sstop->hstop               [label="Spawn HS-Timer"];
+	hstop->jobslots:ne            [label="HardStop"];
+
+
+	as3->call_ctxfinish:ne [ ltail=cluster_runpool ];
+	call_ctxfinish:sw->rt_queue:qm [ lhead=cluster_policy_queues label="enqueue_ctx()\n/* ctx still has jobs */" ];
+
+	call_ctxfinish->call_ctxdone [constraint=false];
+
+	call_ctxdone->call_enqueue [weight=0.1 labeldistance=20.0 labelangle=0.0 taillabel="Job submitted to the ctx" style=dotted constraint=false];
+
+
+	{
+	jobslots->irq   [constraint=false];
+
+	irq->job_finish [constraint=false];
+	}
+
+	irq->as2  [lhead=cluster_runpool label="requeue_job()\n/* timeslice expired */" ];
+
+}
diff --git a/drivers/gpu/arm/midgard/docs/policy_overview.dot b/drivers/gpu/arm/midgard/docs/policy_overview.dot
new file mode 100644
index 0000000..6b87335
--- /dev/null
+++ b/drivers/gpu/arm/midgard/docs/policy_overview.dot
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+digraph policy_objects_diagram {
+	rankdir=LR
+	size="6,6"
+	compound=true;
+
+	node [ shape = box ];
+
+	call_enqueue [ shape=plaintext label="enqueue ctx" ];
+
+
+	policy_queue [ label="Policy's Queue" ];
+
+	{
+		rank=same;
+		runpool [ label="Policy's Run Pool" ];
+
+		ctx_finish [ label="ctx finished" ];
+	}
+
+	{
+		rank=same;
+		jobslots [ shape=record label="Jobslots: | <0>js[0] | <1>js[1] | <2>js[2]" ];
+
+		job_finish [ label="Job finished" ];
+	}
+
+
+
+	/*
+	 * Edges
+	 */
+
+	call_enqueue -> policy_queue;
+
+	policy_queue->runpool [label="dequeue ctx" weight=0.1];
+	runpool->policy_queue [label="requeue ctx" weight=0.1];
+
+	runpool->ctx_finish [ style=dotted ];
+
+	runpool->jobslots  [label="dequeue job" weight=0.1];
+	jobslots->runpool  [label="requeue job" weight=0.1];
+
+	jobslots->job_finish [ style=dotted ];
+}
diff --git a/drivers/gpu/arm/midgard/ipa/Kbuild b/drivers/gpu/arm/midgard/ipa/Kbuild
new file mode 100644
index 0000000..3d9cf80
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/Kbuild
@@ -0,0 +1,28 @@
+#
+# (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	ipa/mali_kbase_ipa_simple.o \
+	ipa/mali_kbase_ipa.o \
+	ipa/mali_kbase_ipa_vinstr_g7x.o \
+	ipa/mali_kbase_ipa_vinstr_common.o
+
+mali_kbase-$(CONFIG_DEBUG_FS) += ipa/mali_kbase_ipa_debugfs.o
\ No newline at end of file
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
new file mode 100644
index 0000000..9da2878
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
@@ -0,0 +1,664 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include <linux/thermal.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/of.h>
+#include "mali_kbase.h"
+#include "mali_kbase_ipa.h"
+#include "mali_kbase_ipa_debugfs.h"
+#include "mali_kbase_ipa_simple.h"
+#include "backend/gpu/mali_kbase_pm_internal.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#define dev_pm_opp_find_freq_exact opp_find_freq_exact
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp opp
+#endif
+
+#define KBASE_IPA_FALLBACK_MODEL_NAME "mali-simple-power-model"
+
+static const struct kbase_ipa_model_ops *kbase_ipa_all_model_ops[] = {
+	&kbase_simple_ipa_model_ops,
+	&kbase_g71_ipa_model_ops,
+	&kbase_g72_ipa_model_ops,
+	&kbase_g76_ipa_model_ops,
+	&kbase_g52_ipa_model_ops,
+	&kbase_g52_r1_ipa_model_ops,
+	&kbase_g51_ipa_model_ops
+};
+
+int kbase_ipa_model_recalculate(struct kbase_ipa_model *model)
+{
+	int err = 0;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (model->ops->recalculate) {
+		err = model->ops->recalculate(model);
+		if (err) {
+			dev_err(model->kbdev->dev,
+				"recalculation of power model %s returned error %d\n",
+				model->ops->name, err);
+		}
+	}
+
+	return err;
+}
+
+const struct kbase_ipa_model_ops *kbase_ipa_model_ops_find(struct kbase_device *kbdev,
+							    const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(kbase_ipa_all_model_ops); ++i) {
+		const struct kbase_ipa_model_ops *ops = kbase_ipa_all_model_ops[i];
+
+		if (!strcmp(ops->name, name))
+			return ops;
+	}
+
+	dev_err(kbdev->dev, "power model \'%s\' not found\n", name);
+
+	return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_model_ops_find);
+
+const char *kbase_ipa_model_name_from_id(u32 gpu_id)
+{
+	const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+			GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	if (GPU_ID_IS_NEW_FORMAT(prod_id)) {
+		switch (GPU_ID2_MODEL_MATCH_VALUE(prod_id)) {
+		case GPU_ID2_PRODUCT_TMIX:
+			return "mali-g71-power-model";
+		case GPU_ID2_PRODUCT_THEX:
+			return "mali-g72-power-model";
+		case GPU_ID2_PRODUCT_TNOX:
+			return "mali-g76-power-model";
+		case GPU_ID2_PRODUCT_TSIX:
+			return "mali-g51-power-model";
+		case GPU_ID2_PRODUCT_TGOX:
+			if ((gpu_id & GPU_ID2_VERSION_MAJOR) ==
+					(0 << GPU_ID2_VERSION_MAJOR_SHIFT))
+				/* g52 aliased to g76 power-model's ops */
+				return "mali-g52-power-model";
+			else
+				return "mali-g52_r1-power-model";
+		default:
+			return KBASE_IPA_FALLBACK_MODEL_NAME;
+		}
+	}
+
+	return KBASE_IPA_FALLBACK_MODEL_NAME;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_model_name_from_id);
+
+static struct device_node *get_model_dt_node(struct kbase_ipa_model *model)
+{
+	struct device_node *model_dt_node;
+	char compat_string[64];
+
+	snprintf(compat_string, sizeof(compat_string), "arm,%s",
+		 model->ops->name);
+
+	/* of_find_compatible_node() will call of_node_put() on the root node,
+	 * so take a reference on it first.
+	 */
+	of_node_get(model->kbdev->dev->of_node);
+	model_dt_node = of_find_compatible_node(model->kbdev->dev->of_node,
+						NULL, compat_string);
+	if (!model_dt_node && !model->missing_dt_node_warning) {
+		dev_warn(model->kbdev->dev,
+			 "Couldn't find power_model DT node matching \'%s\'\n",
+			 compat_string);
+		model->missing_dt_node_warning = true;
+	}
+
+	return model_dt_node;
+}
+
+int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model,
+				  const char *name, s32 *addr,
+				  size_t num_elems, bool dt_required)
+{
+	int err, i;
+	struct device_node *model_dt_node = get_model_dt_node(model);
+	char *origin;
+
+	err = of_property_read_u32_array(model_dt_node, name, addr, num_elems);
+	/* We're done with model_dt_node now, so drop the reference taken in
+	 * get_model_dt_node()/of_find_compatible_node().
+	 */
+	of_node_put(model_dt_node);
+
+	if (err && dt_required) {
+		memset(addr, 0, sizeof(s32) * num_elems);
+		dev_warn(model->kbdev->dev,
+			 "Error %d, no DT entry: %s.%s = %zu*[0]\n",
+			 err, model->ops->name, name, num_elems);
+		origin = "zero";
+	} else if (err && !dt_required) {
+		origin = "default";
+	} else /* !err */ {
+		origin = "DT";
+	}
+
+	/* Create a unique debugfs entry for each element */
+	for (i = 0; i < num_elems; ++i) {
+		char elem_name[32];
+
+		if (num_elems == 1)
+			snprintf(elem_name, sizeof(elem_name), "%s", name);
+		else
+			snprintf(elem_name, sizeof(elem_name), "%s.%d",
+				name, i);
+
+		dev_dbg(model->kbdev->dev, "%s.%s = %d (%s)\n",
+			model->ops->name, elem_name, addr[i], origin);
+
+		err = kbase_ipa_model_param_add(model, elem_name,
+						&addr[i], sizeof(s32),
+						PARAM_TYPE_S32);
+		if (err)
+			goto exit;
+	}
+exit:
+	return err;
+}
+
+int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model,
+				     const char *name, char *addr,
+				     size_t size, bool dt_required)
+{
+	int err;
+	struct device_node *model_dt_node = get_model_dt_node(model);
+	const char *string_prop_value;
+	char *origin;
+
+	err = of_property_read_string(model_dt_node, name,
+				      &string_prop_value);
+
+	/* We're done with model_dt_node now, so drop the reference taken in
+	 * get_model_dt_node()/of_find_compatible_node().
+	 */
+	of_node_put(model_dt_node);
+
+	if (err && dt_required) {
+		strncpy(addr, "", size - 1);
+		dev_warn(model->kbdev->dev,
+			 "Error %d, no DT entry: %s.%s = \'%s\'\n",
+			 err, model->ops->name, name, addr);
+		err = 0;
+		origin = "zero";
+	} else if (err && !dt_required) {
+		origin = "default";
+	} else /* !err */ {
+		strncpy(addr, string_prop_value, size - 1);
+		origin = "DT";
+	}
+
+	addr[size - 1] = '\0';
+
+	dev_dbg(model->kbdev->dev, "%s.%s = \'%s\' (%s)\n",
+		model->ops->name, name, string_prop_value, origin);
+
+	err = kbase_ipa_model_param_add(model, name, addr, size,
+					PARAM_TYPE_STRING);
+	return err;
+}
+
+void kbase_ipa_term_model(struct kbase_ipa_model *model)
+{
+	if (!model)
+		return;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (model->ops->term)
+		model->ops->term(model);
+
+	kbase_ipa_model_param_free_all(model);
+
+	kfree(model);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_term_model);
+
+struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev,
+					     const struct kbase_ipa_model_ops *ops)
+{
+	struct kbase_ipa_model *model;
+	int err;
+
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	if (!ops || !ops->name)
+		return NULL;
+
+	model = kzalloc(sizeof(struct kbase_ipa_model), GFP_KERNEL);
+	if (!model)
+		return NULL;
+
+	model->kbdev = kbdev;
+	model->ops = ops;
+	INIT_LIST_HEAD(&model->params);
+
+	err = model->ops->init(model);
+	if (err) {
+		dev_err(kbdev->dev,
+			"init of power model \'%s\' returned error %d\n",
+			ops->name, err);
+		kfree(model);
+		return NULL;
+	}
+
+	err = kbase_ipa_model_recalculate(model);
+	if (err) {
+		kbase_ipa_term_model(model);
+		return NULL;
+	}
+
+	return model;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_init_model);
+
+static void kbase_ipa_term_locked(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	/* Clean up the models */
+	if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)
+		kbase_ipa_term_model(kbdev->ipa.configured_model);
+	kbase_ipa_term_model(kbdev->ipa.fallback_model);
+
+	kbdev->ipa.configured_model = NULL;
+	kbdev->ipa.fallback_model = NULL;
+}
+
+int kbase_ipa_init(struct kbase_device *kbdev)
+{
+
+	const char *model_name;
+	const struct kbase_ipa_model_ops *ops;
+	struct kbase_ipa_model *default_model = NULL;
+	int err;
+
+	mutex_init(&kbdev->ipa.lock);
+	/*
+	 * Lock during init to avoid warnings from lockdep_assert_held (there
+	 * shouldn't be any concurrent access yet).
+	 */
+	mutex_lock(&kbdev->ipa.lock);
+
+	/* The simple IPA model must *always* be present.*/
+	ops = kbase_ipa_model_ops_find(kbdev, KBASE_IPA_FALLBACK_MODEL_NAME);
+
+	default_model = kbase_ipa_init_model(kbdev, ops);
+	if (!default_model) {
+		err = -EINVAL;
+		goto end;
+	}
+
+	kbdev->ipa.fallback_model = default_model;
+	err = of_property_read_string(kbdev->dev->of_node,
+				      "ipa-model",
+				      &model_name);
+	if (err) {
+		/* Attempt to load a match from GPU-ID */
+		u32 gpu_id;
+
+		gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+		model_name = kbase_ipa_model_name_from_id(gpu_id);
+		dev_dbg(kbdev->dev,
+			"Inferring model from GPU ID 0x%x: \'%s\'\n",
+			gpu_id, model_name);
+		err = 0;
+	} else {
+		dev_dbg(kbdev->dev,
+			"Using ipa-model parameter from DT: \'%s\'\n",
+			model_name);
+	}
+
+	if (strcmp(KBASE_IPA_FALLBACK_MODEL_NAME, model_name) != 0) {
+		ops = kbase_ipa_model_ops_find(kbdev, model_name);
+		kbdev->ipa.configured_model = kbase_ipa_init_model(kbdev, ops);
+		if (!kbdev->ipa.configured_model) {
+			dev_warn(kbdev->dev,
+				"Failed to initialize ipa-model: \'%s\'\n"
+				"Falling back on default model\n",
+				model_name);
+			kbdev->ipa.configured_model = default_model;
+		}
+	} else {
+		kbdev->ipa.configured_model = default_model;
+	}
+
+end:
+	if (err)
+		kbase_ipa_term_locked(kbdev);
+	else
+		dev_info(kbdev->dev,
+			 "Using configured power model %s, and fallback %s\n",
+			 kbdev->ipa.configured_model->ops->name,
+			 kbdev->ipa.fallback_model->ops->name);
+
+	mutex_unlock(&kbdev->ipa.lock);
+	return err;
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_init);
+
+void kbase_ipa_term(struct kbase_device *kbdev)
+{
+	mutex_lock(&kbdev->ipa.lock);
+	kbase_ipa_term_locked(kbdev);
+	mutex_unlock(&kbdev->ipa.lock);
+
+	mutex_destroy(&kbdev->ipa.lock);
+}
+KBASE_EXPORT_TEST_API(kbase_ipa_term);
+
+/**
+ * kbase_scale_dynamic_power() - Scale a dynamic power coefficient to an OPP
+ * @c:		Dynamic model coefficient, in pW/(Hz V^2). Should be in range
+ *		0 < c < 2^26 to prevent overflow.
+ * @freq:	Frequency, in Hz. Range: 2^23 < freq < 2^30 (~8MHz to ~1GHz)
+ * @voltage:	Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V)
+ *
+ * Keep a record of the approximate range of each value at every stage of the
+ * calculation, to ensure we don't overflow. This makes heavy use of the
+ * approximations 1000 = 2^10 and 1000000 = 2^20, but does the actual
+ * calculations in decimal for increased accuracy.
+ *
+ * Return: Power consumption, in mW. Range: 0 < p < 2^13 (0W to ~8W)
+ */
+static u32 kbase_scale_dynamic_power(const u32 c, const u32 freq,
+				     const u32 voltage)
+{
+	/* Range: 2^8 < v2 < 2^16 m(V^2) */
+	const u32 v2 = (voltage * voltage) / 1000;
+
+	/* Range: 2^3 < f_MHz < 2^10 MHz */
+	const u32 f_MHz = freq / 1000000;
+
+	/* Range: 2^11 < v2f_big < 2^26 kHz V^2 */
+	const u32 v2f_big = v2 * f_MHz;
+
+	/* Range: 2^1 < v2f < 2^16 MHz V^2 */
+	const u32 v2f = v2f_big / 1000;
+
+	/* Range (working backwards from next line): 0 < v2fc < 2^23 uW.
+	 * Must be < 2^42 to avoid overflowing the return value. */
+	const u64 v2fc = (u64) c * (u64) v2f;
+
+	/* Range: 0 < v2fc / 1000 < 2^13 mW */
+	return div_u64(v2fc, 1000);
+}
+
+/**
+ * kbase_scale_static_power() - Scale a static power coefficient to an OPP
+ * @c:		Static model coefficient, in uW/V^3. Should be in range
+ *		0 < c < 2^32 to prevent overflow.
+ * @voltage:	Voltage, in mV. Range: 2^9 < voltage < 2^13 (~0.5V to ~8V)
+ *
+ * Return: Power consumption, in mW. Range: 0 < p < 2^13 (0W to ~8W)
+ */
+u32 kbase_scale_static_power(const u32 c, const u32 voltage)
+{
+	/* Range: 2^8 < v2 < 2^16 m(V^2) */
+	const u32 v2 = (voltage * voltage) / 1000;
+
+	/* Range: 2^17 < v3_big < 2^29 m(V^2) mV */
+	const u32 v3_big = v2 * voltage;
+
+	/* Range: 2^7 < v3 < 2^19 m(V^3) */
+	const u32 v3 = v3_big / 1000;
+
+	/*
+	 * Range (working backwards from next line): 0 < v3c_big < 2^33 nW.
+	 * The result should be < 2^52 to avoid overflowing the return value.
+	 */
+	const u64 v3c_big = (u64) c * (u64) v3;
+
+	/* Range: 0 < v3c_big / 1000000 < 2^13 mW */
+	return div_u64(v3c_big, 1000000);
+}
+
+void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Record the event of GPU entering protected mode. */
+	kbdev->ipa_protection_mode_switched = true;
+}
+
+static struct kbase_ipa_model *get_current_model(struct kbase_device *kbdev)
+{
+	struct kbase_ipa_model *model;
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (kbdev->ipa_protection_mode_switched ||
+			kbdev->ipa.force_fallback_model)
+		model = kbdev->ipa.fallback_model;
+	else
+		model = kbdev->ipa.configured_model;
+
+	/*
+	 * Regardless of whether the GPU entered protected mode earlier, the
+	 * event can now be reset (if the GPU is not currently in protected
+	 * mode) so that the configured model is used for the next sample.
+	 */
+	if (!kbdev->protected_mode)
+		kbdev->ipa_protection_mode_switched = false;
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return model;
+}
+
+static u32 get_static_power_locked(struct kbase_device *kbdev,
+				   struct kbase_ipa_model *model,
+				   unsigned long voltage)
+{
+	u32 power = 0;
+	int err;
+	u32 power_coeff;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (!model->ops->get_static_coeff)
+		model = kbdev->ipa.fallback_model;
+
+	if (model->ops->get_static_coeff) {
+		err = model->ops->get_static_coeff(model, &power_coeff);
+		if (!err)
+			power = kbase_scale_static_power(power_coeff,
+							 (u32) voltage);
+	}
+
+	return power;
+}
+
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static unsigned long kbase_get_static_power(struct devfreq *df,
+					    unsigned long voltage)
+#else
+static unsigned long kbase_get_static_power(unsigned long voltage)
+#endif
+{
+	struct kbase_ipa_model *model;
+	u32 power = 0;
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+#else
+	struct kbase_device *kbdev = kbase_find_device(-1);
+#endif
+
+	if (!kbdev)
+		return 0ul;
+
+	mutex_lock(&kbdev->ipa.lock);
+
+	model = get_current_model(kbdev);
+	power = get_static_power_locked(kbdev, model, voltage);
+
+	mutex_unlock(&kbdev->ipa.lock);
+
+#if !(defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	kbase_release_device(kbdev);
+#endif
+
+	return power;
+}
+
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static unsigned long kbase_get_dynamic_power(struct devfreq *df,
+					     unsigned long freq,
+					     unsigned long voltage)
+#else
+static unsigned long kbase_get_dynamic_power(unsigned long freq,
+					     unsigned long voltage)
+#endif
+{
+	struct kbase_ipa_model *model;
+	u32 power_coeff = 0, power = 0;
+	int err = 0;
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+#else
+	struct kbase_device *kbdev = kbase_find_device(-1);
+#endif
+
+	if (!kbdev)
+		return 0ul;
+
+	mutex_lock(&kbdev->ipa.lock);
+
+	model = kbdev->ipa.fallback_model;
+
+	err = model->ops->get_dynamic_coeff(model, &power_coeff);
+
+	if (!err)
+		power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
+	else
+		dev_err_ratelimited(kbdev->dev,
+				    "Model %s returned error code %d\n",
+				    model->ops->name, err);
+
+	mutex_unlock(&kbdev->ipa.lock);
+
+#if !(defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	kbase_release_device(kbdev);
+#endif
+
+	return power;
+}
+
+int kbase_get_real_power_locked(struct kbase_device *kbdev, u32 *power,
+				unsigned long freq,
+				unsigned long voltage)
+{
+	struct kbase_ipa_model *model;
+	u32 power_coeff = 0;
+	int err = 0;
+	struct kbasep_pm_metrics diff;
+	u64 total_time;
+
+	lockdep_assert_held(&kbdev->ipa.lock);
+
+	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->ipa.last_metrics, &diff);
+
+	model = get_current_model(kbdev);
+
+	err = model->ops->get_dynamic_coeff(model, &power_coeff);
+
+	/* If the counter model returns an error (e.g. switching back to
+	 * protected mode and failing to read counters, or a counter sample
+	 * with too few cycles), revert to the fallback model.
+	 */
+	if (err && model != kbdev->ipa.fallback_model) {
+		model = kbdev->ipa.fallback_model;
+		err = model->ops->get_dynamic_coeff(model, &power_coeff);
+	}
+
+	if (err)
+		return err;
+
+	*power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
+
+	/* time_busy / total_time cannot be >1, so assigning the 64-bit
+	 * result of div_u64 to *power cannot overflow.
+	 */
+	total_time = diff.time_busy + (u64) diff.time_idle;
+	*power = div_u64(*power * (u64) diff.time_busy,
+			 max(total_time, 1ull));
+
+	*power += get_static_power_locked(kbdev, model, voltage);
+
+	return err;
+}
+KBASE_EXPORT_TEST_API(kbase_get_real_power_locked);
+
+int kbase_get_real_power(struct devfreq *df, u32 *power,
+				unsigned long freq,
+				unsigned long voltage)
+{
+	int ret;
+	struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	mutex_lock(&kbdev->ipa.lock);
+	ret = kbase_get_real_power_locked(kbdev, power, freq, voltage);
+	mutex_unlock(&kbdev->ipa.lock);
+
+	return ret;
+}
+KBASE_EXPORT_TEST_API(kbase_get_real_power);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+struct devfreq_cooling_ops kbase_ipa_power_model_ops = {
+#else
+struct devfreq_cooling_power kbase_ipa_power_model_ops = {
+#endif
+	.get_static_power = &kbase_get_static_power,
+	.get_dynamic_power = &kbase_get_dynamic_power,
+#if defined(CONFIG_MALI_PWRSOFT_765) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	.get_real_power = &kbase_get_real_power,
+#endif
+};
+KBASE_EXPORT_TEST_API(kbase_ipa_power_model_ops);
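[Editor's note, not part of the patch] The two scaling helpers above implement P_dyn = C_dyn * f * V^2 and P_static = C_static * V^3 in integer arithmetic, with the intermediate divisions keeping every value inside the ranges noted in the comments. The user-space sketch below mirrors that arithmetic so the expected magnitudes can be checked on a host machine; the coefficients and the 600 MHz / 850 mV operating point are invented example values, not data from any real platform.

/* Host-side sketch of the fixed-point scaling performed by
 * kbase_scale_dynamic_power() and kbase_scale_static_power().
 * Illustrative only: all numeric inputs below are made up.
 */
#include <stdint.h>
#include <stdio.h>

/* Dynamic power: c in pW/(Hz V^2), freq in Hz, voltage in mV -> result in mW */
static uint32_t scale_dynamic_power(uint32_t c, uint32_t freq, uint32_t voltage)
{
	uint32_t v2 = (voltage * voltage) / 1000;	/* m(V^2) */
	uint32_t f_mhz = freq / 1000000;		/* MHz */
	uint32_t v2f = (v2 * f_mhz) / 1000;		/* MHz V^2 */
	uint64_t v2fc = (uint64_t)c * v2f;		/* uW */

	return (uint32_t)(v2fc / 1000);			/* mW */
}

/* Static power: c in uW/V^3, voltage in mV -> result in mW */
static uint32_t scale_static_power(uint32_t c, uint32_t voltage)
{
	uint32_t v2 = (voltage * voltage) / 1000;	/* m(V^2) */
	uint32_t v3 = (v2 * voltage) / 1000;		/* m(V^3) */
	uint64_t v3c = (uint64_t)c * v3;		/* nW */

	return (uint32_t)(v3c / 1000000);		/* mW */
}

int main(void)
{
	/* Example OPP: 600 MHz at 850 mV, with made-up coefficients. */
	printf("dynamic: %u mW\n",
	       (unsigned)scale_dynamic_power(4500, 600000000, 850));
	printf("static:  %u mW\n",
	       (unsigned)scale_static_power(900000, 850));
	return 0;
}

For those example inputs this prints roughly 1948 mW dynamic and 551 mW static, which is the same result the kernel helpers above would produce.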
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h
new file mode 100644
index 0000000..7462048
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h
@@ -0,0 +1,250 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_H_
+#define _KBASE_IPA_H_
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+
+struct devfreq;
+
+/**
+ * struct kbase_ipa_model - Object describing a particular IPA model.
+ * @kbdev:                    pointer to kbase device
+ * @model_data:               opaque pointer to model specific data, accessed
+ *                            only by model specific methods.
+ * @ops:                      pointer to object containing model specific methods.
+ * @params:                   head of the list of debugfs params added for model
+ * @missing_dt_node_warning:  flag to limit the matching power model DT not found
+ *                            warning to once.
+ */
+struct kbase_ipa_model {
+	struct kbase_device *kbdev;
+	void *model_data;
+	const struct kbase_ipa_model_ops *ops;
+	struct list_head params;
+	bool missing_dt_node_warning;
+};
+
+/**
+ * kbase_ipa_model_add_param_s32 - Add an integer model parameter
+ * @model:	pointer to IPA model
+ * @name:	name of corresponding debugfs entry
+ * @addr:	address where the value is stored
+ * @num_elems:	number of elements (1 if not an array)
+ * @dt_required: if false, a corresponding devicetree entry is not required,
+ *		 and the current value will be used. If true and the DT entry
+ *		 is missing, a warning is output and the data is zeroed
+ *
+ * Return: 0 on success, or an error code
+ */
+int kbase_ipa_model_add_param_s32(struct kbase_ipa_model *model,
+				  const char *name, s32 *addr,
+				  size_t num_elems, bool dt_required);
+
+/**
+ * kbase_ipa_model_add_param_string - Add a string model parameter
+ * @model:	pointer to IPA model
+ * @name:	name of corresponding debugfs entry
+ * @addr:	address where the value is stored
+ * @size:	size, in bytes, of the value storage (so the maximum string
+ *		length is size - 1)
+ * @dt_required: if false, a corresponding devicetree entry is not required,
+ *		 and the current value will be used. If true and the DT entry
+ *		 is missing, a warning is output and the string is cleared
+ *
+ * Return: 0 on success, or an error code
+ */
+int kbase_ipa_model_add_param_string(struct kbase_ipa_model *model,
+				     const char *name, char *addr,
+				     size_t size, bool dt_required);
+
+struct kbase_ipa_model_ops {
+	char *name;
+	/* The init, recalculate and term ops on the default model are always
+	 * called. The ops of any other model are only invoked if that model
+	 * is selected in the device tree; otherwise it is never initialized.
+	 * Any additional resources a model acquires in init() must be
+	 * released in its term().
+	 */
+	int (*init)(struct kbase_ipa_model *model);
+	/* Called immediately after init(), or when a parameter is changed, so
+	 * that any coefficients derived from model parameters can be
+	 * recalculated. */
+	int (*recalculate)(struct kbase_ipa_model *model);
+	void (*term)(struct kbase_ipa_model *model);
+	/*
+	 * get_dynamic_coeff() - calculate dynamic power coefficient
+	 * @model:		pointer to model
+	 * @coeffp:		pointer to return value location
+	 *
+	 * Calculate a dynamic power coefficient, with units pW/(Hz V^2), which
+	 * is then scaled by the IPA framework according to the current OPP's
+	 * frequency and voltage.
+	 *
+	 * Return: 0 on success, or an error code.
+	 */
+	int (*get_dynamic_coeff)(struct kbase_ipa_model *model, u32 *coeffp);
+	/*
+	 * get_static_coeff() - calculate static power coefficient
+	 * @model:		pointer to model
+	 * @coeffp:		pointer to return value location
+	 *
+	 * Calculate a static power coefficient, with units uW/(V^3), which is
+	 * scaled by the IPA framework according to the current OPP's voltage.
+	 *
+	 * Return: 0 on success, or an error code.
+	 */
+	int (*get_static_coeff)(struct kbase_ipa_model *model, u32 *coeffp);
+};
+
+/**
+ * kbase_ipa_init - Initialize the IPA feature
+ * @kbdev:      pointer to kbase device
+ *
+ * The simple IPA power model is initialized as a fallback model; if that
+ * initialization fails then IPA is not used.
+ * The device tree is read for the name of the IPA model to be used, via the
+ * property string "ipa-model". If that IPA model is supported then it is
+ * initialized, but if its initialization fails the simple power model is used.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
+int kbase_ipa_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_ipa_term - Terminate the IPA feature
+ * @kbdev:      pointer to kbase device
+ *
+ * Both simple IPA power model and model retrieved from device tree are
+ * terminated.
+ */
+void kbase_ipa_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_ipa_model_recalculate - Recalculate the model coefficients
+ * @model:      pointer to the IPA model object, already initialized
+ *
+ * It shall be called immediately after the model has been initialized
+ * or when a model parameter has changed, so that any coefficients
+ * derived from parameters can be recalculated.
+ * It is a wrapper for the model-specific recalculate() method.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
+int kbase_ipa_model_recalculate(struct kbase_ipa_model *model);
+
+/**
+ * kbase_ipa_model_ops_find - Lookup an IPA model using its name
+ * @kbdev:      pointer to kbase device
+ * @name:       name of model to lookup
+ *
+ * Return: Pointer to model's 'ops' structure, or NULL if the lookup failed.
+ */
+const struct kbase_ipa_model_ops *kbase_ipa_model_ops_find(struct kbase_device *kbdev,
+							   const char *name);
+
+/**
+ * kbase_ipa_model_name_from_id - Find the best model for a given GPU ID
+ * @gpu_id:     GPU ID of GPU the model will be used for
+ *
+ * Return: The name of the appropriate counter-based model, or the name of the
+ *         fallback model if no counter model exists.
+ */
+const char *kbase_ipa_model_name_from_id(u32 gpu_id);
+
+/**
+ * kbase_ipa_init_model - Initialize the particular IPA model
+ * @kbdev:      pointer to kbase device
+ * @ops:        pointer to object containing model specific methods.
+ *
+ * Initialize the model corresponding to the @ops pointer passed.
+ * The init() method specified in @ops would be called.
+ *
+ * Return: pointer to kbase_ipa_model on success, NULL on error
+ */
+struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev,
+					     const struct kbase_ipa_model_ops *ops);
+/**
+ * kbase_ipa_term_model - Terminate the particular IPA model
+ * @model:      pointer to the IPA model object, already initialized
+ *
+ * Terminate the model, using the term() method.
+ * Model-specific parameters are freed.
+ */
+void kbase_ipa_term_model(struct kbase_ipa_model *model);
+
+/**
+ * kbase_ipa_protection_mode_switch_event - Inform IPA of the GPU's entry into
+ *                                          protected mode
+ * @kbdev:      pointer to kbase device
+ *
+ * Makes IPA aware of the GPU switching to protected mode.
+ */
+void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev);
+
+extern const struct kbase_ipa_model_ops kbase_g71_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g72_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g76_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g52_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g52_r1_ipa_model_ops;
+extern const struct kbase_ipa_model_ops kbase_g51_ipa_model_ops;
+
+/**
+ * kbase_get_real_power() - get the real power consumption of the GPU
+ * @df: dynamic voltage and frequency scaling information for the GPU.
+ * @power: where to store the power consumption, in mW.
+ * @freq: a frequency, in Hz.
+ * @voltage: a voltage, in mV.
+ *
+ * The returned value incorporates both static and dynamic power consumption.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_get_real_power(struct devfreq *df, u32 *power,
+				unsigned long freq,
+				unsigned long voltage);
+
+#if MALI_UNIT_TEST
+/* Called by kbase_get_real_power() to invoke the power models.
+ * Must be called with kbdev->ipa.lock held.
+ * This function is only exposed for use by unit tests.
+ */
+int kbase_get_real_power_locked(struct kbase_device *kbdev, u32 *power,
+				unsigned long freq,
+				unsigned long voltage);
+#endif /* MALI_UNIT_TEST */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+extern struct devfreq_cooling_ops kbase_ipa_power_model_ops;
+#else
+extern struct devfreq_cooling_power kbase_ipa_power_model_ops;
+#endif
+
+#else /* !(defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+static inline void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev)
+{ }
+
+#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+#endif
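[Editor's note, not part of the patch] The ops structure above is the whole interface a counter- or platform-specific model has to provide. Below is a minimal sketch of a hypothetical extra model written against this header; the "mali-example-power-model" name and its two parameters are invented, it would only compile inside the kbase driver, and a real model would also need an entry in the kbase_ipa_all_model_ops[] table in mali_kbase_ipa.c (plus an extern declaration here) before kbase_ipa_model_ops_find() could select it.

/* Sketch of a hypothetical IPA model built on the interface declared above. */
#include "mali_kbase.h"
#include "mali_kbase_ipa.h"
#include "mali_kbase_ipa_debugfs.h"

struct example_model_data {
	s32 static_coefficient;		/* uW/V^3 */
	s32 dynamic_coefficient;	/* pW/(Hz V^2) */
};

static int example_model_init(struct kbase_ipa_model *model)
{
	struct example_model_data *data;
	int err;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	model->model_data = data;

	/* Values come from a hypothetical "arm,mali-example-power-model"
	 * DT node; with dt_required == true they are zeroed (with a
	 * warning) when the node is absent.
	 */
	err = kbase_ipa_model_add_param_s32(model, "static-coefficient",
					    &data->static_coefficient, 1, true);
	if (!err)
		err = kbase_ipa_model_add_param_s32(model,
						    "dynamic-coefficient",
						    &data->dynamic_coefficient,
						    1, true);
	if (err) {
		kbase_ipa_model_param_free_all(model);
		kfree(data);
	}
	return err;
}

static void example_model_term(struct kbase_ipa_model *model)
{
	kfree(model->model_data);
}

static int example_model_get_dynamic_coeff(struct kbase_ipa_model *model,
					   u32 *coeffp)
{
	struct example_model_data *data = model->model_data;

	*coeffp = data->dynamic_coefficient;
	return 0;
}

static int example_model_get_static_coeff(struct kbase_ipa_model *model,
					  u32 *coeffp)
{
	struct example_model_data *data = model->model_data;

	*coeffp = data->static_coefficient;
	return 0;
}

/* .recalculate is optional: kbase_ipa_model_recalculate() skips it if NULL. */
const struct kbase_ipa_model_ops kbase_example_ipa_model_ops = {
	.name = "mali-example-power-model",
	.init = example_model_init,
	.term = example_model_term,
	.get_dynamic_coeff = example_model_get_dynamic_coeff,
	.get_static_coeff = example_model_get_static_coeff,
};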
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c
new file mode 100644
index 0000000..6e8c23c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c
@@ -0,0 +1,317 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include "mali_kbase.h"
+#include "mali_kbase_ipa.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+#define DEFINE_DEBUGFS_ATTRIBUTE DEFINE_SIMPLE_ATTRIBUTE
+#endif
+
+struct kbase_ipa_model_param {
+	char *name;
+	union {
+		void *voidp;
+		s32 *s32p;
+		char *str;
+	} addr;
+	size_t size;
+	enum kbase_ipa_model_param_type type;
+	struct kbase_ipa_model *model;
+	struct list_head link;
+};
+
+static int param_int_get(void *data, u64 *val)
+{
+	struct kbase_ipa_model_param *param = data;
+
+	mutex_lock(&param->model->kbdev->ipa.lock);
+	*(s64 *) val = *param->addr.s32p;
+	mutex_unlock(&param->model->kbdev->ipa.lock);
+
+	return 0;
+}
+
+static int param_int_set(void *data, u64 val)
+{
+	struct kbase_ipa_model_param *param = data;
+	struct kbase_ipa_model *model = param->model;
+	s64 sval = (s64) val;
+	s32 old_val;
+	int err = 0;
+
+	if (sval < S32_MIN || sval > S32_MAX)
+		return -ERANGE;
+
+	mutex_lock(&param->model->kbdev->ipa.lock);
+	old_val = *param->addr.s32p;
+	*param->addr.s32p = val;
+	err = kbase_ipa_model_recalculate(model);
+	if (err < 0)
+		*param->addr.s32p = old_val;
+	mutex_unlock(&param->model->kbdev->ipa.lock);
+
+	return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_s32, param_int_get, param_int_set, "%lld\n");
+
+static ssize_t param_string_get(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct kbase_ipa_model_param *param = file->private_data;
+	ssize_t ret;
+	size_t len;
+
+	mutex_lock(&param->model->kbdev->ipa.lock);
+	len = strnlen(param->addr.str, param->size - 1) + 1;
+	ret = simple_read_from_buffer(user_buf, count, ppos,
+				      param->addr.str, len);
+	mutex_unlock(&param->model->kbdev->ipa.lock);
+
+	return ret;
+}
+
+static ssize_t param_string_set(struct file *file, const char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct kbase_ipa_model_param *param = file->private_data;
+	struct kbase_ipa_model *model = param->model;
+	char *old_str = NULL;
+	ssize_t ret = count;
+	size_t buf_size;
+	int err;
+
+	mutex_lock(&model->kbdev->ipa.lock);
+
+	if (count > param->size) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	old_str = kstrndup(param->addr.str, param->size, GFP_KERNEL);
+	if (!old_str) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	buf_size = min(param->size - 1, count);
+	if (copy_from_user(param->addr.str, user_buf, buf_size)) {
+		ret = -EFAULT;
+		goto end;
+	}
+
+	param->addr.str[buf_size] = '\0';
+
+	err = kbase_ipa_model_recalculate(model);
+	if (err < 0) {
+		ret = err;
+		strlcpy(param->addr.str, old_str, param->size);
+	}
+
+end:
+	kfree(old_str);
+	mutex_unlock(&model->kbdev->ipa.lock);
+
+	return ret;
+}
+
+static const struct file_operations fops_string = {
+	.read = param_string_get,
+	.write = param_string_set,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
+			      void *addr, size_t size,
+			      enum kbase_ipa_model_param_type type)
+{
+	struct kbase_ipa_model_param *param;
+
+	param = kzalloc(sizeof(*param), GFP_KERNEL);
+
+	if (!param)
+		return -ENOMEM;
+
+	/* 'name' is stack-allocated for array elements, so copy it into
+	 * heap-allocated storage */
+	param->name = kstrdup(name, GFP_KERNEL);
+
+	if (!param->name) {
+		kfree(param);
+		return -ENOMEM;
+	}
+
+	param->addr.voidp = addr;
+	param->size = size;
+	param->type = type;
+	param->model = model;
+
+	list_add(&param->link, &model->params);
+
+	return 0;
+}
+
+void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_param *param_p, *param_n;
+
+	list_for_each_entry_safe(param_p, param_n, &model->params, link) {
+		list_del(&param_p->link);
+		kfree(param_p->name);
+		kfree(param_p);
+	}
+}
+
+static int force_fallback_model_get(void *data, u64 *val)
+{
+	struct kbase_device *kbdev = data;
+
+	mutex_lock(&kbdev->ipa.lock);
+	*val = kbdev->ipa.force_fallback_model;
+	mutex_unlock(&kbdev->ipa.lock);
+
+	return 0;
+}
+
+static int force_fallback_model_set(void *data, u64 val)
+{
+	struct kbase_device *kbdev = data;
+
+	mutex_lock(&kbdev->ipa.lock);
+	kbdev->ipa.force_fallback_model = (val ? true : false);
+	mutex_unlock(&kbdev->ipa.lock);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_fallback_model,
+		force_fallback_model_get,
+		force_fallback_model_set,
+		"%llu\n");
+
+static int current_power_get(void *data, u64 *val)
+{
+	struct kbase_device *kbdev = data;
+	struct devfreq *df = kbdev->devfreq;
+	u32 power;
+
+	kbase_pm_context_active(kbdev);
+	kbase_get_real_power(df, &power,
+		kbdev->current_nominal_freq, (kbdev->current_voltage / 1000));
+	kbase_pm_context_idle(kbdev);
+
+	*val = power;
+
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(current_power, current_power_get, NULL, "%llu\n");
+
+static void kbase_ipa_model_debugfs_init(struct kbase_ipa_model *model)
+{
+	struct list_head *it;
+	struct dentry *dir;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	dir = debugfs_create_dir(model->ops->name,
+				 model->kbdev->mali_debugfs_directory);
+
+	if (!dir) {
+		dev_err(model->kbdev->dev,
+			"Couldn't create mali debugfs %s directory",
+			model->ops->name);
+		return;
+	}
+
+	list_for_each(it, &model->params) {
+		struct kbase_ipa_model_param *param =
+				list_entry(it,
+					   struct kbase_ipa_model_param,
+					   link);
+		const struct file_operations *fops = NULL;
+
+		switch (param->type) {
+		case PARAM_TYPE_S32:
+			fops = &fops_s32;
+			break;
+		case PARAM_TYPE_STRING:
+			fops = &fops_string;
+			break;
+		}
+
+		if (unlikely(!fops)) {
+			dev_err(model->kbdev->dev,
+				"Type not set for %s parameter %s\n",
+				model->ops->name, param->name);
+		} else {
+			debugfs_create_file(param->name, S_IRUGO | S_IWUSR,
+					    dir, param, fops);
+		}
+	}
+}
+
+void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model,
+	const char *name, s32 val)
+{
+	struct kbase_ipa_model_param *param;
+
+	mutex_lock(&model->kbdev->ipa.lock);
+
+	list_for_each_entry(param, &model->params, link) {
+		if (!strcmp(param->name, name)) {
+			if (param->type == PARAM_TYPE_S32) {
+				*param->addr.s32p = val;
+			} else {
+				dev_err(model->kbdev->dev,
+					"Wrong type for %s parameter %s\n",
+					model->ops->name, param->name);
+			}
+			break;
+		}
+	}
+
+	mutex_unlock(&model->kbdev->ipa.lock);
+		model->ops->name, name, addr, origin);
+KBASE_EXPORT_TEST_API(kbase_ipa_model_param_set_s32);
+
+void kbase_ipa_debugfs_init(struct kbase_device *kbdev)
+{
+	mutex_lock(&kbdev->ipa.lock);
+
+	if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)
+		kbase_ipa_model_debugfs_init(kbdev->ipa.configured_model);
+	kbase_ipa_model_debugfs_init(kbdev->ipa.fallback_model);
+
+	debugfs_create_file("ipa_current_power", 0444,
+		kbdev->mali_debugfs_directory, kbdev, &current_power);
+	debugfs_create_file("ipa_force_fallback_model", 0644,
+		kbdev->mali_debugfs_directory, kbdev, &force_fallback_model);
+
+	mutex_unlock(&kbdev->ipa.lock);
+}
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h
new file mode 100644
index 0000000..a983d9c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.h
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_DEBUGFS_H_
+#define _KBASE_IPA_DEBUGFS_H_
+
+enum kbase_ipa_model_param_type {
+	PARAM_TYPE_S32 = 1,
+	PARAM_TYPE_STRING,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+void kbase_ipa_debugfs_init(struct kbase_device *kbdev);
+int kbase_ipa_model_param_add(struct kbase_ipa_model *model, const char *name,
+			      void *addr, size_t size,
+			      enum kbase_ipa_model_param_type type);
+void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model);
+
+/**
+ * kbase_ipa_model_param_set_s32 - Set an integer model parameter
+ *
+ * @model:	pointer to IPA model
+ * @name:	name of corresponding debugfs entry
+ * @val:	new value of the parameter
+ *
+ * This function is only exposed for use by unit tests running in
+ * kernel space. Normally it is expected that parameter values will
+ * instead be set via debugfs.
+ */
+void kbase_ipa_model_param_set_s32(struct kbase_ipa_model *model,
+	const char *name, s32 val);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline int kbase_ipa_model_param_add(struct kbase_ipa_model *model,
+					    const char *name, void *addr,
+					    size_t size,
+					    enum kbase_ipa_model_param_type type)
+{
+	return 0;
+}
+
+static inline void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model)
+{ }
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _KBASE_IPA_DEBUGFS_H_ */
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c
new file mode 100644
index 0000000..c8399ab
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c
@@ -0,0 +1,351 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <uapi/linux/thermal.h>
+#include <linux/thermal.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include "mali_kbase.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_ipa_simple.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+#if MALI_UNIT_TEST
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static unsigned long dummy_temp;
+
+static int kbase_simple_power_model_get_dummy_temp(
+	struct thermal_zone_device *tz,
+	unsigned long *temp)
+{
+	*temp = READ_ONCE(dummy_temp);
+	return 0;
+}
+
+#else
+static int dummy_temp;
+
+static int kbase_simple_power_model_get_dummy_temp(
+	struct thermal_zone_device *tz,
+	int *temp)
+{
+	*temp = READ_ONCE(dummy_temp);
+	return 0;
+}
+#endif
+
+/* Intercept calls to the kernel function using a macro */
+#ifdef thermal_zone_get_temp
+#undef thermal_zone_get_temp
+#endif
+#define thermal_zone_get_temp(tz, temp) \
+	kbase_simple_power_model_get_dummy_temp(tz, temp)
+
+void kbase_simple_power_model_set_dummy_temp(int temp)
+{
+	WRITE_ONCE(dummy_temp, temp);
+}
+KBASE_EXPORT_TEST_API(kbase_simple_power_model_set_dummy_temp);
+
+#endif /* MALI_UNIT_TEST */
+
+/*
+ * This model is primarily designed for the Juno platform. It may not be
+ * suitable for other platforms. The additional resources in this model
+ * should preferably be minimal, as this model is rarely used when a dynamic
+ * model is available.
+ */
+
+/**
+ * struct kbase_ipa_model_simple_data - IPA context per device
+ * @dynamic_coefficient: dynamic coefficient of the model
+ * @static_coefficient:  static coefficient of the model
+ * @ts:                  Thermal scaling coefficients of the model
+ * @tz_name:             Thermal zone name
+ * @gpu_tz:              thermal zone device
+ * @poll_temperature_thread: Handle for temperature polling thread
+ * @current_temperature: Most recent value of polled temperature
+ * @temperature_poll_interval_ms: How often temperature should be checked, in ms
+ */
+
+struct kbase_ipa_model_simple_data {
+	u32 dynamic_coefficient;
+	u32 static_coefficient;
+	s32 ts[4];
+	char tz_name[THERMAL_NAME_LENGTH];
+	struct thermal_zone_device *gpu_tz;
+	struct task_struct *poll_temperature_thread;
+	int current_temperature;
+	int temperature_poll_interval_ms;
+};
+#define FALLBACK_STATIC_TEMPERATURE 55000
+
+/**
+ * calculate_temp_scaling_factor() - Calculate temperature scaling coefficient
+ * @ts:		Signed coefficients, in order t^0 to t^3, with units Deg^-N
+ * @t:		Temperature, in mDeg C. Range: -2^17 < t < 2^17
+ *
+ * Scale the temperature according to a cubic polynomial whose coefficients are
+ * provided in the device tree. The result is used to scale the static power
+ * coefficient, where 1000000 means no change.
+ *
+ * Return: Temperature scaling factor. Range 0 <= ret <= 10,000,000.
+ */
+static u32 calculate_temp_scaling_factor(s32 ts[4], s64 t)
+{
+	/* Range: -2^24 < t2 < 2^24 m(Deg^2) */
+	const s64 t2 = div_s64((t * t), 1000);
+
+	/* Range: -2^31 < t3 < 2^31 m(Deg^3) */
+	const s64 t3 = div_s64((t * t2), 1000);
+
+	/*
+	 * Sum the parts. t^[1-3] are in m(Deg^N), but the coefficients are in
+	 * Deg^-N, so we need to multiply the last coefficient by 1000.
+	 * Range: -2^63 < res_big < 2^63
+	 */
+	const s64 res_big = ts[3] * t3    /* +/- 2^62 */
+			  + ts[2] * t2    /* +/- 2^55 */
+			  + ts[1] * t     /* +/- 2^48 */
+			  + ts[0] * 1000; /* +/- 2^41 */
+
+	/* Range: -2^60 < res_unclamped < 2^60 */
+	s64 res_unclamped = div_s64(res_big, 1000);
+
+	/* Clamp to the range 0x to 10x the static power (1000000 = 1x) */
+	return clamp(res_unclamped, (s64) 0, (s64) 10000000);
+}
+
+/* We can't call thermal_zone_get_temp() directly in model_static_coeff(),
+ * because we don't know if tz->lock is held in the same thread. So poll it in
+ * a separate thread to get around this. */
+static int poll_temperature(void *data)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *) data;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+	unsigned long temp;
+#else
+	int temp;
+#endif
+
+	while (!kthread_should_stop()) {
+		struct thermal_zone_device *tz = READ_ONCE(model_data->gpu_tz);
+
+		if (tz) {
+			int ret;
+
+			ret = thermal_zone_get_temp(tz, &temp);
+			if (ret) {
+				pr_warn_ratelimited("Error reading temperature for gpu thermal zone: %d\n",
+						    ret);
+				temp = FALLBACK_STATIC_TEMPERATURE;
+			}
+		} else {
+			temp = FALLBACK_STATIC_TEMPERATURE;
+		}
+
+		WRITE_ONCE(model_data->current_temperature, temp);
+
+		msleep_interruptible(READ_ONCE(model_data->temperature_poll_interval_ms));
+	}
+
+	return 0;
+}
+
+static int model_static_coeff(struct kbase_ipa_model *model, u32 *coeffp)
+{
+	u32 temp_scaling_factor;
+	struct kbase_ipa_model_simple_data *model_data =
+		(struct kbase_ipa_model_simple_data *) model->model_data;
+	u64 coeff_big;
+	int temp;
+
+	temp = READ_ONCE(model_data->current_temperature);
+
+	/* Range: 0 <= temp_scaling_factor < 2^24 */
+	temp_scaling_factor = calculate_temp_scaling_factor(model_data->ts,
+							    temp);
+
+	/*
+	 * Range: 0 <= coeff_big < 2^52 to avoid overflowing *coeffp. This
+	 * means static_coefficient must be in range
+	 * 0 <= static_coefficient < 2^28.
+	 */
+	coeff_big = (u64) model_data->static_coefficient * (u64) temp_scaling_factor;
+	*coeffp = div_u64(coeff_big, 1000000);
+
+	return 0;
+}
+
+static int model_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+		(struct kbase_ipa_model_simple_data *) model->model_data;
+
+	*coeffp = model_data->dynamic_coefficient;
+
+	return 0;
+}
+
+static int add_params(struct kbase_ipa_model *model)
+{
+	int err = 0;
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *)model->model_data;
+
+	err = kbase_ipa_model_add_param_s32(model, "static-coefficient",
+					    &model_data->static_coefficient,
+					    1, true);
+	if (err)
+		goto end;
+
+	err = kbase_ipa_model_add_param_s32(model, "dynamic-coefficient",
+					    &model_data->dynamic_coefficient,
+					    1, true);
+	if (err)
+		goto end;
+
+	err = kbase_ipa_model_add_param_s32(model, "ts",
+					    model_data->ts, 4, true);
+	if (err)
+		goto end;
+
+	err = kbase_ipa_model_add_param_string(model, "thermal-zone",
+					       model_data->tz_name,
+					       sizeof(model_data->tz_name), true);
+	if (err)
+		goto end;
+
+	model_data->temperature_poll_interval_ms = 200;
+	err = kbase_ipa_model_add_param_s32(model, "temp-poll-interval-ms",
+					    &model_data->temperature_poll_interval_ms,
+					    1, false);
+
+end:
+	return err;
+}
+
+static int kbase_simple_power_model_init(struct kbase_ipa_model *model)
+{
+	int err;
+	struct kbase_ipa_model_simple_data *model_data;
+
+	model_data = kzalloc(sizeof(struct kbase_ipa_model_simple_data),
+			     GFP_KERNEL);
+	if (!model_data)
+		return -ENOMEM;
+
+	model->model_data = (void *) model_data;
+
+	model_data->current_temperature = FALLBACK_STATIC_TEMPERATURE;
+	model_data->poll_temperature_thread = kthread_run(poll_temperature,
+							  (void *) model_data,
+							  "mali-simple-power-model-temp-poll");
+	if (IS_ERR(model_data->poll_temperature_thread)) {
+		err = PTR_ERR(model_data->poll_temperature_thread);
+		kfree(model_data);
+		return err;
+	}
+
+	err = add_params(model);
+	if (err) {
+		kbase_ipa_model_param_free_all(model);
+		kthread_stop(model_data->poll_temperature_thread);
+		kfree(model_data);
+	}
+
+	return err;
+}
+
+static int kbase_simple_power_model_recalculate(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *)model->model_data;
+	struct thermal_zone_device *tz;
+
+	lockdep_assert_held(&model->kbdev->ipa.lock);
+
+	if (!strnlen(model_data->tz_name, sizeof(model_data->tz_name))) {
+		model_data->gpu_tz = NULL;
+	} else {
+		char tz_name[THERMAL_NAME_LENGTH];
+
+		strlcpy(tz_name, model_data->tz_name, sizeof(tz_name));
+
+		/* Release ipa.lock so that thermal_list_lock is not acquired
+		 * with ipa.lock held, thereby avoiding a lock-ordering
+		 * lockdep warning. The chain of locks
+		 * ipa.lock --> thermal_list_lock --> tz->lock is formed when
+		 * the devfreq cooling device is registered from the probe
+		 * method of the Mali platform driver.
+		 */
+		mutex_unlock(&model->kbdev->ipa.lock);
+		tz = thermal_zone_get_zone_by_name(tz_name);
+		mutex_lock(&model->kbdev->ipa.lock);
+
+		if (IS_ERR_OR_NULL(tz)) {
+			pr_warn_ratelimited("Error %ld getting thermal zone \'%s\', not yet ready?\n",
+					    PTR_ERR(tz), tz_name);
+			return -EPROBE_DEFER;
+		}
+
+		/* Check if another thread raced against us & updated the
+		 * thermal zone name string. Update the gpu_tz pointer only if
+		 * the name string did not change whilst we retrieved the new
+		 * thermal_zone_device pointer, otherwise model_data->tz_name &
+		 * model_data->gpu_tz would become inconsistent with each other.
+		 * The below check will succeed only for the thread which last
+		 * updated the name string.
+		 */
+		if (strncmp(tz_name, model_data->tz_name, sizeof(tz_name)) == 0)
+			model_data->gpu_tz = tz;
+	}
+
+	return 0;
+}
+
+static void kbase_simple_power_model_term(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_simple_data *model_data =
+			(struct kbase_ipa_model_simple_data *)model->model_data;
+
+	kthread_stop(model_data->poll_temperature_thread);
+
+	kfree(model_data);
+}
+
+struct kbase_ipa_model_ops kbase_simple_ipa_model_ops = {
+		.name = "mali-simple-power-model",
+		.init = &kbase_simple_power_model_init,
+		.recalculate = &kbase_simple_power_model_recalculate,
+		.term = &kbase_simple_power_model_term,
+		.get_dynamic_coeff = &model_dynamic_coeff,
+		.get_static_coeff = &model_static_coeff,
+};
+KBASE_EXPORT_TEST_API(kbase_simple_ipa_model_ops);
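[Editor's note, not part of the patch] To make the interaction of the thermal-zone temperature, the ts[] polynomial and the static coefficient concrete, the user-space sketch below reproduces the arithmetic of calculate_temp_scaling_factor() and applies it the way model_static_coeff() does; the ts[] values, the coefficient and the 65 degC reading are invented for the example.

/* Host-side sketch of the cubic temperature scaling used by the simple model.
 * Illustrative only: all numeric inputs below are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t temp_scaling_factor(const int32_t ts[4], int64_t t_mdeg)
{
	int64_t t2 = (t_mdeg * t_mdeg) / 1000;	/* m(Deg^2) */
	int64_t t3 = (t_mdeg * t2) / 1000;	/* m(Deg^3) */
	int64_t res = (ts[3] * t3 + ts[2] * t2 + ts[1] * t_mdeg +
		       (int64_t)ts[0] * 1000) / 1000;

	/* Clamp to 0x..10x, where 1000000 means no change. */
	if (res < 0)
		res = 0;
	if (res > 10000000)
		res = 10000000;
	return (uint32_t)res;
}

int main(void)
{
	const int32_t ts[4] = { 1000000, -5000, 100, 0 };	/* example only */
	uint32_t static_coefficient = 900000;	/* uW/V^3, example only */
	int64_t t = 65000;			/* 65 degC, in mDeg C */
	uint32_t scale = temp_scaling_factor(ts, t);

	printf("scale = %u (1000000 = 1.0x)\n", (unsigned)scale);
	printf("scaled static coefficient = %llu uW/V^3\n",
	       (unsigned long long)((uint64_t)static_coefficient * scale /
				    1000000));
	return 0;
}

With these made-up inputs the scale comes out at about 1.1x, i.e. the static coefficient handed to the framework grows from 900000 to roughly 987750 uW/V^3 at 65 degC.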
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h
new file mode 100644
index 0000000..fed67d5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_SIMPLE_H_
+#define _KBASE_IPA_SIMPLE_H_
+
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+
+extern struct kbase_ipa_model_ops kbase_simple_ipa_model_ops;
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_simple_power_model_set_dummy_temp() - set a dummy temperature value
+ * @temp: Temperature of the thermal zone, in millidegrees celsius.
+ *
+ * This is only intended for use in unit tests, to ensure that the temperature
+ * values used by the simple power model are predictable. Deterministic
+ * behavior is necessary to allow validation of the static power values
+ * computed by this model.
+ */
+void kbase_simple_power_model_set_dummy_temp(int temp);
+#endif /* MALI_UNIT_TEST */
+
+#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
+
+#endif /* _KBASE_IPA_SIMPLE_H_ */
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c
new file mode 100644
index 0000000..1a6ba01
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c
@@ -0,0 +1,339 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+#define DEFAULT_SCALING_FACTOR 5
+
+/* If the value of GPU_ACTIVE is below this, use the simple model
+ * instead, to avoid extrapolating small amounts of counter data across
+ * large sample periods.
+ */
+#define DEFAULT_MIN_SAMPLE_CYCLES 10000
+
+/**
+ * kbase_ipa_read_hwcnt() - read a counter value
+ * @model_data:		pointer to model data
+ * @offset:		offset, in bytes, into vinstr buffer
+ *
+ * Return: A 32-bit counter value. Range: 0 < value < 2^27 (worst case would be
+ * incrementing every cycle over a ~100 ms sample period at a high frequency,
+ * e.g. 1 GHz: 2^30 * 0.1 seconds ~= 2^27).
+ */
+static inline u32 kbase_ipa_read_hwcnt(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	u32 offset)
+{
+	u8 *p = (u8 *)model_data->dump_buf.dump_buf;
+
+	return *(u32 *)&p[offset];
+}
+
+static inline s64 kbase_ipa_add_saturate(s64 a, s64 b)
+{
+	if (S64_MAX - a < b)
+		return S64_MAX;
+	return a + b;
+}
+
+s64 kbase_ipa_sum_all_shader_cores(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter)
+{
+	struct kbase_device *kbdev = model_data->kbdev;
+	u64 core_mask;
+	u32 base = 0;
+	s64 ret = 0;
+
+	core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+	while (core_mask != 0ull) {
+		if ((core_mask & 1ull) != 0ull) {
+			/* 0 < counter_value < 2^27 */
+			u32 counter_value = kbase_ipa_read_hwcnt(model_data,
+						       base + counter);
+
+			/* 0 < ret < 2^27 * max_num_cores = 2^32 */
+			ret = kbase_ipa_add_saturate(ret, counter_value);
+		}
+		base += KBASE_IPA_NR_BYTES_PER_BLOCK;
+		core_mask >>= 1;
+	}
+
+	/* Range: -2^54 < ret * coeff < 2^54 */
+	return ret * coeff;
+}
+
+s64 kbase_ipa_sum_all_memsys_blocks(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter)
+{
+	struct kbase_device *kbdev = model_data->kbdev;
+	const u32 num_blocks = kbdev->gpu_props.props.l2_props.num_l2_slices;
+	u32 base = 0;
+	s64 ret = 0;
+	u32 i;
+
+	for (i = 0; i < num_blocks; i++) {
+		/* 0 < counter_value < 2^27 */
+		u32 counter_value = kbase_ipa_read_hwcnt(model_data,
+					       base + counter);
+
+		/* 0 < ret < 2^27 * max_num_memsys_blocks = 2^29 */
+		ret = kbase_ipa_add_saturate(ret, counter_value);
+		base += KBASE_IPA_NR_BYTES_PER_BLOCK;
+	}
+
+	/* Range: -2^51 < ret * coeff < 2^51 */
+	return ret * coeff;
+}
+
+s64 kbase_ipa_single_counter(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter)
+{
+	/* Range: 0 < counter_value < 2^27 */
+	const u32 counter_value = kbase_ipa_read_hwcnt(model_data, counter);
+
+	/* Range: -2^49 < ret < 2^49 */
+	return counter_value * (s64) coeff;
+}
+
+int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
+{
+	int errcode;
+	struct kbase_device *kbdev = model_data->kbdev;
+	struct kbase_hwcnt_virtualizer *hvirt = kbdev->hwcnt_gpu_virt;
+	struct kbase_hwcnt_enable_map enable_map;
+	const struct kbase_hwcnt_metadata *metadata =
+		kbase_hwcnt_virtualizer_metadata(hvirt);
+
+	if (!metadata)
+		return -1;
+
+	errcode = kbase_hwcnt_enable_map_alloc(metadata, &enable_map);
+	if (errcode) {
+		dev_err(kbdev->dev, "Failed to allocate IPA enable map");
+		return errcode;
+	}
+
+	kbase_hwcnt_enable_map_enable_all(&enable_map);
+
+	errcode = kbase_hwcnt_virtualizer_client_create(
+		hvirt, &enable_map, &model_data->hvirt_cli);
+	kbase_hwcnt_enable_map_free(&enable_map);
+	if (errcode) {
+		dev_err(kbdev->dev, "Failed to register IPA with virtualizer");
+		model_data->hvirt_cli = NULL;
+		return errcode;
+	}
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(
+		metadata, &model_data->dump_buf);
+	if (errcode) {
+		dev_err(kbdev->dev, "Failed to allocate IPA dump buffer");
+		kbase_hwcnt_virtualizer_client_destroy(model_data->hvirt_cli);
+		model_data->hvirt_cli = NULL;
+		return errcode;
+	}
+
+	return 0;
+}
+
+void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
+{
+	if (model_data->hvirt_cli) {
+		kbase_hwcnt_virtualizer_client_destroy(model_data->hvirt_cli);
+		kbase_hwcnt_dump_buffer_free(&model_data->dump_buf);
+		model_data->hvirt_cli = NULL;
+	}
+}
+
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp)
+{
+	struct kbase_ipa_model_vinstr_data *model_data =
+			(struct kbase_ipa_model_vinstr_data *)model->model_data;
+	s64 energy = 0;
+	size_t i;
+	u64 coeff = 0, coeff_mul = 0;
+	u64 start_ts_ns, end_ts_ns;
+	u32 active_cycles;
+	int err = 0;
+
+	err = kbase_hwcnt_virtualizer_client_dump(model_data->hvirt_cli,
+		&start_ts_ns, &end_ts_ns, &model_data->dump_buf);
+	if (err)
+		goto err0;
+
+	/* Range: 0 (GPU not used at all) up to the maximum sampling interval
+	 * (say 1 s) multiplied by the maximum GPU frequency (GPU 100%
+	 * utilized):
+	 * 0 <= active_cycles <= 1 s * ~2 GHz
+	 * 0 <= active_cycles < 2^31
+	 */
+	active_cycles = model_data->get_active_cycles(model_data);
+
+	if (active_cycles < (u32) max(model_data->min_sample_cycles, 0)) {
+		err = -ENODATA;
+		goto err0;
+	}
+
+	/* Range: 1 <= active_cycles < 2^31 */
+	active_cycles = max(1u, active_cycles);
+
+	/* Range of 'energy' is +/- 2^54 * number of IPA groups (~8), so around
+	 * -2^57 < energy < 2^57
+	 */
+	for (i = 0; i < model_data->groups_def_num; i++) {
+		const struct kbase_ipa_group *group = &model_data->groups_def[i];
+		s32 coeff = model_data->group_values[i];
+		s64 group_energy = group->op(model_data, coeff,
+					     group->counter_block_offset);
+
+		energy = kbase_ipa_add_saturate(energy, group_energy);
+	}
+
+	/* Range: 0 <= coeff < 2^57 */
+	if (energy > 0)
+		coeff = energy;
+
+	/* Range: 0 <= coeff < 2^57 (because active_cycles >= 1). However, this
+	 * can be constrained further: Counter values can only be increased by
+	 * a theoretical maximum of about 64k per clock cycle. Beyond this,
+	 * we'd have to sample every 1ms to avoid them overflowing at the
+	 * lowest clock frequency (say 100MHz). Therefore, we can write the
+	 * range of 'coeff' in terms of active_cycles:
+	 *
+	 * coeff = SUM(coeffN * counterN * num_cores_for_counterN)
+	 * coeff <= SUM(coeffN * counterN) * max_num_cores
+	 * coeff <= num_IPA_groups * max_coeff * max_counter * max_num_cores
+	 *       (substitute max_counter = 2^16 * active_cycles)
+	 * coeff <= num_IPA_groups * max_coeff * 2^16 * active_cycles * max_num_cores
+	 * coeff <=    2^3         *    2^22   * 2^16 * active_cycles * 2^5
+	 * coeff <= 2^46 * active_cycles
+	 *
+	 * So after the division: 0 <= coeff <= 2^46
+	 */
+	coeff = div_u64(coeff, active_cycles);
+
+	/* Not all models were derived at the same reference voltage. Voltage
+	 * scaling is done by multiplying by V^2, so we need to *divide* by
+	 * Vref^2 here.
+	 * Range: 0 <= coeff <= 2^49
+	 */
+	coeff = div_u64(coeff * 1000, max(model_data->reference_voltage, 1));
+	/* Range: 0 <= coeff <= 2^52 */
+	coeff = div_u64(coeff * 1000, max(model_data->reference_voltage, 1));
+
+	/* Scale by user-specified integer factor.
+	 * Range: 0 <= coeff_mul < 2^57
+	 */
+	coeff_mul = coeff * model_data->scaling_factor;
+
+	/* The power models have results with units
+	 * mW/(MHz V^2), i.e. nW/(Hz V^2). With precision of 1/1000000, this
+	 * becomes fW/(Hz V^2), which are the units of coeff_mul. However,
+	 * kbase_scale_dynamic_power() expects units of pW/(Hz V^2), so divide
+	 * by 1000.
+	 * Range: 0 <= coeff_mul < 2^47
+	 */
+	coeff_mul = div_u64(coeff_mul, 1000u);
+
+err0:
+	/* Clamp to a sensible range - 2^16 gives about 14W at 400MHz/750mV */
+	*coeffp = clamp(coeff_mul, (u64) 0, (u64) 1 << 16);
+	return err;
+}
+
+int kbase_ipa_vinstr_common_model_init(struct kbase_ipa_model *model,
+				       const struct kbase_ipa_group *ipa_groups_def,
+				       size_t ipa_group_size,
+				       kbase_ipa_get_active_cycles_callback get_active_cycles,
+				       s32 reference_voltage)
+{
+	int err = 0;
+	size_t i;
+	struct kbase_ipa_model_vinstr_data *model_data;
+
+	if (!model || !ipa_groups_def || !ipa_group_size || !get_active_cycles)
+		return -EINVAL;
+
+	model_data = kzalloc(sizeof(*model_data), GFP_KERNEL);
+	if (!model_data)
+		return -ENOMEM;
+
+	model_data->kbdev = model->kbdev;
+	model_data->groups_def = ipa_groups_def;
+	model_data->groups_def_num = ipa_group_size;
+	model_data->get_active_cycles = get_active_cycles;
+
+	model->model_data = (void *) model_data;
+
+	for (i = 0; i < model_data->groups_def_num; ++i) {
+		const struct kbase_ipa_group *group = &model_data->groups_def[i];
+
+		model_data->group_values[i] = group->default_value;
+		err = kbase_ipa_model_add_param_s32(model, group->name,
+					&model_data->group_values[i],
+					1, false);
+		if (err)
+			goto exit;
+	}
+
+	model_data->scaling_factor = DEFAULT_SCALING_FACTOR;
+	err = kbase_ipa_model_add_param_s32(model, "scale",
+					    &model_data->scaling_factor,
+					    1, false);
+	if (err)
+		goto exit;
+
+	model_data->min_sample_cycles = DEFAULT_MIN_SAMPLE_CYCLES;
+	err = kbase_ipa_model_add_param_s32(model, "min_sample_cycles",
+					    &model_data->min_sample_cycles,
+					    1, false);
+	if (err)
+		goto exit;
+
+	model_data->reference_voltage = reference_voltage;
+	err = kbase_ipa_model_add_param_s32(model, "reference_voltage",
+					    &model_data->reference_voltage,
+					    1, false);
+	if (err)
+		goto exit;
+
+	err = kbase_ipa_attach_vinstr(model_data);
+
+exit:
+	if (err) {
+		kbase_ipa_model_param_free_all(model);
+		kfree(model_data);
+	}
+	return err;
+}
+
+void kbase_ipa_vinstr_common_model_term(struct kbase_ipa_model *model)
+{
+	struct kbase_ipa_model_vinstr_data *model_data =
+			(struct kbase_ipa_model_vinstr_data *)model->model_data;
+
+	kbase_ipa_detach_vinstr(model_data);
+	kfree(model_data);
+}
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h
new file mode 100644
index 0000000..46e3cd4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h
@@ -0,0 +1,217 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IPA_VINSTR_COMMON_H_
+#define _KBASE_IPA_VINSTR_COMMON_H_
+
+#include "mali_kbase.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_types.h"
+
+/* Maximum number of IPA groups for an IPA model. */
+#define KBASE_IPA_MAX_GROUP_DEF_NUM  16
+
+/* Number of bytes per hardware counter in a vinstr_buffer. */
+#define KBASE_IPA_NR_BYTES_PER_CNT    4
+
+/* Number of hardware counters per block in a vinstr_buffer. */
+#define KBASE_IPA_NR_CNT_PER_BLOCK   64
+
+/* Number of bytes per block in a vinstr_buffer. */
+#define KBASE_IPA_NR_BYTES_PER_BLOCK \
+	(KBASE_IPA_NR_CNT_PER_BLOCK * KBASE_IPA_NR_BYTES_PER_CNT)
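+
+/* With 64 counters of 4 bytes each, every block therefore occupies 256 bytes
+ * in the dump buffer; this is the stride used by kbase_ipa_sum_all_shader_cores()
+ * and kbase_ipa_sum_all_memsys_blocks() when stepping from one block to the next.
+ */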
+
+struct kbase_ipa_model_vinstr_data;
+
+typedef u32 (*kbase_ipa_get_active_cycles_callback)(struct kbase_ipa_model_vinstr_data *);
+
+/**
+ * struct kbase_ipa_model_vinstr_data - IPA context per device
+ * @kbdev:               pointer to kbase device
+ * @groups_def:          Array of IPA groups.
+ * @groups_def_num:      Number of elements in the array of IPA groups.
+ * @get_active_cycles:   Callback to return number of active cycles during
+ *                       counter sample period
+ * @hvirt_cli:           hardware counter virtualizer client handle
+ * @dump_buf:            buffer to dump hardware counters onto
+ * @reference_voltage:   voltage, in mV, of the operating point used when
+ *                       deriving the power model coefficients. Range approx
+ *                       0.1 V - 5 V (2^13 mV ~= 8 V): 2^7 <= reference_voltage <= 2^13
+ * @scaling_factor:      User-specified power scaling factor. This is an
+ *                       integer, which is multiplied by the power coefficient
+ *                       just before OPP scaling.
+ *                       Range approx 0-32: 0 < scaling_factor < 2^5
+ * @min_sample_cycles:   If the value of the GPU_ACTIVE counter (the number of
+ *                       cycles the GPU was working) is less than
+ *                       min_sample_cycles, the counter model will return an
+ *                       error, causing the IPA framework to approximate using
+ *                       the cached simple model results instead. This may be
+ *                       more accurate than extrapolating using a very small
+ *                       counter dump.
+ */
+struct kbase_ipa_model_vinstr_data {
+	struct kbase_device *kbdev;
+	s32 group_values[KBASE_IPA_MAX_GROUP_DEF_NUM];
+	const struct kbase_ipa_group *groups_def;
+	size_t groups_def_num;
+	kbase_ipa_get_active_cycles_callback get_active_cycles;
+	struct kbase_hwcnt_virtualizer_client *hvirt_cli;
+	struct kbase_hwcnt_dump_buffer dump_buf;
+	s32 reference_voltage;
+	s32 scaling_factor;
+	s32 min_sample_cycles;
+};
+
+/**
+ * struct ipa_group - represents a single IPA group
+ * @name:               name of the IPA group
+ * @default_value:      default value of coefficient for IPA group.
+ *                      Coefficients are interpreted as fractions where the
+ *                      denominator is 1000000.
+ * @op:                 which operation to be performed on the counter values
+ * @counter_block_offset:  block offset in bytes of the counter used to calculate energy for IPA group
+ */
+struct kbase_ipa_group {
+	const char *name;
+	s32 default_value;
+	s64 (*op)(struct kbase_ipa_model_vinstr_data *, s32, u32);
+	u32 counter_block_offset;
+};
+
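+/*
+ * As an illustration, the Mali-G71 group table in mali_kbase_ipa_vinstr_g7x.c
+ * gives its "l2_access" group a default_value of 526300, i.e. a weight of
+ * 0.5263 (526300 / 1000000) applied to the MEMSYS_L2_ANY_LOOKUP counter
+ * summed over all memory system blocks.
+ */
+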
+/**
+ * kbase_ipa_sum_all_shader_cores() - sum a counter over all cores
+ * @model_data:		pointer to model data
+ * @coeff:		model coefficient. Unity is ~2^20, so range approx
+ *			+/- 4.0: -2^22 < coeff < 2^22
+ * @counter:		offset in bytes of the counter used to calculate energy
+ *			for IPA group
+ *
+ * Calculate energy estimation based on hardware counter `counter'
+ * across all shader cores.
+ *
+ * Return: Sum of counter values. Range: -2^54 < ret < 2^54
+ */
+s64 kbase_ipa_sum_all_shader_cores(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_sum_all_memsys_blocks() - sum a counter over all mem system blocks
+ * @model_data:		pointer to model data
+ * @coeff:		model coefficient. Unity is ~2^20, so range approx
+ *			+/- 4.0: -2^22 < coeff < 2^22
+ * @counter:		offset in bytes of the counter used to calculate energy
+ *			for IPA group
+ *
+ * Calculate energy estimation based on hardware counter `counter' across all
+ * memory system blocks.
+ *
+ * Return: Sum of counter values. Range: -2^51 < ret < 2^51
+ */
+s64 kbase_ipa_sum_all_memsys_blocks(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_single_counter() - sum a single counter
+ * @model_data:		pointer to model data
+ * @coeff:		model coefficient. Unity is ~2^20, so range approx
+ *			+/- 4.0: -2^22 < coeff < 2^22
+ * @counter:		offset in bytes of the counter used to calculate energy
+ *			for IPA group
+ *
+ * Calculate energy estimation based on hardware counter `counter'.
+ *
+ * Return: Counter value. Range: -2^49 < ret < 2^49
+ */
+s64 kbase_ipa_single_counter(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_attach_vinstr() - attach a vinstr_buffer to an IPA model.
+ * @model_data:		pointer to model data
+ *
+ * Attach a vinstr_buffer to an IPA model. The vinstr_buffer
+ * allows access to the hardware counters used to calculate
+ * energy consumption.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
+
+/**
+ * kbase_ipa_detach_vinstr() - detach a vinstr_buffer from an IPA model.
+ * @model_data:		pointer to model data
+ *
+ * Detach a vinstr_buffer from an IPA model.
+ */
+void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
+
+/**
+ * kbase_ipa_vinstr_dynamic_coeff() - calculate dynamic power based on HW counters
+ * @model:		pointer to instantiated model
+ * @coeffp:		pointer to location where calculated power, in
+ *			pW/(Hz V^2), is stored.
+ *
+ * This is a GPU-agnostic implementation of the get_dynamic_coeff()
+ * function of an IPA model. It relies on the model being populated
+ * with GPU-specific attributes at initialization time.
+ *
+ * Return: 0 on success, or an error code.
+ */
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp);
+
+/**
+ * kbase_ipa_vinstr_common_model_init() - initialize ipa power model
+ * @model:		ipa power model to initialize
+ * @ipa_groups_def:	array of ipa groups which sets coefficients for
+ *			the corresponding counters used in the ipa model
+ * @ipa_group_size:     number of elements in the array @ipa_groups_def
+ * @get_active_cycles:  callback to return the number of cycles the GPU was
+ *			active during the counter sample period.
+ * @reference_voltage:  voltage, in mV, of the operating point used when
+ *                      deriving the power model coefficients.
+ *
+ * This initialization function performs initialization steps common
+ * for ipa models based on counter values. In each call, the model
+ * passes its specific coefficient values per ipa counter group via
+ * @ipa_groups_def array.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int kbase_ipa_vinstr_common_model_init(struct kbase_ipa_model *model,
+				       const struct kbase_ipa_group *ipa_groups_def,
+				       size_t ipa_group_size,
+				       kbase_ipa_get_active_cycles_callback get_active_cycles,
+				       s32 reference_voltage);
+
+/**
+ * kbase_ipa_vinstr_common_model_term() - terminate ipa power model
+ * @model: ipa power model to terminate
+ *
+ * This function performs all necessary steps to terminate ipa power model
+ * including clean up of resources allocated to hold model data.
+ */
+void kbase_ipa_vinstr_common_model_term(struct kbase_ipa_model *model);
+
+#endif /* _KBASE_IPA_VINSTR_COMMON_H_ */
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c
new file mode 100644
index 0000000..6365d2f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c
@@ -0,0 +1,383 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include <linux/thermal.h>
+
+#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+
+/* Performance counter blocks base offsets */
+#define JM_BASE             (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define TILER_BASE          (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define MEMSYS_BASE         (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+
+/* JM counter block offsets */
+#define JM_GPU_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT *  6)
+
+/* Tiler counter block offsets */
+#define TILER_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 45)
+
+/* MEMSYS counter block offsets */
+#define MEMSYS_L2_ANY_LOOKUP (KBASE_IPA_NR_BYTES_PER_CNT * 25)
+
+/* SC counter block offsets */
+#define SC_FRAG_ACTIVE             (KBASE_IPA_NR_BYTES_PER_CNT *  4)
+#define SC_EXEC_CORE_ACTIVE        (KBASE_IPA_NR_BYTES_PER_CNT * 26)
+#define SC_EXEC_INSTR_COUNT        (KBASE_IPA_NR_BYTES_PER_CNT * 28)
+#define SC_TEX_COORD_ISSUE         (KBASE_IPA_NR_BYTES_PER_CNT * 40)
+#define SC_TEX_TFCH_NUM_OPERATIONS (KBASE_IPA_NR_BYTES_PER_CNT * 42)
+#define SC_VARY_INSTR              (KBASE_IPA_NR_BYTES_PER_CNT * 49)
+#define SC_VARY_SLOT_32            (KBASE_IPA_NR_BYTES_PER_CNT * 50)
+#define SC_VARY_SLOT_16            (KBASE_IPA_NR_BYTES_PER_CNT * 51)
+#define SC_BEATS_RD_LSC            (KBASE_IPA_NR_BYTES_PER_CNT * 56)
+#define SC_BEATS_WR_LSC            (KBASE_IPA_NR_BYTES_PER_CNT * 61)
+#define SC_BEATS_WR_TIB            (KBASE_IPA_NR_BYTES_PER_CNT * 62)
+
+/**
+ * kbase_g7x_power_model_get_jm_counter() - get performance counter offset inside the Job Manager block
+ * @model_data:            pointer to GPU model data.
+ * @counter_block_offset:  offset in bytes of the performance counter inside the Job Manager block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_jm_counter(struct kbase_ipa_model_vinstr_data *model_data,
+						u32 counter_block_offset)
+{
+	return JM_BASE + counter_block_offset;
+}
+
+/**
+ * kbase_g7x_power_model_get_memsys_counter() - get performance counter offset inside the Memory System block
+ * @model_data:            pointer to GPU model data.
+ * @counter_block_offset:  offset in bytes of the performance counter inside the (first) Memory System block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_memsys_counter(struct kbase_ipa_model_vinstr_data *model_data,
+						    u32 counter_block_offset)
+{
+	/* The base address of Memory System performance counters is always
+	 * the same, although their number may vary based on the number of
+	 * cores. For the moment it's OK to return a constant.
+	 */
+	return MEMSYS_BASE + counter_block_offset;
+}
+
+/**
+ * kbase_g7x_power_model_get_sc_counter() - get performance counter offset inside the Shader Cores block
+ * @model_data:            pointer to GPU model data.
+ * @counter_block_offset:  offset in bytes of the performance counter inside the (first) Shader Cores block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_sc_counter(struct kbase_ipa_model_vinstr_data *model_data,
+						u32 counter_block_offset)
+{
+	const u32 sc_base = MEMSYS_BASE +
+		(model_data->kbdev->gpu_props.props.l2_props.num_l2_slices *
+		 KBASE_IPA_NR_BYTES_PER_BLOCK);
+
+	return sc_base + counter_block_offset;
+}
+
+/**
+ * kbase_g7x_sum_all_memsys_blocks() - calculate energy for a single Memory System performance counter.
+ * @model_data:   pointer to GPU model data.
+ * @coeff:        default value of coefficient for IPA group.
+ * @offset:       offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a single Memory System performance counter.
+ */
+static s64 kbase_g7x_sum_all_memsys_blocks(
+		struct kbase_ipa_model_vinstr_data *model_data,
+		s32 coeff,
+		u32 offset)
+{
+	u32 counter;
+
+	counter = kbase_g7x_power_model_get_memsys_counter(model_data, offset);
+	return kbase_ipa_sum_all_memsys_blocks(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_sum_all_shader_cores() - calculate energy for a Shader Cores performance counter for all cores.
+ * @model_data:            pointer to GPU model data.
+ * @coeff:                 default value of coefficient for IPA group.
+ * @counter_block_offset:  offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a Shader Cores performance counter for all cores.
+ */
+static s64 kbase_g7x_sum_all_shader_cores(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff,
+	u32 counter_block_offset)
+{
+	u32 counter;
+
+	counter = kbase_g7x_power_model_get_sc_counter(model_data,
+						       counter_block_offset);
+	return kbase_ipa_sum_all_shader_cores(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_jm_single_counter() - calculate energy for a single Job Manager performance counter.
+ * @model_data:            pointer to GPU model data.
+ * @coeff:                 default value of coefficient for IPA group.
+ * @counter_block_offset:  offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a single Job Manager performance counter.
+ */
+static s64 kbase_g7x_jm_single_counter(
+	struct kbase_ipa_model_vinstr_data *model_data,
+	s32 coeff,
+	u32 counter_block_offset)
+{
+	u32 counter;
+
+	counter = kbase_g7x_power_model_get_jm_counter(model_data,
+						     counter_block_offset);
+	return kbase_ipa_single_counter(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_get_active_cycles() - return the GPU_ACTIVE counter
+ * @model_data:            pointer to GPU model data.
+ *
+ * Return: the number of cycles the GPU was active during the counter sampling
+ * period.
+ */
+static u32 kbase_g7x_get_active_cycles(
+	struct kbase_ipa_model_vinstr_data *model_data)
+{
+	u32 counter = kbase_g7x_power_model_get_jm_counter(model_data, JM_GPU_ACTIVE);
+
+	/* Counters are only 32-bit, so we can safely multiply by 1 then cast
+	 * the 64-bit result back to a u32.
+	 */
+	return kbase_ipa_single_counter(model_data, 1, counter);
+}
+
+/*
+ * Tables of IPA group definitions.
+ *
+ * For each IPA group, these tables define a function to access the relevant
+ * performance block counter (or counters, if the operation needs to be
+ * iterated over multiple blocks) and calculate the energy estimation.
+ */
+
+static const struct kbase_ipa_group ipa_groups_def_g71[] = {
+	{
+		.name = "l2_access",
+		.default_value = 526300,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 301100,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "tex_issue",
+		.default_value = 197400,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_COORD_ISSUE,
+	},
+	{
+		.name = "tile_wb",
+		.default_value = -156400,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_BEATS_WR_TIB,
+	},
+	{
+		.name = "gpu_active",
+		.default_value = 115800,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g72[] = {
+	{
+		.name = "l2_access",
+		.default_value = 393000,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 227000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "tex_issue",
+		.default_value = 181900,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_COORD_ISSUE,
+	},
+	{
+		.name = "tile_wb",
+		.default_value = -120200,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_BEATS_WR_TIB,
+	},
+	{
+		.name = "gpu_active",
+		.default_value = 133100,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g76[] = {
+	{
+		.name = "gpu_active",
+		.default_value = 122000,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 488900,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "vary_instr",
+		.default_value = 212100,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_VARY_INSTR,
+	},
+	{
+		.name = "tex_tfch_num_operations",
+		.default_value = 288000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+	},
+	{
+		.name = "l2_access",
+		.default_value = 378100,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g52_r1[] = {
+	{
+		.name = "gpu_active",
+		.default_value = 224200,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 384700,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "vary_instr",
+		.default_value = 271900,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_VARY_INSTR,
+	},
+	{
+		.name = "tex_tfch_num_operations",
+		.default_value = 477700,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+	},
+	{
+		.name = "l2_access",
+		.default_value = 551400,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g51[] = {
+	{
+		.name = "gpu_active",
+		.default_value = 201400,
+		.op = kbase_g7x_jm_single_counter,
+		.counter_block_offset = JM_GPU_ACTIVE,
+	},
+	{
+		.name = "exec_instr_count",
+		.default_value = 392700,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_EXEC_INSTR_COUNT,
+	},
+	{
+		.name = "vary_instr",
+		.default_value = 274000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_VARY_INSTR,
+	},
+	{
+		.name = "tex_tfch_num_operations",
+		.default_value = 528000,
+		.op = kbase_g7x_sum_all_shader_cores,
+		.counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+	},
+	{
+		.name = "l2_access",
+		.default_value = 506400,
+		.op = kbase_g7x_sum_all_memsys_blocks,
+		.counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+	},
+};
+
+#define IPA_POWER_MODEL_OPS(gpu, init_token) \
+	const struct kbase_ipa_model_ops kbase_ ## gpu ## _ipa_model_ops = { \
+		.name = "mali-" #gpu "-power-model", \
+		.init = kbase_ ## init_token ## _power_model_init, \
+		.term = kbase_ipa_vinstr_common_model_term, \
+		.get_dynamic_coeff = kbase_ipa_vinstr_dynamic_coeff, \
+	}; \
+	KBASE_EXPORT_TEST_API(kbase_ ## gpu ## _ipa_model_ops)
+
+#define STANDARD_POWER_MODEL(gpu, reference_voltage) \
+	static int kbase_ ## gpu ## _power_model_init(\
+			struct kbase_ipa_model *model) \
+	{ \
+		BUILD_BUG_ON(ARRAY_SIZE(ipa_groups_def_ ## gpu) > \
+				KBASE_IPA_MAX_GROUP_DEF_NUM); \
+		return kbase_ipa_vinstr_common_model_init(model, \
+				ipa_groups_def_ ## gpu, \
+				ARRAY_SIZE(ipa_groups_def_ ## gpu), \
+				kbase_g7x_get_active_cycles, \
+				(reference_voltage)); \
+	} \
+	IPA_POWER_MODEL_OPS(gpu, gpu)
+
+#define ALIAS_POWER_MODEL(gpu, as_gpu) \
+	IPA_POWER_MODEL_OPS(gpu, as_gpu)
+
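+/*
+ * For example, STANDARD_POWER_MODEL(g71, 800) defines
+ * kbase_g71_power_model_init(), which registers ipa_groups_def_g71 with
+ * kbase_ipa_vinstr_common_model_init() at a reference voltage of 800 mV,
+ * and then emits kbase_g71_ipa_model_ops under the name
+ * "mali-g71-power-model". ALIAS_POWER_MODEL(g52, g76) only emits the ops
+ * structure, reusing kbase_g76_power_model_init() for initialization.
+ */
+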
+STANDARD_POWER_MODEL(g71, 800);
+STANDARD_POWER_MODEL(g72, 800);
+STANDARD_POWER_MODEL(g76, 800);
+STANDARD_POWER_MODEL(g52_r1, 1000);
+STANDARD_POWER_MODEL(g51, 1000);
+
+/* g52 is an alias of g76 (TNOX) for IPA */
+ALIAS_POWER_MODEL(g52, g76);
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h
new file mode 100644
index 0000000..5571f84
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h
@@ -0,0 +1,614 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_FEATURES_H_
+#define _BASE_HWCONFIG_FEATURES_H_
+
+enum base_hw_feature {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+	BASE_HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_V4,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_TLS_HASHING,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_generic[] = {
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t60x[] = {
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_V4,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t62x[] = {
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_V4,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t72x[] = {
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_V4,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t76x[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tFxx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t83x[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t82x[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tMIx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tHEx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tSIx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tDVx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tNOx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_TLS_HASHING,
+	BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tGOx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_TLS_HASHING,
+	BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tKAx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tTRx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tNAx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tBEx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_CLEAN_ONLY_SAFE,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tULx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tBOx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tIDx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tVAx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_IDVS_GROUP_SIZE,
+	BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tEGx[] = {
+	BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+	BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+	BASE_HW_FEATURE_XAFFINITY,
+	BASE_HW_FEATURE_WARPING,
+	BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+	BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+	BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+	BASE_HW_FEATURE_BRNDOUT_CC,
+	BASE_HW_FEATURE_BRNDOUT_KILL,
+	BASE_HW_FEATURE_LD_ST_LEA_TEX,
+	BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+	BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+	BASE_HW_FEATURE_MRT,
+	BASE_HW_FEATURE_MSAA_16X,
+	BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+	BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+	BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+	BASE_HW_FEATURE_TEST4_DATUM_MODE,
+	BASE_HW_FEATURE_THREAD_GROUP_SPLIT,
+	BASE_HW_FEATURE_FLUSH_REDUCTION,
+	BASE_HW_FEATURE_PROTECTED_MODE,
+	BASE_HW_FEATURE_PROTECTED_DEBUG_MODE,
+	BASE_HW_FEATURE_COHERENCY_REG,
+	BASE_HW_FEATURE_AARCH64_MMU,
+	BASE_HW_FEATURE_TLS_HASHING,
+	BASE_HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+	BASE_HW_FEATURE_END
+};
+
+#endif /* _BASE_HWCONFIG_FEATURES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h
new file mode 100644
index 0000000..d7c40ef
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h
@@ -0,0 +1,1360 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update base/tools/hwconfig_generator/hwc_{issues,features}.py
+ * For more information see base/tools/hwconfig_generator/README
+ */
+
+#ifndef _BASE_HWCONFIG_ISSUES_H_
+#define _BASE_HWCONFIG_ISSUES_H_
+
+enum base_hw_issue {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_6367,
+	BASE_HW_ISSUE_6398,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_6787,
+	BASE_HW_ISSUE_7027,
+	BASE_HW_ISSUE_7144,
+	BASE_HW_ISSUE_7304,
+	BASE_HW_ISSUE_8073,
+	BASE_HW_ISSUE_8186,
+	BASE_HW_ISSUE_8215,
+	BASE_HW_ISSUE_8245,
+	BASE_HW_ISSUE_8250,
+	BASE_HW_ISSUE_8260,
+	BASE_HW_ISSUE_8280,
+	BASE_HW_ISSUE_8316,
+	BASE_HW_ISSUE_8381,
+	BASE_HW_ISSUE_8394,
+	BASE_HW_ISSUE_8401,
+	BASE_HW_ISSUE_8408,
+	BASE_HW_ISSUE_8443,
+	BASE_HW_ISSUE_8456,
+	BASE_HW_ISSUE_8564,
+	BASE_HW_ISSUE_8634,
+	BASE_HW_ISSUE_8778,
+	BASE_HW_ISSUE_8791,
+	BASE_HW_ISSUE_8833,
+	BASE_HW_ISSUE_8879,
+	BASE_HW_ISSUE_8896,
+	BASE_HW_ISSUE_8975,
+	BASE_HW_ISSUE_8986,
+	BASE_HW_ISSUE_8987,
+	BASE_HW_ISSUE_9010,
+	BASE_HW_ISSUE_9418,
+	BASE_HW_ISSUE_9423,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_9510,
+	BASE_HW_ISSUE_9566,
+	BASE_HW_ISSUE_9630,
+	BASE_HW_ISSUE_10127,
+	BASE_HW_ISSUE_10327,
+	BASE_HW_ISSUE_10410,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10487,
+	BASE_HW_ISSUE_10607,
+	BASE_HW_ISSUE_10632,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10676,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10797,
+	BASE_HW_ISSUE_10817,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_10959,
+	BASE_HW_ISSUE_10969,
+	BASE_HW_ISSUE_10984,
+	BASE_HW_ISSUE_10995,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11035,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_26,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3542,
+	BASE_HW_ISSUE_T76X_3556,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TNOX_1194,
+	BASE_HW_ISSUE_TGOX_R1_1234,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_generic[] = {
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
+	BASE_HW_ISSUE_6367,
+	BASE_HW_ISSUE_6398,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_6787,
+	BASE_HW_ISSUE_7027,
+	BASE_HW_ISSUE_7144,
+	BASE_HW_ISSUE_7304,
+	BASE_HW_ISSUE_8073,
+	BASE_HW_ISSUE_8186,
+	BASE_HW_ISSUE_8215,
+	BASE_HW_ISSUE_8245,
+	BASE_HW_ISSUE_8250,
+	BASE_HW_ISSUE_8260,
+	BASE_HW_ISSUE_8280,
+	BASE_HW_ISSUE_8316,
+	BASE_HW_ISSUE_8381,
+	BASE_HW_ISSUE_8394,
+	BASE_HW_ISSUE_8401,
+	BASE_HW_ISSUE_8408,
+	BASE_HW_ISSUE_8443,
+	BASE_HW_ISSUE_8456,
+	BASE_HW_ISSUE_8564,
+	BASE_HW_ISSUE_8634,
+	BASE_HW_ISSUE_8778,
+	BASE_HW_ISSUE_8791,
+	BASE_HW_ISSUE_8833,
+	BASE_HW_ISSUE_8896,
+	BASE_HW_ISSUE_8975,
+	BASE_HW_ISSUE_8986,
+	BASE_HW_ISSUE_8987,
+	BASE_HW_ISSUE_9010,
+	BASE_HW_ISSUE_9418,
+	BASE_HW_ISSUE_9423,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_9510,
+	BASE_HW_ISSUE_9566,
+	BASE_HW_ISSUE_9630,
+	BASE_HW_ISSUE_10410,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10487,
+	BASE_HW_ISSUE_10607,
+	BASE_HW_ISSUE_10632,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10676,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_10969,
+	BASE_HW_ISSUE_10984,
+	BASE_HW_ISSUE_10995,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11035,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_3964,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
+	BASE_HW_ISSUE_6367,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_6787,
+	BASE_HW_ISSUE_7027,
+	BASE_HW_ISSUE_7304,
+	BASE_HW_ISSUE_8408,
+	BASE_HW_ISSUE_8564,
+	BASE_HW_ISSUE_8778,
+	BASE_HW_ISSUE_8975,
+	BASE_HW_ISSUE_9010,
+	BASE_HW_ISSUE_9418,
+	BASE_HW_ISSUE_9423,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_9510,
+	BASE_HW_ISSUE_10410,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10487,
+	BASE_HW_ISSUE_10607,
+	BASE_HW_ISSUE_10632,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10676,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_10969,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11035,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
+	BASE_HW_ISSUE_6367,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_6787,
+	BASE_HW_ISSUE_7027,
+	BASE_HW_ISSUE_7304,
+	BASE_HW_ISSUE_8408,
+	BASE_HW_ISSUE_8564,
+	BASE_HW_ISSUE_8778,
+	BASE_HW_ISSUE_8975,
+	BASE_HW_ISSUE_9010,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_9510,
+	BASE_HW_ISSUE_10410,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10487,
+	BASE_HW_ISSUE_10607,
+	BASE_HW_ISSUE_10632,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10676,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11035,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10127,
+	BASE_HW_ISSUE_10327,
+	BASE_HW_ISSUE_10410,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10487,
+	BASE_HW_ISSUE_10607,
+	BASE_HW_ISSUE_10632,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10676,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10817,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_10959,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11035,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_10959,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = {
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_10959,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_26,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3542,
+	BASE_HW_ISSUE_T76X_3556,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_26,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3542,
+	BASE_HW_ISSUE_T76X_3556,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_26,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3542,
+	BASE_HW_ISSUE_T76X_3556,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_26,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3542,
+	BASE_HW_ISSUE_T76X_3556,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_26,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3542,
+	BASE_HW_ISSUE_T76X_3556,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10797,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10797,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10684,
+	BASE_HW_ISSUE_10797,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_11056,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t72x[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10471,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10797,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t76x[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t60x[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_8778,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t62x[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_6402,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10472,
+	BASE_HW_ISSUE_10649,
+	BASE_HW_ISSUE_10931,
+	BASE_HW_ISSUE_11012,
+	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11042,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3964,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r0p2[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tFRx_r2p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tFRx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r0p2[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r2p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3966,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t86x[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_TMIX_7891,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t83x_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t83x_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t83x[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3964,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_1909,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t82x_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10821,
+	BASE_HW_ISSUE_10883,
+	BASE_HW_ISSUE_10946,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T720_1386,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_T76X_3960,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t82x[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11051,
+	BASE_HW_ISSUE_T76X_1963,
+	BASE_HW_ISSUE_T76X_3086,
+	BASE_HW_ISSUE_T76X_3700,
+	BASE_HW_ISSUE_T76X_3793,
+	BASE_HW_ISSUE_T76X_3979,
+	BASE_HW_ISSUE_T83X_817,
+	BASE_HW_ISSUE_TMIX_7891,
+	GPUCORE_1619,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0_05dev0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_T76X_3953,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tMIx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8463,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TMIX_8438,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tMIx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_7940,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TMIX_8138,
+	BASE_HW_ISSUE_TMIX_8206,
+	BASE_HW_ISSUE_TMIX_8343,
+	BASE_HW_ISSUE_TMIX_8456,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p2[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tHEx_r0p3[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_10682,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tHEx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_7891,
+	BASE_HW_ISSUE_TMIX_8042,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r0p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_11054,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tSIx_r1p1[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tSIx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tDVx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tDVx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TNOX_1194,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tNOx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TNOX_1194,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tGOx_r1p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TGOX_R1_1234,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tGOx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tKAx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tKAx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tTRx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tTRx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tNAx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tNAx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tBEx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tBEx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tULx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tULx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tBOx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tBOx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tIDx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tIDx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tVAx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tVAx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_TTRX_1337,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tEGx_r0p0[] = {
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_tEGx[] = {
+	BASE_HW_ISSUE_5736,
+	BASE_HW_ISSUE_9435,
+	BASE_HW_ISSUE_TMIX_8133,
+	BASE_HW_ISSUE_TSIX_1116,
+	BASE_HW_ISSUE_TSIX_2033,
+	BASE_HW_ISSUE_END
+};
+
+#endif /* _BASE_HWCONFIG_ISSUES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_base_kernel.h b/drivers/gpu/arm/midgard/mali_base_kernel.h
new file mode 100644
index 0000000..70dc3c5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_kernel.h
@@ -0,0 +1,1763 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base structures shared with the kernel.
+ */
+
+#ifndef _BASE_KERNEL_H_
+#define _BASE_KERNEL_H_
+
+typedef struct base_mem_handle {
+	struct {
+		u64 handle;
+	} basep;
+} base_mem_handle;
+
+#include "mali_base_mem_priv.h"
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+
+/*
+ * Dependency stuff, keep it private for now. May want to expose it if
+ * we decide to make the number of semaphores a configurable
+ * option.
+ */
+#define BASE_JD_ATOM_COUNT              256
+
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET             ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET           ((unsigned char)0)
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+#if defined CDBG_ASSERT
+#define LOCAL_ASSERT CDBG_ASSERT
+#elif defined KBASE_DEBUG_ASSERT
+#define LOCAL_ASSERT KBASE_DEBUG_ASSERT
+#else
+#error assert macro not defined!
+#endif
+
+#if defined(PAGE_MASK) && defined(PAGE_SHIFT)
+#define LOCAL_PAGE_SHIFT PAGE_SHIFT
+#define LOCAL_PAGE_LSB ~PAGE_MASK
+#else
+#include <osu/mali_osu.h>
+
+#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2
+#define LOCAL_PAGE_SHIFT OSU_CONFIG_CPU_PAGE_SIZE_LOG2
+#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1)
+#else
+#error Failed to find page size
+#endif
+#endif
+
+/**
+ * @addtogroup base_user_api User-side Base APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_memory User-side Base Memory APIs
+ * @{
+ */
+
+/**
+ * typedef base_mem_alloc_flags - Memory allocation, access/hint flags.
+ *
+ * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator
+ * in order to determine the best cache policy. Some combinations are
+ * of course invalid (e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD, which would
+ * define a write-only region on the CPU side that is nevertheless heavily
+ * read by the CPU).
+ * Other flags are only meaningful to a particular allocator.
+ * More flags can be added to this list, as long as they don't clash
+ * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
+ */
+typedef u32 base_mem_alloc_flags;
+
+/* Memory allocation, access/hint flags.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* IN */
+/* Read access CPU side
+ */
+#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)
+
+/* Write access CPU side
+ */
+#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)
+
+/* Read access GPU side
+ */
+#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)
+
+/* Write access GPU side
+ */
+#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)
+
+/* Execute allowed on the GPU side
+ */
+#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
+
+/* Will be permanently mapped in kernel space.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASE_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+
+/* The allocation will completely reside within the same 4GB chunk in the GPU
+ * virtual space.
+ * Since this flag is primarily required only for the TLS memory which will
+ * not be used to contain executable code and also not used for Tiler heap,
+ * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags.
+ */
+#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
+
+#define BASE_MEM_RESERVED_BIT_7 ((base_mem_alloc_flags)1 << 7)
+#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
+
+/* Grow backing store on GPU Page Fault
+ */
+#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
+
+/* Page coherence Outer shareable, if available
+ */
+#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)
+
+/* Page coherence Inner shareable
+ */
+#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)
+
+/* Should be cached on the CPU
+ */
+#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)
+
+/* IN/OUT */
+/* Must have same VA on both the GPU and the CPU
+ */
+#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)
+
+/* OUT */
+/* Must call mmap to acquire a GPU address for the alloc
+ */
+#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)
+
+/* IN */
+/* Page coherence Outer shareable, required.
+ */
+#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
+
+/* Secure memory
+ */
+#define BASE_MEM_SECURE ((base_mem_alloc_flags)1 << 16)
+
+/* Not needed physical memory
+ */
+#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)
+
+/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
+ * addresses to be the same
+ */
+#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)
+
+/**
+ * Bit 19 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags
+ */
+#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19)
+#define BASE_MEM_MAYBE_RESERVED_BIT_19 BASE_MEM_RESERVED_BIT_19
+
+/**
+ * Memory starting from the end of the initial commit is aligned to 'extent'
+ * pages, where 'extent' must be a power of 2 and no more than
+ * BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES
+ */
+#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20)
+
+/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu mode.
+ * Some components within the GPU might only be able to access memory that is
+ * GPU cacheable. Refer to the specific GPU implementation for more details.
+ * The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail
+ * if the memory is not aligned to GPU and CPU cache line width.
+ */
+#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
+
+/* Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 22
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+	(((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/* A mask for all the flags which are modifiable via the base_mem_set_flags
+ * interface.
+ */
+#define BASE_MEM_FLAGS_MODIFIABLE \
+	(BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
+	 BASE_MEM_COHERENT_LOCAL)
+
+
+/* A mask of all currently reserved flags
+ */
+#define BASE_MEM_FLAGS_RESERVED \
+	(BASE_MEM_RESERVED_BIT_7 | BASE_MEM_RESERVED_BIT_8 | \
+		BASE_MEM_MAYBE_RESERVED_BIT_19)
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASE_MEM_FLAGS_KERNEL_ONLY (BASE_MEM_PERMANENT_KERNEL_MAPPING)
+
+/* A mask of all the flags that can be returned via the base_mem_get_flags()
+ * interface.
+ */
+#define BASE_MEM_FLAGS_QUERYABLE \
+	(BASE_MEM_FLAGS_INPUT_MASK & ~(BASE_MEM_SAME_VA | \
+		BASE_MEM_COHERENT_SYSTEM_REQUIRED | BASE_MEM_DONT_NEED | \
+		BASE_MEM_IMPORT_SHARED | BASE_MEM_FLAGS_RESERVED | \
+		BASE_MEM_FLAGS_KERNEL_ONLY))
+
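+/* Illustrative sketch (assumed usage, not defined by this header): a typical
+ * allocation visible to both CPU and GPU might combine the protection flags
+ * and then be checked against the input mask before being submitted, e.g.:
+ *
+ *   base_mem_alloc_flags flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
+ *                                BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+ *                                BASE_MEM_SAME_VA;
+ *   if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+ *           reject_flags();   // hypothetical error path: unknown/output bits set
+ */
+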
+/**
+ * enum base_mem_import_type - Memory types supported by @a base_mem_import
+ *
+ * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
+ * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
+ * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
+ * base_mem_import_user_buffer
+ *
+ * Each type defines what the supported handle type is.
+ *
+ * If any new type is added here ARM must be contacted
+ * to allocate a numeric value for it.
+ * Do not just add a new type without synchronizing with ARM
+ * as future releases from ARM might include other new types
+ * which could clash with your custom types.
+ */
+typedef enum base_mem_import_type {
+	BASE_MEM_IMPORT_TYPE_INVALID = 0,
+	/**
+	 * Import type with value 1 is deprecated.
+	 */
+	BASE_MEM_IMPORT_TYPE_UMM = 2,
+	BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+} base_mem_import_type;
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr:	address of imported user buffer
+ * @length:	length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+	u64 ptr;
+	u64 length;
+};
+
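+/* Minimal sketch of preparing a user-buffer import handle (values and the
+ * surrounding ioctl plumbing are illustrative only, outside this header):
+ *
+ *   struct base_mem_import_user_buffer ubuf = {
+ *           .ptr    = (u64)(uintptr_t)user_ptr,   // user-space address
+ *           .length = user_len,                   // length in bytes
+ *   };
+ *   // passed together with BASE_MEM_IMPORT_TYPE_USER_BUFFER as the type
+ */
+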
+/**
+ * @brief Invalid memory handle.
+ *
+ * Return value from functions returning @ref base_mem_handle on error.
+ *
+ * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
+ *          in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })
+
+/**
+ * @brief Special write-alloc memory handle.
+ *
+ * A special handle is used to represent a region where a special page is mapped
+ * with a write-alloc cache setup, typically used when the write result of the
+ * GPU isn't needed, but the GPU must write anyway.
+ *
+ * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
+ *          in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })
+
+#define BASEP_MEM_INVALID_HANDLE               (0ull  << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE               (1ull  << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE           (2ull  << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE           (3ull  << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE     (4ull  << 12)
+/* reserved handles ..-48<<PAGE_SHIFT for future special handles */
+#define BASE_MEM_COOKIE_BASE                   (64ul  << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS            ((BITS_PER_LONG << 12) + \
+						BASE_MEM_COOKIE_BASE)
+
+/* Mask to detect 4GB boundary alignment */
+#define BASE_MEM_MASK_4GB  0xfffff000UL
+/* Mask to detect 4GB boundary (in page units) alignment */
+#define BASE_MEM_PFN_MASK_4GB  (BASE_MEM_MASK_4GB >> LOCAL_PAGE_SHIFT)
+
+/**
+ * Limit on the 'extent' parameter for an allocation with the
+ * BASE_MEM_TILER_ALIGN_TOP flag set
+ *
+ * This is the same as the maximum limit for a Buffer Descriptor's chunk size
+ */
+#define BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES_LOG2 \
+		(21u - (LOCAL_PAGE_SHIFT))
+#define BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES \
+		(1ull << (BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES_LOG2))
+
+/* Bit mask of cookies used for memory allocation setup */
+#define KBASE_COOKIE_MASK  ~1UL /* bit 0 is reserved */
+
+/* Maximum size allowed in a single KBASE_IOCTL_MEM_ALLOC call */
+#define KBASE_MEM_ALLOC_MAX_SIZE ((8ull << 30) >> PAGE_SHIFT) /* 8 GB */
+
+
+/**
+ * @addtogroup base_user_api_memory_defered User-side Base Deferred Memory Coherency APIs
+ * @{
+ */
+
+/**
+ * @brief a basic memory operation (sync-set).
+ *
+ * The content of this structure is private, and should only be used
+ * by the accessors.
+ */
+typedef struct base_syncset {
+	struct basep_syncset basep_sset;
+} base_syncset;
+
+/** @} end group base_user_api_memory_defered */
+
+/**
+ * Handle to represent imported memory object.
+ * Simple opaque handle to imported memory, can't be used
+ * with anything but base_external_resource_init to bind to an atom.
+ */
+typedef struct base_import_handle {
+	struct {
+		u64 handle;
+	} basep;
+} base_import_handle;
+
+/** @} end group base_user_api_memory */
+
+/**
+ * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs
+ * @{
+ */
+
+typedef int platform_fence_type;
+#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1)
+
+/**
+ * Base stream handle.
+ *
+ * References an underlying base stream object.
+ */
+typedef struct base_stream {
+	struct {
+		int fd;
+	} basep;
+} base_stream;
+
+/**
+ * Base fence handle.
+ *
+ * References an underlying base fence object.
+ */
+typedef struct base_fence {
+	struct {
+		int fd;
+		int stream_fd;
+	} basep;
+} base_fence;
+
+/**
+ * @brief Per-job data
+ *
+ * This structure is used to store per-job data, and is completely unused
+ * by the Base driver. It can be used to store things such as a callback
+ * function pointer or data needed to handle job completion. It is
+ * guaranteed to be untouched by the Base driver.
+ */
+typedef struct base_jd_udata {
+	u64 blob[2];	 /**< per-job data array */
+} base_jd_udata;
+
+/**
+ * @brief Memory aliasing info
+ *
+ * Describes a memory handle to be aliased.
+ * A subset of the handle can be chosen for aliasing, given an offset and a
+ * length.
+ * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a
+ * region where a special page is mapped with a write-alloc cache setup,
+ * typically used when the write result of the GPU isn't needed, but the GPU
+ * must write anyway.
+ *
+ * Offset and length are specified in pages.
+ * Offset must be within the size of the handle.
+ * Offset+length must not overrun the size of the handle.
+ *
+ * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ * @offset Offset within the handle to start aliasing from, in pages.
+ *         Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE.
+ * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
+ *         specifies the number of times the special page is needed.
+ */
+struct base_mem_aliasing_info {
+	base_mem_handle handle;
+	u64 offset;
+	u64 length;
+};
+
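+/* Illustrative example: alias pages 4..11 of an existing handle (offset and
+ * length are in pages), plus 8 copies of the special write-alloc page.
+ * 'some_handle' is a hypothetical, previously allocated base_mem_handle:
+ *
+ *   struct base_mem_aliasing_info ai[2] = {
+ *           { .handle = some_handle, .offset = 4, .length = 8 },
+ *           { .handle = BASE_MEM_WRITE_ALLOC_PAGES_HANDLE, .offset = 0,
+ *             .length = 8 },
+ *   };
+ */
+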
+/**
+ * Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the
+ * initial commit is aligned to 'extent' pages, where 'extent' must be a power
+ * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES
+ */
+#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP  (1 << 0)
+
+/**
+ * struct base_jit_alloc_info - Structure which describes a JIT allocation
+ *                              request.
+ * @gpu_alloc_addr:             The GPU virtual address to write the JIT
+ *                              allocated GPU virtual address to.
+ * @va_pages:                   The minimum number of virtual pages required.
+ * @commit_pages:               The minimum number of physical pages which
+ *                              should back the allocation.
+ * @extent:                     Granularity of physical pages to grow the
+ *                              allocation by during a fault.
+ * @id:                         Unique ID provided by the caller, this is used
+ *                              to pair allocation and free requests.
+ *                              Zero is not a valid value.
+ * @bin_id:                     The JIT allocation bin, used in conjunction with
+ *                              @max_allocations to limit the number of each
+ *                              type of JIT allocation.
+ * @max_allocations:            The maximum number of allocations allowed within
+ *                              the bin specified by @bin_id. Should be the same
+ *                              for all JIT allocations within the same bin.
+ * @flags:                      flags specifying the special requirements for
+ *                              the JIT allocation.
+ * @padding:                    Expansion space - should be initialised to zero
+ * @usage_id:                   A hint about which allocation should be reused.
+ *                              The kernel should attempt to use a previous
+ *                              allocation with the same usage_id
+ */
+struct base_jit_alloc_info {
+	u64 gpu_alloc_addr;
+	u64 va_pages;
+	u64 commit_pages;
+	u64 extent;
+	u8 id;
+	u8 bin_id;
+	u8 max_allocations;
+	u8 flags;
+	u8 padding[2];
+	u16 usage_id;
+};
+
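+/* Sketch of a single JIT allocation request (the values are illustrative
+ * only; unset fields default to zero with designated initializers):
+ *
+ *   struct base_jit_alloc_info info = {
+ *           .gpu_alloc_addr = result_gpu_va,  // hypothetical GPU VA to write to
+ *           .va_pages       = 64,             // reserve 64 virtual pages
+ *           .commit_pages   = 16,             // back 16 pages up front
+ *           .extent         = 4,              // grow by 4 pages per fault
+ *           .id             = 1,              // non-zero, pairs alloc and free
+ *   };
+ *
+ * A list of such structures is referenced from the jc element of an atom
+ * carrying BASE_JD_REQ_SOFT_JIT_ALLOC (defined later in this header).
+ */
+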
+/**
+ * @brief Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a dependency is a data or
+ * ordering dependency (by putting it before/after 'core_req' in the structure it should be possible to add without
+ * changing the structure size).
+ * When the flag is set for a particular dependency to signal that it is an ordering only dependency then
+ * errors will not be propagated.
+ */
+typedef u8 base_jd_dep_type;
+
+
+#define BASE_JD_DEP_TYPE_INVALID  (0)       /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA     (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER    (1U << 1) /**< Order dependency */
+
+/**
+ * @brief Job chain hardware requirements.
+ *
+ * A job chain must specify what GPU features it needs to allow the
+ * driver to schedule the job correctly.  Not specifying the
+ * correct settings can/will cause an early job termination.  Multiple
+ * values can be ORed together to specify multiple requirements.
+ * A special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies and doesn't execute anything on the hardware.
+ */
+typedef u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/**
+ * No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/**
+ * Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS  ((base_jd_core_req)1 << 0)
+
+/**
+ * Requires compute shaders
+ * This covers any of the following Midgard Job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS  ((base_jd_core_req)1 << 1)
+#define BASE_JD_REQ_T   ((base_jd_core_req)1 << 2)   /**< Requires tiling */
+#define BASE_JD_REQ_CF  ((base_jd_core_req)1 << 3)   /**< Requires cache flushes */
+#define BASE_JD_REQ_V   ((base_jd_core_req)1 << 4)   /**< Requires value writeback */
+
+/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC  ((base_jd_core_req)1 << 13)
+
+/**
+ * SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/**
+ * SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP  ((base_jd_core_req)1 << 6)
+
+/**
+ * SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+
+#define BASE_JD_REQ_PERMON               ((base_jd_core_req)1 << 7)
+
+/**
+ * SW Only requirement: External resources are referenced by this atom.
+ * When external resources are referenced no syncsets can be bundled with the atom
+ * but should instead be part of NULL jobs inserted into the dependency tree.
+ * The first pre_dep object must be configured for the external resources to use,
+ * the second pre_dep object can be used to create other dependencies.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and
+ * BASE_JD_REQ_SOFT_EVENT_WAIT.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES   ((base_jd_core_req)1 << 8)
+
+/**
+ * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted
+ * to the hardware but will cause some action to happen within the driver
+ */
+#define BASE_JD_REQ_SOFT_JOB        ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME      (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER          (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT             (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/**
+ * SW Only requirement : Replay job.
+ *
+ * If the preceding job fails, the replay job will cause the jobs specified in
+ * the list of base_jd_replay_payload pointed to by the jc pointer to be
+ * replayed.
+ *
+ * A replay job will only cause jobs to be replayed up to BASEP_JD_REPLAY_LIMIT
+ * times. If a job fails more than BASEP_JD_REPLAY_LIMIT times then the replay
+ * job is failed, as well as any following dependencies.
+ *
+ * The replayed jobs will require a number of atom IDs. If there are not enough
+ * free atom IDs then the replay job will fail.
+ *
+ * If the preceding job does not fail, then the replay job is returned as
+ * completed.
+ *
+ * The replayed jobs will never be returned to userspace. The preceding failed
+ * job will be returned to userspace as failed; the status of this job should
+ * be ignored. Completion should be determined by the status of the replay soft
+ * job.
+ *
+ * In order for the jobs to be replayed, the job headers will have to be
+ * modified. The Status field will be reset to NOT_STARTED. If the Job Type
+ * field indicates a Vertex Shader Job then it will be changed to Null Job.
+ *
+ * The replayed jobs have the following assumptions :
+ *
+ * - No external resources. Any required external resources will be held by the
+ *   replay atom.
+ * - Pre-dependencies are created based on job order.
+ * - Atom numbers are automatically assigned.
+ * - device_nr is set to 0. This is not relevant as
+ *   BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
+ * - Priority is inherited from the replay job.
+ */
+#define BASE_JD_REQ_SOFT_REPLAY                 (BASE_JD_REQ_SOFT_JOB | 0x4)
+/**
+ * SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ *   other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ *   possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT             (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET              (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET            (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY             (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/**
+ * SW only requirement: Just In Time allocation
+ *
+ * This job requests a single or multiple JIT allocations through a list
+ * of @base_jit_alloc_info structure which is passed via the jc element of
+ * the atom. The number of @base_jit_alloc_info structures present in the
+ * list is passed via the nr_extres element of the atom
+ *
+ * It should be noted that the id entry in @base_jit_alloc_info must not
+ * be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail it is expected that a @BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job to free the JIT allocation is still made.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC              (BASE_JD_REQ_SOFT_JOB | 0x9)
+/**
+ * SW only requirement: Just In Time free
+ *
+ * This job requests a single or multiple JIT allocations created by
+ * @BASE_JD_REQ_SOFT_JIT_ALLOC to be freed. The ID list of the JIT
+ * allocations is passed via the jc element of the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE               (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/**
+ * SW only requirement: Map external resource
+ *
+ * This job requests external resource(s) are mapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom, which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP            (BASE_JD_REQ_SOFT_JOB | 0xb)
+/**
+ * SW only requirement: Unmap external resource
+ *
+ * This job requests external resource(s) are unmapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom, which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP          (BASE_JD_REQ_SOFT_JOB | 0xc)
+
+/**
+ * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
+ *
+ * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type.
+ *
+ * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job
+ * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */
+#define BASE_JD_REQ_ONLY_COMPUTE    ((base_jd_core_req)1 << 10)
+
+/**
+ * HW Requirement: Use the base_jd_atom::device_nr field to specify a
+ * particular core group
+ *
+ * If both @ref BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority
+ *
+ * This is only guaranteed to work for @ref BASE_JD_REQ_ONLY_COMPUTE atoms.
+ *
+ * If the core availability policy is keeping the required core group turned off, then
+ * the job will fail with a @ref BASE_JD_EVENT_PM_EVENT error code.
+ */
+#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)
+
+/**
+ * SW Flag: If this bit is set then the successful completion of this atom
+ * will not cause an event to be sent to userspace
+ */
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE   ((base_jd_core_req)1 << 12)
+
+/**
+ * SW Flag: If this bit is set then completion of this atom will not cause an
+ * event to be sent to userspace, whether successful or not.
+ */
+#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job starts which does not have this bit set or a job completes
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use if
+ * the CPU may have written to memory addressed by the job since the last job
+ * without this bit set was submitted.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)
+
+/**
+ * SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job completes which does not have this bit set or a job starts
+ * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use if
+ * the CPU may read from or partially overwrite memory addressed by the job
+ * before the next job without this bit set completes.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
+
+/**
+ * These requirement bits are currently unused in base_jd_core_req
+ */
+#define BASEP_JD_REQ_RESERVED \
+	(~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+	BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+	BASE_JD_REQ_EVENT_COALESCE | \
+	BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
+	BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+	BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END))
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of the atom.
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+	(BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+	BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
+
+/*
+ * Returns a non-zero value if the core requirements passed define a soft job
+ * or a dependency-only job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
+	((core_req & BASE_JD_REQ_SOFT_JOB) || \
+	(core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
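+
+/*
+ * Illustrative sketch (not part of the ABI): how the check above classifies a
+ * few request values (assuming BASE_JD_REQ_DEP is the dependency-only value).
+ * @code
+ * BASE_JD_REQ_SOFT_JOB_OR_DEP(BASE_JD_REQ_SOFT_FENCE_WAIT); // non-zero: soft job
+ * BASE_JD_REQ_SOFT_JOB_OR_DEP(BASE_JD_REQ_DEP);             // non-zero: dependency only
+ * BASE_JD_REQ_SOFT_JOB_OR_DEP(BASE_JD_REQ_FS);              // zero: hardware job
+ * @endcode
+ */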
+
+/*
+ * Base Atom priority
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling after the atoms have had dependencies
+ * resolved. For example, a low priority atom that has had its dependencies
+ * resolved might run before a higher priority atom that has not had its
+ * dependencies resolved.
+ *
+ * In general, fragment atoms do not affect non-fragment atoms with
+ * lower priorities, and vice versa. The one exception is that there is only
+ * one priority value for each context. So a high-priority fragment atom, for
+ * example, can raise its context's priority, causing its non-fragment atoms to
+ * also be scheduled sooner.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * - Let atoms 'X' and 'Y' be for the same job slot and have their dependencies
+ *   resolved, with atom 'X' having a higher priority than atom 'Y'
+ * - If atom 'Y' is currently running on the HW, then it is interrupted to
+ *   allow atom 'X' to run soon after
+ * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ *   the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * - Any two atoms that have the same priority could run in any order with
+ *   respect to each other. That is, there is no ordering constraint between
+ *   atoms of the same priority.
+ *
+ * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are
+ * scheduled between contexts. The default value, 0, will cause higher-priority
+ * atoms to be scheduled first, regardless of their context. The value 1 will
+ * use a round-robin algorithm when deciding which context's atoms to schedule
+ * next, so higher-priority atoms can only preempt lower priority atoms within
+ * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and
+ * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details.
+ */
+typedef u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM  ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_HIGH    ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW     ((base_jd_prio)2)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting */
+#define BASE_JD_NR_PRIO_LEVELS 3
+
+enum kbase_jd_atom_state {
+	/** Atom is not used */
+	KBASE_JD_ATOM_STATE_UNUSED,
+	/** Atom is queued in JD */
+	KBASE_JD_ATOM_STATE_QUEUED,
+	/** Atom has been given to JS (is runnable/running) */
+	KBASE_JD_ATOM_STATE_IN_JS,
+	/** Atom has been completed, but not yet handed back to job dispatcher
+	 *  for dependency resolution */
+	KBASE_JD_ATOM_STATE_HW_COMPLETED,
+	/** Atom has been completed, but not yet handed back to userspace */
+	KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
+
+struct base_dependency {
+	base_atom_id  atom_id;               /**< An atom number */
+	base_jd_dep_type dependency_type;    /**< Dependency type */
+};
+
+/* This structure has changed since UK 10.2, for which base_jd_core_req was a u16 value.
+ * In order to keep the size of the structure the same, the padding field has been adjusted
+ * accordingly and a core_req field of u32 type (matching the UK 10.3 base_jd_core_req definition)
+ * has been added at the end of the structure. The place in the structure previously occupied by the
+ * u16 core_req is kept but renamed to compat_core_req, and as such it can be used in the ioctl call
+ * for job submission as long as UK 10.2 legacy is supported. Once that support ends, this field can
+ * be left for possible future use. */
+typedef struct base_jd_atom_v2 {
+	u64 jc;			    /**< job-chain GPU address */
+	struct base_jd_udata udata;		    /**< user data */
+	u64 extres_list;	    /**< list of external resources */
+	u16 nr_extres;			    /**< nr of external resources or JIT allocations */
+	u16 compat_core_req;	            /**< core requirements which correspond to the legacy support for UK 10.2 */
+	struct base_dependency pre_dep[2];  /**< pre-dependencies; use the SETTER function to assign this field,
+	in order to reduce the possibility of improper assignment of a dependency field */
+	base_atom_id atom_number;	    /**< unique number to identify the atom */
+	base_jd_prio prio;                  /**< Atom priority. Refer to @ref base_jd_prio for more details */
+	u8 device_nr;			    /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
+	u8 padding[1];
+	base_jd_core_req core_req;          /**< core requirements */
+} base_jd_atom_v2;
+
+typedef enum base_external_resource_access {
+	BASE_EXT_RES_ACCESS_SHARED,
+	BASE_EXT_RES_ACCESS_EXCLUSIVE
+} base_external_resource_access;
+
+typedef struct base_external_resource {
+	u64 ext_resource;
+} base_external_resource;
+
+
+/**
+ * The maximum number of external resources which can be mapped/unmapped
+ * in a single request.
+ */
+#define BASE_EXT_RES_COUNT_MAX 10
+
+/**
+ * struct base_external_resource_list - Structure which describes a list of
+ *                                      external resources.
+ * @count:                              The number of resources.
+ * @ext_res:                            Array of external resources which is
+ *                                      sized at allocation time.
+ */
+struct base_external_resource_list {
+	u64 count;
+	struct base_external_resource ext_res[1];
+};
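+
+/*
+ * Illustrative sketch (not part of the ABI): since @c ext_res is sized at
+ * allocation time, a list for @c n resources (n <= BASE_EXT_RES_COUNT_MAX)
+ * would typically be allocated as:
+ * @code
+ * size_t bytes = sizeof(struct base_external_resource_list) +
+ *		(n - 1) * sizeof(struct base_external_resource);
+ * @endcode
+ */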
+
+struct base_jd_debug_copy_buffer {
+	u64 address;
+	u64 size;
+	struct base_external_resource extres;
+};
+
+/**
+ * @brief Setter for a dependency structure
+ *
+ * @param[in] dep          The kbase jd atom dependency to be initialized.
+ * @param     id           The atom_id to be assigned.
+ * @param     dep_type     The dep_type to be assigned.
+ *
+ */
+static inline void base_jd_atom_dep_set(struct base_dependency *dep,
+		base_atom_id id, base_jd_dep_type dep_type)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	/*
+	 * make sure we don't set not allowed combinations
+	 * of atom_id/dependency_type.
+	 */
+	LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
+			(id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));
+
+	dep->atom_id = id;
+	dep->dependency_type = dep_type;
+}
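+
+/*
+ * Illustrative sketch (not part of the ABI): filling in a minimal atom using
+ * the setter above. The GPU address and atom number are hypothetical values.
+ * @code
+ * struct base_jd_atom_v2 atom = { 0 };
+ *
+ * atom.jc = job_chain_gpu_va;          // hypothetical GPU address
+ * atom.core_req = BASE_JD_REQ_FS;
+ * atom.atom_number = 1;
+ * atom.prio = BASE_JD_PRIO_MEDIUM;
+ * base_jd_atom_dep_set(&atom.pre_dep[0], 0, BASE_JD_DEP_TYPE_INVALID);
+ * base_jd_atom_dep_set(&atom.pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
+ * @endcode
+ */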
+
+/**
+ * @brief Make a copy of a dependency structure
+ *
+ * @param[in,out] dep          The kbase jd atom dependency to be written.
+ * @param[in]     from         The dependency to make a copy from.
+ *
+ */
+static inline void base_jd_atom_dep_copy(struct base_dependency *dep,
+		const struct base_dependency *from)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type);
+}
+
+/**
+ * @brief Soft-atom fence trigger setup.
+ *
+ * Sets up an atom to be a SW-only atom signaling a fence
+ * when it reaches the run state.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to trigger when a GPU job has finished.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @ref base_jd_submit and @ref base_jd_submit
+ * has returned.
+ *
+ * @a fence must be a valid fence set up with @a base_fence_init.
+ * Calling this function with an uninitialized fence results in undefined behavior.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
+ * @param[in] fence The base fence object to trigger.
+ *
+ * @pre @p fence must reference a @ref base_fence successfully initialized by
+ *      calling @ref base_fence_init.
+ * @pre @p fence was @e not initialized by calling @ref base_fence_import, nor
+ *      is it associated with a fence-trigger job that was already submitted
+ *      by calling @ref base_jd_submit.
+ * @post @p atom can be submitted by calling @ref base_jd_submit.
+ */
+static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+	LOCAL_ASSERT(atom);
+	LOCAL_ASSERT(fence);
+	LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
+	LOCAL_ASSERT(fence->basep.stream_fd >= 0);
+	atom->jc = (uintptr_t) fence;
+	atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
+}
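+
+/*
+ * Illustrative sketch (not part of the ABI), assuming @c fence was initialized
+ * with base_fence_init() as the preconditions above require (its exact
+ * signature is not shown here):
+ * @code
+ * struct base_jd_atom_v2 atom = { 0 };
+ *
+ * base_jd_fence_trigger_setup_v2(&atom, &fence);
+ * // the atom can now be submitted with base_jd_submit()
+ * @endcode
+ */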
+
+/**
+ * @brief Soft-atom fence wait setup.
+ *
+ * Sets up an atom to be a SW-only atom waiting on a fence.
+ * When the fence becomes triggered the atom becomes runnable
+ * and completes immediately.
+ *
+ * Using the existing base dependency system the fence can
+ * be set to block a GPU job until it has been triggered.
+ *
+ * The base fence object must not be terminated until the atom
+ * has been submitted to @ref base_jd_submit and
+ * @ref base_jd_submit has returned.
+ *
+ * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
+ * @param[in] fence The base fence object to wait on
+ *
+ * @pre @p fence must reference a @ref base_fence successfully initialized by
+ *      calling @ref base_fence_import, or it must be associated with a
+ *      fence-trigger job that was already submitted by calling
+ *      @ref base_jd_submit.
+ * @post @p atom can be submitted by calling @ref base_jd_submit.
+ */
+static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
+{
+	LOCAL_ASSERT(atom);
+	LOCAL_ASSERT(fence);
+	LOCAL_ASSERT(fence->basep.fd >= 0);
+	atom->jc = (uintptr_t) fence;
+	atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
+}
+
+/**
+ * @brief External resource info initialization.
+ *
+ * Sets up an external resource object to reference
+ * a memory allocation and the type of access requested.
+ *
+ * @param[in] res     The resource object to initialize
+ * @param     handle  The handle to the imported memory object, must be
+ *                    obtained by calling @ref base_mem_as_import_handle().
+ * @param     access  The type of access requested
+ */
+static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access)
+{
+	u64 address;
+
+	address = handle.basep.handle;
+
+	LOCAL_ASSERT(res != NULL);
+	LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB));
+	LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+	res->ext_resource = address | (access & LOCAL_PAGE_LSB);
+}
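+
+/*
+ * Illustrative sketch (not part of the ABI): initializing one shared external
+ * resource from an import handle obtained via base_mem_as_import_handle().
+ * @code
+ * struct base_external_resource res;
+ *
+ * base_external_resource_init(&res, handle, BASE_EXT_RES_ACCESS_SHARED);
+ * @endcode
+ */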
+
+/**
+ * @brief Job chain event code bits
+ * Defines the bits used to create ::base_jd_event_code
+ */
+enum {
+	BASE_JD_SW_EVENT_KERNEL = (1u << 15), /**< Kernel side event */
+	BASE_JD_SW_EVENT = (1u << 14), /**< SW defined event */
+	BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event indicates success (SW events only) */
+	BASE_JD_SW_EVENT_JOB = (0u << 11), /**< Job related event */
+	BASE_JD_SW_EVENT_BAG = (1u << 11), /**< Bag related event */
+	BASE_JD_SW_EVENT_INFO = (2u << 11), /**< Misc/info event */
+	BASE_JD_SW_EVENT_RESERVED = (3u << 11),	/**< Reserved event type */
+	BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11)	    /**< Mask to extract the type from an event code */
+};
+
+/**
+ * @brief Job chain event codes
+ *
+ * HW and low-level SW events are represented by event codes.
+ * The status of jobs which succeeded are also represented by
+ * an event code (see ::BASE_JD_EVENT_DONE).
+ * Events are usually reported as part of a ::base_jd_event.
+ *
+ * The event codes are encoded in the following way:
+ * @li 10:0  - subtype
+ * @li 12:11 - type
+ * @li 13    - SW success (only valid if the SW bit is set)
+ * @li 14    - SW event (HW event if not set)
+ * @li 15    - Kernel event (should never be seen in userspace)
+ *
+ * Events are split up into ranges as follows:
+ * - BASE_JD_EVENT_RANGE_\<description\>_START
+ * - BASE_JD_EVENT_RANGE_\<description\>_END
+ *
+ * \a code is in \<description\>'s range when:
+ * - <tt>BASE_JD_EVENT_RANGE_\<description\>_START <= code < BASE_JD_EVENT_RANGE_\<description\>_END </tt>
+ *
+ * Ranges can be asserted for adjacency by testing that the END of the previous
+ * is equal to the START of the next. This is useful for optimizing some tests
+ * for range.
+ *
+ * A limitation is that the last member of this enum must explicitly be handled
+ * (with an assert-unreachable statement) in switch statements that use
+ * variables of this type. Otherwise, the compiler warns that we have not
+ * handled that enum value.
+ */
+typedef enum base_jd_event_code {
+	/* HW defined exceptions */
+
+	/** Start of HW Non-fault status codes
+	 *
+	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+	 * because the job was hard-stopped
+	 */
+	BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,
+
+	/* non-fatal exceptions */
+	BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */
+	BASE_JD_EVENT_DONE = 0x01,
+	BASE_JD_EVENT_STOPPED = 0x03,	  /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */
+	BASE_JD_EVENT_TERMINATED = 0x04,  /**< This is actually a fault status code - the job was hard stopped */
+	BASE_JD_EVENT_ACTIVE = 0x08,	  /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */
+
+	/** End of HW Non-fault status codes
+	 *
+	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
+	 * because the job was hard-stopped
+	 */
+	BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,
+
+	/** Start of HW fault and SW Error status codes */
+	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,
+
+	/* job exceptions */
+	BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
+	BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
+	BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
+	BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
+	BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
+	BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
+	BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
+	BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
+	BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
+	BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
+	BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
+	BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
+	BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
+	BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
+	BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
+	BASE_JD_EVENT_STATE_FAULT = 0x5A,
+	BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
+	BASE_JD_EVENT_UNKNOWN = 0x7F,
+
+	/* GPU exceptions */
+	BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
+	BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,
+
+	/* MMU exceptions */
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
+	BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
+	BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
+
+	/* SW defined exceptions */
+	BASE_JD_EVENT_MEM_GROWTH_FAILED	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+	BASE_JD_EVENT_TIMED_OUT		= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+	BASE_JD_EVENT_JOB_CANCELLED	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+	BASE_JD_EVENT_JOB_INVALID	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+	BASE_JD_EVENT_PM_EVENT		= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+	BASE_JD_EVENT_FORCE_REPLAY	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005,
+
+	BASE_JD_EVENT_BAG_INVALID	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+
+	/** End of HW fault and SW Error status codes */
+	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+	/** Start of SW Success status codes */
+	BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000,
+
+	BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
+	BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000,
+	BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+
+	/** End of SW Success status codes */
+	BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+	/** Start of Kernel-only status codes. Such codes are never returned to user-space */
+	BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000,
+	BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
+
+	/** End of Kernel-only status codes. */
+	BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+} base_jd_event_code;
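+
+/*
+ * Illustrative sketch (not part of the ABI): decoding an event code using the
+ * bit layout documented above.
+ * @code
+ * base_jd_event_code code = event.event_code;   // from a received base_jd_event_v2
+ * int is_sw_success = (code & BASE_JD_SW_EVENT) &&
+ *		(code & BASE_JD_SW_EVENT_SUCCESS);
+ * unsigned int sw_type = code & BASE_JD_SW_EVENT_TYPE_MASK; // JOB/BAG/INFO
+ * @endcode
+ */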
+
+/**
+ * @brief Event reporting structure
+ *
+ * This structure is used by the kernel driver to report information
+ * about GPU events. These can either be HW-specific events or low-level
+ * SW events, such as job-chain completion.
+ *
+ * The event code contains an event type field which can be extracted
+ * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK.
+ *
+ * Based on the event type base_jd_event::data holds:
+ * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed
+ * job-chain
+ * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has
+ * been completed (i.e. all contained job-chains have been completed).
+ * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
+ */
+typedef struct base_jd_event_v2 {
+	base_jd_event_code event_code;  /**< event code */
+	base_atom_id atom_number;       /**< the atom number that has completed */
+	struct base_jd_udata udata;     /**< user data */
+} base_jd_event_v2;
+
+/**
+ * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS jobs.
+ *
+ * This structure is stored into the memory pointed to by the @c jc field
+ * of @ref base_jd_atom.
+ *
+ * It must not occupy the same CPU cache line(s) as any neighboring data.
+ * This is to avoid cases where access to pages containing the structure
+ * is shared between cached and un-cached memory regions, which would
+ * cause memory corruption.
+ */
+
+typedef struct base_dump_cpu_gpu_counters {
+	u64 system_time;
+	u64 cycle_counter;
+	u64 sec;
+	u32 usec;
+	u8 padding[36];
+} base_dump_cpu_gpu_counters;
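+
+/*
+ * Illustrative sketch (not part of the ABI): a counters-dump soft job whose jc
+ * points at a suitably isolated buffer (the GPU VA is hypothetical).
+ * @code
+ * struct base_jd_atom_v2 atom = { 0 };
+ *
+ * atom.core_req = BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS;
+ * atom.jc = counters_buffer_gpu_va;    // must not share cache lines with other data
+ * @endcode
+ */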
+
+/** @} end group base_user_api_job_dispatch */
+
+#define GPU_MAX_JOB_SLOTS 16
+
+/**
+ * @page page_base_user_api_gpuprops User-side Base GPU Property Query API
+ *
+ * The User-side Base GPU Property Query API encapsulates two
+ * sub-modules:
+ *
+ * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties"
+ * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties"
+ *
+ * There is a related third module outside of Base, which is owned by the MIDG
+ * module:
+ * - @ref gpu_props_static "Midgard Compile-time GPU Properties"
+ *
+ * Base only deals with properties that vary between different Midgard
+ * implementations - the Dynamic GPU properties and the Platform Config
+ * properties.
+ *
+ * For properties that are constant for the Midgard Architecture, refer to the
+ * MIDG module. However, we will discuss their relevance here <b>just to
+ * provide background information.</b>
+ *
+ * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules
+ *
+ * The compile-time properties (Platform Config, Midgard Compile-time
+ * properties) are exposed as pre-processor macros.
+ *
+ * Complementing the compile-time properties are the Dynamic GPU
+ * Properties, which act as a conduit for the Midgard Configuration
+ * Discovery.
+ *
+ * In general, the dynamic properties are present to verify that the platform
+ * has been configured correctly with the right set of Platform Config
+ * Compile-time Properties.
+ *
+ * As a consistent guide across the entire DDK, the choice for dynamic or
+ * compile-time should consider the following, in order:
+ * -# Can the code be written so that it doesn't need to know the
+ * implementation limits at all?
+ * -# If you need the limits, get the information from the Dynamic Property
+ * lookup. This should be done once as you fetch the context, and then cached
+ * as part of the context data structure, so it's cheap to access.
+ * -# If there's a clear and arguable inefficiency in using Dynamic Properties,
+ * then use a Compile-Time Property (Platform Config, or Midgard Compile-time
+ * property). Examples of where this might be sensible follow:
+ *  - Part of a critical inner-loop
+ *  - Frequent re-use throughout the driver, causing significant extra load
+ * instructions or control flow that would be worthwhile optimizing out.
+ *
+ * We cannot provide an exhaustive set of examples, nor can we provide a
+ * rule for every possible situation. Use common sense, and think about: what
+ * the rest of the driver will be doing; how the compiler might represent the
+ * value if it is a compile-time constant; and whether an OEM shipping multiple
+ * devices would benefit far more from a single DDK binary than from
+ * insignificant micro-optimizations.
+ *
+ * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties
+ *
+ * Dynamic GPU properties are presented in two sets:
+ * -# the commonly used properties in @ref base_gpu_props, which have been
+ * unpacked from GPU register bitfields.
+ * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props
+ * (also a member of @ref base_gpu_props). All of these are presented in
+ * the packed form, as presented by the GPU registers themselves.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ * The properties returned extend the Midgard Configuration Discovery
+ * registers. For example, GPU clock speed is not specified in the Midgard
+ * Architecture, but is <b>necessary for OpenCL's clGetDeviceInfo() function</b>.
+ *
+ * The GPU properties are obtained by a call to
+ * base_get_gpu_props(). This simply returns a pointer to a const
+ * base_gpu_props structure. It is constant for the life of a base
+ * context. Multiple calls to base_get_gpu_props() on a base context
+ * return the same pointer to a constant structure. This avoids cache pollution
+ * of the common data.
+ *
+ * This pointer must not be freed, because it does not point to the start of a
+ * region allocated by the memory allocator; instead, just close the @ref
+ * base_context.
+ *
+ *
+ * @section sec_base_user_api_gpuprops_kernel Kernel Operation
+ *
+ * During Base Context Create time, user-side makes a single kernel call:
+ * - A call to fill user memory with GPU information structures
+ *
+ * The kernel side will fill the entire provided, processed @ref base_gpu_props
+ * structure, because this information is required on both the
+ * user and kernel sides; it does not make sense to decode it twice.
+ *
+ * Coherency groups must be derived from the bitmasks, but this can be done
+ * kernel side, and just once at kernel startup: Coherency groups must already
+ * be known kernel-side, to support chains that specify a 'Only Coherent Group'
+ * SW requirement, or 'Only Coherent Group with Tiler' SW requirement.
+ *
+ * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation
+ * Creation of the coherent group data is done at device-driver startup, and so
+ * is one-time. This will most likely involve a loop with CLZ, shifting, and
+ * bit clearing on the L2_PRESENT mask, depending on whether the
+ * system is L2 Coherent. The number of shader cores is obtained by a
+ * population count, since faulty cores may be disabled during production,
+ * producing a non-contiguous mask.
+ *
+ * The memory requirements for this algorithm can be determined either by a u64
+ * population count on the L2_PRESENT mask (a LUT helper is already required
+ * for the above), or by the simple assumption that there can be no more than
+ * 16 coherent groups, since core groups are typically 4 cores.
+ */
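+
+/*
+ * Illustrative sketch (not part of the driver): the population counts
+ * described above, over hypothetical raw register values (e.g. a
+ * struct gpu_raw_gpu_props pointer named raw).
+ * @code
+ * unsigned int num_shader_cores = hweight64(raw->shader_present);
+ * unsigned int num_core_groups  = hweight64(raw->l2_present);
+ * u64 lowest_l2 = raw->l2_present & -raw->l2_present; // lowest set L2 bit
+ * @endcode
+ */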
+
+/**
+ * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs
+ * @{
+ */
+
+/**
+ * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties
+ * @{
+ */
+
+#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 4
+
+#define BASE_MAX_COHERENT_GROUPS 16
+
+struct mali_base_gpu_core_props {
+	/**
+	 * Product specific value.
+	 */
+	u32 product_id;
+
+	/**
+	 * Status of the GPU release.
+	 * No defined values, but starts at 0 and increases by one for each
+	 * release status (alpha, beta, EAC, etc.).
+	 * 4-bit values (0-15).
+	 */
+	u16 version_status;
+
+	/**
+	 * Minor release number of the GPU. "P" part of an "RnPn" release number.
+	 * 8-bit values (0-255).
+	 */
+	u16 minor_revision;
+
+	/**
+	 * Major release number of the GPU. "R" part of an "RnPn" release number.
+	 * 4-bit values (0-15).
+	 */
+	u16 major_revision;
+
+	u16 padding;
+
+	/* The maximum GPU frequency. Reported to applications by
+	 * clGetDeviceInfo()
+	 */
+	u32 gpu_freq_khz_max;
+
+	/**
+	 * Size of the shader program counter, in bits.
+	 */
+	u32 log2_program_counter_size;
+
+	/**
+	 * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a
+	 * bitpattern where a set bit indicates that the format is supported.
+	 *
+	 * Before using a texture format, it is recommended that the corresponding
+	 * bit be checked.
+	 */
+	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+
+	/**
+	 * Theoretical maximum memory available to the GPU. It is unlikely that a
+	 * client will be able to allocate all of this memory for their own
+	 * purposes, but this at least provides an upper bound on the memory
+	 * available to the GPU.
+	 *
+	 * This is required for OpenCL's clGetDeviceInfo() call when
+	 * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
+	 * client will not be expecting to allocate anywhere near this value.
+	 */
+	u64 gpu_available_memory_size;
+
+	/**
+	 * The number of execution engines.
+	 */
+	u8 num_exec_engines;
+};
+
+/**
+ *
+ * More information is possible - but associativity and bus width are not
+ * required by upper-level APIs.
+ */
+struct mali_base_gpu_l2_cache_props {
+	u8 log2_line_size;
+	u8 log2_cache_size;
+	u8 num_l2_slices; /* Number of L2C slices. 1 or higher */
+	u8 padding[5];
+};
+
+struct mali_base_gpu_tiler_props {
+	u32 bin_size_bytes;	/* Max is 4*2^15 */
+	u32 max_active_levels;	/* Max is 2^15 */
+};
+
+/**
+ * GPU threading system details.
+ */
+struct mali_base_gpu_thread_props {
+	u32 max_threads;            /* Max. number of threads per core */
+	u32 max_workgroup_size;     /* Max. number of threads per workgroup */
+	u32 max_barrier_size;       /* Max. number of threads that can synchronize on a simple barrier */
+	u16 max_registers;          /* Total size [1..65535] of the register file available per core. */
+	u8  max_task_queue;         /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
+	u8  max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
+	u8  impl_tech;              /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
+	u8  padding[3];
+	u32 tls_alloc;              /* Number of threads per core that TLS must
+				     * be allocated for
+				     */
+};
+
+/**
+ * @brief descriptor for a coherent group
+ *
+ * \c core_mask exposes all cores in that coherent group, and \c num_cores
+ * provides a cached population-count for that mask.
+ *
+ * @note Whilst all cores are exposed in the mask, not all may be available to
+ * the application, depending on the Kernel Power policy.
+ *
+ * @note If u64s must be 8-byte aligned, then this structure has 32 bits of wastage.
+ */
+struct mali_base_gpu_coherent_group {
+	u64 core_mask;	       /**< Core restriction mask required for the group */
+	u16 num_cores;	       /**< Number of cores in the group */
+	u16 padding[3];
+};
+
+/**
+ * @brief Coherency group information
+ *
+ * Note that the sizes of the members could be reduced. However, the \c group
+ * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte
+ * aligned, thus leading to wastage if the other members sizes were reduced.
+ *
+ * The groups are sorted by core mask. The core masks are non-repeating and do
+ * not intersect.
+ */
+struct mali_base_gpu_coherent_group_info {
+	u32 num_groups;
+
+	/**
+	 * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches.
+	 *
+	 * The GPU Counter dumping writes 2048 bytes per core group, regardless of
+	 * whether the core groups are coherent or not. Hence this member is needed
+	 * to calculate how much memory is required for dumping.
+	 *
+	 * @note Do not use it to work out how many valid elements are in the
+	 * group[] member. Use num_groups instead.
+	 */
+	u32 num_core_groups;
+
+	/**
+	 * Coherency features of the memory, accessed by @ref gpu_mem_features
+	 * methods
+	 */
+	u32 coherency;
+
+	u32 padding;
+
+	/**
+	 * Descriptors of coherent groups
+	 */
+	struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS];
+};
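+
+/*
+ * Illustrative sketch (not part of the ABI): iterating the valid descriptors
+ * over a hypothetical info pointer, using num_groups (not num_core_groups) as
+ * the bound, per the note above.
+ * @code
+ * u32 i, total_cores = 0;
+ *
+ * for (i = 0; i < info->num_groups; i++)
+ *	total_cores += info->group[i].num_cores;
+ * @endcode
+ */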
+
+/**
+ * A complete description of the GPU's Hardware Configuration Discovery
+ * registers.
+ *
+ * The information is presented inefficiently for access. For frequent access,
+ * the values should be better expressed in an unpacked form in the
+ * base_gpu_props structure.
+ *
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
+ * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
+ * behaving differently?". In this case, all information about the
+ * configuration is potentially useful, but it <b>does not need to be processed
+ * by the driver</b>. Instead, the raw registers can be processed by the Mali
+ * Tools software on the host PC.
+ *
+ */
+struct gpu_raw_gpu_props {
+	u64 shader_present;
+	u64 tiler_present;
+	u64 l2_present;
+	u64 stack_present;
+
+	u32 l2_features;
+	u32 core_features;
+	u32 mem_features;
+	u32 mmu_features;
+
+	u32 as_present;
+
+	u32 js_present;
+	u32 js_features[GPU_MAX_JOB_SLOTS];
+	u32 tiler_features;
+	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+
+	u32 gpu_id;
+
+	u32 thread_max_threads;
+	u32 thread_max_workgroup_size;
+	u32 thread_max_barrier_size;
+	u32 thread_features;
+
+	/*
+	 * Note: This is the _selected_ coherency mode rather than the
+	 * available modes as exposed in the coherency_features register.
+	 */
+	u32 coherency_mode;
+
+	u32 thread_tls_alloc;
+};
+
+/**
+ * Return structure for base_get_gpu_props().
+ *
+ * NOTE: the raw_props member in this data structure contains the register
+ * values from which the value of the other members are derived. The derived
+ * members exist to allow for efficient access and/or shielding the details
+ * of the layout of the registers.
+ *
+ */
+typedef struct base_gpu_props {
+	struct mali_base_gpu_core_props core_props;
+	struct mali_base_gpu_l2_cache_props l2_props;
+	u64 unused_1; /* keep for backwards compatibility */
+	struct mali_base_gpu_tiler_props tiler_props;
+	struct mali_base_gpu_thread_props thread_props;
+
+	/** This member is large, likely to be 128 bytes */
+	struct gpu_raw_gpu_props raw_props;
+
+	/** This must be last member of the structure */
+	struct mali_base_gpu_coherent_group_info coherency_info;
+} base_gpu_props;
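+
+/*
+ * Illustrative sketch (not part of the ABI): once the constant structure has
+ * been obtained via base_get_gpu_props() as described earlier, individual
+ * properties are read directly, e.g.
+ * @code
+ * u32 max_khz = props->core_props.gpu_freq_khz_max;
+ * u8  slices  = props->l2_props.num_l2_slices;
+ * @endcode
+ */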
+
+/** @} end group base_user_api_gpuprops_dyn */
+
+/** @} end group base_user_api_gpuprops */
+
+/**
+ * @addtogroup base_user_api_core User-side Base core APIs
+ * @{
+ */
+
+/**
+ * Flags to pass to ::base_context_init.
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+typedef u32 base_context_create_flags;
+
+/** No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/** Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/** Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \
+	((base_context_create_flags)1 << 1)
+
+
+/**
+ * Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
+ */
+#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
+	(BASE_CONTEXT_CCTX_EMBEDDED | \
+	 BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
+
+/**
+ * Bitpattern describing the ::base_context_create_flags that can be
+ * passed to the kernel
+ */
+#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
+	BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED
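+
+/*
+ * Illustrative sketch (not part of the ABI): rejecting creation flags outside
+ * the allowed set (flags is a hypothetical variable).
+ * @code
+ * if (flags & ~BASE_CONTEXT_CREATE_ALLOWED_FLAGS)
+ *	return -EINVAL;
+ * @endcode
+ */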
+
+/*
+ * Private flags used on the base context
+ *
+ * These start at bit 31, and run down to zero.
+ *
+ * They share the same space as @ref base_context_create_flags, and so must
+ * not collide with them.
+ */
+/** Private flag tracking whether job descriptor dumping is disabled */
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((u32)(1 << 31))
+
+/** @} end group base_user_api_core */
+
+/** @} end group base_user_api */
+
+/**
+ * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties
+ * @{
+ *
+ * C Pre-processor macros are exposed here to do with Platform
+ * Config.
+ *
+ * These include:
+ * - GPU Properties that are constant on a particular Midgard Family
+ * Implementation e.g. Maximum samples per pixel on Mali-T600.
+ * - General platform config for the GPU, such as the GPU major and minor
+ * revision.
+ */
+
+/** @} end group base_plat_config_gpuprops */
+
+/**
+ * @addtogroup base_api Base APIs
+ * @{
+ */
+
+/**
+ * @brief The payload for a replay job. This must be in GPU memory.
+ */
+typedef struct base_jd_replay_payload {
+	/**
+	 * Pointer to the first entry in the base_jd_replay_jc list.  These
+	 * will be replayed in @b reverse order (so that extra ones can be added
+	 * to the head in future soft jobs without affecting this soft job)
+	 */
+	u64 tiler_jc_list;
+
+	/**
+	 * Pointer to the fragment job chain.
+	 */
+	u64 fragment_jc;
+
+	/**
+	 * Pointer to the tiler heap free FBD field to be modified.
+	 */
+	u64 tiler_heap_free;
+
+	/**
+	 * Hierarchy mask for the replayed fragment jobs. May be zero.
+	 */
+	u16 fragment_hierarchy_mask;
+
+	/**
+	 * Hierarchy mask for the replayed tiler jobs. May be zero.
+	 */
+	u16 tiler_hierarchy_mask;
+
+	/**
+	 * Default weight to be used for hierarchy levels not in the original
+	 * mask.
+	 */
+	u32 hierarchy_default_weight;
+
+	/**
+	 * Core requirements for the tiler job chain
+	 */
+	base_jd_core_req tiler_core_req;
+
+	/**
+	 * Core requirements for the fragment job chain
+	 */
+	base_jd_core_req fragment_core_req;
+} base_jd_replay_payload;
+
+/**
+ * @brief An entry in the linked list of job chains to be replayed. This must
+ *        be in GPU memory.
+ */
+typedef struct base_jd_replay_jc {
+	/**
+	 * Pointer to next entry in the list. A setting of NULL indicates the
+	 * end of the list.
+	 */
+	u64 next;
+
+	/**
+	 * Pointer to the job chain.
+	 */
+	u64 jc;
+
+} base_jd_replay_jc;
+
+/* Maximum number of jobs allowed in a fragment chain in the payload of a
+ * replay job */
+#define BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT 256
+
+/** @} end group base_api */
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact. */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
+		BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+
+
+#endif				/* _BASE_KERNEL_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_base_mem_priv.h b/drivers/gpu/arm/midgard/mali_base_mem_priv.h
new file mode 100644
index 0000000..52c8a4f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_base_mem_priv.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _BASE_MEM_PRIV_H_
+#define _BASE_MEM_PRIV_H_
+
+#define BASE_SYNCSET_OP_MSYNC	(1U << 0)
+#define BASE_SYNCSET_OP_CSYNC	(1U << 1)
+
+/*
+ * This structure describes a basic memory coherency operation.
+ * It can either be:
+ * @li a sync from CPU to Memory:
+ *	- type = ::BASE_SYNCSET_OP_MSYNC
+ *	- mem_handle = a handle to the memory object on which the operation
+ *	  is taking place
+ *	- user_addr = the address of the range to be synced
+ *	- size = the amount of data to be synced, in bytes
+ *	- offset is ignored.
+ * @li a sync from Memory to CPU:
+ *	- type = ::BASE_SYNCSET_OP_CSYNC
+ *	- mem_handle = a handle to the memory object on which the operation
+ *	  is taking place
+ *	- user_addr = the address of the range to be synced
+ *	- size = the amount of data to be synced, in bytes.
+ *	- offset is ignored.
+ */
+struct basep_syncset {
+	base_mem_handle mem_handle;
+	u64 user_addr;
+	u64 size;
+	u8 type;
+	u8 padding[7];
+};
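+
+/*
+ * Illustrative sketch (not part of the ABI): describing a CPU-to-memory sync
+ * over a hypothetical mapping.
+ * @code
+ * struct basep_syncset sset = { 0 };
+ *
+ * sset.mem_handle = handle;            // hypothetical base_mem_handle
+ * sset.user_addr = (u64)(uintptr_t)cpu_ptr;
+ * sset.size = length_in_bytes;
+ * sset.type = BASE_SYNCSET_OP_MSYNC;   // CPU -> memory
+ * @endcode
+ */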
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase.h b/drivers/gpu/arm/midgard/mali_kbase.h
new file mode 100644
index 0000000..24a021d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase.h
@@ -0,0 +1,706 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_H_
+#define _KBASE_H_
+
+#include <mali_malisw.h>
+
+#include <mali_kbase_debug.h>
+
+#include <linux/atomic.h>
+#include <linux/highmem.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/mm.h>
+#endif
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "mali_base_kernel.h"
+#include <mali_kbase_linux.h>
+
+/*
+ * Include mali_kbase_defs.h first as this provides types needed by other local
+ * header files.
+ */
+#include "mali_kbase_defs.h"
+
+#include "mali_kbase_context.h"
+#include "mali_kbase_strings.h"
+#include "mali_kbase_mem_lowlevel.h"
+#include "mali_kbase_js.h"
+#include "mali_kbase_utility.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_gpu_memory_debugfs.h"
+#include "mali_kbase_mem_profile_debugfs.h"
+#include "mali_kbase_debug_job_fault.h"
+#include "mali_kbase_jd_debugfs.h"
+#include "mali_kbase_gpuprops.h"
+#include "mali_kbase_jm.h"
+#include "mali_kbase_ioctl.h"
+
+#include "ipa/mali_kbase_ipa.h"
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+#include <trace/events/gpu.h>
+#endif
+
+
+#ifndef u64_to_user_ptr
+/* Introduced in Linux v4.6 */
+#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)x)
+#endif
+
+/*
+ * Kernel-side Base (KBase) APIs
+ */
+
+struct kbase_device *kbase_device_alloc(void);
+/*
+ * Note: the configuration attributes member of kbdev needs to have
+ * been set up before calling kbase_device_init.
+ */
+
+/*
+ * API to acquire the device list semaphore and return a pointer
+ * to the device list head.
+ */
+const struct list_head *kbase_dev_list_get(void);
+/* API to release the device list semaphore */
+void kbase_dev_list_put(const struct list_head *dev_list);
+
+int kbase_device_init(struct kbase_device * const kbdev);
+void kbase_device_term(struct kbase_device *kbdev);
+void kbase_device_free(struct kbase_device *kbdev);
+int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
+
+/* Needed for gator integration and for reporting vsync information */
+struct kbase_device *kbase_find_device(int minor);
+void kbase_release_device(struct kbase_device *kbdev);
+
+
+/**
+ * kbase_get_unmapped_area() - get an address range which is currently
+ *                             unmapped.
+ * @filp: File operations associated with kbase device.
+ * @addr: CPU mapped address (set to 0 since MAP_FIXED mapping is not allowed
+ *        as Mali GPU driver decides about the mapping).
+ * @len: Length of the address range.
+ * @pgoff: Page offset within the GPU address space of the kbase context.
+ * @flags: Flags for the allocation.
+ *
+ * Finds the unmapped address range which satisfies requirements specific to
+ * GPU and those provided by the call parameters.
+ *
+ * 1) Requirement for allocations greater than 2MB:
+ * - alignment offset is set to 2MB and the alignment mask to 2MB decremented
+ * by 1.
+ *
+ * 2) Requirements imposed for the shader memory alignment:
+ * - alignment is decided by the number of GPU pc bits which can be read from
+ * GPU properties of the device associated with this kbase context; alignment
+ * offset is set to this value in bytes and the alignment mask to the offset
+ * decremented by 1.
+ * - allocations must not be at 4GB boundaries. Such cases are indicated
+ * by the flag KBASE_REG_GPU_NX not being set (check the flags of the kbase
+ * region). 4GB boundaries can be checked against @ref BASE_MEM_MASK_4GB.
+ *
+ * 3) Requirements imposed for tiler memory alignment, cases indicated by
+ * the flag @ref KBASE_REG_TILER_ALIGN_TOP (check the flags of the kbase
+ * region):
+ * - alignment offset is set to the difference between the kbase region
+ * extent (converted from the original value in pages to bytes) and the kbase
+ * region initial_commit (also converted from the original value in pages to
+ * bytes); alignment mask is set to the kbase region extent in bytes and
+ * decremented by 1.
+ *
+ * Return: if successful, address of the unmapped area aligned as required;
+ *         error code (negative) in case of failure;
+ */
+unsigned long kbase_get_unmapped_area(struct file *filp,
+		const unsigned long addr, const unsigned long len,
+		const unsigned long pgoff, const unsigned long flags);
+
+int kbase_jd_init(struct kbase_context *kctx);
+void kbase_jd_exit(struct kbase_context *kctx);
+
+/**
+ * kbase_jd_submit - Submit atoms to the job dispatcher
+ *
+ * @kctx: The kbase context to submit to
+ * @user_addr: The address in user space of the struct base_jd_atom_v2 array
+ * @nr_atoms: The number of atoms in the array
+ * @stride: sizeof(struct base_jd_atom_v2)
+ * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_jd_submit(struct kbase_context *kctx,
+		void __user *user_addr, u32 nr_atoms, u32 stride,
+		bool uk6_atom);
+
+/**
+ * kbase_jd_done_worker - Handle a job completion
+ * @data: a &struct work_struct
+ *
+ * This function requeues the job from the runpool (if it was soft-stopped or
+ * removed from NEXT registers).
+ *
+ * Removes it from the system if it finished/failed/was cancelled.
+ *
+ * Resolves dependencies to add dependent jobs to the context, potentially
+ * starting them if necessary (which may add more references to the context)
+ *
+ * Releases the reference to the context from the no-longer-running job.
+ *
+ * Handles retrying submission outside of IRQ context if it failed from within
+ * IRQ context.
+ */
+void kbase_jd_done_worker(struct work_struct *data);
+
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+		kbasep_js_atom_done_code done_code);
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+void kbase_jd_zap_context(struct kbase_context *kctx);
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+		struct list_head *completed_jobs_ctx);
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
+bool jd_submit_atom(struct kbase_context *kctx,
+			 const struct base_jd_atom_v2 *user_atom,
+			 struct kbase_jd_atom *katom);
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);
+
+void kbase_job_done(struct kbase_device *kbdev, u32 done);
+
+/**
+ * kbase_job_slot_ctx_priority_check_locked(): - Check for lower priority atoms
+ *                                               and soft stop them
+ * @kctx: Pointer to context to check.
+ * @katom: Pointer to priority atom.
+ *
+ * Atoms from @kctx on the same job slot as @katom, which have lower priority
+ * than @katom will be soft stopped and put back in the queue, so that atoms
+ * with higher priority can run.
+ *
+ * The hwaccess_lock must be held when calling this function.
+ */
+void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom);
+
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+		struct kbase_jd_atom *target_katom);
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+		struct kbase_jd_atom *target_katom, u32 sw_flags);
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+		struct kbase_jd_atom *target_katom);
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+		struct kbase_jd_atom *target_katom);
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
+int kbase_event_pending(struct kbase_context *ctx);
+int kbase_event_init(struct kbase_context *kctx);
+void kbase_event_close(struct kbase_context *kctx);
+void kbase_event_cleanup(struct kbase_context *kctx);
+void kbase_event_wakeup(struct kbase_context *kctx);
+
+/**
+ * kbasep_jit_alloc_validate() - Validate the JIT allocation info.
+ *
+ * @kctx:	Pointer to the kbase context within which the JIT
+ *		allocation is to be validated.
+ * @info:	Pointer to struct @base_jit_alloc_info
+ *			which is to be validated.
+ * Return: 0 if the JIT allocation is valid; negative error code otherwise
+ */
+int kbasep_jit_alloc_validate(struct kbase_context *kctx,
+					struct base_jit_alloc_info *info);
+/**
+ * kbase_free_user_buffer() - Free memory allocated for struct
+ *		@kbase_debug_copy_buffer.
+ *
+ * @buffer:	Pointer to the memory location allocated for the object
+ *		of the type struct @kbase_debug_copy_buffer.
+ */
+static inline void kbase_free_user_buffer(
+		struct kbase_debug_copy_buffer *buffer)
+{
+	struct page **pages = buffer->extres_pages;
+	int nr_pages = buffer->nr_extres_pages;
+
+	if (pages) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *pg = pages[i];
+
+			if (pg)
+				put_page(pg);
+		}
+		kfree(pages);
+	}
+}
+
+/**
+ * kbase_mem_copy_from_extres_page() - Copy pages from external resources.
+ *
+ * @kctx:		kbase context within which the copying is to take place.
+ * @extres_pages:	Pointer to the pages which correspond to the external
+ *			resources from which the copying will take place.
+ * @pages:		Pointer to the pages to which the content is to be
+ *			copied from the provided external resources.
+ * @nr_pages:		Number of pages to copy.
+ * @target_page_nr:	Number of target pages which will be used for copying.
+ * @offset:		Offset into the target pages from which the copying
+ *			is to be performed.
+ * @to_copy:		Size of the chunk to be copied, in bytes.
+ */
+void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+		void *extres_page, struct page **pages, unsigned int nr_pages,
+		unsigned int *target_page_nr, size_t offset, size_t *to_copy);
+/**
+ * kbase_mem_copy_from_extres() - Copy from external resources.
+ *
+ * @kctx:	kbase context within which the copying is to take place.
+ * @buf_data:	Pointer to the information about external resources:
+ *		pages pertaining to the external resource, number of
+ *		pages to copy.
+ */
+int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+		struct kbase_debug_copy_buffer *buf_data);
+int kbase_process_soft_job(struct kbase_jd_atom *katom);
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
+void kbase_finish_soft_job(struct kbase_jd_atom *katom);
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
+#endif
+int kbase_soft_event_update(struct kbase_context *kctx,
+			    u64 event,
+			    unsigned char new_status);
+
+bool kbase_replay_process(struct kbase_jd_atom *katom);
+
+void kbasep_soft_job_timeout_worker(struct timer_list *timer);
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
+
+void kbasep_as_do_poke(struct work_struct *work);
+
+/** Returns the name associated with a Mali exception code
+ *
+ * This function is called from the interrupt handler when a GPU fault occurs.
+ * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
+ *
+ * @param[in] kbdev     The kbase device that the GPU fault occurred from.
+ * @param[in] exception_code  exception code
+ * @return name associated with the exception code
+ */
+const char *kbase_exception_name(struct kbase_device *kbdev,
+		u32 exception_code);
+
+/**
+ * Check whether a system suspend is in progress or the system has already been suspended.
+ *
+ * The caller should ensure that either kbdev->pm.active_count_lock is held, or
+ * a dmb was executed recently (to ensure the value is most
+ * up-to-date). However, without a lock the value could change afterwards.
+ *
+ * @return false if a suspend is not in progress
+ * @return !=false otherwise
+ */
+static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
+{
+	return kbdev->pm.suspending;
+}
+
+/**
+ * kbase_pm_is_active - Determine whether the GPU is active
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This takes into account whether there is an active context reference.
+ *
+ * Return: true if the GPU is active, false otherwise
+ */
+static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
+{
+	return kbdev->pm.active_count > 0;
+}
+
+/**
+ * Return the atom's ID, as was originally supplied by userspace in
+ * base_jd_atom_v2::atom_number
+ */
+static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	int result;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(katom->kctx == kctx);
+
+	result = katom - &kctx->jctx.atoms[0];
+	KBASE_DEBUG_ASSERT(result >= 0 && result <= BASE_JD_ATOM_COUNT);
+	return result;
+}
+
+/**
+ * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
+ * @kctx: Context pointer
+ * @id:   ID of atom to retrieve
+ *
+ * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
+ */
+static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
+		struct kbase_context *kctx, int id)
+{
+	return &kctx->jctx.atoms[id];
+}
+
+/**
+ * Initialize the disjoint state
+ *
+ * The disjoint event count and state are both set to zero.
+ *
+ * Disjoint functions usage:
+ *
+ * The disjoint event count should be incremented whenever a disjoint event occurs.
+ *
+ * There are several cases which are regarded as disjoint behavior. Rather than just
+ * incrementing the counter during disjoint events, we also increment it when jobs may be affected
+ * by what the GPU is currently doing. To facilitate this, we have the concept of disjoint state.
+ *
+ * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying
+ * (as part of the replay workaround). Increasing the disjoint state also increases the count of
+ * disjoint events.
+ *
+ * The disjoint state is then used to increase the count of disjoint events during job submission
+ * and job completion. Any atom submitted or completed while the disjoint state is greater than
+ * zero is regarded as a disjoint event.
+ *
+ * The disjoint event counter is also incremented immediately whenever a job is soft stopped
+ * and during context creation.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_init(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events
+ * called when a disjoint event has happened
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events only if the GPU is in a disjoint state
+ *
+ * This should be called when something happens which could be disjoint if the GPU
+ * is in a disjoint state. The state refcount keeps track of this.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev);
+
+/**
+ * Returns the count of disjoint events
+ *
+ * @param kbdev The kbase device
+ * @return the count of disjoint events
+ */
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev);
+
+/**
+ * Increment the refcount state indicating that the GPU is in a disjoint state.
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ * Once the disjoint state period has completed, @ref kbase_disjoint_state_down
+ * should be called.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_up(struct kbase_device *kbdev);
+
+/**
+ * Decrement the refcount state
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event)
+ *
+ * Called after @ref kbase_disjoint_state_up once the disjoint state is over
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_down(struct kbase_device *kbdev);
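+
+/*
+ * Illustrative sketch (not part of the driver): the intended pairing of the
+ * calls above around a disjoint period such as a GPU reset.
+ * @code
+ * kbase_disjoint_state_up(kbdev);
+ * // ... reset/replay work; submissions and completions in this window
+ * //     are counted as disjoint events ...
+ * kbase_disjoint_state_down(kbdev);
+ * @endcode
+ */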
+
+/**
+ * If a job is soft stopped and the number of contexts is >= this value
+ * it is reported as a disjoint event
+ */
+#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2
+
+#if !defined(UINT64_MAX)
+	#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+#endif
+
+#if KBASE_TRACE_ENABLE
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev);
+
+#ifndef CONFIG_MALI_SYSTEM_TRACE
+/** Add trace values about a job-slot
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - must just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)
+
+/** Add trace values about a job-slot, with info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val)
+
+/** Add trace values about a ctx refcount
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)
+/** Add trace values about a ctx refcount, and info
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val)
+
+/** Add trace values (no slot or refcount)
+ *
+ * @note Any functions called through this macro will still be evaluated in
+ * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
+ * functions called to get the parameters supplied to this macro must:
+ * - be static or static inline
+ * - just return 0 and have no other statements present in the body.
+ */
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)     \
+	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
+			0, 0, 0, info_val)
+
+/** Clear the trace */
+#define KBASE_TRACE_CLEAR(kbdev) \
+	kbasep_trace_clear(kbdev)
+
+/** Dump the slot trace */
+#define KBASE_TRACE_DUMP(kbdev) \
+	kbasep_trace_dump(kbdev)
+
+/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
+/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
+void kbasep_trace_clear(struct kbase_device *kbdev);
+#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+/* Dispatch kbase trace events as system trace events */
+#include <mali_linux_kbase_trace.h>
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+	trace_mali_##code(jobslot, 0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+	trace_mali_##code(jobslot, info_val)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+	trace_mali_##code(refcount, 0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+	trace_mali_##code(refcount, info_val)
+
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
+	trace_mali_##code(gpu_addr, info_val)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+#else
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(jobslot);\
+	} while (0)
+
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(jobslot);\
+		CSTD_UNUSED(info_val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(refcount);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(gpu_addr);\
+		CSTD_UNUSED(info_val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_ADD(kbdev, code, subcode, ctx, katom, val)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(code);\
+		CSTD_UNUSED(subcode);\
+		CSTD_UNUSED(ctx);\
+		CSTD_UNUSED(katom);\
+		CSTD_UNUSED(val);\
+		CSTD_NOP(0);\
+	} while (0)
+
+#define KBASE_TRACE_CLEAR(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+	do {\
+		CSTD_UNUSED(kbdev);\
+		CSTD_NOP(0);\
+	} while (0)
+#endif /* KBASE_TRACE_ENABLE */
+/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
+void kbasep_trace_dump(struct kbase_device *kbdev);
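+/*
+ * Usage sketch (illustrative only, assuming KBASE_TRACE_ENABLE is non-zero):
+ * record an event and dump the ring buffer to the kernel log:
+ *
+ *	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
+ *	KBASE_TRACE_DUMP(kbdev);
+ *
+ * The same call sites compile down to CSTD_UNUSED()/CSTD_NOP() when tracing
+ * is disabled, so they need no #ifdef guards.
+ */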
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/* kbase_io_history_init - initialize data struct for register access history
+ *
+ * @h The register history to initialize
+ * @n The number of register accesses that the buffer could hold
+ *
+ * @return 0 if successfully initialized, failure otherwise
+ */
+int kbase_io_history_init(struct kbase_io_history *h, u16 n);
+
+/* kbase_io_history_term - uninit all resources for the register access history
+ *
+ * @h The register history to terminate
+ */
+void kbase_io_history_term(struct kbase_io_history *h);
+
+/* kbase_io_history_dump - print the register history to the kernel ring buffer
+ *
+ * @kbdev Pointer to kbase_device containing the register history to dump
+ */
+void kbase_io_history_dump(struct kbase_device *kbdev);
+
+/**
+ * kbase_io_history_resize - resize the register access history buffer.
+ *
+ * @h: Pointer to a valid register history to resize
+ * @new_size: Number of accesses the buffer could hold
+ *
+ * A successful resize will clear all recent register accesses.
+ * If resizing fails for any reason (e.g., could not allocate memory, invalid
+ * buffer size) then the original buffer will be kept intact.
+ *
+ * @return 0 if the buffer was resized, failure otherwise
+ */
+int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size);
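+/*
+ * Usage sketch (illustrative only; the 256/512 sizes are arbitrary): typical
+ * lifecycle of a register access history buffer:
+ *
+ *	struct kbase_io_history h;
+ *
+ *	if (kbase_io_history_init(&h, 256))
+ *		return -ENOMEM;
+ *	...
+ *	kbase_io_history_resize(&h, 512);	(clears recent accesses)
+ *	kbase_io_history_dump(kbdev);		(prints the history to dmesg)
+ *	kbase_io_history_term(&h);
+ */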
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbase_io_history_init(...) ((int)0)
+
+#define kbase_io_history_term CSTD_NOP
+
+#define kbase_io_history_dump CSTD_NOP
+
+#define kbase_io_history_resize CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
new file mode 100644
index 0000000..8d71926
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
@@ -0,0 +1,209 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2015,2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_kbase_10969_workaround.h>
+
+/* Mask of X and Y coordinates for the coordinate words in the descriptors */
+#define X_COORDINATE_MASK 0x00000FFF
+#define Y_COORDINATE_MASK 0x0FFF0000
+/* Max number of words needed from the fragment shader job descriptor */
+#define JOB_HEADER_SIZE_IN_WORDS 10
+#define JOB_HEADER_SIZE (JOB_HEADER_SIZE_IN_WORDS*sizeof(u32))
+
+/* Word 0: Status Word */
+#define JOB_DESC_STATUS_WORD 0
+/* Word 1: Restart Index */
+#define JOB_DESC_RESTART_INDEX_WORD 1
+/* Word 2: Fault address low word */
+#define JOB_DESC_FAULT_ADDR_LOW_WORD 2
+/* Word 8: Minimum Tile Coordinates */
+#define FRAG_JOB_DESC_MIN_TILE_COORD_WORD 8
+/* Word 9: Maximum Tile Coordinates */
+#define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9
+
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
+{
+	struct device *dev = katom->kctx->kbdev->dev;
+	u32   clamped = 0;
+	struct kbase_va_region *region;
+	struct tagged_addr *page_array;
+	u64 page_index;
+	u32 offset = katom->jc & (~PAGE_MASK);
+	u32 *page_1 = NULL;
+	u32 *page_2 = NULL;
+	u32   job_header[JOB_HEADER_SIZE_IN_WORDS];
+	void *dst = job_header;
+	u32 minX, minY, maxX, maxY;
+	u32 restartX, restartY;
+	struct page *p;
+	u32 copy_size;
+
+	dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n");
+	if (!(katom->core_req & BASE_JD_REQ_FS))
+		return 0;
+
+	kbase_gpu_vm_lock(katom->kctx);
+	region = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
+			katom->jc);
+	if (!region || (region->flags & KBASE_REG_FREE))
+		goto out_unlock;
+
+	page_array = kbase_get_cpu_phy_pages(region);
+	if (!page_array)
+		goto out_unlock;
+
+	page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
+
+	p = as_page(page_array[page_index]);
+
+	/* We need the first 10 words of the fragment shader job descriptor.
+	 * Check whether offset + 10 words exceeds the page size; if it does,
+	 * the next page must be mapped as well.
+	 * copy_size equals JOB_HEADER_SIZE when the whole descriptor fits
+	 * within this page, and is smaller otherwise.
+	 */
+	copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE);
+
+	page_1 = kmap_atomic(p);
+
+	/* page_1 is a u32 pointer, offset is expressed in bytes */
+	page_1 += offset>>2;
+
+	kbase_sync_single_for_cpu(katom->kctx->kbdev,
+			kbase_dma_addr(p) + offset,
+			copy_size, DMA_BIDIRECTIONAL);
+
+	memcpy(dst, page_1, copy_size);
+
+	/* The data needed overflows the page boundary,
+	 * so map the subsequent page as well */
+	if (copy_size < JOB_HEADER_SIZE) {
+		p = as_page(page_array[page_index + 1]);
+		page_2 = kmap_atomic(p);
+
+		kbase_sync_single_for_cpu(katom->kctx->kbdev,
+				kbase_dma_addr(p),
+				JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL);
+
+		memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
+	}
+
+	/* We managed to correctly map one or two pages (in case of overflow) */
+	/* Get Bounding Box data and restart index from fault address low word */
+	minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK;
+	minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+	maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK;
+	maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+	restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK;
+	restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK;
+
+	dev_warn(dev, "Before Clamping:\n"
+			"Jobstatus: %08x\n"
+			"restartIdx: %08x\n"
+			"Fault_addr_low: %08x\n"
+			"minCoordsX: %08x minCoordsY: %08x\n"
+			"maxCoordsX: %08x maxCoordsY: %08x\n",
+			job_header[JOB_DESC_STATUS_WORD],
+			job_header[JOB_DESC_RESTART_INDEX_WORD],
+			job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+			minX, minY,
+			maxX, maxY);
+
+	/* Set the restart index to the one which generated the fault*/
+	job_header[JOB_DESC_RESTART_INDEX_WORD] =
+			job_header[JOB_DESC_FAULT_ADDR_LOW_WORD];
+
+	if (restartX < minX) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY;
+		dev_warn(dev,
+			"Clamping restart X index to minimum. %08x clamped to %08x\n",
+			restartX, minX);
+		clamped =  1;
+	}
+	if (restartY < minY) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX;
+		dev_warn(dev,
+			"Clamping restart Y index to minimum. %08x clamped to %08x\n",
+			restartY, minY);
+		clamped =  1;
+	}
+	if (restartX > maxX) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY;
+		dev_warn(dev,
+			"Clamping restart X index to maximum. %08x clamped to %08x\n",
+			restartX, maxX);
+		clamped =  1;
+	}
+	if (restartY > maxY) {
+		job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX;
+		dev_warn(dev,
+			"Clamping restart Y index to maximum. %08x clamped to %08x\n",
+			restartY, maxY);
+		clamped =  1;
+	}
+
+	if (clamped) {
+		/* Reset the fault address low word
+		 * and set the job status to STOPPED */
+		job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0;
+		job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED;
+		dev_warn(dev, "After Clamping:\n"
+				"Jobstatus: %08x\n"
+				"restartIdx: %08x\n"
+				"Fault_addr_low: %08x\n"
+				"minCoordsX: %08x minCoordsY: %08x\n"
+				"maxCoordsX: %08x maxCoordsY: %08x\n",
+				job_header[JOB_DESC_STATUS_WORD],
+				job_header[JOB_DESC_RESTART_INDEX_WORD],
+				job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+				minX, minY,
+				maxX, maxY);
+
+		/* Flush CPU cache to update memory for future GPU reads*/
+		memcpy(page_1, dst, copy_size);
+		p = as_page(page_array[page_index]);
+
+		kbase_sync_single_for_device(katom->kctx->kbdev,
+				kbase_dma_addr(p) + offset,
+				copy_size, DMA_TO_DEVICE);
+
+		if (copy_size < JOB_HEADER_SIZE) {
+			memcpy(page_2, dst + copy_size,
+					JOB_HEADER_SIZE - copy_size);
+			p = as_page(page_array[page_index + 1]);
+
+			kbase_sync_single_for_device(katom->kctx->kbdev,
+					kbase_dma_addr(p),
+					JOB_HEADER_SIZE - copy_size,
+					DMA_TO_DEVICE);
+		}
+	}
+	if (copy_size < JOB_HEADER_SIZE)
+		kunmap_atomic(page_2);
+
+	kunmap_atomic(page_1);
+
+out_unlock:
+	kbase_gpu_vm_unlock(katom->kctx);
+	return clamped;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
new file mode 100644
index 0000000..379a05a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2014, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_10969_WORKAROUND_
+#define _KBASE_10969_WORKAROUND_
+
+/**
+ * kbasep_10969_workaround_clamp_coordinates - Apply the WA to clamp the restart indices
+ * @katom: atom representing the fragment job for which the WA has to be applied
+ *
+ * This workaround is used to solve an HW issue with single iterator GPUs.
+ * If a fragment job is soft-stopped on the edge of its bounding box, it can happen
+ * that the restart index is out of bounds and the rerun causes a tile range
+ * fault. If this happens we try to clamp the restart index to a correct value.
+ */
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom);
+
+#endif /* _KBASE_10969_WORKAROUND_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c
new file mode 100644
index 0000000..4cc93a9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c
@@ -0,0 +1,111 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_as_fault_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+
+static int kbase_as_fault_read(struct seq_file *sfile, void *data)
+{
+	uintptr_t as_no = (uintptr_t) sfile->private;
+
+	struct list_head *entry;
+	const struct list_head *kbdev_list;
+	struct kbase_device *kbdev = NULL;
+
+	kbdev_list = kbase_dev_list_get();
+
+	list_for_each(entry, kbdev_list) {
+		kbdev = list_entry(entry, struct kbase_device, entry);
+
+		if (kbdev->debugfs_as_read_bitmap & (1ULL << as_no)) {
+
+			/* don't show this one again until another fault occurs */
+			kbdev->debugfs_as_read_bitmap &= ~(1ULL << as_no);
+
+			/* output the last page fault addr */
+			seq_printf(sfile, "%llu\n",
+				   (u64) kbdev->as[as_no].pf_data.addr);
+		}
+
+	}
+
+	kbase_dev_list_put(kbdev_list);
+
+	return 0;
+}
+
+static int kbase_as_fault_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbase_as_fault_read, in->i_private);
+}
+
+static const struct file_operations as_fault_fops = {
+	.open = kbase_as_fault_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ *  Initialize debugfs entry for each address space
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+	uint i;
+	char as_name[64];
+	struct dentry *debugfs_directory;
+
+	kbdev->debugfs_as_read_bitmap = 0ULL;
+
+	KBASE_DEBUG_ASSERT(kbdev->nr_hw_address_spaces);
+	KBASE_DEBUG_ASSERT(sizeof(kbdev->as[0].pf_data.addr) == sizeof(u64));
+
+	debugfs_directory = debugfs_create_dir("address_spaces",
+					       kbdev->mali_debugfs_directory);
+
+	if (debugfs_directory) {
+		for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+			snprintf(as_name, ARRAY_SIZE(as_name), "as%u", i);
+			debugfs_create_file(as_name, S_IRUGO,
+					    debugfs_directory,
+					    (void *)(uintptr_t)i,
+					    &as_fault_fops);
+		}
+	} else {
+		dev_warn(kbdev->dev,
+			 "unable to create address_spaces debugfs directory");
+	}
+
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+	return;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h
new file mode 100644
index 0000000..496d8b1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_AS_FAULT_DEBUG_FS_H
+#define _KBASE_AS_FAULT_DEBUG_FS_H
+
+/**
+ * kbase_as_fault_debugfs_init() - Add debugfs files for reporting page faults
+ *
+ * @kbdev: Pointer to kbase_device
+ */
+void kbase_as_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_as_fault_debugfs_new() - make the last fault available on debugfs
+ *
+ * @kbdev: Pointer to kbase_device
+ * @as_no: The address space the fault occurred on
+ */
+static inline void
+kbase_as_fault_debugfs_new(struct kbase_device *kbdev, int as_no)
+{
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_MALI_DEBUG
+	kbdev->debugfs_as_read_bitmap |= (1ULL << as_no);
+#endif /* CONFIG_MALI_DEBUG */
+#endif /* CONFIG_DEBUG_FS */
+	return;
+}
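+
+/*
+ * Usage sketch (illustrative only): the MMU fault handler stores the faulting
+ * address in kbdev->as[as_no].pf_data.addr and then calls
+ * kbase_as_fault_debugfs_new(kbdev, as_no). The next read of the
+ * address_spaces/as<n> debugfs file prints that address once and clears the
+ * bit again (see kbase_as_fault_read()).
+ */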
+
+#endif  /*_KBASE_AS_FAULT_DEBUG_FS_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c
new file mode 100644
index 0000000..27a03cf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#include "mali_kbase_cache_policy.h"
+
+/*
+ * The output flags should be a combination of the following values:
+ * KBASE_REG_CPU_CACHED: CPU cache should be enabled
+ * KBASE_REG_GPU_CACHED: GPU cache should be enabled
+ *
+ * NOTE: Some components within the GPU might only be able to access memory
+ * that is KBASE_REG_GPU_CACHED. Refer to the specific GPU implementation for
+ * more details.
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
+{
+	u32 cache_flags = 0;
+
+	CSTD_UNUSED(nr_pages);
+
+	if (!(flags & BASE_MEM_UNCACHED_GPU))
+		cache_flags |= KBASE_REG_GPU_CACHED;
+
+	if (flags & BASE_MEM_CACHED_CPU)
+		cache_flags |= KBASE_REG_CPU_CACHED;
+
+	return cache_flags;
+}
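+
+/*
+ * Usage sketch (illustrative only): with the default policy above, a region
+ * created with BASE_MEM_CACHED_CPU set and BASE_MEM_UNCACHED_GPU clear gets
+ * both caches enabled:
+ *
+ *	u32 f = kbase_cache_enabled(BASE_MEM_CACHED_CPU, nr_pages);
+ *	(f == (KBASE_REG_CPU_CACHED | KBASE_REG_GPU_CACHED))
+ */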
+
+
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(kbdev->dev, handle, size, dir);
+}
+
+
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(kbdev->dev, handle, size, dir);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h
new file mode 100644
index 0000000..8a1e529
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Cache Policy API.
+ */
+
+#ifndef _KBASE_CACHE_POLICY_H_
+#define _KBASE_CACHE_POLICY_H_
+
+#include "mali_kbase.h"
+#include "mali_base_kernel.h"
+
+/**
+ * kbase_cache_enabled - Choose the cache policy for a specific region
+ * @flags:    flags describing attributes of the region
+ * @nr_pages: total number of pages (backed or not) for the region
+ *
+ * Tells whether the CPU and GPU caches should be enabled or not for a specific
+ * region.
+ * This function can be modified to customize the cache policy depending on the
+ * flags and size of the region.
+ *
+ * Return: a combination of %KBASE_REG_CPU_CACHED and %KBASE_REG_GPU_CACHED
+ *         depending on the cache policy
+ */
+u32 kbase_cache_enabled(u32 flags, u32 nr_pages);
+
+#endif				/* _KBASE_CACHE_POLICY_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config.c b/drivers/gpu/arm/midgard/mali_kbase_config.c
new file mode 100644
index 0000000..ce7070d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_config.c
@@ -0,0 +1,48 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+int kbasep_platform_device_init(struct kbase_device *kbdev)
+{
+	struct kbase_platform_funcs_conf *platform_funcs_p;
+
+	platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+	if (platform_funcs_p && platform_funcs_p->platform_init_func)
+		return platform_funcs_p->platform_init_func(kbdev);
+
+	return 0;
+}
+
+void kbasep_platform_device_term(struct kbase_device *kbdev)
+{
+	struct kbase_platform_funcs_conf *platform_funcs_p;
+
+	platform_funcs_p = (struct kbase_platform_funcs_conf *)PLATFORM_FUNCS;
+	if (platform_funcs_p && platform_funcs_p->platform_term_func)
+		platform_funcs_p->platform_term_func(kbdev);
+}
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config.h b/drivers/gpu/arm/midgard/mali_kbase_config.h
new file mode 100644
index 0000000..7a2ec05
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_config.h
@@ -0,0 +1,304 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_config.h
+ * Configuration API and Attributes for KBase
+ */
+
+#ifndef _KBASE_CONFIG_H_
+#define _KBASE_CONFIG_H_
+
+#include <linux/mm.h>
+#include <mali_malisw.h>
+#include <mali_kbase_backend_config.h>
+#include <linux/rbtree.h>
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_config Configuration API and Attributes
+ * @{
+ */
+
+/* Forward declaration of struct kbase_device */
+struct kbase_device;
+
+/**
+ * kbase_platform_funcs_conf - Specifies platform init/term function pointers
+ *
+ * Specifies the functions pointers for platform specific initialization and
+ * termination. By default no functions are required. No additional platform
+ * specific control is necessary.
+ */
+struct kbase_platform_funcs_conf {
+	/**
+	 * platform_init_func - platform specific init function pointer
+	 * @kbdev - kbase_device pointer
+	 *
+	 * Returns 0 on success, negative error code otherwise.
+	 *
+	 * Function pointer for platform specific initialization, or NULL if no
+	 * initialization function is required. At the point this is called the
+	 * GPU is not active and its power and clocks are in an unknown
+	 * (platform specific) state, as kbase doesn't yet have control of them.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed (and possibly initialized) in here.
+	 */
+	int (*platform_init_func)(struct kbase_device *kbdev);
+	/**
+	 * platform_term_func - platform specific termination function pointer
+	 * @kbdev - kbase_device pointer
+	 *
+	 * Function pointer for platform specific termination, or NULL if no
+	 * termination function is required. At the point this is called the
+	 * GPU will be idle but still powered and clocked.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed (and possibly terminated) in here.
+	 */
+	void (*platform_term_func)(struct kbase_device *kbdev);
+};
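+
+/*
+ * Usage sketch (illustrative only; the example_* names are hypothetical): a
+ * platform file provides the functions and points PLATFORM_FUNCS at a table
+ * like this, which kbasep_platform_device_init()/term() then dereference:
+ *
+ *	static int example_platform_init(struct kbase_device *kbdev)
+ *	{
+ *		... claim regulators, set up kbdev->platform_context ...
+ *		return 0;
+ *	}
+ *
+ *	static void example_platform_term(struct kbase_device *kbdev)
+ *	{
+ *		... release whatever example_platform_init() acquired ...
+ *	}
+ *
+ *	struct kbase_platform_funcs_conf platform_funcs = {
+ *		.platform_init_func = example_platform_init,
+ *		.platform_term_func = example_platform_term,
+ *	};
+ */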
+
+/*
+ * @brief Specifies the callbacks for power management
+ *
+ * By default no callbacks will be made and the GPU must not be powered off.
+ */
+struct kbase_pm_callback_conf {
+	/** Callback for when the GPU is idle and the power to it can be switched off.
+	 *
+	 * The system integrator can decide whether to either do nothing, just switch off
+	 * the clocks to the GPU, or to completely power down the GPU.
+	 * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+	 * platform \em callbacks responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+	 */
+	void (*power_off_callback)(struct kbase_device *kbdev);
+
+	/** Callback for when the GPU is about to become active and power must be supplied.
+	 *
+	 * This function must not return until the GPU is powered and clocked sufficiently for register access to
+	 * succeed.  The return value specifies whether the GPU was powered down since the call to power_off_callback.
+	 * If the GPU state has been lost then this function must return 1, otherwise it should return 0.
+	 * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
+	 * platform \em callbacks responsibility to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
+	 *
+	 * The return value of the first call to this function is ignored.
+	 *
+	 * @return 1 if the GPU state may have been lost, 0 otherwise.
+	 */
+	int (*power_on_callback)(struct kbase_device *kbdev);
+
+	/** Callback for when the system is requesting a suspend and GPU power
+	 * must be switched off.
+	 *
+	 * Note that if this callback is present, then this may be called
+	 * without a preceding call to power_off_callback. Therefore this
+	 * callback must be able to take any action that might otherwise happen
+	 * in power_off_callback.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed and modified in here. It is the platform \em
+	 * callbacks responsibility to initialize and terminate this pointer if
+	 * used (see @ref kbase_platform_funcs_conf).
+	 */
+	void (*power_suspend_callback)(struct kbase_device *kbdev);
+
+	/** Callback for when the system is resuming from a suspend and GPU
+	 * power must be switched on.
+	 *
+	 * Note that if this callback is present, then this may be called
+	 * without a following call to power_on_callback. Therefore this
+	 * callback must be able to take any action that might otherwise happen
+	 * in power_on_callback.
+	 *
+	 * The platform specific private pointer kbase_device::platform_context
+	 * can be accessed and modified in here. It is the platform \em
+	 * callbacks responsibility to initialize and terminate this pointer if
+	 * used (see @ref kbase_platform_funcs_conf).
+	 */
+	void (*power_resume_callback)(struct kbase_device *kbdev);
+
+	/** Callback for handling runtime power management initialization.
+	 *
+	 * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+	 * will become active from calls made to the OS from within this function.
+	 * The runtime calls can be triggered by calls from @ref power_off_callback and @ref power_on_callback.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 *
+	 * @return 0 on success, else int error code.
+	 */
+	 int (*power_runtime_init_callback)(struct kbase_device *kbdev);
+
+	/** Callback for handling runtime power management termination.
+	 *
+	 * The runtime power management callbacks @ref power_runtime_off_callback and @ref power_runtime_on_callback
+	 * should no longer be called by the OS on completion of this function.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 */
+	void (*power_runtime_term_callback)(struct kbase_device *kbdev);
+
+	/** Callback for runtime power-off power management callback
+	 *
+	 * For linux this callback will be called by the kernel runtime_suspend callback.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 */
+	void (*power_runtime_off_callback)(struct kbase_device *kbdev);
+
+	/** Callback for runtime power-on power management callback
+	 *
+	 * For linux this callback will be called by the kernel runtime_resume callback.
+	 * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
+	 */
+	int (*power_runtime_on_callback)(struct kbase_device *kbdev);
+
+	/*
+	 * Optional callback for checking if GPU can be suspended when idle
+	 *
+	 * This callback will be called by the runtime power management core
+	 * when the reference count goes to 0 to provide notification that the
+	 * GPU now seems idle.
+	 *
+	 * If this callback finds that the GPU can't be powered off, or handles
+	 * suspend by powering off directly or queueing up a power off, a
+	 * non-zero value must be returned to prevent the runtime PM core from
+	 * also triggering a suspend.
+	 *
+	 * Returning 0 will cause the runtime PM core to conduct a regular
+	 * autosuspend.
+	 *
+	 * This callback is optional and if not provided regular autosuspend
+	 * will be triggered.
+	 *
+	 * Note: The Linux kernel must have CONFIG_PM_RUNTIME enabled to use
+	 * this feature.
+	 *
+	 * Return 0 if the GPU can be suspended, a positive value if it cannot
+	 * be suspended by runtime PM, else an OS error code.
+	 */
+	int (*power_runtime_idle_callback)(struct kbase_device *kbdev);
+
+	/*
+	 * Optional callback for Software Reset
+	 */
+	int (*soft_reset_callback)(struct kbase_device *kbdev);
+};
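+
+/*
+ * Usage sketch (illustrative only; the example_* names are hypothetical): a
+ * minimal integration that only gates the GPU clocks and leaves the runtime
+ * PM and soft-reset hooks unset:
+ *
+ *	static void example_power_off(struct kbase_device *kbdev)
+ *	{
+ *		... switch the GPU clocks off; register state is retained ...
+ *	}
+ *
+ *	static int example_power_on(struct kbase_device *kbdev)
+ *	{
+ *		... switch the GPU clocks back on ...
+ *		return 0;	(no GPU state was lost)
+ *	}
+ *
+ *	struct kbase_pm_callback_conf pm_callbacks = {
+ *		.power_off_callback = example_power_off,
+ *		.power_on_callback = example_power_on,
+ *	};
+ */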
+
+#ifdef CONFIG_OF
+struct kbase_platform_config {
+};
+#else
+
+/*
+ * @brief Specifies start and end of I/O memory region.
+ */
+struct kbase_io_memory_region {
+	u64 start;
+	u64 end;
+};
+
+/*
+ * @brief Specifies I/O related resources like IRQs and memory region for I/O operations.
+ */
+struct kbase_io_resources {
+	u32                      job_irq_number;
+	u32                      mmu_irq_number;
+	u32                      gpu_irq_number;
+	struct kbase_io_memory_region io_memory_region;
+};
+
+struct kbase_platform_config {
+	const struct kbase_io_resources *io_resources;
+};
+
+#endif /* CONFIG_OF */
+
+/**
+ * @brief Gets the pointer to platform config.
+ *
+ * @return Pointer to the platform config
+ */
+struct kbase_platform_config *kbase_get_platform_config(void);
+
+/**
+ * kbasep_platform_device_init: - Platform specific call to initialize hardware
+ * @kbdev: kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes.  The routine can initialize any hardware and context state that
+ * is required for the GPU block to function.
+ *
+ * Return: 0 if no errors have been found in the config.
+ *         Negative error code otherwise.
+ */
+int kbasep_platform_device_init(struct kbase_device *kbdev);
+
+/**
+ * kbasep_platform_device_term - Platform specific call to terminate hardware
+ * @kbdev: Kbase device pointer
+ *
+ * Function calls a platform defined routine if specified in the configuration
+ * attributes. The routine can destroy any platform specific context state and
+ * shut down any hardware functionality that is outside of the Power Management
+ * callbacks.
+ *
+ */
+void kbasep_platform_device_term(struct kbase_device *kbdev);
+
+#ifndef CONFIG_OF
+/**
+ * kbase_platform_register - Register a platform device for the GPU
+ *
+ * This can be used to register a platform device on systems where device tree
+ * is not enabled and the platform initialisation code in the kernel doesn't
+ * create the GPU device. Where possible device tree should be used instead.
+ *
+ * Return: 0 for success, any other fail causes module initialisation to fail
+ */
+int kbase_platform_register(void);
+
+/**
+ * kbase_platform_unregister - Unregister a fake platform device
+ *
+ * Unregister the platform device created with kbase_platform_register()
+ */
+void kbase_platform_unregister(void);
+#endif
+
+	  /** @} *//* end group kbase_config */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_CONFIG_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
new file mode 100644
index 0000000..cfb9a41
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
@@ -0,0 +1,259 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_config_defaults.h
+ *
+ * Default values for configuration settings
+ *
+ */
+
+#ifndef _KBASE_CONFIG_DEFAULTS_H_
+#define _KBASE_CONFIG_DEFAULTS_H_
+
+/* Include mandatory definitions per platform */
+#include <mali_kbase_config_platform.h>
+
+/**
+* Boolean indicating whether the driver is configured to be secure at the
+* cost of a potential loss of performance.
+*
+* This currently affects only r0p0-15dev0 HW and earlier.
+*
+* On r0p0-15dev0 HW and earlier, there are tradeoffs between security and
+* performance:
+*
+* - When this is set to true, the driver remains fully secure,
+* but potentially loses performance compared with setting this to
+* false.
+* - When set to false, the driver is open to certain security
+* attacks.
+*
+* From r0p0-00rel0 and onwards, there is no security loss by setting
+* this to false, and no performance loss by setting it to
+* true.
+*/
+#define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE false
+
+enum {
+	/**
+	 * Use unrestricted Address ID width on the AXI bus.
+	 */
+	KBASE_AID_32 = 0x0,
+
+	/**
+	 * Restrict GPU to a half of maximum Address ID count.
+	 * This will reduce performance, but reduce bus load due to GPU.
+	 */
+	KBASE_AID_16 = 0x3,
+
+	/**
+	 * Restrict GPU to a quarter of maximum Address ID count.
+	 * This will reduce performance, but reduce bus load due to GPU.
+	 */
+	KBASE_AID_8  = 0x2,
+
+	/**
+	 * Restrict GPU to an eighth of maximum Address ID count.
+	 * This will reduce performance, but reduce bus load due to GPU.
+	 */
+	KBASE_AID_4  = 0x1
+};
+
+enum {
+	/**
+	 * Use unrestricted Address ID width on the AXI bus.
+	 * Restricting ID width will reduce performance & bus load due to GPU.
+	 */
+	KBASE_3BIT_AID_32 = 0x0,
+
+	/* Restrict GPU to 7/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_28 = 0x1,
+
+	/* Restrict GPU to 3/4 of maximum Address ID count. */
+	KBASE_3BIT_AID_24 = 0x2,
+
+	/* Restrict GPU to 5/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_20 = 0x3,
+
+	/* Restrict GPU to 1/2 of maximum Address ID count.  */
+	KBASE_3BIT_AID_16 = 0x4,
+
+	/* Restrict GPU to 3/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_12 = 0x5,
+
+	/* Restrict GPU to 1/4 of maximum Address ID count. */
+	KBASE_3BIT_AID_8  = 0x6,
+
+	/* Restrict GPU to 1/8 of maximum Address ID count. */
+	KBASE_3BIT_AID_4  = 0x7
+};
+
+/**
+ * Default setting for read Address ID limiting on AXI bus.
+ *
+ * Attached value: u32 register value
+ *    KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ *    KBASE_AID_16 - use 16 IDs (4 ID bits)
+ *    KBASE_AID_8  - use 8 IDs (3 ID bits)
+ *    KBASE_AID_4  - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_ARID_LIMIT KBASE_AID_32
+
+/**
+ * Default setting for write Address ID limiting on AXI.
+ *
+ * Attached value: u32 register value
+ *    KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ *    KBASE_AID_16 - use 16 IDs (4 ID bits)
+ *    KBASE_AID_8  - use 8 IDs (3 ID bits)
+ *    KBASE_AID_4  - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_AWID_LIMIT KBASE_AID_32
+
+/**
+ * Default setting for read Address ID limiting on AXI bus.
+ *
+ * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_3BIT_ARID_LIMIT KBASE_3BIT_AID_32
+
+/**
+ * Default setting for write Address ID limiting on AXI.
+ *
+ * Default value: KBASE_3BIT_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
+ */
+#define DEFAULT_3BIT_AWID_LIMIT KBASE_3BIT_AID_32
+
+/**
+ * Default period for DVFS sampling
+ */
+#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */
+
+/**
+ * Power Management poweroff tick granularity. This is in nanoseconds to
+ * allow HR timer support.
+ *
+ * On each scheduling tick, the power manager core may decide to:
+ * -# Power off one or more shader cores
+ * -# Power off the entire GPU
+ */
+#define DEFAULT_PM_GPU_POWEROFF_TICK_NS (400000) /* 400us */
+
+/**
+ * Power Manager number of ticks before shader cores are powered off
+ */
+#define DEFAULT_PM_POWEROFF_TICK_SHADER (2) /* 400-800us */
+
+/**
+ * Default scheduling tick granularity
+ */
+#define DEFAULT_JS_SCHEDULING_PERIOD_NS    (100000000u) /* 100ms */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are soft-stopped.
+ *
+ * This defines the time-slice for a job (which may be different from that of a
+ * context)
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS       (1) /* 100ms-200ms */
+
+/**
+ * Default minimum number of scheduling ticks before CL jobs are soft-stopped.
+ */
+#define DEFAULT_JS_SOFT_STOP_TICKS_CL    (1) /* 100ms-200ms */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS    (50) /* 5s */
+#define DEFAULT_JS_HARD_STOP_TICKS_SS_8408  (300) /* 30s */
+
+/**
+ * Default minimum number of scheduling ticks before CL jobs are hard-stopped.
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_CL    (50) /* 5s */
+
+/**
+ * Default minimum number of scheduling ticks before jobs are hard-stopped
+ * during dumping
+ */
+#define DEFAULT_JS_HARD_STOP_TICKS_DUMPING   (15000) /* 1500s */
+
+/**
+ * Default timeout for some software jobs, after which the software event wait
+ * jobs will be cancelled.
+ */
+#define DEFAULT_JS_SOFT_JOB_TIMEOUT (3000) /* 3s */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job
+ */
+#define DEFAULT_JS_RESET_TICKS_SS           (55) /* 5.5s */
+#define DEFAULT_JS_RESET_TICKS_SS_8408     (450) /* 45s */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" CL job.
+ */
+#define DEFAULT_JS_RESET_TICKS_CL        (55) /* 5.5s */
+
+/**
+ * Default minimum number of scheduling ticks before the GPU is reset to clear a
+ * "stuck" job during dumping.
+ */
+#define DEFAULT_JS_RESET_TICKS_DUMPING   (15020) /* 1502s */
+
+/**
+ * Default number of milliseconds given for other jobs on the GPU to be
+ * soft-stopped when the GPU needs to be reset.
+ */
+#define DEFAULT_RESET_TIMEOUT_MS (3000) /* 3s */
+
+/**
+ * Default timeslice that a context is scheduled in for, in nanoseconds.
+ *
+ * When a context has used up this amount of time across its jobs, it is
+ * scheduled out to let another run.
+ *
+ * @note the resolution is nanoseconds (ns) here, because that's the format
+ * often used by the OS.
+ */
+#define DEFAULT_JS_CTX_TIMESLICE_NS (50000000) /* 50ms */
+
+/**
+ * Maximum frequency (in kHz) that the GPU can be clocked. For some platforms
+ * this isn't available, so we simply define a dummy value here. If devfreq
+ * is enabled the value will be read from there, otherwise this should be
+ * overridden by defining GPU_FREQ_KHZ_MAX in the platform file.
+ */
+#define DEFAULT_GPU_FREQ_KHZ_MAX (5000)
+
+#endif /* _KBASE_CONFIG_DEFAULTS_H_ */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.c b/drivers/gpu/arm/midgard/mali_kbase_context.c
new file mode 100644
index 0000000..59609d7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_context.c
@@ -0,0 +1,333 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel context APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_dma_fence.h>
+#include <mali_kbase_ctx_sched.h>
+
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat)
+{
+	struct kbase_context *kctx;
+	int err;
+	struct page *p;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	/* zero-initialized as a lot of code assumes it's zeroed out on create */
+	kctx = vzalloc(sizeof(*kctx));
+
+	if (!kctx)
+		goto out;
+
+	/* creating a context is considered a disjoint event */
+	kbase_disjoint_event(kbdev);
+
+	kctx->kbdev = kbdev;
+	kctx->as_nr = KBASEP_AS_NR_INVALID;
+	atomic_set(&kctx->refcount, 0);
+	if (is_compat)
+		kbase_ctx_flag_set(kctx, KCTX_COMPAT);
+#if defined(CONFIG_64BIT)
+	else
+		kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
+#endif /* defined(CONFIG_64BIT) */
+
+	atomic_set(&kctx->setup_complete, 0);
+	atomic_set(&kctx->setup_in_progress, 0);
+	spin_lock_init(&kctx->mm_update_lock);
+	kctx->process_mm = NULL;
+	atomic_set(&kctx->nonmapped_pages, 0);
+	kctx->slots_pullable = 0;
+	kctx->tgid = current->tgid;
+	kctx->pid = current->pid;
+
+	err = kbase_mem_pool_init(&kctx->mem_pool,
+				  kbdev->mem_pool_max_size_default,
+				  KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
+				  kctx->kbdev,
+				  &kbdev->mem_pool);
+	if (err)
+		goto free_kctx;
+
+	err = kbase_mem_pool_init(&kctx->lp_mem_pool,
+				  (kbdev->mem_pool_max_size_default >> 9),
+				  KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER,
+				  kctx->kbdev,
+				  &kbdev->lp_mem_pool);
+	if (err)
+		goto free_mem_pool;
+
+	err = kbase_mem_evictable_init(kctx);
+	if (err)
+		goto free_both_pools;
+
+	atomic_set(&kctx->used_pages, 0);
+
+	err = kbase_jd_init(kctx);
+	if (err)
+		goto deinit_evictable;
+
+	err = kbasep_js_kctx_init(kctx);
+	if (err)
+		goto free_jd;	/* safe to call kbasep_js_kctx_term  in this case */
+
+	err = kbase_event_init(kctx);
+	if (err)
+		goto free_jd;
+
+
+	atomic_set(&kctx->drain_pending, 0);
+
+	mutex_init(&kctx->reg_lock);
+
+	spin_lock_init(&kctx->mem_partials_lock);
+	INIT_LIST_HEAD(&kctx->mem_partials);
+
+	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
+	spin_lock_init(&kctx->waiting_soft_jobs_lock);
+	err = kbase_dma_fence_init(kctx);
+	if (err)
+		goto free_kcpu_wq;
+
+	err = kbase_mmu_init(kbdev, &kctx->mmu, kctx);
+	if (err)
+		goto term_dma_fence;
+
+	p = kbase_mem_alloc_page(&kctx->mem_pool);
+	if (!p)
+		goto no_sink_page;
+	kctx->aliasing_sink_page = as_tagged(page_to_phys(p));
+
+	init_waitqueue_head(&kctx->event_queue);
+
+	kctx->cookies = KBASE_COOKIE_MASK;
+
+
+	/* Make sure page 0 is not used... */
+	err = kbase_region_tracker_init(kctx);
+	if (err)
+		goto no_region_tracker;
+
+	err = kbase_sticky_resource_init(kctx);
+	if (err)
+		goto no_sticky;
+
+	err = kbase_jit_init(kctx);
+	if (err)
+		goto no_jit;
+#ifdef CONFIG_GPU_TRACEPOINTS
+	atomic_set(&kctx->jctx.work_id, 0);
+#endif
+
+	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
+
+	mutex_init(&kctx->legacy_hwcnt_lock);
+
+	kbase_timer_setup(&kctx->soft_job_timeout,
+			  kbasep_soft_job_timeout_worker);
+
+	return kctx;
+
+no_jit:
+	kbase_gpu_vm_lock(kctx);
+	kbase_sticky_resource_term(kctx);
+	kbase_gpu_vm_unlock(kctx);
+no_sticky:
+	kbase_region_tracker_term(kctx);
+no_region_tracker:
+	kbase_mem_pool_free(&kctx->mem_pool, p, false);
+no_sink_page:
+	kbase_mmu_term(kbdev, &kctx->mmu);
+term_dma_fence:
+	kbase_dma_fence_term(kctx);
+free_kcpu_wq:
+	kbase_event_cleanup(kctx);
+free_jd:
+	/* Safe to call this one even when it wasn't initialized (assuming kctx was sufficiently zeroed) */
+	kbasep_js_kctx_term(kctx);
+	kbase_jd_exit(kctx);
+deinit_evictable:
+	kbase_mem_evictable_deinit(kctx);
+free_both_pools:
+	kbase_mem_pool_term(&kctx->lp_mem_pool);
+free_mem_pool:
+	kbase_mem_pool_term(&kctx->mem_pool);
+free_kctx:
+	vfree(kctx);
+out:
+	return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_create_context);
+
+static void kbase_reg_pending_dtor(struct kbase_device *kbdev,
+		struct kbase_va_region *reg)
+{
+	dev_dbg(kbdev->dev, "Freeing pending unmapped region\n");
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+	kfree(reg);
+}
+
+void kbase_destroy_context(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev;
+	int pages;
+	unsigned long pending_regions_to_clean;
+	unsigned long flags;
+	struct page *p;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
+
+	/* Ensure the core is powered up for the destroy process */
+	/* A suspend won't happen here, because we're in a syscall from a userspace
+	 * thread. */
+	kbase_pm_context_active(kbdev);
+
+	kbase_mem_pool_mark_dying(&kctx->mem_pool);
+
+	kbase_jd_zap_context(kctx);
+
+	/* We have already waited for the jobs to complete (and hereafter there
+	 * can be no more submissions for the context). However the wait could
+	 * have timed out and there could still be work items in flight that
+	 * would do the completion processing of jobs.
+	 * kbase_jd_exit() will destroy the 'job_done_wq'. Destroying the wq
+	 * will cause it to drain and implicitly wait for those work items to
+	 * complete.
+	 */
+	kbase_jd_exit(kctx);
+
+#ifdef CONFIG_DEBUG_FS
+	/* Removing the rest of the debugfs entries here as we want to keep the
+	 * atom debugfs interface alive until all atoms have completed. This
+	 * is useful for debugging hung contexts. */
+	debugfs_remove_recursive(kctx->kctx_dentry);
+	kbase_debug_job_fault_context_term(kctx);
+#endif
+
+	kbase_event_cleanup(kctx);
+
+
+	/*
+	 * JIT must be terminated before the code below as it must be called
+	 * without the region lock being held.
+	 * The code above ensures no new JIT allocations can be made by
+	 * the time we get to this point of context teardown.
+	 */
+	kbase_jit_term(kctx);
+
+	kbase_gpu_vm_lock(kctx);
+
+	kbase_sticky_resource_term(kctx);
+
+	/* drop the aliasing sink page now that it can't be mapped anymore */
+	p = as_page(kctx->aliasing_sink_page);
+	kbase_mem_pool_free(&kctx->mem_pool, p, false);
+
+	/* free pending region setups */
+	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
+	while (pending_regions_to_clean) {
+		unsigned int cookie = __ffs(pending_regions_to_clean);
+
+		BUG_ON(!kctx->pending_regions[cookie]);
+
+		kbase_reg_pending_dtor(kbdev, kctx->pending_regions[cookie]);
+
+		kctx->pending_regions[cookie] = NULL;
+		pending_regions_to_clean &= ~(1UL << cookie);
+	}
+
+	kbase_region_tracker_term(kctx);
+	kbase_gpu_vm_unlock(kctx);
+
+
+	/* Safe to call this one even when it wasn't initialized (assuming kctx was sufficiently zeroed) */
+	kbasep_js_kctx_term(kctx);
+
+	kbase_dma_fence_term(kctx);
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
+	kbase_ctx_sched_remove_ctx(kctx);
+	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	kbase_mmu_term(kbdev, &kctx->mmu);
+
+	pages = atomic_read(&kctx->used_pages);
+	if (pages != 0)
+		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+	kbase_mem_evictable_deinit(kctx);
+	kbase_mem_pool_term(&kctx->mem_pool);
+	kbase_mem_pool_term(&kctx->lp_mem_pool);
+	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
+
+	vfree(kctx);
+
+	kbase_pm_context_idle(kbdev);
+}
+KBASE_EXPORT_SYMBOL(kbase_destroy_context);
+
+int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
+{
+	int err = 0;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	unsigned long irq_flags;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* Validate flags */
+	if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+
+	/* Translate the flags */
+	if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
+		kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
+
+	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+ out:
+	return err;
+}
+KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.h b/drivers/gpu/arm/midgard/mali_kbase_context.h
new file mode 100644
index 0000000..30b0f64
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_context.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_CONTEXT_H_
+#define _KBASE_CONTEXT_H_
+
+#include <linux/atomic.h>
+
+/**
+ * kbase_create_context() - Create a kernel base context.
+ * @kbdev: Kbase device
+ * @is_compat: Force creation of a 32-bit context
+ *
+ * Allocate and init a kernel base context.
+ *
+ * Return: new kbase context, or NULL on failure
+ */
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat);
+
+/**
+ * kbase_destroy_context - Destroy a kernel base context.
+ * @kctx: Context to destroy
+ *
+ * Calls kbase_destroy_os_context() to free OS specific structures.
+ * Will release all outstanding regions.
+ */
+void kbase_destroy_context(struct kbase_context *kctx);
+
+/**
+ * kbase_context_set_create_flags - Set creation flags on a context
+ * @kctx: Kbase context
+ * @flags: Flags to set; must be a combination of the flags in
+ *         BASE_CONTEXT_CREATE_KERNEL_FLAGS.
+ *
+ * Return: 0 on success, -EINVAL otherwise when an invalid flag is specified.
+ */
+int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
+
+/**
+ * kbase_ctx_flag - Check if @flag is set on @kctx
+ * @kctx: Pointer to kbase context to check
+ * @flag: Flag to check
+ *
+ * Return: true if @flag is set on @kctx, false if not.
+ */
+static inline bool kbase_ctx_flag(struct kbase_context *kctx,
+				      enum kbase_context_flags flag)
+{
+	return atomic_read(&kctx->flags) & flag;
+}
+
+/**
+ * kbase_ctx_flag_clear - Clear @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to clear
+ *
+ * Clear the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_clear(struct kbase_context *kctx,
+					enum kbase_context_flags flag)
+{
+#if KERNEL_VERSION(4, 3, 0) > LINUX_VERSION_CODE
+	/*
+	 * Kernels before v4.3 don't have atomic_andnot() or atomic_and(),
+	 * and atomic_clear_mask() was only available on some architectures
+	 * (it was removed for arm and arm64 in v3.13).
+	 *
+	 * Use a compare-exchange loop to clear the flag on pre-4.3 kernels;
+	 * on v4.3 and later atomic_andnot() is used instead.
+	 */
+	int old, new;
+
+	do {
+		old = atomic_read(&kctx->flags);
+		new = old & ~flag;
+
+	} while (atomic_cmpxchg(&kctx->flags, old, new) != old);
+#else
+	atomic_andnot(flag, &kctx->flags);
+#endif
+}
+
+/**
+ * kbase_ctx_flag_set - Set @flag on @kctx
+ * @kctx: Pointer to kbase context
+ * @flag: Flag to set
+ *
+ * Set the @flag on @kctx. This is done atomically, so other flags being
+ * cleared or set at the same time will be safe.
+ *
+ * Some flags have locking requirements, check the documentation for the
+ * respective flags.
+ */
+static inline void kbase_ctx_flag_set(struct kbase_context *kctx,
+				      enum kbase_context_flags flag)
+{
+	atomic_or(flag, &kctx->flags);
+}
+#endif /* _KBASE_CONTEXT_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_core_linux.c b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
new file mode 100644
index 0000000..3f65b5f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
@@ -0,0 +1,4340 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gator.h>
+#include <mali_kbase_mem_linux.h>
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#include <backend/gpu/mali_kbase_devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <ipa/mali_kbase_ipa_debugfs.h>
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+#ifdef CONFIG_MALI_NO_MALI
+#include "mali_kbase_model_linux.h"
+#include <backend/gpu/mali_kbase_model_dummy.h>
+#endif /* CONFIG_MALI_NO_MALI */
+#include "mali_kbase_mem_profile_debugfs_buf_size.h"
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase_mem.h"
+#include "mali_kbase_mem_pool_debugfs.h"
+#if !MALI_CUSTOMER_RELEASE
+#include "mali_kbase_regs_dump_debugfs.h"
+#endif /* !MALI_CUSTOMER_RELEASE */
+#include "mali_kbase_regs_history_debugfs.h"
+#include <mali_kbase_hwaccess_backend.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_ctx_sched.h>
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <backend/gpu/mali_kbase_pm_internal.h>
+#include "mali_kbase_ioctl.h"
+#include "mali_kbase_hwcnt_context.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_legacy.h"
+#include "mali_kbase_vinstr.h"
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+#include "mali_kbase_gwt.h"
+#endif
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/compat.h>	/* is_compat_task/in_compat_syscall */
+#include <linux/mman.h>
+#include <linux/version.h>
+#include <mali_kbase_hw.h>
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+
+#include <mali_kbase_config.h>
+
+
+#if (KERNEL_VERSION(3, 13, 0) <= LINUX_VERSION_CODE)
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#endif
+
+#include <mali_kbase_tlstream.h>
+
+#include <mali_kbase_as_fault_debugfs.h>
+
+/* GPU IRQ Tags */
+#define JOB_IRQ_TAG	0
+#define MMU_IRQ_TAG	1
+#define GPU_IRQ_TAG	2
+
+static int kbase_dev_nr;
+
+static DEFINE_MUTEX(kbase_dev_list_lock);
+static LIST_HEAD(kbase_dev_list);
+
+#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
+
+static int kbase_api_handshake(struct kbase_context *kctx,
+			       struct kbase_ioctl_version_check *version)
+{
+	switch (version->major) {
+	case BASE_UK_VERSION_MAJOR:
+		/* set minor to be the lowest common */
+		version->minor = min_t(int, BASE_UK_VERSION_MINOR,
+				       (int)version->minor);
+		break;
+	default:
+		/* We return our actual version regardless of whether it
+		 * matches the version requested by userspace -
+		 * userspace can bail out if it can't handle this
+		 * version.
+		 */
+		version->major = BASE_UK_VERSION_MAJOR;
+		version->minor = BASE_UK_VERSION_MINOR;
+		break;
+	}
+
+	/* save the proposed version number for later use */
+	kctx->api_version = KBASE_API_VERSION(version->major, version->minor);
+
+	return 0;
+}
+
+/**
+ * enum mali_error - Mali error codes shared with userspace
+ *
+ * This is a subset of the common Mali errors that can be returned to
+ * userspace.
+ * Values of matching user and kernel space enumerators MUST be the same.
+ * MALI_ERROR_NONE is guaranteed to be 0.
+ *
+ * @MALI_ERROR_NONE: Success
+ * @MALI_ERROR_OUT_OF_GPU_MEMORY: Not used in the kernel driver
+ * @MALI_ERROR_OUT_OF_MEMORY: Memory allocation failure
+ * @MALI_ERROR_FUNCTION_FAILED: Generic error code
+ */
+enum mali_error {
+	MALI_ERROR_NONE = 0,
+	MALI_ERROR_OUT_OF_GPU_MEMORY,
+	MALI_ERROR_OUT_OF_MEMORY,
+	MALI_ERROR_FUNCTION_FAILED,
+};
+
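+/*
+ * Bit flags recording which driver initialisation stages have completed, so
+ * that the teardown paths can undo only the stages that were actually set
+ * up.
+ */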
+enum {
+	inited_mem = (1u << 0),
+	inited_js = (1u << 1),
+	/* Bit number 2 was earlier assigned to the runtime-pm initialization
+	 * stage (which has been merged with the backend_early stage).
+	 */
+#ifdef CONFIG_MALI_DEVFREQ
+	inited_devfreq = (1u << 3),
+#endif /* CONFIG_MALI_DEVFREQ */
+	inited_tlstream = (1u << 4),
+	inited_backend_early = (1u << 5),
+	inited_hwcnt_gpu_iface = (1u << 6),
+	inited_hwcnt_gpu_ctx = (1u << 7),
+	inited_hwcnt_gpu_virt = (1u << 8),
+	inited_vinstr = (1u << 9),
+	inited_backend_late = (1u << 10),
+	inited_device = (1u << 11),
+	inited_job_fault = (1u << 13),
+	inited_sysfs_group = (1u << 14),
+	inited_misc_register = (1u << 15),
+	inited_get_device = (1u << 16),
+	inited_dev_list = (1u << 17),
+	inited_debugfs = (1u << 18),
+	inited_gpu_device = (1u << 19),
+	inited_registers_map = (1u << 20),
+	inited_io_history = (1u << 21),
+	inited_power_control = (1u << 22),
+	inited_buslogger = (1u << 23),
+	inited_protected = (1u << 24),
+	inited_ctx_sched = (1u << 25)
+};
+
+static struct kbase_device *to_kbase_device(struct device *dev)
+{
+	return dev_get_drvdata(dev);
+}
+
+static int assign_irqs(struct platform_device *pdev)
+{
+	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+	int i;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	/* 3 IRQ resources */
+	for (i = 0; i < 3; i++) {
+		struct resource *irq_res;
+		int irqtag;
+
+		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		if (!irq_res) {
+			dev_err(kbdev->dev, "No IRQ resource at index %d\n", i);
+			return -ENOENT;
+		}
+
+#ifdef CONFIG_OF
+		if (!strncmp(irq_res->name, "job", 4)) {
+			irqtag = JOB_IRQ_TAG;
+		} else if (!strncmp(irq_res->name, "mmu", 4)) {
+			irqtag = MMU_IRQ_TAG;
+		} else if (!strncmp(irq_res->name, "gpu", 4)) {
+			irqtag = GPU_IRQ_TAG;
+		} else {
+			dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
+				irq_res->name);
+			return -EINVAL;
+		}
+#else
+		irqtag = i;
+#endif /* CONFIG_OF */
+		kbdev->irqs[irqtag].irq = irq_res->start;
+		kbdev->irqs[irqtag].flags = irq_res->flags & IRQF_TRIGGER_MASK;
+	}
+
+	return 0;
+}
+
+/*
+ * API to acquire device list mutex and
+ * return pointer to the device list head
+ */
+const struct list_head *kbase_dev_list_get(void)
+{
+	mutex_lock(&kbase_dev_list_lock);
+	return &kbase_dev_list;
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_get);
+
+/* API to release the device list mutex */
+void kbase_dev_list_put(const struct list_head *dev_list)
+{
+	mutex_unlock(&kbase_dev_list_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_dev_list_put);
+
+/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
+struct kbase_device *kbase_find_device(int minor)
+{
+	struct kbase_device *kbdev = NULL;
+	struct list_head *entry;
+	const struct list_head *dev_list = kbase_dev_list_get();
+
+	list_for_each(entry, dev_list) {
+		struct kbase_device *tmp;
+
+		tmp = list_entry(entry, struct kbase_device, entry);
+		if (tmp->mdev.minor == minor || minor == -1) {
+			kbdev = tmp;
+			get_device(kbdev->dev);
+			break;
+		}
+	}
+	kbase_dev_list_put(dev_list);
+
+	return kbdev;
+}
+EXPORT_SYMBOL(kbase_find_device);
+
+void kbase_release_device(struct kbase_device *kbdev)
+{
+	put_device(kbdev->dev);
+}
+EXPORT_SYMBOL(kbase_release_device);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && \
+		!(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 28) && \
+		LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+/*
+ * Kernels before v4.6 don't have kstrtobool_from_user(), except longterm
+ * 4.4.y, which gained it in 4.4.28.
+ */
+static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
+{
+	char buf[4];
+
+	count = min(count, sizeof(buf) - 1);
+
+	if (copy_from_user(buf, s, count))
+		return -EFAULT;
+	buf[count] = '\0';
+
+	return strtobool(buf, res);
+}
+#endif
+
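+/*
+ * Per-context debugfs file "infinite_cache": writing a boolean sets or clears
+ * the KCTX_INFINITE_CACHE flag, reading reports the current state as Y or N.
+ */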
+static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	int err;
+	bool value;
+
+	err = kstrtobool_from_user(ubuf, size, &value);
+	if (err)
+		return err;
+
+	if (value)
+		kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+	else
+		kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);
+
+	return size;
+}
+
+static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	char buf[32];
+	int count;
+	bool value;
+
+	value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);
+
+	count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+
+	return simple_read_from_buffer(ubuf, size, off, buf, count);
+}
+
+static const struct file_operations kbase_infinite_cache_fops = {
+	.open = simple_open,
+	.write = write_ctx_infinite_cache,
+	.read = read_ctx_infinite_cache,
+};
+
+static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf,
+		size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	int err;
+	bool value;
+
+	err = kstrtobool_from_user(ubuf, size, &value);
+	if (err)
+		return err;
+
+	if (value) {
+#if defined(CONFIG_64BIT)
+		/* 32-bit clients cannot force SAME_VA */
+		if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+			return -EINVAL;
+		kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
+#else /* defined(CONFIG_64BIT) */
+		/* 32-bit clients cannot force SAME_VA */
+		return -EINVAL;
+#endif /* defined(CONFIG_64BIT) */
+	} else {
+		kbase_ctx_flag_clear(kctx, KCTX_FORCE_SAME_VA);
+	}
+
+	return size;
+}
+
+static ssize_t read_ctx_force_same_va(struct file *f, char __user *ubuf,
+		size_t size, loff_t *off)
+{
+	struct kbase_context *kctx = f->private_data;
+	char buf[32];
+	int count;
+	bool value;
+
+	value = kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA);
+
+	count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");
+
+	return simple_read_from_buffer(ubuf, size, off, buf, count);
+}
+
+static const struct file_operations kbase_force_same_va_fops = {
+	.open = simple_open,
+	.write = write_ctx_force_same_va,
+	.read = read_ctx_force_same_va,
+};
+
+static int kbase_open(struct inode *inode, struct file *filp)
+{
+	struct kbase_device *kbdev = NULL;
+	struct kbase_context *kctx;
+	int ret = 0;
+#ifdef CONFIG_DEBUG_FS
+	char kctx_name[64];
+#endif
+
+	kbdev = kbase_find_device(iminor(inode));
+
+	if (!kbdev)
+		return -ENODEV;
+
+#if (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE)
+	kctx = kbase_create_context(kbdev, in_compat_syscall());
+#else
+	kctx = kbase_create_context(kbdev, is_compat_task());
+#endif /* (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE) */
+	if (!kctx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	init_waitqueue_head(&kctx->event_queue);
+	filp->private_data = kctx;
+	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+	kctx->filp = filp;
+
+	if (kbdev->infinite_cache_active_default)
+		kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id);
+
+	kctx->kctx_dentry = debugfs_create_dir(kctx_name,
+			kbdev->debugfs_ctx_directory);
+
+	if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
+			kctx, &kbase_infinite_cache_fops);
+	debugfs_create_file("force_same_va", S_IRUSR | S_IWUSR,
+			kctx->kctx_dentry, kctx, &kbase_force_same_va_fops);
+
+	mutex_init(&kctx->mem_profile_lock);
+
+	kbasep_jd_debugfs_ctx_init(kctx);
+	kbase_debug_mem_view_init(filp);
+
+	kbase_debug_job_fault_context_init(kctx);
+
+	kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool, &kctx->lp_mem_pool);
+
+	kbase_jit_debugfs_init(kctx);
+#endif /* CONFIG_DEBUG_FS */
+
+	dev_dbg(kbdev->dev, "created base context\n");
+
+	{
+		struct kbasep_kctx_list_element *element;
+
+		element = kzalloc(sizeof(*element), GFP_KERNEL);
+		if (element) {
+			mutex_lock(&kbdev->kctx_list_lock);
+			element->kctx = kctx;
+			list_add(&element->link, &kbdev->kctx_list);
+			KBASE_TLSTREAM_TL_NEW_CTX(
+					element->kctx,
+					element->kctx->id,
+					(u32)(element->kctx->tgid));
+			mutex_unlock(&kbdev->kctx_list_lock);
+		} else {
+			/* we don't treat this as a fail - just warn about it */
+			dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
+		}
+	}
+	return 0;
+
+ out:
+	kbase_release_device(kbdev);
+	return ret;
+}
+
+static int kbase_release(struct inode *inode, struct file *filp)
+{
+	struct kbase_context *kctx = filp->private_data;
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct kbasep_kctx_list_element *element, *tmp;
+	bool found_element = false;
+
+	KBASE_TLSTREAM_TL_DEL_CTX(kctx);
+
+#ifdef CONFIG_DEBUG_FS
+	kbasep_mem_profile_debugfs_remove(kctx);
+#endif
+
+	mutex_lock(&kbdev->kctx_list_lock);
+	list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
+		if (element->kctx == kctx) {
+			list_del(&element->link);
+			kfree(element);
+			found_element = true;
+		}
+	}
+	mutex_unlock(&kbdev->kctx_list_lock);
+	if (!found_element)
+		dev_warn(kbdev->dev, "kctx not in kctx_list\n");
+
+	filp->private_data = NULL;
+
+	mutex_lock(&kctx->legacy_hwcnt_lock);
+	/* If this client was performing hwcnt dumping and did not explicitly
+	 * detach itself, destroy it now
+	 */
+	kbase_hwcnt_legacy_client_destroy(kctx->legacy_hwcnt_cli);
+	kctx->legacy_hwcnt_cli = NULL;
+	mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+	kbase_destroy_context(kctx);
+
+	dev_dbg(kbdev->dev, "deleted base context\n");
+	kbase_release_device(kbdev);
+	return 0;
+}
+
+static int kbase_api_set_flags(struct kbase_context *kctx,
+		struct kbase_ioctl_set_flags *flags)
+{
+	int err;
+
+	/* setup pending, try to signal that we'll do the setup,
+	 * if setup was already in progress, err this call
+	 */
+	if (atomic_cmpxchg(&kctx->setup_in_progress, 0, 1) != 0)
+		return -EINVAL;
+
+	err = kbase_context_set_create_flags(kctx, flags->create_flags);
+	/* if bad flags, will stay stuck in setup mode */
+	if (err)
+		return err;
+
+	atomic_set(&kctx->setup_complete, 1);
+	return 0;
+}
+
+static int kbase_api_job_submit(struct kbase_context *kctx,
+		struct kbase_ioctl_job_submit *submit)
+{
+	return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
+			submit->nr_atoms,
+			submit->stride, false);
+}
+
+static int kbase_api_get_gpuprops(struct kbase_context *kctx,
+		struct kbase_ioctl_get_gpuprops *get_props)
+{
+	struct kbase_gpu_props *kprops = &kctx->kbdev->gpu_props;
+	int err;
+
+	if (get_props->flags != 0) {
+		dev_err(kctx->kbdev->dev, "Unsupported flags to get_gpuprops");
+		return -EINVAL;
+	}
+
+	if (get_props->size == 0)
+		return kprops->prop_buffer_size;
+	if (get_props->size < kprops->prop_buffer_size)
+		return -EINVAL;
+
+	err = copy_to_user(u64_to_user_ptr(get_props->buffer),
+			kprops->prop_buffer,
+			kprops->prop_buffer_size);
+	if (err)
+		return -EFAULT;
+	return kprops->prop_buffer_size;
+}
+
+static int kbase_api_post_term(struct kbase_context *kctx)
+{
+	kbase_event_close(kctx);
+	return 0;
+}
+
+static int kbase_api_mem_alloc(struct kbase_context *kctx,
+		union kbase_ioctl_mem_alloc *alloc)
+{
+	struct kbase_va_region *reg;
+	u64 flags = alloc->in.flags;
+	u64 gpu_va;
+
+	rcu_read_lock();
+	/* Don't allow memory allocation until user space has set up the
+	 * tracking page (which sets kctx->process_mm). Also catches when we've
+	 * forked.
+	 */
+	if (rcu_dereference(kctx->process_mm) != current->mm) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	rcu_read_unlock();
+
+	if (flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+		return -ENOMEM;
+
+	/* Force SAME_VA if a 64-bit client.
+	 * The only exception is GPU-executable memory if an EXEC_VA zone
+	 * has been initialized. In that case, GPU-executable memory may
+	 * or may not be SAME_VA.
+	 */
+	if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
+			kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
+		if (!(flags & BASE_MEM_PROT_GPU_EX) || !kbase_has_exec_va_zone(kctx))
+			flags |= BASE_MEM_SAME_VA;
+	}
+
+
+	reg = kbase_mem_alloc(kctx, alloc->in.va_pages,
+			alloc->in.commit_pages,
+			alloc->in.extent,
+			&flags, &gpu_va);
+
+	if (!reg)
+		return -ENOMEM;
+
+	alloc->out.flags = flags;
+	alloc->out.gpu_va = gpu_va;
+
+	return 0;
+}
+
+static int kbase_api_mem_query(struct kbase_context *kctx,
+		union kbase_ioctl_mem_query *query)
+{
+	return kbase_mem_query(kctx, query->in.gpu_addr,
+			query->in.query, &query->out.value);
+}
+
+static int kbase_api_mem_free(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_free *free)
+{
+	return kbase_mem_free(kctx, free->gpu_addr);
+}
+
+static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
+		struct kbase_ioctl_hwcnt_reader_setup *setup)
+{
+	return kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, setup);
+}
+
+static int kbase_api_hwcnt_enable(struct kbase_context *kctx,
+		struct kbase_ioctl_hwcnt_enable *enable)
+{
+	int ret;
+
+	mutex_lock(&kctx->legacy_hwcnt_lock);
+	if (enable->dump_buffer != 0) {
+		/* Non-zero dump buffer, so user wants to create the client */
+		if (kctx->legacy_hwcnt_cli == NULL) {
+			ret = kbase_hwcnt_legacy_client_create(
+				kctx->kbdev->hwcnt_gpu_virt,
+				enable,
+				&kctx->legacy_hwcnt_cli);
+		} else {
+			/* This context already has a client */
+			ret = -EBUSY;
+		}
+	} else {
+		/* Zero dump buffer, so user wants to destroy the client */
+		if (kctx->legacy_hwcnt_cli != NULL) {
+			kbase_hwcnt_legacy_client_destroy(
+				kctx->legacy_hwcnt_cli);
+			kctx->legacy_hwcnt_cli = NULL;
+			ret = 0;
+		} else {
+			/* This context has no client to destroy */
+			ret = -EINVAL;
+		}
+	}
+	mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+	return ret;
+}
+
+static int kbase_api_hwcnt_dump(struct kbase_context *kctx)
+{
+	int ret;
+
+	mutex_lock(&kctx->legacy_hwcnt_lock);
+	ret = kbase_hwcnt_legacy_client_dump(kctx->legacy_hwcnt_cli);
+	mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+	return ret;
+}
+
+static int kbase_api_hwcnt_clear(struct kbase_context *kctx)
+{
+	int ret;
+
+	mutex_lock(&kctx->legacy_hwcnt_lock);
+	ret = kbase_hwcnt_legacy_client_clear(kctx->legacy_hwcnt_cli);
+	mutex_unlock(&kctx->legacy_hwcnt_lock);
+
+	return ret;
+}
+
+#ifdef CONFIG_MALI_NO_MALI
+static int kbase_api_hwcnt_set(struct kbase_context *kctx,
+		struct kbase_ioctl_hwcnt_values *values)
+{
+	gpu_model_set_dummy_prfcnt_sample(
+			(u32 __user *)(uintptr_t)values->data,
+			values->size);
+
+	return 0;
+}
+#endif
+
+static int kbase_api_disjoint_query(struct kbase_context *kctx,
+		struct kbase_ioctl_disjoint_query *query)
+{
+	query->counter = kbase_disjoint_event_get(kctx->kbdev);
+
+	return 0;
+}
+
+static int kbase_api_get_ddk_version(struct kbase_context *kctx,
+		struct kbase_ioctl_get_ddk_version *version)
+{
+	int ret;
+	int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+
+	if (version->version_buffer == 0)
+		return len;
+
+	if (version->size < len)
+		return -EOVERFLOW;
+
+	ret = copy_to_user(u64_to_user_ptr(version->version_buffer),
+			KERNEL_SIDE_DDK_VERSION_STRING,
+			sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
+
+	if (ret)
+		return -EFAULT;
+
+	return len;
+}
+
+/* Defaults for legacy JIT init ioctl */
+#define DEFAULT_MAX_JIT_ALLOCATIONS 255
+#define JIT_LEGACY_TRIM_LEVEL (0) /* No trimming */
+
+static int kbase_api_mem_jit_init_old(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_jit_init_old *jit_init)
+{
+	kctx->jit_version = 1;
+
+	return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
+			DEFAULT_MAX_JIT_ALLOCATIONS,
+			JIT_LEGACY_TRIM_LEVEL);
+}
+
+static int kbase_api_mem_jit_init(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_jit_init *jit_init)
+{
+	int i;
+
+	kctx->jit_version = 2;
+
+	for (i = 0; i < sizeof(jit_init->padding); i++) {
+		/* Ensure all padding bytes are 0 for potential future
+		 * extension
+		 */
+		if (jit_init->padding[i])
+			return -EINVAL;
+	}
+
+	return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
+			jit_init->max_allocations, jit_init->trim_level);
+}
+
+static int kbase_api_mem_exec_init(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_exec_init *exec_init)
+{
+	return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);
+}
+
+static int kbase_api_mem_sync(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_sync *sync)
+{
+	struct basep_syncset sset = {
+		.mem_handle.basep.handle = sync->handle,
+		.user_addr = sync->user_addr,
+		.size = sync->size,
+		.type = sync->type
+	};
+
+	return kbase_sync_now(kctx, &sset);
+}
+
+static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
+		union kbase_ioctl_mem_find_cpu_offset *find)
+{
+	return kbasep_find_enclosing_cpu_mapping_offset(
+			kctx,
+			find->in.cpu_addr,
+			find->in.size,
+			&find->out.offset);
+}
+
+static int kbase_api_mem_find_gpu_start_and_offset(struct kbase_context *kctx,
+		union kbase_ioctl_mem_find_gpu_start_and_offset *find)
+{
+	return kbasep_find_enclosing_gpu_mapping_start_and_offset(
+			kctx,
+			find->in.gpu_addr,
+			find->in.size,
+			&find->out.start,
+			&find->out.offset);
+}
+
+static int kbase_api_get_context_id(struct kbase_context *kctx,
+		struct kbase_ioctl_get_context_id *info)
+{
+	info->id = kctx->id;
+
+	return 0;
+}
+
+static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
+		struct kbase_ioctl_tlstream_acquire *acquire)
+{
+	return kbase_tlstream_acquire(kctx, acquire->flags);
+}
+
+static int kbase_api_tlstream_flush(struct kbase_context *kctx)
+{
+	kbase_tlstream_flush_streams();
+
+	return 0;
+}
+
+static int kbase_api_mem_commit(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_commit *commit)
+{
+	return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
+}
+
+static int kbase_api_mem_alias(struct kbase_context *kctx,
+		union kbase_ioctl_mem_alias *alias)
+{
+	struct base_mem_aliasing_info *ai;
+	u64 flags;
+	int err;
+
+	if (alias->in.nents == 0 || alias->in.nents > 2048)
+		return -EINVAL;
+
+	if (alias->in.stride > (U64_MAX / 2048))
+		return -EINVAL;
+
+	ai = vmalloc(sizeof(*ai) * alias->in.nents);
+	if (!ai)
+		return -ENOMEM;
+
+	err = copy_from_user(ai,
+			u64_to_user_ptr(alias->in.aliasing_info),
+			sizeof(*ai) * alias->in.nents);
+	if (err) {
+		vfree(ai);
+		return -EFAULT;
+	}
+
+	flags = alias->in.flags;
+	if (flags & BASE_MEM_FLAGS_KERNEL_ONLY) {
+		vfree(ai);
+		return -EINVAL;
+	}
+
+	alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
+			alias->in.stride, alias->in.nents,
+			ai, &alias->out.va_pages);
+
+	alias->out.flags = flags;
+
+	vfree(ai);
+
+	if (alias->out.gpu_va == 0)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int kbase_api_mem_import(struct kbase_context *kctx,
+		union kbase_ioctl_mem_import *import)
+{
+	int ret;
+	u64 flags = import->in.flags;
+
+	if (flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+		return -ENOMEM;
+
+	ret = kbase_mem_import(kctx,
+			import->in.type,
+			u64_to_user_ptr(import->in.phandle),
+			import->in.padding,
+			&import->out.gpu_va,
+			&import->out.va_pages,
+			&flags);
+
+	import->out.flags = flags;
+
+	return ret;
+}
+
+static int kbase_api_mem_flags_change(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_flags_change *change)
+{
+	if (change->flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+		return -ENOMEM;
+
+	return kbase_mem_flags_change(kctx, change->gpu_va,
+			change->flags, change->mask);
+}
+
+static int kbase_api_stream_create(struct kbase_context *kctx,
+		struct kbase_ioctl_stream_create *stream)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	int fd, ret;
+
+	/* Name must be NULL-terminated and padded with NULLs, so check last
+	 * character is NULL
+	 */
+	if (stream->name[sizeof(stream->name)-1] != 0)
+		return -EINVAL;
+
+	ret = kbase_sync_fence_stream_create(stream->name, &fd);
+
+	if (ret)
+		return ret;
+	return fd;
+#else
+	return -ENOENT;
+#endif
+}
+
+static int kbase_api_fence_validate(struct kbase_context *kctx,
+		struct kbase_ioctl_fence_validate *validate)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	return kbase_sync_fence_validate(validate->fd);
+#else
+	return -ENOENT;
+#endif
+}
+
+static int kbase_api_mem_profile_add(struct kbase_context *kctx,
+		struct kbase_ioctl_mem_profile_add *data)
+{
+	char *buf;
+	int err;
+
+	if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
+		dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big\n");
+		return -EINVAL;
+	}
+
+	buf = kmalloc(data->len, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf))
+		return -ENOMEM;
+
+	err = copy_from_user(buf, u64_to_user_ptr(data->buffer),
+			data->len);
+	if (err) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len);
+}
+
+static int kbase_api_soft_event_update(struct kbase_context *kctx,
+		struct kbase_ioctl_soft_event_update *update)
+{
+	if (update->flags != 0)
+		return -EINVAL;
+
+	return kbase_soft_event_update(kctx, update->event, update->new_status);
+}
+
+static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
+		struct kbase_ioctl_sticky_resource_map *map)
+{
+	int ret;
+	u64 i;
+	u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
+
+	if (!map->count || map->count > BASE_EXT_RES_COUNT_MAX)
+		return -EOVERFLOW;
+
+	ret = copy_from_user(gpu_addr, u64_to_user_ptr(map->address),
+			sizeof(u64) * map->count);
+
+	if (ret != 0)
+		return -EFAULT;
+
+	kbase_gpu_vm_lock(kctx);
+
+	for (i = 0; i < map->count; i++) {
+		if (!kbase_sticky_resource_acquire(kctx, gpu_addr[i])) {
+			/* Invalid resource */
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret != 0) {
+		while (i > 0) {
+			i--;
+			kbase_sticky_resource_release(kctx, NULL, gpu_addr[i]);
+		}
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return ret;
+}
+
+static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
+		struct kbase_ioctl_sticky_resource_unmap *unmap)
+{
+	int ret;
+	u64 i;
+	u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
+
+	if (!unmap->count || unmap->count > BASE_EXT_RES_COUNT_MAX)
+		return -EOVERFLOW;
+
+	ret = copy_from_user(gpu_addr, u64_to_user_ptr(unmap->address),
+			sizeof(u64) * unmap->count);
+
+	if (ret != 0)
+		return -EFAULT;
+
+	kbase_gpu_vm_lock(kctx);
+
+	for (i = 0; i < unmap->count; i++) {
+		if (!kbase_sticky_resource_release(kctx, NULL, gpu_addr[i])) {
+			/* Invalid resource, but we keep going anyway */
+			ret = -EINVAL;
+		}
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return ret;
+}
+
+#if MALI_UNIT_TEST
+static int kbase_api_tlstream_test(struct kbase_context *kctx,
+		struct kbase_ioctl_tlstream_test *test)
+{
+	kbase_tlstream_test(
+			test->tpw_count,
+			test->msg_delay,
+			test->msg_count,
+			test->aux_msg);
+
+	return 0;
+}
+
+static int kbase_api_tlstream_stats(struct kbase_context *kctx,
+		struct kbase_ioctl_tlstream_stats *stats)
+{
+	kbase_tlstream_stats(
+			&stats->bytes_collected,
+			&stats->bytes_generated);
+
+	return 0;
+}
+#endif /* MALI_UNIT_TEST */
+
+
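+/*
+ * Helpers for dispatching ioctls in kbase_ioctl(). Each macro checks at build
+ * time that the command's direction and payload size match the handler's
+ * argument type, copies the payload from/to user space as required and
+ * returns the handler's result (or -EFAULT if a copy fails).
+ */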
+#define KBASE_HANDLE_IOCTL(cmd, function)                          \
+	do {                                                       \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE);          \
+		return function(kctx);                             \
+	} while (0)
+
+#define KBASE_HANDLE_IOCTL_IN(cmd, function, type)                 \
+	do {                                                       \
+		type param;                                        \
+		int err;                                           \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE);         \
+		BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));     \
+		err = copy_from_user(&param, uarg, sizeof(param)); \
+		if (err)                                           \
+			return -EFAULT;                            \
+		return function(kctx, &param);                     \
+	} while (0)
+
+#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type)                \
+	do {                                                       \
+		type param;                                        \
+		int ret, err;                                      \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ);          \
+		BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));     \
+		ret = function(kctx, &param);                      \
+		err = copy_to_user(uarg, &param, sizeof(param));   \
+		if (err)                                           \
+			return -EFAULT;                            \
+		return ret;                                        \
+	} while (0)
+
+#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type)                  \
+	do {                                                           \
+		type param;                                            \
+		int ret, err;                                          \
+		BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ)); \
+		BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd));         \
+		err = copy_from_user(&param, uarg, sizeof(param));     \
+		if (err)                                               \
+			return -EFAULT;                                \
+		ret = function(kctx, &param);                          \
+		err = copy_to_user(uarg, &param, sizeof(param));       \
+		if (err)                                               \
+			return -EFAULT;                                \
+		return ret;                                            \
+	} while (0)
+
+static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct kbase_context *kctx = filp->private_data;
+	struct kbase_device *kbdev = kctx->kbdev;
+	void __user *uarg = (void __user *)arg;
+
+	/* Only these ioctls are available until setup is complete */
+	switch (cmd) {
+	case KBASE_IOCTL_VERSION_CHECK:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
+				kbase_api_handshake,
+				struct kbase_ioctl_version_check);
+		break;
+
+	case KBASE_IOCTL_SET_FLAGS:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
+				kbase_api_set_flags,
+				struct kbase_ioctl_set_flags);
+		break;
+	}
+
+	/* Block call until version handshake and setup is complete */
+	if (kctx->api_version == 0 || !atomic_read(&kctx->setup_complete))
+		return -EINVAL;
+
+	/* Normal ioctls */
+	switch (cmd) {
+	case KBASE_IOCTL_JOB_SUBMIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
+				kbase_api_job_submit,
+				struct kbase_ioctl_job_submit);
+		break;
+	case KBASE_IOCTL_GET_GPUPROPS:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
+				kbase_api_get_gpuprops,
+				struct kbase_ioctl_get_gpuprops);
+		break;
+	case KBASE_IOCTL_POST_TERM:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
+				kbase_api_post_term);
+		break;
+	case KBASE_IOCTL_MEM_ALLOC:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
+				kbase_api_mem_alloc,
+				union kbase_ioctl_mem_alloc);
+		break;
+	case KBASE_IOCTL_MEM_QUERY:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
+				kbase_api_mem_query,
+				union kbase_ioctl_mem_query);
+		break;
+	case KBASE_IOCTL_MEM_FREE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
+				kbase_api_mem_free,
+				struct kbase_ioctl_mem_free);
+		break;
+	case KBASE_IOCTL_DISJOINT_QUERY:
+		KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
+				kbase_api_disjoint_query,
+				struct kbase_ioctl_disjoint_query);
+		break;
+	case KBASE_IOCTL_GET_DDK_VERSION:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
+				kbase_api_get_ddk_version,
+				struct kbase_ioctl_get_ddk_version);
+		break;
+	case KBASE_IOCTL_MEM_JIT_INIT_OLD:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT_OLD,
+				kbase_api_mem_jit_init_old,
+				struct kbase_ioctl_mem_jit_init_old);
+		break;
+	case KBASE_IOCTL_MEM_JIT_INIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
+				kbase_api_mem_jit_init,
+				struct kbase_ioctl_mem_jit_init);
+		break;
+	case KBASE_IOCTL_MEM_EXEC_INIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT,
+				kbase_api_mem_exec_init,
+				struct kbase_ioctl_mem_exec_init);
+		break;
+	case KBASE_IOCTL_MEM_SYNC:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
+				kbase_api_mem_sync,
+				struct kbase_ioctl_mem_sync);
+		break;
+	case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
+				kbase_api_mem_find_cpu_offset,
+				union kbase_ioctl_mem_find_cpu_offset);
+		break;
+	case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
+				kbase_api_mem_find_gpu_start_and_offset,
+				union kbase_ioctl_mem_find_gpu_start_and_offset);
+		break;
+	case KBASE_IOCTL_GET_CONTEXT_ID:
+		KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
+				kbase_api_get_context_id,
+				struct kbase_ioctl_get_context_id);
+		break;
+	case KBASE_IOCTL_TLSTREAM_ACQUIRE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
+				kbase_api_tlstream_acquire,
+				struct kbase_ioctl_tlstream_acquire);
+		break;
+	case KBASE_IOCTL_TLSTREAM_FLUSH:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
+				kbase_api_tlstream_flush);
+		break;
+	case KBASE_IOCTL_MEM_COMMIT:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
+				kbase_api_mem_commit,
+				struct kbase_ioctl_mem_commit);
+		break;
+	case KBASE_IOCTL_MEM_ALIAS:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
+				kbase_api_mem_alias,
+				union kbase_ioctl_mem_alias);
+		break;
+	case KBASE_IOCTL_MEM_IMPORT:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
+				kbase_api_mem_import,
+				union kbase_ioctl_mem_import);
+		break;
+	case KBASE_IOCTL_MEM_FLAGS_CHANGE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
+				kbase_api_mem_flags_change,
+				struct kbase_ioctl_mem_flags_change);
+		break;
+	case KBASE_IOCTL_STREAM_CREATE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
+				kbase_api_stream_create,
+				struct kbase_ioctl_stream_create);
+		break;
+	case KBASE_IOCTL_FENCE_VALIDATE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
+				kbase_api_fence_validate,
+				struct kbase_ioctl_fence_validate);
+		break;
+	case KBASE_IOCTL_MEM_PROFILE_ADD:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
+				kbase_api_mem_profile_add,
+				struct kbase_ioctl_mem_profile_add);
+		break;
+	case KBASE_IOCTL_SOFT_EVENT_UPDATE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
+				kbase_api_soft_event_update,
+				struct kbase_ioctl_soft_event_update);
+		break;
+	case KBASE_IOCTL_STICKY_RESOURCE_MAP:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
+				kbase_api_sticky_resource_map,
+				struct kbase_ioctl_sticky_resource_map);
+		break;
+	case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
+				kbase_api_sticky_resource_unmap,
+				struct kbase_ioctl_sticky_resource_unmap);
+		break;
+
+	/* Instrumentation. */
+	case KBASE_IOCTL_HWCNT_READER_SETUP:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
+				kbase_api_hwcnt_reader_setup,
+				struct kbase_ioctl_hwcnt_reader_setup);
+		break;
+	case KBASE_IOCTL_HWCNT_ENABLE:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
+				kbase_api_hwcnt_enable,
+				struct kbase_ioctl_hwcnt_enable);
+		break;
+	case KBASE_IOCTL_HWCNT_DUMP:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP,
+				kbase_api_hwcnt_dump);
+		break;
+	case KBASE_IOCTL_HWCNT_CLEAR:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR,
+				kbase_api_hwcnt_clear);
+		break;
+#ifdef CONFIG_MALI_NO_MALI
+	case KBASE_IOCTL_HWCNT_SET:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
+				kbase_api_hwcnt_set,
+				struct kbase_ioctl_hwcnt_values);
+		break;
+#endif
+#ifdef CONFIG_MALI_CINSTR_GWT
+	case KBASE_IOCTL_CINSTR_GWT_START:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
+				kbase_gpu_gwt_start);
+		break;
+	case KBASE_IOCTL_CINSTR_GWT_STOP:
+		KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP,
+				kbase_gpu_gwt_stop);
+		break;
+	case KBASE_IOCTL_CINSTR_GWT_DUMP:
+		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
+				kbase_gpu_gwt_dump,
+				union kbase_ioctl_cinstr_gwt_dump);
+		break;
+#endif
+#if MALI_UNIT_TEST
+	case KBASE_IOCTL_TLSTREAM_TEST:
+		KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST,
+				kbase_api_tlstream_test,
+				struct kbase_ioctl_tlstream_test);
+		break;
+	case KBASE_IOCTL_TLSTREAM_STATS:
+		KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
+				kbase_api_tlstream_stats,
+				struct kbase_ioctl_tlstream_stats);
+		break;
+#endif
+	}
+
+	dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d", cmd, _IOC_NR(cmd));
+
+	return -ENOIOCTLCMD;
+}
+
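+/*
+ * Read completed job events for this context. Unless O_NONBLOCK is set, the
+ * call blocks until at least one event is available, then copies as many
+ * complete struct base_jd_event_v2 records as fit into the user buffer.
+ */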
+static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+	struct kbase_context *kctx = filp->private_data;
+	struct base_jd_event_v2 uevent;
+	int out_count = 0;
+
+	if (count < sizeof(uevent))
+		return -ENOBUFS;
+
+	do {
+		while (kbase_event_dequeue(kctx, &uevent)) {
+			if (out_count > 0)
+				goto out;
+
+			if (filp->f_flags & O_NONBLOCK)
+				return -EAGAIN;
+
+			if (wait_event_interruptible(kctx->event_queue,
+					kbase_event_pending(kctx)) != 0)
+				return -ERESTARTSYS;
+		}
+		if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
+			if (out_count == 0)
+				return -EPIPE;
+			goto out;
+		}
+
+		if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0)
+			return -EFAULT;
+
+		buf += sizeof(uevent);
+		out_count++;
+		count -= sizeof(uevent);
+	} while (count >= sizeof(uevent));
+
+ out:
+	return out_count * sizeof(uevent);
+}
+
+static unsigned int kbase_poll(struct file *filp, poll_table *wait)
+{
+	struct kbase_context *kctx = filp->private_data;
+
+	poll_wait(filp, &kctx->event_queue, wait);
+	if (kbase_event_pending(kctx))
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+void kbase_event_wakeup(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx);
+
+	wake_up_interruptible(&kctx->event_queue);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_wakeup);
+
+static int kbase_check_flags(int flags)
+{
+	/* Enforce that the O_CLOEXEC flag is kept so that execve() always
+	 * closes the file descriptor and it cannot leak into child
+	 * processes.
+	 */
+	if (0 == (flags & O_CLOEXEC))
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct file_operations kbase_fops = {
+	.owner = THIS_MODULE,
+	.open = kbase_open,
+	.release = kbase_release,
+	.read = kbase_read,
+	.poll = kbase_poll,
+	.unlocked_ioctl = kbase_ioctl,
+	.compat_ioctl = kbase_ioctl,
+	.mmap = kbase_mmap,
+	.check_flags = kbase_check_flags,
+	.get_unmapped_area = kbase_get_unmapped_area,
+};
+
+/**
+ * show_policy - Show callback for the power_policy sysfs file.
+ *
+ * This function is called to get the contents of the power_policy sysfs
+ * file. This is a list of the available policies with the currently active one
+ * surrounded by square brackets.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_policy(struct device *dev, struct device_attribute *attr, char *const buf)
+{
+	struct kbase_device *kbdev;
+	const struct kbase_pm_policy *current_policy;
+	const struct kbase_pm_policy *const *policy_list;
+	int policy_count;
+	int i;
+	ssize_t ret = 0;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	current_policy = kbase_pm_get_policy(kbdev);
+
+	policy_count = kbase_pm_list_policies(&policy_list);
+
+	for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
+		if (policy_list[i] == current_policy)
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
+		else
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
+	}
+
+	if (ret < PAGE_SIZE - 1) {
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	} else {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/**
+ * set_policy - Store callback for the power_policy sysfs file.
+ *
+ * This function is called when the power_policy sysfs file is written to.
+ * It matches the requested policy against the available policies and if a
+ * matching policy is found calls kbase_pm_set_policy() to change the
+ * policy.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	const struct kbase_pm_policy *new_policy = NULL;
+	const struct kbase_pm_policy *const *policy_list;
+	int policy_count;
+	int i;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	policy_count = kbase_pm_list_policies(&policy_list);
+
+	for (i = 0; i < policy_count; i++) {
+		if (sysfs_streq(policy_list[i]->name, buf)) {
+			new_policy = policy_list[i];
+			break;
+		}
+	}
+
+	if (!new_policy) {
+		dev_err(dev, "power_policy: policy not found\n");
+		return -EINVAL;
+	}
+
+	kbase_pm_set_policy(kbdev, new_policy);
+
+	return count;
+}
+
+/*
+ * The sysfs file power_policy.
+ *
+ * This is used for obtaining information about the available policies,
+ * determining which policy is currently active, and changing the active
+ * policy.
+ */
+static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
+
+/**
+ * show_core_mask - Show callback for the core_mask sysfs file.
+ *
+ * This function is called to get the contents of the core_mask sysfs file.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret = 0;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Current core mask (JS0) : 0x%llX\n",
+			kbdev->pm.debug_core_mask[0]);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Current core mask (JS1) : 0x%llX\n",
+			kbdev->pm.debug_core_mask[1]);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Current core mask (JS2) : 0x%llX\n",
+			kbdev->pm.debug_core_mask[2]);
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+			"Available core mask : 0x%llX\n",
+			kbdev->gpu_props.props.raw_props.shader_present);
+
+	return ret;
+}
+
+/**
+ * set_core_mask - Store callback for the core_mask sysfs file.
+ *
+ * This function is called when the core_mask sysfs file is written to.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	u64 new_core_mask[3];
+	int items, i;
+	ssize_t err = count;
+	unsigned long flags;
+	u64 shader_present, group0_core_mask;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	items = sscanf(buf, "%llx %llx %llx",
+			&new_core_mask[0], &new_core_mask[1],
+			&new_core_mask[2]);
+
+	if (items != 1 && items != 3) {
+		dev_err(kbdev->dev, "Couldn't process core mask write operation.\n"
+			"Use format <core_mask>\n"
+			"or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
+		err = -EINVAL;
+		goto end;
+	}
+
+	if (items == 1)
+		new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	shader_present = kbdev->gpu_props.props.raw_props.shader_present;
+	group0_core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+
+	for (i = 0; i < 3; ++i) {
+		if ((new_core_mask[i] & shader_present) != new_core_mask[i]) {
+			dev_err(dev, "Invalid core mask 0x%llX for JS %d: Includes non-existent cores (present = 0x%llX)",
+					new_core_mask[i], i, shader_present);
+			err = -EINVAL;
+			goto unlock;
+
+		} else if (!(new_core_mask[i] & shader_present & kbdev->pm.backend.ca_cores_enabled)) {
+			dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX)\n",
+					new_core_mask[i], i,
+					kbdev->gpu_props.props.raw_props.shader_present,
+					kbdev->pm.backend.ca_cores_enabled);
+			err = -EINVAL;
+			goto unlock;
+
+		} else if (!(new_core_mask[i] & group0_core_mask)) {
+			dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with group 0 core mask 0x%llX\n",
+					new_core_mask[i], i, group0_core_mask);
+			err = -EINVAL;
+			goto unlock;
+		}
+	}
+
+	if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
+			kbdev->pm.debug_core_mask[1] !=
+					new_core_mask[1] ||
+			kbdev->pm.debug_core_mask[2] !=
+					new_core_mask[2]) {
+
+		kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
+				new_core_mask[1], new_core_mask[2]);
+	}
+
+unlock:
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+end:
+	return err;
+}
+
+/*
+ * The sysfs file core_mask.
+ *
+ * This is used to restrict shader core availability for debugging purposes.
+ * Reading it will show the current core mask and the mask of cores available.
+ * Writing to it will set the current core mask.
+ */
+static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
+
+/**
+ * set_soft_job_timeout - Store callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This allows setting the timeout for software jobs. Soft event wait jobs
+ * that are still waiting when this period expires will be cancelled, while
+ * soft fence wait jobs will print debug information if the fence debug
+ * feature is enabled.
+ *
+ * This is expressed in milliseconds.
+ *
+ * Return: count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_soft_job_timeout(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int soft_job_timeout_ms;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
+	    (soft_job_timeout_ms <= 0))
+		return -EINVAL;
+
+	atomic_set(&kbdev->js_data.soft_job_timeout_ms,
+		   soft_job_timeout_ms);
+
+	return count;
+}
+
+/**
+ * show_soft_job_timeout - Show callback for the soft_job_timeout sysfs
+ * file.
+ *
+ * This will return the timeout for the software jobs.
+ *
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer for the sysfs file contents.
+ *
+ * Return: The number of bytes output to buf.
+ */
+static ssize_t show_soft_job_timeout(struct device *dev,
+				       struct device_attribute *attr,
+				       char * const buf)
+{
+	struct kbase_device *kbdev;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n",
+			 atomic_read(&kbdev->js_data.soft_job_timeout_ms));
+}
+
+static DEVICE_ATTR(soft_job_timeout, S_IRUGO | S_IWUSR,
+		   show_soft_job_timeout, set_soft_job_timeout);
+
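+/*
+ * Convert a timeout in milliseconds to scheduling ticks. A positive
+ * timeout_ms is converted using the current scheduling period (minimum one
+ * tick), a negative value selects default_ticks and zero keeps old_ticks.
+ */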
+static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
+				int default_ticks, u32 old_ticks)
+{
+	if (timeout_ms > 0) {
+		u64 ticks = timeout_ms * 1000000ULL;
+
+		do_div(ticks, kbdev->js_data.scheduling_period_ns);
+		if (!ticks)
+			return 1;
+		return ticks;
+	} else if (timeout_ms < 0) {
+		return default_ticks;
+	} else {
+		return old_ticks;
+	}
+}
+
+/**
+ * set_js_timeouts - Store callback for the js_timeouts sysfs file.
+ *
+ * This function is called when the js_timeouts sysfs file is written to. The
+ * file contains eight values separated by whitespace. The values are
+ * basically the same as %JS_SOFT_STOP_TICKS, %JS_HARD_STOP_TICKS_SS,
+ * %JS_HARD_STOP_TICKS_DUMPING, %JS_RESET_TICKS_SS, %JS_RESET_TICKS_DUMPING
+ * configuration values (plus their CL counterparts, in the order parsed
+ * below), with the difference that the js_timeouts values are expressed in
+ * MILLISECONDS.
+ *
+ * The js_timeouts sysfs file allows the values currently in use by the job
+ * scheduler to be overridden. Note that a value needs to be other than 0 for
+ * it to override the current job scheduler value.
+ *
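+ * For example, to override all eight timeouts at once (values in
+ * milliseconds; the numbers below are purely illustrative):
+ *
+ *   echo "500 500 5000 5000 15000 10000 10000 30000" > js_timeouts
+ *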
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int items;
+	long js_soft_stop_ms;
+	long js_soft_stop_ms_cl;
+	long js_hard_stop_ms_ss;
+	long js_hard_stop_ms_cl;
+	long js_hard_stop_ms_dumping;
+	long js_reset_ms_ss;
+	long js_reset_ms_cl;
+	long js_reset_ms_dumping;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
+			&js_soft_stop_ms, &js_soft_stop_ms_cl,
+			&js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
+			&js_hard_stop_ms_dumping, &js_reset_ms_ss,
+			&js_reset_ms_cl, &js_reset_ms_dumping);
+
+	if (items == 8) {
+		struct kbasep_js_device_data *js_data = &kbdev->js_data;
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+#define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
+	js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
+			default, js_data->ticks_name); \
+	dev_dbg(kbdev->dev, "Overriding " #ticks_name \
+			" with %lu ticks (%lu ms)\n", \
+			(unsigned long)js_data->ticks_name, \
+			ms_name); \
+	} while (0)
+
+		UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
+				DEFAULT_JS_SOFT_STOP_TICKS);
+		UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
+				DEFAULT_JS_SOFT_STOP_TICKS_CL);
+		UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
+				kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+				DEFAULT_JS_HARD_STOP_TICKS_SS_8408 :
+				DEFAULT_JS_HARD_STOP_TICKS_SS);
+		UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
+				DEFAULT_JS_HARD_STOP_TICKS_CL);
+		UPDATE_TIMEOUT(hard_stop_ticks_dumping,
+				js_hard_stop_ms_dumping,
+				DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
+		UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
+				kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408) ?
+				DEFAULT_JS_RESET_TICKS_SS_8408 :
+				DEFAULT_JS_RESET_TICKS_SS);
+		UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
+				DEFAULT_JS_RESET_TICKS_CL);
+		UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
+				DEFAULT_JS_RESET_TICKS_DUMPING);
+
+		kbase_js_set_timeouts(kbdev);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		return count;
+	}
+
+	dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
+			"Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
+			"Write 0 for no change, -1 to restore default timeout\n");
+	return -EINVAL;
+}
+
+static unsigned long get_js_timeout_in_ms(
+		u32 scheduling_period_ns,
+		u32 ticks)
+{
+	u64 ms = (u64)ticks * scheduling_period_ns;
+
+	do_div(ms, 1000000UL);
+	return ms;
+}
+
+/**
+ * show_js_timeouts - Show callback for the js_timeouts sysfs file.
+ *
+ * This function is called to get the contents of the js_timeouts sysfs
+ * file. It returns the values last written to the js_timeouts sysfs file;
+ * if the file has not been written to yet, the values currently in use are
+ * returned.
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+	unsigned long js_soft_stop_ms;
+	unsigned long js_soft_stop_ms_cl;
+	unsigned long js_hard_stop_ms_ss;
+	unsigned long js_hard_stop_ms_cl;
+	unsigned long js_hard_stop_ms_dumping;
+	unsigned long js_reset_ms_ss;
+	unsigned long js_reset_ms_cl;
+	unsigned long js_reset_ms_dumping;
+	u32 scheduling_period_ns;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
+
+#define GET_TIMEOUT(name) get_js_timeout_in_ms(\
+		scheduling_period_ns, \
+		kbdev->js_data.name)
+
+	js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
+	js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
+	js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
+	js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
+	js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
+	js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
+	js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
+	js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef GET_TIMEOUT
+
+	ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
+			js_soft_stop_ms, js_soft_stop_ms_cl,
+			js_hard_stop_ms_ss, js_hard_stop_ms_cl,
+			js_hard_stop_ms_dumping, js_reset_ms_ss,
+			js_reset_ms_cl, js_reset_ms_dumping);
+
+	if (ret >= PAGE_SIZE) {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * The sysfs file js_timeouts.
+ *
+ * This is used to override the current job scheduler values for
+ * JS_SOFT_STOP_TICKS_SS
+ * JS_SOFT_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_SS
+ * JS_HARD_STOP_TICKS_CL
+ * JS_HARD_STOP_TICKS_DUMPING
+ * JS_RESET_TICKS_SS
+ * JS_RESET_TICKS_CL
+ * JS_RESET_TICKS_DUMPING.
+ */
+static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
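+
+/*
+ * Hypothetical shell usage of the js_timeouts file (illustrative only; the
+ * sysfs path depends on how the platform device is named). The eight values
+ * follow the format parsed in set_js_timeouts():
+ *
+ *   echo "500 500 5000 5000 15000 30000 30000 35000" > js_timeouts
+ *   echo "0 0 0 0 0 -1 -1 -1" > js_timeouts   (keep five, restore defaults)
+ */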
+
+static u32 get_new_js_timeout(
+		u32 old_period,
+		u32 old_ticks,
+		u32 new_scheduling_period_ns)
+{
+	u64 ticks = (u64)old_period * (u64)old_ticks;
+
+	do_div(ticks, new_scheduling_period_ns);
+	return ticks ? ticks : 1;
+}
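+
+/*
+ * Worked example (illustrative only): 3 ticks under an old period of
+ * 100000000 ns rescale to (100000000 * 3) / 50000000 = 6 ticks when the
+ * period is halved to 50000000 ns, so the timeout expressed in ms is
+ * preserved.
+ */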
+
+/**
+ * set_js_scheduling_period - Store callback for the js_scheduling_period sysfs
+ *                            file
+ * @dev:   The device the sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the js_scheduling_period sysfs file is written
+ * to. It checks the data written, and if valid updates the js_scheduling_period
+ * value
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_scheduling_period(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	unsigned int js_scheduling_period;
+	u32 new_scheduling_period_ns;
+	u32 old_period;
+	struct kbasep_js_device_data *js_data;
+	unsigned long flags;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	js_data = &kbdev->js_data;
+
+	ret = kstrtouint(buf, 0, &js_scheduling_period);
+	if (ret || !js_scheduling_period) {
+		dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
+				"Use format <js_scheduling_period_ms>\n");
+		return -EINVAL;
+	}
+
+	new_scheduling_period_ns = js_scheduling_period * 1000000;
+
+	/* Update scheduling timeouts */
+	mutex_lock(&js_data->runpool_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* If no contexts have been scheduled since js_timeouts was last written
+	 * to, the new timeouts might not have been latched yet. So check if an
+	 * update is pending and use the new values if necessary. */
+
+	/* Use previous 'new' scheduling period as a base if present. */
+	old_period = js_data->scheduling_period_ns;
+
+#define SET_TIMEOUT(name) \
+		(js_data->name = get_new_js_timeout(\
+				old_period, \
+				kbdev->js_data.name, \
+				new_scheduling_period_ns))
+
+	SET_TIMEOUT(soft_stop_ticks);
+	SET_TIMEOUT(soft_stop_ticks_cl);
+	SET_TIMEOUT(hard_stop_ticks_ss);
+	SET_TIMEOUT(hard_stop_ticks_cl);
+	SET_TIMEOUT(hard_stop_ticks_dumping);
+	SET_TIMEOUT(gpu_reset_ticks_ss);
+	SET_TIMEOUT(gpu_reset_ticks_cl);
+	SET_TIMEOUT(gpu_reset_ticks_dumping);
+
+#undef SET_TIMEOUT
+
+	js_data->scheduling_period_ns = new_scheduling_period_ns;
+
+	kbase_js_set_timeouts(kbdev);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&js_data->runpool_mutex);
+
+	dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
+			js_scheduling_period);
+
+	return count;
+}
+
+/**
+ * show_js_scheduling_period - Show callback for the js_scheduling_period sysfs
+ *                             entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the scheduling period.
+ *
+ * This function is called to get the current period used for the JS scheduling
+ * period.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_scheduling_period(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	u32 period;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	period = kbdev->js_data.scheduling_period_ns;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n",
+			period / 1000000);
+
+	return ret;
+}
+
+static DEVICE_ATTR(js_scheduling_period, S_IRUGO | S_IWUSR,
+		show_js_scheduling_period, set_js_scheduling_period);
+
+#if !MALI_CUSTOMER_RELEASE
+/**
+ * set_force_replay - Store callback for the force_replay sysfs file.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_force_replay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	if (!strncmp("limit=", buf, MIN(6, count))) {
+		int force_replay_limit;
+		int items = sscanf(buf, "limit=%u", &force_replay_limit);
+
+		if (items == 1) {
+			kbdev->force_replay_random = false;
+			kbdev->force_replay_limit = force_replay_limit;
+			kbdev->force_replay_count = 0;
+
+			return count;
+		}
+	} else if (!strncmp("random_limit", buf, MIN(12, count))) {
+		kbdev->force_replay_random = true;
+		kbdev->force_replay_count = 0;
+
+		return count;
+	} else if (!strncmp("norandom_limit", buf, MIN(14, count))) {
+		kbdev->force_replay_random = false;
+		kbdev->force_replay_limit = KBASEP_FORCE_REPLAY_DISABLED;
+		kbdev->force_replay_count = 0;
+
+		return count;
+	} else if (!strncmp("core_req=", buf, MIN(9, count))) {
+		unsigned int core_req;
+		int items = sscanf(buf, "core_req=%x", &core_req);
+
+		if (items == 1) {
+			kbdev->force_replay_core_req = (base_jd_core_req)core_req;
+
+			return count;
+		}
+	}
+	dev_err(kbdev->dev, "Couldn't process force_replay write operation.\nPossible settings: limit=<limit>, random_limit, norandom_limit, core_req=<core_req>\n");
+	return -EINVAL;
+}
+
+/**
+ * show_force_replay - Show callback for the force_replay sysfs file.
+ *
+ * This function is called to get the contents of the force_replay sysfs
+ * file. It returns the last set value written to the force_replay sysfs file.
+ * If the file has not been written yet, the values will be 0.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_force_replay(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	if (kbdev->force_replay_random)
+		ret = scnprintf(buf, PAGE_SIZE,
+				"limit=0\nrandom_limit\ncore_req=%x\n",
+				kbdev->force_replay_core_req);
+	else
+		ret = scnprintf(buf, PAGE_SIZE,
+				"limit=%u\nnorandom_limit\ncore_req=%x\n",
+				kbdev->force_replay_limit,
+				kbdev->force_replay_core_req);
+
+	if (ret >= PAGE_SIZE) {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * The sysfs file force_replay.
+ */
+static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay,
+		set_force_replay);
+#endif /* !MALI_CUSTOMER_RELEASE */
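+
+/*
+ * Hypothetical write examples (illustrative only), matching the settings
+ * parsed in set_force_replay() above:
+ *
+ *   echo "limit=32" > force_replay
+ *   echo "random_limit" > force_replay
+ *   echo "core_req=20" > force_replay
+ */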
+
+#ifdef CONFIG_MALI_DEBUG
+static ssize_t set_js_softstop_always(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	int softstop_always;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtoint(buf, 0, &softstop_always);
+	if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
+		dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
+				"Use format <soft_stop_always>\n");
+		return -EINVAL;
+	}
+
+	kbdev->js_data.softstop_always = (bool) softstop_always;
+	dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
+			(kbdev->js_data.softstop_always) ?
+			"Enabled" : "Disabled");
+	return count;
+}
+
+static ssize_t show_js_softstop_always(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
+
+	if (ret >= PAGE_SIZE) {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/*
+ * By default, soft-stops are disabled when only a single context is present.
+ * The ability to enable soft-stop when only a single context is present can be
+ * used for debug and unit-testing purposes.
+ * (See the CL t6xx_stress_1 unit test as an example where this feature is used.)
+ */
+static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
+#endif /* CONFIG_MALI_DEBUG */
+
+#ifdef CONFIG_MALI_DEBUG
+typedef void (kbasep_debug_command_func) (struct kbase_device *);
+
+enum kbasep_debug_command_code {
+	KBASEP_DEBUG_COMMAND_DUMPTRACE,
+
+	/* This must be the last enum */
+	KBASEP_DEBUG_COMMAND_COUNT
+};
+
+struct kbasep_debug_command {
+	char *str;
+	kbasep_debug_command_func *func;
+};
+
+/* Debug commands supported by the driver */
+static const struct kbasep_debug_command debug_commands[] = {
+	{
+	 .str = "dumptrace",
+	 .func = &kbasep_trace_dump,
+	 }
+};
+
+/**
+ * show_debug - Show callback for the debug_command sysfs file.
+ *
+ * This function is called to get the contents of the debug_command sysfs
+ * file. This is a list of the available debug commands, separated by newlines.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The output buffer for the sysfs file contents
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	int i;
+	ssize_t ret = 0;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
+
+	if (ret >= PAGE_SIZE) {
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+
+	return ret;
+}
+
+/**
+ * issue_debug - Store callback for the debug_command sysfs file.
+ *
+ * This function is called when the debug_command sysfs file is written to.
+ * It matches the requested command against the available commands, and if
+ * a matching command is found, calls the associated function from
+ * @debug_commands to issue the command.
+ *
+ * @dev:	The device this sysfs file is for
+ * @attr:	The attributes of the sysfs file
+ * @buf:	The value written to the sysfs file
+ * @count:	The number of bytes written to the sysfs file
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int i;
+
+	kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
+		if (sysfs_streq(debug_commands[i].str, buf)) {
+			debug_commands[i].func(kbdev);
+			return count;
+		}
+	}
+
+	/* Debug Command not found */
+	dev_err(dev, "debug_command: command not known\n");
+	return -EINVAL;
+}
+
+/* The sysfs file debug_command.
+ *
+ * This is used to issue general debug commands to the device driver.
+ * Reading it will produce a list of debug commands, separated by newlines.
+ * Writing to it with one of those commands will issue said command.
+ */
+static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
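+
+/*
+ * Hypothetical usage (illustrative only): reading lists the commands defined
+ * in debug_commands[], writing one of them issues it:
+ *
+ *   cat debug_command            (prints "dumptrace")
+ *   echo dumptrace > debug_command
+ */
+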
+#endif /* CONFIG_MALI_DEBUG */
+
+/**
+ * kbase_show_gpuinfo - Show callback for the gpuinfo sysfs entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer to receive the GPU information.
+ *
+ * This function is called to get a description of the present Mali
+ * GPU via the gpuinfo sysfs entry.  This includes the GPU family, the
+ * number of cores, the hardware version and the raw product id.  For
+ * example:
+ *
+ *    Mali-T60x 4 cores r0p0 0x6956
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t kbase_show_gpuinfo(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	static const struct gpu_product_id_name {
+		unsigned id;
+		char *name;
+	} gpu_product_id_names[] = {
+		{ .id = GPU_ID_PI_T60X, .name = "Mali-T60x" },
+		{ .id = GPU_ID_PI_T62X, .name = "Mali-T62x" },
+		{ .id = GPU_ID_PI_T72X, .name = "Mali-T72x" },
+		{ .id = GPU_ID_PI_T76X, .name = "Mali-T76x" },
+		{ .id = GPU_ID_PI_T82X, .name = "Mali-T82x" },
+		{ .id = GPU_ID_PI_T83X, .name = "Mali-T83x" },
+		{ .id = GPU_ID_PI_T86X, .name = "Mali-T86x" },
+		{ .id = GPU_ID_PI_TFRX, .name = "Mali-T88x" },
+		{ .id = GPU_ID2_PRODUCT_TMIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G71" },
+		{ .id = GPU_ID2_PRODUCT_THEX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G72" },
+		{ .id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G51" },
+		{ .id = GPU_ID2_PRODUCT_TNOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G76" },
+		{ .id = GPU_ID2_PRODUCT_TDVX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G31" },
+		{ .id = GPU_ID2_PRODUCT_TGOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+		  .name = "Mali-G52" },
+	};
+	const char *product_name = "(Unknown Mali GPU)";
+	struct kbase_device *kbdev;
+	u32 gpu_id;
+	unsigned product_id, product_id_mask;
+	unsigned i;
+	bool is_new_format;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	product_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+	is_new_format = GPU_ID_IS_NEW_FORMAT(product_id);
+	product_id_mask =
+		(is_new_format ?
+			GPU_ID2_PRODUCT_MODEL :
+			GPU_ID_VERSION_PRODUCT_ID) >>
+		GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
+		const struct gpu_product_id_name *p = &gpu_product_id_names[i];
+
+		if ((GPU_ID_IS_NEW_FORMAT(p->id) == is_new_format) &&
+		    (p->id & product_id_mask) ==
+		    (product_id & product_id_mask)) {
+			product_name = p->name;
+			break;
+		}
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n",
+		product_name, kbdev->gpu_props.num_cores,
+		(gpu_id & GPU_ID_VERSION_MAJOR) >> GPU_ID_VERSION_MAJOR_SHIFT,
+		(gpu_id & GPU_ID_VERSION_MINOR) >> GPU_ID_VERSION_MINOR_SHIFT,
+		product_id);
+}
+static DEVICE_ATTR(gpuinfo, S_IRUGO, kbase_show_gpuinfo, NULL);
+
+/**
+ * set_dvfs_period - Store callback for the dvfs_period sysfs file.
+ * @dev:   The device this sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the dvfs_period sysfs file is written to. It
+ * checks the data written, and if valid updates the DVFS period variable.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_dvfs_period(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	int dvfs_period;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtoint(buf, 0, &dvfs_period);
+	if (ret || dvfs_period <= 0) {
+		dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
+				"Use format <dvfs_period_ms>\n");
+		return -EINVAL;
+	}
+
+	kbdev->pm.dvfs_period = dvfs_period;
+	dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
+
+	return count;
+}
+
+/**
+ * show_dvfs_period - Show callback for the dvfs_period sysfs entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the DVFS period.
+ *
+ * This function is called to get the current period used for the DVFS sample
+ * timer.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_dvfs_period(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
+
+	return ret;
+}
+
+static DEVICE_ATTR(dvfs_period, S_IRUGO | S_IWUSR, show_dvfs_period,
+		set_dvfs_period);
+
+/**
+ * set_pm_poweroff - Store callback for the pm_poweroff sysfs file.
+ * @dev:   The device this sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the pm_poweroff sysfs file is written to.
+ *
+ * This file contains three values separated by whitespace. The values
+ * are gpu_poweroff_time (the period of the poweroff timer, in ns),
+ * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
+ * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
+ * ticks before the GPU is powered off), in that order.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_pm_poweroff(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_pm_tick_timer_state *stt;
+	int items;
+	u64 gpu_poweroff_time;
+	unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
+	unsigned long flags;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
+			&poweroff_shader_ticks,
+			&poweroff_gpu_ticks);
+	if (items != 3) {
+		dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
+				"Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	stt = &kbdev->pm.backend.shader_tick_timer;
+	stt->configured_interval = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
+	stt->configured_ticks = poweroff_shader_ticks;
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (poweroff_gpu_ticks != 0)
+		dev_warn(kbdev->dev, "Separate GPU poweroff delay no longer supported.\n");
+
+	return count;
+}
+
+/**
+ * show_pm_poweroff - Show callback for the pm_poweroff sysfs entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the poweroff settings.
+ *
+ * This function is called to get the current period of the poweroff timer and
+ * the number of poweroff timer ticks before an idle shader is powered off.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_pm_poweroff(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_pm_tick_timer_state *stt;
+	ssize_t ret;
+	unsigned long flags;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	stt = &kbdev->pm.backend.shader_tick_timer;
+	ret = scnprintf(buf, PAGE_SIZE, "%llu %u 0\n",
+			ktime_to_ns(stt->configured_interval),
+			stt->configured_ticks);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+
+static DEVICE_ATTR(pm_poweroff, S_IRUGO | S_IWUSR, show_pm_poweroff,
+		set_pm_poweroff);
+
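+/*
+ * Hypothetical write example (illustrative only), following the format parsed
+ * in set_pm_poweroff(); the last value should be 0 since a separate GPU
+ * poweroff delay is no longer supported:
+ *
+ *   echo "400000 3 0" > pm_poweroff
+ */
+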
+/**
+ * set_reset_timeout - Store callback for the reset_timeout sysfs file.
+ * @dev:   The device this sysfs file is for
+ * @attr:  The attributes of the sysfs file
+ * @buf:   The value written to the sysfs file
+ * @count: The number of bytes written to the sysfs file
+ *
+ * This function is called when the reset_timeout sysfs file is written to. It
+ * checks the data written, and if valid updates the reset timeout.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_reset_timeout(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	int ret;
+	int reset_timeout;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtoint(buf, 0, &reset_timeout);
+	if (ret || reset_timeout <= 0) {
+		dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
+				"Use format <reset_timeout_ms>\n");
+		return -EINVAL;
+	}
+
+	kbdev->reset_timeout_ms = reset_timeout;
+	dev_dbg(kbdev->dev, "Reset timeout: %dms\n", reset_timeout);
+
+	return count;
+}
+
+/**
+ * show_reset_timeout - Show callback for the reset_timeout sysfs entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the reset timeout.
+ *
+ * This function is called to get the current reset timeout.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_reset_timeout(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
+
+	return ret;
+}
+
+static DEVICE_ATTR(reset_timeout, S_IRUGO | S_IWUSR, show_reset_timeout,
+		set_reset_timeout);
+
+
+
+static ssize_t show_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
+			kbase_mem_pool_size(&kbdev->mem_pool));
+
+	return ret;
+}
+
+static ssize_t set_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	unsigned long new_size;
+	int err;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kstrtoul(buf, 0, &new_size);
+	if (err)
+		return err;
+
+	kbase_mem_pool_trim(&kbdev->mem_pool, new_size);
+
+	return count;
+}
+
+static DEVICE_ATTR(mem_pool_size, S_IRUGO | S_IWUSR, show_mem_pool_size,
+		set_mem_pool_size);
+
+static ssize_t show_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+	ssize_t ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = scnprintf(buf, PAGE_SIZE, "%zu\n",
+			kbase_mem_pool_max_size(&kbdev->mem_pool));
+
+	return ret;
+}
+
+static ssize_t set_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	unsigned long new_max_size;
+	int err;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kstrtoul(buf, 0, &new_max_size);
+	if (err)
+		return -EINVAL;
+
+	kbase_mem_pool_set_max_size(&kbdev->mem_pool, new_max_size);
+
+	return count;
+}
+
+static DEVICE_ATTR(mem_pool_max_size, S_IRUGO | S_IWUSR, show_mem_pool_max_size,
+		set_mem_pool_max_size);
+
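+/*
+ * Hypothetical usage (illustrative only): both files take a number of pages.
+ * Writing mem_pool_size asks kbase_mem_pool_trim() to resize the device pool
+ * to that many pages, while mem_pool_max_size sets its upper bound:
+ *
+ *   echo 256 > mem_pool_size
+ *   echo 1024 > mem_pool_max_size
+ */
+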
+/**
+ * show_lp_mem_pool_size - Show size of the large memory pages pool.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the pool size.
+ *
+ * This function is called to get the number of large memory pages which currently populate the kbdev pool.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_lp_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%zu\n", kbase_mem_pool_size(&kbdev->lp_mem_pool));
+}
+
+/**
+ * set_lp_mem_pool_size - Set size of the large memory pages pool.
+ * @dev:   The device this sysfs file is for.
+ * @attr:  The attributes of the sysfs file.
+ * @buf:   The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This function is called to set the number of large memory pages which should populate the kbdev pool.
+ * This may cause existing pages to be removed from the pool, or new pages to be created and then added to the pool.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_lp_mem_pool_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	unsigned long new_size;
+	int err;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kstrtoul(buf, 0, &new_size);
+	if (err)
+		return err;
+
+	kbase_mem_pool_trim(&kbdev->lp_mem_pool, new_size);
+
+	return count;
+}
+
+static DEVICE_ATTR(lp_mem_pool_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_size,
+		set_lp_mem_pool_size);
+
+/**
+ * show_lp_mem_pool_max_size - Show maximum size of the large memory pages pool.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the pool size.
+ *
+ * This function is called to get the maximum number of large memory pages that the kbdev pool can possibly contain.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_lp_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%zu\n", kbase_mem_pool_max_size(&kbdev->lp_mem_pool));
+}
+
+/**
+ * set_lp_mem_pool_max_size - Set maximum size of the large memory pages pool.
+ * @dev:   The device this sysfs file is for.
+ * @attr:  The attributes of the sysfs file.
+ * @buf:   The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This function is called to set the maximum number of large memory pages that the kbdev pool can possibly contain.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_lp_mem_pool_max_size(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbase_device *kbdev;
+	unsigned long new_max_size;
+	int err;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	err = kstrtoul(buf, 0, &new_max_size);
+	if (err)
+		return -EINVAL;
+
+	kbase_mem_pool_set_max_size(&kbdev->lp_mem_pool, new_max_size);
+
+	return count;
+}
+
+static DEVICE_ATTR(lp_mem_pool_max_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_max_size,
+		set_lp_mem_pool_max_size);
+
+/**
+ * show_js_ctx_scheduling_mode - Show callback for js_ctx_scheduling_mode sysfs
+ *                               entry.
+ * @dev:  The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf:  The output buffer to receive the context scheduling mode information.
+ *
+ * This function is called to get the context scheduling mode being used by JS.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_ctx_scheduling_mode(struct device *dev,
+		struct device_attribute *attr, char * const buf)
+{
+	struct kbase_device *kbdev;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->js_ctx_scheduling_mode);
+}
+
+/**
+ * set_js_ctx_scheduling_mode - Set callback for js_ctx_scheduling_mode sysfs
+ *                              entry.
+ * @dev:   The device this sysfs file is for.
+ * @attr:  The attributes of the sysfs file.
+ * @buf:   The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This function is called when the js_ctx_scheduling_mode sysfs file is written
+ * to. It checks the data written, and if valid updates the ctx scheduling mode
+ * being used by JS.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_ctx_scheduling_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct kbasep_kctx_list_element *element;
+	u32 new_js_ctx_scheduling_mode;
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	int ret;
+
+	kbdev = to_kbase_device(dev);
+	if (!kbdev)
+		return -ENODEV;
+
+	ret = kstrtouint(buf, 0, &new_js_ctx_scheduling_mode);
+	if (ret || new_js_ctx_scheduling_mode >= KBASE_JS_PRIORITY_MODE_COUNT) {
+		dev_err(kbdev->dev, "Couldn't process js_ctx_scheduling_mode"
+				" write operation.\n"
+				"Use format <js_ctx_scheduling_mode>\n");
+		return -EINVAL;
+	}
+
+	if (new_js_ctx_scheduling_mode == kbdev->js_ctx_scheduling_mode)
+		return count;
+
+	mutex_lock(&kbdev->kctx_list_lock);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* Update the context priority mode */
+	kbdev->js_ctx_scheduling_mode = new_js_ctx_scheduling_mode;
+
+	/* Adjust priority of all the contexts as per the new mode */
+	list_for_each_entry(element, &kbdev->kctx_list, link)
+		kbase_js_update_ctx_priority(element->kctx);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->kctx_list_lock);
+
+	dev_dbg(kbdev->dev, "JS ctx scheduling mode: %u\n", new_js_ctx_scheduling_mode);
+
+	return count;
+}
+
+static DEVICE_ATTR(js_ctx_scheduling_mode, S_IRUGO | S_IWUSR,
+		show_js_ctx_scheduling_mode,
+		set_js_ctx_scheduling_mode);
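+
+/*
+ * Hypothetical usage (illustrative only): the written value must be below
+ * KBASE_JS_PRIORITY_MODE_COUNT, e.g.:
+ *
+ *   echo 1 > js_ctx_scheduling_mode
+ */
+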
+#ifdef CONFIG_DEBUG_FS
+
+/* Number of entries in serialize_jobs_settings[] */
+#define NR_SERIALIZE_JOBS_SETTINGS 5
+/* Maximum string length in serialize_jobs_settings[].name */
+#define MAX_SERIALIZE_JOBS_NAME_LEN 16
+
+static struct
+{
+	char *name;
+	u8 setting;
+} serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
+	{"none", 0},
+	{"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
+	{"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
+	{"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
+	{"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
+			KBASE_SERIALIZE_RESET}
+};
+
+/**
+ * kbasep_serialize_jobs_seq_show - Show callback for the serialize_jobs debugfs
+ *                                  file
+ * @sfile: seq_file pointer
+ * @data:  Private callback data
+ *
+ * This function is called to get the contents of the serialize_jobs debugfs
+ * file. This is a list of the available settings with the currently active one
+ * surrounded by square brackets.
+ *
+ * Return: 0 on success, or an error code on error
+ */
+static int kbasep_serialize_jobs_seq_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_device *kbdev = sfile->private;
+	int i;
+
+	CSTD_UNUSED(data);
+
+	for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
+		if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
+			seq_printf(sfile, "[%s] ",
+					serialize_jobs_settings[i].name);
+		else
+			seq_printf(sfile, "%s ",
+					serialize_jobs_settings[i].name);
+	}
+
+	seq_puts(sfile, "\n");
+
+	return 0;
+}
+
+/**
+ * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs
+ *                                       debugfs file.
+ * @file:  File pointer
+ * @ubuf:  User buffer containing data to store
+ * @count: Number of bytes in user buffer
+ * @ppos:  File position
+ *
+ * This function is called when the serialize_jobs debugfs file is written to.
+ * It matches the requested setting against the available settings and if a
+ * matching setting is found updates kbdev->serialize_jobs.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct kbase_device *kbdev = s->private;
+	char buf[MAX_SERIALIZE_JOBS_NAME_LEN];
+	int i;
+	bool valid = false;
+
+	CSTD_UNUSED(ppos);
+
+	count = min_t(size_t, sizeof(buf) - 1, count);
+	if (copy_from_user(buf, ubuf, count))
+		return -EFAULT;
+
+	buf[count] = 0;
+
+	for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
+		if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
+			kbdev->serialize_jobs =
+					serialize_jobs_settings[i].setting;
+			valid = true;
+			break;
+		}
+	}
+
+	if (!valid) {
+		dev_err(kbdev->dev, "serialize_jobs: invalid setting\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+/**
+ * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs
+ *                                     debugfs file
+ * @in:   inode pointer
+ * @file: file pointer
+ *
+ * Return: Zero on success, error code on failure
+ */
+static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
+		struct file *file)
+{
+	return single_open(file, kbasep_serialize_jobs_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
+	.open = kbasep_serialize_jobs_debugfs_open,
+	.read = seq_read,
+	.write = kbasep_serialize_jobs_debugfs_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
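+
+/*
+ * Hypothetical debugfs usage (illustrative only, assuming the device
+ * registered as "mali0" and debugfs is mounted at /sys/kernel/debug):
+ *
+ *   cat /sys/kernel/debug/mali0/serialize_jobs
+ *   none intra-slot inter-slot [full] full-reset
+ *   echo full-reset > /sys/kernel/debug/mali0/serialize_jobs
+ */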
+
+#endif /* CONFIG_DEBUG_FS */
+
+static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
+{
+	struct kbase_device *kbdev = container_of(data, struct kbase_device,
+		protected_mode_hwcnt_disable_work);
+	unsigned long flags;
+
+	bool do_disable;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !kbdev->protected_mode_hwcnt_desired &&
+		!kbdev->protected_mode_hwcnt_disabled;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (!do_disable)
+		return;
+
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	do_disable = !kbdev->protected_mode_hwcnt_desired &&
+		!kbdev->protected_mode_hwcnt_disabled;
+
+	if (do_disable) {
+		/* Protected mode state did not change while we were doing the
+		 * disable, so commit the work we just performed and continue
+		 * the state machine.
+		 */
+		kbdev->protected_mode_hwcnt_disabled = true;
+		kbase_backend_slot_update(kbdev);
+	} else {
+		/* Protected mode state was updated while we were doing the
+		 * disable, so we need to undo the disable we just performed.
+		 */
+		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+static int kbasep_protected_mode_init(struct kbase_device *kbdev)
+{
+#ifdef CONFIG_OF
+	struct device_node *protected_node;
+	struct platform_device *pdev;
+	struct protected_mode_device *protected_dev;
+#endif
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+		/* Use native protected ops */
+		kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
+				GFP_KERNEL);
+		if (!kbdev->protected_dev)
+			return -ENOMEM;
+		kbdev->protected_dev->data = kbdev;
+		kbdev->protected_ops = &kbase_native_protected_ops;
+		kbdev->protected_mode_support = true;
+		INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
+			kbasep_protected_mode_hwcnt_disable_worker);
+		kbdev->protected_mode_hwcnt_desired = true;
+		kbdev->protected_mode_hwcnt_disabled = false;
+		return 0;
+	}
+
+	kbdev->protected_mode_support = false;
+
+#ifdef CONFIG_OF
+	protected_node = of_parse_phandle(kbdev->dev->of_node,
+			"protected-mode-switcher", 0);
+
+	if (!protected_node)
+		protected_node = of_parse_phandle(kbdev->dev->of_node,
+				"secure-mode-switcher", 0);
+
+	if (!protected_node) {
+		/* If protected_node cannot be looked up then we assume
+		 * protected mode is not supported on this platform. */
+		dev_info(kbdev->dev, "Protected mode not available\n");
+		return 0;
+	}
+
+	pdev = of_find_device_by_node(protected_node);
+	if (!pdev)
+		return -EINVAL;
+
+	protected_dev = platform_get_drvdata(pdev);
+	if (!protected_dev)
+		return -EPROBE_DEFER;
+
+	kbdev->protected_ops = &protected_dev->ops;
+	kbdev->protected_dev = protected_dev;
+
+	if (kbdev->protected_ops) {
+		int err;
+
+		/* Make sure protected mode is disabled on startup */
+		mutex_lock(&kbdev->pm.lock);
+		err = kbdev->protected_ops->protected_mode_disable(
+				kbdev->protected_dev);
+		mutex_unlock(&kbdev->pm.lock);
+
+		/* protected_mode_disable() returns -EINVAL if not supported */
+		kbdev->protected_mode_support = (err != -EINVAL);
+	}
+#endif
+	return 0;
+}
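+
+/*
+ * Hypothetical device-tree fragment (illustrative only, node and label names
+ * invented) that the of_parse_phandle() lookup above would resolve:
+ *
+ *	gpu {
+ *		protected-mode-switcher = <&prot_switcher>;
+ *	};
+ */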
+
+static void kbasep_protected_mode_term(struct kbase_device *kbdev)
+{
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
+		cancel_work_sync(&kbdev->protected_mode_hwcnt_disable_work);
+		kfree(kbdev->protected_dev);
+	}
+}
+
+#ifdef CONFIG_MALI_NO_MALI
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+	return 0;
+}
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+}
+#else /* CONFIG_MALI_NO_MALI */
+static int kbase_common_reg_map(struct kbase_device *kbdev)
+{
+	int err = 0;
+
+	if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
+		dev_err(kbdev->dev, "Register window unavailable\n");
+		err = -EIO;
+		goto out_region;
+	}
+
+	kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
+	if (!kbdev->reg) {
+		dev_err(kbdev->dev, "Can't remap register window\n");
+		err = -EINVAL;
+		goto out_ioremap;
+	}
+
+	return err;
+
+ out_ioremap:
+	release_mem_region(kbdev->reg_start, kbdev->reg_size);
+ out_region:
+	return err;
+}
+
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
+{
+	if (kbdev->reg) {
+		iounmap(kbdev->reg);
+		release_mem_region(kbdev->reg_start, kbdev->reg_size);
+		kbdev->reg = NULL;
+		kbdev->reg_start = 0;
+		kbdev->reg_size = 0;
+	}
+}
+#endif /* CONFIG_MALI_NO_MALI */
+
+static int registers_map(struct kbase_device * const kbdev)
+{
+	/* the first memory resource is the physical address of the GPU
+	 * registers */
+	struct platform_device *pdev = to_platform_device(kbdev->dev);
+	struct resource *reg_res;
+	int err;
+
+	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!reg_res) {
+		dev_err(kbdev->dev, "Invalid register resource\n");
+		return -ENOENT;
+	}
+
+	kbdev->reg_start = reg_res->start;
+	kbdev->reg_size = resource_size(reg_res);
+
+	err = kbase_common_reg_map(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Failed to map registers\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static void registers_unmap(struct kbase_device *kbdev)
+{
+	kbase_common_reg_unmap(kbdev);
+}
+
+static int power_control_init(struct platform_device *pdev)
+{
+	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+	int err = 0;
+
+	if (!kbdev)
+		return -ENODEV;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+			&& defined(CONFIG_REGULATOR)
+	kbdev->regulator = regulator_get_optional(kbdev->dev, "mali");
+	if (IS_ERR_OR_NULL(kbdev->regulator)) {
+		err = PTR_ERR(kbdev->regulator);
+		kbdev->regulator = NULL;
+		if (err == -EPROBE_DEFER) {
+			dev_err(&pdev->dev, "Failed to get regulator\n");
+			return err;
+		}
+		dev_info(kbdev->dev,
+			"Continuing without Mali regulator control\n");
+		/* Allow probe to continue without regulator */
+	}
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+
+	kbdev->clock = of_clk_get(kbdev->dev->of_node, 0);
+	if (IS_ERR_OR_NULL(kbdev->clock)) {
+		err = PTR_ERR(kbdev->clock);
+		kbdev->clock = NULL;
+		if (err == -EPROBE_DEFER) {
+			dev_err(&pdev->dev, "Failed to get clock\n");
+			goto fail;
+		}
+		dev_info(kbdev->dev, "Continuing without Mali clock control\n");
+		/* Allow probe to continue without clock. */
+	} else {
+		err = clk_prepare_enable(kbdev->clock);
+		if (err) {
+			dev_err(kbdev->dev,
+				"Failed to prepare and enable clock (%d)\n",
+				err);
+			goto fail;
+		}
+	}
+
+#if defined(CONFIG_OF) && defined(CONFIG_PM_OPP)
+	/* Register the OPPs if they are available in device tree */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) \
+	|| defined(LSK_OPPV2_BACKPORT)
+	err = dev_pm_opp_of_add_table(kbdev->dev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	err = of_init_opp_table(kbdev->dev);
+#else
+	err = 0;
+#endif /* LINUX_VERSION_CODE */
+	if (err)
+		dev_dbg(kbdev->dev, "OPP table not found\n");
+#endif /* CONFIG_OF && CONFIG_PM_OPP */
+
+	return 0;
+
+fail:
+	if (kbdev->clock != NULL) {
+		clk_put(kbdev->clock);
+		kbdev->clock = NULL;
+	}
+
+#ifdef CONFIG_REGULATOR
+	if (NULL != kbdev->regulator) {
+		regulator_put(kbdev->regulator);
+		kbdev->regulator = NULL;
+	}
+#endif
+
+	return err;
+}
+
+static void power_control_term(struct kbase_device *kbdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \
+		defined(LSK_OPPV2_BACKPORT)
+	dev_pm_opp_of_remove_table(kbdev->dev);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+	of_free_opp_table(kbdev->dev);
+#endif
+
+	if (kbdev->clock) {
+		clk_disable_unprepare(kbdev->clock);
+		clk_put(kbdev->clock);
+		kbdev->clock = NULL;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+			&& defined(CONFIG_REGULATOR)
+	if (kbdev->regulator) {
+		regulator_put(kbdev->regulator);
+		kbdev->regulator = NULL;
+	}
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+}
+
+#ifdef MALI_KBASE_BUILD
+#ifdef CONFIG_DEBUG_FS
+
+#include <mali_kbase_hwaccess_jm.h>
+
+static void trigger_quirks_reload(struct kbase_device *kbdev)
+{
+	kbase_pm_context_active(kbdev);
+	if (kbase_prepare_to_reset_gpu(kbdev))
+		kbase_reset_gpu(kbdev);
+	kbase_pm_context_idle(kbdev);
+}
+
+#define MAKE_QUIRK_ACCESSORS(type) \
+static int type##_quirks_set(void *data, u64 val) \
+{ \
+	struct kbase_device *kbdev; \
+	kbdev = (struct kbase_device *)data; \
+	kbdev->hw_quirks_##type = (u32)val; \
+	trigger_quirks_reload(kbdev); \
+	return 0;\
+} \
+\
+static int type##_quirks_get(void *data, u64 *val) \
+{ \
+	struct kbase_device *kbdev;\
+	kbdev = (struct kbase_device *)data;\
+	*val = kbdev->hw_quirks_##type;\
+	return 0;\
+} \
+DEFINE_SIMPLE_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get,\
+		type##_quirks_set, "%llu\n")
+
+MAKE_QUIRK_ACCESSORS(sc);
+MAKE_QUIRK_ACCESSORS(tiler);
+MAKE_QUIRK_ACCESSORS(mmu);
+MAKE_QUIRK_ACCESSORS(jm);
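+
+/*
+ * For example, MAKE_QUIRK_ACCESSORS(sc) above defines sc_quirks_set(),
+ * sc_quirks_get() and fops_sc_quirks, which back the "quirks_sc" debugfs file
+ * created in kbase_device_debugfs_init() below.
+ */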
+
+
+/**
+ * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
+ * @file: File object to read is for
+ * @buf:  User buffer to populate with data
+ * @len:  Length of user buffer
+ * @ppos: Offset within file object
+ *
+ * Retrieves the current status of protected debug mode
+ * (0 = disabled, 1 = enabled)
+ *
+ * Return: Number of bytes added to user buffer
+ */
+static ssize_t debugfs_protected_debug_mode_read(struct file *file,
+				char __user *buf, size_t len, loff_t *ppos)
+{
+	struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
+	u32 gpu_status;
+	ssize_t ret_val;
+
+	kbase_pm_context_active(kbdev);
+	gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS));
+	kbase_pm_context_idle(kbdev);
+
+	if (gpu_status & GPU_DBGEN)
+		ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
+	else
+		ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
+
+	return ret_val;
+}
+
+/*
+ * struct fops_protected_debug_mode - "protected_debug_mode" debugfs fops
+ *
+ * Contains the file operations for the "protected_debug_mode" debugfs file
+ */
+static const struct file_operations fops_protected_debug_mode = {
+	.open = simple_open,
+	.read = debugfs_protected_debug_mode_read,
+	.llseek = default_llseek,
+};
+
+static int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+	struct dentry *debugfs_ctx_defaults_directory;
+	int err;
+
+	kbdev->mali_debugfs_directory = debugfs_create_dir(kbdev->devname,
+			NULL);
+	if (!kbdev->mali_debugfs_directory) {
+		dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	kbdev->debugfs_ctx_directory = debugfs_create_dir("ctx",
+			kbdev->mali_debugfs_directory);
+	if (!kbdev->debugfs_ctx_directory) {
+		dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	debugfs_ctx_defaults_directory = debugfs_create_dir("defaults",
+			kbdev->debugfs_ctx_directory);
+	if (!debugfs_ctx_defaults_directory) {
+		dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+#if !MALI_CUSTOMER_RELEASE
+	kbasep_regs_dump_debugfs_init(kbdev);
+#endif /* !MALI_CUSTOMER_RELEASE */
+	kbasep_regs_history_debugfs_init(kbdev);
+
+	kbase_debug_job_fault_debugfs_init(kbdev);
+	kbasep_gpu_memory_debugfs_init(kbdev);
+	kbase_as_fault_debugfs_init(kbdev);
+	/* fops_* variables created by invocations of macro
+	 * MAKE_QUIRK_ACCESSORS() above. */
+	debugfs_create_file("quirks_sc", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_sc_quirks);
+	debugfs_create_file("quirks_tiler", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_tiler_quirks);
+	debugfs_create_file("quirks_mmu", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_mmu_quirks);
+	debugfs_create_file("quirks_jm", 0644,
+			kbdev->mali_debugfs_directory, kbdev,
+			&fops_jm_quirks);
+
+	debugfs_create_bool("infinite_cache", 0644,
+			debugfs_ctx_defaults_directory,
+			&kbdev->infinite_cache_active_default);
+
+	debugfs_create_size_t("mem_pool_max_size", 0644,
+			debugfs_ctx_defaults_directory,
+			&kbdev->mem_pool_max_size_default);
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
+		debugfs_create_file("protected_debug_mode", S_IRUGO,
+				kbdev->mali_debugfs_directory, kbdev,
+				&fops_protected_debug_mode);
+	}
+
+#if KBASE_TRACE_ENABLE
+	kbasep_trace_debugfs_init(kbdev);
+#endif /* KBASE_TRACE_ENABLE */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#ifdef CONFIG_DEVFREQ_THERMAL
+	if (kbdev->inited_subsys & inited_devfreq)
+		kbase_ipa_debugfs_init(kbdev);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_create_file("serialize_jobs", S_IRUGO | S_IWUSR,
+			kbdev->mali_debugfs_directory, kbdev,
+			&kbasep_serialize_jobs_debugfs_fops);
+#endif /* CONFIG_DEBUG_FS */
+
+	return 0;
+
+out:
+	debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+	return err;
+}
+
+static void kbase_device_debugfs_term(struct kbase_device *kbdev)
+{
+	debugfs_remove_recursive(kbdev->mali_debugfs_directory);
+}
+
+#else /* CONFIG_DEBUG_FS */
+static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
+{
+	return 0;
+}
+
+static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
+#endif /* CONFIG_DEBUG_FS */
+#endif /* MALI_KBASE_BUILD */
+
+static void kbase_device_coherency_init(struct kbase_device *kbdev,
+		unsigned prod_id)
+{
+#ifdef CONFIG_OF
+	u32 supported_coherency_bitmap =
+		kbdev->gpu_props.props.raw_props.coherency_mode;
+	const void *coherency_override_dts;
+	u32 override_coherency;
+
+	/* Only for tMIx:
+	 * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
+	 * documented for tMIx so force correct value here.
+	 */
+	if (GPU_ID_IS_NEW_FORMAT(prod_id) &&
+		   (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
+				   GPU_ID2_PRODUCT_TMIX))
+		if (supported_coherency_bitmap ==
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE))
+			supported_coherency_bitmap |=
+				COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
+
+#endif /* CONFIG_OF */
+
+	kbdev->system_coherency = COHERENCY_NONE;
+
+	/* device tree may override the coherency */
+#ifdef CONFIG_OF
+	coherency_override_dts = of_get_property(kbdev->dev->of_node,
+						"system-coherency",
+						NULL);
+	if (coherency_override_dts) {
+
+		override_coherency = be32_to_cpup(coherency_override_dts);
+
+		if ((override_coherency <= COHERENCY_NONE) &&
+			(supported_coherency_bitmap &
+			 COHERENCY_FEATURE_BIT(override_coherency))) {
+
+			kbdev->system_coherency = override_coherency;
+
+			dev_info(kbdev->dev,
+				"Using coherency mode %u set from dtb",
+				override_coherency);
+		} else
+			dev_warn(kbdev->dev,
+				"Ignoring unsupported coherency mode %u set from dtb",
+				override_coherency);
+	}
+
+	if (kbdev->dev->dma_coherent) {
+		kbdev->dev->dma_coherent = 0;
+		kbdev->system_coherency = COHERENCY_ACE_LITE;
+		dev_warn(kbdev->dev,
+			"Disable DMA coherency and set system coherency to ACE_LITE");
+	}
+#endif /* CONFIG_OF */
+
+	kbdev->gpu_props.props.raw_props.coherency_mode =
+		kbdev->system_coherency;
+}
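+
+/*
+ * Hypothetical device-tree fragment (illustrative only) using the
+ * "system-coherency" override read above; the value must name a coherency
+ * mode supported by the GPU (0 is assumed here to mean COHERENCY_ACE_LITE):
+ *
+ *	gpu {
+ *		system-coherency = <0>;
+ *	};
+ */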
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+
+/* Callback used by the kbase bus logger client, to initiate a GPU reset
+ * when the bus log is restarted.  GPU reset is used as reference point
+ * in HW bus log analyses.
+ */
+static void kbase_logging_started_cb(void *data)
+{
+	struct kbase_device *kbdev = (struct kbase_device *)data;
+
+	if (kbase_prepare_to_reset_gpu(kbdev))
+		kbase_reset_gpu(kbdev);
+	dev_info(kbdev->dev, "KBASE - Bus logger restarted\n");
+}
+#endif
+
+static struct attribute *kbase_attrs[] = {
+#ifdef CONFIG_MALI_DEBUG
+	&dev_attr_debug_command.attr,
+	&dev_attr_js_softstop_always.attr,
+#endif
+#if !MALI_CUSTOMER_RELEASE
+	&dev_attr_force_replay.attr,
+#endif
+	&dev_attr_js_timeouts.attr,
+	&dev_attr_soft_job_timeout.attr,
+	&dev_attr_gpuinfo.attr,
+	&dev_attr_dvfs_period.attr,
+	&dev_attr_pm_poweroff.attr,
+	&dev_attr_reset_timeout.attr,
+	&dev_attr_js_scheduling_period.attr,
+	&dev_attr_power_policy.attr,
+	&dev_attr_core_mask.attr,
+	&dev_attr_mem_pool_size.attr,
+	&dev_attr_mem_pool_max_size.attr,
+	&dev_attr_lp_mem_pool_size.attr,
+	&dev_attr_lp_mem_pool_max_size.attr,
+	&dev_attr_js_ctx_scheduling_mode.attr,
+	NULL
+};
+
+static const struct attribute_group kbase_attr_group = {
+	.attrs = kbase_attrs,
+};
+
+static int kbase_platform_device_remove(struct platform_device *pdev)
+{
+	struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
+	const struct list_head *dev_list;
+
+	if (!kbdev)
+		return -ENODEV;
+
+	kfree(kbdev->gpu_props.prop_buffer);
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+	if (kbdev->inited_subsys & inited_buslogger) {
+		bl_core_client_unregister(kbdev->buslogger);
+		kbdev->inited_subsys &= ~inited_buslogger;
+	}
+#endif
+
+
+	if (kbdev->inited_subsys & inited_dev_list) {
+		dev_list = kbase_dev_list_get();
+		list_del(&kbdev->entry);
+		kbase_dev_list_put(dev_list);
+		kbdev->inited_subsys &= ~inited_dev_list;
+	}
+
+	if (kbdev->inited_subsys & inited_misc_register) {
+		misc_deregister(&kbdev->mdev);
+		kbdev->inited_subsys &= ~inited_misc_register;
+	}
+
+	if (kbdev->inited_subsys & inited_sysfs_group) {
+		sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
+		kbdev->inited_subsys &= ~inited_sysfs_group;
+	}
+
+	if (kbdev->inited_subsys & inited_get_device) {
+		put_device(kbdev->dev);
+		kbdev->inited_subsys &= ~inited_get_device;
+	}
+
+#ifdef MALI_KBASE_BUILD
+	if (kbdev->inited_subsys & inited_debugfs) {
+		kbase_device_debugfs_term(kbdev);
+		kbdev->inited_subsys &= ~inited_debugfs;
+	}
+#endif
+
+	if (kbdev->inited_subsys & inited_job_fault) {
+		kbase_debug_job_fault_dev_term(kbdev);
+		kbdev->inited_subsys &= ~inited_job_fault;
+	}
+
+#ifdef CONFIG_MALI_DEVFREQ
+	if (kbdev->inited_subsys & inited_devfreq) {
+		kbase_devfreq_term(kbdev);
+		kbdev->inited_subsys &= ~inited_devfreq;
+	}
+#endif
+
+
+	if (kbdev->inited_subsys & inited_backend_late) {
+		kbase_backend_late_term(kbdev);
+		kbdev->inited_subsys &= ~inited_backend_late;
+	}
+
+	if (kbdev->inited_subsys & inited_vinstr) {
+		kbase_vinstr_term(kbdev->vinstr_ctx);
+		kbdev->inited_subsys &= ~inited_vinstr;
+	}
+
+	if (kbdev->inited_subsys & inited_hwcnt_gpu_virt) {
+		kbase_hwcnt_virtualizer_term(kbdev->hwcnt_gpu_virt);
+		kbdev->inited_subsys &= ~inited_hwcnt_gpu_virt;
+	}
+
+	if (kbdev->inited_subsys & inited_hwcnt_gpu_ctx) {
+		kbase_hwcnt_context_term(kbdev->hwcnt_gpu_ctx);
+		kbdev->inited_subsys &= ~inited_hwcnt_gpu_ctx;
+	}
+
+	if (kbdev->inited_subsys & inited_hwcnt_gpu_iface) {
+		kbase_hwcnt_backend_gpu_destroy(&kbdev->hwcnt_gpu_iface);
+		kbdev->inited_subsys &= ~inited_hwcnt_gpu_iface;
+	}
+
+	if (kbdev->inited_subsys & inited_tlstream) {
+		kbase_tlstream_term();
+		kbdev->inited_subsys &= ~inited_tlstream;
+	}
+
+	/* Bring job and mem sys to a halt before we continue termination */
+
+	if (kbdev->inited_subsys & inited_js)
+		kbasep_js_devdata_halt(kbdev);
+
+	if (kbdev->inited_subsys & inited_mem)
+		kbase_mem_halt(kbdev);
+
+	if (kbdev->inited_subsys & inited_protected) {
+		kbasep_protected_mode_term(kbdev);
+		kbdev->inited_subsys &= ~inited_protected;
+	}
+
+	if (kbdev->inited_subsys & inited_js) {
+		kbasep_js_devdata_term(kbdev);
+		kbdev->inited_subsys &= ~inited_js;
+	}
+
+	if (kbdev->inited_subsys & inited_mem) {
+		kbase_mem_term(kbdev);
+		kbdev->inited_subsys &= ~inited_mem;
+	}
+
+	if (kbdev->inited_subsys & inited_ctx_sched) {
+		kbase_ctx_sched_term(kbdev);
+		kbdev->inited_subsys &= ~inited_ctx_sched;
+	}
+
+	if (kbdev->inited_subsys & inited_device) {
+		kbase_device_term(kbdev);
+		kbdev->inited_subsys &= ~inited_device;
+	}
+
+	if (kbdev->inited_subsys & inited_backend_early) {
+		kbase_backend_early_term(kbdev);
+		kbdev->inited_subsys &= ~inited_backend_early;
+	}
+
+	if (kbdev->inited_subsys & inited_io_history) {
+		kbase_io_history_term(&kbdev->io_history);
+		kbdev->inited_subsys &= ~inited_io_history;
+	}
+
+	if (kbdev->inited_subsys & inited_power_control) {
+		power_control_term(kbdev);
+		kbdev->inited_subsys &= ~inited_power_control;
+	}
+
+	if (kbdev->inited_subsys & inited_registers_map) {
+		registers_unmap(kbdev);
+		kbdev->inited_subsys &= ~inited_registers_map;
+	}
+
+#ifdef CONFIG_MALI_NO_MALI
+	if (kbdev->inited_subsys & inited_gpu_device) {
+		gpu_device_destroy(kbdev);
+		kbdev->inited_subsys &= ~inited_gpu_device;
+	}
+#endif /* CONFIG_MALI_NO_MALI */
+
+	if (kbdev->inited_subsys != 0)
+		dev_err(kbdev->dev, "Missing sub system termination\n");
+
+	kbase_device_free(kbdev);
+
+	return 0;
+}
+
+
+/* Number of register accesses for the buffer that we allocate during
+ * initialization time. The buffer size can be changed later via debugfs. */
+#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)
+
+static int kbase_platform_device_probe(struct platform_device *pdev)
+{
+	struct kbase_device *kbdev;
+	struct mali_base_gpu_core_props *core_props;
+	u32 gpu_id;
+	unsigned prod_id;
+	const struct list_head *dev_list;
+	int err = 0;
+
+	kbdev = kbase_device_alloc();
+	if (!kbdev) {
+		dev_err(&pdev->dev, "Allocate device failed\n");
+		kbase_platform_device_remove(pdev);
+		return -ENOMEM;
+	}
+
+	kbdev->dev = &pdev->dev;
+	dev_set_drvdata(kbdev->dev, kbdev);
+
+#ifdef CONFIG_MALI_NO_MALI
+	err = gpu_device_create(kbdev);
+	if (err) {
+		dev_err(&pdev->dev, "Dummy model initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_gpu_device;
+#endif /* CONFIG_MALI_NO_MALI */
+
+	err = assign_irqs(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "IRQ search failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+
+	err = registers_map(kbdev);
+	if (err) {
+		dev_err(&pdev->dev, "Register map failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_registers_map;
+
+	err = power_control_init(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Power control initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_power_control;
+
+	err = kbase_io_history_init(&kbdev->io_history,
+			KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
+	if (err) {
+		dev_err(&pdev->dev, "Register access history initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return -ENOMEM;
+	}
+	kbdev->inited_subsys |= inited_io_history;
+
+	err = kbase_backend_early_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Early backend initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_backend_early;
+
+	scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
+			kbase_dev_nr);
+
+	kbase_disjoint_init(kbdev);
+
+	/* obtain max configured gpu frequency, if devfreq is enabled then
+	 * this will be overridden by the highest operating point found
+	 */
+	core_props = &(kbdev->gpu_props.props.core_props);
+#ifdef GPU_FREQ_KHZ_MAX
+	core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
+#else
+	core_props->gpu_freq_khz_max = DEFAULT_GPU_FREQ_KHZ_MAX;
+#endif
+
+	err = kbase_device_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Device initialization failed (%d)\n", err);
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_device;
+
+	err = kbase_ctx_sched_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Context scheduler initialization failed (%d)\n",
+				err);
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_ctx_sched;
+
+	err = kbase_mem_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Memory subsystem initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_mem;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
+	prod_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	kbase_device_coherency_init(kbdev, prod_id);
+
+	err = kbasep_protected_mode_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Protected mode subsystem initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_protected;
+
+	dev_list = kbase_dev_list_get();
+	list_add(&kbdev->entry, &kbase_dev_list);
+	kbase_dev_list_put(dev_list);
+	kbdev->inited_subsys |= inited_dev_list;
+
+	err = kbasep_js_devdata_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Job scheduler devdata initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_js;
+
+	err = kbase_tlstream_init();
+	if (err) {
+		dev_err(kbdev->dev, "Timeline stream initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_tlstream;
+
+	/* Initialize the kctx list. This is used by vinstr. */
+	mutex_init(&kbdev->kctx_list_lock);
+	INIT_LIST_HEAD(&kbdev->kctx_list);
+
+	err = kbase_hwcnt_backend_gpu_create(kbdev, &kbdev->hwcnt_gpu_iface);
+	if (err) {
+		dev_err(kbdev->dev, "GPU hwcnt backend creation failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_hwcnt_gpu_iface;
+
+	err = kbase_hwcnt_context_init(&kbdev->hwcnt_gpu_iface,
+		&kbdev->hwcnt_gpu_ctx);
+	if (err) {
+		dev_err(kbdev->dev,
+			"GPU hwcnt context initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_hwcnt_gpu_ctx;
+
+	err = kbase_hwcnt_virtualizer_init(
+		kbdev->hwcnt_gpu_ctx, &kbdev->hwcnt_gpu_virt);
+	if (err) {
+		dev_err(kbdev->dev,
+			"GPU hwcnt virtualizer initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_hwcnt_gpu_virt;
+
+	err = kbase_vinstr_init(kbdev->hwcnt_gpu_virt, &kbdev->vinstr_ctx);
+	if (err) {
+		dev_err(kbdev->dev,
+			"Virtual instrumentation initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return -EINVAL;
+	}
+	kbdev->inited_subsys |= inited_vinstr;
+
+	err = kbase_backend_late_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Late backend initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_backend_late;
+
+#ifdef CONFIG_MALI_DEVFREQ
+	/* Devfreq uses hardware counters, so it must be initialized after them. */
+	err = kbase_devfreq_init(kbdev);
+	if (!err)
+		kbdev->inited_subsys |= inited_devfreq;
+	else
+		dev_err(kbdev->dev, "Continuing without devfreq\n");
+#endif /* CONFIG_MALI_DEVFREQ */
+
+#ifdef MALI_KBASE_BUILD
+	err = kbase_debug_job_fault_dev_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "Job fault debug initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_job_fault;
+
+	err = kbase_device_debugfs_init(kbdev);
+	if (err) {
+		dev_err(kbdev->dev, "DebugFS initialization failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_debugfs;
+
+	kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
+	kbdev->mdev.name = kbdev->devname;
+	kbdev->mdev.fops = &kbase_fops;
+	kbdev->mdev.parent = get_device(kbdev->dev);
+	kbdev->mdev.mode = 0666;
+	kbdev->inited_subsys |= inited_get_device;
+
+	/* This needs to happen before registering the device with misc_register(),
+	 * otherwise it causes a race condition between registering the device and
+	 * a uevent being generated for userspace, causing udev rules to run which
+	 * might expect certain sysfs attributes to be present. If that race
+	 * occurred, some Mali sysfs entries could appear to udev not to exist.
+	 *
+	 * For more information, see
+	 * https://www.kernel.org/doc/Documentation/driver-model/device.txt, the
+	 * paragraph that starts with "Word of warning", currently the second-last
+	 * paragraph.
+	 */
+	err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "SysFS group creation failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_sysfs_group;
+
+	err = misc_register(&kbdev->mdev);
+	if (err) {
+		dev_err(kbdev->dev, "Misc device registration failed for %s\n",
+			kbdev->devname);
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+	kbdev->inited_subsys |= inited_misc_register;
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+	err = bl_core_client_register(kbdev->devname,
+						kbase_logging_started_cb,
+						kbdev, &kbdev->buslogger,
+						THIS_MODULE, NULL);
+	if (err == 0) {
+		kbdev->inited_subsys |= inited_buslogger;
+		bl_core_set_threshold(kbdev->buslogger, 1024*1024*1024);
+	} else {
+		dev_warn(kbdev->dev, "Bus log client registration failed\n");
+		err = 0;
+	}
+#endif
+
+	err = kbase_gpuprops_populate_user_buffer(kbdev);
+	if (err) {
+		dev_err(&pdev->dev, "GPU property population failed\n");
+		kbase_platform_device_remove(pdev);
+		return err;
+	}
+
+	dev_info(kbdev->dev,
+			"Probed as %s\n", dev_name(kbdev->mdev.this_device));
+
+	kbase_dev_nr++;
+#endif /* MALI_KBASE_BUILD */
+
+	return err;
+}
+
+#undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
+
+/**
+ * kbase_device_suspend - Suspend callback from the OS.
+ * @dev: The device to suspend
+ *
+ * This is called by Linux when the device should suspend.
+ *
+ * Return: A standard Linux error code
+ */
+static int kbase_device_suspend(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	if (kbdev->inited_subsys & inited_devfreq)
+		devfreq_suspend_device(kbdev->devfreq);
+#endif
+
+	kbase_pm_suspend(kbdev);
+	return 0;
+}
+
+/**
+ * kbase_device_resume - Resume callback from the OS.
+ * @dev: The device to resume
+ *
+ * This is called by Linux when the device should resume from suspension.
+ *
+ * Return: A standard Linux error code
+ */
+static int kbase_device_resume(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	kbase_pm_resume(kbdev);
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	if (kbdev->inited_subsys & inited_devfreq)
+		devfreq_resume_device(kbdev->devfreq);
+#endif
+	return 0;
+}
+
+/**
+ * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
+ * @dev: The device to suspend
+ *
+ * This is called by Linux when the device should prepare for a condition in
+ * which it will not be able to communicate with the CPU(s) and RAM due to
+ * power management.
+ *
+ * Return: A standard Linux error code
+ */
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_suspend(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	if (kbdev->inited_subsys & inited_devfreq)
+		devfreq_suspend_device(kbdev->devfreq);
+#endif
+
+	if (kbdev->pm.backend.callback_power_runtime_off) {
+		kbdev->pm.backend.callback_power_runtime_off(kbdev);
+		dev_dbg(dev, "runtime suspend\n");
+	}
+	return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/**
+ * kbase_device_runtime_resume - Runtime resume callback from the OS.
+ * @dev: The device to resume
+ *
+ * This is called by Linux when the device should go into a fully active state.
+ *
+ * Return: A standard Linux error code
+ */
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	if (kbdev->pm.backend.callback_power_runtime_on) {
+		ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
+		dev_dbg(dev, "runtime resume\n");
+	}
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+		(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	if (kbdev->inited_subsys & inited_devfreq)
+		devfreq_resume_device(kbdev->devfreq);
+#endif
+
+	return ret;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_device_runtime_idle - Runtime idle callback from the OS.
+ * @dev: The device to suspend
+ *
+ * This is called by Linux when the device appears to be inactive and it might
+ * be placed into a low power state.
+ *
+ * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
+ * otherwise a standard Linux error code
+ */
+static int kbase_device_runtime_idle(struct device *dev)
+{
+	struct kbase_device *kbdev = to_kbase_device(dev);
+
+	if (!kbdev)
+		return -ENODEV;
+
+	/* Use platform specific implementation if it exists. */
+	if (kbdev->pm.backend.callback_power_runtime_idle)
+		return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
+
+	return 0;
+}
+#endif /* KBASE_PM_RUNTIME */
+
+/* The power management operations for the platform driver.
+ */
+static const struct dev_pm_ops kbase_pm_ops = {
+	.suspend = kbase_device_suspend,
+	.resume = kbase_device_resume,
+#ifdef KBASE_PM_RUNTIME
+	.runtime_suspend = kbase_device_runtime_suspend,
+	.runtime_resume = kbase_device_runtime_resume,
+	.runtime_idle = kbase_device_runtime_idle,
+#endif /* KBASE_PM_RUNTIME */
+};
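+
+/*
+ * Note (informal): the runtime PM callbacks above are not called directly by
+ * this driver; they are invoked by the Linux PM core. A sketch of how they
+ * are typically triggered (illustrative only, not a call site in this file):
+ *
+ *	pm_runtime_get_sync(kbdev->dev);        (may invoke runtime_resume)
+ *	... access the GPU ...
+ *	pm_runtime_mark_last_busy(kbdev->dev);
+ *	pm_runtime_put_autosuspend(kbdev->dev); (may later invoke runtime_suspend)
+ */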
+
+#ifdef CONFIG_OF
+static const struct of_device_id kbase_dt_ids[] = {
+	{ .compatible = "arm,malit6xx" },
+	{ .compatible = "arm,mali-bifrost" },
+	{ .compatible = "arm,mali-midgard" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kbase_dt_ids);
+#endif
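+
+/*
+ * Illustrative device tree node matching the table above (sketch only; the
+ * unit address, reg, interrupts and clocks are placeholders and depend on the
+ * platform integration):
+ *
+ *	gpu: gpu@ffe40000 {
+ *		compatible = "arm,mali-bifrost";
+ *		reg = <0x0 0xffe40000 0x0 0x40000>;
+ *		interrupts = <...>;
+ *		clocks = <...>;
+ *	};
+ */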
+
+static struct platform_driver kbase_platform_driver = {
+	.probe = kbase_platform_device_probe,
+	.remove = kbase_platform_device_remove,
+	.driver = {
+		   .name = kbase_drv_name,
+		   .owner = THIS_MODULE,
+		   .pm = &kbase_pm_ops,
+		   .of_match_table = of_match_ptr(kbase_dt_ids),
+	},
+};
+
+/*
+ * The driver will not provide a shortcut to create the Mali platform device
+ * anymore when using Device Tree.
+ */
+#ifdef CONFIG_OF
+module_platform_driver(kbase_platform_driver);
+#else
+
+static int __init kbase_driver_init(void)
+{
+	int ret;
+
+	ret = kbase_platform_register();
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&kbase_platform_driver);
+
+	if (ret)
+		kbase_platform_unregister();
+
+	return ret;
+}
+
+static void __exit kbase_driver_exit(void)
+{
+	platform_driver_unregister(&kbase_platform_driver);
+	kbase_platform_unregister();
+}
+
+module_init(kbase_driver_init);
+module_exit(kbase_driver_exit);
+
+#endif /* CONFIG_OF */
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
+		__stringify(BASE_UK_VERSION_MAJOR) "." \
+		__stringify(BASE_UK_VERSION_MINOR) ")");
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
+#define CREATE_TRACE_POINTS
+#endif
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+/* Create the trace points (otherwise we just get code to call a tracepoint) */
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_on);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_power_off);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_in_use);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_mmu_as_released);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);
+
+void kbase_trace_mali_pm_status(u32 event, u64 value)
+{
+	trace_mali_pm_status(event, value);
+}
+
+void kbase_trace_mali_pm_power_off(u32 event, u64 value)
+{
+	trace_mali_pm_power_off(event, value);
+}
+
+void kbase_trace_mali_pm_power_on(u32 event, u64 value)
+{
+	trace_mali_pm_power_on(event, value);
+}
+
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
+{
+	trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
+}
+
+void kbase_trace_mali_page_fault_insert_pages(int event, u32 value)
+{
+	trace_mali_page_fault_insert_pages(event, value);
+}
+
+void kbase_trace_mali_mmu_as_in_use(int event)
+{
+	trace_mali_mmu_as_in_use(event);
+}
+
+void kbase_trace_mali_mmu_as_released(int event)
+{
+	trace_mali_mmu_as_released(event);
+}
+
+void kbase_trace_mali_total_alloc_pages_change(long long int event)
+{
+	trace_mali_total_alloc_pages_change(event);
+}
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+#ifdef CONFIG_MALI_SYSTEM_TRACE
+#include "mali_linux_kbase_trace.h"
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c
new file mode 100644
index 0000000..bda0560
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c
@@ -0,0 +1,210 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+
+#include "mali_kbase_ctx_sched.h"
+
+int kbase_ctx_sched_init(struct kbase_device *kbdev)
+{
+	int as_present = (1U << kbdev->nr_hw_address_spaces) - 1;
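+	/* For example, with 8 hardware address spaces this evaluates to 0xff,
+	 * i.e. one bit per address space. The same mask seeds as_free below,
+	 * where a set bit means that AS is currently free.
+	 */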
+
+	/* These two must be recalculated if nr_hw_address_spaces changes
+	 * (e.g. for HW workarounds).
+	 */
+	kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
+		bool use_workaround;
+
+		use_workaround = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
+		if (use_workaround) {
+			dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
+			kbdev->nr_user_address_spaces = 1;
+		}
+	}
+
+	kbdev->as_free = as_present; /* All ASs initially free */
+
+	memset(kbdev->as_to_kctx, 0, sizeof(kbdev->as_to_kctx));
+
+	return 0;
+}
+
+void kbase_ctx_sched_term(struct kbase_device *kbdev)
+{
+	s8 i;
+
+	/* Sanity checks */
+	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
+		WARN_ON(kbdev->as_to_kctx[i] != NULL);
+		WARN_ON(!(kbdev->as_free & (1u << i)));
+	}
+}
+
+/* kbasep_ctx_sched_find_as_for_ctx - Find a free address space
+ *
+ * @kctx: The context for which to find a free address space
+ *
+ * Return: A valid AS if successful, otherwise KBASEP_AS_NR_INVALID
+ *
+ * This function returns an address space available for use. It prefers
+ * returning an AS that was previously assigned to the context, to avoid
+ * having to reprogram the MMU.
+ */
+static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+	int free_as;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* First check if the previously assigned AS is available */
+	if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&
+			(kbdev->as_free & (1u << kctx->as_nr)))
+		return kctx->as_nr;
+
+	/* The previously assigned AS was taken, so return any free AS at
+	 * this point.
+	 */
+	free_as = ffs(kbdev->as_free) - 1;
+	if (free_as >= 0 && free_as < kbdev->nr_hw_address_spaces)
+		return free_as;
+
+	return KBASEP_AS_NR_INVALID;
+}
+
+int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(!kbdev->pm.backend.gpu_powered);
+
+	if (atomic_inc_return(&kctx->refcount) == 1) {
+		int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);
+
+		if (free_as != KBASEP_AS_NR_INVALID) {
+			kbdev->as_free &= ~(1u << free_as);
+			/* Only program the MMU if the context has not been
+			 * assigned the same address space before.
+			 */
+			if (free_as != kctx->as_nr) {
+				struct kbase_context *const prev_kctx =
+					kbdev->as_to_kctx[free_as];
+
+				if (prev_kctx) {
+					WARN_ON(atomic_read(&prev_kctx->refcount) != 0);
+					kbase_mmu_disable(prev_kctx);
+					prev_kctx->as_nr = KBASEP_AS_NR_INVALID;
+				}
+
+				kctx->as_nr = free_as;
+				kbdev->as_to_kctx[free_as] = kctx;
+				kbase_mmu_update(kbdev, &kctx->mmu,
+					kctx->as_nr);
+			}
+		} else {
+			atomic_dec(&kctx->refcount);
+
+			/* Failed to find an available address space, so
+			 * KBASEP_AS_NR_INVALID will be returned at this
+			 * point.
+			 */
+			WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID);
+		}
+	}
+
+	return kctx->as_nr;
+}
+
+void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	WARN_ON(atomic_read(&kctx->refcount) == 0);
+	WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID);
+	WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
+
+	atomic_inc(&kctx->refcount);
+}
+
+void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (atomic_dec_return(&kctx->refcount) == 0)
+		kbdev->as_free |= (1u << kctx->as_nr);
+}
+
+void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
+{
+	struct kbase_device *const kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(atomic_read(&kctx->refcount) != 0);
+
+	if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
+		if (kbdev->pm.backend.gpu_powered)
+			kbase_mmu_disable(kctx);
+
+		kbdev->as_to_kctx[kctx->as_nr] = NULL;
+		kctx->as_nr = KBASEP_AS_NR_INVALID;
+	}
+}
+
+void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
+{
+	s8 i;
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(!kbdev->pm.backend.gpu_powered);
+
+	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
+		struct kbase_context *kctx;
+
+		kctx = kbdev->as_to_kctx[i];
+		if (kctx) {
+			if (atomic_read(&kctx->refcount)) {
+				WARN_ON(kctx->as_nr != i);
+
+				kbase_mmu_update(kbdev, &kctx->mmu,
+					kctx->as_nr);
+			} else {
+				/* This context might have been assigned an
+				 * AS before; clear it.
+				 */
+				kbdev->as_to_kctx[kctx->as_nr] = NULL;
+				kctx->as_nr = KBASEP_AS_NR_INVALID;
+			}
+		} else {
+			kbase_mmu_disable_as(kbdev, i);
+		}
+	}
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h
new file mode 100644
index 0000000..ab57a0d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h
@@ -0,0 +1,135 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_CTX_SCHED_H_
+#define _KBASE_CTX_SCHED_H_
+
+#include <mali_kbase.h>
+
+/**
+ * The Context Scheduler manages address space assignment and reference
+ * counting of kbase_context. The interface has been designed to minimise
+ * interactions between the Job Scheduler and Power Management/MMU to support
+ * the existing Job Scheduler interface.
+ *
+ * The initial implementation of the Context Scheduler does not schedule
+ * contexts. Instead it relies on the Job Scheduler to make decisions of
+ * when to schedule/evict contexts if address spaces are starved. In the
+ * future, once an interface between the CS and JS has been devised to
+ * provide enough information about how each context is consuming GPU resources,
+ * those decisions can be made in the CS itself, thereby reducing duplicated
+ * code.
+ */
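+
+/*
+ * Informal usage sketch (the locking requirements are taken from the
+ * kernel-doc below; everything else is illustrative):
+ *
+ *	mutex_lock(&kbdev->mmu_hw_mutex);
+ *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *	as_nr = kbase_ctx_sched_retain_ctx(kctx);
+ *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *	mutex_unlock(&kbdev->mmu_hw_mutex);
+ *
+ *	if (as_nr != KBASEP_AS_NR_INVALID) {
+ *		... submit work that uses the assigned address space ...
+ *
+ *		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ *		kbase_ctx_sched_release_ctx(kctx);
+ *		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ *	}
+ */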
+
+/**
+ * kbase_ctx_sched_init - Initialise the context scheduler
+ * @kbdev: The device for which the context scheduler needs to be initialised
+ *
+ * This must be called during device initialisation. The number of hardware
+ * address spaces must already be established before calling this function.
+ *
+ * Return: 0 for success, otherwise failure
+ */
+int kbase_ctx_sched_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_ctx_sched_term - Terminate the context scheduler
+ * @kbdev: The device for which the context scheduler needs to be terminated
+ *
+ * This must be called during device termination after all contexts have been
+ * destroyed.
+ */
+void kbase_ctx_sched_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
+ * @kctx: The context to which to retain a reference
+ *
+ * This function should be called whenever an address space should be assigned
+ * to a context and programmed onto the MMU. It should typically be called
+ * when jobs are ready to be submitted to the GPU.
+ *
+ * It can be called as many times as necessary. The address space will be
+ * assigned to the context for as long as there is a reference to said context.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ *
+ * Return: The address space that the context has been assigned to or
+ *         KBASEP_AS_NR_INVALID if no address space was available.
+ */
+int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_retain_ctx_refcount - Retain an extra reference to a context
+ * @kctx: The context to which to retain a reference
+ *
+ * This function only retains a reference to the context. It must be called
+ * only when the context already has a reference.
+ *
+ * This is typically called inside an atomic session where we know the context
+ * is already scheduled in but want to take an extra reference to ensure that
+ * it doesn't get descheduled.
+ *
+ * The kbase_device::hwaccess_lock must be held whilst calling this function
+ */
+void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context
+ * @kctx: The context from which to release a reference
+ *
+ * This function should be called whenever an address space could be unassigned
+ * from a context. When there are no more references to said context, the
+ * address space previously assigned to this context shall be reassigned to
+ * other contexts as needed.
+ *
+ * The kbase_device::hwaccess_lock must be held whilst calling this function
+ */
+void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_remove_ctx - Unassign previously assigned address space
+ * @kctx: The context to be removed
+ *
+ * This function should be called when a context is being destroyed. The
+ * context must no longer have any reference. If it has been assigned an
+ * address space before then the AS will be unprogrammed.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ */
+void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
+
+/**
+ * kbase_ctx_sched_restore_all_as - Reprogram all address spaces
+ * @kbdev: The device for which address spaces are to be reprogrammed
+ *
+ * This function shall reprogram all address spaces previously assigned to
+ * contexts. It can be used after the GPU is reset.
+ *
+ * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
+ * held whilst calling this function.
+ */
+void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev);
+
+#endif /* _KBASE_CTX_SCHED_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug.c b/drivers/gpu/arm/midgard/mali_kbase_debug.c
new file mode 100644
index 0000000..118f787
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug.c
@@ -0,0 +1,44 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+static struct kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = {
+	NULL,
+	NULL
+};
+
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param)
+{
+	kbasep_debug_assert_registered_cb.func = func;
+	kbasep_debug_assert_registered_cb.param = param;
+}
+
+void kbasep_debug_assert_call_hook(void)
+{
+	if (kbasep_debug_assert_registered_cb.func != NULL)
+		kbasep_debug_assert_registered_cb.func(kbasep_debug_assert_registered_cb.param);
+}
+KBASE_EXPORT_SYMBOL(kbasep_debug_assert_call_hook);
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug.h b/drivers/gpu/arm/midgard/mali_kbase_debug.h
new file mode 100644
index 0000000..2fdb72d9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug.h
@@ -0,0 +1,169 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DEBUG_H
+#define _KBASE_DEBUG_H
+
+#include <linux/bug.h>
+
+/** @brief If equals to 0, a trace containing the file, line, and function will be displayed before each message. */
+#define KBASE_DEBUG_SKIP_TRACE 0
+
+/** @brief If different from 0, the trace will only contain the file and line. */
+#define KBASE_DEBUG_SKIP_FUNCTION_NAME 0
+
+/** @brief Disable the assert tests if set to 1. The default is to disable asserts in release builds. */
+#ifndef KBASE_DEBUG_DISABLE_ASSERTS
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_DISABLE_ASSERTS 0
+#else
+#define KBASE_DEBUG_DISABLE_ASSERTS 1
+#endif
+#endif				/* KBASE_DEBUG_DISABLE_ASSERTS */
+
+/** Function type that is called on a KBASE_DEBUG_ASSERT() or KBASE_DEBUG_ASSERT_MSG() */
+typedef void (kbase_debug_assert_hook) (void *);
+
+struct kbasep_debug_assert_cb {
+	kbase_debug_assert_hook *func;
+	void *param;
+};
+
+/**
+ * @def KBASEP_DEBUG_PRINT_TRACE
+ * @brief Private macro containing the format of the trace to display before every message
+ * @sa KBASE_DEBUG_SKIP_TRACE, KBASE_DEBUG_SKIP_FUNCTION_NAME
+ */
+#if !KBASE_DEBUG_SKIP_TRACE
+#define KBASEP_DEBUG_PRINT_TRACE \
+		"In file: " __FILE__ " line: " CSTD_STR2(__LINE__)
+#if !KBASE_DEBUG_SKIP_FUNCTION_NAME
+#define KBASEP_DEBUG_PRINT_FUNCTION __func__
+#else
+#define KBASEP_DEBUG_PRINT_FUNCTION ""
+#endif
+#else
+#define KBASEP_DEBUG_PRINT_TRACE ""
+#endif
+
+/**
+ * @def KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)
+ * @brief (Private) system printing function associated to the @ref KBASE_DEBUG_ASSERT_MSG event.
+ * @param trace location in the code from where the message is printed
+ * @param function function from where the message is printed
+ * @param ... Format string followed by format arguments.
+ * @note function parameter cannot be concatenated with other strings
+ */
+/* Select the correct system output function */
+#ifdef CONFIG_MALI_DEBUG
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...)\
+		do { \
+			pr_err("Mali<ASSERT>: %s function:%s ", trace, function);\
+			pr_err(__VA_ARGS__);\
+			pr_err("\n");\
+		} while (false)
+#else
+#define KBASEP_DEBUG_ASSERT_OUT(trace, function, ...) CSTD_NOP()
+#endif
+
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_CALL_ASSERT_HOOK() kbasep_debug_assert_call_hook()
+#else
+#define KBASE_CALL_ASSERT_HOOK() CSTD_NOP()
+#endif
+
+/**
+ * @def KBASE_DEBUG_ASSERT(expr)
+ * @brief Calls @ref KBASE_DEBUG_ASSERT_MSG and prints the expression @a expr if @a expr is false
+ *
+ * @note This macro does nothing if the flag @ref KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+ *
+ * @param expr Boolean expression
+ */
+#define KBASE_DEBUG_ASSERT(expr) \
+	KBASE_DEBUG_ASSERT_MSG(expr, #expr)
+
+#if KBASE_DEBUG_DISABLE_ASSERTS
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) CSTD_NOP()
+#else
+	/**
+	 * @def KBASE_DEBUG_ASSERT_MSG(expr, ...)
+	 * @brief Calls @ref KBASEP_DEBUG_ASSERT_OUT and prints the given message if @a expr is false
+	 *
+	 * @note This macro does nothing if the flag @ref KBASE_DEBUG_DISABLE_ASSERTS is set to 1
+	 *
+	 * @param expr Boolean expression
+	 * @param ...  Message to display when @a expr is false, as a format string followed by format arguments.
+	 */
+#define KBASE_DEBUG_ASSERT_MSG(expr, ...) \
+		do { \
+			if (!(expr)) { \
+				KBASEP_DEBUG_ASSERT_OUT(KBASEP_DEBUG_PRINT_TRACE, KBASEP_DEBUG_PRINT_FUNCTION, __VA_ARGS__);\
+				KBASE_CALL_ASSERT_HOOK();\
+				BUG();\
+			} \
+		} while (false)
+#endif				/* KBASE_DEBUG_DISABLE_ASSERTS */
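+
+/*
+ * Example usage (sketch; the checked values are illustrative):
+ *
+ *	KBASE_DEBUG_ASSERT(kctx != NULL);
+ *	KBASE_DEBUG_ASSERT_MSG(nr_pages > 0, "invalid page count %zu", nr_pages);
+ *
+ * With KBASE_DEBUG_DISABLE_ASSERTS set to 1 (the release default) both
+ * expand to no-ops.
+ */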
+
+/**
+ * @def KBASE_DEBUG_CODE( X )
+ * @brief Executes the code inside the macro only in debug mode
+ *
+ * @param X Code to compile only in debug mode.
+ */
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_DEBUG_CODE(X) X
+#else
+#define KBASE_DEBUG_CODE(X) CSTD_NOP()
+#endif				/* CONFIG_MALI_DEBUG */
+
+/** @} */
+
+/**
+ * @brief Register a function to call on ASSERT
+ *
+ * Such functions will \b only be called during Debug mode, and for debugging
+ * features \b only. Do not rely on them to be called in general use.
+ *
+ * To disable the hook, supply NULL to \a func.
+ *
+ * @note This function is not thread-safe, and should only be used to
+ * register/deregister once in the module's lifetime.
+ *
+ * @param[in] func the function to call when an assert is triggered.
+ * @param[in] param the parameter to pass to \a func when calling it
+ */
+void kbase_debug_assert_register_hook(kbase_debug_assert_hook *func, void *param);
+
+/**
+ * @brief Call a debug assert hook previously registered with kbase_debug_assert_register_hook()
+ *
+ * @note This function is not thread-safe with respect to multiple threads
+ * registering functions and parameters with
+ * kbase_debug_assert_register_hook(). Otherwise, thread safety is the
+ * responsibility of the registered hook.
+ */
+void kbasep_debug_assert_call_hook(void);
+
+#endif				/* _KBASE_DEBUG_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c
new file mode 100644
index 0000000..88bb0d3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.c
@@ -0,0 +1,538 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <linux/spinlock.h>
+#include <mali_kbase_hwaccess_jm.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static bool kbase_is_job_fault_event_pending(struct kbase_device *kbdev)
+{
+	struct list_head *event_list = &kbdev->job_fault_event_list;
+	unsigned long    flags;
+	bool             ret;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	ret = !list_empty(event_list);
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+	return ret;
+}
+
+static void kbase_ctx_remove_pending_event(struct kbase_context *kctx)
+{
+	struct list_head *event_list = &kctx->kbdev->job_fault_event_list;
+	struct base_job_fault_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kctx->kbdev->job_fault_event_lock, flags);
+	list_for_each_entry(event, event_list, head) {
+		if (event->katom->kctx == kctx) {
+			list_del(&event->head);
+			spin_unlock_irqrestore(&kctx->kbdev->job_fault_event_lock, flags);
+
+			wake_up(&kctx->kbdev->job_fault_resume_wq);
+			flush_work(&event->job_fault_work);
+
+			/* job_fault_event_list can only have a single atom for
+			 * each context.
+			 */
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&kctx->kbdev->job_fault_event_lock, flags);
+}
+
+static bool kbase_ctx_has_no_event_pending(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct list_head *event_list = &kctx->kbdev->job_fault_event_list;
+	struct base_job_fault_event *event;
+	unsigned long               flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	if (list_empty(event_list)) {
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		return true;
+	}
+	list_for_each_entry(event, event_list, head) {
+		if (event->katom->kctx == kctx) {
+			spin_unlock_irqrestore(&kbdev->job_fault_event_lock,
+					flags);
+			return false;
+		}
+	}
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+	return true;
+}
+
+/* Wait until a fault happens and copy the event */
+static int kbase_job_fault_event_wait(struct kbase_device *kbdev,
+		struct base_job_fault_event *event)
+{
+	struct list_head            *event_list = &kbdev->job_fault_event_list;
+	struct base_job_fault_event *event_in;
+	unsigned long               flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	while (list_empty(event_list)) {
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		if (wait_event_interruptible(kbdev->job_fault_wq,
+				 kbase_is_job_fault_event_pending(kbdev)))
+			return -ERESTARTSYS;
+		spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	}
+
+	event_in = list_entry(event_list->next,
+			struct base_job_fault_event, head);
+	event->event_code = event_in->event_code;
+	event->katom = event_in->katom;
+
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+	return 0;
+
+}
+
+/* remove the event from the queue */
+static struct base_job_fault_event *kbase_job_fault_event_dequeue(
+		struct kbase_device *kbdev, struct list_head *event_list)
+{
+	struct base_job_fault_event *event;
+
+	event = list_entry(event_list->next,
+			struct base_job_fault_event, head);
+	list_del(event_list->next);
+
+	return event;
+
+}
+
+/* Remove all atoms that follow the failed atom in the same context and
+ * call the postponed bottom half of job done.
+ * Then this context can be rescheduled.
+ */
+static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx)
+{
+	struct list_head *event_list = &kctx->job_fault_resume_event_list;
+
+	while (!list_empty(event_list)) {
+		struct base_job_fault_event *event;
+
+		event = kbase_job_fault_event_dequeue(kctx->kbdev,
+				&kctx->job_fault_resume_event_list);
+		kbase_jd_done_worker(&event->katom->work);
+	}
+
+}
+
+/* Remove all the failed atoms that belong to different contexts.
+ * Resume all the contexts that were suspended due to a failed job.
+ */
+static void kbase_job_fault_event_cleanup(struct kbase_device *kbdev)
+{
+	struct list_head *event_list = &kbdev->job_fault_event_list;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	while (!list_empty(event_list)) {
+		kbase_job_fault_event_dequeue(kbdev, event_list);
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		wake_up(&kbdev->job_fault_resume_wq);
+		spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	}
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+}
+
+static void kbase_job_fault_resume_worker(struct work_struct *data)
+{
+	struct base_job_fault_event *event = container_of(data,
+			struct base_job_fault_event, job_fault_work);
+	struct kbase_context *kctx;
+	struct kbase_jd_atom *katom;
+
+	katom = event->katom;
+	kctx = katom->kctx;
+
+	dev_info(kctx->kbdev->dev, "Job dumping wait\n");
+
+	/* When woken up, check whether the queue is empty or whether the
+	 * failed atom belongs to a different context. If so, wake up; either
+	 * case means the failed job has been dumped. Note that the
+	 * job_fault_event_list should never contain two atoms belonging to
+	 * the same context.
+	 */
+	wait_event(kctx->kbdev->job_fault_resume_wq,
+			 kbase_ctx_has_no_event_pending(kctx));
+
+	atomic_set(&kctx->job_fault_count, 0);
+	kbase_jd_done_worker(&katom->work);
+
+	/* In case following atoms were scheduled during the failed job dump,
+	 * their job_done_worker was held back. We need to rerun it now that
+	 * the dump has finished.
+	 */
+	kbase_job_fault_resume_event_cleanup(kctx);
+
+	dev_info(kctx->kbdev->dev, "Job dumping finish, resume scheduler\n");
+}
+
+static struct base_job_fault_event *kbase_job_fault_event_queue(
+		struct list_head *event_list,
+		struct kbase_jd_atom *atom,
+		u32 completion_code)
+{
+	struct base_job_fault_event *event;
+
+	event = &atom->fault_event;
+
+	event->katom = atom;
+	event->event_code = completion_code;
+
+	list_add_tail(&event->head, event_list);
+
+	return event;
+
+}
+
+static void kbase_job_fault_event_post(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom, u32 completion_code)
+{
+	struct base_job_fault_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+	event = kbase_job_fault_event_queue(&kbdev->job_fault_event_list,
+				katom, completion_code);
+	spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+
+	wake_up_interruptible(&kbdev->job_fault_wq);
+
+	INIT_WORK(&event->job_fault_work, kbase_job_fault_resume_worker);
+	queue_work(kbdev->job_fault_resume_workq, &event->job_fault_work);
+
+	dev_info(katom->kctx->kbdev->dev, "Job fault happened, start dump: %d_%d",
+			katom->kctx->tgid, katom->kctx->id);
+
+}
+
+/*
+ * This function processes the job fault:
+ * - get the register copy
+ * - send the failed job dump event
+ * - create a wait queue to wait until the job dump finishes
+ */
+
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+		u32 completion_code)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Check if dumping is already in progress. Only one atom of each
+	 * context can be dumped at a time. If the atom belongs to a
+	 * different context, it can be dumped.
+	 */
+	if (atomic_read(&kctx->job_fault_count) > 0) {
+		kbase_job_fault_event_queue(
+				&kctx->job_fault_resume_event_list,
+				katom, completion_code);
+		dev_info(kctx->kbdev->dev, "queue:%d\n",
+				kbase_jd_atom_id(kctx, katom));
+		return true;
+	}
+
+	if (kbase_ctx_flag(kctx, KCTX_DYING))
+		return false;
+
+	if (kctx->kbdev->job_fault_debug) {
+
+		if (completion_code != BASE_JD_EVENT_DONE) {
+
+			if (!kbase_job_fault_get_reg_snapshot(kctx)) {
+				dev_warn(kctx->kbdev->dev, "get reg dump failed\n");
+				return false;
+			}
+
+			kbase_job_fault_event_post(kctx->kbdev, katom,
+					completion_code);
+			atomic_inc(&kctx->job_fault_count);
+			dev_info(kctx->kbdev->dev, "post:%d\n",
+					kbase_jd_atom_id(kctx, katom));
+			return true;
+
+		}
+	}
+	return false;
+
+}
+
+static int debug_job_fault_show(struct seq_file *m, void *v)
+{
+	struct kbase_device *kbdev = m->private;
+	struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+	struct kbase_context *kctx = event->katom->kctx;
+	int i;
+
+	dev_info(kbdev->dev, "debug job fault seq show:%d_%d, %d",
+			kctx->tgid, kctx->id, event->reg_offset);
+
+	if (kctx->reg_dump == NULL) {
+		dev_warn(kbdev->dev, "reg dump is NULL");
+		return -1;
+	}
+
+	if (kctx->reg_dump[event->reg_offset] ==
+			REGISTER_DUMP_TERMINATION_FLAG) {
+		/* Return the error here to stop the read. And the
+		 * following next() will not be called. The stop can
+		 * get the real event resource and release it
+		 */
+		return -1;
+	}
+
+	if (event->reg_offset == 0)
+		seq_printf(m, "%d_%d\n", kctx->tgid, kctx->id);
+
+	for (i = 0; i < 50; i++) {
+		if (kctx->reg_dump[event->reg_offset] ==
+				REGISTER_DUMP_TERMINATION_FLAG) {
+			break;
+		}
+		seq_printf(m, "%08x: %08x\n",
+				kctx->reg_dump[event->reg_offset],
+				kctx->reg_dump[1+event->reg_offset]);
+		event->reg_offset += 2;
+
+	}
+
+	return 0;
+}
+
+static void *debug_job_fault_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct kbase_device *kbdev = m->private;
+	struct base_job_fault_event *event = (struct base_job_fault_event *)v;
+
+	dev_info(kbdev->dev, "debug job fault seq next:%d, %d",
+			event->reg_offset, (int)*pos);
+
+	return event;
+}
+
+static void *debug_job_fault_start(struct seq_file *m, loff_t *pos)
+{
+	struct kbase_device *kbdev = m->private;
+	struct base_job_fault_event *event;
+
+	dev_info(kbdev->dev, "fault job seq start:%d", (int)*pos);
+
+	/* The condition is tricky here. It needs to make sure that either the
+	 * fault hasn't happened and the dumping hasn't been started, or the
+	 * dumping has finished.
+	 */
+	if (*pos == 0) {
+		event = kmalloc(sizeof(*event), GFP_KERNEL);
+		if (!event)
+			return NULL;
+		event->reg_offset = 0;
+		if (kbase_job_fault_event_wait(kbdev, event)) {
+			kfree(event);
+			return NULL;
+		}
+
+		/* The cache flush workaround is called in bottom half of
+		 * job done but we delayed it. Now we should clean cache
+		 * earlier. Then the GPU memory dump should be correct.
+		 */
+		kbase_backend_cache_clean(kbdev, event->katom);
+	} else
+		return NULL;
+
+	return event;
+}
+
+static void debug_job_fault_stop(struct seq_file *m, void *v)
+{
+	struct kbase_device *kbdev = m->private;
+
+	/* Here we wake up the kbase_jd_done_worker after stop. The debug
+	 * daemon needs to get the memory dump before the register dump,
+	 * otherwise the memory dump may be incorrect.
+	 */
+
+	if (v != NULL) {
+		kfree(v);
+		dev_info(kbdev->dev, "debug job fault seq stop stage 1");
+
+	} else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->job_fault_event_lock, flags);
+		if (!list_empty(&kbdev->job_fault_event_list)) {
+			kbase_job_fault_event_dequeue(kbdev,
+				&kbdev->job_fault_event_list);
+			wake_up(&kbdev->job_fault_resume_wq);
+		}
+		spin_unlock_irqrestore(&kbdev->job_fault_event_lock, flags);
+		dev_info(kbdev->dev, "debug job fault seq stop stage 2");
+	}
+
+}
+
+static const struct seq_operations ops = {
+	.start = debug_job_fault_start,
+	.next = debug_job_fault_next,
+	.stop = debug_job_fault_stop,
+	.show = debug_job_fault_show,
+};
+
+static int debug_job_fault_open(struct inode *in, struct file *file)
+{
+	struct kbase_device *kbdev = in->i_private;
+
+	seq_open(file, &ops);
+
+	((struct seq_file *)file->private_data)->private = kbdev;
+	dev_info(kbdev->dev, "debug job fault seq open");
+
+	kbdev->job_fault_debug = true;
+
+	return 0;
+
+}
+
+static int debug_job_fault_release(struct inode *in, struct file *file)
+{
+	struct kbase_device *kbdev = in->i_private;
+
+	seq_release(in, file);
+
+	kbdev->job_fault_debug = false;
+
+	/* Clean the unprocessed job fault. After that, all the suspended
+	 * contexts could be rescheduled.
+	 */
+	kbase_job_fault_event_cleanup(kbdev);
+
+	dev_info(kbdev->dev, "debug job fault seq close");
+
+	return 0;
+}
+
+static const struct file_operations kbasep_debug_job_fault_fops = {
+	.open = debug_job_fault_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = debug_job_fault_release,
+};
+
+/*
+ *  Initialize debugfs entry for job fault dump
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_file("job_fault", S_IRUGO,
+			kbdev->mali_debugfs_directory, kbdev,
+			&kbasep_debug_job_fault_fops);
+}
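+
+/*
+ * Usage note (informal): a debug daemon opens the "job_fault" debugfs file,
+ * which enables job_fault_debug, and then blocks in read() until an atom
+ * fails. Each read yields the register dump for one failed atom; closing the
+ * file cleans up pending events so suspended contexts can be rescheduled.
+ */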
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+	INIT_LIST_HEAD(&kbdev->job_fault_event_list);
+
+	init_waitqueue_head(&(kbdev->job_fault_wq));
+	init_waitqueue_head(&(kbdev->job_fault_resume_wq));
+	spin_lock_init(&kbdev->job_fault_event_lock);
+
+	kbdev->job_fault_resume_workq = alloc_workqueue(
+			"kbase_job_fault_resume_work_queue", WQ_MEM_RECLAIM, 1);
+	if (!kbdev->job_fault_resume_workq)
+		return -ENOMEM;
+
+	kbdev->job_fault_debug = false;
+
+	return 0;
+}
+
+/*
+ * Release the relevant resource per device
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+	destroy_workqueue(kbdev->job_fault_resume_workq);
+}
+
+/*
+ *  Initialize the relevant data structure per context
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx)
+{
+	/* We need to allocate double the register range size because this
+	 * memory will keep both the register address and the value.
+	 */
+	kctx->reg_dump = vmalloc(0x4000 * 2);
+	if (kctx->reg_dump == NULL)
+		return;
+
+	if (!kbase_debug_job_fault_reg_snapshot_init(kctx, 0x4000)) {
+		vfree(kctx->reg_dump);
+		kctx->reg_dump = NULL;
+	}
+	INIT_LIST_HEAD(&kctx->job_fault_resume_event_list);
+	atomic_set(&kctx->job_fault_count, 0);
+}
+
+/*
+ *  Release the relevant resource per context
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx)
+{
+	vfree(kctx->reg_dump);
+}
+
+void kbase_debug_job_fault_kctx_unblock(struct kbase_context *kctx)
+{
+	WARN_ON(!kbase_ctx_flag(kctx, KCTX_DYING));
+
+	kbase_ctx_remove_pending_event(kctx);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev)
+{
+	kbdev->job_fault_debug = false;
+
+	return 0;
+}
+
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h
new file mode 100644
index 0000000..ef69627
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_job_fault.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DEBUG_JOB_FAULT_H
+#define _KBASE_DEBUG_JOB_FAULT_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REGISTER_DUMP_TERMINATION_FLAG 0xFFFFFFFF
+
+/**
+ * kbase_debug_job_fault_dev_init - Create the fault event wait queue
+ *		per device and initialize the required lists.
+ * @kbdev:	Device pointer
+ *
+ * Return: Zero on success or a negative error code.
+ */
+int kbase_debug_job_fault_dev_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_debugfs_init - Initialize the job fault debugfs entry
+ * @kbdev:	Device pointer
+ */
+void kbase_debug_job_fault_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_dev_term - Clean up resources created in
+ *		kbase_debug_job_fault_dev_init.
+ * @kbdev:	Device pointer
+ */
+void kbase_debug_job_fault_dev_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_debug_job_fault_context_init - Initialize the relevant
+ *		data structure per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_init(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_context_term - Release the relevant
+ *		resource per context
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_context_term(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_kctx_unblock - Unblock the atoms blocked on job fault
+ *					dumping on context termination.
+ *
+ * This function is called during context termination to unblock the atom for
+ * which the job fault occurred and also the atoms following it. This is
+ * needed, otherwise the wait for zero jobs could time out (leading to an
+ * assertion failure, and a kernel panic in debug builds) in the pathological
+ * case where the thread/daemon capturing the job fault events is running
+ * but has, for some reason, stopped consuming them.
+ *
+ * @kctx: KBase context pointer
+ */
+void kbase_debug_job_fault_kctx_unblock(struct kbase_context *kctx);
+
+/**
+ * kbase_debug_job_fault_process - Process the failed job.
+ *      It will send an event and wake up the job fault waiting queue,
+ *      then create a work queue to wait for the job dump to finish.
+ *      This function should be called in the interrupt handler and before
+ *      jd_done, to make sure the jd_done_worker will be delayed until the
+ *      job dump finishes.
+ * @katom: The failed atom pointer
+ * @completion_code: the job status
+ * @return true if dump is going on
+ */
+bool kbase_debug_job_fault_process(struct kbase_jd_atom *katom,
+		u32 completion_code);
+
+/**
+ * kbase_debug_job_fault_reg_snapshot_init - Set the addresses of the
+ *      registers of interest for the job fault process; the relevant
+ *      registers will be saved when a job fault happens
+ * @kctx: KBase context pointer
+ * @reg_range: Maximum register address space
+ * @return true if initializing successfully
+ */
+bool kbase_debug_job_fault_reg_snapshot_init(struct kbase_context *kctx,
+		int reg_range);
+
+/**
+ * kbase_job_fault_get_reg_snapshot - Read the registers of interest for the
+ *      failed job dump
+ * @kctx: KBase context pointer
+ * @return true if getting registers successfully
+ */
+bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx);
+
+#endif  /*_KBASE_DEBUG_JOB_FAULT_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
new file mode 100644
index 0000000..8f46117
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
@@ -0,0 +1,312 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Debugfs interface to dump the memory visible to the GPU
+ */
+
+#include "mali_kbase_debug_mem_view.h"
+#include "mali_kbase.h"
+
+#include <linux/list.h>
+#include <linux/file.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+#if (KERNEL_VERSION(4, 1, 0) > LINUX_VERSION_CODE)
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+#endif
+
+struct debug_mem_mapping {
+	struct list_head node;
+
+	struct kbase_mem_phy_alloc *alloc;
+	unsigned long flags;
+
+	u64 start_pfn;
+	size_t nr_pages;
+};
+
+struct debug_mem_data {
+	struct list_head mapping_list;
+	struct kbase_context *kctx;
+};
+
+struct debug_mem_seq_off {
+	struct list_head *lh;
+	size_t offset;
+};
+
+static void *debug_mem_start(struct seq_file *m, loff_t *_pos)
+{
+	struct debug_mem_data *mem_data = m->private;
+	struct debug_mem_seq_off *data;
+	struct debug_mem_mapping *map;
+	loff_t pos = *_pos;
+
+	list_for_each_entry(map, &mem_data->mapping_list, node) {
+		if (pos >= map->nr_pages) {
+			pos -= map->nr_pages;
+		} else {
+			data = kmalloc(sizeof(*data), GFP_KERNEL);
+			if (!data)
+				return NULL;
+			data->lh = &map->node;
+			data->offset = pos;
+			return data;
+		}
+	}
+
+	/* Beyond the end */
+	return NULL;
+}
+
+static void debug_mem_stop(struct seq_file *m, void *v)
+{
+	kfree(v);
+}
+
+static void *debug_mem_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct debug_mem_data *mem_data = m->private;
+	struct debug_mem_seq_off *data = v;
+	struct debug_mem_mapping *map;
+
+	map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+	if (data->offset < map->nr_pages - 1) {
+		data->offset++;
+		++*pos;
+		return data;
+	}
+
+	if (list_is_last(data->lh, &mem_data->mapping_list)) {
+		kfree(data);
+		return NULL;
+	}
+
+	data->lh = data->lh->next;
+	data->offset = 0;
+	++*pos;
+
+	return data;
+}
+
+static int debug_mem_show(struct seq_file *m, void *v)
+{
+	struct debug_mem_data *mem_data = m->private;
+	struct debug_mem_seq_off *data = v;
+	struct debug_mem_mapping *map;
+	int i, j;
+	struct page *page;
+	uint32_t *mapping;
+	pgprot_t prot = PAGE_KERNEL;
+
+	map = list_entry(data->lh, struct debug_mem_mapping, node);
+
+	kbase_gpu_vm_lock(mem_data->kctx);
+
+	if (data->offset >= map->alloc->nents) {
+		seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn +
+				data->offset) << PAGE_SHIFT);
+		goto out;
+	}
+
+	if (!(map->flags & KBASE_REG_CPU_CACHED))
+		prot = pgprot_writecombine(prot);
+
+	page = as_page(map->alloc->pages[data->offset]);
+	mapping = vmap(&page, 1, VM_MAP, prot);
+	if (!mapping)
+		goto out;
+
+	for (i = 0; i < PAGE_SIZE; i += 4*sizeof(*mapping)) {
+		seq_printf(m, "%016llx:", i + ((map->start_pfn +
+				data->offset) << PAGE_SHIFT));
+
+		for (j = 0; j < 4*sizeof(*mapping); j += sizeof(*mapping))
+			seq_printf(m, " %08x", mapping[(i+j)/sizeof(*mapping)]);
+		seq_putc(m, '\n');
+	}
+
+	vunmap(mapping);
+
+	seq_putc(m, '\n');
+
+out:
+	kbase_gpu_vm_unlock(mem_data->kctx);
+	return 0;
+}
+
+static const struct seq_operations ops = {
+	.start = debug_mem_start,
+	.next = debug_mem_next,
+	.stop = debug_mem_stop,
+	.show = debug_mem_show,
+};
+
+static int debug_mem_zone_open(struct rb_root *rbtree,
+						struct debug_mem_data *mem_data)
+{
+	int ret = 0;
+	struct rb_node *p;
+	struct kbase_va_region *reg;
+	struct debug_mem_mapping *mapping;
+
+	for (p = rb_first(rbtree); p; p = rb_next(p)) {
+		reg = rb_entry(p, struct kbase_va_region, rblink);
+
+		if (reg->gpu_alloc == NULL)
+			/* Empty region - ignore */
+			continue;
+
+		mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+		if (!mapping) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		mapping->alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+		mapping->start_pfn = reg->start_pfn;
+		mapping->nr_pages = reg->nr_pages;
+		mapping->flags = reg->flags;
+		list_add_tail(&mapping->node, &mem_data->mapping_list);
+	}
+
+out:
+	return ret;
+}
+
+static int debug_mem_open(struct inode *i, struct file *file)
+{
+	struct file *kctx_file = i->i_private;
+	struct kbase_context *kctx = kctx_file->private_data;
+	struct debug_mem_data *mem_data;
+	int ret;
+
+	if (get_file_rcu(kctx_file) == 0)
+		return -ENOENT;
+
+	ret = seq_open(file, &ops);
+	if (ret)
+		goto open_fail;
+
+	mem_data = kmalloc(sizeof(*mem_data), GFP_KERNEL);
+	if (!mem_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mem_data->kctx = kctx;
+
+	INIT_LIST_HEAD(&mem_data->mapping_list);
+
+	kbase_gpu_vm_lock(kctx);
+
+	ret = debug_mem_zone_open(&kctx->reg_rbtree_same, mem_data);
+	if (0 != ret) {
+		kbase_gpu_vm_unlock(kctx);
+		goto out;
+	}
+
+	ret = debug_mem_zone_open(&kctx->reg_rbtree_custom, mem_data);
+	if (0 != ret) {
+		kbase_gpu_vm_unlock(kctx);
+		goto out;
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	((struct seq_file *)file->private_data)->private = mem_data;
+
+	return 0;
+
+out:
+	if (mem_data) {
+		while (!list_empty(&mem_data->mapping_list)) {
+			struct debug_mem_mapping *mapping;
+
+			mapping = list_first_entry(&mem_data->mapping_list,
+					struct debug_mem_mapping, node);
+			kbase_mem_phy_alloc_put(mapping->alloc);
+			list_del(&mapping->node);
+			kfree(mapping);
+		}
+		kfree(mem_data);
+	}
+	seq_release(i, file);
+open_fail:
+	fput(kctx_file);
+
+	return ret;
+}
+
+static int debug_mem_release(struct inode *inode, struct file *file)
+{
+	struct file *kctx_file = inode->i_private;
+	struct seq_file *sfile = file->private_data;
+	struct debug_mem_data *mem_data = sfile->private;
+	struct debug_mem_mapping *mapping;
+
+	seq_release(inode, file);
+
+	while (!list_empty(&mem_data->mapping_list)) {
+		mapping = list_first_entry(&mem_data->mapping_list,
+				struct debug_mem_mapping, node);
+		kbase_mem_phy_alloc_put(mapping->alloc);
+		list_del(&mapping->node);
+		kfree(mapping);
+	}
+
+	kfree(mem_data);
+
+	fput(kctx_file);
+
+	return 0;
+}
+
+static const struct file_operations kbase_debug_mem_view_fops = {
+	.open = debug_mem_open,
+	.release = debug_mem_release,
+	.read = seq_read,
+	.llseek = seq_lseek
+};
+
+/**
+ * kbase_debug_mem_view_init - Initialise the mem_view debugfs file
+ * @kctx_file: The /dev/mali0 file instance for the context
+ *
+ * This function creates a "mem_view" file which can be used to get a view of
+ * the context's memory as the GPU sees it (i.e. using the GPU's page tables).
+ *
+ * The file is cleaned up by a call to debugfs_remove_recursive() deleting the
+ * parent directory.
+ */
+void kbase_debug_mem_view_init(struct file *kctx_file)
+{
+	struct kbase_context *kctx = kctx_file->private_data;
+
+	debugfs_create_file("mem_view", S_IRUSR, kctx->kctx_dentry, kctx_file,
+			&kbase_debug_mem_view_fops);
+}
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h
new file mode 100644
index 0000000..886ca94
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.h
@@ -0,0 +1,30 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DEBUG_MEM_VIEW_H
+#define _KBASE_DEBUG_MEM_VIEW_H
+
+#include <mali_kbase.h>
+
+void kbase_debug_mem_view_init(struct file *kctx_file);
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_defs.h b/drivers/gpu/arm/midgard/mali_kbase_defs.h
new file mode 100644
index 0000000..cb31495
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_defs.h
@@ -0,0 +1,2223 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_defs.h
+ *
+ * Definitions (types, defines, etc.) common to Kbase. They are placed here to
+ * allow the hierarchy of header files to work.
+ */
+
+#ifndef _KBASE_DEFS_H_
+#define _KBASE_DEFS_H_
+
+#include <mali_kbase_config.h>
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_kbase_mem_lowlevel.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_pm.h>
+#include <mali_kbase_gpuprops_types.h>
+#include <mali_kbase_hwcnt_backend_gpu.h>
+#include <protected_mode_switcher.h>
+
+
+#include <linux/atomic.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+#include <linux/bus_logger.h>
+#endif
+
+#if defined(CONFIG_SYNC)
+#include <sync.h>
+#else
+#include "mali_kbase_fence_defs.h"
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif				/* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_MALI_DEVFREQ */
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#if defined(CONFIG_PM_RUNTIME) || \
+	(defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+#define KBASE_PM_RUNTIME 1
+#endif
+
+/** Enable SW tracing when set */
+#ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE
+#define KBASE_TRACE_ENABLE 1
+#endif
+
+#ifndef KBASE_TRACE_ENABLE
+#ifdef CONFIG_MALI_DEBUG
+#define KBASE_TRACE_ENABLE 1
+#else
+#define KBASE_TRACE_ENABLE 0
+#endif				/* CONFIG_MALI_DEBUG */
+#endif				/* KBASE_TRACE_ENABLE */
+
+/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
+#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
+
+/**
+ * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware.
+ * Note that the time is actually ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT between the context zap starting and the GPU
+ * actually being reset to give other contexts time for their jobs to be soft-stopped and removed from the hardware
+ * before resetting.
+ */
+#define ZAP_TIMEOUT             1000
+
+/** Number of milliseconds before we time out on a GPU soft/hard reset */
+#define RESET_TIMEOUT           500
+
+/**
+ * Prevent soft-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but when scheduling is desired to be more predictable.
+ *
+ * Note that soft stop may still be disabled separately due to HW issues.
+ *
+ * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
+
+/**
+ * Prevent hard-stops from occurring in scheduling situations
+ *
+ * This is not due to HW issues, but when scheduling is desired to be more predictable.
+ *
+ * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context.
+ *
+ * @note if not in use, define this value to 0 instead of \#undef'ing it
+ */
+#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
+
+/**
+ * The maximum number of Job Slots to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of job slots.
+ */
+#define BASE_JM_MAX_NR_SLOTS        3
+
+/**
+ * The maximum number of Address Spaces to support in the Hardware.
+ *
+ * You can optimize this down if your target devices will only ever support a
+ * small number of Address Spaces
+ */
+#define BASE_MAX_NR_AS              16
+
+/* mmu */
+#define MIDGARD_MMU_LEVEL(x) (x)
+
+#define MIDGARD_MMU_TOPLEVEL    MIDGARD_MMU_LEVEL(0)
+
+#define MIDGARD_MMU_BOTTOMLEVEL MIDGARD_MMU_LEVEL(3)
+
+#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW | KBASE_REG_GPU_WR)
+
+/** setting in kbase_context::as_nr that indicates it's invalid */
+#define KBASEP_AS_NR_INVALID     (-1)
+
+#define KBASE_LOCK_REGION_MAX_SIZE (63)
+#define KBASE_LOCK_REGION_MIN_SIZE (11)
+
+#define KBASE_TRACE_SIZE_LOG2 8	/* 256 entries */
+#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
+#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_hwaccess_defs.h"
+
+#define KBASEP_FORCE_REPLAY_DISABLED 0
+
+/* Maximum force replay limit when randomization is enabled */
+#define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16
+
+/* Maximum number of pages of memory that require a permanent mapping, per
+ * kbase_context
+ */
+#define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((1024ul * 1024ul) >> \
+								PAGE_SHIFT)
+
+/** Atom has been previously soft-stopped */
+#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
+/** Atom has been previously retried to execute */
+#define KBASE_KATOM_FLAGS_RERUN (1<<2)
+/* Atom submitted with JOB_CHAIN_FLAG bit set in JS_CONFIG_NEXT register, helps to
+ * disambiguate short-running job chains during soft/hard stopping of jobs
+ */
+#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
+/** Atom has been previously hard-stopped. */
+#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4)
+/** Atom has caused us to enter disjoint state */
+#define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5)
+/* Atom blocked on cross-slot dependency */
+#define KBASE_KATOM_FLAG_X_DEP_BLOCKED (1<<7)
+/* Atom has fail dependency on cross-slot dependency */
+#define KBASE_KATOM_FLAG_FAIL_BLOCKER (1<<8)
+/* Atom is currently in the list of atoms blocked on cross-slot dependencies */
+#define KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST (1<<9)
+/* Atom is currently holding a context reference */
+#define KBASE_KATOM_FLAG_HOLDING_CTX_REF (1<<10)
+/* Atom requires GPU to be in protected mode */
+#define KBASE_KATOM_FLAG_PROTECTED (1<<11)
+/* Atom has been stored in runnable_tree */
+#define KBASE_KATOM_FLAG_JSCTX_IN_TREE (1<<12)
+/* Atom is waiting for L2 caches to power up in order to enter protected mode */
+#define KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT (1<<13)
+
+/* SW related flags about types of JS_COMMAND action
+ * NOTE: These must be masked off by JS_COMMAND_MASK */
+
+/** This command causes a disjoint event */
+#define JS_COMMAND_SW_CAUSES_DISJOINT 0x100
+
+/** Bitmask of all SW related flags */
+#define JS_COMMAND_SW_BITS  (JS_COMMAND_SW_CAUSES_DISJOINT)
+
+#if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK)
+#error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks
+#endif
+
+/** Soft-stop command that causes a Disjoint event. This of course isn't
+ *  entirely masked off by JS_COMMAND_MASK */
+#define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \
+		(JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP)
+
+#define KBASEP_ATOM_ID_INVALID BASE_JD_ATOM_COUNT
+
+/* Serialize atoms within a slot (ie only one atom per job slot) */
+#define KBASE_SERIALIZE_INTRA_SLOT (1 << 0)
+/* Serialize atoms between slots (ie only one job slot running at any time) */
+#define KBASE_SERIALIZE_INTER_SLOT (1 << 1)
+/* Reset the GPU after each atom completion */
+#define KBASE_SERIALIZE_RESET (1 << 2)
+
+/* Forward declarations */
+struct kbase_context;
+struct kbase_device;
+struct kbase_as;
+struct kbase_mmu_setup;
+struct kbase_ipa_model_vinstr_data;
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * struct base_job_fault_event - keeps track of the atom which faulted or which
+ *                               completed after the faulty atom but before the
+ *                               debug data for faulty atom was dumped.
+ *
+ * @event_code:     event code for the atom; will be something other than
+ *                  BASE_JD_EVENT_DONE for the atom which faulted.
+ * @katom:          pointer to the atom for which job fault occurred or which completed
+ *                  after the faulty atom.
+ * @job_fault_work: work item, queued only for the faulty atom, which waits for
+ *                  the dumping to get completed and then does the bottom half
+ *                  of job done for the atoms which followed the faulty atom.
+ * @head:           List head used to store the atom in the global list of faulty
+ *                  atoms or context specific list of atoms which got completed
+ *                  during the dump.
+ * @reg_offset:     offset of the register to be dumped next, only applicable for
+ *                  the faulty atom.
+ */
+struct base_job_fault_event {
+
+	u32 event_code;
+	struct kbase_jd_atom *katom;
+	struct work_struct job_fault_work;
+	struct list_head head;
+	int reg_offset;
+};
+
+#endif
+
+/**
+ * struct kbase_jd_atom_dependency - Contains the dependency info for an atom.
+ * @atom:          pointer to the dependee atom.
+ * @dep_type:      type of dependency on the dependee @atom, i.e. order or data
+ *                 dependency. BASE_JD_DEP_TYPE_INVALID indicates no dependency.
+ */
+struct kbase_jd_atom_dependency {
+	struct kbase_jd_atom *atom;
+	u8 dep_type;
+};
+
+/**
+ * struct kbase_io_access - holds information about 1 register access
+ *
+ * @addr: first bit indicates r/w (r=0, w=1)
+ * @value: value written or read
+ */
+struct kbase_io_access {
+	uintptr_t addr;
+	u32 value;
+};
+
+/**
+ * struct kbase_io_history - keeps track of all recent register accesses
+ *
+ * @enabled: true if register accesses are recorded, false otherwise
+ * @lock: spinlock protecting kbase_io_access array
+ * @count: number of registers read/written
+ * @size: number of elements in kbase_io_access array
+ * @buf: array of kbase_io_access
+ */
+struct kbase_io_history {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	bool enabled;
+#else
+	u32 enabled;
+#endif
+
+	spinlock_t lock;
+	size_t count;
+	u16 size;
+	struct kbase_io_access *buf;
+};
+
+/**
+ * kbase_jd_katom_dep_atom - Retrieves a read-only reference to the
+ *                           dependee atom.
+ * @dep:   pointer to the dependency info structure.
+ *
+ * Return: readonly reference to dependee atom.
+ */
+static inline const struct kbase_jd_atom *
+kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	return (const struct kbase_jd_atom *)(dep->atom);
+}
+
+/**
+ * kbase_jd_katom_dep_type -  Retrieves the dependency type info
+ *
+ * @dep:   pointer to the dependency info structure.
+ *
+ * Return: the type of dependency there is on the dependee atom.
+ */
+static inline u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *dep)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	return dep->dep_type;
+}
+
+/**
+ * kbase_jd_katom_dep_set - sets up the dependency info structure
+ *                          as per the values passed.
+ * @const_dep:    pointer to the dependency info structure to be setup.
+ * @a:            pointer to the dependee atom.
+ * @type:         type of dependency there is on the dependee atom.
+ */
+static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency *const_dep,
+		struct kbase_jd_atom *a, u8 type)
+{
+	struct kbase_jd_atom_dependency *dep;
+
+	LOCAL_ASSERT(const_dep != NULL);
+
+	dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+	dep->atom = a;
+	dep->dep_type = type;
+}
+
+/**
+ * kbase_jd_katom_dep_clear - resets the dependency info structure
+ *
+ * @const_dep:    pointer to the dependency info structure to be setup.
+ */
+static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency *const_dep)
+{
+	struct kbase_jd_atom_dependency *dep;
+
+	LOCAL_ASSERT(const_dep != NULL);
+
+	dep = (struct kbase_jd_atom_dependency *)const_dep;
+
+	dep->atom = NULL;
+	dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
+}
+
+/**
+ * enum kbase_atom_gpu_rb_state - The state of an atom, pertinent after it becomes
+ *                                runnable, with respect to job slot ringbuffer/fifo.
+ * @KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB: Atom not currently present in slot fifo, which
+ *                                implies that either atom has not become runnable
+ *                                due to dependency or has completed the execution
+ *                                on GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_BLOCKED: Atom has been added to slot fifo but is blocked
+ *                                due to cross slot dependency, can't be submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV: Atom has been added to slot fifo but
+ *                                is waiting for the completion of previously added atoms
+ *                                in current & other slots, as their protected mode
+ *                                requirements do not match with the current atom.
+ * @KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION: Atom is in slot fifo and is
+ *                                waiting for completion of protected mode transition,
+ *                                needed before the atom is submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: Atom is in slot fifo but is waiting
+ *                                for the cores, which are needed to execute the job
+ *                                chain represented by the atom, to become available
+ * @KBASE_ATOM_GPU_RB_WAITING_AFFINITY: Atom is in slot fifo but is blocked on
+ *                                affinity due to rmu workaround for Hw issue 8987.
+ * @KBASE_ATOM_GPU_RB_READY:      Atom is in slot fifo and can be submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_SUBMITTED:  Atom is in slot fifo and has been submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_RETURN_TO_JS: Atom must be returned to JS due to some failure,
+ *                                but only after the previously added atoms in fifo
+ *                                have completed or have also been returned to JS.
+ */
+enum kbase_atom_gpu_rb_state {
+	KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB,
+	KBASE_ATOM_GPU_RB_WAITING_BLOCKED,
+	KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV,
+	KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION,
+	KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE,
+	KBASE_ATOM_GPU_RB_WAITING_AFFINITY,
+	KBASE_ATOM_GPU_RB_READY,
+	KBASE_ATOM_GPU_RB_SUBMITTED,
+	KBASE_ATOM_GPU_RB_RETURN_TO_JS = -1
+};
+
+/**
+ * enum kbase_atom_enter_protected_state - The state of an atom with respect to the
+ *                      preparation for GPU's entry into protected mode, becomes
+ *                      pertinent only after atom's state with respect to slot
+ *                      ringbuffer is KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @KBASE_ATOM_ENTER_PROTECTED_CHECK:  Starting state. Check if there are any atoms
+ *                      currently submitted to GPU and protected mode transition is
+ *                      not already in progress.
+ * @KBASE_ATOM_ENTER_PROTECTED_HWCNT: Wait for hardware counter context to
+ *                      become disabled before entry into protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_IDLE_L2: Wait for the L2 to become idle in preparation
+ *                      for the coherency change. L2 shall be powered down and GPU shall
+ *                      come out of fully coherent mode before entering protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY: Prepare coherency change;
+ *                      for BASE_HW_ISSUE_TGOX_R1_1234 also request L2 power on so that
+ *                      coherency register contains correct value when GPU enters
+ *                      protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_FINISHED: End state; for BASE_HW_ISSUE_TGOX_R1_1234 check
+ *                      that L2 is powered up and switch GPU to protected mode.
+ */
+enum kbase_atom_enter_protected_state {
+	/**
+	 * NOTE: The integer value of this must match KBASE_ATOM_EXIT_PROTECTED_CHECK.
+	 */
+	KBASE_ATOM_ENTER_PROTECTED_CHECK = 0,
+	KBASE_ATOM_ENTER_PROTECTED_HWCNT,
+	KBASE_ATOM_ENTER_PROTECTED_IDLE_L2,
+	KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY,
+	KBASE_ATOM_ENTER_PROTECTED_FINISHED,
+};
+
+/**
+ * enum kbase_atom_exit_protected_state - The state of an atom with respect to the
+ *                      preparation for GPU's exit from protected mode, becomes
+ *                      pertinent only after atom's state with respect to slot
+ *                      ringbuffer is KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @KBASE_ATOM_EXIT_PROTECTED_CHECK: Starting state. Check if there are any atoms
+ *                      currently submitted to GPU and protected mode transition is
+ *                      not already in progress.
+ * @KBASE_ATOM_EXIT_PROTECTED_IDLE_L2: Wait for the L2 to become idle in preparation
+ *                      for the reset, as exiting protected mode requires a reset.
+ * @KBASE_ATOM_EXIT_PROTECTED_RESET: Issue the reset to trigger exit from protected mode
+ * @KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT: End state, Wait for the reset to complete
+ */
+enum kbase_atom_exit_protected_state {
+	/**
+	 * NOTE: The integer value of this must match KBASE_ATOM_ENTER_PROTECTED_CHECK.
+	 */
+	KBASE_ATOM_EXIT_PROTECTED_CHECK = 0,
+	KBASE_ATOM_EXIT_PROTECTED_IDLE_L2,
+	KBASE_ATOM_EXIT_PROTECTED_RESET,
+	KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT,
+};
+
+/**
+ * struct kbase_ext_res - Contains the info for external resources referred
+ *                        by an atom, which have been mapped on GPU side.
+ * @gpu_address:          Start address of the memory region allocated for
+ *                        the resource from GPU virtual address space.
+ * @alloc:                pointer to physical pages tracking object, set on
+ *                        mapping the external resource on GPU side.
+ */
+struct kbase_ext_res {
+	u64 gpu_address;
+	struct kbase_mem_phy_alloc *alloc;
+};
+
+/**
+ * struct kbase_jd_atom  - object representing the atom, containing the complete
+ *                         state and attributes of an atom.
+ * @work:                  work item for the bottom half processing of the atom,
+ *                         by JD or JS, after it got executed on GPU or the input
+ *                         fence got signaled
+ * @start_timestamp:       time at which the atom was submitted to the GPU, by
+ *                         updating the JS_HEAD_NEXTn register.
+ * @udata:                 copy of the user data sent for the atom in base_jd_submit.
+ * @kctx:                  Pointer to the base context with which the atom is associated.
+ * @dep_head:              Array of 2 list heads, pointing to the two list of atoms
+ *                         which are blocked due to dependency on this atom.
+ * @dep_item:              Array of 2 list heads, used to store the atom in the list of
+ *                         other atoms depending on the same dependee atom.
+ * @dep:                   Array containing the dependency info for the 2 atoms on which
+ *                         the atom depends upon.
+ * @jd_item:               List head used during job dispatch job_done processing - as
+ *                         dependencies may not be entirely resolved at this point,
+ *                         we need to use a separate list head.
+ * @in_jd_list:            flag set to true if atom's @jd_item is currently on a list,
+ *                         prevents atom being processed twice.
+ * @nr_extres:             number of external resources referenced by the atom.
+ * @extres:                pointer to the location containing info about @nr_extres
+ *                         external resources referenced by the atom.
+ * @device_nr:             indicates the coregroup with which the atom is associated,
+ *                         when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified.
+ * @jc:                    GPU address of the job-chain.
+ * @softjob_data:          Copy of data read from the user space buffer that @jc
+ *                         points to.
+ * @fence:                 Stores either an input or output sync fence, depending
+ *                         on soft-job type
+ * @sync_waiter:           Pointer to the sync fence waiter structure passed to the
+ *                         callback function on signaling of the input fence.
+ * @dma_fence:             object containing pointers to both input & output fences
+ *                         and other related members used for explicit sync through
+ *                         soft jobs and for the implicit synchronization required
+ *                         on access to external resources.
+ * @event_code:            Event code for the job chain represented by the atom, both
+ *                         HW and low-level SW events are represented by event codes.
+ * @core_req:              bitmask of BASE_JD_REQ_* flags specifying either Hw or Sw
+ *                         requirements for the job chain represented by the atom.
+ * @ticks:                 Number of scheduling ticks for which atom has been running
+ *                         on the GPU.
+ * @sched_priority:        Priority of the atom for Job scheduling, as per the
+ *                         KBASE_JS_ATOM_SCHED_PRIO_*.
+ * @poking:                Indicates whether poking of MMU is ongoing for the atom,
+ *                         as a WA for the issue HW_ISSUE_8316.
+ * @completed:             Wait queue to wait upon for the completion of atom.
+ * @status:                Indicates at a high level what stage the atom is in,
+ *                         as per KBASE_JD_ATOM_STATE_*: whether it is unused,
+ *                         queued in JD, given to JS, submitted to Hw, or has
+ *                         completed execution on Hw.
+ * @work_id:               used for GPU tracepoints; a snapshot of the 'work_id'
+ *                         counter in kbase_jd_context which is incremented on
+ *                         every call to base_jd_submit.
+ * @slot_nr:               Job slot chosen for the atom.
+ * @atom_flags:            bitmask of KBASE_KATOM_FLAG* flags capturing the exact
+ *                         low level state of the atom.
+ * @retry_count:           Number of times this atom has been retried. Used by replay
+ *                         soft job.
+ * @gpu_rb_state:          bitmask of KBASE_ATOM_GPU_RB_* flags, precisely tracking
+ *                         atom's state after it has entered Job scheduler on becoming
+ *                         runnable. Atom could be blocked due to cross slot dependency
+ *                         or waiting for the shader cores to become available or
+ *                         waiting for protected mode transitions to complete.
+ * @need_cache_flush_cores_retained: flag indicating that manual flush of GPU
+ *                         cache is needed for the atom and the shader cores used
+ *                         for atom have been kept on.
+ * @blocked:               flag indicating that atom's resubmission to GPU is
+ *                         blocked till the work item is scheduled to return the
+ *                         atom to JS.
+ * @pre_dep:               Pointer to atom that this atom has same-slot dependency on
+ * @post_dep:              Pointer to atom that has same-slot dependency on this atom
+ * @x_pre_dep:             Pointer to atom that this atom has cross-slot dependency on
+ * @x_post_dep:            Pointer to atom that has cross-slot dependency on this atom
+ * @flush_id:              The GPU's flush count recorded at the time of submission,
+ *                         used for the cache flush optimisation
+ * @fault_event:           Info for dumping the debug data on Job fault.
+ * @queue:                 List head used for 4 different purposes :
+ *                         Adds atom to the list of dma-buf fence waiting atoms.
+ *                         Adds atom to the list of atoms blocked due to cross
+ *                         slot dependency.
+ *                         Adds atom to the list of softjob atoms for which JIT
+ *                         allocation has been deferred
+ *                         Adds atom to the list of softjob atoms waiting for the
+ *                         signaling of fence.
+ * @jit_node:              Used to keep track of all JIT free/alloc jobs in submission order
+ * @jit_blocked:           Flag indicating that JIT allocation requested through
+ *                         softjob atom will be reattempted after the impending
+ *                         free of other active JIT allocations.
+ * @will_fail_event_code:  If non-zero, this indicates that the atom will fail
+ *                         with the set event_code when the atom is processed.
+ *                         Used for special handling of atoms, which have a data
+ *                         dependency on the failed atoms.
+ * @protected_state:       State of the atom, as per KBASE_ATOM_(ENTER|EXIT)_PROTECTED_*,
+ *                         when transitioning into or out of protected mode. Atom will
+ *                         be either entering or exiting the protected mode.
+ * @runnable_tree_node:    The node added to context's job slot specific rb tree
+ *                         when the atom becomes runnable.
+ * @age:                   Age of atom relative to other atoms in the context, is
+ *                         snapshot of the age_count counter in kbase context.
+ */
+struct kbase_jd_atom {
+	struct work_struct work;
+	ktime_t start_timestamp;
+
+	struct base_jd_udata udata;
+	struct kbase_context *kctx;
+
+	struct list_head dep_head[2];
+	struct list_head dep_item[2];
+	const struct kbase_jd_atom_dependency dep[2];
+	struct list_head jd_item;
+	bool in_jd_list;
+
+	u16 nr_extres;
+	struct kbase_ext_res *extres;
+
+	u32 device_nr;
+	u64 jc;
+	void *softjob_data;
+#if defined(CONFIG_SYNC)
+	struct sync_fence *fence;
+	struct sync_fence_waiter sync_waiter;
+#endif				/* CONFIG_SYNC */
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+	struct {
+		/* Use the functions/API defined in mali_kbase_fence.h
+		 * when working with this sub struct */
+#if defined(CONFIG_SYNC_FILE)
+		/* Input fence */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+		struct fence *fence_in;
+#else
+		struct dma_fence *fence_in;
+#endif
+#endif
+		/* This points to the dma-buf output fence for this atom. If
+		 * this is NULL then there is no fence for this atom and the
+		 * following fields related to dma_fence may have invalid data.
+		 *
+		 * The context and seqno fields contain the details for this
+		 * fence.
+		 *
+		 * This fence is signaled when the katom is completed,
+		 * regardless of the event_code of the katom (signal also on
+		 * failure).
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+		struct fence *fence;
+#else
+		struct dma_fence *fence;
+#endif
+		/* The dma-buf fence context number for this atom. A unique
+		 * context number is allocated to each katom in the context on
+		 * context creation.
+		 */
+		unsigned int context;
+		/* The dma-buf fence sequence number for this atom. This is
+		 * increased every time this katom uses dma-buf fence.
+		 */
+		atomic_t seqno;
+		/* This contains a list of all callbacks set up to wait on
+		 * other fences.  This atom must be held back from JS until all
+		 * these callbacks have been called and dep_count have reached
+		 * 0. The initial value of dep_count must be equal to the
+		 * number of callbacks on this list.
+		 *
+		 * This list is protected by jctx.lock. Callbacks are added to
+		 * this list when the atom is built and the wait are set up.
+		 * All the callbacks then stay on the list until all callbacks
+		 * have been called and the atom is queued, or cancelled, and
+		 * then all callbacks are taken off the list and freed.
+		 */
+		struct list_head callbacks;
+		/* Atomic counter of number of outstanding dma-buf fence
+		 * dependencies for this atom. When dep_count reaches 0 the
+		 * atom may be queued.
+		 *
+		 * The special value "-1" may only be set after the count
+		 * reaches 0, while holding jctx.lock. This indicates that the
+		 * atom has been handled, either queued in JS or cancelled.
+		 *
+		 * If anyone but the dma-fence worker sets this to -1 they must
+		 * ensure that any potentially queued worker must have
+		 * completed before allowing the atom to be marked as unused.
+		 * This can be done by flushing the fence work queue:
+		 * kctx->dma_fence.wq.
+		 */
+		atomic_t dep_count;
+	} dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE*/
+
+	/* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
+	enum base_jd_event_code event_code;
+	base_jd_core_req core_req;
+
+	u32 ticks;
+	int sched_priority;
+
+	int poking;
+
+	wait_queue_head_t completed;
+	enum kbase_jd_atom_state status;
+#ifdef CONFIG_GPU_TRACEPOINTS
+	int work_id;
+#endif
+	int slot_nr;
+
+	u32 atom_flags;
+
+	int retry_count;
+
+	enum kbase_atom_gpu_rb_state gpu_rb_state;
+
+	bool need_cache_flush_cores_retained;
+
+	atomic_t blocked;
+
+	struct kbase_jd_atom *pre_dep;
+	struct kbase_jd_atom *post_dep;
+
+	struct kbase_jd_atom *x_pre_dep;
+	struct kbase_jd_atom *x_post_dep;
+
+	u32 flush_id;
+
+#ifdef CONFIG_DEBUG_FS
+	struct base_job_fault_event fault_event;
+#endif
+
+	struct list_head queue;
+
+	struct list_head jit_node;
+	bool jit_blocked;
+
+	enum base_jd_event_code will_fail_event_code;
+
+	union {
+		enum kbase_atom_enter_protected_state enter;
+		enum kbase_atom_exit_protected_state exit;
+	} protected_state;
+
+	struct rb_node runnable_tree_node;
+
+	u32 age;
+};
+
+/**
+ * struct kbase_debug_copy_buffer - information about the buffer to be copied.
+ *
+ * @size:	size of the buffer in bytes
+ * @pages:	pointer to an array of pointers to the pages which contain
+ *		the buffer
+ * @is_vmalloc: true if @pages was allocated with vzalloc. false if @pages was
+ *              allocated with kcalloc
+ * @nr_pages:	number of pages
+ * @offset:	offset into the pages
+ * @gpu_alloc:	pointer to physical memory allocated by the GPU
+ * @extres_pages: array of pointers to the pages containing external resources
+ *		for this buffer
+ * @nr_extres_pages: number of pages in @extres_pages
+ */
+struct kbase_debug_copy_buffer {
+	size_t size;
+	struct page **pages;
+	bool is_vmalloc;
+	int nr_pages;
+	size_t offset;
+	struct kbase_mem_phy_alloc *gpu_alloc;
+
+	struct page **extres_pages;
+	int nr_extres_pages;
+};
+
+static inline bool kbase_jd_katom_is_protected(const struct kbase_jd_atom *katom)
+{
+	return (bool)(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED);
+}
+
+/*
+ * Theory of operations:
+ *
+ * Atom objects are statically allocated within the context structure.
+ *
+ * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
+ */
+
+#define KBASE_JD_DEP_QUEUE_SIZE 256
+
+/**
+ * struct kbase_jd_context  - per context object encapsulating all the Job dispatcher
+ *                            related state.
+ * @lock:                     lock to serialize the updates made to the Job dispatcher
+ *                            state and kbase_jd_atom objects.
+ * @sched_info:               Structure encapsulating all the Job scheduling info.
+ * @atoms:                    Array of the objects representing atoms, containing
+ *                            the complete state and attributes of an atom.
+ * @job_nr:                   Tracks the number of atoms being processed by the
+ *                            kbase. This includes atoms that are not tracked by
+ *                            scheduler: 'not ready to run' & 'dependency-only' jobs.
+ * @zero_jobs_wait:           Waitq that reflects whether there are no jobs
+ *                            (including SW-only dependency jobs). This is set
+ *                            when no jobs are present on the ctx, and clear when
+ *                            there are jobs.
+ *                            This must be updated atomically with @job_nr.
+ *                            note: Job Dispatcher knows about more jobs than the
+ *                            Job Scheduler as it is unaware of jobs that are
+ *                            blocked on dependencies and SW-only dependency jobs.
+ *                            This waitq can be waited upon to find out when the
+ *                            context jobs are all done/cancelled (including those
+ *                            that might've been blocked on dependencies) - and so,
+ *                            whether it can be terminated. However, it should only
+ *                            be terminated once it is not present in the run-pool.
+ *                            Since the waitq is only set under @lock, the waiter
+ *                            should also briefly obtain and drop @lock to guarantee
+ *                            that the setter has completed its work on the kbase_context
+ * @job_done_wq:              Workqueue to which the per atom work item is queued
+ *                            for bottom half processing when the atom completes
+ *                            execution on GPU or the input fence get signaled.
+ * @tb_lock:                  Lock to serialize the write access made to @tb to
+ *                            store the register access trace messages.
+ * @tb:                       Pointer to the Userspace accessible buffer storing
+ *                            the trace messages for register read/write accesses
+ *                            made by the Kbase. The buffer is filled in circular
+ *                            fashion.
+ * @tb_wrap_offset:           Offset to the end location in the trace buffer, the
+ *                            write pointer is moved to the beginning on reaching
+ *                            this offset.
+ * @work_id:                  atomic variable used for GPU tracepoints, incremented
+ *                            on every call to base_jd_submit.
+ */
+struct kbase_jd_context {
+	struct mutex lock;
+	struct kbasep_js_kctx_info sched_info;
+	struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
+
+	u32 job_nr;
+
+	wait_queue_head_t zero_jobs_wait;
+
+	struct workqueue_struct *job_done_wq;
+
+	spinlock_t tb_lock;
+	u32 *tb;
+	size_t tb_wrap_offset;
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+	atomic_t work_id;
+#endif
+};
+
+struct kbase_device_info {
+	u32 features;
+};
+
+/** Poking state for BASE_HW_ISSUE_8316  */
+enum {
+	KBASE_AS_POKE_STATE_IN_FLIGHT     = 1<<0,
+	KBASE_AS_POKE_STATE_KILLING_POKE  = 1<<1
+};
+
+/** Poking state for BASE_HW_ISSUE_8316  */
+typedef u32 kbase_as_poke_state;
+
+struct kbase_mmu_setup {
+	u64	transtab;
+	u64	memattr;
+	u64	transcfg;
+};
+
+/**
+ * struct kbase_fault - object containing data relating to a page or bus fault.
+ * @addr:           Records the faulting address.
+ * @extra_addr:     Records the secondary fault address.
+ * @status:         Records the fault status as reported by Hw.
+ * @protected_mode: Flag indicating whether the fault occurred in protected mode
+ *                  or not.
+ */
+struct kbase_fault {
+	u64 addr;
+	u64 extra_addr;
+	u32 status;
+	bool protected_mode;
+};
+
+/**
+ * struct kbase_as   - object representing an address space of GPU.
+ * @number:            Index at which this address space structure is present
+ *                     in an array of address space structures embedded inside the
+ *                     struct kbase_device.
+ * @pf_wq:             Workqueue for processing work items related to Bus fault
+ *                     and Page fault handling.
+ * @work_pagefault:    Work item for the Page fault handling.
+ * @work_busfault:     Work item for the Bus fault handling.
+ * @fault_type:        Type of fault which occurred for this address space,
+ *                     regular/unexpected Bus or Page fault.
+ * @pf_data:           Data relating to page fault.
+ * @bf_data:           Data relating to bus fault.
+ * @current_setup:     Stores the MMU configuration for this address space.
+ * @poke_wq:           Workqueue to process the work items queue for poking the
+ *                     MMU as a WA for BASE_HW_ISSUE_8316.
+ * @poke_work:         Work item to do the poking of MMU for this address space.
+ * @poke_refcount:     Refcount for the need of poking MMU. While the refcount is
+ *                     non zero the poking of MMU will continue.
+ *                     Protected by hwaccess_lock.
+ * @poke_state:        State indicating whether poking is in progress or it has
+ *                     been stopped. Protected by hwaccess_lock.
+ * @poke_timer:        Timer used to schedule the poking at regular intervals.
+ */
+struct kbase_as {
+	int number;
+	struct workqueue_struct *pf_wq;
+	struct work_struct work_pagefault;
+	struct work_struct work_busfault;
+	enum kbase_mmu_fault_type fault_type;
+	struct kbase_fault pf_data;
+	struct kbase_fault bf_data;
+	struct kbase_mmu_setup current_setup;
+	struct workqueue_struct *poke_wq;
+	struct work_struct poke_work;
+	int poke_refcount;
+	kbase_as_poke_state poke_state;
+	struct hrtimer poke_timer;
+};
+
+/**
+ * struct kbase_mmu_table  - object representing a set of GPU page tables
+ * @mmu_teardown_pages:   Buffer of 4 Pages in size, used to cache the entries
+ *                        of top & intermediate level page tables to avoid
+ *                        repeated calls to kmap_atomic during the MMU teardown.
+ * @mmu_lock:             Lock to serialize the accesses made to multi level GPU
+ *                        page tables
+ * @pgd:                  Physical address of the page allocated for the top
+ *                        level page table of the context, this is used for
+ *                        MMU HW programming as the address translation will
+ *                        start from the top level page table.
+ * @kctx:                 If this set of MMU tables belongs to a context then
+ *                        this is a back-reference to the context, otherwise
+ *                        it is NULL
+ */
+struct kbase_mmu_table {
+	u64 *mmu_teardown_pages;
+	struct mutex mmu_lock;
+	phys_addr_t pgd;
+	struct kbase_context *kctx;
+};
+
+static inline int kbase_as_has_bus_fault(struct kbase_as *as)
+{
+	return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS;
+}
+
+static inline int kbase_as_has_page_fault(struct kbase_as *as)
+{
+	return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE;
+}
+
+struct kbasep_mem_device {
+	atomic_t used_pages;   /* Tracks usage of OS shared memory. Updated
+				   when OS memory is allocated/freed. */
+
+};
+
+#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
+
+enum kbase_trace_code {
+	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+	 * THIS MUST BE USED AT THE START OF THE ENUM */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
+#include "mali_kbase_trace_defs.h"
+#undef  KBASE_TRACE_CODE_MAKE_CODE
+	/* Comma on its own, to extend the list */
+	,
+	/* Must be the last in the enum */
+	KBASE_TRACE_CODE_COUNT
+};
+
+#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
+#define KBASE_TRACE_FLAG_JOBSLOT  (((u8)1) << 1)
+
+/**
+ * struct kbase_trace - object representing a trace message added to trace buffer
+ *                      kbase_device::trace_rbuf
+ * @timestamp:          CPU timestamp at which the trace message was added.
+ * @thread_id:          id of the thread in the context of which trace message
+ *                      was added.
+ * @cpu:                indicates which CPU the @thread_id was scheduled on when
+ *                      the trace message was added.
+ * @ctx:                Pointer to the kbase context for which the trace message
+ *                      was added. Will be NULL for certain trace messages like
+ *                      for traces added corresponding to power management events.
+ *                      Will point to the appropriate context corresponding to
+ *                      job-slot & context's reference count related events.
+ * @katom:              indicates if the trace message has atom related info.
+ * @atom_number:        id of the atom for which trace message was added.
+ *                      Only valid if @katom is true.
+ * @atom_udata:         Copy of the user data sent for the atom in base_jd_submit.
+ *                      Only valid if @katom is true.
+ * @gpu_addr:           GPU address of the job-chain represented by atom. Could
+ *                      be valid even if @katom is false.
+ * @info_val:           value specific to the type of event being traced. For the
+ *                      case where @katom is true, will be set to atom's affinity,
+ *                      i.e. bitmask of shader cores chosen for atom's execution.
+ * @code:               Identifies the event, refer enum kbase_trace_code.
+ * @jobslot:            job-slot for which trace message was added, valid only for
+ *                      job-slot management events.
+ * @refcount:           reference count for the context, valid for certain events
+ *                      related to scheduler core and policy.
+ * @flags:              indicates if info related to @jobslot & @refcount is present
+ *                      in the trace message, used during dumping of the message.
+ */
+struct kbase_trace {
+	struct timespec64 timestamp;
+	u32 thread_id;
+	u32 cpu;
+	void *ctx;
+	bool katom;
+	int atom_number;
+	u64 atom_udata[2];
+	u64 gpu_addr;
+	unsigned long info_val;
+	u8 code;
+	u8 jobslot;
+	u8 refcount;
+	u8 flags;
+};
+
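+/* Links a kbase_context into the device-wide kctx_list */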
+struct kbasep_kctx_list_element {
+	struct list_head link;
+	struct kbase_context *kctx;
+};
+
+/**
+ * Data stored per device for power management.
+ *
+ * This structure contains data for the power management framework. There is one
+ * instance of this structure per device in the system.
+ */
+struct kbase_pm_device_data {
+	/**
+	 * The lock protecting Power Management structures accessed outside of
+	 * IRQ.
+	 *
+	 * This lock must also be held whenever the GPU is being powered on or
+	 * off.
+	 */
+	struct mutex lock;
+
+	/**
+	 * The reference count of active contexts on this device. Note that
+	 * some code paths keep shaders/the tiler powered whilst this is 0. Use
+	 * kbase_pm_is_active() instead to check for such cases.
+	 */
+	int active_count;
+	/** Flag indicating suspending/suspended */
+	bool suspending;
+	/* Wait queue set when active_count == 0 */
+	wait_queue_head_t zero_active_count_wait;
+
+	/**
+	 * Bit masks identifying the available shader cores that are specified
+	 * via sysfs. One mask per job slot.
+	 */
+	u64 debug_core_mask[BASE_JM_MAX_NR_SLOTS];
+	u64 debug_core_mask_all;
+
+	/**
+	 * Callback for initializing the runtime power management.
+	 *
+	 * @param kbdev The kbase device
+	 *
+	 * @return 0 on success, else error code
+	 */
+	 int (*callback_power_runtime_init)(struct kbase_device *kbdev);
+
+	/**
+	 * Callback for terminating the runtime power management.
+	 *
+	 * @param kbdev The kbase device
+	 */
+	void (*callback_power_runtime_term)(struct kbase_device *kbdev);
+
+	/* Time in milliseconds between each dvfs sample */
+	u32 dvfs_period;
+
+	struct kbase_pm_backend_data backend;
+};
+
+/**
+ * struct kbase_mem_pool - Page based memory pool for kctx/kbdev
+ * @kbdev:        Kbase device where memory is used
+ * @cur_size:     Number of free pages currently in the pool (may exceed
+ *                @max_size in some corner cases)
+ * @max_size:     Maximum number of free pages in the pool
+ * @order:        order = 0 refers to a pool of 4 KB pages
+ *                order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
+ * @pool_lock:    Lock protecting the pool - must be held when modifying
+ *                @cur_size and @page_list
+ * @page_list:    List of free pages in the pool
+ * @reclaim:      Shrinker for kernel reclaim of free pages
+ * @next_pool:    Pointer to next pool where pages can be allocated when this
+ *                pool is empty. Pages will spill over to the next pool when
+ *                this pool is full. Can be NULL if there is no next pool.
+ * @dying:        true if the pool is being terminated, and any ongoing
+ *                operations should be abandoned
+ * @dont_reclaim: true if the shrinker is forbidden from reclaiming memory from
+ *                this pool, eg during a grow operation
+ */
+struct kbase_mem_pool {
+	struct kbase_device *kbdev;
+	size_t              cur_size;
+	size_t              max_size;
+	size_t		    order;
+	spinlock_t          pool_lock;
+	struct list_head    page_list;
+	struct shrinker     reclaim;
+
+	struct kbase_mem_pool *next_pool;
+
+	bool dying;
+	bool dont_reclaim;
+};
+
+/**
+ * struct kbase_devfreq_opp - Lookup table for converting between nominal OPP
+ *                            frequency, and real frequency and core mask
+ * @opp_freq:  Nominal OPP frequency
+ * @real_freq: Real GPU frequency
+ * @core_mask: Shader core mask
+ */
+struct kbase_devfreq_opp {
+	u64 opp_freq;
+	u64 real_freq;
+	u64 core_mask;
+};
+
+/* MMU mode flags */
+#define KBASE_MMU_MODE_HAS_NON_CACHEABLE (1ul << 0) /* Has NON_CACHEABLE MEMATTR */
+
+/**
+ * struct kbase_mmu_mode - object containing pointer to methods invoked for
+ *                         programming the MMU, as per the MMU mode supported
+ *                         by Hw.
+ * @update:           enable & setup/configure one of the GPU address spaces.
+ * @get_as_setup:     retrieve the configuration of one of the GPU address spaces.
+ * @disable_as:       disable one of the GPU address spaces.
+ * @pte_to_phy_addr:  retrieve the physical address encoded in the page table entry.
+ * @ate_is_valid:     check if the pte is a valid address translation entry
+ *                    encoding the physical address of the actual mapped page.
+ * @pte_is_valid:     check if the pte is a valid entry encoding the physical
+ *                    address of the next lower level page table.
+ * @entry_set_ate:    program the pte to be a valid address translation entry to
+ *                    encode the physical address of the actual page being mapped.
+ * @entry_set_pte:    program the pte to be a valid entry to encode the physical
+ *                    address of the next lower level page table.
+ * @entry_invalidate: clear out or invalidate the pte.
+ * @flags:            bitmask of MMU mode flags. Refer to KBASE_MMU_MODE_ constants.
+ */
+struct kbase_mmu_mode {
+	void (*update)(struct kbase_device *kbdev,
+			struct kbase_mmu_table *mmut,
+			int as_nr);
+	void (*get_as_setup)(struct kbase_mmu_table *mmut,
+			struct kbase_mmu_setup * const setup);
+	void (*disable_as)(struct kbase_device *kbdev, int as_nr);
+	phys_addr_t (*pte_to_phy_addr)(u64 entry);
+	int (*ate_is_valid)(u64 ate, unsigned int level);
+	int (*pte_is_valid)(u64 pte, unsigned int level);
+	void (*entry_set_ate)(u64 *entry, struct tagged_addr phy,
+			unsigned long flags, unsigned int level);
+	void (*entry_set_pte)(u64 *entry, phys_addr_t phy);
+	void (*entry_invalidate)(u64 *entry);
+	unsigned long flags;
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void);
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
+
+
+#define DEVNAME_SIZE	16
+
+
+/**
+ * struct kbase_device   - Object representing an instance of GPU platform device,
+ *                         allocated from the probe method of mali driver.
+ * @hw_quirks_sc:          Configuration to be used for the shader cores as per
+ *                         the HW issues present in the GPU.
+ * @hw_quirks_tiler:       Configuration to be used for the Tiler as per the HW
+ *                         issues present in the GPU.
+ * @hw_quirks_mmu:         Configuration to be used for the MMU as per the HW
+ *                         issues present in the GPU.
+ * @hw_quirks_jm:          Configuration to be used for the Job Manager as per
+ *                         the HW issues present in the GPU.
+ * @entry:                 Links the device instance to the global list of GPU
+ *                         devices. The list would have as many entries as there
+ *                         are GPU device instances.
+ * @dev:                   Pointer to the kernel's generic/base representation
+ *                         of the GPU platform device.
+ * @mdev:                  Pointer to the miscellaneous device registered to
+ *                         provide Userspace access to kernel driver through the
+ *                         device file /dev/malixx.
+ * @reg_start:             Base address of the region in physical address space
+ *                         where GPU registers have been mapped.
+ * @reg_size:              Size of the region containing GPU registers
+ * @reg:                   Kernel virtual address of the region containing GPU
+ *                         registers, using which Driver will access the registers.
+ * @irqs:                  Array containing IRQ resource info for 3 types of
+ *                         interrupts : Job scheduling, MMU & GPU events (like
+ *                         power management, cache etc.)
+ * @clock:                 Pointer to the input clock resource (having an id of 0),
+ *                         referenced by the GPU device node.
+ * @regulator:             Pointer to the struct corresponding to the regulator
+ *                         for GPU device
+ * @devname:               string containing the name used for GPU device instance,
+ *                         miscellaneous device is registered using the same name.
+ * @model:                 Pointer, valid only when Driver is compiled to not access
+ *                         the real GPU Hw, to the dummy model which tries to mimic
+ *                         to some extent the state & behavior of GPU Hw in response
+ *                         to the register accesses made by the Driver.
+ * @irq_slab:              slab cache for allocating the work items queued when
+ *                         model mimics raising of IRQ to cause an interrupt on CPU.
+ * @irq_workq:             workqueue for processing the irq work items.
+ * @serving_job_irq:       function to execute work items queued when model mimics
+ *                         the raising of JS irq, mimics the interrupt handler
+ *                         processing JS interrupts.
+ * @serving_gpu_irq:       function to execute work items queued when model mimics
+ *                         the raising of GPU irq, mimics the interrupt handler
+ *                         processing GPU interrupts.
+ * @serving_mmu_irq:       function to execute work items queued when model mimics
+ *                         the raising of MMU irq, mimics the interrupt handler
+ *                         processing MMU interrupts.
+ * @reg_op_lock:           lock used by model to serialize the handling of register
+ *                         accesses made by the driver.
+ * @pm:                    Per device object for storing data for power management
+ *                         framework.
+ * @js_data:               Per device object encapsulating the current context of
+ *                         Job Scheduler, which is global to the device and is not
+ *                         tied to any particular struct kbase_context running on
+ *                         the device
+ * @mem_pool:              Object containing the state for global pool of 4KB size
+ *                         physical pages which can be used by all the contexts.
+ * @lp_mem_pool:           Object containing the state for global pool of 2MB size
+ *                         physical pages which can be used by all the contexts.
+ * @memdev:                keeps track of the in use physical pages allocated by
+ *                         the Driver.
+ * @mmu_mode:              Pointer to the object containing methods for programming
+ *                         the MMU, depending on the type of MMU supported by Hw.
+ * @as:                    Array of objects representing address spaces of GPU.
+ * @as_free:               Bitpattern of free/available GPU address spaces.
+ * @as_to_kctx:            Array of pointers to struct kbase_context, having
+ *                         GPU address spaces assigned to them.
+ * @mmu_mask_change:       Lock to serialize the access to MMU interrupt mask
+ *                         register used in the handling of Bus & Page faults.
+ * @gpu_props:             Object containing complete information about the
+ *                         configuration/properties of GPU HW device in use.
+ * @hw_issues_mask:        List of SW workarounds for HW issues
+ * @hw_features_mask:      List of available HW features.
+ * @disjoint_event:        struct for keeping track of the disjoint information,
+ *                         that whether the GPU is in a disjoint state and the
+ *                         number of disjoint events that have occurred on GPU.
+ * @nr_hw_address_spaces:  Number of address spaces actually available in the
+ *                         GPU, remains constant after driver initialisation.
+ * @nr_user_address_spaces: Number of address spaces available to user contexts
+ * @hwcnt:                  Structure used for instrumentation and HW counters
+ *                         dumping
+ * @hwcnt_gpu_iface:       Backend interface for GPU hardware counter access.
+ * @hwcnt_gpu_ctx:         Context for GPU hardware counter access.
+ *                         @hwaccess_lock must be held when calling
+ *                         kbase_hwcnt_context_enable() with @hwcnt_gpu_ctx.
+ * @hwcnt_gpu_virt:        Virtualizer for GPU hardware counters.
+ * @vinstr_ctx:            vinstr context created per device
+ * @trace_lock:            Lock to serialize the access to trace buffer.
+ * @trace_first_out:       Index/offset in the trace buffer at which the first
+ *                         unread message is present.
+ * @trace_next_in:         Index/offset in the trace buffer at which the new
+ *                         message will be written.
+ * @trace_rbuf:            Pointer to the buffer storing debug messages/prints
+ *                         tracing the various events in Driver.
+ *                         The buffer is filled in circular fashion.
+ * @reset_timeout_ms:      Number of milliseconds to wait for the soft stop to
+ *                         complete for the GPU jobs before proceeding with the
+ *                         GPU reset.
+ * @cache_clean_in_progress: Set when a cache clean has been started, and
+ *                         cleared when it has finished. This prevents multiple
+ *                         cache cleans being done simultaneously.
+ * @cache_clean_queued:    Set if a cache clean is invoked while another is in
+ *                         progress. If this happens, another cache clean needs
+ *                         to be triggered immediately after completion of the
+ *                         current one.
+ * @cache_clean_wait:      Signalled when a cache clean has finished.
+ * @platform_context:      Platform specific private data to be accessed by
+ *                         platform specific config files only.
+ * @kctx_list:             List of kbase_contexts created for the device, including
+ *                         the kbase_context created for vinstr_ctx.
+ * @kctx_list_lock:        Lock protecting concurrent accesses to @kctx_list.
+ * @devfreq_profile:       Describes devfreq profile for the Mali GPU device, passed
+ *                         to devfreq_add_device() to add devfreq feature to Mali
+ *                         GPU device.
+ * @devfreq:               Pointer to devfreq structure for Mali GPU device,
+ *                         returned on the call to devfreq_add_device().
+ * @current_freq:          The real frequency, corresponding to @current_nominal_freq,
+ *                         at which the Mali GPU device is currently operating, as
+ *                         retrieved from @opp_table in the target callback of
+ *                         @devfreq_profile.
+ * @current_nominal_freq:  The nominal frequency currently used for the Mali GPU
+ *                         device as retrieved through devfreq_recommended_opp()
+ *                         using the freq value passed as an argument to target
+ *                         callback of @devfreq_profile
+ * @current_voltage:       The voltage corresponding to @current_nominal_freq, as
+ *                         retrieved through dev_pm_opp_get_voltage().
+ * @current_core_mask:     bitmask of shader cores that are currently desired &
+ *                         enabled, corresponding to @current_nominal_freq as
+ *                         retrieved from @opp_table in the target callback of
+ *                         @devfreq_profile.
+ * @opp_table:             Pointer to the lookup table for converting between nominal
+ *                         OPP (operating performance point) frequency, and real
+ *                         frequency and core mask. This table is constructed according
+ *                         to operating-points-v2-mali table in devicetree.
+ * @num_opps:              Number of operating performance points available for the Mali
+ *                         GPU device.
+ * @devfreq_cooling:       Pointer returned on registering devfreq cooling device
+ *                         corresponding to @devfreq.
+ * @ipa_protection_mode_switched: is set to TRUE when GPU is put into protected
+ *                         mode. It is a sticky flag which is cleared by IPA
+ *                         once it has made use of information that GPU had
+ *                         previously entered protected mode.
+ * @ipa:                   Top level structure for IPA, containing pointers to both
+ *                         configured & fallback models.
+ * @timeline:              Stores the global timeline tracking information.
+ * @job_fault_debug:       Flag to control the dumping of debug data for job faults,
+ *                         set when the 'job_fault' debugfs file is opened.
+ * @mali_debugfs_directory: Root directory for the debugfs files created by the driver
+ * @debugfs_ctx_directory: Directory inside the @mali_debugfs_directory containing
+ *                         a sub-directory for every context.
+ * @debugfs_as_read_bitmap: bitmap of address spaces for which the bus or page fault
+ *                         has occurred.
+ * @job_fault_wq:          Waitqueue to block the job fault dumping daemon till the
+ *                         occurrence of a job fault.
+ * @job_fault_resume_wq:   Waitqueue on which every context with a faulty job waits
+ *                         for the job fault dumping to complete before it can
+ *                         do the bottom half of job done processing for the atoms
+ *                         which followed the faulty atom.
+ * @job_fault_resume_workq: workqueue to process the work items queued for the faulty
+ *                         atoms, whereby the work item function waits for the dumping
+ *                         to get completed.
+ * @job_fault_event_list:  List of atoms, each belonging to a different context, which
+ *                         generated a job fault.
+ * @job_fault_event_lock:  Lock to protect concurrent accesses to @job_fault_event_list
+ * @regs_dump_debugfs_data: Contains the offset of register to be read through debugfs
+ *                         file "read_register".
+ * @force_replay_limit:    Number of gpu jobs, having replay atoms associated with them,
+ *                         that are run before a job is forced to fail and replay.
+ *                         Set to 0 to disable forced failures.
+ * @force_replay_count:    Count of gpu jobs, having replay atoms associated with them,
+ *                         between forced failures. Incremented on each gpu job which
+ *                         has replay atoms dependent on it. A gpu job is forced to
+ *                         fail once this is greater than or equal to @force_replay_limit
+ * @force_replay_core_req: Core requirements, set through the sysfs file, for the replay
+ *                         job atoms to consider the associated gpu job for forceful
+ *                         failure and replay. May be zero
+ * @force_replay_random:   Set to 1 to randomize the @force_replay_limit, in the
+ *                         range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT.
+ * @ctx_num:               Total number of contexts created for the device.
+ * @io_history:            Pointer to an object keeping a track of all recent
+ *                         register accesses. The history of register accesses
+ *                         can be read through "regs_history" debugfs file.
+ * @hwaccess:              Contains a pointer to active kbase context and GPU
+ *                         backend specific data for HW access layer.
+ * @faults_pending:        Count of page/bus faults waiting for bottom half processing
+ *                         via workqueues.
+ * @poweroff_pending:      Set when power off operation for GPU is started, reset when
+ *                         power on for GPU is started.
+ * @infinite_cache_active_default: Set to enable using infinite cache for all the
+ *                         allocations of a new context.
+ * @mem_pool_max_size_default: Initial/default value for the maximum size of both
+ *                         types of pool created for a new context.
+ * @current_gpu_coherency_mode: coherency mode in use, which can be different
+ *                         from @system_coherency, when using protected mode.
+ * @system_coherency:      coherency mode as retrieved from the device tree.
+ * @cci_snoop_enabled:     Flag to track when CCI snoops have been enabled.
+ * @snoop_enable_smc:      SMC function ID to call into Trusted firmware to
+ *                         enable cache snooping. Value of 0 indicates that it
+ *                         is not used.
+ * @snoop_disable_smc:     SMC function ID to call to disable cache snooping.
+ * @protected_ops:         Pointer to the methods for switching in or out of the
+ *                         protected mode, as per the @protected_dev being used.
+ * @protected_dev:         Pointer to the protected mode switcher device attached
+ *                         to the GPU device, retrieved through the device tree,
+ *                         used when the GPU does not support protected mode
+ *                         switching natively.
+ * @protected_mode:        set to TRUE when GPU is put into protected mode
+ * @protected_mode_transition: set to TRUE when GPU is transitioning into or
+ *                         out of protected mode.
+ * @protected_mode_hwcnt_desired: True if we want GPU hardware counters to be
+ *                         enabled. Counters must be disabled before transition
+ *                         into protected mode.
+ * @protected_mode_hwcnt_disabled: True if GPU hardware counters are not
+ *                         enabled.
+ * @protected_mode_hwcnt_disable_work: Work item to disable GPU hardware
+ *                         counters, used if atomic disable is not possible.
+ * @protected_mode_support: set to true if protected mode is supported.
+ * @buslogger:              Pointer to the structure required for interfacing
+ *                          with the bus logger module to set the size of buffer
+ *                          used by the module for capturing bus logs.
+ * @irq_reset_flush:        Flag to indicate that GPU reset is in-flight and flush of
+ *                          IRQ + bottom half is being done, to prevent the writes
+ *                          to MMU_IRQ_CLEAR & MMU_IRQ_MASK registers.
+ * @inited_subsys:          Bitmap of inited sub systems at the time of device probe.
+ *                          Used during device remove or for handling error in probe.
+ * @hwaccess_lock:          Lock, which can be taken from IRQ context, to serialize
+ *                          the updates made to Job dispatcher + scheduler states.
+ * @mmu_hw_mutex:           Protects access to MMU operations and address space
+ *                          related state.
+ * @serialize_jobs:         Currently used mode for serialization of jobs, both
+ *                          intra & inter slots serialization is supported.
+ * @backup_serialize_jobs:  Copy of the original value of @serialize_jobs taken
+ *                          when GWT is enabled. Used to restore the original value
+ *                          on disabling of GWT.
+ * @js_ctx_scheduling_mode: Context scheduling mode currently being used by
+ *                          Job Scheduler
+ */
+struct kbase_device {
+	u32 hw_quirks_sc;
+	u32 hw_quirks_tiler;
+	u32 hw_quirks_mmu;
+	u32 hw_quirks_jm;
+
+	struct list_head entry;
+	struct device *dev;
+	struct miscdevice mdev;
+	u64 reg_start;
+	size_t reg_size;
+	void __iomem *reg;
+
+	struct {
+		int irq;
+		int flags;
+	} irqs[3];
+
+	struct clk *clock;
+#ifdef CONFIG_REGULATOR
+	struct regulator *regulator;
+#endif
+	char devname[DEVNAME_SIZE];
+
+#ifdef CONFIG_MALI_NO_MALI
+	void *model;
+	struct kmem_cache *irq_slab;
+	struct workqueue_struct *irq_workq;
+	atomic_t serving_job_irq;
+	atomic_t serving_gpu_irq;
+	atomic_t serving_mmu_irq;
+	spinlock_t reg_op_lock;
+#endif	/* CONFIG_MALI_NO_MALI */
+
+	struct kbase_pm_device_data pm;
+	struct kbasep_js_device_data js_data;
+	struct kbase_mem_pool mem_pool;
+	struct kbase_mem_pool lp_mem_pool;
+	struct kbasep_mem_device memdev;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	struct kbase_as as[BASE_MAX_NR_AS];
+	u16 as_free; /* Bitpattern of free Address Spaces */
+	struct kbase_context *as_to_kctx[BASE_MAX_NR_AS];
+
+	spinlock_t mmu_mask_change;
+
+	struct kbase_gpu_props gpu_props;
+
+	unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+	unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
+
+	struct {
+		atomic_t count;
+		atomic_t state;
+	} disjoint_event;
+
+	s8 nr_hw_address_spaces;
+	s8 nr_user_address_spaces;
+
+	struct kbase_hwcnt {
+		/* The lock should be used when accessing any of the following members */
+		spinlock_t lock;
+
+		struct kbase_context *kctx;
+		u64 addr;
+		u64 addr_bytes;
+
+		struct kbase_instr_backend backend;
+	} hwcnt;
+
+	struct kbase_hwcnt_backend_interface hwcnt_gpu_iface;
+	struct kbase_hwcnt_context *hwcnt_gpu_ctx;
+	struct kbase_hwcnt_virtualizer *hwcnt_gpu_virt;
+	struct kbase_vinstr_context *vinstr_ctx;
+
+#if KBASE_TRACE_ENABLE
+	spinlock_t              trace_lock;
+	u16                     trace_first_out;
+	u16                     trace_next_in;
+	struct kbase_trace            *trace_rbuf;
+#endif
+
+	u32 reset_timeout_ms;
+
+	bool cache_clean_in_progress;
+	bool cache_clean_queued;
+	wait_queue_head_t cache_clean_wait;
+
+	void *platform_context;
+
+	struct list_head        kctx_list;
+	struct mutex            kctx_list_lock;
+
+#ifdef CONFIG_MALI_DEVFREQ
+	struct devfreq_dev_profile devfreq_profile;
+	struct devfreq *devfreq;
+	unsigned long current_freq;
+	unsigned long current_nominal_freq;
+	unsigned long current_voltage;
+	u64 current_core_mask;
+	struct kbase_devfreq_opp *opp_table;
+	int num_opps;
+	struct kbasep_pm_metrics last_devfreq_metrics;
+#ifdef CONFIG_DEVFREQ_THERMAL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+	struct devfreq_cooling_device *devfreq_cooling;
+#else
+	struct thermal_cooling_device *devfreq_cooling;
+#endif
+	bool ipa_protection_mode_switched;
+	struct {
+		/* Access to this struct must be with ipa.lock held */
+		struct mutex lock;
+		struct kbase_ipa_model *configured_model;
+		struct kbase_ipa_model *fallback_model;
+
+		/* Values of the PM utilization metrics from last time the
+		 * power model was invoked. The utilization is calculated as
+		 * the difference between last_metrics and the current values.
+		 */
+		struct kbasep_pm_metrics last_metrics;
+		/* Model data to pass to ipa_gpu_active/idle() */
+		struct kbase_ipa_model_vinstr_data *model_data;
+
+		/* true if use of fallback model has been forced by the User */
+		bool force_fallback_model;
+	} ipa;
+#endif /* CONFIG_DEVFREQ_THERMAL */
+#endif /* CONFIG_MALI_DEVFREQ */
+
+	bool job_fault_debug;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *mali_debugfs_directory;
+	struct dentry *debugfs_ctx_directory;
+
+#ifdef CONFIG_MALI_DEBUG
+	u64 debugfs_as_read_bitmap;
+#endif /* CONFIG_MALI_DEBUG */
+
+	wait_queue_head_t job_fault_wq;
+	wait_queue_head_t job_fault_resume_wq;
+	struct workqueue_struct *job_fault_resume_workq;
+	struct list_head job_fault_event_list;
+	spinlock_t job_fault_event_lock;
+
+#if !MALI_CUSTOMER_RELEASE
+	struct {
+		u16 reg_offset;
+	} regs_dump_debugfs_data;
+#endif /* !MALI_CUSTOMER_RELEASE */
+#endif /* CONFIG_DEBUG_FS */
+
+
+#if MALI_CUSTOMER_RELEASE == 0
+	int force_replay_limit;
+	int force_replay_count;
+	base_jd_core_req force_replay_core_req;
+	bool force_replay_random;
+#endif
+
+	atomic_t ctx_num;
+
+#ifdef CONFIG_DEBUG_FS
+	struct kbase_io_history io_history;
+#endif /* CONFIG_DEBUG_FS */
+
+	struct kbase_hwaccess_data hwaccess;
+
+	atomic_t faults_pending;
+
+	bool poweroff_pending;
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+	bool infinite_cache_active_default;
+#else
+	u32 infinite_cache_active_default;
+#endif
+	size_t mem_pool_max_size_default;
+
+	u32 current_gpu_coherency_mode;
+	u32 system_coherency;
+
+	bool cci_snoop_enabled;
+
+	u32 snoop_enable_smc;
+	u32 snoop_disable_smc;
+
+	struct protected_mode_ops *protected_ops;
+
+	struct protected_mode_device *protected_dev;
+
+	bool protected_mode;
+
+	bool protected_mode_transition;
+
+	bool protected_mode_hwcnt_desired;
+
+	bool protected_mode_hwcnt_disabled;
+
+	struct work_struct protected_mode_hwcnt_disable_work;
+
+	bool protected_mode_support;
+
+#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
+	struct bus_logger_client *buslogger;
+#endif
+
+	bool irq_reset_flush;
+
+	u32 inited_subsys;
+
+	spinlock_t hwaccess_lock;
+
+	struct mutex mmu_hw_mutex;
+
+	/* See KBASE_SERIALIZE_* for details */
+	u8 serialize_jobs;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	u8 backup_serialize_jobs;
+#endif
+
+	/* See KBASE_JS_*_PRIORITY_MODE for details. */
+	u32 js_ctx_scheduling_mode;
+
+};
+
+/**
+ * struct jsctx_queue - JS context atom queue
+ * @runnable_tree: Root of RB-tree containing currently runnable atoms on this
+ *                 job slot.
+ * @x_dep_head:    Head item of the linked list of atoms blocked on cross-slot
+ *                 dependencies. Atoms on this list will be moved to the
+ *                 runnable_tree when the blocking atom completes.
+ *
+ * hwaccess_lock must be held when accessing this structure.
+ */
+struct jsctx_queue {
+	struct rb_root runnable_tree;
+	struct list_head x_dep_head;
+};
+
+
+#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20)  | \
+					 (((minor) & 0xFFF) << 8) | \
+					 ((0 & 0xFF) << 0))
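+
+/*
+ * Illustrative expansion (example values only): KBASE_API_VERSION(11, 26)
+ * packs to 0x00B01A00, i.e. major in bits 31..20, minor in bits 19..8 and
+ * an always-zero patch field in bits 7..0.
+ */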
+
+/**
+ * enum kbase_context_flags - Flags for kbase contexts
+ *
+ * @KCTX_COMPAT: Set when the context process is a compat process, 32-bit
+ * process on a 64-bit kernel.
+ *
+ * @KCTX_RUNNABLE_REF: Set when context is counted in
+ * kbdev->js_data.nr_contexts_runnable. Must hold queue_mutex when accessing.
+ *
+ * @KCTX_ACTIVE: Set when the context is active.
+ *
+ * @KCTX_PULLED: Set when last kick() caused atoms to be pulled from this
+ * context.
+ *
+ * @KCTX_MEM_PROFILE_INITIALIZED: Set when the context's memory profile has been
+ * initialized.
+ *
+ * @KCTX_INFINITE_CACHE: Set when infinite cache is to be enabled for new
+ * allocations. Existing allocations will not change.
+ *
+ * @KCTX_SUBMIT_DISABLED: Set to prevent context from submitting any jobs.
+ *
+ * @KCTX_PRIVILEGED: Set if the context uses an address space and should be kept
+ * scheduled in.
+ *
+ * @KCTX_SCHEDULED: Set when the context is scheduled on the Run Pool.
+ * This is only ever updated whilst the jsctx_mutex is held.
+ *
+ * @KCTX_DYING: Set when the context process is in the process of being evicted.
+ *
+ * @KCTX_NO_IMPLICIT_SYNC: Set when explicit Android fences are in use on this
+ * context, to disable use of implicit dma-buf fences. This is used to avoid
+ * potential synchronization deadlocks.
+ *
+ * @KCTX_FORCE_SAME_VA: Set when BASE_MEM_SAME_VA should be forced on memory
+ * allocations. For 64-bit clients it is enabled by default, and disabled by
+ * default on 32-bit clients. Being able to clear this flag is only used for
+ * testing purposes of the custom zone allocation on 64-bit user-space builds,
+ * where we also require more control than is available through e.g. the JIT
+ * allocation mechanism. However, the 64-bit user-space client must still
+ * reserve a JIT region using KBASE_IOCTL_MEM_JIT_INIT
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS0: Set when the context has had an atom pulled
+ * from it for job slot 0. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS1: Set when the context has had an atom pulled
+ * from it for job slot 1. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS2: Set when the context has had an atom pulled
+ * from it for job slot 2. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * All members need to be separate bits. This enum is intended for use in a
+ * bitmask where multiple values get OR-ed together.
+ */
+enum kbase_context_flags {
+	KCTX_COMPAT = 1U << 0,
+	KCTX_RUNNABLE_REF = 1U << 1,
+	KCTX_ACTIVE = 1U << 2,
+	KCTX_PULLED = 1U << 3,
+	KCTX_MEM_PROFILE_INITIALIZED = 1U << 4,
+	KCTX_INFINITE_CACHE = 1U << 5,
+	KCTX_SUBMIT_DISABLED = 1U << 6,
+	KCTX_PRIVILEGED = 1U << 7,
+	KCTX_SCHEDULED = 1U << 8,
+	KCTX_DYING = 1U << 9,
+	KCTX_NO_IMPLICIT_SYNC = 1U << 10,
+	KCTX_FORCE_SAME_VA = 1U << 11,
+	KCTX_PULLED_SINCE_ACTIVE_JS0 = 1U << 12,
+	KCTX_PULLED_SINCE_ACTIVE_JS1 = 1U << 13,
+	KCTX_PULLED_SINCE_ACTIVE_JS2 = 1U << 14,
+};
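+
+/*
+ * Illustrative usage sketch (not taken from this driver's sources): the
+ * flags above live in the atomic_t kbase_context::flags field and are
+ * OR-ed/tested as a bitmask, so a hypothetical caller-side check could
+ * look like:
+ *
+ *	if (atomic_read(&kctx->flags) & KCTX_SUBMIT_DISABLED)
+ *		return -EINVAL;
+ */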
+
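+/*
+ * struct kbase_sub_alloc - Tracks a large (2 MB) page that has been split
+ * into 4 KB sub-pages for partial use; presumably these are the entries
+ * chained on @mem_partials of struct kbase_context. @link is the list
+ * head for that chaining, @page points to the backing 2 MB page and each
+ * of the SZ_2M / SZ_4K (512) bits in @sub_pages tracks the allocation
+ * state of one 4 KB sub-page.
+ */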
+struct kbase_sub_alloc {
+	struct list_head link;
+	struct page *page;
+	DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K);
+};
+
+
+/**
+ * struct kbase_context - Object representing an entity among which the GPU is
+ *                        scheduled and which gets its own GPU address space.
+ *                        Created when the device file /dev/malixx is opened.
+ * @filp:                 Pointer to the struct file corresponding to device file
+ *                        /dev/malixx instance, passed to the file's open method.
+ * @kbdev:                Pointer to the Kbase device for which the context is created.
+ * @mmu:                  Structure holding details of the MMU tables for this
+ *                        context
+ * @id:                   Unique identifier for the context, indicates the number of
+ *                        contexts which have been created for the device so far.
+ * @api_version:          contains the version number for User/kernel interface,
+ *                        used for compatibility check.
+ * @event_list:           list of posted events about completed atoms, to be sent to
+ *                        the event handling thread of Userspace.
+ * @event_coalesce_list:  list containing events corresponding to successive atoms
+ *                        which have requested deferred delivery of the completion
+ *                        events to Userspace.
+ * @event_mutex:          Lock to protect the concurrent access to @event_list &
+ *                        @event_coalesce_list.
+ * @event_closed:         Flag set through POST_TERM ioctl, indicates that Driver
+ *                        should stop posting events and also inform event handling
+ *                        thread that context termination is in progress.
+ * @event_workq:          Workqueue for processing work items corresponding to atoms
+ *                        that do not return an event to Userspace or have to perform
+ *                        a replay job
+ * @event_count:          Count of the posted events to be consumed by Userspace.
+ * @event_coalesce_count: Count of the events present in @event_coalesce_list.
+ * @flags:                bitmap of enums from kbase_context_flags, indicating the
+ *                        state & attributes for the context.
+ * @setup_complete:       Indicates if the setup for context has completed, i.e.
+ *                        flags have been set for the context. Driver allows only
+ *                        2 ioctls until the setup is done. Valid only for
+ *                        @api_version value 0.
+ * @setup_in_progress:    Indicates if the context's setup is in progress; other
+ *                        setup calls made during that period shall be rejected.
+ * @aliasing_sink_page:   Special page used for KBASE_MEM_TYPE_ALIAS allocations,
+ *                        which can alias a number of memory regions. The page
+ *                        represents a region mapped with a write-alloc cache
+ *                        setup, typically used when the write result of the
+ *                        GPU isn't needed, but the GPU must write anyway.
+ * @mem_partials_lock:    Lock for protecting the operations done on the elements
+ *                        added to @mem_partials list.
+ * @mem_partials:         List head for the list of large pages, 2MB in size, which
+ *                        have been split into 4 KB pages and are partially used
+ *                        for allocations >= 2 MB in size.
+ * @reg_lock:             Lock used for GPU virtual address space management operations,
+ *                        like adding/freeing a memory region in the address space.
+ *                        It could potentially be converted to a rwlock.
+ * @reg_rbtree_same:      RB tree of the memory regions allocated from the SAME_VA
+ *                        zone of the GPU virtual address space. Used for allocations
+ *                        having the same value for GPU & CPU virtual address.
+ * @reg_rbtree_custom:    RB tree of the memory regions allocated from the CUSTOM_VA
+ *                        zone of the GPU virtual address space.
+ * @reg_rbtree_exec:      RB tree of the memory regions allocated from the EXEC_VA
+ *                        zone of the GPU virtual address space. Used for GPU-executable
+ *                        allocations which don't need the SAME_VA property.
+ * @cookies:              Bitmask of BITS_PER_LONG bits, used mainly for
+ *                        SAME_VA allocations to defer the reservation of memory region
+ *                        (from the GPU virtual address space) from base_mem_alloc
+ *                        ioctl to mmap system call. This helps returning unique
+ *                        handles, disguised as GPU VA, to Userspace from base_mem_alloc
+ *                        and later retrieving the pointer to memory region structure
+ *                        in the mmap handler.
+ * @pending_regions:      Array containing pointers to memory region structures,
+ *                        used in conjunction with @cookies bitmask mainly for
+ *                        providing a mechanism to have the same value for CPU &
+ *                        GPU virtual address.
+ * @event_queue:          Wait queue used for blocking the thread, which consumes
+ *                        the base_jd_event corresponding to an atom, when there
+ *                        are no more posted events.
+ * @tgid:                 thread group id of the process, whose thread opened the
+ *                        device file /dev/malixx instance to create a context.
+ * @pid:                  id of the thread, corresponding to process @tgid, which
+ *                        actually opened the device file.
+ * @jctx:                 object encapsulating all the Job dispatcher related state,
+ *                        including the array of atoms.
+ * @used_pages:           Keeps a track of the number of 4KB physical pages in use
+ *                        for the context.
+ * @nonmapped_pages:      Updated in the same way as @used_pages, except for the case
+ *                        when special tracking page is freed by userspace where it
+ *                        is reset to 0.
+ * @permanent_mapped_pages: Usage count of permanently mapped memory
+ * @mem_pool:             Object containing the state for the context specific pool of
+ *                        4KB size physical pages.
+ * @lp_mem_pool:          Object containing the state for the context specific pool of
+ *                        2MB size physical pages.
+ * @reclaim:              Shrinker object registered with the kernel containing
+ *                        the pointer to callback function which is invoked under
+ *                        low memory conditions. In the callback function Driver
+ *                        frees up the memory for allocations marked as
+ *                        evictable/reclaimable.
+ * @evict_list:           List head for the list containing the allocations which
+ *                        can be evicted or freed up in the shrinker callback.
+ * @waiting_soft_jobs:    List head for the list containing softjob atoms, which
+ *                        are either waiting for the event set operation, or waiting
+ *                        for the signaling of input fence or waiting for the GPU
+ *                        device to be powered on so as to dump the CPU/GPU timestamps.
+ * @waiting_soft_jobs_lock: Lock to protect @waiting_soft_jobs list from concurrent
+ *                        accesses.
+ * @dma_fence:            Object containing list head for the list of dma-buf fence
+ *                        waiting atoms and the waitqueue to process the work item
+ *                        queued for the atoms blocked on the signaling of dma-buf
+ *                        fences.
+ * @as_nr:                id of the address space being used for the scheduled in
+ *                        context. This is effectively part of the Run Pool, because
+ *                        it only has a valid setting (!=KBASEP_AS_NR_INVALID) whilst
+ *                        the context is scheduled in. The hwaccess_lock must be held
+ *                        whilst accessing this.
+ *                        If the context relating to this value of as_nr is required,
+ *                        then the context must be retained to ensure that it doesn't
+ *                        disappear whilst it is being used. Alternatively, hwaccess_lock
+ *                        can be held to ensure the context doesn't disappear (but this
+ *                        has restrictions on what other locks can be taken simultaneously).
+ * @refcount:             Keeps track of the number of users of this context. A user
+ *                        can be a job that is available for execution, instrumentation
+ *                        needing to 'pin' a context for counter collection, etc.
+ *                        If the refcount reaches 0 then this context is considered
+ *                        inactive and the previously programmed AS might be cleared
+ *                        at any point.
+ *                        Generally the reference count is incremented when the context
+ *                        is scheduled in and an atom is pulled from the context's per
+ *                        slot runnable tree.
+ * @mm_update_lock:       lock used for handling of special tracking page.
+ * @process_mm:           Pointer to the memory descriptor of the process which
+ *                        created the context. Used for accounting the physical
+ *                        pages used for GPU allocations, done for the context,
+ *                        to the memory consumed by the process.
+ * @same_va_end:          End address of the SAME_VA zone (in 4KB page units)
+ * @exec_va_start:        Start address of the EXEC_VA zone (in 4KB page units)
+ *                        or U64_MAX if the EXEC_VA zone is uninitialized.
+ * @gpu_va_end:           End address of the GPU va space (in 4KB page units)
+ * @jit_va:               Indicates if a JIT_VA zone has been created.
+ * @timeline:             Object tracking the number of atoms currently in flight for
+ *                        the context and thread group id of the process, i.e. @tgid.
+ * @mem_profile_data:     Buffer containing the profiling information provided by
+ *                        Userspace, can be read through the mem_profile debugfs file.
+ * @mem_profile_size:     Size of the @mem_profile_data.
+ * @mem_profile_lock:     Lock to serialize the operations related to mem_profile
+ *                        debugfs file.
+ * @kctx_dentry:          Pointer to the debugfs directory created for every context,
+ *                        inside kbase_device::debugfs_ctx_directory, containing
+ *                        context specific files.
+ * @reg_dump:             Buffer containing a register offset & value pair, used
+ *                        for dumping job fault debug info.
+ * @job_fault_count:      Indicates that a job fault occurred for the context and
+ *                        dumping of its debug info is in progress.
+ * @job_fault_resume_event_list: List containing atoms completed after the faulty
+ *                        atom but before the debug data for faulty atom was dumped.
+ * @jsctx_queue:          Per slot & priority arrays of objects containing the root
+ *                        of RB-tree holding currently runnable atoms on the job slot
+ *                        and the head item of the linked list of atoms blocked on
+ *                        cross-slot dependencies.
+ * @atoms_pulled:         Total number of atoms currently pulled from the context.
+ * @atoms_pulled_slot:    Per slot count of the number of atoms currently pulled
+ *                        from the context.
+ * @atoms_pulled_slot_pri: Per slot & priority count of the number of atoms currently
+ *                        pulled from the context. hwaccess_lock shall be held when
+ *                        accessing it.
+ * @blocked_js:           Indicates if the context is blocked from submitting atoms
+ *                        on a slot at a given priority. This is set to true when
+ *                        an atom from the context is soft/hard stopped or
+ *                        removed from the HEAD_NEXT register in response to
+ *                        soft/hard stop.
+ * @slots_pullable:       Bitmask of slots, indicating the slots for which the
+ *                        context has pullable atoms in the runnable tree.
+ * @work:                 Work structure used for deferred ASID assignment.
+ * @legacy_hwcnt_cli:     Pointer to the legacy userspace hardware counters
+ *                        client; there can be only one such client per kbase
+ *                        context.
+ * @legacy_hwcnt_lock:    Lock used to prevent concurrent access to
+ *                        @legacy_hwcnt_cli.
+ * @completed_jobs:       List containing completed atoms for which base_jd_event is
+ *                        to be posted.
+ * @work_count:           Number of work items, corresponding to atoms, currently
+ *                        pending on job_done workqueue of @jctx.
+ * @soft_job_timeout:     Timer object used for failing/cancelling the waiting
+ *                        soft-jobs which have been blocked for more than the
+ *                        timeout value used for the soft-jobs
+ * @jit_alloc:            Array of 256 pointers to GPU memory regions, used
+ *                        for JIT allocations.
+ * @jit_max_allocations:  Maximum number of JIT allocations allowed at once.
+ * @jit_current_allocations: Current number of in-flight JIT allocations.
+ * @jit_current_allocations_per_bin: Current number of in-flight JIT allocations per bin
+ * @jit_version:          version number indicating whether userspace is using
+ *                        old or new version of interface for JIT allocations
+ *	                  1 -> client used KBASE_IOCTL_MEM_JIT_INIT_OLD
+ *	                  2 -> client used KBASE_IOCTL_MEM_JIT_INIT
+ * @jit_active_head:      List containing the JIT allocations which are in use.
+ * @jit_pool_head:        List containing the JIT allocations which have been
+ *                        freed up by userspace and so not being used by it.
+ *                        Driver caches them to quickly fulfill requests for new
+ *                        JIT allocations. They are released in case of memory
+ *                        pressure as they are put on the @evict_list when they
+ *                        are freed up by userspace.
+ * @jit_destroy_head:     List containing the JIT allocations which were moved to it
+ *                        from @jit_pool_head, in the shrinker callback, after freeing
+ *                        their backing physical pages.
+ * @jit_evict_lock:       Lock used for operations done on JIT allocations and also
+ *                        for accessing @evict_list.
+ * @jit_work:             Work item queued to defer the freeing of memory region when
+ *                        JIT allocation is moved to @jit_destroy_head.
+ * @jit_atoms_head:       A list of the JIT soft-jobs, both alloc & free, in submission
+ *                        order, protected by kbase_jd_context.lock.
+ * @jit_pending_alloc:    A list of JIT alloc soft-jobs for which allocation will be
+ *                        reattempted after the impending free of other active JIT
+ *                        allocations.
+ * @ext_res_meta_head:    A list of sticky external resources which were requested to
+ *                        be mapped on GPU side, through a softjob atom of type
+ *                        EXT_RES_MAP or STICKY_RESOURCE_MAP ioctl.
+ * @drain_pending:        Used to record that a flush/invalidate of the GPU caches was
+ *                        requested from atomic context, so that the next flush request
+ *                        can wait for the flush of GPU writes.
+ * @age_count:            Counter incremented on every call to jd_submit_atom,
+ *                        atom is assigned the snapshot of this counter, which
+ *                        is used to determine the atom's age when it is added to
+ *                        the runnable RB-tree.
+ * @trim_level:           Level of JIT allocation trimming to perform on free (0-100%)
+ * @gwt_enabled:          Indicates if tracking of GPU writes is enabled, protected by
+ *                        kbase_context.reg_lock.
+ * @gwt_was_enabled:      Simple sticky bit flag to know if GWT was ever enabled.
+ * @gwt_current_list:     A list of addresses for which GPU has generated write faults,
+ *                        after the last snapshot of it was sent to userspace.
+ * @gwt_snapshot_list:    Snapshot of the @gwt_current_list for sending to user space.
+ * @priority:             Indicates the context priority. Used along with @atoms_count
+ *                        for context scheduling, protected by hwaccess_lock.
+ * @atoms_count:          Number of gpu atoms currently in use, per priority
+ */
+struct kbase_context {
+	struct file *filp;
+	struct kbase_device *kbdev;
+	struct kbase_mmu_table mmu;
+
+	u32 id;
+	unsigned long api_version;
+	struct list_head event_list;
+	struct list_head event_coalesce_list;
+	struct mutex event_mutex;
+	atomic_t event_closed;
+	struct workqueue_struct *event_workq;
+	atomic_t event_count;
+	int event_coalesce_count;
+
+	atomic_t flags;
+
+	atomic_t                setup_complete;
+	atomic_t                setup_in_progress;
+
+	struct tagged_addr aliasing_sink_page;
+
+	spinlock_t              mem_partials_lock;
+	struct list_head        mem_partials;
+
+	struct mutex            reg_lock;
+	struct rb_root reg_rbtree_same;
+	struct rb_root reg_rbtree_custom;
+	struct rb_root reg_rbtree_exec;
+
+
+	unsigned long    cookies;
+	struct kbase_va_region *pending_regions[BITS_PER_LONG];
+
+	wait_queue_head_t event_queue;
+	pid_t tgid;
+	pid_t pid;
+
+	struct kbase_jd_context jctx;
+	atomic_t used_pages;
+	atomic_t         nonmapped_pages;
+	unsigned long permanent_mapped_pages;
+
+	struct kbase_mem_pool mem_pool;
+	struct kbase_mem_pool lp_mem_pool;
+
+	struct shrinker         reclaim;
+	struct list_head        evict_list;
+
+	struct list_head waiting_soft_jobs;
+	spinlock_t waiting_soft_jobs_lock;
+#ifdef CONFIG_MALI_DMA_FENCE
+	struct {
+		struct list_head waiting_resource;
+		struct workqueue_struct *wq;
+	} dma_fence;
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	int as_nr;
+
+	atomic_t refcount;
+
+
+	/* NOTE:
+	 *
+	 * Flags are in jctx.sched_info.ctx.flags
+	 * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
+	 *
+	 * All other flags must be added there */
+	spinlock_t         mm_update_lock;
+	struct mm_struct __rcu *process_mm;
+	u64 same_va_end;
+	u64 exec_va_start;
+	u64 gpu_va_end;
+	bool jit_va;
+
+#ifdef CONFIG_DEBUG_FS
+	char *mem_profile_data;
+	size_t mem_profile_size;
+	struct mutex mem_profile_lock;
+	struct dentry *kctx_dentry;
+
+	unsigned int *reg_dump;
+	atomic_t job_fault_count;
+	struct list_head job_fault_resume_event_list;
+
+#endif /* CONFIG_DEBUG_FS */
+
+	struct jsctx_queue jsctx_queue
+		[KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS];
+
+	atomic_t atoms_pulled;
+	atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS];
+	int atoms_pulled_slot_pri[BASE_JM_MAX_NR_SLOTS][
+			KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+	bool blocked_js[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+	u32 slots_pullable;
+
+	struct work_struct work;
+
+	struct kbase_hwcnt_legacy_client *legacy_hwcnt_cli;
+	struct mutex legacy_hwcnt_lock;
+
+	struct list_head completed_jobs;
+	atomic_t work_count;
+
+	struct timer_list soft_job_timeout;
+
+	struct kbase_va_region *jit_alloc[256];
+	u8 jit_max_allocations;
+	u8 jit_current_allocations;
+	u8 jit_current_allocations_per_bin[256];
+	u8 jit_version;
+	struct list_head jit_active_head;
+	struct list_head jit_pool_head;
+	struct list_head jit_destroy_head;
+	struct mutex jit_evict_lock;
+	struct work_struct jit_work;
+
+	struct list_head jit_atoms_head;
+	struct list_head jit_pending_alloc;
+
+	struct list_head ext_res_meta_head;
+
+	atomic_t drain_pending;
+
+	u32 age_count;
+
+	u8 trim_level;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	bool gwt_enabled;
+
+	bool gwt_was_enabled;
+
+	struct list_head gwt_current_list;
+
+	struct list_head gwt_snapshot_list;
+#endif
+
+	int priority;
+	s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+};
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+/**
+ * struct kbasep_gwt_list_element - Structure used to collect GPU
+ *                                  write faults.
+ * @link:                           List head for adding write faults.
+ * @region:                         Details of the region where we have the
+ *                                  faulting page address.
+ * @page_addr:                      Page address where GPU write fault occurred.
+ * @num_pages:                      The number of pages modified.
+ *
+ * Using this structure all GPU write faults are stored in a list.
+ */
+struct kbasep_gwt_list_element {
+	struct list_head link;
+	struct kbase_va_region *region;
+	u64 page_addr;
+	u64 num_pages;
+};
+
+#endif
+
+/**
+ * struct kbase_ctx_ext_res_meta - Structure which binds an external resource
+ *                                 to a @kbase_context.
+ * @ext_res_node:                  List head for adding the metadata to a
+ *                                 @kbase_context.
+ * @alloc:                         The physical memory allocation structure
+ *                                 which is mapped.
+ * @gpu_addr:                      The GPU virtual address the resource is
+ *                                 mapped to.
+ *
+ * External resources can be mapped into multiple contexts as well as the same
+ * context multiple times.
+ * As kbase_va_region itself isn't refcounted we can't attach our extra
+ * information to it as it could be removed under our feet leaving external
+ * resources pinned.
+ * This metadata structure binds a single external resource to a single
+ * context, ensuring that per context mapping is tracked separately so it can
+ * be overridden when needed and abuses by the application (freeing the resource
+ * multiple times) don't affect the refcount of the physical allocation.
+ */
+struct kbase_ctx_ext_res_meta {
+	struct list_head ext_res_node;
+	struct kbase_mem_phy_alloc *alloc;
+	u64 gpu_addr;
+};
+
+enum kbase_reg_access_type {
+	REG_READ,
+	REG_WRITE
+};
+
+enum kbase_share_attr_bits {
+	/* (1ULL << 8) bit is reserved */
+	SHARE_BOTH_BITS = (2ULL << 8),	/* inner and outer shareable coherency */
+	SHARE_INNER_BITS = (3ULL << 8)	/* inner shareable coherency */
+};
+
+/**
+ * kbase_device_is_cpu_coherent - Returns if the device is CPU coherent.
+ * @kbdev: kbase device
+ *
+ * Return: true if the device access are coherent, false if not.
+ */
+static inline bool kbase_device_is_cpu_coherent(struct kbase_device *kbdev)
+{
+	if ((kbdev->system_coherency == COHERENCY_ACE_LITE) ||
+			(kbdev->system_coherency == COHERENCY_ACE))
+		return true;
+
+	return false;
+}
+
+/* Conversion helpers for setting up high resolution timers */
+#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime(((u64)(x))*1000000U))
+#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
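+/*
+ * For example, HR_TIMER_DELAY_MSEC(10) evaluates to a ktime_t of
+ * 10 * 1000000 ns (10 ms), while HR_TIMER_DELAY_NSEC() simply passes the
+ * nanosecond value through ns_to_ktime().
+ */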
+
+/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
+#define KBASE_CLEAN_CACHE_MAX_LOOPS     100000
+/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
+#define KBASE_AS_INACTIVE_MAX_LOOPS     100000
+
+/* Maximum number of times a job can be replayed */
+#define BASEP_JD_REPLAY_LIMIT 15
+
+/* JobDescriptorHeader - taken from the architecture specifications; the layout
+ * is currently identical for all GPU archs. */
+struct job_descriptor_header {
+	u32 exception_status;
+	u32 first_incomplete_task;
+	u64 fault_pointer;
+	u8 job_descriptor_size : 1;
+	u8 job_type : 7;
+	u8 job_barrier : 1;
+	u8 _reserved_01 : 1;
+	u8 _reserved_1 : 1;
+	u8 _reserved_02 : 1;
+	u8 _reserved_03 : 1;
+	u8 _reserved_2 : 1;
+	u8 _reserved_04 : 1;
+	u8 _reserved_05 : 1;
+	u16 job_index;
+	u16 job_dependency_index_1;
+	u16 job_dependency_index_2;
+	union {
+		u64 _64;
+		u32 _32;
+	} next_job;
+};
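+
+/*
+ * Presumably the job_descriptor_size bit above selects between the 32-bit
+ * and the 64-bit descriptor layout, which is why next_job is a union of a
+ * u32 and a u64 field.
+ */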
+
+#endif				/* _KBASE_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_device.c b/drivers/gpu/arm/midgard/mali_kbase_device.c
new file mode 100644
index 0000000..530bb45
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_device.c
@@ -0,0 +1,540 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel device APIs
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hwaccess_instr.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_config_defaults.h>
+
+/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
+ * Supports tracing feature provided in the base module.
+ * Please keep it in sync with the value of base module.
+ */
+#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254
+
+#if KBASE_TRACE_ENABLE
+static const char *kbasep_trace_code_string[] = {
+	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
+	 * THIS MUST BE USED AT THE START OF THE ARRAY */
+#define KBASE_TRACE_CODE_MAKE_CODE(X) # X
+#include "mali_kbase_trace_defs.h"
+#undef  KBASE_TRACE_CODE_MAKE_CODE
+};
+#endif
+
+#define DEBUG_MESSAGE_SIZE 256
+
+static int kbasep_trace_init(struct kbase_device *kbdev);
+static void kbasep_trace_term(struct kbase_device *kbdev);
+static void kbasep_trace_hook_wrapper(void *param);
+
+struct kbase_device *kbase_device_alloc(void)
+{
+	return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
+}
+
+static int kbase_device_as_init(struct kbase_device *kbdev, int i)
+{
+	const char format[] = "mali_mmu%d";
+	char name[sizeof(format)];
+	const char poke_format[] = "mali_mmu%d_poker";
+	char poke_name[sizeof(poke_format)];
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+		snprintf(poke_name, sizeof(poke_name), poke_format, i);
+
+	snprintf(name, sizeof(name), format, i);
+
+	kbdev->as[i].number = i;
+	kbdev->as[i].bf_data.addr = 0ULL;
+	kbdev->as[i].pf_data.addr = 0ULL;
+
+	kbdev->as[i].pf_wq = alloc_workqueue(name, 0, 1);
+	if (!kbdev->as[i].pf_wq)
+		return -EINVAL;
+
+	INIT_WORK(&kbdev->as[i].work_pagefault, page_fault_worker);
+	INIT_WORK(&kbdev->as[i].work_busfault, bus_fault_worker);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316)) {
+		struct hrtimer *poke_timer = &kbdev->as[i].poke_timer;
+		struct work_struct *poke_work = &kbdev->as[i].poke_work;
+
+		kbdev->as[i].poke_wq = alloc_workqueue(poke_name, 0, 1);
+		if (!kbdev->as[i].poke_wq) {
+			destroy_workqueue(kbdev->as[i].pf_wq);
+			return -EINVAL;
+		}
+		INIT_WORK(poke_work, kbasep_as_do_poke);
+
+		hrtimer_init(poke_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+		poke_timer->function = kbasep_as_poke_timer_callback;
+
+		kbdev->as[i].poke_refcount = 0;
+		kbdev->as[i].poke_state = 0u;
+	}
+
+	return 0;
+}
+
+static void kbase_device_as_term(struct kbase_device *kbdev, int i)
+{
+	destroy_workqueue(kbdev->as[i].pf_wq);
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+		destroy_workqueue(kbdev->as[i].poke_wq);
+}
+
+static int kbase_device_all_as_init(struct kbase_device *kbdev)
+{
+	int i, err;
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		err = kbase_device_as_init(kbdev, i);
+		if (err)
+			goto free_workqs;
+	}
+
+	return 0;
+
+free_workqs:
+	/* Terminate only the address spaces that were successfully initialised */
+	while (i-- > 0)
+		kbase_device_as_term(kbdev, i);
+
+	return err;
+}
+
+static void kbase_device_all_as_term(struct kbase_device *kbdev)
+{
+	int i;
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
+		kbase_device_as_term(kbdev, i);
+}
+
+int kbase_device_init(struct kbase_device * const kbdev)
+{
+	int err;
+#ifdef CONFIG_ARM64
+	struct device_node *np = NULL;
+#endif /* CONFIG_ARM64 */
+
+	spin_lock_init(&kbdev->mmu_mask_change);
+	mutex_init(&kbdev->mmu_hw_mutex);
+#ifdef CONFIG_ARM64
+	kbdev->cci_snoop_enabled = false;
+	np = kbdev->dev->of_node;
+	if (np != NULL) {
+		if (of_property_read_u32(np, "snoop_enable_smc",
+					&kbdev->snoop_enable_smc))
+			kbdev->snoop_enable_smc = 0;
+		if (of_property_read_u32(np, "snoop_disable_smc",
+					&kbdev->snoop_disable_smc))
+			kbdev->snoop_disable_smc = 0;
+		/* Either both or none of the calls should be provided. */
+		if (!((kbdev->snoop_disable_smc == 0
+			&& kbdev->snoop_enable_smc == 0)
+			|| (kbdev->snoop_disable_smc != 0
+			&& kbdev->snoop_enable_smc != 0))) {
+			WARN_ON(1);
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+#endif /* CONFIG_ARM64 */
+	/* Get the list of workarounds for issues on the current HW
+	 * (identified by the GPU_ID register)
+	 */
+	err = kbase_hw_set_issues_mask(kbdev);
+	if (err)
+		goto fail;
+
+	/* Set the list of features available on the current HW
+	 * (identified by the GPU_ID register)
+	 */
+	kbase_hw_set_features_mask(kbdev);
+
+	kbase_gpuprops_set_features(kbdev);
+
+	/* On Linux 4.0+, dma coherency is determined from device tree */
+#if defined(CONFIG_ARM64) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
+	set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
+#endif
+
+	/* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
+	 * device structure was created by device-tree
+	 */
+	if (!kbdev->dev->dma_mask)
+		kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;
+
+	err = dma_set_mask(kbdev->dev,
+			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+	if (err)
+		goto dma_set_mask_failed;
+
+	err = dma_set_coherent_mask(kbdev->dev,
+			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
+	if (err)
+		goto dma_set_mask_failed;
+
+	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;
+
+	err = kbase_device_all_as_init(kbdev);
+	if (err)
+		goto as_init_failed;
+
+	spin_lock_init(&kbdev->hwcnt.lock);
+
+	err = kbasep_trace_init(kbdev);
+	if (err)
+		goto term_as;
+
+	init_waitqueue_head(&kbdev->cache_clean_wait);
+
+	kbase_debug_assert_register_hook(&kbasep_trace_hook_wrapper, kbdev);
+
+	atomic_set(&kbdev->ctx_num, 0);
+
+	err = kbase_instr_backend_init(kbdev);
+	if (err)
+		goto term_trace;
+
+	kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;
+
+	kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;
+
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+		kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();
+	else
+		kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
+
+	return 0;
+term_trace:
+	kbasep_trace_term(kbdev);
+term_as:
+	kbase_device_all_as_term(kbdev);
+as_init_failed:
+dma_set_mask_failed:
+fail:
+	return err;
+}
+
+void kbase_device_term(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+#if KBASE_TRACE_ENABLE
+	kbase_debug_assert_register_hook(NULL, NULL);
+#endif
+
+	kbase_instr_backend_term(kbdev);
+
+	kbasep_trace_term(kbdev);
+
+	kbase_device_all_as_term(kbdev);
+}
+
+void kbase_device_free(struct kbase_device *kbdev)
+{
+	kfree(kbdev);
+}
+
+/*
+ * Device trace functions
+ */
+#if KBASE_TRACE_ENABLE
+
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+	struct kbase_trace *rbuf;
+
+	rbuf = kmalloc_array(KBASE_TRACE_SIZE, sizeof(*rbuf), GFP_KERNEL);
+
+	if (!rbuf)
+		return -EINVAL;
+
+	kbdev->trace_rbuf = rbuf;
+	spin_lock_init(&kbdev->trace_lock);
+	return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+	kfree(kbdev->trace_rbuf);
+}
+
+static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
+{
+	s32 written = 0;
+
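+	/*
+	 * Each snprintf() below is wrapped in MAX(..., 0) because snprintf()
+	 * can return a negative value on an output error, and the remaining
+	 * length (len - written) is clamped too so that an earlier truncated
+	 * write cannot turn the size argument negative.
+	 */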
+	/* Initial part of message */
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d.%.6d,%d,%d,%s,%p,", (int)trace_msg->timestamp.tv_sec, (int)(trace_msg->timestamp.tv_nsec / 1000), trace_msg->thread_id, trace_msg->cpu, kbasep_trace_code_string[trace_msg->code], trace_msg->ctx), 0);
+
+	if (trace_msg->katom)
+		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "atom %d (ud: 0x%llx 0x%llx)", trace_msg->atom_number, trace_msg->atom_udata[0], trace_msg->atom_udata[1]), 0);
+
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ",%.8llx,", trace_msg->gpu_addr), 0);
+
+	/* NOTE: Could add function callbacks to handle different message types */
+	/* Jobslot present */
+	if (trace_msg->flags & KBASE_TRACE_FLAG_JOBSLOT)
+		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->jobslot), 0);
+
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+	/* Refcount present */
+	if (trace_msg->flags & KBASE_TRACE_FLAG_REFCOUNT)
+		written += MAX(snprintf(buffer + written, MAX(len - written, 0), "%d", trace_msg->refcount), 0);
+
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), ","), 0);
+
+	/* Rest of message */
+	written += MAX(snprintf(buffer + written, MAX(len - written, 0), "0x%.8lx", trace_msg->info_val), 0);
+}
+
+static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
+{
+	char buffer[DEBUG_MESSAGE_SIZE];
+
+	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+	dev_dbg(kbdev->dev, "%s", buffer);
+}
+
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
+{
+	unsigned long irqflags;
+	struct kbase_trace *trace_msg;
+
+	spin_lock_irqsave(&kbdev->trace_lock, irqflags);
+
+	trace_msg = &kbdev->trace_rbuf[kbdev->trace_next_in];
+
+	/* Fill the message */
+	trace_msg->thread_id = task_pid_nr(current);
+	trace_msg->cpu = task_cpu(current);
+
+	getnstimeofday(&trace_msg->timestamp);
+
+	trace_msg->code = code;
+	trace_msg->ctx = ctx;
+
+	if (NULL == katom) {
+		trace_msg->katom = false;
+	} else {
+		trace_msg->katom = true;
+		trace_msg->atom_number = kbase_jd_atom_id(katom->kctx, katom);
+		trace_msg->atom_udata[0] = katom->udata.blob[0];
+		trace_msg->atom_udata[1] = katom->udata.blob[1];
+	}
+
+	trace_msg->gpu_addr = gpu_addr;
+	trace_msg->jobslot = jobslot;
+	trace_msg->refcount = MIN((unsigned int)refcount, 0xFF);
+	trace_msg->info_val = info_val;
+	trace_msg->flags = flags;
+
+	/* Update the ringbuffer indices */
+	kbdev->trace_next_in = (kbdev->trace_next_in + 1) & KBASE_TRACE_MASK;
+	if (kbdev->trace_next_in == kbdev->trace_first_out)
+		kbdev->trace_first_out = (kbdev->trace_first_out + 1) & KBASE_TRACE_MASK;
+
+	/* Done */
+
+	spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
+}
+
+void kbasep_trace_clear(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->trace_lock, flags);
+	kbdev->trace_first_out = kbdev->trace_next_in;
+	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	u32 start;
+	u32 end;
+
+	dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
+	spin_lock_irqsave(&kbdev->trace_lock, flags);
+	start = kbdev->trace_first_out;
+	end = kbdev->trace_next_in;
+
+	while (start != end) {
+		struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
+
+		kbasep_trace_dump_msg(kbdev, trace_msg);
+
+		start = (start + 1) & KBASE_TRACE_MASK;
+	}
+	dev_dbg(kbdev->dev, "TRACE_END");
+
+	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+	KBASE_TRACE_CLEAR(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+	struct kbase_device *kbdev = (struct kbase_device *)param;
+
+	kbasep_trace_dump(kbdev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct trace_seq_state {
+	struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
+	u32 start;
+	u32 end;
+};
+
+static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct trace_seq_state *state = s->private;
+	int i;
+
+	if (*pos > KBASE_TRACE_SIZE)
+		return NULL;
+	i = state->start + *pos;
+	if ((state->end >= state->start && i >= state->end) ||
+			i >= state->end + KBASE_TRACE_SIZE)
+		return NULL;
+
+	i &= KBASE_TRACE_MASK;
+
+	return &state->trace_buf[i];
+}
+
+static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
+{
+}
+
+static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
+{
+	struct trace_seq_state *state = s->private;
+	int i;
+
+	(*pos)++;
+
+	i = (state->start + *pos) & KBASE_TRACE_MASK;
+	if (i == state->end)
+		return NULL;
+
+	return &state->trace_buf[i];
+}
+
+static int kbasep_trace_seq_show(struct seq_file *s, void *data)
+{
+	struct kbase_trace *trace_msg = data;
+	char buffer[DEBUG_MESSAGE_SIZE];
+
+	kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
+	seq_printf(s, "%s\n", buffer);
+	return 0;
+}
+
+static const struct seq_operations kbasep_trace_seq_ops = {
+	.start = kbasep_trace_seq_start,
+	.next = kbasep_trace_seq_next,
+	.stop = kbasep_trace_seq_stop,
+	.show = kbasep_trace_seq_show,
+};
+
+static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct kbase_device *kbdev = inode->i_private;
+	unsigned long flags;
+
+	struct trace_seq_state *state;
+
+	state = __seq_open_private(file, &kbasep_trace_seq_ops, sizeof(*state));
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&kbdev->trace_lock, flags);
+	state->start = kbdev->trace_first_out;
+	state->end = kbdev->trace_next_in;
+	memcpy(state->trace_buf, kbdev->trace_rbuf, sizeof(state->trace_buf));
+	spin_unlock_irqrestore(&kbdev->trace_lock, flags);
+
+	return 0;
+}
+
+static const struct file_operations kbasep_trace_debugfs_fops = {
+	.open = kbasep_trace_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_file("mali_trace", S_IRUGO,
+			kbdev->mali_debugfs_directory, kbdev,
+			&kbasep_trace_debugfs_fops);
+}
+
+#else
+void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+}
+#endif				/* CONFIG_DEBUG_FS */
+
+#else				/* KBASE_TRACE_ENABLE  */
+static int kbasep_trace_init(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+	return 0;
+}
+
+static void kbasep_trace_term(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+static void kbasep_trace_hook_wrapper(void *param)
+{
+	CSTD_UNUSED(param);
+}
+
+void kbasep_trace_dump(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+#endif				/* KBASE_TRACE_ENABLE  */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c b/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c
new file mode 100644
index 0000000..68eb4ed
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c
@@ -0,0 +1,81 @@
+/*
+ *
+ * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Base kernel disjoint events helper functions
+ */
+
+#include <mali_kbase.h>
+
+void kbase_disjoint_init(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	atomic_set(&kbdev->disjoint_event.count, 0);
+	atomic_set(&kbdev->disjoint_event.state, 0);
+}
+
+/* increment the disjoint event count */
+void kbase_disjoint_event(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	atomic_inc(&kbdev->disjoint_event.count);
+}
+
+/* increment the state and the event counter */
+void kbase_disjoint_state_up(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	atomic_inc(&kbdev->disjoint_event.state);
+
+	kbase_disjoint_event(kbdev);
+}
+
+/* decrement the state */
+void kbase_disjoint_state_down(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->disjoint_event.state) > 0);
+
+	kbase_disjoint_event(kbdev);
+
+	atomic_dec(&kbdev->disjoint_event.state);
+}
+
+/* increments the count only if the state is > 0 */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	if (atomic_read(&kbdev->disjoint_event.state))
+		kbase_disjoint_event(kbdev);
+}
+
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	return atomic_read(&kbdev->disjoint_event.count);
+}
+KBASE_EXPORT_TEST_API(kbase_disjoint_event_get);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
new file mode 100644
index 0000000..628cf12
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
@@ -0,0 +1,454 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/* Include mali_kbase_dma_fence.h before checking for CONFIG_MALI_DMA_FENCE as
+ * it will be set there.
+ */
+#include "mali_kbase_dma_fence.h"
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/dma-resv.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/ww_mutex.h>
+
+#include <mali_kbase.h>
+
+static void
+kbase_dma_fence_work(struct work_struct *pwork);
+
+static void
+kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	list_add_tail(&katom->queue, &kctx->dma_fence.waiting_resource);
+}
+
+static void
+kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
+{
+	list_del(&katom->queue);
+}
+
+static int
+kbase_dma_fence_lock_reservations(struct kbase_dma_fence_resv_info *info,
+				  struct ww_acquire_ctx *ctx)
+{
+	struct dma_resv *content_res = NULL;
+	unsigned int content_res_idx = 0;
+	unsigned int r;
+	int err = 0;
+
+	ww_acquire_init(ctx, &reservation_ww_class);
+
+retry:
+	for (r = 0; r < info->dma_fence_resv_count; r++) {
+		if (info->resv_objs[r] == content_res) {
+			content_res = NULL;
+			continue;
+		}
+
+		err = ww_mutex_lock(&info->resv_objs[r]->lock, ctx);
+		if (err)
+			goto error;
+	}
+
+	ww_acquire_done(ctx);
+	return err;
+
+error:
+	content_res_idx = r;
+
+	/* Unlock the ones we have already locked */
+	while (r--)
+		ww_mutex_unlock(&info->resv_objs[r]->lock);
+
+	if (content_res)
+		ww_mutex_unlock(&content_res->lock);
+
+	/* If we hit a deadlock, take the contended lock with lock_slow and retry */
+	if (err == -EDEADLK) {
+		content_res = info->resv_objs[content_res_idx];
+		ww_mutex_lock_slow(&content_res->lock, ctx);
+		goto retry;
+	}
+
+	/* If we are here the function failed */
+	ww_acquire_fini(ctx);
+	return err;
+}
+
+static void
+kbase_dma_fence_unlock_reservations(struct kbase_dma_fence_resv_info *info,
+				    struct ww_acquire_ctx *ctx)
+{
+	unsigned int r;
+
+	for (r = 0; r < info->dma_fence_resv_count; r++)
+		ww_mutex_unlock(&info->resv_objs[r]->lock);
+	ww_acquire_fini(ctx);
+}
+
+/**
+ * kbase_dma_fence_queue_work() - Queue work to handle @katom
+ * @katom: Pointer to atom for which to queue work
+ *
+ * Queue kbase_dma_fence_work() for @katom to clean up the fence callbacks and
+ * submit the atom.
+ */
+static void
+kbase_dma_fence_queue_work(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	bool ret;
+
+	INIT_WORK(&katom->work, kbase_dma_fence_work);
+	ret = queue_work(kctx->dma_fence.wq, &katom->work);
+	/* Warn if work was already queued, that should not happen. */
+	WARN_ON(!ret);
+}
+
+/**
+ * kbase_dma_fence_cancel_atom() - Cancels waiting on an atom
+ * @katom:	Katom to cancel
+ *
+ * Locking: jctx.lock must be held when accessing katom->dma_fence.callbacks.
+ */
+static void
+kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	/* Cancel callbacks and clean up. */
+	kbase_fence_free_callbacks(katom);
+
+	/* Mark the atom as handled in case all fences signaled just before
+	 * canceling the callbacks and the worker was queued.
+	 */
+	kbase_fence_dep_count_set(katom, -1);
+
+	/* Prevent jd_done_nolock from being called twice on an atom when
+	 * there is a race between job completion and cancellation.
+	 */
+
+	if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
+		/* Wait was cancelled - zap the atom */
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		if (jd_done_nolock(katom, NULL))
+			kbase_js_sched_all(katom->kctx->kbdev);
+	}
+}
+
+/**
+ * kbase_dma_fence_work() - Worker thread called when a fence is signaled
+ * @pwork:	work_struct containing a pointer to a katom
+ *
+ * This function cleans up the fence callbacks and marks all of the atom's
+ * dependencies as satisfied.
+ */
+static void
+kbase_dma_fence_work(struct work_struct *pwork)
+{
+	struct kbase_jd_atom *katom;
+	struct kbase_jd_context *ctx;
+
+	katom = container_of(pwork, struct kbase_jd_atom, work);
+	ctx = &katom->kctx->jctx;
+
+	mutex_lock(&ctx->lock);
+	if (kbase_fence_dep_count_read(katom) != 0)
+		goto out;
+
+	kbase_fence_dep_count_set(katom, -1);
+
+	/* Remove atom from list of dma-fence waiting atoms. */
+	kbase_dma_fence_waiters_remove(katom);
+	/* Cleanup callbacks. */
+	kbase_fence_free_callbacks(katom);
+	/*
+	 * Queue atom on GPU, unless it has already completed due to a failing
+	 * dependency. Run jd_done_nolock() on the katom if it is completed.
+	 */
+	if (unlikely(katom->status == KBASE_JD_ATOM_STATE_COMPLETED))
+		jd_done_nolock(katom, NULL);
+	else
+		kbase_jd_dep_clear_locked(katom);
+
+out:
+	mutex_unlock(&ctx->lock);
+}
+
+static void
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
+#else
+kbase_dma_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+#endif
+{
+	struct kbase_fence_cb *kcb = container_of(cb,
+				struct kbase_fence_cb,
+				fence_cb);
+	struct kbase_jd_atom *katom = kcb->katom;
+
+	/* If the atom is zapped, dep_count will be forced to a negative number,
+	 * preventing this callback from ever scheduling the work that would
+	 * otherwise reschedule the atom.
+	 */
+
+	if (kbase_fence_dep_count_dec_and_test(katom))
+		kbase_dma_fence_queue_work(katom);
+}
+
+static int
+kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
+					 struct dma_resv *resv,
+					 bool exclusive)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *excl_fence = NULL;
+	struct fence **shared_fences = NULL;
+#else
+	struct dma_fence *excl_fence = NULL;
+	struct dma_fence **shared_fences = NULL;
+#endif
+	unsigned int shared_count = 0;
+	int err, i;
+
+	err = dma_resv_get_fences_rcu(resv,
+						&excl_fence,
+						&shared_count,
+						&shared_fences);
+	if (err)
+		return err;
+
+	if (excl_fence) {
+		err = kbase_fence_add_callback(katom,
+						excl_fence,
+						kbase_dma_fence_cb);
+
+		/* Release our reference, taken by dma_resv_get_fences_rcu(),
+		 * to the fence. We have set up our callback (if that was possible),
+		 * and it is now the fence owner's responsibility to signal the fence
+		 * before allowing it to disappear.
+		 */
+		dma_fence_put(excl_fence);
+
+		if (err)
+			goto out;
+	}
+
+	if (exclusive) {
+		for (i = 0; i < shared_count; i++) {
+			err = kbase_fence_add_callback(katom,
+							shared_fences[i],
+							kbase_dma_fence_cb);
+			if (err)
+				goto out;
+		}
+	}
+
+	/* Release all our references to the shared fences, taken by
+	 * dma_resv_get_fences_rcu(). We have set up our callback (if
+	 * that was possible), and it is now the fence owner's responsibility
+	 * to signal the fence before allowing it to disappear.
+	 */
+out:
+	for (i = 0; i < shared_count; i++)
+		dma_fence_put(shared_fences[i]);
+	kfree(shared_fences);
+
+	if (err) {
+		/*
+		 * On error, cancel and clean up all callbacks that were set up
+		 * before the error.
+		 */
+		kbase_fence_free_callbacks(katom);
+	}
+
+	return err;
+}
+
+void kbase_dma_fence_add_reservation(struct dma_resv *resv,
+				     struct kbase_dma_fence_resv_info *info,
+				     bool exclusive)
+{
+	unsigned int i;
+
+	for (i = 0; i < info->dma_fence_resv_count; i++) {
+		/* Duplicate resource, ignore */
+		if (info->resv_objs[i] == resv)
+			return;
+	}
+
+	info->resv_objs[info->dma_fence_resv_count] = resv;
+	if (exclusive)
+		set_bit(info->dma_fence_resv_count,
+			info->dma_fence_excl_bitmap);
+	(info->dma_fence_resv_count)++;
+}
+
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+			 struct kbase_dma_fence_resv_info *info)
+{
+	int err, i;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+	struct ww_acquire_ctx ww_ctx;
+
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	fence = kbase_fence_out_new(katom);
+	if (!fence) {
+		err = -ENOMEM;
+		dev_err(katom->kctx->kbdev->dev,
+			"Error %d creating fence.\n", err);
+		return err;
+	}
+
+	kbase_fence_dep_count_set(katom, 1);
+
+	err = kbase_dma_fence_lock_reservations(info, &ww_ctx);
+	if (err) {
+		dev_err(katom->kctx->kbdev->dev,
+			"Error %d locking reservations.\n", err);
+		kbase_fence_dep_count_set(katom, -1);
+		kbase_fence_out_remove(katom);
+		return err;
+	}
+
+	for (i = 0; i < info->dma_fence_resv_count; i++) {
+		struct dma_resv *obj = info->resv_objs[i];
+
+		if (!test_bit(i, info->dma_fence_excl_bitmap)) {
+			err = dma_resv_reserve_shared(obj, 1);
+			if (err) {
+				dev_err(katom->kctx->kbdev->dev,
+					"Error %d reserving space for shared fence.\n", err);
+				goto end;
+			}
+
+			err = kbase_dma_fence_add_reservation_callback(katom, obj, false);
+			if (err) {
+				dev_err(katom->kctx->kbdev->dev,
+					"Error %d adding reservation to callback.\n", err);
+				goto end;
+			}
+
+			dma_resv_add_shared_fence(obj, fence);
+		} else {
+			err = kbase_dma_fence_add_reservation_callback(katom, obj, true);
+			if (err) {
+				dev_err(katom->kctx->kbdev->dev,
+					"Error %d adding reservation to callback.\n", err);
+				goto end;
+			}
+
+			dma_resv_add_excl_fence(obj, fence);
+		}
+	}
+
+end:
+	kbase_dma_fence_unlock_reservations(info, &ww_ctx);
+
+	if (likely(!err)) {
+		/* Test if the callbacks are already triggered */
+		if (kbase_fence_dep_count_dec_and_test(katom)) {
+			kbase_fence_dep_count_set(katom, -1);
+			kbase_fence_free_callbacks(katom);
+		} else {
+			/* Add katom to the list of dma-buf fence waiting atoms
+			 * only if it is still waiting.
+			 */
+			kbase_dma_fence_waiters_add(katom);
+		}
+	} else {
+		/* There was an error, cancel callbacks, set dep_count to -1 to
+		 * indicate that the atom has been handled (the caller will
+		 * kill it for us), signal the fence, free callbacks and the
+		 * fence.
+		 */
+		kbase_fence_free_callbacks(katom);
+		kbase_fence_dep_count_set(katom, -1);
+		kbase_dma_fence_signal(katom);
+	}
+
+	return err;
+}
+
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx)
+{
+	struct list_head *list = &kctx->dma_fence.waiting_resource;
+
+	while (!list_empty(list)) {
+		struct kbase_jd_atom *katom;
+
+		katom = list_first_entry(list, struct kbase_jd_atom, queue);
+		kbase_dma_fence_waiters_remove(katom);
+		kbase_dma_fence_cancel_atom(katom);
+	}
+}
+
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom)
+{
+	/* Cancel callbacks and clean up. */
+	if (kbase_fence_free_callbacks(katom))
+		kbase_dma_fence_queue_work(katom);
+}
+
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
+{
+	if (!katom->dma_fence.fence)
+		return;
+
+	/* Signal the atom's fence. */
+	dma_fence_signal(katom->dma_fence.fence);
+
+	kbase_fence_out_remove(katom);
+
+	kbase_fence_free_callbacks(katom);
+}
+
+void kbase_dma_fence_term(struct kbase_context *kctx)
+{
+	destroy_workqueue(kctx->dma_fence.wq);
+	kctx->dma_fence.wq = NULL;
+}
+
+int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+	INIT_LIST_HEAD(&kctx->dma_fence.waiting_resource);
+
+	kctx->dma_fence.wq = alloc_workqueue("mali-fence-%d",
+					     WQ_UNBOUND, 1, kctx->pid);
+	if (!kctx->dma_fence.wq)
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h
new file mode 100644
index 0000000..8eadbf2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h
@@ -0,0 +1,136 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_DMA_FENCE_H_
+#define _KBASE_DMA_FENCE_H_
+
+#ifdef CONFIG_MALI_DMA_FENCE
+
+#include <linux/list.h>
+#include <linux/dma-resv.h>
+#include <mali_kbase_fence.h>
+
+
+/* Forward declaration from mali_kbase_defs.h */
+struct kbase_jd_atom;
+struct kbase_context;
+
+/**
+ * struct kbase_dma_fence_resv_info - Structure with list of reservation objects
+ * @resv_objs:             Array of reservation objects to attach the
+ *                         new fence to.
+ * @dma_fence_resv_count:  Number of reservation objects in the array.
+ * @dma_fence_excl_bitmap: Specifies which resv_obj are exclusive.
+ *
+ * This is used by some functions to pass around a collection of data about
+ * reservation objects.
+ */
+struct kbase_dma_fence_resv_info {
+	struct dma_resv **resv_objs;
+	unsigned int dma_fence_resv_count;
+	unsigned long *dma_fence_excl_bitmap;
+};
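+
+/*
+ * Illustrative usage (a sketch, not code from this patch): a caller with n
+ * external resources would typically size the arrays up front, feed each
+ * reservation object through kbase_dma_fence_add_reservation(), and then
+ * hand the whole set to kbase_dma_fence_wait():
+ *
+ *	struct kbase_dma_fence_resv_info info;
+ *
+ *	info.resv_objs = kmalloc_array(n, sizeof(struct dma_resv *),
+ *				       GFP_KERNEL);
+ *	info.dma_fence_excl_bitmap = kcalloc(BITS_TO_LONGS(n),
+ *					     sizeof(unsigned long), GFP_KERNEL);
+ *	info.dma_fence_resv_count = 0;
+ *
+ *	kbase_dma_fence_add_reservation(resv, &info, exclusive);  // per resource
+ *	err = kbase_dma_fence_wait(katom, &info);
+ */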
+
+/**
+ * kbase_dma_fence_add_reservation() - Adds a resv to the array of resv_objs
+ * @resv:      Reservation object to add to the array.
+ * @info:      Pointer to struct with current reservation info
+ * @exclusive: Boolean indicating if exclusive access is needed
+ *
+ * The function adds a new dma_resv to an existing array of
+ * dma_resvs. At the same time keeps track of which objects require
+ * exclusive access in dma_fence_excl_bitmap.
+ */
+void kbase_dma_fence_add_reservation(struct dma_resv *resv,
+				     struct kbase_dma_fence_resv_info *info,
+				     bool exclusive);
+
+/**
+ * kbase_dma_fence_wait() - Creates a new fence and attaches it to the resv_objs
+ * @katom: Katom with the external dependency.
+ * @info:  Pointer to struct with current reservation info
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
+			 struct kbase_dma_fence_resv_info *info);
+
+/**
+ * kbase_dma_fence_cancel_all_atoms() - Cancel all dma-fence-blocked atoms on kctx
+ * @kctx: Pointer to kbase context
+ *
+ * This function will cancel and clean up all katoms on @kctx that are waiting
+ * on dma-buf fences.
+ *
+ * Locking: jctx.lock needs to be held when calling this function.
+ */
+void kbase_dma_fence_cancel_all_atoms(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_cancel_callbacks() - Cancel only callbacks on katom
+ * @katom: Pointer to katom whose callbacks are to be canceled
+ *
+ * This function cancels all dma-buf fence callbacks on @katom, but does not
+ * cancel the katom itself.
+ *
+ * The caller is responsible for ensuring that jd_done_nolock is called on
+ * @katom.
+ *
+ * Locking: jctx.lock must be held when calling this function.
+ */
+void kbase_dma_fence_cancel_callbacks(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_signal() - Signal katom's fence and clean up after wait
+ * @katom: Pointer to katom to signal and clean up
+ *
+ * This function will signal the @katom's fence, if it has one, and clean up
+ * the callback data from the katom's wait on earlier fences.
+ *
+ * Locking: jctx.lock must be held while calling this function.
+ */
+void kbase_dma_fence_signal(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_dma_fence_term() - Terminate Mali dma-fence context
+ * @kctx: kbase context to terminate
+ */
+void kbase_dma_fence_term(struct kbase_context *kctx);
+
+/**
+ * kbase_dma_fence_init() - Initialize Mali dma-fence context
+ * @kctx: kbase context to initialize
+ */
+int kbase_dma_fence_init(struct kbase_context *kctx);
+
+
+#else /* CONFIG_MALI_DMA_FENCE */
+/* Dummy functions for when dma-buf fence isn't enabled. */
+
+static inline int kbase_dma_fence_init(struct kbase_context *kctx)
+{
+	return 0;
+}
+
+static inline void kbase_dma_fence_term(struct kbase_context *kctx) {}
+#endif /* CONFIG_MALI_DMA_FENCE */
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_event.c b/drivers/gpu/arm/midgard/mali_kbase_event.c
new file mode 100644
index 0000000..3c9cef3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_event.c
@@ -0,0 +1,262 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_debug.h>
+#include <mali_kbase_tlstream.h>
+
+static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	struct base_jd_udata data;
+
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(katom != NULL);
+	KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+	data = katom->udata;
+
+	KBASE_TLSTREAM_TL_NRET_ATOM_CTX(katom, kctx);
+	KBASE_TLSTREAM_TL_DEL_ATOM(katom);
+
+	katom->status = KBASE_JD_ATOM_STATE_UNUSED;
+
+	wake_up(&katom->completed);
+
+	return data;
+}
+
+int kbase_event_pending(struct kbase_context *ctx)
+{
+	KBASE_DEBUG_ASSERT(ctx);
+
+	return (atomic_read(&ctx->event_count) != 0) ||
+			(atomic_read(&ctx->event_closed) != 0);
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_pending);
+
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
+{
+	struct kbase_jd_atom *atom;
+
+	KBASE_DEBUG_ASSERT(ctx);
+
+	mutex_lock(&ctx->event_mutex);
+
+	if (list_empty(&ctx->event_list)) {
+		if (!atomic_read(&ctx->event_closed)) {
+			mutex_unlock(&ctx->event_mutex);
+			return -1;
+		}
+
+		/* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
+		mutex_unlock(&ctx->event_mutex);
+		uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
+		memset(&uevent->udata, 0, sizeof(uevent->udata));
+		dev_dbg(ctx->kbdev->dev,
+				"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
+				BASE_JD_EVENT_DRV_TERMINATED);
+		return 0;
+	}
+
+	/* normal event processing */
+	atomic_dec(&ctx->event_count);
+	atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
+	list_del(ctx->event_list.next);
+
+	mutex_unlock(&ctx->event_mutex);
+
+	dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
+	uevent->event_code = atom->event_code;
+	uevent->atom_number = (atom - ctx->jctx.atoms);
+
+	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+		kbase_jd_free_external_resources(atom);
+
+	mutex_lock(&ctx->jctx.lock);
+	uevent->udata = kbase_event_process(ctx, atom);
+	mutex_unlock(&ctx->jctx.lock);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_dequeue);
+
+/**
+ * kbase_event_process_noreport_worker - Worker for processing atoms that do not
+ *                                       return an event but do have external
+ *                                       resources
+ * @data:  Work structure
+ */
+static void kbase_event_process_noreport_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+			work);
+	struct kbase_context *kctx = katom->kctx;
+
+	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+		kbase_jd_free_external_resources(katom);
+
+	mutex_lock(&kctx->jctx.lock);
+	kbase_event_process(kctx, katom);
+	mutex_unlock(&kctx->jctx.lock);
+}
+
+/**
+ * kbase_event_process_noreport - Process atoms that do not return an event
+ * @kctx:  Context pointer
+ * @katom: Atom to be processed
+ *
+ * Atoms that do not have external resources will be processed immediately.
+ * Atoms that do have external resources will be processed on a workqueue, in
+ * order to avoid locking issues.
+ */
+static void kbase_event_process_noreport(struct kbase_context *kctx,
+		struct kbase_jd_atom *katom)
+{
+	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+		INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
+		queue_work(kctx->event_workq, &katom->work);
+	} else {
+		kbase_event_process(kctx, katom);
+	}
+}
+
+/**
+ * kbase_event_coalesce - Move pending events to the main event list
+ * @kctx:  Context pointer
+ *
+ * kctx->event_list and kctx->event_coalesce_count must be protected
+ * by a lock unless this is the last thread using them
+ * (and the lock is about to be torn down along with the context).
+ *
+ * Return: The number of pending events moved to the main event list
+ */
+static int kbase_event_coalesce(struct kbase_context *kctx)
+{
+	const int event_count = kctx->event_coalesce_count;
+
+	/* Join the list of pending events onto the tail of the main list
+	   and reset it */
+	list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
+	kctx->event_coalesce_count = 0;
+
+	/* Return the number of events moved */
+	return event_count;
+}
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
+{
+	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
+		if (atom->event_code == BASE_JD_EVENT_DONE) {
+			/* Don't report the event */
+			kbase_event_process_noreport(ctx, atom);
+			return;
+		}
+	}
+
+	if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
+		/* Don't report the event */
+		kbase_event_process_noreport(ctx, atom);
+		return;
+	}
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, TL_ATOM_STATE_POSTED);
+	if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
+		/* Don't report the event until other event(s) have completed */
+		mutex_lock(&ctx->event_mutex);
+		list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
+		++ctx->event_coalesce_count;
+		mutex_unlock(&ctx->event_mutex);
+	} else {
+		/* Report the event and any pending events now */
+		int event_count = 1;
+
+		mutex_lock(&ctx->event_mutex);
+		event_count += kbase_event_coalesce(ctx);
+		list_add_tail(&atom->dep_item[0], &ctx->event_list);
+		atomic_add(event_count, &ctx->event_count);
+		mutex_unlock(&ctx->event_mutex);
+
+		kbase_event_wakeup(ctx);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_event_post);
+
+void kbase_event_close(struct kbase_context *kctx)
+{
+	mutex_lock(&kctx->event_mutex);
+	atomic_set(&kctx->event_closed, true);
+	mutex_unlock(&kctx->event_mutex);
+	kbase_event_wakeup(kctx);
+}
+
+int kbase_event_init(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx);
+
+	INIT_LIST_HEAD(&kctx->event_list);
+	INIT_LIST_HEAD(&kctx->event_coalesce_list);
+	mutex_init(&kctx->event_mutex);
+	atomic_set(&kctx->event_count, 0);
+	kctx->event_coalesce_count = 0;
+	atomic_set(&kctx->event_closed, false);
+	kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
+
+	if (NULL == kctx->event_workq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_init);
+
+void kbase_event_cleanup(struct kbase_context *kctx)
+{
+	int event_count;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(kctx->event_workq);
+
+	flush_workqueue(kctx->event_workq);
+	destroy_workqueue(kctx->event_workq);
+
+	/* We use kbase_event_dequeue to remove the remaining events as that
+	 * deals with all the cleanup needed for the atoms.
+	 *
+	 * Note: use of kctx->event_list without a lock is safe because this must
+	 * be the last thread using it (the lock is about to be torn down).
+	 */
+	event_count = kbase_event_coalesce(kctx);
+	atomic_add(event_count, &kctx->event_count);
+
+	while (!list_empty(&kctx->event_list)) {
+		struct base_jd_event_v2 event;
+
+		kbase_event_dequeue(kctx, &event);
+	}
+}
+
+KBASE_EXPORT_TEST_API(kbase_event_cleanup);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence.c b/drivers/gpu/arm/midgard/mali_kbase_fence.c
new file mode 100644
index 0000000..3272836
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence.c
@@ -0,0 +1,208 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <mali_kbase_fence_defs.h>
+#include <mali_kbase_fence.h>
+#include <mali_kbase.h>
+
+/* Spin lock protecting all Mali fences as fence->lock. */
+static DEFINE_SPINLOCK(kbase_fence_lock);
+
+static const char *
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_get_driver_name(struct fence *fence)
+#else
+kbase_fence_get_driver_name(struct dma_fence *fence)
+#endif
+{
+	return kbase_drv_name;
+}
+
+static const char *
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_get_timeline_name(struct fence *fence)
+#else
+kbase_fence_get_timeline_name(struct dma_fence *fence)
+#endif
+{
+	return kbase_timeline_name;
+}
+
+static bool
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_enable_signaling(struct fence *fence)
+#else
+kbase_fence_enable_signaling(struct dma_fence *fence)
+#endif
+{
+	return true;
+}
+
+static void
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+kbase_fence_fence_value_str(struct fence *fence, char *str, int size)
+#else
+kbase_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
+#endif
+{
+	snprintf(str, size, "%u", fence->seqno);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+const struct fence_ops kbase_fence_ops = {
+	.wait = fence_default_wait,
+#else
+const struct dma_fence_ops kbase_fence_ops = {
+	.wait = dma_fence_default_wait,
+#endif
+	.get_driver_name = kbase_fence_get_driver_name,
+	.get_timeline_name = kbase_fence_get_timeline_name,
+	.enable_signaling = kbase_fence_enable_signaling,
+	.fence_value_str = kbase_fence_fence_value_str
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+struct fence *
+kbase_fence_out_new(struct kbase_jd_atom *katom)
+#else
+struct dma_fence *
+kbase_fence_out_new(struct kbase_jd_atom *katom)
+#endif
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	WARN_ON(katom->dma_fence.fence);
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	dma_fence_init(fence,
+		       &kbase_fence_ops,
+		       &kbase_fence_lock,
+		       katom->dma_fence.context,
+		       atomic_inc_return(&katom->dma_fence.seqno));
+
+	katom->dma_fence.fence = fence;
+
+	return fence;
+}
+
+bool
+kbase_fence_free_callbacks(struct kbase_jd_atom *katom)
+{
+	struct kbase_fence_cb *cb, *tmp;
+	bool res = false;
+
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	/* Clean up and free callbacks. */
+	list_for_each_entry_safe(cb, tmp, &katom->dma_fence.callbacks, node) {
+		bool ret;
+
+		/* Cancel callbacks that haven't been called yet. */
+		ret = dma_fence_remove_callback(cb->fence, &cb->fence_cb);
+		if (ret) {
+			int dep_count;
+
+			/* Fence had not signaled, clean up after
+			 * canceling.
+			 */
+			dep_count = atomic_dec_return(&katom->dma_fence.dep_count);
+
+			if (unlikely(dep_count == 0))
+				res = true;
+		}
+
+		/*
+		 * Release the reference taken in
+		 * kbase_fence_add_callback().
+		 */
+		dma_fence_put(cb->fence);
+		list_del(&cb->node);
+		kfree(cb);
+	}
+
+	return res;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+int
+kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			 struct fence *fence,
+			 fence_func_t callback)
+#else
+int
+kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			 struct dma_fence *fence,
+			 dma_fence_func_t callback)
+#endif
+{
+	int err = 0;
+	struct kbase_fence_cb *kbase_fence_cb;
+
+	if (!fence)
+		return -EINVAL;
+
+	kbase_fence_cb = kmalloc(sizeof(*kbase_fence_cb), GFP_KERNEL);
+	if (!kbase_fence_cb)
+		return -ENOMEM;
+
+	kbase_fence_cb->fence = fence;
+	kbase_fence_cb->katom = katom;
+	INIT_LIST_HEAD(&kbase_fence_cb->node);
+	atomic_inc(&katom->dma_fence.dep_count);
+
+	err = dma_fence_add_callback(fence, &kbase_fence_cb->fence_cb,
+				     callback);
+	if (err == -ENOENT) {
+		/* Fence signaled, get the completion result */
+		err = dma_fence_get_status(fence);
+
+		/* remap success completion to err code */
+		if (err == 1)
+			err = 0;
+
+		kfree(kbase_fence_cb);
+		atomic_dec(&katom->dma_fence.dep_count);
+	} else if (err) {
+		kfree(kbase_fence_cb);
+		atomic_dec(&katom->dma_fence.dep_count);
+	} else {
+		/*
+		 * Get reference to fence that will be kept until callback gets
+		 * cleaned up in kbase_fence_free_callbacks().
+		 */
+		dma_fence_get(fence);
+		/* Add callback to katom's list of callbacks */
+		list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
+	}
+
+	return err;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence.h b/drivers/gpu/arm/midgard/mali_kbase_fence.h
new file mode 100644
index 0000000..d7a65e0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence.h
@@ -0,0 +1,280 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_FENCE_H_
+#define _KBASE_FENCE_H_
+
+/*
+ * mali_kbase_fence.[hc] has common fence code used by both
+ * - CONFIG_MALI_DMA_FENCE - implicit DMA fences
+ * - CONFIG_SYNC_FILE      - explicit fences beginning with 4.9 kernel
+ */
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/list.h>
+#include "mali_kbase_fence_defs.h"
+#include "mali_kbase.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+extern const struct fence_ops kbase_fence_ops;
+#else
+extern const struct dma_fence_ops kbase_fence_ops;
+#endif
+
+/**
+ * struct kbase_fence_cb - Mali dma-fence callback data struct
+ * @fence_cb: Fence callback struct registered with the fence
+ * @katom:    Pointer to katom that is waiting on this callback
+ * @fence:    Pointer to the fence object on which this callback is waiting
+ * @node:     List head for linking this callback to the katom
+ */
+struct kbase_fence_cb {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence_cb fence_cb;
+	struct fence *fence;
+#else
+	struct dma_fence_cb fence_cb;
+	struct dma_fence *fence;
+#endif
+	struct kbase_jd_atom *katom;
+	struct list_head node;
+};
+
+/**
+ * kbase_fence_out_new() - Creates a new output fence and puts it on the atom
+ * @katom: Atom to create an output fence for
+ *
+ * Return: A new fence object on success, NULL on failure.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+struct fence *kbase_fence_out_new(struct kbase_jd_atom *katom);
+#else
+struct dma_fence *kbase_fence_out_new(struct kbase_jd_atom *katom);
+#endif
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_fence_in_set() - Assign input fence to atom
+ * @katom: Atom to assign input fence to
+ * @fence: Input fence to assign to atom
+ *
+ * This function will take ownership of one fence reference!
+ */
+#define kbase_fence_fence_in_set(katom, fence) \
+	do { \
+		WARN_ON((katom)->dma_fence.fence_in); \
+		(katom)->dma_fence.fence_in = fence; \
+	} while (0)
+#endif
+
+/**
+ * kbase_fence_out_remove() - Removes the output fence from atom
+ * @katom: Atom to remove output fence for
+ *
+ * This will also release the reference to this fence which the atom keeps
+ */
+static inline void kbase_fence_out_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->dma_fence.fence) {
+		dma_fence_put(katom->dma_fence.fence);
+		katom->dma_fence.fence = NULL;
+	}
+}
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_in_remove() - Removes the input fence from atom
+ * @katom: Atom to remove input fence for
+ *
+ * This will also release the reference to this fence which the atom keeps
+ */
+static inline void kbase_fence_in_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->dma_fence.fence_in) {
+		dma_fence_put(katom->dma_fence.fence_in);
+		katom->dma_fence.fence_in = NULL;
+	}
+}
+#endif
+
+/**
+ * kbase_fence_out_is_ours() - Check if atom has a valid fence created by us
+ * @katom: Atom to check output fence for
+ *
+ * Return: true if fence exists and is valid, otherwise false
+ */
+static inline bool kbase_fence_out_is_ours(struct kbase_jd_atom *katom)
+{
+	return katom->dma_fence.fence &&
+				katom->dma_fence.fence->ops == &kbase_fence_ops;
+}
+
+/**
+ * kbase_fence_out_signal() - Signal output fence of atom
+ * @katom: Atom to signal output fence for
+ * @status: Status to signal with (0 for success, < 0 for error)
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static inline int kbase_fence_out_signal(struct kbase_jd_atom *katom,
+					 int status)
+{
+	if (status) {
+#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE && \
+	  KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE)
+		fence_set_error(katom->dma_fence.fence, status);
+#elif (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE)
+		dma_fence_set_error(katom->dma_fence.fence, status);
+#else
+		katom->dma_fence.fence->status = status;
+#endif
+	}
+	return dma_fence_signal(katom->dma_fence.fence);
+}
+
+/**
+ * kbase_fence_add_callback() - Add callback on @fence to block @katom
+ * @katom: Pointer to katom that will be blocked by @fence
+ * @fence: Pointer to fence on which to set up the callback
+ * @callback: Pointer to function to be called when fence is signaled
+ *
+ * Caller needs to hold a reference to @fence when calling this function, and
+ * the caller is responsible for releasing that reference. An additional
+ * reference to @fence is taken when the callback is successfully set up, and
+ * @fence then needs to stay valid until the callback has been called and
+ * cleanup has been done.
+ *
+ * Return: 0 on success: fence was either already signaled, or callback was
+ * set up. Negative error code is returned on error.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+int kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			     struct fence *fence,
+			     fence_func_t callback);
+#else
+int kbase_fence_add_callback(struct kbase_jd_atom *katom,
+			     struct dma_fence *fence,
+			     dma_fence_func_t callback);
+#endif
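+
+/*
+ * Sketch of the reference rules above, mirroring what the dma-fence path in
+ * mali_kbase_dma_fence.c does: the caller passes in a fence it holds a
+ * reference to, drops its own reference afterwards in all cases, and only
+ * has to clean up the registered callbacks on error:
+ *
+ *	err = kbase_fence_add_callback(katom, fence, kbase_dma_fence_cb);
+ *	dma_fence_put(fence);
+ *	if (err)
+ *		kbase_fence_free_callbacks(katom);
+ */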
+
+/**
+ * kbase_fence_dep_count_set() - Set dep_count value on atom to specified value
+ * @katom: Atom to set dep_count for
+ * @val: value to set dep_count to
+ *
+ * The dep_count is available to the users of this module so that they can
+ * synchronize completion of the wait with cancellation and adding of more
+ * callbacks. For instance, a user could do the following:
+ *
+ * dep_count set to 1
+ * callback #1 added, dep_count is increased to 2
+ *                             callback #1 happens, dep_count decremented to 1
+ *                             since dep_count > 0, no completion is done
+ * callback #2 is added, dep_count is increased to 2
+ * dep_count decremented to 1
+ *                             callback #2 happens, dep_count decremented to 0
+ *                             since dep_count now is zero, completion executes
+ *
+ * The dep_count can also be used to make sure that the completion only
+ * executes once. This is typically done by setting dep_count to -1 for the
+ * thread that takes on this responsibility.
+ */
+static inline void
+kbase_fence_dep_count_set(struct kbase_jd_atom *katom, int val)
+{
+	atomic_set(&katom->dma_fence.dep_count, val);
+}
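+
+/*
+ * Sketch of the dep_count lifecycle described above, as driven by
+ * kbase_dma_fence_wait():
+ *
+ *	kbase_fence_dep_count_set(katom, 1);
+ *	// each successful kbase_fence_add_callback() increments dep_count
+ *	if (kbase_fence_dep_count_dec_and_test(katom)) {
+ *		// all fences already signaled: complete synchronously
+ *		kbase_fence_dep_count_set(katom, -1);
+ *		kbase_fence_free_callbacks(katom);
+ *	}
+ */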
+
+/**
+ * kbase_fence_dep_count_dec_and_test() - Decrements dep_count
+ * @katom: Atom to decrement dep_count for
+ *
+ * See @kbase_fence_dep_count_set for general description about dep_count
+ *
+ * Return: true if value was decremented to zero, otherwise false
+ */
+static inline bool
+kbase_fence_dep_count_dec_and_test(struct kbase_jd_atom *katom)
+{
+	return atomic_dec_and_test(&katom->dma_fence.dep_count);
+}
+
+/**
+ * kbase_fence_dep_count_read() - Returns the current dep_count value
+ * @katom: Pointer to katom
+ *
+ * See @kbase_fence_dep_count_set for general description about dep_count
+ *
+ * Return: The current dep_count value
+ */
+static inline int kbase_fence_dep_count_read(struct kbase_jd_atom *katom)
+{
+	return atomic_read(&katom->dma_fence.dep_count);
+}
+
+/**
+ * kbase_fence_free_callbacks() - Free dma-fence callbacks on a katom
+ * @katom: Pointer to katom
+ *
+ * This function will free all fence callbacks on the katom's list of
+ * callbacks. Callbacks that have not yet been called, because their fence
+ * hasn't yet signaled, will first be removed from the fence.
+ *
+ * Locking: jctx.lock must be held when accessing katom->dma_fence.callbacks.
+ *
+ * Return: true if dep_count reached 0, otherwise false.
+ */
+bool kbase_fence_free_callbacks(struct kbase_jd_atom *katom);
+
+#if defined(CONFIG_SYNC_FILE)
+/**
+ * kbase_fence_in_get() - Retrieve input fence for atom.
+ * @katom: Atom to get input fence from
+ *
+ * A ref will be taken for the fence, so use @kbase_fence_put() to release it
+ *
+ * Return: The fence, or NULL if there is no input fence for atom
+ */
+#define kbase_fence_in_get(katom) dma_fence_get((katom)->dma_fence.fence_in)
+#endif
+
+/**
+ * kbase_fence_out_get() - Retrieve output fence for atom.
+ * @katom: Atom to get output fence from
+ *
+ * A ref will be taken for the fence, so use @kbase_fence_put() to release it
+ *
+ * Return: The fence, or NULL if there is no output fence for atom
+ */
+#define kbase_fence_out_get(katom) dma_fence_get((katom)->dma_fence.fence)
+
+/**
+ * kbase_fence_put() - Releases a reference to a fence
+ * @fence: Fence to release reference for.
+ */
+#define kbase_fence_put(fence) dma_fence_put(fence)
+
+
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
+
+#endif /* _KBASE_FENCE_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h
new file mode 100644
index 0000000..607a95c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h
@@ -0,0 +1,68 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_FENCE_DEFS_H_
+#define _KBASE_FENCE_DEFS_H_
+
+/*
+ * There was a big rename in the 4.10 kernel (fence* -> dma_fence*).
+ * This file hides the resulting compatibility issues from the rest of the driver.
+ */
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+
+#include <linux/fence.h>
+
+#define dma_fence_context_alloc(a) fence_context_alloc(a)
+#define dma_fence_init(a, b, c, d, e) fence_init(a, b, c, d, e)
+#define dma_fence_get(a) fence_get(a)
+#define dma_fence_put(a) fence_put(a)
+#define dma_fence_signal(a) fence_signal(a)
+#define dma_fence_is_signaled(a) fence_is_signaled(a)
+#define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c)
+#define dma_fence_remove_callback(a, b) fence_remove_callback(a, b)
+
+#if (KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE)
+#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->error ?: 1 : 0)
+#else
+#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->status ?: 1 : 0)
+#endif
+
+#else
+
+#include <linux/dma-fence.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+#define dma_fence_get_status(a) (dma_fence_is_signaled(a) ? \
+	(a)->status ?: 1 \
+	: 0)
+#endif
+
+#endif /* < 4.10.0 */
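+
+/*
+ * With the shims above, the rest of the driver can call the dma_fence_*()
+ * names unconditionally; on kernels older than 4.10 they expand to the
+ * legacy fence_*() API, e.g. dma_fence_get()/dma_fence_put() become
+ * fence_get()/fence_put().
+ */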
+
+#endif /* CONFIG_MALI_DMA_FENCE || CONFIG_SYNC_FILE */
+
+#endif /* _KBASE_FENCE_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator.h b/drivers/gpu/arm/midgard/mali_kbase_gator.h
new file mode 100644
index 0000000..4f54817
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* NB taken from gator  */
+/*
+ * List of possible actions to be controlled by DS-5 Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping
+ * and s/w counter reporting. We cannot use the enums in mali_uk_types.h because
+ * they are unknown inside gator.
+ */
+#ifndef _KBASE_GATOR_H_
+#define _KBASE_GATOR_H_
+
+#ifdef CONFIG_MALI_GATOR_SUPPORT
+#define GATOR_MAKE_EVENT(type, number) (((type) << 24) | ((number) << 16))
+#define GATOR_JOB_SLOT_START 1
+#define GATOR_JOB_SLOT_STOP  2
+#define GATOR_JOB_SLOT_SOFT_STOPPED  3
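+
+/*
+ * For example, assuming "type" is one of the GATOR_JOB_SLOT_* actions above
+ * and "number" identifies the job slot, a "start" event on job slot 2 is
+ * encoded as GATOR_MAKE_EVENT(GATOR_JOB_SLOT_START, 2)
+ * == (1 << 24) | (2 << 16) == 0x01020000.
+ */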
+
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id);
+void kbase_trace_mali_pm_status(u32 event, u64 value);
+void kbase_trace_mali_pm_power_off(u32 event, u64 value);
+void kbase_trace_mali_pm_power_on(u32 event, u64 value);
+void kbase_trace_mali_page_fault_insert_pages(int event, u32 value);
+void kbase_trace_mali_mmu_as_in_use(int event);
+void kbase_trace_mali_mmu_as_released(int event);
+void kbase_trace_mali_total_alloc_pages_change(long long int event);
+
+#endif /* CONFIG_MALI_GATOR_SUPPORT */
+
+#endif  /* _KBASE_GATOR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.c b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
new file mode 100644
index 0000000..1719edf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
@@ -0,0 +1,390 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+#include "mali_kbase_mem_linux.h"
+#include "mali_kbase_gator_api.h"
+#include "mali_kbase_gator_hwcnt_names.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+
+#define MALI_MAX_CORES_PER_GROUP		4
+#define MALI_MAX_NUM_BLOCKS_PER_GROUP	8
+#define MALI_COUNTERS_PER_BLOCK			64
+#define MALI_BYTES_PER_COUNTER			4
+
+struct kbase_gator_hwcnt_handles {
+	struct kbase_device *kbdev;
+	struct kbase_hwcnt_virtualizer_client *hvcli;
+	struct kbase_hwcnt_enable_map enable_map;
+	struct kbase_hwcnt_dump_buffer dump_buf;
+	struct work_struct dump_work;
+	int dump_complete;
+	spinlock_t dump_lock;
+};
+
+static void dump_worker(struct work_struct *work);
+
+const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters)
+{
+	const char * const *hardware_counters;
+	struct kbase_device *kbdev;
+	uint32_t product_id;
+	uint32_t count;
+
+	if (!total_counters)
+		return NULL;
+
+	/* Get the first device - it doesn't matter in this case */
+	kbdev = kbase_find_device(-1);
+	if (!kbdev)
+		return NULL;
+
+	product_id = kbdev->gpu_props.props.core_props.product_id;
+
+	if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+		switch (GPU_ID2_MODEL_MATCH_VALUE(product_id)) {
+		case GPU_ID2_PRODUCT_TMIX:
+			hardware_counters = hardware_counters_mali_tMIx;
+			count = ARRAY_SIZE(hardware_counters_mali_tMIx);
+			break;
+		case GPU_ID2_PRODUCT_THEX:
+			hardware_counters = hardware_counters_mali_tHEx;
+			count = ARRAY_SIZE(hardware_counters_mali_tHEx);
+			break;
+		case GPU_ID2_PRODUCT_TSIX:
+			hardware_counters = hardware_counters_mali_tSIx;
+			count = ARRAY_SIZE(hardware_counters_mali_tSIx);
+			break;
+		case GPU_ID2_PRODUCT_TDVX:
+			hardware_counters = hardware_counters_mali_tSIx;
+			count = ARRAY_SIZE(hardware_counters_mali_tSIx);
+			break;
+		case GPU_ID2_PRODUCT_TNOX:
+			hardware_counters = hardware_counters_mali_tNOx;
+			count = ARRAY_SIZE(hardware_counters_mali_tNOx);
+			break;
+		case GPU_ID2_PRODUCT_TGOX:
+			hardware_counters = hardware_counters_mali_tGOx;
+			count = ARRAY_SIZE(hardware_counters_mali_tGOx);
+			break;
+		case GPU_ID2_PRODUCT_TKAX:
+			hardware_counters = hardware_counters_mali_tKAx;
+			count = ARRAY_SIZE(hardware_counters_mali_tKAx);
+			break;
+		case GPU_ID2_PRODUCT_TTRX:
+			hardware_counters = hardware_counters_mali_tTRx;
+			count = ARRAY_SIZE(hardware_counters_mali_tTRx);
+			break;
+		default:
+			hardware_counters = NULL;
+			count = 0;
+			dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+				product_id);
+			break;
+		}
+	} else {
+		switch (product_id) {
+			/* If we are using a Mali-T60x device */
+		case GPU_ID_PI_T60X:
+			hardware_counters = hardware_counters_mali_t60x;
+			count = ARRAY_SIZE(hardware_counters_mali_t60x);
+			break;
+			/* If we are using a Mali-T62x device */
+		case GPU_ID_PI_T62X:
+			hardware_counters = hardware_counters_mali_t62x;
+			count = ARRAY_SIZE(hardware_counters_mali_t62x);
+			break;
+			/* If we are using a Mali-T72x device */
+		case GPU_ID_PI_T72X:
+			hardware_counters = hardware_counters_mali_t72x;
+			count = ARRAY_SIZE(hardware_counters_mali_t72x);
+			break;
+			/* If we are using a Mali-T76x device */
+		case GPU_ID_PI_T76X:
+			hardware_counters = hardware_counters_mali_t76x;
+			count = ARRAY_SIZE(hardware_counters_mali_t76x);
+			break;
+			/* If we are using a Mali-T82x device */
+		case GPU_ID_PI_T82X:
+			hardware_counters = hardware_counters_mali_t82x;
+			count = ARRAY_SIZE(hardware_counters_mali_t82x);
+			break;
+			/* If we are using a Mali-T83x device */
+		case GPU_ID_PI_T83X:
+			hardware_counters = hardware_counters_mali_t83x;
+			count = ARRAY_SIZE(hardware_counters_mali_t83x);
+			break;
+			/* If we are using a Mali-T86x device */
+		case GPU_ID_PI_T86X:
+			hardware_counters = hardware_counters_mali_t86x;
+			count = ARRAY_SIZE(hardware_counters_mali_t86x);
+			break;
+			/* If we are using a Mali-T88x device */
+		case GPU_ID_PI_TFRX:
+			hardware_counters = hardware_counters_mali_t88x;
+			count = ARRAY_SIZE(hardware_counters_mali_t88x);
+			break;
+		default:
+			hardware_counters = NULL;
+			count = 0;
+			dev_err(kbdev->dev, "Unrecognized product ID: %u\n",
+				product_id);
+			break;
+		}
+	}
+
+	/* Release the kbdev reference. */
+	kbase_release_device(kbdev);
+
+	*total_counters = count;
+
+	/* If we return a string array take a reference on the module (or fail). */
+	if (hardware_counters && !try_module_get(THIS_MODULE))
+		return NULL;
+
+	return hardware_counters;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init_names);
+
+void kbase_gator_hwcnt_term_names(void)
+{
+	/* Release the module reference. */
+	module_put(THIS_MODULE);
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names);
+
+struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info)
+{
+	int errcode;
+	struct kbase_gator_hwcnt_handles *hand;
+	const struct kbase_hwcnt_metadata *metadata;
+	struct kbase_hwcnt_physical_enable_map phys_map;
+	uint32_t dump_size = 0, i = 0;
+
+	if (!in_out_info)
+		return NULL;
+
+	hand = kzalloc(sizeof(*hand), GFP_KERNEL);
+	if (!hand)
+		return NULL;
+
+	INIT_WORK(&hand->dump_work, dump_worker);
+	spin_lock_init(&hand->dump_lock);
+
+	/* Get the first device */
+	hand->kbdev = kbase_find_device(-1);
+	if (!hand->kbdev)
+		goto free_hand;
+
+	metadata = kbase_hwcnt_virtualizer_metadata(
+		hand->kbdev->hwcnt_gpu_virt);
+	if (!metadata)
+		goto release_device;
+
+	errcode = kbase_hwcnt_enable_map_alloc(metadata, &hand->enable_map);
+	if (errcode)
+		goto release_device;
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(metadata, &hand->dump_buf);
+	if (errcode)
+		goto free_enable_map;
+
+	in_out_info->kernel_dump_buffer = hand->dump_buf.dump_buf;
+
+	in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores;
+	in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups;
+	in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id;
+
+	/* If we are using a v4 device (Mali-T6xx or Mali-T72x) */
+	if (kbase_hw_has_feature(hand->kbdev, BASE_HW_FEATURE_V4)) {
+		uint32_t cg, j;
+		uint64_t core_mask;
+
+		/* There are 8 hardware counter blocks per core group */
+		in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) *
+			MALI_MAX_NUM_BLOCKS_PER_GROUP *
+			in_out_info->nr_core_groups, GFP_KERNEL);
+
+		if (!in_out_info->hwc_layout)
+			goto free_dump_buf;
+
+		dump_size = in_out_info->nr_core_groups *
+			MALI_MAX_NUM_BLOCKS_PER_GROUP *
+			MALI_COUNTERS_PER_BLOCK *
+			MALI_BYTES_PER_COUNTER;
+
+		for (cg = 0; cg < in_out_info->nr_core_groups; cg++) {
+			core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask;
+
+			for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) {
+				if (core_mask & (1u << j))
+					in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+				else
+					in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+			}
+
+			in_out_info->hwc_layout[i++] = TILER_BLOCK;
+			in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+			in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+
+			if (0 == cg)
+				in_out_info->hwc_layout[i++] = JM_BLOCK;
+			else
+				in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+		}
+	/* If we are using any other device */
+	} else {
+		uint32_t nr_l2, nr_sc_bits, j;
+		uint64_t core_mask;
+
+		nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices;
+
+		core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+
+		nr_sc_bits = fls64(core_mask);
+
+		/* The job manager and tiler sets of counters
+		 * are always present */
+		in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc_bits + nr_l2), GFP_KERNEL);
+
+		if (!in_out_info->hwc_layout)
+			goto free_dump_buf;
+
+		dump_size = (2 + nr_sc_bits + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER;
+
+		in_out_info->hwc_layout[i++] = JM_BLOCK;
+		in_out_info->hwc_layout[i++] = TILER_BLOCK;
+
+		for (j = 0; j < nr_l2; j++)
+			in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+		while (core_mask != 0ull) {
+			if ((core_mask & 1ull) != 0ull)
+				in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+			else
+				in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+			core_mask >>= 1;
+		}
+	}
+
+	/* Calculated dump size must be the same as real dump size */
+	if (WARN_ON(dump_size != metadata->dump_buf_bytes))
+		goto free_layout;
+
+	in_out_info->nr_hwc_blocks = i;
+	in_out_info->size = dump_size;
+
+	phys_map.jm_bm = in_out_info->bitmask[JM_BLOCK];
+	phys_map.tiler_bm = in_out_info->bitmask[TILER_BLOCK];
+	phys_map.shader_bm = in_out_info->bitmask[SHADER_BLOCK];
+	phys_map.mmu_l2_bm = in_out_info->bitmask[MMU_L2_BLOCK];
+	kbase_hwcnt_gpu_enable_map_from_physical(&hand->enable_map, &phys_map);
+	errcode = kbase_hwcnt_virtualizer_client_create(
+		hand->kbdev->hwcnt_gpu_virt, &hand->enable_map, &hand->hvcli);
+	if (errcode) {
+		dev_err(hand->kbdev->dev,
+			"Failed to register gator with hwcnt virtualizer core");
+		goto free_layout;
+	}
+
+	return hand;
+
+free_layout:
+	kfree(in_out_info->hwc_layout);
+free_dump_buf:
+	kbase_hwcnt_dump_buffer_free(&hand->dump_buf);
+free_enable_map:
+	kbase_hwcnt_enable_map_free(&hand->enable_map);
+release_device:
+	kbase_release_device(hand->kbdev);
+free_hand:
+	kfree(hand);
+	return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init);
+
+void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+	if (in_out_info)
+		kfree(in_out_info->hwc_layout);
+
+	if (opaque_handles) {
+		cancel_work_sync(&opaque_handles->dump_work);
+		kbase_hwcnt_virtualizer_client_destroy(opaque_handles->hvcli);
+		kbase_hwcnt_dump_buffer_free(&opaque_handles->dump_buf);
+		kbase_hwcnt_enable_map_free(&opaque_handles->enable_map);
+		kbase_release_device(opaque_handles->kbdev);
+		kfree(opaque_handles);
+	}
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term);
+
+static void dump_worker(struct work_struct *work)
+{
+	int errcode;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+	struct kbase_gator_hwcnt_handles *hand;
+
+	hand = container_of(work, struct kbase_gator_hwcnt_handles, dump_work);
+	errcode = kbase_hwcnt_virtualizer_client_dump(
+		hand->hvcli, &ts_start_ns, &ts_end_ns, &hand->dump_buf);
+	if (!errcode) {
+		/* Patch the header to hide other client's counter choices */
+		kbase_hwcnt_gpu_patch_dump_headers(
+			&hand->dump_buf, &hand->enable_map);
+		/* Zero all non-enabled counters (currently undefined values) */
+		kbase_hwcnt_dump_buffer_zero_non_enabled(
+			&hand->dump_buf, &hand->enable_map);
+		spin_lock_bh(&hand->dump_lock);
+		hand->dump_complete = 1;
+		spin_unlock_bh(&hand->dump_lock);
+	} else {
+		schedule_work(&hand->dump_work);
+	}
+}
+
+uint32_t kbase_gator_instr_hwcnt_dump_complete(
+		struct kbase_gator_hwcnt_handles *opaque_handles,
+		uint32_t * const success)
+{
+
+	if (opaque_handles && success) {
+		*success = opaque_handles->dump_complete;
+		opaque_handles->dump_complete = 0;
+		return *success;
+	}
+	return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_complete);
+
+uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+	if (opaque_handles)
+		schedule_work(&opaque_handles->dump_work);
+	return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_irq);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.h b/drivers/gpu/arm/midgard/mali_kbase_gator_api.h
new file mode 100644
index 0000000..bd0589e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_api.h
@@ -0,0 +1,224 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_GATOR_API_H_
+#define _KBASE_GATOR_API_H_
+
+/**
+ * @brief This file describes the API used by Gator to fetch hardware counters.
+ */
+
+/* This define is used when compiling the gator kernel module to select which
+ * DDK API calling convention to use. If not defined (legacy DDK) gator
+ * assumes version 1. The version to DDK release mapping is:
+ *     Version 1 API: DDK versions r1px, r2px
+ *     Version 2 API: DDK versions r3px, r4px
+ *     Version 3 API: DDK version r5p0 and newer
+ *
+ * API Usage
+ * =========
+ *
+ * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter
+ * names for the GPU present in this device.
+ *
+ * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for
+ * the counters you want enabled. The enables can all be set for simplicity in
+ * most use cases, but disabling some will let you minimize bandwidth impact.
+ *
+ * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a
+ * counter context. On successful return the DDK will have populated the
+ * structure with a variety of useful information.
+ *
+ * 4] Call kbase_gator_instr_hwcnt_dump_irq() to queue a non-blocking request
+ * for a counter dump. If this returns a non-zero value the request has been
+ * queued, otherwise the driver has been unable to do so (typically because
+ * another user of the instrumentation exists concurrently).
+ *
+ * 5] Call kbase_gator_instr_hwcnt_dump_complete() to test whether the
+ * previously requested dump has completed. If this returns non-zero the
+ * counter dump has completed, but the value of *success must also be tested
+ * as the dump may not have been successful. If it returns zero the counter
+ * dump was abandoned due to the device being busy (typically because
+ * another user of the instrumentation exists concurrently).
+ * 6] Process the counters stored in the buffer pointed to by ...
+ *
+ *        kbase_gator_hwcnt_info->kernel_dump_buffer
+ *
+ *    In pseudo code you can find all of the counters via this approach:
+ *
+ *
+ *        hwcnt_info # pointer to kbase_gator_hwcnt_info structure
+ *        hwcnt_name # pointer to name list
+ *
+ *        u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer
+ *
+ *        # Iterate over each 64-counter block in this GPU configuration
+ *        for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) {
+ *            hwc_type type = hwcnt_info->hwc_layout[i];
+ *
+ *            # Skip reserved type blocks - they contain no counters at all
+ *            if( type == RESERVED_BLOCK ) {
+ *                continue;
+ *            }
+ *
+ *            size_t name_offset = type * 64;
+ *            size_t data_offset = i * 64;
+ *
+ *            # Iterate over the names of the counters in this block type
+ *            for( j = 0; j < 64; j++) {
+ *                const char * name = hwcnt_name[name_offset+j];
+ *
+ *                # Skip empty name strings - there is no counter here
+ *                if( name[0] == '\0' ) {
+ *                    continue;
+ *                }
+ *
+ *                u32 data = hwcnt_data[data_offset+j];
+ *
+ *                printk( "COUNTER: %s DATA: %u\n", name, data );
+ *            }
+ *        }
+ *
+ *
+ *     Note that in most implementations you typically want to either SUM or
+ *     AVERAGE multiple instances of the same counter if, for example, you have
+ *     multiple shader cores or multiple L2 caches. The most sensible view for
+ *     analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU
+ *     counters.
+ *
+ * 7] Goto 4, repeating until you want to stop collecting counters.
+ *
+ * 8] Release the dump resources by calling kbase_gator_hwcnt_term().
+ *
+ * 9] Release the name table resources by calling
+ *    kbase_gator_hwcnt_term_names(). This function must only be called if
+ *    init_names() returned a non-NULL value.
+ **/
+
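+/*
+ * A minimal sketch of the sequence above, assuming a caller living inside
+ * the gator module. The helper name gator_collect_once() is hypothetical and
+ * error handling is reduced to early returns; the per-counter processing of
+ * step 6 is elided.
+ *
+ *     static int gator_collect_once(void)
+ *     {
+ *         uint32_t total_counters, success;
+ *         const char * const *names;
+ *         struct kbase_gator_hwcnt_info info = { { 0 } };
+ *         struct kbase_gator_hwcnt_handles *handles;
+ *         int block;
+ *
+ *         names = kbase_gator_hwcnt_init_names(&total_counters);
+ *         if (!names)
+ *             return -ENODEV;
+ *
+ *         // Enable every counter in every block class for simplicity
+ *         for (block = JM_BLOCK; block <= MMU_L2_BLOCK; block++)
+ *             info.bitmask[block] = 0xffff;
+ *
+ *         handles = kbase_gator_hwcnt_init(&info);
+ *         if (!handles) {
+ *             kbase_gator_hwcnt_term_names();
+ *             return -ENODEV;
+ *         }
+ *
+ *         kbase_gator_instr_hwcnt_dump_irq(handles);
+ *         // A production caller would bound this poll
+ *         while (!kbase_gator_instr_hwcnt_dump_complete(handles, &success))
+ *             msleep(1);
+ *
+ *         // Walk info.kernel_dump_buffer here as shown in step 6 above
+ *
+ *         kbase_gator_hwcnt_term(&info, handles);
+ *         kbase_gator_hwcnt_term_names();
+ *         return success ? 0 : -EBUSY;
+ *     }
+ */
+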
+#define MALI_DDK_GATOR_API_VERSION 3
+
+enum hwc_type {
+	JM_BLOCK = 0,
+	TILER_BLOCK,
+	SHADER_BLOCK,
+	MMU_L2_BLOCK,
+	RESERVED_BLOCK
+};
+
+struct kbase_gator_hwcnt_info {
+	/* Passed from Gator to kbase */
+
+	/* the bitmask of enabled hardware counters for each counter block */
+	uint16_t bitmask[4];
+
+	/* Passed from kbase to Gator */
+
+	/* ptr to counter dump memory */
+	void *kernel_dump_buffer;
+
+	/* size of counter dump memory */
+	uint32_t size;
+
+	/* the ID of the Mali device */
+	uint32_t gpu_id;
+
+	/* the number of shader cores in the GPU */
+	uint32_t nr_cores;
+
+	/* the number of core groups */
+	uint32_t nr_core_groups;
+
+	/* the memory layout of the performance counters */
+	enum hwc_type *hwc_layout;
+
+	/* the total number of hardware counter blocks */
+	uint32_t nr_hwc_blocks;
+};
+
+/**
+ * @brief Opaque block of Mali data which Gator needs to return to the API later.
+ */
+struct kbase_gator_hwcnt_handles;
+
+/**
+ * @brief Initialize the resources Gator needs for performance profiling.
+ *
+ * @param in_out_info   A pointer to a structure containing the enabled counters passed from Gator and all the Mali
+ *                      specific information that will be returned to Gator. On entry Gator must have populated the
+ *                      'bitmask' field with the counters it wishes to enable for each class of counter block.
+ *                      Each entry in the array corresponds to a single counter class based on the "hwc_type"
+ *                      enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables
+ *                      the first 4 counters in the block, and so on). See the GPU counter array as returned by
+ *                      kbase_gator_hwcnt_init_names() for the index values of each counter for the current GPU.
+ *
+ * @return              Pointer to an opaque handle block on success, NULL on error.
+ */
+extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info);
+
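+/*
+ * Worked example (illustrative only): enabling just the counter at index 6
+ * of each shader core block. Because each bit covers four sequential
+ * counters, bit (6 / 4) = 1 must be set, which also captures counters 4-7:
+ *
+ *     struct kbase_gator_hwcnt_info info = { { 0 } };
+ *
+ *     info.bitmask[SHADER_BLOCK] = 1u << (6 / 4);
+ *
+ * The other entries of 'bitmask' stay zero, so no Job Manager, Tiler or
+ * MMU/L2 counters are collected.
+ */
+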
+/**
+ * @brief Free all resources once Gator has finished using performance counters.
+ *
+ * @param in_out_info       A pointer to a structure containing the enabled counters passed from Gator and all the
+ *                          Mali specific information that will be returned to Gator.
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ */
+extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Poll whether a counter dump is successful.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ * @param[out] success      Non-zero on success, zero on failure.
+ *
+ * @return                  Zero if the dump is still pending, non-zero if the dump has completed. Note that a
+ *                          completed dump may not have dumped successfully, so the caller must test for both
+ *                          a completed and successful dump before processing counters.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success);
+
+/**
+ * @brief Request the generation of a new counter dump.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ *
+ * @return                  Zero if the hardware device is busy and cannot handle the request, non-zero otherwise.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief This function is used to fetch the names table based on the Mali device in use.
+ *
+ * @param[out] total_counters The total number of counter short names in the Mali device's list.
+ *
+ * @return                    Pointer to an array of strings of length *total_counters.
+ */
+extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters);
+
+/**
+ * @brief This function is used to terminate the use of the names table.
+ *
+ * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value.
+ */
+extern void kbase_gator_hwcnt_term_names(void);
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
new file mode 100644
index 0000000..5d38c7b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
@@ -0,0 +1,2178 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_H_
+#define _KBASE_GATOR_HWCNT_NAMES_H_
+
+/*
+ * "Short names" for hardware counters used by Streamline. Counters names are
+ * stored in accordance with their memory layout in the binary counter block
+ * emitted by the Mali GPU. Each "master" in the GPU emits a fixed-size block
+ * of 64 counters, and each GPU implements the same set of "masters" although
+ * the counters each master exposes within its block of 64 may vary.
+ *
+ * Entries which are an empty string are simply "holes" in the counter memory
+ * where no counter exists.
+ */
+
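+/*
+ * For example (indexing sketch only): with JM_BLOCK == 0, entry
+ * hardware_counters_mali_t60x[JM_BLOCK * 64 + 6] below is "T60x_GPU_ACTIVE";
+ * the empty strings around it are holes with no counter.
+ */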
+static const char * const hardware_counters_mali_t60x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T60x_MESSAGES_SENT",
+	"T60x_MESSAGES_RECEIVED",
+	"T60x_GPU_ACTIVE",
+	"T60x_IRQ_ACTIVE",
+	"T60x_JS0_JOBS",
+	"T60x_JS0_TASKS",
+	"T60x_JS0_ACTIVE",
+	"",
+	"T60x_JS0_WAIT_READ",
+	"T60x_JS0_WAIT_ISSUE",
+	"T60x_JS0_WAIT_DEPEND",
+	"T60x_JS0_WAIT_FINISH",
+	"T60x_JS1_JOBS",
+	"T60x_JS1_TASKS",
+	"T60x_JS1_ACTIVE",
+	"",
+	"T60x_JS1_WAIT_READ",
+	"T60x_JS1_WAIT_ISSUE",
+	"T60x_JS1_WAIT_DEPEND",
+	"T60x_JS1_WAIT_FINISH",
+	"T60x_JS2_JOBS",
+	"T60x_JS2_TASKS",
+	"T60x_JS2_ACTIVE",
+	"",
+	"T60x_JS2_WAIT_READ",
+	"T60x_JS2_WAIT_ISSUE",
+	"T60x_JS2_WAIT_DEPEND",
+	"T60x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T60x_TI_JOBS_PROCESSED",
+	"T60x_TI_TRIANGLES",
+	"T60x_TI_QUADS",
+	"T60x_TI_POLYGONS",
+	"T60x_TI_POINTS",
+	"T60x_TI_LINES",
+	"T60x_TI_VCACHE_HIT",
+	"T60x_TI_VCACHE_MISS",
+	"T60x_TI_FRONT_FACING",
+	"T60x_TI_BACK_FACING",
+	"T60x_TI_PRIM_VISIBLE",
+	"T60x_TI_PRIM_CULLED",
+	"T60x_TI_PRIM_CLIPPED",
+	"T60x_TI_LEVEL0",
+	"T60x_TI_LEVEL1",
+	"T60x_TI_LEVEL2",
+	"T60x_TI_LEVEL3",
+	"T60x_TI_LEVEL4",
+	"T60x_TI_LEVEL5",
+	"T60x_TI_LEVEL6",
+	"T60x_TI_LEVEL7",
+	"T60x_TI_COMMAND_1",
+	"T60x_TI_COMMAND_2",
+	"T60x_TI_COMMAND_3",
+	"T60x_TI_COMMAND_4",
+	"T60x_TI_COMMAND_4_7",
+	"T60x_TI_COMMAND_8_15",
+	"T60x_TI_COMMAND_16_63",
+	"T60x_TI_COMMAND_64",
+	"T60x_TI_COMPRESS_IN",
+	"T60x_TI_COMPRESS_OUT",
+	"T60x_TI_COMPRESS_FLUSH",
+	"T60x_TI_TIMESTAMPS",
+	"T60x_TI_PCACHE_HIT",
+	"T60x_TI_PCACHE_MISS",
+	"T60x_TI_PCACHE_LINE",
+	"T60x_TI_PCACHE_STALL",
+	"T60x_TI_WRBUF_HIT",
+	"T60x_TI_WRBUF_MISS",
+	"T60x_TI_WRBUF_LINE",
+	"T60x_TI_WRBUF_PARTIAL",
+	"T60x_TI_WRBUF_STALL",
+	"T60x_TI_ACTIVE",
+	"T60x_TI_LOADING_DESC",
+	"T60x_TI_INDEX_WAIT",
+	"T60x_TI_INDEX_RANGE_WAIT",
+	"T60x_TI_VERTEX_WAIT",
+	"T60x_TI_PCACHE_WAIT",
+	"T60x_TI_WRBUF_WAIT",
+	"T60x_TI_BUS_READ",
+	"T60x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T60x_TI_UTLB_STALL",
+	"T60x_TI_UTLB_REPLAY_MISS",
+	"T60x_TI_UTLB_REPLAY_FULL",
+	"T60x_TI_UTLB_NEW_MISS",
+	"T60x_TI_UTLB_HIT",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T60x_FRAG_ACTIVE",
+	"T60x_FRAG_PRIMITIVES",
+	"T60x_FRAG_PRIMITIVES_DROPPED",
+	"T60x_FRAG_CYCLES_DESC",
+	"T60x_FRAG_CYCLES_PLR",
+	"T60x_FRAG_CYCLES_VERT",
+	"T60x_FRAG_CYCLES_TRISETUP",
+	"T60x_FRAG_CYCLES_RAST",
+	"T60x_FRAG_THREADS",
+	"T60x_FRAG_DUMMY_THREADS",
+	"T60x_FRAG_QUADS_RAST",
+	"T60x_FRAG_QUADS_EZS_TEST",
+	"T60x_FRAG_QUADS_EZS_KILLED",
+	"T60x_FRAG_THREADS_LZS_TEST",
+	"T60x_FRAG_THREADS_LZS_KILLED",
+	"T60x_FRAG_CYCLES_NO_TILE",
+	"T60x_FRAG_NUM_TILES",
+	"T60x_FRAG_TRANS_ELIM",
+	"T60x_COMPUTE_ACTIVE",
+	"T60x_COMPUTE_TASKS",
+	"T60x_COMPUTE_THREADS",
+	"T60x_COMPUTE_CYCLES_DESC",
+	"T60x_TRIPIPE_ACTIVE",
+	"T60x_ARITH_WORDS",
+	"T60x_ARITH_CYCLES_REG",
+	"T60x_ARITH_CYCLES_L0",
+	"T60x_ARITH_FRAG_DEPEND",
+	"T60x_LS_WORDS",
+	"T60x_LS_ISSUES",
+	"T60x_LS_RESTARTS",
+	"T60x_LS_REISSUES_MISS",
+	"T60x_LS_REISSUES_VD",
+	"T60x_LS_REISSUE_ATTRIB_MISS",
+	"T60x_LS_NO_WB",
+	"T60x_TEX_WORDS",
+	"T60x_TEX_BUBBLES",
+	"T60x_TEX_WORDS_L0",
+	"T60x_TEX_WORDS_DESC",
+	"T60x_TEX_ISSUES",
+	"T60x_TEX_RECIRC_FMISS",
+	"T60x_TEX_RECIRC_DESC",
+	"T60x_TEX_RECIRC_MULTI",
+	"T60x_TEX_RECIRC_PMISS",
+	"T60x_TEX_RECIRC_CONF",
+	"T60x_LSC_READ_HITS",
+	"T60x_LSC_READ_MISSES",
+	"T60x_LSC_WRITE_HITS",
+	"T60x_LSC_WRITE_MISSES",
+	"T60x_LSC_ATOMIC_HITS",
+	"T60x_LSC_ATOMIC_MISSES",
+	"T60x_LSC_LINE_FETCHES",
+	"T60x_LSC_DIRTY_LINE",
+	"T60x_LSC_SNOOPS",
+	"T60x_AXI_TLB_STALL",
+	"T60x_AXI_TLB_MISS",
+	"T60x_AXI_TLB_TRANSACTION",
+	"T60x_LS_TLB_MISS",
+	"T60x_LS_TLB_HIT",
+	"T60x_AXI_BEATS_READ",
+	"T60x_AXI_BEATS_WRITTEN",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T60x_MMU_HIT",
+	"T60x_MMU_NEW_MISS",
+	"T60x_MMU_REPLAY_FULL",
+	"T60x_MMU_REPLAY_MISS",
+	"T60x_MMU_TABLE_WALK",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T60x_UTLB_HIT",
+	"T60x_UTLB_NEW_MISS",
+	"T60x_UTLB_REPLAY_FULL",
+	"T60x_UTLB_REPLAY_MISS",
+	"T60x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T60x_L2_EXT_WRITE_BEATS",
+	"T60x_L2_EXT_READ_BEATS",
+	"T60x_L2_ANY_LOOKUP",
+	"T60x_L2_READ_LOOKUP",
+	"T60x_L2_SREAD_LOOKUP",
+	"T60x_L2_READ_REPLAY",
+	"T60x_L2_READ_SNOOP",
+	"T60x_L2_READ_HIT",
+	"T60x_L2_CLEAN_MISS",
+	"T60x_L2_WRITE_LOOKUP",
+	"T60x_L2_SWRITE_LOOKUP",
+	"T60x_L2_WRITE_REPLAY",
+	"T60x_L2_WRITE_SNOOP",
+	"T60x_L2_WRITE_HIT",
+	"T60x_L2_EXT_READ_FULL",
+	"T60x_L2_EXT_READ_HALF",
+	"T60x_L2_EXT_WRITE_FULL",
+	"T60x_L2_EXT_WRITE_HALF",
+	"T60x_L2_EXT_READ",
+	"T60x_L2_EXT_READ_LINE",
+	"T60x_L2_EXT_WRITE",
+	"T60x_L2_EXT_WRITE_LINE",
+	"T60x_L2_EXT_WRITE_SMALL",
+	"T60x_L2_EXT_BARRIER",
+	"T60x_L2_EXT_AR_STALL",
+	"T60x_L2_EXT_R_BUF_FULL",
+	"T60x_L2_EXT_RD_BUF_FULL",
+	"T60x_L2_EXT_R_RAW",
+	"T60x_L2_EXT_W_STALL",
+	"T60x_L2_EXT_W_BUF_FULL",
+	"T60x_L2_EXT_R_W_HAZARD",
+	"T60x_L2_TAG_HAZARD",
+	"T60x_L2_SNOOP_FULL",
+	"T60x_L2_REPLAY_FULL"
+};
+static const char * const hardware_counters_mali_t62x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T62x_MESSAGES_SENT",
+	"T62x_MESSAGES_RECEIVED",
+	"T62x_GPU_ACTIVE",
+	"T62x_IRQ_ACTIVE",
+	"T62x_JS0_JOBS",
+	"T62x_JS0_TASKS",
+	"T62x_JS0_ACTIVE",
+	"",
+	"T62x_JS0_WAIT_READ",
+	"T62x_JS0_WAIT_ISSUE",
+	"T62x_JS0_WAIT_DEPEND",
+	"T62x_JS0_WAIT_FINISH",
+	"T62x_JS1_JOBS",
+	"T62x_JS1_TASKS",
+	"T62x_JS1_ACTIVE",
+	"",
+	"T62x_JS1_WAIT_READ",
+	"T62x_JS1_WAIT_ISSUE",
+	"T62x_JS1_WAIT_DEPEND",
+	"T62x_JS1_WAIT_FINISH",
+	"T62x_JS2_JOBS",
+	"T62x_JS2_TASKS",
+	"T62x_JS2_ACTIVE",
+	"",
+	"T62x_JS2_WAIT_READ",
+	"T62x_JS2_WAIT_ISSUE",
+	"T62x_JS2_WAIT_DEPEND",
+	"T62x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T62x_TI_JOBS_PROCESSED",
+	"T62x_TI_TRIANGLES",
+	"T62x_TI_QUADS",
+	"T62x_TI_POLYGONS",
+	"T62x_TI_POINTS",
+	"T62x_TI_LINES",
+	"T62x_TI_VCACHE_HIT",
+	"T62x_TI_VCACHE_MISS",
+	"T62x_TI_FRONT_FACING",
+	"T62x_TI_BACK_FACING",
+	"T62x_TI_PRIM_VISIBLE",
+	"T62x_TI_PRIM_CULLED",
+	"T62x_TI_PRIM_CLIPPED",
+	"T62x_TI_LEVEL0",
+	"T62x_TI_LEVEL1",
+	"T62x_TI_LEVEL2",
+	"T62x_TI_LEVEL3",
+	"T62x_TI_LEVEL4",
+	"T62x_TI_LEVEL5",
+	"T62x_TI_LEVEL6",
+	"T62x_TI_LEVEL7",
+	"T62x_TI_COMMAND_1",
+	"T62x_TI_COMMAND_2",
+	"T62x_TI_COMMAND_3",
+	"T62x_TI_COMMAND_4",
+	"T62x_TI_COMMAND_5_7",
+	"T62x_TI_COMMAND_8_15",
+	"T62x_TI_COMMAND_16_63",
+	"T62x_TI_COMMAND_64",
+	"T62x_TI_COMPRESS_IN",
+	"T62x_TI_COMPRESS_OUT",
+	"T62x_TI_COMPRESS_FLUSH",
+	"T62x_TI_TIMESTAMPS",
+	"T62x_TI_PCACHE_HIT",
+	"T62x_TI_PCACHE_MISS",
+	"T62x_TI_PCACHE_LINE",
+	"T62x_TI_PCACHE_STALL",
+	"T62x_TI_WRBUF_HIT",
+	"T62x_TI_WRBUF_MISS",
+	"T62x_TI_WRBUF_LINE",
+	"T62x_TI_WRBUF_PARTIAL",
+	"T62x_TI_WRBUF_STALL",
+	"T62x_TI_ACTIVE",
+	"T62x_TI_LOADING_DESC",
+	"T62x_TI_INDEX_WAIT",
+	"T62x_TI_INDEX_RANGE_WAIT",
+	"T62x_TI_VERTEX_WAIT",
+	"T62x_TI_PCACHE_WAIT",
+	"T62x_TI_WRBUF_WAIT",
+	"T62x_TI_BUS_READ",
+	"T62x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T62x_TI_UTLB_STALL",
+	"T62x_TI_UTLB_REPLAY_MISS",
+	"T62x_TI_UTLB_REPLAY_FULL",
+	"T62x_TI_UTLB_NEW_MISS",
+	"T62x_TI_UTLB_HIT",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"T62x_SHADER_CORE_ACTIVE",
+	"T62x_FRAG_ACTIVE",
+	"T62x_FRAG_PRIMITIVES",
+	"T62x_FRAG_PRIMITIVES_DROPPED",
+	"T62x_FRAG_CYCLES_DESC",
+	"T62x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T62x_FRAG_CYCLES_VERT",
+	"T62x_FRAG_CYCLES_TRISETUP",
+	"T62x_FRAG_CYCLES_EZS_ACTIVE",
+	"T62x_FRAG_THREADS",
+	"T62x_FRAG_DUMMY_THREADS",
+	"T62x_FRAG_QUADS_RAST",
+	"T62x_FRAG_QUADS_EZS_TEST",
+	"T62x_FRAG_QUADS_EZS_KILLED",
+	"T62x_FRAG_THREADS_LZS_TEST",
+	"T62x_FRAG_THREADS_LZS_KILLED",
+	"T62x_FRAG_CYCLES_NO_TILE",
+	"T62x_FRAG_NUM_TILES",
+	"T62x_FRAG_TRANS_ELIM",
+	"T62x_COMPUTE_ACTIVE",
+	"T62x_COMPUTE_TASKS",
+	"T62x_COMPUTE_THREADS",
+	"T62x_COMPUTE_CYCLES_DESC",
+	"T62x_TRIPIPE_ACTIVE",
+	"T62x_ARITH_WORDS",
+	"T62x_ARITH_CYCLES_REG",
+	"T62x_ARITH_CYCLES_L0",
+	"T62x_ARITH_FRAG_DEPEND",
+	"T62x_LS_WORDS",
+	"T62x_LS_ISSUES",
+	"T62x_LS_RESTARTS",
+	"T62x_LS_REISSUES_MISS",
+	"T62x_LS_REISSUES_VD",
+	"T62x_LS_REISSUE_ATTRIB_MISS",
+	"T62x_LS_NO_WB",
+	"T62x_TEX_WORDS",
+	"T62x_TEX_BUBBLES",
+	"T62x_TEX_WORDS_L0",
+	"T62x_TEX_WORDS_DESC",
+	"T62x_TEX_ISSUES",
+	"T62x_TEX_RECIRC_FMISS",
+	"T62x_TEX_RECIRC_DESC",
+	"T62x_TEX_RECIRC_MULTI",
+	"T62x_TEX_RECIRC_PMISS",
+	"T62x_TEX_RECIRC_CONF",
+	"T62x_LSC_READ_HITS",
+	"T62x_LSC_READ_MISSES",
+	"T62x_LSC_WRITE_HITS",
+	"T62x_LSC_WRITE_MISSES",
+	"T62x_LSC_ATOMIC_HITS",
+	"T62x_LSC_ATOMIC_MISSES",
+	"T62x_LSC_LINE_FETCHES",
+	"T62x_LSC_DIRTY_LINE",
+	"T62x_LSC_SNOOPS",
+	"T62x_AXI_TLB_STALL",
+	"T62x_AXI_TLB_MISS",
+	"T62x_AXI_TLB_TRANSACTION",
+	"T62x_LS_TLB_MISS",
+	"T62x_LS_TLB_HIT",
+	"T62x_AXI_BEATS_READ",
+	"T62x_AXI_BEATS_WRITTEN",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T62x_MMU_HIT",
+	"T62x_MMU_NEW_MISS",
+	"T62x_MMU_REPLAY_FULL",
+	"T62x_MMU_REPLAY_MISS",
+	"T62x_MMU_TABLE_WALK",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T62x_UTLB_HIT",
+	"T62x_UTLB_NEW_MISS",
+	"T62x_UTLB_REPLAY_FULL",
+	"T62x_UTLB_REPLAY_MISS",
+	"T62x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T62x_L2_EXT_WRITE_BEATS",
+	"T62x_L2_EXT_READ_BEATS",
+	"T62x_L2_ANY_LOOKUP",
+	"T62x_L2_READ_LOOKUP",
+	"T62x_L2_SREAD_LOOKUP",
+	"T62x_L2_READ_REPLAY",
+	"T62x_L2_READ_SNOOP",
+	"T62x_L2_READ_HIT",
+	"T62x_L2_CLEAN_MISS",
+	"T62x_L2_WRITE_LOOKUP",
+	"T62x_L2_SWRITE_LOOKUP",
+	"T62x_L2_WRITE_REPLAY",
+	"T62x_L2_WRITE_SNOOP",
+	"T62x_L2_WRITE_HIT",
+	"T62x_L2_EXT_READ_FULL",
+	"T62x_L2_EXT_READ_HALF",
+	"T62x_L2_EXT_WRITE_FULL",
+	"T62x_L2_EXT_WRITE_HALF",
+	"T62x_L2_EXT_READ",
+	"T62x_L2_EXT_READ_LINE",
+	"T62x_L2_EXT_WRITE",
+	"T62x_L2_EXT_WRITE_LINE",
+	"T62x_L2_EXT_WRITE_SMALL",
+	"T62x_L2_EXT_BARRIER",
+	"T62x_L2_EXT_AR_STALL",
+	"T62x_L2_EXT_R_BUF_FULL",
+	"T62x_L2_EXT_RD_BUF_FULL",
+	"T62x_L2_EXT_R_RAW",
+	"T62x_L2_EXT_W_STALL",
+	"T62x_L2_EXT_W_BUF_FULL",
+	"T62x_L2_EXT_R_W_HAZARD",
+	"T62x_L2_TAG_HAZARD",
+	"T62x_L2_SNOOP_FULL",
+	"T62x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t72x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T72x_GPU_ACTIVE",
+	"T72x_IRQ_ACTIVE",
+	"T72x_JS0_JOBS",
+	"T72x_JS0_TASKS",
+	"T72x_JS0_ACTIVE",
+	"T72x_JS1_JOBS",
+	"T72x_JS1_TASKS",
+	"T72x_JS1_ACTIVE",
+	"T72x_JS2_JOBS",
+	"T72x_JS2_TASKS",
+	"T72x_JS2_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T72x_TI_JOBS_PROCESSED",
+	"T72x_TI_TRIANGLES",
+	"T72x_TI_QUADS",
+	"T72x_TI_POLYGONS",
+	"T72x_TI_POINTS",
+	"T72x_TI_LINES",
+	"T72x_TI_FRONT_FACING",
+	"T72x_TI_BACK_FACING",
+	"T72x_TI_PRIM_VISIBLE",
+	"T72x_TI_PRIM_CULLED",
+	"T72x_TI_PRIM_CLIPPED",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T72x_TI_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T72x_FRAG_ACTIVE",
+	"T72x_FRAG_PRIMITIVES",
+	"T72x_FRAG_PRIMITIVES_DROPPED",
+	"T72x_FRAG_THREADS",
+	"T72x_FRAG_DUMMY_THREADS",
+	"T72x_FRAG_QUADS_RAST",
+	"T72x_FRAG_QUADS_EZS_TEST",
+	"T72x_FRAG_QUADS_EZS_KILLED",
+	"T72x_FRAG_THREADS_LZS_TEST",
+	"T72x_FRAG_THREADS_LZS_KILLED",
+	"T72x_FRAG_CYCLES_NO_TILE",
+	"T72x_FRAG_NUM_TILES",
+	"T72x_FRAG_TRANS_ELIM",
+	"T72x_COMPUTE_ACTIVE",
+	"T72x_COMPUTE_TASKS",
+	"T72x_COMPUTE_THREADS",
+	"T72x_TRIPIPE_ACTIVE",
+	"T72x_ARITH_WORDS",
+	"T72x_ARITH_CYCLES_REG",
+	"T72x_LS_WORDS",
+	"T72x_LS_ISSUES",
+	"T72x_LS_RESTARTS",
+	"T72x_LS_REISSUES_MISS",
+	"T72x_TEX_WORDS",
+	"T72x_TEX_BUBBLES",
+	"T72x_TEX_ISSUES",
+	"T72x_LSC_READ_HITS",
+	"T72x_LSC_READ_MISSES",
+	"T72x_LSC_WRITE_HITS",
+	"T72x_LSC_WRITE_MISSES",
+	"T72x_LSC_ATOMIC_HITS",
+	"T72x_LSC_ATOMIC_MISSES",
+	"T72x_LSC_LINE_FETCHES",
+	"T72x_LSC_DIRTY_LINE",
+	"T72x_LSC_SNOOPS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T72x_L2_EXT_WRITE_BEAT",
+	"T72x_L2_EXT_READ_BEAT",
+	"T72x_L2_READ_SNOOP",
+	"T72x_L2_READ_HIT",
+	"T72x_L2_WRITE_SNOOP",
+	"T72x_L2_WRITE_HIT",
+	"T72x_L2_EXT_WRITE_SMALL",
+	"T72x_L2_EXT_BARRIER",
+	"T72x_L2_EXT_AR_STALL",
+	"T72x_L2_EXT_W_STALL",
+	"T72x_L2_SNOOP_FULL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	""
+};
+
+static const char * const hardware_counters_mali_t76x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T76x_MESSAGES_SENT",
+	"T76x_MESSAGES_RECEIVED",
+	"T76x_GPU_ACTIVE",
+	"T76x_IRQ_ACTIVE",
+	"T76x_JS0_JOBS",
+	"T76x_JS0_TASKS",
+	"T76x_JS0_ACTIVE",
+	"",
+	"T76x_JS0_WAIT_READ",
+	"T76x_JS0_WAIT_ISSUE",
+	"T76x_JS0_WAIT_DEPEND",
+	"T76x_JS0_WAIT_FINISH",
+	"T76x_JS1_JOBS",
+	"T76x_JS1_TASKS",
+	"T76x_JS1_ACTIVE",
+	"",
+	"T76x_JS1_WAIT_READ",
+	"T76x_JS1_WAIT_ISSUE",
+	"T76x_JS1_WAIT_DEPEND",
+	"T76x_JS1_WAIT_FINISH",
+	"T76x_JS2_JOBS",
+	"T76x_JS2_TASKS",
+	"T76x_JS2_ACTIVE",
+	"",
+	"T76x_JS2_WAIT_READ",
+	"T76x_JS2_WAIT_ISSUE",
+	"T76x_JS2_WAIT_DEPEND",
+	"T76x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T76x_TI_JOBS_PROCESSED",
+	"T76x_TI_TRIANGLES",
+	"T76x_TI_QUADS",
+	"T76x_TI_POLYGONS",
+	"T76x_TI_POINTS",
+	"T76x_TI_LINES",
+	"T76x_TI_VCACHE_HIT",
+	"T76x_TI_VCACHE_MISS",
+	"T76x_TI_FRONT_FACING",
+	"T76x_TI_BACK_FACING",
+	"T76x_TI_PRIM_VISIBLE",
+	"T76x_TI_PRIM_CULLED",
+	"T76x_TI_PRIM_CLIPPED",
+	"T76x_TI_LEVEL0",
+	"T76x_TI_LEVEL1",
+	"T76x_TI_LEVEL2",
+	"T76x_TI_LEVEL3",
+	"T76x_TI_LEVEL4",
+	"T76x_TI_LEVEL5",
+	"T76x_TI_LEVEL6",
+	"T76x_TI_LEVEL7",
+	"T76x_TI_COMMAND_1",
+	"T76x_TI_COMMAND_2",
+	"T76x_TI_COMMAND_3",
+	"T76x_TI_COMMAND_4",
+	"T76x_TI_COMMAND_5_7",
+	"T76x_TI_COMMAND_8_15",
+	"T76x_TI_COMMAND_16_63",
+	"T76x_TI_COMMAND_64",
+	"T76x_TI_COMPRESS_IN",
+	"T76x_TI_COMPRESS_OUT",
+	"T76x_TI_COMPRESS_FLUSH",
+	"T76x_TI_TIMESTAMPS",
+	"T76x_TI_PCACHE_HIT",
+	"T76x_TI_PCACHE_MISS",
+	"T76x_TI_PCACHE_LINE",
+	"T76x_TI_PCACHE_STALL",
+	"T76x_TI_WRBUF_HIT",
+	"T76x_TI_WRBUF_MISS",
+	"T76x_TI_WRBUF_LINE",
+	"T76x_TI_WRBUF_PARTIAL",
+	"T76x_TI_WRBUF_STALL",
+	"T76x_TI_ACTIVE",
+	"T76x_TI_LOADING_DESC",
+	"T76x_TI_INDEX_WAIT",
+	"T76x_TI_INDEX_RANGE_WAIT",
+	"T76x_TI_VERTEX_WAIT",
+	"T76x_TI_PCACHE_WAIT",
+	"T76x_TI_WRBUF_WAIT",
+	"T76x_TI_BUS_READ",
+	"T76x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T76x_TI_UTLB_HIT",
+	"T76x_TI_UTLB_NEW_MISS",
+	"T76x_TI_UTLB_REPLAY_FULL",
+	"T76x_TI_UTLB_REPLAY_MISS",
+	"T76x_TI_UTLB_STALL",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T76x_FRAG_ACTIVE",
+	"T76x_FRAG_PRIMITIVES",
+	"T76x_FRAG_PRIMITIVES_DROPPED",
+	"T76x_FRAG_CYCLES_DESC",
+	"T76x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T76x_FRAG_CYCLES_VERT",
+	"T76x_FRAG_CYCLES_TRISETUP",
+	"T76x_FRAG_CYCLES_EZS_ACTIVE",
+	"T76x_FRAG_THREADS",
+	"T76x_FRAG_DUMMY_THREADS",
+	"T76x_FRAG_QUADS_RAST",
+	"T76x_FRAG_QUADS_EZS_TEST",
+	"T76x_FRAG_QUADS_EZS_KILLED",
+	"T76x_FRAG_THREADS_LZS_TEST",
+	"T76x_FRAG_THREADS_LZS_KILLED",
+	"T76x_FRAG_CYCLES_NO_TILE",
+	"T76x_FRAG_NUM_TILES",
+	"T76x_FRAG_TRANS_ELIM",
+	"T76x_COMPUTE_ACTIVE",
+	"T76x_COMPUTE_TASKS",
+	"T76x_COMPUTE_THREADS",
+	"T76x_COMPUTE_CYCLES_DESC",
+	"T76x_TRIPIPE_ACTIVE",
+	"T76x_ARITH_WORDS",
+	"T76x_ARITH_CYCLES_REG",
+	"T76x_ARITH_CYCLES_L0",
+	"T76x_ARITH_FRAG_DEPEND",
+	"T76x_LS_WORDS",
+	"T76x_LS_ISSUES",
+	"T76x_LS_REISSUE_ATTR",
+	"T76x_LS_REISSUES_VARY",
+	"T76x_LS_VARY_RV_MISS",
+	"T76x_LS_VARY_RV_HIT",
+	"T76x_LS_NO_UNPARK",
+	"T76x_TEX_WORDS",
+	"T76x_TEX_BUBBLES",
+	"T76x_TEX_WORDS_L0",
+	"T76x_TEX_WORDS_DESC",
+	"T76x_TEX_ISSUES",
+	"T76x_TEX_RECIRC_FMISS",
+	"T76x_TEX_RECIRC_DESC",
+	"T76x_TEX_RECIRC_MULTI",
+	"T76x_TEX_RECIRC_PMISS",
+	"T76x_TEX_RECIRC_CONF",
+	"T76x_LSC_READ_HITS",
+	"T76x_LSC_READ_OP",
+	"T76x_LSC_WRITE_HITS",
+	"T76x_LSC_WRITE_OP",
+	"T76x_LSC_ATOMIC_HITS",
+	"T76x_LSC_ATOMIC_OP",
+	"T76x_LSC_LINE_FETCHES",
+	"T76x_LSC_DIRTY_LINE",
+	"T76x_LSC_SNOOPS",
+	"T76x_AXI_TLB_STALL",
+	"T76x_AXI_TLB_MISS",
+	"T76x_AXI_TLB_TRANSACTION",
+	"T76x_LS_TLB_MISS",
+	"T76x_LS_TLB_HIT",
+	"T76x_AXI_BEATS_READ",
+	"T76x_AXI_BEATS_WRITTEN",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T76x_MMU_HIT",
+	"T76x_MMU_NEW_MISS",
+	"T76x_MMU_REPLAY_FULL",
+	"T76x_MMU_REPLAY_MISS",
+	"T76x_MMU_TABLE_WALK",
+	"T76x_MMU_REQUESTS",
+	"",
+	"",
+	"T76x_UTLB_HIT",
+	"T76x_UTLB_NEW_MISS",
+	"T76x_UTLB_REPLAY_FULL",
+	"T76x_UTLB_REPLAY_MISS",
+	"T76x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T76x_L2_EXT_WRITE_BEATS",
+	"T76x_L2_EXT_READ_BEATS",
+	"T76x_L2_ANY_LOOKUP",
+	"T76x_L2_READ_LOOKUP",
+	"T76x_L2_SREAD_LOOKUP",
+	"T76x_L2_READ_REPLAY",
+	"T76x_L2_READ_SNOOP",
+	"T76x_L2_READ_HIT",
+	"T76x_L2_CLEAN_MISS",
+	"T76x_L2_WRITE_LOOKUP",
+	"T76x_L2_SWRITE_LOOKUP",
+	"T76x_L2_WRITE_REPLAY",
+	"T76x_L2_WRITE_SNOOP",
+	"T76x_L2_WRITE_HIT",
+	"T76x_L2_EXT_READ_FULL",
+	"",
+	"T76x_L2_EXT_WRITE_FULL",
+	"T76x_L2_EXT_R_W_HAZARD",
+	"T76x_L2_EXT_READ",
+	"T76x_L2_EXT_READ_LINE",
+	"T76x_L2_EXT_WRITE",
+	"T76x_L2_EXT_WRITE_LINE",
+	"T76x_L2_EXT_WRITE_SMALL",
+	"T76x_L2_EXT_BARRIER",
+	"T76x_L2_EXT_AR_STALL",
+	"T76x_L2_EXT_R_BUF_FULL",
+	"T76x_L2_EXT_RD_BUF_FULL",
+	"T76x_L2_EXT_R_RAW",
+	"T76x_L2_EXT_W_STALL",
+	"T76x_L2_EXT_W_BUF_FULL",
+	"T76x_L2_EXT_R_BUF_FULL",
+	"T76x_L2_TAG_HAZARD",
+	"T76x_L2_SNOOP_FULL",
+	"T76x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t82x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T82x_MESSAGES_SENT",
+	"T82x_MESSAGES_RECEIVED",
+	"T82x_GPU_ACTIVE",
+	"T82x_IRQ_ACTIVE",
+	"T82x_JS0_JOBS",
+	"T82x_JS0_TASKS",
+	"T82x_JS0_ACTIVE",
+	"",
+	"T82x_JS0_WAIT_READ",
+	"T82x_JS0_WAIT_ISSUE",
+	"T82x_JS0_WAIT_DEPEND",
+	"T82x_JS0_WAIT_FINISH",
+	"T82x_JS1_JOBS",
+	"T82x_JS1_TASKS",
+	"T82x_JS1_ACTIVE",
+	"",
+	"T82x_JS1_WAIT_READ",
+	"T82x_JS1_WAIT_ISSUE",
+	"T82x_JS1_WAIT_DEPEND",
+	"T82x_JS1_WAIT_FINISH",
+	"T82x_JS2_JOBS",
+	"T82x_JS2_TASKS",
+	"T82x_JS2_ACTIVE",
+	"",
+	"T82x_JS2_WAIT_READ",
+	"T82x_JS2_WAIT_ISSUE",
+	"T82x_JS2_WAIT_DEPEND",
+	"T82x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T82x_TI_JOBS_PROCESSED",
+	"T82x_TI_TRIANGLES",
+	"T82x_TI_QUADS",
+	"T82x_TI_POLYGONS",
+	"T82x_TI_POINTS",
+	"T82x_TI_LINES",
+	"T82x_TI_FRONT_FACING",
+	"T82x_TI_BACK_FACING",
+	"T82x_TI_PRIM_VISIBLE",
+	"T82x_TI_PRIM_CULLED",
+	"T82x_TI_PRIM_CLIPPED",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T82x_TI_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T82x_FRAG_ACTIVE",
+	"T82x_FRAG_PRIMITIVES",
+	"T82x_FRAG_PRIMITIVES_DROPPED",
+	"T82x_FRAG_CYCLES_DESC",
+	"T82x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T82x_FRAG_CYCLES_VERT",
+	"T82x_FRAG_CYCLES_TRISETUP",
+	"T82x_FRAG_CYCLES_EZS_ACTIVE",
+	"T82x_FRAG_THREADS",
+	"T82x_FRAG_DUMMY_THREADS",
+	"T82x_FRAG_QUADS_RAST",
+	"T82x_FRAG_QUADS_EZS_TEST",
+	"T82x_FRAG_QUADS_EZS_KILLED",
+	"T82x_FRAG_THREADS_LZS_TEST",
+	"T82x_FRAG_THREADS_LZS_KILLED",
+	"T82x_FRAG_CYCLES_NO_TILE",
+	"T82x_FRAG_NUM_TILES",
+	"T82x_FRAG_TRANS_ELIM",
+	"T82x_COMPUTE_ACTIVE",
+	"T82x_COMPUTE_TASKS",
+	"T82x_COMPUTE_THREADS",
+	"T82x_COMPUTE_CYCLES_DESC",
+	"T82x_TRIPIPE_ACTIVE",
+	"T82x_ARITH_WORDS",
+	"T82x_ARITH_CYCLES_REG",
+	"T82x_ARITH_CYCLES_L0",
+	"T82x_ARITH_FRAG_DEPEND",
+	"T82x_LS_WORDS",
+	"T82x_LS_ISSUES",
+	"T82x_LS_REISSUE_ATTR",
+	"T82x_LS_REISSUES_VARY",
+	"T82x_LS_VARY_RV_MISS",
+	"T82x_LS_VARY_RV_HIT",
+	"T82x_LS_NO_UNPARK",
+	"T82x_TEX_WORDS",
+	"T82x_TEX_BUBBLES",
+	"T82x_TEX_WORDS_L0",
+	"T82x_TEX_WORDS_DESC",
+	"T82x_TEX_ISSUES",
+	"T82x_TEX_RECIRC_FMISS",
+	"T82x_TEX_RECIRC_DESC",
+	"T82x_TEX_RECIRC_MULTI",
+	"T82x_TEX_RECIRC_PMISS",
+	"T82x_TEX_RECIRC_CONF",
+	"T82x_LSC_READ_HITS",
+	"T82x_LSC_READ_OP",
+	"T82x_LSC_WRITE_HITS",
+	"T82x_LSC_WRITE_OP",
+	"T82x_LSC_ATOMIC_HITS",
+	"T82x_LSC_ATOMIC_OP",
+	"T82x_LSC_LINE_FETCHES",
+	"T82x_LSC_DIRTY_LINE",
+	"T82x_LSC_SNOOPS",
+	"T82x_AXI_TLB_STALL",
+	"T82x_AXI_TLB_MISS",
+	"T82x_AXI_TLB_TRANSACTION",
+	"T82x_LS_TLB_MISS",
+	"T82x_LS_TLB_HIT",
+	"T82x_AXI_BEATS_READ",
+	"T82x_AXI_BEATS_WRITTEN",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T82x_MMU_HIT",
+	"T82x_MMU_NEW_MISS",
+	"T82x_MMU_REPLAY_FULL",
+	"T82x_MMU_REPLAY_MISS",
+	"T82x_MMU_TABLE_WALK",
+	"T82x_MMU_REQUESTS",
+	"",
+	"",
+	"T82x_UTLB_HIT",
+	"T82x_UTLB_NEW_MISS",
+	"T82x_UTLB_REPLAY_FULL",
+	"T82x_UTLB_REPLAY_MISS",
+	"T82x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T82x_L2_EXT_WRITE_BEATS",
+	"T82x_L2_EXT_READ_BEATS",
+	"T82x_L2_ANY_LOOKUP",
+	"T82x_L2_READ_LOOKUP",
+	"T82x_L2_SREAD_LOOKUP",
+	"T82x_L2_READ_REPLAY",
+	"T82x_L2_READ_SNOOP",
+	"T82x_L2_READ_HIT",
+	"T82x_L2_CLEAN_MISS",
+	"T82x_L2_WRITE_LOOKUP",
+	"T82x_L2_SWRITE_LOOKUP",
+	"T82x_L2_WRITE_REPLAY",
+	"T82x_L2_WRITE_SNOOP",
+	"T82x_L2_WRITE_HIT",
+	"T82x_L2_EXT_READ_FULL",
+	"",
+	"T82x_L2_EXT_WRITE_FULL",
+	"T82x_L2_EXT_R_W_HAZARD",
+	"T82x_L2_EXT_READ",
+	"T82x_L2_EXT_READ_LINE",
+	"T82x_L2_EXT_WRITE",
+	"T82x_L2_EXT_WRITE_LINE",
+	"T82x_L2_EXT_WRITE_SMALL",
+	"T82x_L2_EXT_BARRIER",
+	"T82x_L2_EXT_AR_STALL",
+	"T82x_L2_EXT_R_BUF_FULL",
+	"T82x_L2_EXT_RD_BUF_FULL",
+	"T82x_L2_EXT_R_RAW",
+	"T82x_L2_EXT_W_STALL",
+	"T82x_L2_EXT_W_BUF_FULL",
+	"T82x_L2_EXT_R_BUF_FULL",
+	"T82x_L2_TAG_HAZARD",
+	"T82x_L2_SNOOP_FULL",
+	"T82x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t83x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T83x_MESSAGES_SENT",
+	"T83x_MESSAGES_RECEIVED",
+	"T83x_GPU_ACTIVE",
+	"T83x_IRQ_ACTIVE",
+	"T83x_JS0_JOBS",
+	"T83x_JS0_TASKS",
+	"T83x_JS0_ACTIVE",
+	"",
+	"T83x_JS0_WAIT_READ",
+	"T83x_JS0_WAIT_ISSUE",
+	"T83x_JS0_WAIT_DEPEND",
+	"T83x_JS0_WAIT_FINISH",
+	"T83x_JS1_JOBS",
+	"T83x_JS1_TASKS",
+	"T83x_JS1_ACTIVE",
+	"",
+	"T83x_JS1_WAIT_READ",
+	"T83x_JS1_WAIT_ISSUE",
+	"T83x_JS1_WAIT_DEPEND",
+	"T83x_JS1_WAIT_FINISH",
+	"T83x_JS2_JOBS",
+	"T83x_JS2_TASKS",
+	"T83x_JS2_ACTIVE",
+	"",
+	"T83x_JS2_WAIT_READ",
+	"T83x_JS2_WAIT_ISSUE",
+	"T83x_JS2_WAIT_DEPEND",
+	"T83x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T83x_TI_JOBS_PROCESSED",
+	"T83x_TI_TRIANGLES",
+	"T83x_TI_QUADS",
+	"T83x_TI_POLYGONS",
+	"T83x_TI_POINTS",
+	"T83x_TI_LINES",
+	"T83x_TI_FRONT_FACING",
+	"T83x_TI_BACK_FACING",
+	"T83x_TI_PRIM_VISIBLE",
+	"T83x_TI_PRIM_CULLED",
+	"T83x_TI_PRIM_CLIPPED",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T83x_TI_ACTIVE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T83x_FRAG_ACTIVE",
+	"T83x_FRAG_PRIMITIVES",
+	"T83x_FRAG_PRIMITIVES_DROPPED",
+	"T83x_FRAG_CYCLES_DESC",
+	"T83x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T83x_FRAG_CYCLES_VERT",
+	"T83x_FRAG_CYCLES_TRISETUP",
+	"T83x_FRAG_CYCLES_EZS_ACTIVE",
+	"T83x_FRAG_THREADS",
+	"T83x_FRAG_DUMMY_THREADS",
+	"T83x_FRAG_QUADS_RAST",
+	"T83x_FRAG_QUADS_EZS_TEST",
+	"T83x_FRAG_QUADS_EZS_KILLED",
+	"T83x_FRAG_THREADS_LZS_TEST",
+	"T83x_FRAG_THREADS_LZS_KILLED",
+	"T83x_FRAG_CYCLES_NO_TILE",
+	"T83x_FRAG_NUM_TILES",
+	"T83x_FRAG_TRANS_ELIM",
+	"T83x_COMPUTE_ACTIVE",
+	"T83x_COMPUTE_TASKS",
+	"T83x_COMPUTE_THREADS",
+	"T83x_COMPUTE_CYCLES_DESC",
+	"T83x_TRIPIPE_ACTIVE",
+	"T83x_ARITH_WORDS",
+	"T83x_ARITH_CYCLES_REG",
+	"T83x_ARITH_CYCLES_L0",
+	"T83x_ARITH_FRAG_DEPEND",
+	"T83x_LS_WORDS",
+	"T83x_LS_ISSUES",
+	"T83x_LS_REISSUE_ATTR",
+	"T83x_LS_REISSUES_VARY",
+	"T83x_LS_VARY_RV_MISS",
+	"T83x_LS_VARY_RV_HIT",
+	"T83x_LS_NO_UNPARK",
+	"T83x_TEX_WORDS",
+	"T83x_TEX_BUBBLES",
+	"T83x_TEX_WORDS_L0",
+	"T83x_TEX_WORDS_DESC",
+	"T83x_TEX_ISSUES",
+	"T83x_TEX_RECIRC_FMISS",
+	"T83x_TEX_RECIRC_DESC",
+	"T83x_TEX_RECIRC_MULTI",
+	"T83x_TEX_RECIRC_PMISS",
+	"T83x_TEX_RECIRC_CONF",
+	"T83x_LSC_READ_HITS",
+	"T83x_LSC_READ_OP",
+	"T83x_LSC_WRITE_HITS",
+	"T83x_LSC_WRITE_OP",
+	"T83x_LSC_ATOMIC_HITS",
+	"T83x_LSC_ATOMIC_OP",
+	"T83x_LSC_LINE_FETCHES",
+	"T83x_LSC_DIRTY_LINE",
+	"T83x_LSC_SNOOPS",
+	"T83x_AXI_TLB_STALL",
+	"T83x_AXI_TLB_MISS",
+	"T83x_AXI_TLB_TRANSACTION",
+	"T83x_LS_TLB_MISS",
+	"T83x_LS_TLB_HIT",
+	"T83x_AXI_BEATS_READ",
+	"T83x_AXI_BEATS_WRITTEN",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T83x_MMU_HIT",
+	"T83x_MMU_NEW_MISS",
+	"T83x_MMU_REPLAY_FULL",
+	"T83x_MMU_REPLAY_MISS",
+	"T83x_MMU_TABLE_WALK",
+	"T83x_MMU_REQUESTS",
+	"",
+	"",
+	"T83x_UTLB_HIT",
+	"T83x_UTLB_NEW_MISS",
+	"T83x_UTLB_REPLAY_FULL",
+	"T83x_UTLB_REPLAY_MISS",
+	"T83x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T83x_L2_EXT_WRITE_BEATS",
+	"T83x_L2_EXT_READ_BEATS",
+	"T83x_L2_ANY_LOOKUP",
+	"T83x_L2_READ_LOOKUP",
+	"T83x_L2_SREAD_LOOKUP",
+	"T83x_L2_READ_REPLAY",
+	"T83x_L2_READ_SNOOP",
+	"T83x_L2_READ_HIT",
+	"T83x_L2_CLEAN_MISS",
+	"T83x_L2_WRITE_LOOKUP",
+	"T83x_L2_SWRITE_LOOKUP",
+	"T83x_L2_WRITE_REPLAY",
+	"T83x_L2_WRITE_SNOOP",
+	"T83x_L2_WRITE_HIT",
+	"T83x_L2_EXT_READ_FULL",
+	"",
+	"T83x_L2_EXT_WRITE_FULL",
+	"T83x_L2_EXT_R_W_HAZARD",
+	"T83x_L2_EXT_READ",
+	"T83x_L2_EXT_READ_LINE",
+	"T83x_L2_EXT_WRITE",
+	"T83x_L2_EXT_WRITE_LINE",
+	"T83x_L2_EXT_WRITE_SMALL",
+	"T83x_L2_EXT_BARRIER",
+	"T83x_L2_EXT_AR_STALL",
+	"T83x_L2_EXT_R_BUF_FULL",
+	"T83x_L2_EXT_RD_BUF_FULL",
+	"T83x_L2_EXT_R_RAW",
+	"T83x_L2_EXT_W_STALL",
+	"T83x_L2_EXT_W_BUF_FULL",
+	"T83x_L2_EXT_R_BUF_FULL",
+	"T83x_L2_TAG_HAZARD",
+	"T83x_L2_SNOOP_FULL",
+	"T83x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t86x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T86x_MESSAGES_SENT",
+	"T86x_MESSAGES_RECEIVED",
+	"T86x_GPU_ACTIVE",
+	"T86x_IRQ_ACTIVE",
+	"T86x_JS0_JOBS",
+	"T86x_JS0_TASKS",
+	"T86x_JS0_ACTIVE",
+	"",
+	"T86x_JS0_WAIT_READ",
+	"T86x_JS0_WAIT_ISSUE",
+	"T86x_JS0_WAIT_DEPEND",
+	"T86x_JS0_WAIT_FINISH",
+	"T86x_JS1_JOBS",
+	"T86x_JS1_TASKS",
+	"T86x_JS1_ACTIVE",
+	"",
+	"T86x_JS1_WAIT_READ",
+	"T86x_JS1_WAIT_ISSUE",
+	"T86x_JS1_WAIT_DEPEND",
+	"T86x_JS1_WAIT_FINISH",
+	"T86x_JS2_JOBS",
+	"T86x_JS2_TASKS",
+	"T86x_JS2_ACTIVE",
+	"",
+	"T86x_JS2_WAIT_READ",
+	"T86x_JS2_WAIT_ISSUE",
+	"T86x_JS2_WAIT_DEPEND",
+	"T86x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T86x_TI_JOBS_PROCESSED",
+	"T86x_TI_TRIANGLES",
+	"T86x_TI_QUADS",
+	"T86x_TI_POLYGONS",
+	"T86x_TI_POINTS",
+	"T86x_TI_LINES",
+	"T86x_TI_VCACHE_HIT",
+	"T86x_TI_VCACHE_MISS",
+	"T86x_TI_FRONT_FACING",
+	"T86x_TI_BACK_FACING",
+	"T86x_TI_PRIM_VISIBLE",
+	"T86x_TI_PRIM_CULLED",
+	"T86x_TI_PRIM_CLIPPED",
+	"T86x_TI_LEVEL0",
+	"T86x_TI_LEVEL1",
+	"T86x_TI_LEVEL2",
+	"T86x_TI_LEVEL3",
+	"T86x_TI_LEVEL4",
+	"T86x_TI_LEVEL5",
+	"T86x_TI_LEVEL6",
+	"T86x_TI_LEVEL7",
+	"T86x_TI_COMMAND_1",
+	"T86x_TI_COMMAND_2",
+	"T86x_TI_COMMAND_3",
+	"T86x_TI_COMMAND_4",
+	"T86x_TI_COMMAND_5_7",
+	"T86x_TI_COMMAND_8_15",
+	"T86x_TI_COMMAND_16_63",
+	"T86x_TI_COMMAND_64",
+	"T86x_TI_COMPRESS_IN",
+	"T86x_TI_COMPRESS_OUT",
+	"T86x_TI_COMPRESS_FLUSH",
+	"T86x_TI_TIMESTAMPS",
+	"T86x_TI_PCACHE_HIT",
+	"T86x_TI_PCACHE_MISS",
+	"T86x_TI_PCACHE_LINE",
+	"T86x_TI_PCACHE_STALL",
+	"T86x_TI_WRBUF_HIT",
+	"T86x_TI_WRBUF_MISS",
+	"T86x_TI_WRBUF_LINE",
+	"T86x_TI_WRBUF_PARTIAL",
+	"T86x_TI_WRBUF_STALL",
+	"T86x_TI_ACTIVE",
+	"T86x_TI_LOADING_DESC",
+	"T86x_TI_INDEX_WAIT",
+	"T86x_TI_INDEX_RANGE_WAIT",
+	"T86x_TI_VERTEX_WAIT",
+	"T86x_TI_PCACHE_WAIT",
+	"T86x_TI_WRBUF_WAIT",
+	"T86x_TI_BUS_READ",
+	"T86x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T86x_TI_UTLB_HIT",
+	"T86x_TI_UTLB_NEW_MISS",
+	"T86x_TI_UTLB_REPLAY_FULL",
+	"T86x_TI_UTLB_REPLAY_MISS",
+	"T86x_TI_UTLB_STALL",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T86x_FRAG_ACTIVE",
+	"T86x_FRAG_PRIMITIVES",
+	"T86x_FRAG_PRIMITIVES_DROPPED",
+	"T86x_FRAG_CYCLES_DESC",
+	"T86x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T86x_FRAG_CYCLES_VERT",
+	"T86x_FRAG_CYCLES_TRISETUP",
+	"T86x_FRAG_CYCLES_EZS_ACTIVE",
+	"T86x_FRAG_THREADS",
+	"T86x_FRAG_DUMMY_THREADS",
+	"T86x_FRAG_QUADS_RAST",
+	"T86x_FRAG_QUADS_EZS_TEST",
+	"T86x_FRAG_QUADS_EZS_KILLED",
+	"T86x_FRAG_THREADS_LZS_TEST",
+	"T86x_FRAG_THREADS_LZS_KILLED",
+	"T86x_FRAG_CYCLES_NO_TILE",
+	"T86x_FRAG_NUM_TILES",
+	"T86x_FRAG_TRANS_ELIM",
+	"T86x_COMPUTE_ACTIVE",
+	"T86x_COMPUTE_TASKS",
+	"T86x_COMPUTE_THREADS",
+	"T86x_COMPUTE_CYCLES_DESC",
+	"T86x_TRIPIPE_ACTIVE",
+	"T86x_ARITH_WORDS",
+	"T86x_ARITH_CYCLES_REG",
+	"T86x_ARITH_CYCLES_L0",
+	"T86x_ARITH_FRAG_DEPEND",
+	"T86x_LS_WORDS",
+	"T86x_LS_ISSUES",
+	"T86x_LS_REISSUE_ATTR",
+	"T86x_LS_REISSUES_VARY",
+	"T86x_LS_VARY_RV_MISS",
+	"T86x_LS_VARY_RV_HIT",
+	"T86x_LS_NO_UNPARK",
+	"T86x_TEX_WORDS",
+	"T86x_TEX_BUBBLES",
+	"T86x_TEX_WORDS_L0",
+	"T86x_TEX_WORDS_DESC",
+	"T86x_TEX_ISSUES",
+	"T86x_TEX_RECIRC_FMISS",
+	"T86x_TEX_RECIRC_DESC",
+	"T86x_TEX_RECIRC_MULTI",
+	"T86x_TEX_RECIRC_PMISS",
+	"T86x_TEX_RECIRC_CONF",
+	"T86x_LSC_READ_HITS",
+	"T86x_LSC_READ_OP",
+	"T86x_LSC_WRITE_HITS",
+	"T86x_LSC_WRITE_OP",
+	"T86x_LSC_ATOMIC_HITS",
+	"T86x_LSC_ATOMIC_OP",
+	"T86x_LSC_LINE_FETCHES",
+	"T86x_LSC_DIRTY_LINE",
+	"T86x_LSC_SNOOPS",
+	"T86x_AXI_TLB_STALL",
+	"T86x_AXI_TLB_MISS",
+	"T86x_AXI_TLB_TRANSACTION",
+	"T86x_LS_TLB_MISS",
+	"T86x_LS_TLB_HIT",
+	"T86x_AXI_BEATS_READ",
+	"T86x_AXI_BEATS_WRITTEN",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T86x_MMU_HIT",
+	"T86x_MMU_NEW_MISS",
+	"T86x_MMU_REPLAY_FULL",
+	"T86x_MMU_REPLAY_MISS",
+	"T86x_MMU_TABLE_WALK",
+	"T86x_MMU_REQUESTS",
+	"",
+	"",
+	"T86x_UTLB_HIT",
+	"T86x_UTLB_NEW_MISS",
+	"T86x_UTLB_REPLAY_FULL",
+	"T86x_UTLB_REPLAY_MISS",
+	"T86x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T86x_L2_EXT_WRITE_BEATS",
+	"T86x_L2_EXT_READ_BEATS",
+	"T86x_L2_ANY_LOOKUP",
+	"T86x_L2_READ_LOOKUP",
+	"T86x_L2_SREAD_LOOKUP",
+	"T86x_L2_READ_REPLAY",
+	"T86x_L2_READ_SNOOP",
+	"T86x_L2_READ_HIT",
+	"T86x_L2_CLEAN_MISS",
+	"T86x_L2_WRITE_LOOKUP",
+	"T86x_L2_SWRITE_LOOKUP",
+	"T86x_L2_WRITE_REPLAY",
+	"T86x_L2_WRITE_SNOOP",
+	"T86x_L2_WRITE_HIT",
+	"T86x_L2_EXT_READ_FULL",
+	"",
+	"T86x_L2_EXT_WRITE_FULL",
+	"T86x_L2_EXT_R_W_HAZARD",
+	"T86x_L2_EXT_READ",
+	"T86x_L2_EXT_READ_LINE",
+	"T86x_L2_EXT_WRITE",
+	"T86x_L2_EXT_WRITE_LINE",
+	"T86x_L2_EXT_WRITE_SMALL",
+	"T86x_L2_EXT_BARRIER",
+	"T86x_L2_EXT_AR_STALL",
+	"T86x_L2_EXT_R_BUF_FULL",
+	"T86x_L2_EXT_RD_BUF_FULL",
+	"T86x_L2_EXT_R_RAW",
+	"T86x_L2_EXT_W_STALL",
+	"T86x_L2_EXT_W_BUF_FULL",
+	"T86x_L2_EXT_R_BUF_FULL",
+	"T86x_L2_TAG_HAZARD",
+	"T86x_L2_SNOOP_FULL",
+	"T86x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counters_mali_t88x[] = {
+	/* Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"T88x_MESSAGES_SENT",
+	"T88x_MESSAGES_RECEIVED",
+	"T88x_GPU_ACTIVE",
+	"T88x_IRQ_ACTIVE",
+	"T88x_JS0_JOBS",
+	"T88x_JS0_TASKS",
+	"T88x_JS0_ACTIVE",
+	"",
+	"T88x_JS0_WAIT_READ",
+	"T88x_JS0_WAIT_ISSUE",
+	"T88x_JS0_WAIT_DEPEND",
+	"T88x_JS0_WAIT_FINISH",
+	"T88x_JS1_JOBS",
+	"T88x_JS1_TASKS",
+	"T88x_JS1_ACTIVE",
+	"",
+	"T88x_JS1_WAIT_READ",
+	"T88x_JS1_WAIT_ISSUE",
+	"T88x_JS1_WAIT_DEPEND",
+	"T88x_JS1_WAIT_FINISH",
+	"T88x_JS2_JOBS",
+	"T88x_JS2_TASKS",
+	"T88x_JS2_ACTIVE",
+	"",
+	"T88x_JS2_WAIT_READ",
+	"T88x_JS2_WAIT_ISSUE",
+	"T88x_JS2_WAIT_DEPEND",
+	"T88x_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/*Tiler */
+	"",
+	"",
+	"",
+	"T88x_TI_JOBS_PROCESSED",
+	"T88x_TI_TRIANGLES",
+	"T88x_TI_QUADS",
+	"T88x_TI_POLYGONS",
+	"T88x_TI_POINTS",
+	"T88x_TI_LINES",
+	"T88x_TI_VCACHE_HIT",
+	"T88x_TI_VCACHE_MISS",
+	"T88x_TI_FRONT_FACING",
+	"T88x_TI_BACK_FACING",
+	"T88x_TI_PRIM_VISIBLE",
+	"T88x_TI_PRIM_CULLED",
+	"T88x_TI_PRIM_CLIPPED",
+	"T88x_TI_LEVEL0",
+	"T88x_TI_LEVEL1",
+	"T88x_TI_LEVEL2",
+	"T88x_TI_LEVEL3",
+	"T88x_TI_LEVEL4",
+	"T88x_TI_LEVEL5",
+	"T88x_TI_LEVEL6",
+	"T88x_TI_LEVEL7",
+	"T88x_TI_COMMAND_1",
+	"T88x_TI_COMMAND_2",
+	"T88x_TI_COMMAND_3",
+	"T88x_TI_COMMAND_4",
+	"T88x_TI_COMMAND_5_7",
+	"T88x_TI_COMMAND_8_15",
+	"T88x_TI_COMMAND_16_63",
+	"T88x_TI_COMMAND_64",
+	"T88x_TI_COMPRESS_IN",
+	"T88x_TI_COMPRESS_OUT",
+	"T88x_TI_COMPRESS_FLUSH",
+	"T88x_TI_TIMESTAMPS",
+	"T88x_TI_PCACHE_HIT",
+	"T88x_TI_PCACHE_MISS",
+	"T88x_TI_PCACHE_LINE",
+	"T88x_TI_PCACHE_STALL",
+	"T88x_TI_WRBUF_HIT",
+	"T88x_TI_WRBUF_MISS",
+	"T88x_TI_WRBUF_LINE",
+	"T88x_TI_WRBUF_PARTIAL",
+	"T88x_TI_WRBUF_STALL",
+	"T88x_TI_ACTIVE",
+	"T88x_TI_LOADING_DESC",
+	"T88x_TI_INDEX_WAIT",
+	"T88x_TI_INDEX_RANGE_WAIT",
+	"T88x_TI_VERTEX_WAIT",
+	"T88x_TI_PCACHE_WAIT",
+	"T88x_TI_WRBUF_WAIT",
+	"T88x_TI_BUS_READ",
+	"T88x_TI_BUS_WRITE",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T88x_TI_UTLB_HIT",
+	"T88x_TI_UTLB_NEW_MISS",
+	"T88x_TI_UTLB_REPLAY_FULL",
+	"T88x_TI_UTLB_REPLAY_MISS",
+	"T88x_TI_UTLB_STALL",
+
+	/* Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"T88x_FRAG_ACTIVE",
+	"T88x_FRAG_PRIMITIVES",
+	"T88x_FRAG_PRIMITIVES_DROPPED",
+	"T88x_FRAG_CYCLES_DESC",
+	"T88x_FRAG_CYCLES_FPKQ_ACTIVE",
+	"T88x_FRAG_CYCLES_VERT",
+	"T88x_FRAG_CYCLES_TRISETUP",
+	"T88x_FRAG_CYCLES_EZS_ACTIVE",
+	"T88x_FRAG_THREADS",
+	"T88x_FRAG_DUMMY_THREADS",
+	"T88x_FRAG_QUADS_RAST",
+	"T88x_FRAG_QUADS_EZS_TEST",
+	"T88x_FRAG_QUADS_EZS_KILLED",
+	"T88x_FRAG_THREADS_LZS_TEST",
+	"T88x_FRAG_THREADS_LZS_KILLED",
+	"T88x_FRAG_CYCLES_NO_TILE",
+	"T88x_FRAG_NUM_TILES",
+	"T88x_FRAG_TRANS_ELIM",
+	"T88x_COMPUTE_ACTIVE",
+	"T88x_COMPUTE_TASKS",
+	"T88x_COMPUTE_THREADS",
+	"T88x_COMPUTE_CYCLES_DESC",
+	"T88x_TRIPIPE_ACTIVE",
+	"T88x_ARITH_WORDS",
+	"T88x_ARITH_CYCLES_REG",
+	"T88x_ARITH_CYCLES_L0",
+	"T88x_ARITH_FRAG_DEPEND",
+	"T88x_LS_WORDS",
+	"T88x_LS_ISSUES",
+	"T88x_LS_REISSUE_ATTR",
+	"T88x_LS_REISSUES_VARY",
+	"T88x_LS_VARY_RV_MISS",
+	"T88x_LS_VARY_RV_HIT",
+	"T88x_LS_NO_UNPARK",
+	"T88x_TEX_WORDS",
+	"T88x_TEX_BUBBLES",
+	"T88x_TEX_WORDS_L0",
+	"T88x_TEX_WORDS_DESC",
+	"T88x_TEX_ISSUES",
+	"T88x_TEX_RECIRC_FMISS",
+	"T88x_TEX_RECIRC_DESC",
+	"T88x_TEX_RECIRC_MULTI",
+	"T88x_TEX_RECIRC_PMISS",
+	"T88x_TEX_RECIRC_CONF",
+	"T88x_LSC_READ_HITS",
+	"T88x_LSC_READ_OP",
+	"T88x_LSC_WRITE_HITS",
+	"T88x_LSC_WRITE_OP",
+	"T88x_LSC_ATOMIC_HITS",
+	"T88x_LSC_ATOMIC_OP",
+	"T88x_LSC_LINE_FETCHES",
+	"T88x_LSC_DIRTY_LINE",
+	"T88x_LSC_SNOOPS",
+	"T88x_AXI_TLB_STALL",
+	"T88x_AXI_TLB_MISS",
+	"T88x_AXI_TLB_TRANSACTION",
+	"T88x_LS_TLB_MISS",
+	"T88x_LS_TLB_HIT",
+	"T88x_AXI_BEATS_READ",
+	"T88x_AXI_BEATS_WRITTEN",
+
+	/*L2 and MMU */
+	"",
+	"",
+	"",
+	"",
+	"T88x_MMU_HIT",
+	"T88x_MMU_NEW_MISS",
+	"T88x_MMU_REPLAY_FULL",
+	"T88x_MMU_REPLAY_MISS",
+	"T88x_MMU_TABLE_WALK",
+	"T88x_MMU_REQUESTS",
+	"",
+	"",
+	"T88x_UTLB_HIT",
+	"T88x_UTLB_NEW_MISS",
+	"T88x_UTLB_REPLAY_FULL",
+	"T88x_UTLB_REPLAY_MISS",
+	"T88x_UTLB_STALL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"T88x_L2_EXT_WRITE_BEATS",
+	"T88x_L2_EXT_READ_BEATS",
+	"T88x_L2_ANY_LOOKUP",
+	"T88x_L2_READ_LOOKUP",
+	"T88x_L2_SREAD_LOOKUP",
+	"T88x_L2_READ_REPLAY",
+	"T88x_L2_READ_SNOOP",
+	"T88x_L2_READ_HIT",
+	"T88x_L2_CLEAN_MISS",
+	"T88x_L2_WRITE_LOOKUP",
+	"T88x_L2_SWRITE_LOOKUP",
+	"T88x_L2_WRITE_REPLAY",
+	"T88x_L2_WRITE_SNOOP",
+	"T88x_L2_WRITE_HIT",
+	"T88x_L2_EXT_READ_FULL",
+	"",
+	"T88x_L2_EXT_WRITE_FULL",
+	"T88x_L2_EXT_R_W_HAZARD",
+	"T88x_L2_EXT_READ",
+	"T88x_L2_EXT_READ_LINE",
+	"T88x_L2_EXT_WRITE",
+	"T88x_L2_EXT_WRITE_LINE",
+	"T88x_L2_EXT_WRITE_SMALL",
+	"T88x_L2_EXT_BARRIER",
+	"T88x_L2_EXT_AR_STALL",
+	"T88x_L2_EXT_R_BUF_FULL",
+	"T88x_L2_EXT_RD_BUF_FULL",
+	"T88x_L2_EXT_R_RAW",
+	"T88x_L2_EXT_W_STALL",
+	"T88x_L2_EXT_W_BUF_FULL",
+	"T88x_L2_EXT_R_BUF_FULL",
+	"T88x_L2_TAG_HAZARD",
+	"T88x_L2_SNOOP_FULL",
+	"T88x_L2_REPLAY_FULL"
+};
+
+#include "mali_kbase_gator_hwcnt_names_tmix.h"
+
+#include "mali_kbase_gator_hwcnt_names_thex.h"
+
+#include "mali_kbase_gator_hwcnt_names_tsix.h"
+
+#include "mali_kbase_gator_hwcnt_names_tnox.h"
+
+#include "mali_kbase_gator_hwcnt_names_tgox.h"
+
+#include "mali_kbase_gator_hwcnt_names_tkax.h"
+
+#include "mali_kbase_gator_hwcnt_names_ttrx.h"
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h
new file mode 100644
index 0000000..72b5266
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TGOX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TGOX_H_
+
+static const char * const hardware_counters_mali_tGOx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_MESSAGES_SENT",
+	"TGOx_MESSAGES_RECEIVED",
+	"TGOx_GPU_ACTIVE",
+	"TGOx_IRQ_ACTIVE",
+	"TGOx_JS0_JOBS",
+	"TGOx_JS0_TASKS",
+	"TGOx_JS0_ACTIVE",
+	"",
+	"TGOx_JS0_WAIT_READ",
+	"TGOx_JS0_WAIT_ISSUE",
+	"TGOx_JS0_WAIT_DEPEND",
+	"TGOx_JS0_WAIT_FINISH",
+	"TGOx_JS1_JOBS",
+	"TGOx_JS1_TASKS",
+	"TGOx_JS1_ACTIVE",
+	"",
+	"TGOx_JS1_WAIT_READ",
+	"TGOx_JS1_WAIT_ISSUE",
+	"TGOx_JS1_WAIT_DEPEND",
+	"TGOx_JS1_WAIT_FINISH",
+	"TGOx_JS2_JOBS",
+	"TGOx_JS2_TASKS",
+	"TGOx_JS2_ACTIVE",
+	"",
+	"TGOx_JS2_WAIT_READ",
+	"TGOx_JS2_WAIT_ISSUE",
+	"TGOx_JS2_WAIT_DEPEND",
+	"TGOx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_TILER_ACTIVE",
+	"TGOx_JOBS_PROCESSED",
+	"TGOx_TRIANGLES",
+	"TGOx_LINES",
+	"TGOx_POINTS",
+	"TGOx_FRONT_FACING",
+	"TGOx_BACK_FACING",
+	"TGOx_PRIM_VISIBLE",
+	"TGOx_PRIM_CULLED",
+	"TGOx_PRIM_CLIPPED",
+	"TGOx_PRIM_SAT_CULLED",
+	"TGOx_BIN_ALLOC_INIT",
+	"TGOx_BIN_ALLOC_OVERFLOW",
+	"TGOx_BUS_READ",
+	"",
+	"TGOx_BUS_WRITE",
+	"TGOx_LOADING_DESC",
+	"TGOx_IDVS_POS_SHAD_REQ",
+	"TGOx_IDVS_POS_SHAD_WAIT",
+	"TGOx_IDVS_POS_SHAD_STALL",
+	"TGOx_IDVS_POS_FIFO_FULL",
+	"TGOx_PREFETCH_STALL",
+	"TGOx_VCACHE_HIT",
+	"TGOx_VCACHE_MISS",
+	"TGOx_VCACHE_LINE_WAIT",
+	"TGOx_VFETCH_POS_READ_WAIT",
+	"TGOx_VFETCH_VERTEX_WAIT",
+	"TGOx_VFETCH_STALL",
+	"TGOx_PRIMASSY_STALL",
+	"TGOx_BBOX_GEN_STALL",
+	"TGOx_IDVS_VBU_HIT",
+	"TGOx_IDVS_VBU_MISS",
+	"TGOx_IDVS_VBU_LINE_DEALLOCATE",
+	"TGOx_IDVS_VAR_SHAD_REQ",
+	"TGOx_IDVS_VAR_SHAD_STALL",
+	"TGOx_BINNER_STALL",
+	"TGOx_ITER_STALL",
+	"TGOx_COMPRESS_MISS",
+	"TGOx_COMPRESS_STALL",
+	"TGOx_PCACHE_HIT",
+	"TGOx_PCACHE_MISS",
+	"TGOx_PCACHE_MISS_STALL",
+	"TGOx_PCACHE_EVICT_STALL",
+	"TGOx_PMGR_PTR_WR_STALL",
+	"TGOx_PMGR_PTR_RD_STALL",
+	"TGOx_PMGR_CMD_WR_STALL",
+	"TGOx_WRBUF_ACTIVE",
+	"TGOx_WRBUF_HIT",
+	"TGOx_WRBUF_MISS",
+	"TGOx_WRBUF_NO_FREE_LINE_STALL",
+	"TGOx_WRBUF_NO_AXI_ID_STALL",
+	"TGOx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TGOx_UTLB_TRANS",
+	"TGOx_UTLB_TRANS_HIT",
+	"TGOx_UTLB_TRANS_STALL",
+	"TGOx_UTLB_TRANS_MISS_DELAY",
+	"TGOx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_FRAG_ACTIVE",
+	"TGOx_FRAG_PRIMITIVES",
+	"TGOx_FRAG_PRIM_RAST",
+	"TGOx_FRAG_FPK_ACTIVE",
+	"TGOx_FRAG_STARVING",
+	"TGOx_FRAG_WARPS",
+	"TGOx_FRAG_PARTIAL_WARPS",
+	"TGOx_FRAG_QUADS_RAST",
+	"TGOx_FRAG_QUADS_EZS_TEST",
+	"TGOx_FRAG_QUADS_EZS_UPDATE",
+	"TGOx_FRAG_QUADS_EZS_KILL",
+	"TGOx_FRAG_LZS_TEST",
+	"TGOx_FRAG_LZS_KILL",
+	"TGOx_WARP_REG_SIZE_64",
+	"TGOx_FRAG_PTILES",
+	"TGOx_FRAG_TRANS_ELIM",
+	"TGOx_QUAD_FPK_KILLER",
+	"TGOx_FULL_QUAD_WARPS",
+	"TGOx_COMPUTE_ACTIVE",
+	"TGOx_COMPUTE_TASKS",
+	"TGOx_COMPUTE_WARPS",
+	"TGOx_COMPUTE_STARVING",
+	"TGOx_EXEC_CORE_ACTIVE",
+	"TGOx_EXEC_ACTIVE",
+	"TGOx_EXEC_INSTR_COUNT",
+	"TGOx_EXEC_INSTR_DIVERGED",
+	"TGOx_EXEC_INSTR_STARVING",
+	"TGOx_ARITH_INSTR_SINGLE_FMA",
+	"TGOx_ARITH_INSTR_DOUBLE",
+	"TGOx_ARITH_INSTR_MSG",
+	"TGOx_ARITH_INSTR_MSG_ONLY",
+	"TGOx_TEX_MSGI_NUM_QUADS",
+	"TGOx_TEX_DFCH_NUM_PASSES",
+	"TGOx_TEX_DFCH_NUM_PASSES_MISS",
+	"TGOx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TGOx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TGOx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TGOx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TGOx_TEX_TFCH_NUM_OPERATIONS",
+	"TGOx_TEX_FILT_NUM_OPERATIONS",
+	"TGOx_LS_MEM_READ_FULL",
+	"TGOx_LS_MEM_READ_SHORT",
+	"TGOx_LS_MEM_WRITE_FULL",
+	"TGOx_LS_MEM_WRITE_SHORT",
+	"TGOx_LS_MEM_ATOMIC",
+	"TGOx_VARY_INSTR",
+	"TGOx_VARY_SLOT_32",
+	"TGOx_VARY_SLOT_16",
+	"TGOx_ATTR_INSTR",
+	"TGOx_ARITH_INSTR_FP_MUL",
+	"TGOx_BEATS_RD_FTC",
+	"TGOx_BEATS_RD_FTC_EXT",
+	"TGOx_BEATS_RD_LSC",
+	"TGOx_BEATS_RD_LSC_EXT",
+	"TGOx_BEATS_RD_TEX",
+	"TGOx_BEATS_RD_TEX_EXT",
+	"TGOx_BEATS_RD_OTHER",
+	"TGOx_BEATS_WR_LSC_WB",
+	"TGOx_BEATS_WR_TIB",
+	"TGOx_BEATS_WR_LSC_OTHER",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TGOx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TGOx_L2_RD_MSG_IN",
+	"TGOx_L2_RD_MSG_IN_STALL",
+	"TGOx_L2_WR_MSG_IN",
+	"TGOx_L2_WR_MSG_IN_STALL",
+	"TGOx_L2_SNP_MSG_IN",
+	"TGOx_L2_SNP_MSG_IN_STALL",
+	"TGOx_L2_RD_MSG_OUT",
+	"TGOx_L2_RD_MSG_OUT_STALL",
+	"TGOx_L2_WR_MSG_OUT",
+	"TGOx_L2_ANY_LOOKUP",
+	"TGOx_L2_READ_LOOKUP",
+	"TGOx_L2_WRITE_LOOKUP",
+	"TGOx_L2_EXT_SNOOP_LOOKUP",
+	"TGOx_L2_EXT_READ",
+	"TGOx_L2_EXT_READ_NOSNP",
+	"TGOx_L2_EXT_READ_UNIQUE",
+	"TGOx_L2_EXT_READ_BEATS",
+	"TGOx_L2_EXT_AR_STALL",
+	"TGOx_L2_EXT_AR_CNT_Q1",
+	"TGOx_L2_EXT_AR_CNT_Q2",
+	"TGOx_L2_EXT_AR_CNT_Q3",
+	"TGOx_L2_EXT_RRESP_0_127",
+	"TGOx_L2_EXT_RRESP_128_191",
+	"TGOx_L2_EXT_RRESP_192_255",
+	"TGOx_L2_EXT_RRESP_256_319",
+	"TGOx_L2_EXT_RRESP_320_383",
+	"TGOx_L2_EXT_WRITE",
+	"TGOx_L2_EXT_WRITE_NOSNP_FULL",
+	"TGOx_L2_EXT_WRITE_NOSNP_PTL",
+	"TGOx_L2_EXT_WRITE_SNP_FULL",
+	"TGOx_L2_EXT_WRITE_SNP_PTL",
+	"TGOx_L2_EXT_WRITE_BEATS",
+	"TGOx_L2_EXT_W_STALL",
+	"TGOx_L2_EXT_AW_CNT_Q1",
+	"TGOx_L2_EXT_AW_CNT_Q2",
+	"TGOx_L2_EXT_AW_CNT_Q3",
+	"TGOx_L2_EXT_SNOOP",
+	"TGOx_L2_EXT_SNOOP_STALL",
+	"TGOx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TGOx_L2_EXT_SNOOP_RESP_DATA",
+	"TGOx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TGOX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h
new file mode 100644
index 0000000..e24e91a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_THEX_H_
+
+static const char * const hardware_counters_mali_tHEx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"THEx_MESSAGES_SENT",
+	"THEx_MESSAGES_RECEIVED",
+	"THEx_GPU_ACTIVE",
+	"THEx_IRQ_ACTIVE",
+	"THEx_JS0_JOBS",
+	"THEx_JS0_TASKS",
+	"THEx_JS0_ACTIVE",
+	"",
+	"THEx_JS0_WAIT_READ",
+	"THEx_JS0_WAIT_ISSUE",
+	"THEx_JS0_WAIT_DEPEND",
+	"THEx_JS0_WAIT_FINISH",
+	"THEx_JS1_JOBS",
+	"THEx_JS1_TASKS",
+	"THEx_JS1_ACTIVE",
+	"",
+	"THEx_JS1_WAIT_READ",
+	"THEx_JS1_WAIT_ISSUE",
+	"THEx_JS1_WAIT_DEPEND",
+	"THEx_JS1_WAIT_FINISH",
+	"THEx_JS2_JOBS",
+	"THEx_JS2_TASKS",
+	"THEx_JS2_ACTIVE",
+	"",
+	"THEx_JS2_WAIT_READ",
+	"THEx_JS2_WAIT_ISSUE",
+	"THEx_JS2_WAIT_DEPEND",
+	"THEx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"THEx_TILER_ACTIVE",
+	"THEx_JOBS_PROCESSED",
+	"THEx_TRIANGLES",
+	"THEx_LINES",
+	"THEx_POINTS",
+	"THEx_FRONT_FACING",
+	"THEx_BACK_FACING",
+	"THEx_PRIM_VISIBLE",
+	"THEx_PRIM_CULLED",
+	"THEx_PRIM_CLIPPED",
+	"THEx_PRIM_SAT_CULLED",
+	"THEx_BIN_ALLOC_INIT",
+	"THEx_BIN_ALLOC_OVERFLOW",
+	"THEx_BUS_READ",
+	"",
+	"THEx_BUS_WRITE",
+	"THEx_LOADING_DESC",
+	"THEx_IDVS_POS_SHAD_REQ",
+	"THEx_IDVS_POS_SHAD_WAIT",
+	"THEx_IDVS_POS_SHAD_STALL",
+	"THEx_IDVS_POS_FIFO_FULL",
+	"THEx_PREFETCH_STALL",
+	"THEx_VCACHE_HIT",
+	"THEx_VCACHE_MISS",
+	"THEx_VCACHE_LINE_WAIT",
+	"THEx_VFETCH_POS_READ_WAIT",
+	"THEx_VFETCH_VERTEX_WAIT",
+	"THEx_VFETCH_STALL",
+	"THEx_PRIMASSY_STALL",
+	"THEx_BBOX_GEN_STALL",
+	"THEx_IDVS_VBU_HIT",
+	"THEx_IDVS_VBU_MISS",
+	"THEx_IDVS_VBU_LINE_DEALLOCATE",
+	"THEx_IDVS_VAR_SHAD_REQ",
+	"THEx_IDVS_VAR_SHAD_STALL",
+	"THEx_BINNER_STALL",
+	"THEx_ITER_STALL",
+	"THEx_COMPRESS_MISS",
+	"THEx_COMPRESS_STALL",
+	"THEx_PCACHE_HIT",
+	"THEx_PCACHE_MISS",
+	"THEx_PCACHE_MISS_STALL",
+	"THEx_PCACHE_EVICT_STALL",
+	"THEx_PMGR_PTR_WR_STALL",
+	"THEx_PMGR_PTR_RD_STALL",
+	"THEx_PMGR_CMD_WR_STALL",
+	"THEx_WRBUF_ACTIVE",
+	"THEx_WRBUF_HIT",
+	"THEx_WRBUF_MISS",
+	"THEx_WRBUF_NO_FREE_LINE_STALL",
+	"THEx_WRBUF_NO_AXI_ID_STALL",
+	"THEx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"THEx_UTLB_TRANS",
+	"THEx_UTLB_TRANS_HIT",
+	"THEx_UTLB_TRANS_STALL",
+	"THEx_UTLB_TRANS_MISS_DELAY",
+	"THEx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"THEx_FRAG_ACTIVE",
+	"THEx_FRAG_PRIMITIVES",
+	"THEx_FRAG_PRIM_RAST",
+	"THEx_FRAG_FPK_ACTIVE",
+	"THEx_FRAG_STARVING",
+	"THEx_FRAG_WARPS",
+	"THEx_FRAG_PARTIAL_WARPS",
+	"THEx_FRAG_QUADS_RAST",
+	"THEx_FRAG_QUADS_EZS_TEST",
+	"THEx_FRAG_QUADS_EZS_UPDATE",
+	"THEx_FRAG_QUADS_EZS_KILL",
+	"THEx_FRAG_LZS_TEST",
+	"THEx_FRAG_LZS_KILL",
+	"",
+	"THEx_FRAG_PTILES",
+	"THEx_FRAG_TRANS_ELIM",
+	"THEx_QUAD_FPK_KILLER",
+	"",
+	"THEx_COMPUTE_ACTIVE",
+	"THEx_COMPUTE_TASKS",
+	"THEx_COMPUTE_WARPS",
+	"THEx_COMPUTE_STARVING",
+	"THEx_EXEC_CORE_ACTIVE",
+	"THEx_EXEC_ACTIVE",
+	"THEx_EXEC_INSTR_COUNT",
+	"THEx_EXEC_INSTR_DIVERGED",
+	"THEx_EXEC_INSTR_STARVING",
+	"THEx_ARITH_INSTR_SINGLE_FMA",
+	"THEx_ARITH_INSTR_DOUBLE",
+	"THEx_ARITH_INSTR_MSG",
+	"THEx_ARITH_INSTR_MSG_ONLY",
+	"THEx_TEX_INSTR",
+	"THEx_TEX_INSTR_MIPMAP",
+	"THEx_TEX_INSTR_COMPRESSED",
+	"THEx_TEX_INSTR_3D",
+	"THEx_TEX_INSTR_TRILINEAR",
+	"THEx_TEX_COORD_ISSUE",
+	"THEx_TEX_COORD_STALL",
+	"THEx_TEX_STARVE_CACHE",
+	"THEx_TEX_STARVE_FILTER",
+	"THEx_LS_MEM_READ_FULL",
+	"THEx_LS_MEM_READ_SHORT",
+	"THEx_LS_MEM_WRITE_FULL",
+	"THEx_LS_MEM_WRITE_SHORT",
+	"THEx_LS_MEM_ATOMIC",
+	"THEx_VARY_INSTR",
+	"THEx_VARY_SLOT_32",
+	"THEx_VARY_SLOT_16",
+	"THEx_ATTR_INSTR",
+	"THEx_ARITH_INSTR_FP_MUL",
+	"THEx_BEATS_RD_FTC",
+	"THEx_BEATS_RD_FTC_EXT",
+	"THEx_BEATS_RD_LSC",
+	"THEx_BEATS_RD_LSC_EXT",
+	"THEx_BEATS_RD_TEX",
+	"THEx_BEATS_RD_TEX_EXT",
+	"THEx_BEATS_RD_OTHER",
+	"THEx_BEATS_WR_LSC",
+	"THEx_BEATS_WR_TIB",
+	"",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"THEx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"THEx_L2_RD_MSG_IN",
+	"THEx_L2_RD_MSG_IN_STALL",
+	"THEx_L2_WR_MSG_IN",
+	"THEx_L2_WR_MSG_IN_STALL",
+	"THEx_L2_SNP_MSG_IN",
+	"THEx_L2_SNP_MSG_IN_STALL",
+	"THEx_L2_RD_MSG_OUT",
+	"THEx_L2_RD_MSG_OUT_STALL",
+	"THEx_L2_WR_MSG_OUT",
+	"THEx_L2_ANY_LOOKUP",
+	"THEx_L2_READ_LOOKUP",
+	"THEx_L2_WRITE_LOOKUP",
+	"THEx_L2_EXT_SNOOP_LOOKUP",
+	"THEx_L2_EXT_READ",
+	"THEx_L2_EXT_READ_NOSNP",
+	"THEx_L2_EXT_READ_UNIQUE",
+	"THEx_L2_EXT_READ_BEATS",
+	"THEx_L2_EXT_AR_STALL",
+	"THEx_L2_EXT_AR_CNT_Q1",
+	"THEx_L2_EXT_AR_CNT_Q2",
+	"THEx_L2_EXT_AR_CNT_Q3",
+	"THEx_L2_EXT_RRESP_0_127",
+	"THEx_L2_EXT_RRESP_128_191",
+	"THEx_L2_EXT_RRESP_192_255",
+	"THEx_L2_EXT_RRESP_256_319",
+	"THEx_L2_EXT_RRESP_320_383",
+	"THEx_L2_EXT_WRITE",
+	"THEx_L2_EXT_WRITE_NOSNP_FULL",
+	"THEx_L2_EXT_WRITE_NOSNP_PTL",
+	"THEx_L2_EXT_WRITE_SNP_FULL",
+	"THEx_L2_EXT_WRITE_SNP_PTL",
+	"THEx_L2_EXT_WRITE_BEATS",
+	"THEx_L2_EXT_W_STALL",
+	"THEx_L2_EXT_AW_CNT_Q1",
+	"THEx_L2_EXT_AW_CNT_Q2",
+	"THEx_L2_EXT_AW_CNT_Q3",
+	"THEx_L2_EXT_SNOOP",
+	"THEx_L2_EXT_SNOOP_STALL",
+	"THEx_L2_EXT_SNOOP_RESP_CLEAN",
+	"THEx_L2_EXT_SNOOP_RESP_DATA",
+	"THEx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_THEX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h
new file mode 100644
index 0000000..73db45c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TKAX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TKAX_H_
+
+static const char * const hardware_counters_mali_tKAx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_MESSAGES_SENT",
+	"TKAx_MESSAGES_RECEIVED",
+	"TKAx_GPU_ACTIVE",
+	"TKAx_IRQ_ACTIVE",
+	"TKAx_JS0_JOBS",
+	"TKAx_JS0_TASKS",
+	"TKAx_JS0_ACTIVE",
+	"",
+	"TKAx_JS0_WAIT_READ",
+	"TKAx_JS0_WAIT_ISSUE",
+	"TKAx_JS0_WAIT_DEPEND",
+	"TKAx_JS0_WAIT_FINISH",
+	"TKAx_JS1_JOBS",
+	"TKAx_JS1_TASKS",
+	"TKAx_JS1_ACTIVE",
+	"",
+	"TKAx_JS1_WAIT_READ",
+	"TKAx_JS1_WAIT_ISSUE",
+	"TKAx_JS1_WAIT_DEPEND",
+	"TKAx_JS1_WAIT_FINISH",
+	"TKAx_JS2_JOBS",
+	"TKAx_JS2_TASKS",
+	"TKAx_JS2_ACTIVE",
+	"",
+	"TKAx_JS2_WAIT_READ",
+	"TKAx_JS2_WAIT_ISSUE",
+	"TKAx_JS2_WAIT_DEPEND",
+	"TKAx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_TILER_ACTIVE",
+	"TKAx_JOBS_PROCESSED",
+	"TKAx_TRIANGLES",
+	"TKAx_LINES",
+	"TKAx_POINTS",
+	"TKAx_FRONT_FACING",
+	"TKAx_BACK_FACING",
+	"TKAx_PRIM_VISIBLE",
+	"TKAx_PRIM_CULLED",
+	"TKAx_PRIM_CLIPPED",
+	"TKAx_PRIM_SAT_CULLED",
+	"TKAx_BIN_ALLOC_INIT",
+	"TKAx_BIN_ALLOC_OVERFLOW",
+	"TKAx_BUS_READ",
+	"",
+	"TKAx_BUS_WRITE",
+	"TKAx_LOADING_DESC",
+	"TKAx_IDVS_POS_SHAD_REQ",
+	"TKAx_IDVS_POS_SHAD_WAIT",
+	"TKAx_IDVS_POS_SHAD_STALL",
+	"TKAx_IDVS_POS_FIFO_FULL",
+	"TKAx_PREFETCH_STALL",
+	"TKAx_VCACHE_HIT",
+	"TKAx_VCACHE_MISS",
+	"TKAx_VCACHE_LINE_WAIT",
+	"TKAx_VFETCH_POS_READ_WAIT",
+	"TKAx_VFETCH_VERTEX_WAIT",
+	"TKAx_VFETCH_STALL",
+	"TKAx_PRIMASSY_STALL",
+	"TKAx_BBOX_GEN_STALL",
+	"TKAx_IDVS_VBU_HIT",
+	"TKAx_IDVS_VBU_MISS",
+	"TKAx_IDVS_VBU_LINE_DEALLOCATE",
+	"TKAx_IDVS_VAR_SHAD_REQ",
+	"TKAx_IDVS_VAR_SHAD_STALL",
+	"TKAx_BINNER_STALL",
+	"TKAx_ITER_STALL",
+	"TKAx_COMPRESS_MISS",
+	"TKAx_COMPRESS_STALL",
+	"TKAx_PCACHE_HIT",
+	"TKAx_PCACHE_MISS",
+	"TKAx_PCACHE_MISS_STALL",
+	"TKAx_PCACHE_EVICT_STALL",
+	"TKAx_PMGR_PTR_WR_STALL",
+	"TKAx_PMGR_PTR_RD_STALL",
+	"TKAx_PMGR_CMD_WR_STALL",
+	"TKAx_WRBUF_ACTIVE",
+	"TKAx_WRBUF_HIT",
+	"TKAx_WRBUF_MISS",
+	"TKAx_WRBUF_NO_FREE_LINE_STALL",
+	"TKAx_WRBUF_NO_AXI_ID_STALL",
+	"TKAx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TKAx_UTLB_TRANS",
+	"TKAx_UTLB_TRANS_HIT",
+	"TKAx_UTLB_TRANS_STALL",
+	"TKAx_UTLB_TRANS_MISS_DELAY",
+	"TKAx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_FRAG_ACTIVE",
+	"TKAx_FRAG_PRIMITIVES",
+	"TKAx_FRAG_PRIM_RAST",
+	"TKAx_FRAG_FPK_ACTIVE",
+	"TKAx_FRAG_STARVING",
+	"TKAx_FRAG_WARPS",
+	"TKAx_FRAG_PARTIAL_WARPS",
+	"TKAx_FRAG_QUADS_RAST",
+	"TKAx_FRAG_QUADS_EZS_TEST",
+	"TKAx_FRAG_QUADS_EZS_UPDATE",
+	"TKAx_FRAG_QUADS_EZS_KILL",
+	"TKAx_FRAG_LZS_TEST",
+	"TKAx_FRAG_LZS_KILL",
+	"TKAx_WARP_REG_SIZE_64",
+	"TKAx_FRAG_PTILES",
+	"TKAx_FRAG_TRANS_ELIM",
+	"TKAx_QUAD_FPK_KILLER",
+	"TKAx_FULL_QUAD_WARPS",
+	"TKAx_COMPUTE_ACTIVE",
+	"TKAx_COMPUTE_TASKS",
+	"TKAx_COMPUTE_WARPS",
+	"TKAx_COMPUTE_STARVING",
+	"TKAx_EXEC_CORE_ACTIVE",
+	"TKAx_EXEC_ACTIVE",
+	"TKAx_EXEC_INSTR_COUNT",
+	"TKAx_EXEC_INSTR_DIVERGED",
+	"TKAx_EXEC_INSTR_STARVING",
+	"TKAx_ARITH_INSTR_SINGLE_FMA",
+	"TKAx_ARITH_INSTR_DOUBLE",
+	"TKAx_ARITH_INSTR_MSG",
+	"TKAx_ARITH_INSTR_MSG_ONLY",
+	"TKAx_TEX_MSGI_NUM_QUADS",
+	"TKAx_TEX_DFCH_NUM_PASSES",
+	"TKAx_TEX_DFCH_NUM_PASSES_MISS",
+	"TKAx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TKAx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TKAx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TKAx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TKAx_TEX_TFCH_NUM_OPERATIONS",
+	"TKAx_TEX_FILT_NUM_OPERATIONS",
+	"TKAx_LS_MEM_READ_FULL",
+	"TKAx_LS_MEM_READ_SHORT",
+	"TKAx_LS_MEM_WRITE_FULL",
+	"TKAx_LS_MEM_WRITE_SHORT",
+	"TKAx_LS_MEM_ATOMIC",
+	"TKAx_VARY_INSTR",
+	"TKAx_VARY_SLOT_32",
+	"TKAx_VARY_SLOT_16",
+	"TKAx_ATTR_INSTR",
+	"TKAx_ARITH_INSTR_FP_MUL",
+	"TKAx_BEATS_RD_FTC",
+	"TKAx_BEATS_RD_FTC_EXT",
+	"TKAx_BEATS_RD_LSC",
+	"TKAx_BEATS_RD_LSC_EXT",
+	"TKAx_BEATS_RD_TEX",
+	"TKAx_BEATS_RD_TEX_EXT",
+	"TKAx_BEATS_RD_OTHER",
+	"TKAx_BEATS_WR_LSC_OTHER",
+	"TKAx_BEATS_WR_TIB",
+	"TKAx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TKAx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TKAx_L2_RD_MSG_IN",
+	"TKAx_L2_RD_MSG_IN_STALL",
+	"TKAx_L2_WR_MSG_IN",
+	"TKAx_L2_WR_MSG_IN_STALL",
+	"TKAx_L2_SNP_MSG_IN",
+	"TKAx_L2_SNP_MSG_IN_STALL",
+	"TKAx_L2_RD_MSG_OUT",
+	"TKAx_L2_RD_MSG_OUT_STALL",
+	"TKAx_L2_WR_MSG_OUT",
+	"TKAx_L2_ANY_LOOKUP",
+	"TKAx_L2_READ_LOOKUP",
+	"TKAx_L2_WRITE_LOOKUP",
+	"TKAx_L2_EXT_SNOOP_LOOKUP",
+	"TKAx_L2_EXT_READ",
+	"TKAx_L2_EXT_READ_NOSNP",
+	"TKAx_L2_EXT_READ_UNIQUE",
+	"TKAx_L2_EXT_READ_BEATS",
+	"TKAx_L2_EXT_AR_STALL",
+	"TKAx_L2_EXT_AR_CNT_Q1",
+	"TKAx_L2_EXT_AR_CNT_Q2",
+	"TKAx_L2_EXT_AR_CNT_Q3",
+	"TKAx_L2_EXT_RRESP_0_127",
+	"TKAx_L2_EXT_RRESP_128_191",
+	"TKAx_L2_EXT_RRESP_192_255",
+	"TKAx_L2_EXT_RRESP_256_319",
+	"TKAx_L2_EXT_RRESP_320_383",
+	"TKAx_L2_EXT_WRITE",
+	"TKAx_L2_EXT_WRITE_NOSNP_FULL",
+	"TKAx_L2_EXT_WRITE_NOSNP_PTL",
+	"TKAx_L2_EXT_WRITE_SNP_FULL",
+	"TKAx_L2_EXT_WRITE_SNP_PTL",
+	"TKAx_L2_EXT_WRITE_BEATS",
+	"TKAx_L2_EXT_W_STALL",
+	"TKAx_L2_EXT_AW_CNT_Q1",
+	"TKAx_L2_EXT_AW_CNT_Q2",
+	"TKAx_L2_EXT_AW_CNT_Q3",
+	"TKAx_L2_EXT_SNOOP",
+	"TKAx_L2_EXT_SNOOP_STALL",
+	"TKAx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TKAx_L2_EXT_SNOOP_RESP_DATA",
+	"TKAx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TKAX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h
new file mode 100644
index 0000000..63eac50
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TMIX_H_
+
+static const char * const hardware_counters_mali_tMIx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_MESSAGES_SENT",
+	"TMIx_MESSAGES_RECEIVED",
+	"TMIx_GPU_ACTIVE",
+	"TMIx_IRQ_ACTIVE",
+	"TMIx_JS0_JOBS",
+	"TMIx_JS0_TASKS",
+	"TMIx_JS0_ACTIVE",
+	"",
+	"TMIx_JS0_WAIT_READ",
+	"TMIx_JS0_WAIT_ISSUE",
+	"TMIx_JS0_WAIT_DEPEND",
+	"TMIx_JS0_WAIT_FINISH",
+	"TMIx_JS1_JOBS",
+	"TMIx_JS1_TASKS",
+	"TMIx_JS1_ACTIVE",
+	"",
+	"TMIx_JS1_WAIT_READ",
+	"TMIx_JS1_WAIT_ISSUE",
+	"TMIx_JS1_WAIT_DEPEND",
+	"TMIx_JS1_WAIT_FINISH",
+	"TMIx_JS2_JOBS",
+	"TMIx_JS2_TASKS",
+	"TMIx_JS2_ACTIVE",
+	"",
+	"TMIx_JS2_WAIT_READ",
+	"TMIx_JS2_WAIT_ISSUE",
+	"TMIx_JS2_WAIT_DEPEND",
+	"TMIx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_TILER_ACTIVE",
+	"TMIx_JOBS_PROCESSED",
+	"TMIx_TRIANGLES",
+	"TMIx_LINES",
+	"TMIx_POINTS",
+	"TMIx_FRONT_FACING",
+	"TMIx_BACK_FACING",
+	"TMIx_PRIM_VISIBLE",
+	"TMIx_PRIM_CULLED",
+	"TMIx_PRIM_CLIPPED",
+	"TMIx_PRIM_SAT_CULLED",
+	"TMIx_BIN_ALLOC_INIT",
+	"TMIx_BIN_ALLOC_OVERFLOW",
+	"TMIx_BUS_READ",
+	"",
+	"TMIx_BUS_WRITE",
+	"TMIx_LOADING_DESC",
+	"TMIx_IDVS_POS_SHAD_REQ",
+	"TMIx_IDVS_POS_SHAD_WAIT",
+	"TMIx_IDVS_POS_SHAD_STALL",
+	"TMIx_IDVS_POS_FIFO_FULL",
+	"TMIx_PREFETCH_STALL",
+	"TMIx_VCACHE_HIT",
+	"TMIx_VCACHE_MISS",
+	"TMIx_VCACHE_LINE_WAIT",
+	"TMIx_VFETCH_POS_READ_WAIT",
+	"TMIx_VFETCH_VERTEX_WAIT",
+	"TMIx_VFETCH_STALL",
+	"TMIx_PRIMASSY_STALL",
+	"TMIx_BBOX_GEN_STALL",
+	"TMIx_IDVS_VBU_HIT",
+	"TMIx_IDVS_VBU_MISS",
+	"TMIx_IDVS_VBU_LINE_DEALLOCATE",
+	"TMIx_IDVS_VAR_SHAD_REQ",
+	"TMIx_IDVS_VAR_SHAD_STALL",
+	"TMIx_BINNER_STALL",
+	"TMIx_ITER_STALL",
+	"TMIx_COMPRESS_MISS",
+	"TMIx_COMPRESS_STALL",
+	"TMIx_PCACHE_HIT",
+	"TMIx_PCACHE_MISS",
+	"TMIx_PCACHE_MISS_STALL",
+	"TMIx_PCACHE_EVICT_STALL",
+	"TMIx_PMGR_PTR_WR_STALL",
+	"TMIx_PMGR_PTR_RD_STALL",
+	"TMIx_PMGR_CMD_WR_STALL",
+	"TMIx_WRBUF_ACTIVE",
+	"TMIx_WRBUF_HIT",
+	"TMIx_WRBUF_MISS",
+	"TMIx_WRBUF_NO_FREE_LINE_STALL",
+	"TMIx_WRBUF_NO_AXI_ID_STALL",
+	"TMIx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TMIx_UTLB_TRANS",
+	"TMIx_UTLB_TRANS_HIT",
+	"TMIx_UTLB_TRANS_STALL",
+	"TMIx_UTLB_TRANS_MISS_DELAY",
+	"TMIx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_FRAG_ACTIVE",
+	"TMIx_FRAG_PRIMITIVES",
+	"TMIx_FRAG_PRIM_RAST",
+	"TMIx_FRAG_FPK_ACTIVE",
+	"TMIx_FRAG_STARVING",
+	"TMIx_FRAG_WARPS",
+	"TMIx_FRAG_PARTIAL_WARPS",
+	"TMIx_FRAG_QUADS_RAST",
+	"TMIx_FRAG_QUADS_EZS_TEST",
+	"TMIx_FRAG_QUADS_EZS_UPDATE",
+	"TMIx_FRAG_QUADS_EZS_KILL",
+	"TMIx_FRAG_LZS_TEST",
+	"TMIx_FRAG_LZS_KILL",
+	"",
+	"TMIx_FRAG_PTILES",
+	"TMIx_FRAG_TRANS_ELIM",
+	"TMIx_QUAD_FPK_KILLER",
+	"",
+	"TMIx_COMPUTE_ACTIVE",
+	"TMIx_COMPUTE_TASKS",
+	"TMIx_COMPUTE_WARPS",
+	"TMIx_COMPUTE_STARVING",
+	"TMIx_EXEC_CORE_ACTIVE",
+	"TMIx_EXEC_ACTIVE",
+	"TMIx_EXEC_INSTR_COUNT",
+	"TMIx_EXEC_INSTR_DIVERGED",
+	"TMIx_EXEC_INSTR_STARVING",
+	"TMIx_ARITH_INSTR_SINGLE_FMA",
+	"TMIx_ARITH_INSTR_DOUBLE",
+	"TMIx_ARITH_INSTR_MSG",
+	"TMIx_ARITH_INSTR_MSG_ONLY",
+	"TMIx_TEX_INSTR",
+	"TMIx_TEX_INSTR_MIPMAP",
+	"TMIx_TEX_INSTR_COMPRESSED",
+	"TMIx_TEX_INSTR_3D",
+	"TMIx_TEX_INSTR_TRILINEAR",
+	"TMIx_TEX_COORD_ISSUE",
+	"TMIx_TEX_COORD_STALL",
+	"TMIx_TEX_STARVE_CACHE",
+	"TMIx_TEX_STARVE_FILTER",
+	"TMIx_LS_MEM_READ_FULL",
+	"TMIx_LS_MEM_READ_SHORT",
+	"TMIx_LS_MEM_WRITE_FULL",
+	"TMIx_LS_MEM_WRITE_SHORT",
+	"TMIx_LS_MEM_ATOMIC",
+	"TMIx_VARY_INSTR",
+	"TMIx_VARY_SLOT_32",
+	"TMIx_VARY_SLOT_16",
+	"TMIx_ATTR_INSTR",
+	"TMIx_ARITH_INSTR_FP_MUL",
+	"TMIx_BEATS_RD_FTC",
+	"TMIx_BEATS_RD_FTC_EXT",
+	"TMIx_BEATS_RD_LSC",
+	"TMIx_BEATS_RD_LSC_EXT",
+	"TMIx_BEATS_RD_TEX",
+	"TMIx_BEATS_RD_TEX_EXT",
+	"TMIx_BEATS_RD_OTHER",
+	"TMIx_BEATS_WR_LSC",
+	"TMIx_BEATS_WR_TIB",
+	"",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TMIx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TMIx_L2_RD_MSG_IN",
+	"TMIx_L2_RD_MSG_IN_STALL",
+	"TMIx_L2_WR_MSG_IN",
+	"TMIx_L2_WR_MSG_IN_STALL",
+	"TMIx_L2_SNP_MSG_IN",
+	"TMIx_L2_SNP_MSG_IN_STALL",
+	"TMIx_L2_RD_MSG_OUT",
+	"TMIx_L2_RD_MSG_OUT_STALL",
+	"TMIx_L2_WR_MSG_OUT",
+	"TMIx_L2_ANY_LOOKUP",
+	"TMIx_L2_READ_LOOKUP",
+	"TMIx_L2_WRITE_LOOKUP",
+	"TMIx_L2_EXT_SNOOP_LOOKUP",
+	"TMIx_L2_EXT_READ",
+	"TMIx_L2_EXT_READ_NOSNP",
+	"TMIx_L2_EXT_READ_UNIQUE",
+	"TMIx_L2_EXT_READ_BEATS",
+	"TMIx_L2_EXT_AR_STALL",
+	"TMIx_L2_EXT_AR_CNT_Q1",
+	"TMIx_L2_EXT_AR_CNT_Q2",
+	"TMIx_L2_EXT_AR_CNT_Q3",
+	"TMIx_L2_EXT_RRESP_0_127",
+	"TMIx_L2_EXT_RRESP_128_191",
+	"TMIx_L2_EXT_RRESP_192_255",
+	"TMIx_L2_EXT_RRESP_256_319",
+	"TMIx_L2_EXT_RRESP_320_383",
+	"TMIx_L2_EXT_WRITE",
+	"TMIx_L2_EXT_WRITE_NOSNP_FULL",
+	"TMIx_L2_EXT_WRITE_NOSNP_PTL",
+	"TMIx_L2_EXT_WRITE_SNP_FULL",
+	"TMIx_L2_EXT_WRITE_SNP_PTL",
+	"TMIx_L2_EXT_WRITE_BEATS",
+	"TMIx_L2_EXT_W_STALL",
+	"TMIx_L2_EXT_AW_CNT_Q1",
+	"TMIx_L2_EXT_AW_CNT_Q2",
+	"TMIx_L2_EXT_AW_CNT_Q3",
+	"TMIx_L2_EXT_SNOOP",
+	"TMIx_L2_EXT_SNOOP_STALL",
+	"TMIx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TMIx_L2_EXT_SNOOP_RESP_DATA",
+	"TMIx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TMIX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h
new file mode 100644
index 0000000..932663c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TNOX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TNOX_H_
+
+static const char * const hardware_counters_mali_tNOx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_MESSAGES_SENT",
+	"TNOx_MESSAGES_RECEIVED",
+	"TNOx_GPU_ACTIVE",
+	"TNOx_IRQ_ACTIVE",
+	"TNOx_JS0_JOBS",
+	"TNOx_JS0_TASKS",
+	"TNOx_JS0_ACTIVE",
+	"",
+	"TNOx_JS0_WAIT_READ",
+	"TNOx_JS0_WAIT_ISSUE",
+	"TNOx_JS0_WAIT_DEPEND",
+	"TNOx_JS0_WAIT_FINISH",
+	"TNOx_JS1_JOBS",
+	"TNOx_JS1_TASKS",
+	"TNOx_JS1_ACTIVE",
+	"",
+	"TNOx_JS1_WAIT_READ",
+	"TNOx_JS1_WAIT_ISSUE",
+	"TNOx_JS1_WAIT_DEPEND",
+	"TNOx_JS1_WAIT_FINISH",
+	"TNOx_JS2_JOBS",
+	"TNOx_JS2_TASKS",
+	"TNOx_JS2_ACTIVE",
+	"",
+	"TNOx_JS2_WAIT_READ",
+	"TNOx_JS2_WAIT_ISSUE",
+	"TNOx_JS2_WAIT_DEPEND",
+	"TNOx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_TILER_ACTIVE",
+	"TNOx_JOBS_PROCESSED",
+	"TNOx_TRIANGLES",
+	"TNOx_LINES",
+	"TNOx_POINTS",
+	"TNOx_FRONT_FACING",
+	"TNOx_BACK_FACING",
+	"TNOx_PRIM_VISIBLE",
+	"TNOx_PRIM_CULLED",
+	"TNOx_PRIM_CLIPPED",
+	"TNOx_PRIM_SAT_CULLED",
+	"TNOx_BIN_ALLOC_INIT",
+	"TNOx_BIN_ALLOC_OVERFLOW",
+	"TNOx_BUS_READ",
+	"",
+	"TNOx_BUS_WRITE",
+	"TNOx_LOADING_DESC",
+	"TNOx_IDVS_POS_SHAD_REQ",
+	"TNOx_IDVS_POS_SHAD_WAIT",
+	"TNOx_IDVS_POS_SHAD_STALL",
+	"TNOx_IDVS_POS_FIFO_FULL",
+	"TNOx_PREFETCH_STALL",
+	"TNOx_VCACHE_HIT",
+	"TNOx_VCACHE_MISS",
+	"TNOx_VCACHE_LINE_WAIT",
+	"TNOx_VFETCH_POS_READ_WAIT",
+	"TNOx_VFETCH_VERTEX_WAIT",
+	"TNOx_VFETCH_STALL",
+	"TNOx_PRIMASSY_STALL",
+	"TNOx_BBOX_GEN_STALL",
+	"TNOx_IDVS_VBU_HIT",
+	"TNOx_IDVS_VBU_MISS",
+	"TNOx_IDVS_VBU_LINE_DEALLOCATE",
+	"TNOx_IDVS_VAR_SHAD_REQ",
+	"TNOx_IDVS_VAR_SHAD_STALL",
+	"TNOx_BINNER_STALL",
+	"TNOx_ITER_STALL",
+	"TNOx_COMPRESS_MISS",
+	"TNOx_COMPRESS_STALL",
+	"TNOx_PCACHE_HIT",
+	"TNOx_PCACHE_MISS",
+	"TNOx_PCACHE_MISS_STALL",
+	"TNOx_PCACHE_EVICT_STALL",
+	"TNOx_PMGR_PTR_WR_STALL",
+	"TNOx_PMGR_PTR_RD_STALL",
+	"TNOx_PMGR_CMD_WR_STALL",
+	"TNOx_WRBUF_ACTIVE",
+	"TNOx_WRBUF_HIT",
+	"TNOx_WRBUF_MISS",
+	"TNOx_WRBUF_NO_FREE_LINE_STALL",
+	"TNOx_WRBUF_NO_AXI_ID_STALL",
+	"TNOx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TNOx_UTLB_TRANS",
+	"TNOx_UTLB_TRANS_HIT",
+	"TNOx_UTLB_TRANS_STALL",
+	"TNOx_UTLB_TRANS_MISS_DELAY",
+	"TNOx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_FRAG_ACTIVE",
+	"TNOx_FRAG_PRIMITIVES",
+	"TNOx_FRAG_PRIM_RAST",
+	"TNOx_FRAG_FPK_ACTIVE",
+	"TNOx_FRAG_STARVING",
+	"TNOx_FRAG_WARPS",
+	"TNOx_FRAG_PARTIAL_WARPS",
+	"TNOx_FRAG_QUADS_RAST",
+	"TNOx_FRAG_QUADS_EZS_TEST",
+	"TNOx_FRAG_QUADS_EZS_UPDATE",
+	"TNOx_FRAG_QUADS_EZS_KILL",
+	"TNOx_FRAG_LZS_TEST",
+	"TNOx_FRAG_LZS_KILL",
+	"TNOx_WARP_REG_SIZE_64",
+	"TNOx_FRAG_PTILES",
+	"TNOx_FRAG_TRANS_ELIM",
+	"TNOx_QUAD_FPK_KILLER",
+	"TNOx_FULL_QUAD_WARPS",
+	"TNOx_COMPUTE_ACTIVE",
+	"TNOx_COMPUTE_TASKS",
+	"TNOx_COMPUTE_WARPS",
+	"TNOx_COMPUTE_STARVING",
+	"TNOx_EXEC_CORE_ACTIVE",
+	"TNOx_EXEC_ACTIVE",
+	"TNOx_EXEC_INSTR_COUNT",
+	"TNOx_EXEC_INSTR_DIVERGED",
+	"TNOx_EXEC_INSTR_STARVING",
+	"TNOx_ARITH_INSTR_SINGLE_FMA",
+	"TNOx_ARITH_INSTR_DOUBLE",
+	"TNOx_ARITH_INSTR_MSG",
+	"TNOx_ARITH_INSTR_MSG_ONLY",
+	"TNOx_TEX_MSGI_NUM_QUADS",
+	"TNOx_TEX_DFCH_NUM_PASSES",
+	"TNOx_TEX_DFCH_NUM_PASSES_MISS",
+	"TNOx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TNOx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TNOx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TNOx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TNOx_TEX_TFCH_NUM_OPERATIONS",
+	"TNOx_TEX_FILT_NUM_OPERATIONS",
+	"TNOx_LS_MEM_READ_FULL",
+	"TNOx_LS_MEM_READ_SHORT",
+	"TNOx_LS_MEM_WRITE_FULL",
+	"TNOx_LS_MEM_WRITE_SHORT",
+	"TNOx_LS_MEM_ATOMIC",
+	"TNOx_VARY_INSTR",
+	"TNOx_VARY_SLOT_32",
+	"TNOx_VARY_SLOT_16",
+	"TNOx_ATTR_INSTR",
+	"TNOx_ARITH_INSTR_FP_MUL",
+	"TNOx_BEATS_RD_FTC",
+	"TNOx_BEATS_RD_FTC_EXT",
+	"TNOx_BEATS_RD_LSC",
+	"TNOx_BEATS_RD_LSC_EXT",
+	"TNOx_BEATS_RD_TEX",
+	"TNOx_BEATS_RD_TEX_EXT",
+	"TNOx_BEATS_RD_OTHER",
+	"TNOx_BEATS_WR_LSC_OTHER",
+	"TNOx_BEATS_WR_TIB",
+	"TNOx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TNOx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TNOx_L2_RD_MSG_IN",
+	"TNOx_L2_RD_MSG_IN_STALL",
+	"TNOx_L2_WR_MSG_IN",
+	"TNOx_L2_WR_MSG_IN_STALL",
+	"TNOx_L2_SNP_MSG_IN",
+	"TNOx_L2_SNP_MSG_IN_STALL",
+	"TNOx_L2_RD_MSG_OUT",
+	"TNOx_L2_RD_MSG_OUT_STALL",
+	"TNOx_L2_WR_MSG_OUT",
+	"TNOx_L2_ANY_LOOKUP",
+	"TNOx_L2_READ_LOOKUP",
+	"TNOx_L2_WRITE_LOOKUP",
+	"TNOx_L2_EXT_SNOOP_LOOKUP",
+	"TNOx_L2_EXT_READ",
+	"TNOx_L2_EXT_READ_NOSNP",
+	"TNOx_L2_EXT_READ_UNIQUE",
+	"TNOx_L2_EXT_READ_BEATS",
+	"TNOx_L2_EXT_AR_STALL",
+	"TNOx_L2_EXT_AR_CNT_Q1",
+	"TNOx_L2_EXT_AR_CNT_Q2",
+	"TNOx_L2_EXT_AR_CNT_Q3",
+	"TNOx_L2_EXT_RRESP_0_127",
+	"TNOx_L2_EXT_RRESP_128_191",
+	"TNOx_L2_EXT_RRESP_192_255",
+	"TNOx_L2_EXT_RRESP_256_319",
+	"TNOx_L2_EXT_RRESP_320_383",
+	"TNOx_L2_EXT_WRITE",
+	"TNOx_L2_EXT_WRITE_NOSNP_FULL",
+	"TNOx_L2_EXT_WRITE_NOSNP_PTL",
+	"TNOx_L2_EXT_WRITE_SNP_FULL",
+	"TNOx_L2_EXT_WRITE_SNP_PTL",
+	"TNOx_L2_EXT_WRITE_BEATS",
+	"TNOx_L2_EXT_W_STALL",
+	"TNOx_L2_EXT_AW_CNT_Q1",
+	"TNOx_L2_EXT_AW_CNT_Q2",
+	"TNOx_L2_EXT_AW_CNT_Q3",
+	"TNOx_L2_EXT_SNOOP",
+	"TNOx_L2_EXT_SNOOP_STALL",
+	"TNOx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TNOx_L2_EXT_SNOOP_RESP_DATA",
+	"TNOx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TNOX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h
new file mode 100644
index 0000000..b8dde32
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TSIX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TSIX_H_
+
+static const char * const hardware_counters_mali_tSIx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_MESSAGES_SENT",
+	"TSIx_MESSAGES_RECEIVED",
+	"TSIx_GPU_ACTIVE",
+	"TSIx_IRQ_ACTIVE",
+	"TSIx_JS0_JOBS",
+	"TSIx_JS0_TASKS",
+	"TSIx_JS0_ACTIVE",
+	"",
+	"TSIx_JS0_WAIT_READ",
+	"TSIx_JS0_WAIT_ISSUE",
+	"TSIx_JS0_WAIT_DEPEND",
+	"TSIx_JS0_WAIT_FINISH",
+	"TSIx_JS1_JOBS",
+	"TSIx_JS1_TASKS",
+	"TSIx_JS1_ACTIVE",
+	"",
+	"TSIx_JS1_WAIT_READ",
+	"TSIx_JS1_WAIT_ISSUE",
+	"TSIx_JS1_WAIT_DEPEND",
+	"TSIx_JS1_WAIT_FINISH",
+	"TSIx_JS2_JOBS",
+	"TSIx_JS2_TASKS",
+	"TSIx_JS2_ACTIVE",
+	"",
+	"TSIx_JS2_WAIT_READ",
+	"TSIx_JS2_WAIT_ISSUE",
+	"TSIx_JS2_WAIT_DEPEND",
+	"TSIx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_TILER_ACTIVE",
+	"TSIx_JOBS_PROCESSED",
+	"TSIx_TRIANGLES",
+	"TSIx_LINES",
+	"TSIx_POINTS",
+	"TSIx_FRONT_FACING",
+	"TSIx_BACK_FACING",
+	"TSIx_PRIM_VISIBLE",
+	"TSIx_PRIM_CULLED",
+	"TSIx_PRIM_CLIPPED",
+	"TSIx_PRIM_SAT_CULLED",
+	"TSIx_BIN_ALLOC_INIT",
+	"TSIx_BIN_ALLOC_OVERFLOW",
+	"TSIx_BUS_READ",
+	"",
+	"TSIx_BUS_WRITE",
+	"TSIx_LOADING_DESC",
+	"TSIx_IDVS_POS_SHAD_REQ",
+	"TSIx_IDVS_POS_SHAD_WAIT",
+	"TSIx_IDVS_POS_SHAD_STALL",
+	"TSIx_IDVS_POS_FIFO_FULL",
+	"TSIx_PREFETCH_STALL",
+	"TSIx_VCACHE_HIT",
+	"TSIx_VCACHE_MISS",
+	"TSIx_VCACHE_LINE_WAIT",
+	"TSIx_VFETCH_POS_READ_WAIT",
+	"TSIx_VFETCH_VERTEX_WAIT",
+	"TSIx_VFETCH_STALL",
+	"TSIx_PRIMASSY_STALL",
+	"TSIx_BBOX_GEN_STALL",
+	"TSIx_IDVS_VBU_HIT",
+	"TSIx_IDVS_VBU_MISS",
+	"TSIx_IDVS_VBU_LINE_DEALLOCATE",
+	"TSIx_IDVS_VAR_SHAD_REQ",
+	"TSIx_IDVS_VAR_SHAD_STALL",
+	"TSIx_BINNER_STALL",
+	"TSIx_ITER_STALL",
+	"TSIx_COMPRESS_MISS",
+	"TSIx_COMPRESS_STALL",
+	"TSIx_PCACHE_HIT",
+	"TSIx_PCACHE_MISS",
+	"TSIx_PCACHE_MISS_STALL",
+	"TSIx_PCACHE_EVICT_STALL",
+	"TSIx_PMGR_PTR_WR_STALL",
+	"TSIx_PMGR_PTR_RD_STALL",
+	"TSIx_PMGR_CMD_WR_STALL",
+	"TSIx_WRBUF_ACTIVE",
+	"TSIx_WRBUF_HIT",
+	"TSIx_WRBUF_MISS",
+	"TSIx_WRBUF_NO_FREE_LINE_STALL",
+	"TSIx_WRBUF_NO_AXI_ID_STALL",
+	"TSIx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TSIx_UTLB_TRANS",
+	"TSIx_UTLB_TRANS_HIT",
+	"TSIx_UTLB_TRANS_STALL",
+	"TSIx_UTLB_TRANS_MISS_DELAY",
+	"TSIx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_FRAG_ACTIVE",
+	"TSIx_FRAG_PRIMITIVES",
+	"TSIx_FRAG_PRIM_RAST",
+	"TSIx_FRAG_FPK_ACTIVE",
+	"TSIx_FRAG_STARVING",
+	"TSIx_FRAG_WARPS",
+	"TSIx_FRAG_PARTIAL_WARPS",
+	"TSIx_FRAG_QUADS_RAST",
+	"TSIx_FRAG_QUADS_EZS_TEST",
+	"TSIx_FRAG_QUADS_EZS_UPDATE",
+	"TSIx_FRAG_QUADS_EZS_KILL",
+	"TSIx_FRAG_LZS_TEST",
+	"TSIx_FRAG_LZS_KILL",
+	"",
+	"TSIx_FRAG_PTILES",
+	"TSIx_FRAG_TRANS_ELIM",
+	"TSIx_QUAD_FPK_KILLER",
+	"",
+	"TSIx_COMPUTE_ACTIVE",
+	"TSIx_COMPUTE_TASKS",
+	"TSIx_COMPUTE_WARPS",
+	"TSIx_COMPUTE_STARVING",
+	"TSIx_EXEC_CORE_ACTIVE",
+	"TSIx_EXEC_ACTIVE",
+	"TSIx_EXEC_INSTR_COUNT",
+	"TSIx_EXEC_INSTR_DIVERGED",
+	"TSIx_EXEC_INSTR_STARVING",
+	"TSIx_ARITH_INSTR_SINGLE_FMA",
+	"TSIx_ARITH_INSTR_DOUBLE",
+	"TSIx_ARITH_INSTR_MSG",
+	"TSIx_ARITH_INSTR_MSG_ONLY",
+	"TSIx_TEX_MSGI_NUM_QUADS",
+	"TSIx_TEX_DFCH_NUM_PASSES",
+	"TSIx_TEX_DFCH_NUM_PASSES_MISS",
+	"TSIx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TSIx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TSIx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TSIx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TSIx_TEX_TFCH_NUM_OPERATIONS",
+	"TSIx_TEX_FILT_NUM_OPERATIONS",
+	"TSIx_LS_MEM_READ_FULL",
+	"TSIx_LS_MEM_READ_SHORT",
+	"TSIx_LS_MEM_WRITE_FULL",
+	"TSIx_LS_MEM_WRITE_SHORT",
+	"TSIx_LS_MEM_ATOMIC",
+	"TSIx_VARY_INSTR",
+	"TSIx_VARY_SLOT_32",
+	"TSIx_VARY_SLOT_16",
+	"TSIx_ATTR_INSTR",
+	"TSIx_ARITH_INSTR_FP_MUL",
+	"TSIx_BEATS_RD_FTC",
+	"TSIx_BEATS_RD_FTC_EXT",
+	"TSIx_BEATS_RD_LSC",
+	"TSIx_BEATS_RD_LSC_EXT",
+	"TSIx_BEATS_RD_TEX",
+	"TSIx_BEATS_RD_TEX_EXT",
+	"TSIx_BEATS_RD_OTHER",
+	"TSIx_BEATS_WR_LSC_OTHER",
+	"TSIx_BEATS_WR_TIB",
+	"TSIx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TSIx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TSIx_L2_RD_MSG_IN",
+	"TSIx_L2_RD_MSG_IN_STALL",
+	"TSIx_L2_WR_MSG_IN",
+	"TSIx_L2_WR_MSG_IN_STALL",
+	"TSIx_L2_SNP_MSG_IN",
+	"TSIx_L2_SNP_MSG_IN_STALL",
+	"TSIx_L2_RD_MSG_OUT",
+	"TSIx_L2_RD_MSG_OUT_STALL",
+	"TSIx_L2_WR_MSG_OUT",
+	"TSIx_L2_ANY_LOOKUP",
+	"TSIx_L2_READ_LOOKUP",
+	"TSIx_L2_WRITE_LOOKUP",
+	"TSIx_L2_EXT_SNOOP_LOOKUP",
+	"TSIx_L2_EXT_READ",
+	"TSIx_L2_EXT_READ_NOSNP",
+	"TSIx_L2_EXT_READ_UNIQUE",
+	"TSIx_L2_EXT_READ_BEATS",
+	"TSIx_L2_EXT_AR_STALL",
+	"TSIx_L2_EXT_AR_CNT_Q1",
+	"TSIx_L2_EXT_AR_CNT_Q2",
+	"TSIx_L2_EXT_AR_CNT_Q3",
+	"TSIx_L2_EXT_RRESP_0_127",
+	"TSIx_L2_EXT_RRESP_128_191",
+	"TSIx_L2_EXT_RRESP_192_255",
+	"TSIx_L2_EXT_RRESP_256_319",
+	"TSIx_L2_EXT_RRESP_320_383",
+	"TSIx_L2_EXT_WRITE",
+	"TSIx_L2_EXT_WRITE_NOSNP_FULL",
+	"TSIx_L2_EXT_WRITE_NOSNP_PTL",
+	"TSIx_L2_EXT_WRITE_SNP_FULL",
+	"TSIx_L2_EXT_WRITE_SNP_PTL",
+	"TSIx_L2_EXT_WRITE_BEATS",
+	"TSIx_L2_EXT_W_STALL",
+	"TSIx_L2_EXT_AW_CNT_Q1",
+	"TSIx_L2_EXT_AW_CNT_Q2",
+	"TSIx_L2_EXT_AW_CNT_Q3",
+	"TSIx_L2_EXT_SNOOP",
+	"TSIx_L2_EXT_SNOOP_STALL",
+	"TSIx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TSIx_L2_EXT_SNOOP_RESP_DATA",
+	"TSIx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TSIX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h
new file mode 100644
index 0000000..c1e315b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TTRX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TTRX_H_
+
+static const char * const hardware_counters_mali_tTRx[] = {
+	/* Performance counters for the Job Manager */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_MESSAGES_SENT",
+	"TTRx_MESSAGES_RECEIVED",
+	"TTRx_GPU_ACTIVE",
+	"TTRx_IRQ_ACTIVE",
+	"TTRx_JS0_JOBS",
+	"TTRx_JS0_TASKS",
+	"TTRx_JS0_ACTIVE",
+	"",
+	"TTRx_JS0_WAIT_READ",
+	"TTRx_JS0_WAIT_ISSUE",
+	"TTRx_JS0_WAIT_DEPEND",
+	"TTRx_JS0_WAIT_FINISH",
+	"TTRx_JS1_JOBS",
+	"TTRx_JS1_TASKS",
+	"TTRx_JS1_ACTIVE",
+	"",
+	"TTRx_JS1_WAIT_READ",
+	"TTRx_JS1_WAIT_ISSUE",
+	"TTRx_JS1_WAIT_DEPEND",
+	"TTRx_JS1_WAIT_FINISH",
+	"TTRx_JS2_JOBS",
+	"TTRx_JS2_TASKS",
+	"TTRx_JS2_ACTIVE",
+	"",
+	"TTRx_JS2_WAIT_READ",
+	"TTRx_JS2_WAIT_ISSUE",
+	"TTRx_JS2_WAIT_DEPEND",
+	"TTRx_JS2_WAIT_FINISH",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+
+	/* Performance counters for the Tiler */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_TILER_ACTIVE",
+	"TTRx_JOBS_PROCESSED",
+	"TTRx_TRIANGLES",
+	"TTRx_LINES",
+	"TTRx_POINTS",
+	"TTRx_FRONT_FACING",
+	"TTRx_BACK_FACING",
+	"TTRx_PRIM_VISIBLE",
+	"TTRx_PRIM_CULLED",
+	"TTRx_PRIM_CLIPPED",
+	"TTRx_PRIM_SAT_CULLED",
+	"TTRx_BIN_ALLOC_INIT",
+	"TTRx_BIN_ALLOC_OVERFLOW",
+	"TTRx_BUS_READ",
+	"",
+	"TTRx_BUS_WRITE",
+	"TTRx_LOADING_DESC",
+	"TTRx_IDVS_POS_SHAD_REQ",
+	"TTRx_IDVS_POS_SHAD_WAIT",
+	"TTRx_IDVS_POS_SHAD_STALL",
+	"TTRx_IDVS_POS_FIFO_FULL",
+	"TTRx_PREFETCH_STALL",
+	"TTRx_VCACHE_HIT",
+	"TTRx_VCACHE_MISS",
+	"TTRx_VCACHE_LINE_WAIT",
+	"TTRx_VFETCH_POS_READ_WAIT",
+	"TTRx_VFETCH_VERTEX_WAIT",
+	"TTRx_VFETCH_STALL",
+	"TTRx_PRIMASSY_STALL",
+	"TTRx_BBOX_GEN_STALL",
+	"TTRx_IDVS_VBU_HIT",
+	"TTRx_IDVS_VBU_MISS",
+	"TTRx_IDVS_VBU_LINE_DEALLOCATE",
+	"TTRx_IDVS_VAR_SHAD_REQ",
+	"TTRx_IDVS_VAR_SHAD_STALL",
+	"TTRx_BINNER_STALL",
+	"TTRx_ITER_STALL",
+	"TTRx_COMPRESS_MISS",
+	"TTRx_COMPRESS_STALL",
+	"TTRx_PCACHE_HIT",
+	"TTRx_PCACHE_MISS",
+	"TTRx_PCACHE_MISS_STALL",
+	"TTRx_PCACHE_EVICT_STALL",
+	"TTRx_PMGR_PTR_WR_STALL",
+	"TTRx_PMGR_PTR_RD_STALL",
+	"TTRx_PMGR_CMD_WR_STALL",
+	"TTRx_WRBUF_ACTIVE",
+	"TTRx_WRBUF_HIT",
+	"TTRx_WRBUF_MISS",
+	"TTRx_WRBUF_NO_FREE_LINE_STALL",
+	"TTRx_WRBUF_NO_AXI_ID_STALL",
+	"TTRx_WRBUF_AXI_STALL",
+	"",
+	"",
+	"",
+	"TTRx_UTLB_TRANS",
+	"TTRx_UTLB_TRANS_HIT",
+	"TTRx_UTLB_TRANS_STALL",
+	"TTRx_UTLB_TRANS_MISS_DELAY",
+	"TTRx_UTLB_MMU_REQ",
+
+	/* Performance counters for the Shader Core */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_FRAG_ACTIVE",
+	"TTRx_FRAG_PRIMITIVES",
+	"TTRx_FRAG_PRIM_RAST",
+	"TTRx_FRAG_FPK_ACTIVE",
+	"TTRx_FRAG_STARVING",
+	"TTRx_FRAG_WARPS",
+	"TTRx_FRAG_PARTIAL_WARPS",
+	"TTRx_FRAG_QUADS_RAST",
+	"TTRx_FRAG_QUADS_EZS_TEST",
+	"TTRx_FRAG_QUADS_EZS_UPDATE",
+	"TTRx_FRAG_QUADS_EZS_KILL",
+	"TTRx_FRAG_LZS_TEST",
+	"TTRx_FRAG_LZS_KILL",
+	"TTRx_WARP_REG_SIZE_64",
+	"TTRx_FRAG_PTILES",
+	"TTRx_FRAG_TRANS_ELIM",
+	"TTRx_QUAD_FPK_KILLER",
+	"TTRx_FULL_QUAD_WARPS",
+	"TTRx_COMPUTE_ACTIVE",
+	"TTRx_COMPUTE_TASKS",
+	"TTRx_COMPUTE_WARPS",
+	"TTRx_COMPUTE_STARVING",
+	"TTRx_EXEC_CORE_ACTIVE",
+	"TTRx_EXEC_INSTR_FMA",
+	"TTRx_EXEC_INSTR_CVT",
+	"TTRx_EXEC_INSTR_SFU",
+	"TTRx_EXEC_INSTR_MSG",
+	"TTRx_EXEC_INSTR_DIVERGED",
+	"TTRx_EXEC_ICACHE_MISS",
+	"TTRx_EXEC_STARVE_ARITH",
+	"TTRx_CALL_BLEND_SHADER",
+	"TTRx_TEX_MSGI_NUM_QUADS",
+	"TTRx_TEX_DFCH_NUM_PASSES",
+	"TTRx_TEX_DFCH_NUM_PASSES_MISS",
+	"TTRx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+	"TTRx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+	"TTRx_TEX_TFCH_NUM_LINES_FETCHED",
+	"TTRx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+	"TTRx_TEX_TFCH_NUM_OPERATIONS",
+	"TTRx_TEX_FILT_NUM_OPERATIONS",
+	"TTRx_LS_MEM_READ_FULL",
+	"TTRx_LS_MEM_READ_SHORT",
+	"TTRx_LS_MEM_WRITE_FULL",
+	"TTRx_LS_MEM_WRITE_SHORT",
+	"TTRx_LS_MEM_ATOMIC",
+	"TTRx_VARY_INSTR",
+	"TTRx_VARY_SLOT_32",
+	"TTRx_VARY_SLOT_16",
+	"TTRx_ATTR_INSTR",
+	"TTRx_ARITH_INSTR_FP_MUL",
+	"TTRx_BEATS_RD_FTC",
+	"TTRx_BEATS_RD_FTC_EXT",
+	"TTRx_BEATS_RD_LSC",
+	"TTRx_BEATS_RD_LSC_EXT",
+	"TTRx_BEATS_RD_TEX",
+	"TTRx_BEATS_RD_TEX_EXT",
+	"TTRx_BEATS_RD_OTHER",
+	"TTRx_BEATS_WR_LSC_OTHER",
+	"TTRx_BEATS_WR_TIB",
+	"TTRx_BEATS_WR_LSC_WB",
+
+	/* Performance counters for the Memory System */
+	"",
+	"",
+	"",
+	"",
+	"TTRx_MMU_REQUESTS",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TTRx_L2_RD_MSG_IN",
+	"TTRx_L2_RD_MSG_IN_STALL",
+	"TTRx_L2_WR_MSG_IN",
+	"TTRx_L2_WR_MSG_IN_STALL",
+	"TTRx_L2_SNP_MSG_IN",
+	"TTRx_L2_SNP_MSG_IN_STALL",
+	"TTRx_L2_RD_MSG_OUT",
+	"TTRx_L2_RD_MSG_OUT_STALL",
+	"TTRx_L2_WR_MSG_OUT",
+	"TTRx_L2_ANY_LOOKUP",
+	"TTRx_L2_READ_LOOKUP",
+	"TTRx_L2_WRITE_LOOKUP",
+	"TTRx_L2_EXT_SNOOP_LOOKUP",
+	"TTRx_L2_EXT_READ",
+	"TTRx_L2_EXT_READ_NOSNP",
+	"TTRx_L2_EXT_READ_UNIQUE",
+	"TTRx_L2_EXT_READ_BEATS",
+	"TTRx_L2_EXT_AR_STALL",
+	"TTRx_L2_EXT_AR_CNT_Q1",
+	"TTRx_L2_EXT_AR_CNT_Q2",
+	"TTRx_L2_EXT_AR_CNT_Q3",
+	"TTRx_L2_EXT_RRESP_0_127",
+	"TTRx_L2_EXT_RRESP_128_191",
+	"TTRx_L2_EXT_RRESP_192_255",
+	"TTRx_L2_EXT_RRESP_256_319",
+	"TTRx_L2_EXT_RRESP_320_383",
+	"TTRx_L2_EXT_WRITE",
+	"TTRx_L2_EXT_WRITE_NOSNP_FULL",
+	"TTRx_L2_EXT_WRITE_NOSNP_PTL",
+	"TTRx_L2_EXT_WRITE_SNP_FULL",
+	"TTRx_L2_EXT_WRITE_SNP_PTL",
+	"TTRx_L2_EXT_WRITE_BEATS",
+	"TTRx_L2_EXT_W_STALL",
+	"TTRx_L2_EXT_AW_CNT_Q1",
+	"TTRx_L2_EXT_AW_CNT_Q2",
+	"TTRx_L2_EXT_AW_CNT_Q3",
+	"TTRx_L2_EXT_SNOOP",
+	"TTRx_L2_EXT_SNOOP_STALL",
+	"TTRx_L2_EXT_SNOOP_RESP_CLEAN",
+	"TTRx_L2_EXT_SNOOP_RESP_DATA",
+	"TTRx_L2_EXT_SNOOP_INTERNAL",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TTRX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h
new file mode 100644
index 0000000..d432f8e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h
@@ -0,0 +1,134 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#ifndef _KBASE_GPU_ID_H_
+#define _KBASE_GPU_ID_H_
+
+/* GPU_ID register */
+#define GPU_ID_VERSION_STATUS_SHIFT       0
+#define GPU_ID_VERSION_MINOR_SHIFT        4
+#define GPU_ID_VERSION_MAJOR_SHIFT        12
+#define GPU_ID_VERSION_PRODUCT_ID_SHIFT   16
+#define GPU_ID_VERSION_STATUS             (0xFu  << GPU_ID_VERSION_STATUS_SHIFT)
+#define GPU_ID_VERSION_MINOR              (0xFFu << GPU_ID_VERSION_MINOR_SHIFT)
+#define GPU_ID_VERSION_MAJOR              (0xFu  << GPU_ID_VERSION_MAJOR_SHIFT)
+#define GPU_ID_VERSION_PRODUCT_ID  (0xFFFFu << GPU_ID_VERSION_PRODUCT_ID_SHIFT)
+
+/* Values for GPU_ID_VERSION_PRODUCT_ID bitfield */
+#define GPU_ID_PI_T60X                    0x6956u
+#define GPU_ID_PI_T62X                    0x0620u
+#define GPU_ID_PI_T76X                    0x0750u
+#define GPU_ID_PI_T72X                    0x0720u
+#define GPU_ID_PI_TFRX                    0x0880u
+#define GPU_ID_PI_T86X                    0x0860u
+#define GPU_ID_PI_T82X                    0x0820u
+#define GPU_ID_PI_T83X                    0x0830u
+
+/* New GPU ID format when PRODUCT_ID is >= 0x1000 (and not 0x6956) */
+#define GPU_ID_PI_NEW_FORMAT_START        0x1000
+#define GPU_ID_IS_NEW_FORMAT(product_id)  ((product_id) != GPU_ID_PI_T60X && \
+						(product_id) >= \
+						GPU_ID_PI_NEW_FORMAT_START)
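+
+/*
+ * Illustrative note (editorial sketch, not part of the upstream header):
+ * a legacy product id such as GPU_ID_PI_T62X (0x0620) keeps the old
+ * bitfield layout, while ids at or above GPU_ID_PI_NEW_FORMAT_START use
+ * the GPU_ID2 layout defined below, e.g.
+ *
+ *   if (GPU_ID_IS_NEW_FORMAT(product_id))
+ *           model = gpu_id & GPU_ID2_PRODUCT_MODEL;
+ */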
+
+#define GPU_ID2_VERSION_STATUS_SHIFT      0
+#define GPU_ID2_VERSION_MINOR_SHIFT       4
+#define GPU_ID2_VERSION_MAJOR_SHIFT       12
+#define GPU_ID2_PRODUCT_MAJOR_SHIFT       16
+#define GPU_ID2_ARCH_REV_SHIFT            20
+#define GPU_ID2_ARCH_MINOR_SHIFT          24
+#define GPU_ID2_ARCH_MAJOR_SHIFT          28
+#define GPU_ID2_VERSION_STATUS            (0xFu << GPU_ID2_VERSION_STATUS_SHIFT)
+#define GPU_ID2_VERSION_MINOR             (0xFFu << GPU_ID2_VERSION_MINOR_SHIFT)
+#define GPU_ID2_VERSION_MAJOR             (0xFu << GPU_ID2_VERSION_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MAJOR             (0xFu << GPU_ID2_PRODUCT_MAJOR_SHIFT)
+#define GPU_ID2_ARCH_REV                  (0xFu << GPU_ID2_ARCH_REV_SHIFT)
+#define GPU_ID2_ARCH_MINOR                (0xFu << GPU_ID2_ARCH_MINOR_SHIFT)
+#define GPU_ID2_ARCH_MAJOR                (0xFu << GPU_ID2_ARCH_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MODEL  (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR)
+#define GPU_ID2_VERSION        (GPU_ID2_VERSION_MAJOR | \
+								GPU_ID2_VERSION_MINOR | \
+								GPU_ID2_VERSION_STATUS)
+
+/* Helper macro to create a partial GPU_ID (new format) that defines
+   a product ignoring its version. */
+#define GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) \
+		((((u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT)  | \
+		 (((u32)arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT)  | \
+		 (((u32)arch_rev) << GPU_ID2_ARCH_REV_SHIFT)      | \
+		 (((u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Helper macro to create a partial GPU_ID (new format) that specifies the
+   revision (major, minor, status) of a product */
+#define GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status) \
+		((((u32)version_major) << GPU_ID2_VERSION_MAJOR_SHIFT)  | \
+		 (((u32)version_minor) << GPU_ID2_VERSION_MINOR_SHIFT)  | \
+		 (((u32)version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
+
+/* Helper macro to create a complete GPU_ID (new format) */
+#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, \
+	version_major, version_minor, version_status) \
+		(GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, \
+			product_major) | \
+		 GPU_ID2_VERSION_MAKE(version_major, version_minor,     \
+			version_status))
+
+/* Helper macro to create a partial GPU_ID (new format) that identifies
+   a particular GPU model by its arch_major and product_major. */
+#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
+		((((u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT)  | \
+		(((u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+
+/* Strip off the non-relevant bits from a product_id value and make it suitable
+   for comparison against the GPU_ID2_PRODUCT_xxx values which identify a GPU
+   model. */
+#define GPU_ID2_MODEL_MATCH_VALUE(product_id) \
+		((((u32)product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \
+		    GPU_ID2_PRODUCT_MODEL)
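+
+/*
+ * Editorial sketch (hypothetical helper, not part of the upstream header):
+ * the usual pattern is to mask a raw GPU_ID down to its model bits and
+ * compare against the GPU_ID2_PRODUCT_xxx values defined below, e.g.
+ *
+ *   static inline bool gpu_is_tmix(u32 gpu_id)
+ *   {
+ *           return (gpu_id & GPU_ID2_PRODUCT_MODEL) == GPU_ID2_PRODUCT_TMIX;
+ *   }
+ */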
+
+#define GPU_ID2_PRODUCT_TMIX              GPU_ID2_MODEL_MAKE(6, 0)
+#define GPU_ID2_PRODUCT_THEX              GPU_ID2_MODEL_MAKE(6, 1)
+#define GPU_ID2_PRODUCT_TSIX              GPU_ID2_MODEL_MAKE(7, 0)
+#define GPU_ID2_PRODUCT_TDVX              GPU_ID2_MODEL_MAKE(7, 3)
+#define GPU_ID2_PRODUCT_TNOX              GPU_ID2_MODEL_MAKE(7, 1)
+#define GPU_ID2_PRODUCT_TGOX              GPU_ID2_MODEL_MAKE(7, 2)
+#define GPU_ID2_PRODUCT_TKAX              GPU_ID2_MODEL_MAKE(8, 0)
+#define GPU_ID2_PRODUCT_TBOX              GPU_ID2_MODEL_MAKE(8, 2)
+#define GPU_ID2_PRODUCT_TEGX              GPU_ID2_MODEL_MAKE(8, 3)
+#define GPU_ID2_PRODUCT_TTRX              GPU_ID2_MODEL_MAKE(9, 0)
+#define GPU_ID2_PRODUCT_TNAX              GPU_ID2_MODEL_MAKE(9, 1)
+#define GPU_ID2_PRODUCT_TBEX              GPU_ID2_MODEL_MAKE(9, 2)
+#define GPU_ID2_PRODUCT_TULX              GPU_ID2_MODEL_MAKE(10, 0)
+#define GPU_ID2_PRODUCT_TIDX              GPU_ID2_MODEL_MAKE(10, 3)
+#define GPU_ID2_PRODUCT_TVAX              GPU_ID2_MODEL_MAKE(10, 4)
+
+/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
+#define GPU_ID_S_15DEV0                   0x1
+#define GPU_ID_S_EAC                      0x2
+
+/* Helper macro to create a GPU_ID assuming valid values for id, major,
+   minor, status */
+#define GPU_ID_MAKE(id, major, minor, status) \
+		((((u32)id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \
+		(((u32)major) << GPU_ID_VERSION_MAJOR_SHIFT) |   \
+		(((u32)minor) << GPU_ID_VERSION_MINOR_SHIFT) |   \
+		(((u32)status) << GPU_ID_VERSION_STATUS_SHIFT))
+
+#endif /* _KBASE_GPU_ID_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
new file mode 100644
index 0000000..514b065
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
@@ -0,0 +1,102 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+/** Show callback for the @c gpu_memory debugfs file.
+ *
+ * This function is called to get the contents of the @c gpu_memory debugfs
+ * file. This is a report of current gpu memory usage.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if the data was successfully printed to the debugfs entry file,
+ *         -1 if an error was encountered
+ */
+
+static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
+{
+	struct list_head *entry;
+	const struct list_head *kbdev_list;
+
+	kbdev_list = kbase_dev_list_get();
+	list_for_each(entry, kbdev_list) {
+		struct kbase_device *kbdev = NULL;
+		struct kbasep_kctx_list_element *element;
+
+		kbdev = list_entry(entry, struct kbase_device, entry);
+		/* output the total memory usage and cap for this device */
+		seq_printf(sfile, "%-16s  %10u\n",
+				kbdev->devname,
+				atomic_read(&(kbdev->memdev.used_pages)));
+		mutex_lock(&kbdev->kctx_list_lock);
+		list_for_each_entry(element, &kbdev->kctx_list, link) {
+			/* output the memory usage and cap for each kctx
+			* opened on this device */
+			seq_printf(sfile, "  %s-0x%p %10u\n",
+				"kctx",
+				element->kctx,
+				atomic_read(&(element->kctx->used_pages)));
+		}
+		mutex_unlock(&kbdev->kctx_list_lock);
+	}
+	kbase_dev_list_put(kbdev_list);
+	return 0;
+}
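+
+/*
+ * Example of the report produced by the show callback above (device name,
+ * pointer and page counts are illustrative only, not taken from a real run):
+ *
+ *   mali0                   4096
+ *     kctx-0x00000000f00dcafe       1024
+ */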
+
+/*
+ *  File operations related to debugfs entry for gpu_memory
+ */
+static int kbasep_gpu_memory_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbasep_gpu_memory_seq_show, NULL);
+}
+
+static const struct file_operations kbasep_gpu_memory_debugfs_fops = {
+	.open = kbasep_gpu_memory_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ *  Initialize debugfs entry for gpu_memory
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_file("gpu_memory", S_IRUGO,
+			kbdev->mali_debugfs_directory, NULL,
+			&kbasep_gpu_memory_debugfs_fops);
+	return;
+}
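+
+/*
+ * Once registered, the report can be read from userspace; the typical path
+ * is an assumption here, since it depends on kbdev->mali_debugfs_directory:
+ *
+ *   cat /sys/kernel/debug/mali0/gpu_memory
+ */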
+
+#else
+/*
+ * Stub functions for when debugfs is disabled
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+	return;
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h
new file mode 100644
index 0000000..28a871a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpu_memory_debugfs.h
+ * Header file for gpu_memory entry in debugfs
+ *
+ */
+
+#ifndef _KBASE_GPU_MEMORY_DEBUGFS_H
+#define _KBASE_GPU_MEMORY_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Initialize gpu_memory debugfs entry
+ */
+void kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev);
+
+#endif  /*_KBASE_GPU_MEMORY_DEBUGFS_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
new file mode 100644
index 0000000..ee33b0c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
@@ -0,0 +1,486 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Base kernel property query APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_gpuprops.h>
+#include <mali_kbase_hwaccess_gpuprops.h>
+#include "mali_kbase_ioctl.h"
+#include <linux/clk.h>
+
+/**
+ * KBASE_UBFX32 - Extracts bits from a 32-bit bitfield.
+ * @value:  The value from which to extract bits.
+ * @offset: The first bit to extract (0 being the LSB).
+ * @size:   The number of bits to extract.
+ *
+ * Context: @offset + @size <= 32.
+ *
+ * Return: Bits [@offset, @offset + @size) from @value.
+ */
+/* from mali_cdsb.h */
+#define KBASE_UBFX32(value, offset, size) \
+	(((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))
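+/*
+ * Worked example (illustrative value): KBASE_UBFX32(0x72120011, 16, 16)
+ * shifts the value right by 16 bits and masks with 0xFFFF, yielding 0x7212.
+ * The same extraction is used below to pull product_id out of a raw gpu_id.
+ */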
+
+static void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const props)
+{
+	struct mali_base_gpu_coherent_group *current_group;
+	u64 group_present;
+	u64 group_mask;
+	u64 first_set, first_set_prev;
+	u32 num_groups = 0;
+
+	KBASE_DEBUG_ASSERT(NULL != props);
+
+	props->coherency_info.coherency = props->raw_props.mem_features;
+	props->coherency_info.num_core_groups = hweight64(props->raw_props.l2_present);
+
+	if (props->coherency_info.coherency & GROUPS_L2_COHERENT) {
+		/* Group is l2 coherent */
+		group_present = props->raw_props.l2_present;
+	} else {
+		/* Group is l1 coherent */
+		group_present = props->raw_props.shader_present;
+	}
+
+	/*
+	 * The coherent group mask can be computed from the l2 present
+	 * register.
+	 *
+	 * For the coherent group n:
+	 * group_mask[n] = (first_set[n] - 1) & ~(first_set[n-1] - 1)
+	 * where first_set is group_present with only its nth set-bit kept
+	 * (i.e. the position from where a new group starts).
+	 *
+	 * For instance if the groups are l2 coherent and l2_present=0x0..01111:
+	 * The first mask is:
+	 * group_mask[1] = (first_set[1] - 1) & ~(first_set[0] - 1)
+	 *               = (0x0..010     - 1) & ~(0x0..01      - 1)
+	 *               =  0x0..00f
+	 * The second mask is:
+	 * group_mask[2] = (first_set[2] - 1) & ~(first_set[1] - 1)
+	 *               = (0x0..100     - 1) & ~(0x0..010     - 1)
+	 *               =  0x0..0f0
+	 * And so on until all the bits from group_present have been cleared
+	 * (i.e. there is no group left).
+	 */
+
+	current_group = props->coherency_info.group;
+	first_set = group_present & ~(group_present - 1);
+
+	while (group_present != 0 && num_groups < BASE_MAX_COHERENT_GROUPS) {
+		group_present -= first_set;	/* Clear the current group bit */
+		first_set_prev = first_set;
+
+		first_set = group_present & ~(group_present - 1);
+		group_mask = (first_set - 1) & ~(first_set_prev - 1);
+
+		/* Populate the coherent_group structure for each group */
+		current_group->core_mask = group_mask & props->raw_props.shader_present;
+		current_group->num_cores = hweight64(current_group->core_mask);
+
+		num_groups++;
+		current_group++;
+	}
+
+	if (group_present != 0)
+		pr_warn("Too many coherent groups (keeping only %d groups).\n", BASE_MAX_COHERENT_GROUPS);
+
+	props->coherency_info.num_groups = num_groups;
+}
+
+/**
+ * kbase_gpuprops_get_props - Get the GPU configuration
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev: The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values from the GPU configuration
+ * registers. Only the raw properties are filled in this function
+ */
+static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+	struct kbase_gpuprops_regdump regdump;
+	int i;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	KBASE_DEBUG_ASSERT(NULL != gpu_props);
+
+	/* Dump relevant registers */
+	kbase_backend_gpuprops_get(kbdev, &regdump);
+
+	gpu_props->raw_props.gpu_id = regdump.gpu_id;
+	gpu_props->raw_props.tiler_features = regdump.tiler_features;
+	gpu_props->raw_props.mem_features = regdump.mem_features;
+	gpu_props->raw_props.mmu_features = regdump.mmu_features;
+	gpu_props->raw_props.l2_features = regdump.l2_features;
+	gpu_props->raw_props.core_features = regdump.core_features;
+
+	gpu_props->raw_props.as_present = regdump.as_present;
+	gpu_props->raw_props.js_present = regdump.js_present;
+	gpu_props->raw_props.shader_present =
+		((u64) regdump.shader_present_hi << 32) +
+		regdump.shader_present_lo;
+	gpu_props->raw_props.tiler_present =
+		((u64) regdump.tiler_present_hi << 32) +
+		regdump.tiler_present_lo;
+	gpu_props->raw_props.l2_present =
+		((u64) regdump.l2_present_hi << 32) +
+		regdump.l2_present_lo;
+	gpu_props->raw_props.stack_present =
+		((u64) regdump.stack_present_hi << 32) +
+		regdump.stack_present_lo;
+
+	for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
+		gpu_props->raw_props.js_features[i] = regdump.js_features[i];
+
+	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+		gpu_props->raw_props.texture_features[i] = regdump.texture_features[i];
+
+	gpu_props->raw_props.thread_max_barrier_size = regdump.thread_max_barrier_size;
+	gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads;
+	gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size;
+	gpu_props->raw_props.thread_features = regdump.thread_features;
+	gpu_props->raw_props.thread_tls_alloc = regdump.thread_tls_alloc;
+}
+
+void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props)
+{
+	gpu_props->core_props.version_status =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 0U, 4);
+	gpu_props->core_props.minor_revision =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 4U, 8);
+	gpu_props->core_props.major_revision =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 12U, 4);
+	gpu_props->core_props.product_id =
+		KBASE_UBFX32(gpu_props->raw_props.gpu_id, 16U, 16);
+}
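+/*
+ * Illustrative decomposition (the raw value is hypothetical): a gpu_id of
+ * 0x60A00011 would split into product_id 0x60A0, major_revision 0,
+ * minor_revision 1 and version_status 1 using the bitfield layout above.
+ */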
+
+/**
+ * kbase_gpuprops_calculate_props - Calculate the derived properties
+ * @gpu_props: The &base_gpu_props structure
+ * @kbdev:     The &struct kbase_device structure for the device
+ *
+ * Fill the &base_gpu_props structure with values derived from the GPU
+ * configuration registers
+ */
+static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
+{
+	int i;
+
+	/* Populate the base_gpu_props structure */
+	kbase_gpuprops_update_core_props_gpu_id(gpu_props);
+	gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+	gpu_props->core_props.gpu_available_memory_size = totalram_pages() << PAGE_SHIFT;
+#else
+	gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+#endif
+	gpu_props->core_props.num_exec_engines =
+		KBASE_UBFX32(gpu_props->raw_props.core_features, 0, 4);
+
+	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+		gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i];
+
+	gpu_props->l2_props.log2_line_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 0U, 8);
+	gpu_props->l2_props.log2_cache_size = KBASE_UBFX32(gpu_props->raw_props.l2_features, 16U, 8);
+
+	/* The field holding the number of L2 slices was added to the
+	 * MEM_FEATURES register from t76x onwards. The code below assumes
+	 * that on older GPUs the reserved bits read back as zero. */
+	gpu_props->l2_props.num_l2_slices =
+		KBASE_UBFX32(gpu_props->raw_props.mem_features, 8U, 4) + 1;
+
+	gpu_props->tiler_props.bin_size_bytes = 1 << KBASE_UBFX32(gpu_props->raw_props.tiler_features, 0U, 6);
+	gpu_props->tiler_props.max_active_levels = KBASE_UBFX32(gpu_props->raw_props.tiler_features, 8U, 4);
+
+	if (gpu_props->raw_props.thread_max_threads == 0)
+		gpu_props->thread_props.max_threads = THREAD_MT_DEFAULT;
+	else
+		gpu_props->thread_props.max_threads = gpu_props->raw_props.thread_max_threads;
+
+	if (gpu_props->raw_props.thread_max_workgroup_size == 0)
+		gpu_props->thread_props.max_workgroup_size = THREAD_MWS_DEFAULT;
+	else
+		gpu_props->thread_props.max_workgroup_size = gpu_props->raw_props.thread_max_workgroup_size;
+
+	if (gpu_props->raw_props.thread_max_barrier_size == 0)
+		gpu_props->thread_props.max_barrier_size = THREAD_MBS_DEFAULT;
+	else
+		gpu_props->thread_props.max_barrier_size = gpu_props->raw_props.thread_max_barrier_size;
+
+	if (gpu_props->raw_props.thread_tls_alloc == 0)
+		gpu_props->thread_props.tls_alloc =
+				gpu_props->thread_props.max_threads;
+	else
+		gpu_props->thread_props.tls_alloc =
+				gpu_props->raw_props.thread_tls_alloc;
+
+	gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16);
+	gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8);
+	gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6);
+	gpu_props->thread_props.impl_tech = KBASE_UBFX32(gpu_props->raw_props.thread_features, 30U, 2);
+
+	/* If values are not specified, then use defaults */
+	if (gpu_props->thread_props.max_registers == 0) {
+		gpu_props->thread_props.max_registers = THREAD_MR_DEFAULT;
+		gpu_props->thread_props.max_task_queue = THREAD_MTQ_DEFAULT;
+		gpu_props->thread_props.max_thread_group_split = THREAD_MTGS_DEFAULT;
+	}
+	/* Initialize the coherent_group structure for each group */
+	kbase_gpuprops_construct_coherent_groups(gpu_props);
+}
+
+void kbase_gpuprops_set(struct kbase_device *kbdev)
+{
+	struct kbase_gpu_props *gpu_props;
+	struct gpu_raw_gpu_props *raw;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	gpu_props = &kbdev->gpu_props;
+	raw = &gpu_props->props.raw_props;
+
+	/* Initialize the base_gpu_props structure from the hardware */
+	kbase_gpuprops_get_props(&gpu_props->props, kbdev);
+
+	/* Populate the derived properties */
+	kbase_gpuprops_calculate_props(&gpu_props->props, kbdev);
+
+	/* Populate kbase-only fields */
+	gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
+	gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);
+
+	gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
+
+	gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
+	gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);
+
+	gpu_props->num_cores = hweight64(raw->shader_present);
+	gpu_props->num_core_groups = hweight64(raw->l2_present);
+	gpu_props->num_address_spaces = hweight32(raw->as_present);
+	gpu_props->num_job_slots = hweight32(raw->js_present);
+}
+
+void kbase_gpuprops_set_features(struct kbase_device *kbdev)
+{
+	base_gpu_props *gpu_props;
+	struct kbase_gpuprops_regdump regdump;
+
+	gpu_props = &kbdev->gpu_props.props;
+
+	/* Dump relevant registers */
+	kbase_backend_gpuprops_get_features(kbdev, &regdump);
+
+	/*
+	 * Copy the raw value from the register, later this will get turned
+	 * into the selected coherency mode.
+	 * Additionally, add non-coherent mode, as this is always supported.
+	 */
+	gpu_props->raw_props.coherency_mode = regdump.coherency_features |
+		COHERENCY_FEATURE_BIT(COHERENCY_NONE);
+
+	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_THREAD_GROUP_SPLIT))
+		gpu_props->thread_props.max_thread_group_split = 0;
+}
+
+static struct {
+	u32 type;
+	size_t offset;
+	int size;
+} gpu_property_mapping[] = {
+#define PROP(name, member) \
+	{KBASE_GPUPROP_ ## name, offsetof(struct base_gpu_props, member), \
+		sizeof(((struct base_gpu_props *)0)->member)}
+	PROP(PRODUCT_ID,                  core_props.product_id),
+	PROP(VERSION_STATUS,              core_props.version_status),
+	PROP(MINOR_REVISION,              core_props.minor_revision),
+	PROP(MAJOR_REVISION,              core_props.major_revision),
+	PROP(GPU_FREQ_KHZ_MAX,            core_props.gpu_freq_khz_max),
+	PROP(LOG2_PROGRAM_COUNTER_SIZE,   core_props.log2_program_counter_size),
+	PROP(TEXTURE_FEATURES_0,          core_props.texture_features[0]),
+	PROP(TEXTURE_FEATURES_1,          core_props.texture_features[1]),
+	PROP(TEXTURE_FEATURES_2,          core_props.texture_features[2]),
+	PROP(TEXTURE_FEATURES_3,          core_props.texture_features[3]),
+	PROP(GPU_AVAILABLE_MEMORY_SIZE,   core_props.gpu_available_memory_size),
+	PROP(NUM_EXEC_ENGINES,            core_props.num_exec_engines),
+
+	PROP(L2_LOG2_LINE_SIZE,           l2_props.log2_line_size),
+	PROP(L2_LOG2_CACHE_SIZE,          l2_props.log2_cache_size),
+	PROP(L2_NUM_L2_SLICES,            l2_props.num_l2_slices),
+
+	PROP(TILER_BIN_SIZE_BYTES,        tiler_props.bin_size_bytes),
+	PROP(TILER_MAX_ACTIVE_LEVELS,     tiler_props.max_active_levels),
+
+	PROP(MAX_THREADS,                 thread_props.max_threads),
+	PROP(MAX_WORKGROUP_SIZE,          thread_props.max_workgroup_size),
+	PROP(MAX_BARRIER_SIZE,            thread_props.max_barrier_size),
+	PROP(MAX_REGISTERS,               thread_props.max_registers),
+	PROP(MAX_TASK_QUEUE,              thread_props.max_task_queue),
+	PROP(MAX_THREAD_GROUP_SPLIT,      thread_props.max_thread_group_split),
+	PROP(IMPL_TECH,                   thread_props.impl_tech),
+	PROP(TLS_ALLOC,                   thread_props.tls_alloc),
+
+	PROP(RAW_SHADER_PRESENT,          raw_props.shader_present),
+	PROP(RAW_TILER_PRESENT,           raw_props.tiler_present),
+	PROP(RAW_L2_PRESENT,              raw_props.l2_present),
+	PROP(RAW_STACK_PRESENT,           raw_props.stack_present),
+	PROP(RAW_L2_FEATURES,             raw_props.l2_features),
+	PROP(RAW_CORE_FEATURES,           raw_props.core_features),
+	PROP(RAW_MEM_FEATURES,            raw_props.mem_features),
+	PROP(RAW_MMU_FEATURES,            raw_props.mmu_features),
+	PROP(RAW_AS_PRESENT,              raw_props.as_present),
+	PROP(RAW_JS_PRESENT,              raw_props.js_present),
+	PROP(RAW_JS_FEATURES_0,           raw_props.js_features[0]),
+	PROP(RAW_JS_FEATURES_1,           raw_props.js_features[1]),
+	PROP(RAW_JS_FEATURES_2,           raw_props.js_features[2]),
+	PROP(RAW_JS_FEATURES_3,           raw_props.js_features[3]),
+	PROP(RAW_JS_FEATURES_4,           raw_props.js_features[4]),
+	PROP(RAW_JS_FEATURES_5,           raw_props.js_features[5]),
+	PROP(RAW_JS_FEATURES_6,           raw_props.js_features[6]),
+	PROP(RAW_JS_FEATURES_7,           raw_props.js_features[7]),
+	PROP(RAW_JS_FEATURES_8,           raw_props.js_features[8]),
+	PROP(RAW_JS_FEATURES_9,           raw_props.js_features[9]),
+	PROP(RAW_JS_FEATURES_10,          raw_props.js_features[10]),
+	PROP(RAW_JS_FEATURES_11,          raw_props.js_features[11]),
+	PROP(RAW_JS_FEATURES_12,          raw_props.js_features[12]),
+	PROP(RAW_JS_FEATURES_13,          raw_props.js_features[13]),
+	PROP(RAW_JS_FEATURES_14,          raw_props.js_features[14]),
+	PROP(RAW_JS_FEATURES_15,          raw_props.js_features[15]),
+	PROP(RAW_TILER_FEATURES,          raw_props.tiler_features),
+	PROP(RAW_TEXTURE_FEATURES_0,      raw_props.texture_features[0]),
+	PROP(RAW_TEXTURE_FEATURES_1,      raw_props.texture_features[1]),
+	PROP(RAW_TEXTURE_FEATURES_2,      raw_props.texture_features[2]),
+	PROP(RAW_TEXTURE_FEATURES_3,      raw_props.texture_features[3]),
+	PROP(RAW_GPU_ID,                  raw_props.gpu_id),
+	PROP(RAW_THREAD_MAX_THREADS,      raw_props.thread_max_threads),
+	PROP(RAW_THREAD_MAX_WORKGROUP_SIZE,
+			raw_props.thread_max_workgroup_size),
+	PROP(RAW_THREAD_MAX_BARRIER_SIZE, raw_props.thread_max_barrier_size),
+	PROP(RAW_THREAD_FEATURES,         raw_props.thread_features),
+	PROP(RAW_THREAD_TLS_ALLOC,        raw_props.thread_tls_alloc),
+	PROP(RAW_COHERENCY_MODE,          raw_props.coherency_mode),
+
+	PROP(COHERENCY_NUM_GROUPS,        coherency_info.num_groups),
+	PROP(COHERENCY_NUM_CORE_GROUPS,   coherency_info.num_core_groups),
+	PROP(COHERENCY_COHERENCY,         coherency_info.coherency),
+	PROP(COHERENCY_GROUP_0,           coherency_info.group[0].core_mask),
+	PROP(COHERENCY_GROUP_1,           coherency_info.group[1].core_mask),
+	PROP(COHERENCY_GROUP_2,           coherency_info.group[2].core_mask),
+	PROP(COHERENCY_GROUP_3,           coherency_info.group[3].core_mask),
+	PROP(COHERENCY_GROUP_4,           coherency_info.group[4].core_mask),
+	PROP(COHERENCY_GROUP_5,           coherency_info.group[5].core_mask),
+	PROP(COHERENCY_GROUP_6,           coherency_info.group[6].core_mask),
+	PROP(COHERENCY_GROUP_7,           coherency_info.group[7].core_mask),
+	PROP(COHERENCY_GROUP_8,           coherency_info.group[8].core_mask),
+	PROP(COHERENCY_GROUP_9,           coherency_info.group[9].core_mask),
+	PROP(COHERENCY_GROUP_10,          coherency_info.group[10].core_mask),
+	PROP(COHERENCY_GROUP_11,          coherency_info.group[11].core_mask),
+	PROP(COHERENCY_GROUP_12,          coherency_info.group[12].core_mask),
+	PROP(COHERENCY_GROUP_13,          coherency_info.group[13].core_mask),
+	PROP(COHERENCY_GROUP_14,          coherency_info.group[14].core_mask),
+	PROP(COHERENCY_GROUP_15,          coherency_info.group[15].core_mask),
+
+#undef PROP
+};
+
+int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev)
+{
+	struct kbase_gpu_props *kprops = &kbdev->gpu_props;
+	struct base_gpu_props *props = &kprops->props;
+	u32 count = ARRAY_SIZE(gpu_property_mapping);
+	u32 i;
+	u32 size = 0;
+	u8 *p;
+
+	for (i = 0; i < count; i++) {
+		/* 4 bytes for the ID, and the size of the property */
+		size += 4 + gpu_property_mapping[i].size;
+	}
+
+	kprops->prop_buffer_size = size;
+	kprops->prop_buffer = kmalloc(size, GFP_KERNEL);
+
+	if (!kprops->prop_buffer) {
+		kprops->prop_buffer_size = 0;
+		return -ENOMEM;
+	}
+
+	p = kprops->prop_buffer;
+
+#define WRITE_U8(v) (*p++ = (v) & 0xFF)
+#define WRITE_U16(v) do { WRITE_U8(v); WRITE_U8((v) >> 8); } while (0)
+#define WRITE_U32(v) do { WRITE_U16(v); WRITE_U16((v) >> 16); } while (0)
+#define WRITE_U64(v) do { WRITE_U32(v); WRITE_U32((v) >> 32); } while (0)
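+	/*
+	 * Illustrative layout (property IDs and values are hypothetical): each
+	 * entry in the buffer is a little-endian u32 header encoding
+	 * (type << 2) | value_size, immediately followed by the raw property
+	 * value, so a u32 property occupies 4 + 4 bytes in the buffer.
+	 */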
+
+	for (i = 0; i < count; i++) {
+		u32 type = gpu_property_mapping[i].type;
+		u8 type_size;
+		void *field = ((u8 *)props) + gpu_property_mapping[i].offset;
+
+		switch (gpu_property_mapping[i].size) {
+		case 1:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U8;
+			break;
+		case 2:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U16;
+			break;
+		case 4:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U32;
+			break;
+		case 8:
+			type_size = KBASE_GPUPROP_VALUE_SIZE_U64;
+			break;
+		default:
+			dev_err(kbdev->dev,
+				"Invalid gpu_property_mapping type=%d size=%d",
+				type, gpu_property_mapping[i].size);
+			return -EINVAL;
+		}
+
+		WRITE_U32((type<<2) | type_size);
+
+		switch (type_size) {
+		case KBASE_GPUPROP_VALUE_SIZE_U8:
+			WRITE_U8(*((u8 *)field));
+			break;
+		case KBASE_GPUPROP_VALUE_SIZE_U16:
+			WRITE_U16(*((u16 *)field));
+			break;
+		case KBASE_GPUPROP_VALUE_SIZE_U32:
+			WRITE_U32(*((u32 *)field));
+			break;
+		case KBASE_GPUPROP_VALUE_SIZE_U64:
+			WRITE_U64(*((u64 *)field));
+			break;
+		default: /* Cannot be reached */
+			WARN_ON(1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h
new file mode 100644
index 0000000..37d9c08
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpuprops.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_H_
+#define _KBASE_GPUPROPS_H_
+
+#include "mali_kbase_gpuprops_types.h"
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/**
+ * @brief Set up Kbase GPU properties.
+ *
+ * Set up Kbase GPU properties with information from the GPU registers
+ *
+ * @param kbdev		The struct kbase_device structure for the device
+ */
+void kbase_gpuprops_set(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_set_features - Set up Kbase GPU properties
+ * @kbdev:   Device pointer
+ *
+ * This function sets up GPU properties that are dependent on the hardware
+ * features bitmask. This function must be preceded by a call to
+ * kbase_hw_set_features_mask().
+ */
+void kbase_gpuprops_set_features(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_populate_user_buffer - Populate the GPU properties buffer
+ * @kbdev: The kbase device
+ *
+ * Fills kbdev->gpu_props->prop_buffer with the GPU properties for user
+ * space to read.
+ */
+int kbase_gpuprops_populate_user_buffer(struct kbase_device *kbdev);
+
+/**
+ * kbase_gpuprops_update_core_props_gpu_id - break down gpu id value
+ * @gpu_props: the &base_gpu_props structure
+ *
+ * Break down gpu_id value stored in base_gpu_props::raw_props.gpu_id into
+ * separate fields (version_status, minor_revision, major_revision, product_id)
+ * stored in base_gpu_props::core_props.
+ */
+void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props);
+
+
+#endif				/* _KBASE_GPUPROPS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
new file mode 100644
index 0000000..d7877d1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_gpuprops_types.h
+ * Base kernel property query APIs
+ */
+
+#ifndef _KBASE_GPUPROPS_TYPES_H_
+#define _KBASE_GPUPROPS_TYPES_H_
+
+#include "mali_base_kernel.h"
+
+#define KBASE_GPU_SPEED_MHZ    123
+#define KBASE_GPU_PC_SIZE_LOG2 24U
+
+struct kbase_gpuprops_regdump {
+	u32 gpu_id;
+	u32 l2_features;
+	u32 core_features;
+	u32 tiler_features;
+	u32 mem_features;
+	u32 mmu_features;
+	u32 as_present;
+	u32 js_present;
+	u32 thread_max_threads;
+	u32 thread_max_workgroup_size;
+	u32 thread_max_barrier_size;
+	u32 thread_features;
+	u32 thread_tls_alloc;
+	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
+	u32 js_features[GPU_MAX_JOB_SLOTS];
+	u32 shader_present_lo;
+	u32 shader_present_hi;
+	u32 tiler_present_lo;
+	u32 tiler_present_hi;
+	u32 l2_present_lo;
+	u32 l2_present_hi;
+	u32 stack_present_lo;
+	u32 stack_present_hi;
+	u32 coherency_features;
+};
+
+struct kbase_gpu_cache_props {
+	u8 associativity;
+	u8 external_bus_width;
+};
+
+struct kbase_gpu_mem_props {
+	u8 core_group;
+};
+
+struct kbase_gpu_mmu_props {
+	u8 va_bits;
+	u8 pa_bits;
+};
+
+struct kbase_gpu_props {
+	/* kernel-only properties */
+	u8 num_cores;
+	u8 num_core_groups;
+	u8 num_address_spaces;
+	u8 num_job_slots;
+
+	struct kbase_gpu_cache_props l2_props;
+
+	struct kbase_gpu_mem_props mem;
+	struct kbase_gpu_mmu_props mmu;
+
+	/* Properties shared with userspace */
+	base_gpu_props props;
+
+	u32 prop_buffer_size;
+	void *prop_buffer;
+};
+
+#endif				/* _KBASE_GPUPROPS_TYPES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gwt.c b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
new file mode 100644
index 0000000..0481f80
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
@@ -0,0 +1,268 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_gwt.h"
+#include <linux/list_sort.h>
+
+static inline void kbase_gpu_gwt_setup_page_permission(
+				struct kbase_context *kctx,
+				unsigned long flag,
+				struct rb_node *node)
+{
+	struct rb_node *rbnode = node;
+
+	while (rbnode) {
+		struct kbase_va_region *reg;
+		int err = 0;
+
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		if (reg->nr_pages && !(reg->flags & KBASE_REG_FREE) &&
+					(reg->flags & KBASE_REG_GPU_WR)) {
+			err = kbase_mmu_update_pages(kctx, reg->start_pfn,
+					kbase_get_gpu_phy_pages(reg),
+					reg->gpu_alloc->nents,
+					reg->flags & flag);
+			if (err)
+				dev_warn(kctx->kbdev->dev, "kbase_mmu_update_pages failure\n");
+		}
+
+		rbnode = rb_next(rbnode);
+	}
+}
+
+static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx,
+					unsigned long flag)
+{
+	kbase_gpu_gwt_setup_page_permission(kctx, flag,
+				rb_first(&(kctx->reg_rbtree_same)));
+	kbase_gpu_gwt_setup_page_permission(kctx, flag,
+				rb_first(&(kctx->reg_rbtree_custom)));
+}
+
+
+int kbase_gpu_gwt_start(struct kbase_context *kctx)
+{
+	kbase_gpu_vm_lock(kctx);
+	if (kctx->gwt_enabled) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EBUSY;
+	}
+
+	INIT_LIST_HEAD(&kctx->gwt_current_list);
+	INIT_LIST_HEAD(&kctx->gwt_snapshot_list);
+
+	/* If GWT is enabled using the new vector dumping format
+	 * from user space, back up the status of the job serialization flag
+	 * and use full serialization of jobs for dumping.
+	 * The status is restored at the end of dumping in gwt_stop.
+	 */
+	kctx->kbdev->backup_serialize_jobs = kctx->kbdev->serialize_jobs;
+	kctx->kbdev->serialize_jobs = KBASE_SERIALIZE_INTRA_SLOT |
+						KBASE_SERIALIZE_INTER_SLOT;
+
+	/* Mark gwt enabled before making pages read only in case a
+	 * write page fault is triggered while we're still in this loop.
+	 * (kbase_gpu_vm_lock() doesn't prevent this!)
+	 */
+	kctx->gwt_enabled = true;
+	kctx->gwt_was_enabled = true;
+
+	kbase_gpu_gwt_setup_pages(kctx, ~KBASE_REG_GPU_WR);
+
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+}
+
+int kbase_gpu_gwt_stop(struct kbase_context *kctx)
+{
+	struct kbasep_gwt_list_element *pos, *n;
+
+	kbase_gpu_vm_lock(kctx);
+	if (!kctx->gwt_enabled) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(pos, n, &kctx->gwt_current_list, link) {
+		list_del(&pos->link);
+		kfree(pos);
+	}
+
+	list_for_each_entry_safe(pos, n, &kctx->gwt_snapshot_list, link) {
+		list_del(&pos->link);
+		kfree(pos);
+	}
+
+	kctx->kbdev->serialize_jobs = kctx->kbdev->backup_serialize_jobs;
+
+	kbase_gpu_gwt_setup_pages(kctx, ~0UL);
+
+	kctx->gwt_enabled = false;
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+}
+
+
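+/*
+ * Comparison callback for list_sort(): returning a positive value when the
+ * first entry's page_addr is larger sorts the write-tracking list into
+ * ascending address order, which the collation step below relies on when
+ * merging contiguous ranges.
+ */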
+static int list_cmp_function(void *priv, struct list_head *a,
+				struct list_head *b)
+{
+	struct kbasep_gwt_list_element *elementA = container_of(a,
+				struct kbasep_gwt_list_element, link);
+	struct kbasep_gwt_list_element *elementB = container_of(b,
+				struct kbasep_gwt_list_element, link);
+
+	CSTD_UNUSED(priv);
+
+	if (elementA->page_addr > elementB->page_addr)
+		return 1;
+	return -1;
+}
+
+static void kbase_gpu_gwt_collate(struct kbase_context *kctx,
+		struct list_head *snapshot_list)
+{
+	struct kbasep_gwt_list_element *pos, *n;
+	struct kbasep_gwt_list_element *collated = NULL;
+
+	/* Sort the list */
+	list_sort(NULL, snapshot_list, list_cmp_function);
+
+	/* Combine contiguous areas. */
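+	/*
+	 * Worked example (hypothetical addresses, assuming a PAGE_SIZE of
+	 * 0x1000): entries for 0x1000 (2 pages) and 0x3000 (1 page) in the
+	 * same region merge into one entry covering 0x1000 for 3 pages, while
+	 * a later entry at 0x5000 would start a new collated entry.
+	 */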
+	list_for_each_entry_safe(pos, n, snapshot_list, link) {
+		if (collated == NULL ||	collated->region !=
+					pos->region ||
+					(collated->page_addr +
+					(collated->num_pages * PAGE_SIZE)) !=
+					pos->page_addr) {
+			/* This is the first time through, a new region or
+			 * is not contiguous - start collating to this element
+			 */
+			collated = pos;
+		} else {
+			/* contiguous so merge */
+			collated->num_pages += pos->num_pages;
+			/* remove element from list */
+			list_del(&pos->link);
+			kfree(pos);
+		}
+	}
+}
+
+int kbase_gpu_gwt_dump(struct kbase_context *kctx,
+			union kbase_ioctl_cinstr_gwt_dump *gwt_dump)
+{
+	const u32 ubuf_size = gwt_dump->in.len;
+	u32 ubuf_count = 0;
+	__user void *user_addr = (__user void *)
+			(uintptr_t)gwt_dump->in.addr_buffer;
+	__user void *user_sizes = (__user void *)
+			(uintptr_t)gwt_dump->in.size_buffer;
+
+	kbase_gpu_vm_lock(kctx);
+
+	if (!kctx->gwt_enabled) {
+		kbase_gpu_vm_unlock(kctx);
+		/* gwt_dump shouldn't be called when gwt is disabled */
+		return -EPERM;
+	}
+
+	if (!gwt_dump->in.len || !gwt_dump->in.addr_buffer
+			|| !gwt_dump->in.size_buffer) {
+		kbase_gpu_vm_unlock(kctx);
+		/* There is no valid user space buffer to copy the
+		 * write-modified addresses into.
+		 */
+		return -EINVAL;
+	}
+
+	if (list_empty(&kctx->gwt_snapshot_list) &&
+			!list_empty(&kctx->gwt_current_list)) {
+
+		list_replace_init(&kctx->gwt_current_list,
+					&kctx->gwt_snapshot_list);
+
+		/* We have collected all write faults so far
+		 * and they will be passed on to user space.
+		 * Reset the page flags state to allow collection of
+		 * further write faults.
+		 */
+		kbase_gpu_gwt_setup_pages(kctx, ~KBASE_REG_GPU_WR);
+
+		/* Sort and combine consecutive pages in the dump list */
+		kbase_gpu_gwt_collate(kctx, &kctx->gwt_snapshot_list);
+	}
+
+	while ((!list_empty(&kctx->gwt_snapshot_list))) {
+		u64 addr_buffer[32];
+		u64 num_page_buffer[32];
+		u32 count = 0;
+		int err;
+		struct kbasep_gwt_list_element *dump_info, *n;
+
+		list_for_each_entry_safe(dump_info, n,
+				&kctx->gwt_snapshot_list, link) {
+			addr_buffer[count] = dump_info->page_addr;
+			num_page_buffer[count] = dump_info->num_pages;
+			count++;
+			list_del(&dump_info->link);
+			kfree(dump_info);
+			if (ARRAY_SIZE(addr_buffer) == count ||
+					ubuf_size == (ubuf_count + count))
+				break;
+		}
+
+		if (count) {
+			err = copy_to_user((user_addr +
+					(ubuf_count * sizeof(u64))),
+					(void *)addr_buffer,
+					count * sizeof(u64));
+			if (err) {
+				dev_err(kctx->kbdev->dev, "Copy to user failure\n");
+				kbase_gpu_vm_unlock(kctx);
+				/* copy_to_user() returns bytes not copied */
+				return -EFAULT;
+			}
+			err = copy_to_user((user_sizes +
+					(ubuf_count * sizeof(u64))),
+					(void *)num_page_buffer,
+					count * sizeof(u64));
+			if (err) {
+				dev_err(kctx->kbdev->dev, "Copy to user failure\n");
+				kbase_gpu_vm_unlock(kctx);
+				/* copy_to_user() returns bytes not copied */
+				return -EFAULT;
+			}
+
+			ubuf_count += count;
+		}
+
+		if (ubuf_count == ubuf_size)
+			break;
+	}
+
+	if (!list_empty(&kctx->gwt_snapshot_list))
+		gwt_dump->out.more_data_available = 1;
+	else
+		gwt_dump->out.more_data_available = 0;
+
+	gwt_dump->out.no_of_addr_collected = ubuf_count;
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gwt.h b/drivers/gpu/arm/midgard/mali_kbase_gwt.h
new file mode 100644
index 0000000..7e7746e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gwt.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_GWT_H)
+#define _KBASE_GWT_H
+
+#include <mali_kbase.h>
+#include <mali_kbase_ioctl.h>
+
+/**
+ * kbase_gpu_gwt_start - Start the GPU write tracking
+ * @kctx: Pointer to kernel context
+ *
+ * @return 0 on success, error on failure.
+ */
+int kbase_gpu_gwt_start(struct kbase_context *kctx);
+
+/**
+ * kbase_gpu_gwt_stop - Stop the GPU write tracking
+ * @kctx: Pointer to kernel context
+ *
+ * @return 0 on success, error on failure.
+ */
+int kbase_gpu_gwt_stop(struct kbase_context *kctx);
+
+/**
+ * kbase_gpu_gwt_dump - Pass page address of faulting addresses to user space.
+ * @kctx:	Pointer to kernel context
+ * @gwt_dump:	User space data to be passed.
+ *
+ * @return 0 on success, error on failure.
+ */
+int kbase_gpu_gwt_dump(struct kbase_context *kctx,
+			union kbase_ioctl_cinstr_gwt_dump *gwt_dump);
+
+#endif /* _KBASE_GWT_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hw.c b/drivers/gpu/arm/midgard/mali_kbase_hw.c
new file mode 100644
index 0000000..450926c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hw.c
@@ -0,0 +1,564 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Run-time work-arounds helpers
+ */
+
+#include <mali_base_hwconfig_features.h>
+#include <mali_base_hwconfig_issues.h>
+#include <mali_midg_regmap.h>
+#include "mali_kbase.h"
+#include "mali_kbase_hw.h"
+
+void kbase_hw_set_features_mask(struct kbase_device *kbdev)
+{
+	const enum base_hw_feature *features;
+	u32 gpu_id;
+	u32 product_id;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+	product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+
+	if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+		switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+		case GPU_ID2_PRODUCT_TMIX:
+			features = base_hw_features_tMIx;
+			break;
+		case GPU_ID2_PRODUCT_THEX:
+			features = base_hw_features_tHEx;
+			break;
+		case GPU_ID2_PRODUCT_TSIX:
+			features = base_hw_features_tSIx;
+			break;
+		case GPU_ID2_PRODUCT_TDVX:
+			features = base_hw_features_tDVx;
+			break;
+		case GPU_ID2_PRODUCT_TNOX:
+			features = base_hw_features_tNOx;
+			break;
+		case GPU_ID2_PRODUCT_TGOX:
+			features = base_hw_features_tGOx;
+			break;
+		case GPU_ID2_PRODUCT_TKAX:
+			features = base_hw_features_tKAx;
+			break;
+		case GPU_ID2_PRODUCT_TEGX:
+			features = base_hw_features_tEGx;
+			break;
+		case GPU_ID2_PRODUCT_TTRX:
+			features = base_hw_features_tTRx;
+			break;
+		case GPU_ID2_PRODUCT_TNAX:
+			features = base_hw_features_tNAx;
+			break;
+		case GPU_ID2_PRODUCT_TBEX:
+			features = base_hw_features_tBEx;
+			break;
+		case GPU_ID2_PRODUCT_TULX:
+			features = base_hw_features_tULx;
+			break;
+		case GPU_ID2_PRODUCT_TBOX:
+			features = base_hw_features_tBOx;
+			break;
+		case GPU_ID2_PRODUCT_TIDX:
+			features = base_hw_features_tIDx;
+			break;
+		case GPU_ID2_PRODUCT_TVAX:
+			features = base_hw_features_tVAx;
+			break;
+		default:
+			features = base_hw_features_generic;
+			break;
+		}
+	} else {
+		switch (product_id) {
+		case GPU_ID_PI_TFRX:
+			/* FALLTHROUGH */
+		case GPU_ID_PI_T86X:
+			features = base_hw_features_tFxx;
+			break;
+		case GPU_ID_PI_T83X:
+			features = base_hw_features_t83x;
+			break;
+		case GPU_ID_PI_T82X:
+			features = base_hw_features_t82x;
+			break;
+		case GPU_ID_PI_T76X:
+			features = base_hw_features_t76x;
+			break;
+		case GPU_ID_PI_T72X:
+			features = base_hw_features_t72x;
+			break;
+		case GPU_ID_PI_T62X:
+			features = base_hw_features_t62x;
+			break;
+		case GPU_ID_PI_T60X:
+			features = base_hw_features_t60x;
+			break;
+		default:
+			features = base_hw_features_generic;
+			break;
+		}
+	}
+
+	for (; *features != BASE_HW_FEATURE_END; features++)
+		set_bit(*features, &kbdev->hw_features_mask[0]);
+}
+
+/**
+ * kbase_hw_get_issues_for_new_id - Get the hardware issues for a new GPU ID
+ * @kbdev: Device pointer
+ *
+ * Return: pointer to an array of hardware issues, terminated by
+ * BASE_HW_ISSUE_END.
+ *
+ * This function can only be used on new-format GPU IDs, i.e. those for which
+ * GPU_ID_IS_NEW_FORMAT evaluates as true. The GPU ID is read from the @kbdev.
+ *
+ * In debugging versions of the driver, unknown versions of a known GPU will
+ * be treated as the most recent known version not later than the actual
+ * version. In such circumstances, the GPU ID in @kbdev will also be replaced
+ * with the most recent known version.
+ *
+ * Note: The GPU configuration must have been read by kbase_gpuprops_get_props()
+ * before calling this function.
+ */
+static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
+					struct kbase_device *kbdev)
+{
+	const enum base_hw_issue *issues = NULL;
+
+	struct base_hw_product {
+		u32 product_model;
+		struct {
+			u32 version;
+			const enum base_hw_issue *issues;
+		} map[7];
+	};
+
+	static const struct base_hw_product base_hw_products[] = {
+		{GPU_ID2_PRODUCT_TMIX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 1),
+		   base_hw_issues_tMIx_r0p0_05dev0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 2), base_hw_issues_tMIx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tMIx_r0p1},
+		  {U32_MAX /* sentinel value */, NULL} } },
+
+		{GPU_ID2_PRODUCT_THEX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tHEx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tHEx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tHEx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 1), base_hw_issues_tHEx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(0, 2, 0), base_hw_issues_tHEx_r0p2},
+		  {GPU_ID2_VERSION_MAKE(0, 3, 0), base_hw_issues_tHEx_r0p3},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TSIX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tSIx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 0, 1), base_hw_issues_tSIx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(0, 1, 0), base_hw_issues_tSIx_r0p1},
+		  {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tSIx_r1p0},
+		  {GPU_ID2_VERSION_MAKE(1, 1, 0), base_hw_issues_tSIx_r1p1},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TDVX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tDVx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TNOX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tNOx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TGOX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tGOx_r0p0},
+		  {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tGOx_r1p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TKAX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tKAx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TEGX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tEGx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TTRX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tTRx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TNAX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tNAx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TBEX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tBEx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TULX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tULx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TBOX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tBOx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TIDX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tIDx_r0p0},
+		  {U32_MAX, NULL} } },
+
+		{GPU_ID2_PRODUCT_TVAX,
+		 {{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tVAx_r0p0},
+		  {U32_MAX, NULL} } },
+	};
+
+	u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	const u32 product_model = gpu_id & GPU_ID2_PRODUCT_MODEL;
+	const struct base_hw_product *product = NULL;
+	size_t p;
+
+	/* Stop when we reach the end of the products array. */
+	for (p = 0; p < ARRAY_SIZE(base_hw_products); ++p) {
+		if (product_model == base_hw_products[p].product_model) {
+			product = &base_hw_products[p];
+			break;
+		}
+	}
+
+	if (product != NULL) {
+		/* Found a matching product. */
+		const u32 version = gpu_id & GPU_ID2_VERSION;
+		u32 fallback_version = 0;
+		const enum base_hw_issue *fallback_issues = NULL;
+		size_t v;
+
+		/* Stop when we reach the end of the map. */
+		for (v = 0; product->map[v].version != U32_MAX; ++v) {
+
+			if (version == product->map[v].version) {
+				/* Exact match so stop. */
+				issues = product->map[v].issues;
+				break;
+			}
+
+			/* Check whether this is a candidate for most recent
+			 * known version not later than the actual
+			 * version. */
+			if ((version > product->map[v].version) &&
+				(product->map[v].version >= fallback_version)) {
+#if MALI_CUSTOMER_RELEASE
+				/* Match on version's major and minor fields */
+				if (((version ^ product->map[v].version) >>
+					GPU_ID2_VERSION_MINOR_SHIFT) == 0)
+#endif
+				{
+					fallback_version = product->map[v].version;
+					fallback_issues = product->map[v].issues;
+				}
+			}
+		}
+
+		if ((issues == NULL) && (fallback_issues != NULL)) {
+			/* Fall back to the issue set of the most recent known
+			 * version not later than the actual version. */
+			issues = fallback_issues;
+
+#if MALI_CUSTOMER_RELEASE
+			dev_warn(kbdev->dev,
+				"GPU hardware issue table may need updating:\n"
+#else
+			dev_info(kbdev->dev,
+#endif
+				"r%dp%d status %d is unknown; treating as r%dp%d status %d",
+				(gpu_id & GPU_ID2_VERSION_MAJOR) >>
+					GPU_ID2_VERSION_MAJOR_SHIFT,
+				(gpu_id & GPU_ID2_VERSION_MINOR) >>
+					GPU_ID2_VERSION_MINOR_SHIFT,
+				(gpu_id & GPU_ID2_VERSION_STATUS) >>
+					GPU_ID2_VERSION_STATUS_SHIFT,
+				(fallback_version & GPU_ID2_VERSION_MAJOR) >>
+					GPU_ID2_VERSION_MAJOR_SHIFT,
+				(fallback_version & GPU_ID2_VERSION_MINOR) >>
+					GPU_ID2_VERSION_MINOR_SHIFT,
+				(fallback_version & GPU_ID2_VERSION_STATUS) >>
+					GPU_ID2_VERSION_STATUS_SHIFT);
+
+			gpu_id &= ~GPU_ID2_VERSION;
+			gpu_id |= fallback_version;
+			kbdev->gpu_props.props.raw_props.gpu_id = gpu_id;
+
+			kbase_gpuprops_update_core_props_gpu_id(
+				&kbdev->gpu_props.props);
+		}
+	}
+	return issues;
+}
+
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev)
+{
+	const enum base_hw_issue *issues;
+	u32 gpu_id;
+	u32 product_id;
+	u32 impl_tech;
+
+	gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+	product_id = gpu_id & GPU_ID_VERSION_PRODUCT_ID;
+	product_id >>= GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+	impl_tech = kbdev->gpu_props.props.thread_props.impl_tech;
+
+	if (impl_tech != IMPLEMENTATION_MODEL) {
+		if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+			issues = kbase_hw_get_issues_for_new_id(kbdev);
+			if (issues == NULL) {
+				dev_err(kbdev->dev,
+					"Unknown GPU ID %x", gpu_id);
+				return -EINVAL;
+			}
+
+#if !MALI_CUSTOMER_RELEASE
+			/* The GPU ID might have been replaced with the last
+			 * known version of the same GPU. */
+			gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+#endif
+
+		} else {
+			switch (gpu_id) {
+			case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0):
+				issues = base_hw_issues_t60x_r0p0_15dev0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC):
+				issues = base_hw_issues_t60x_r0p0_eac;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0):
+				issues = base_hw_issues_t60x_r0p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0):
+				issues = base_hw_issues_t62x_r0p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0):
+			case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 1):
+				issues = base_hw_issues_t62x_r1p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0):
+				issues = base_hw_issues_t62x_r1p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1):
+				issues = base_hw_issues_t76x_r0p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 1):
+				issues = base_hw_issues_t76x_r0p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9):
+				issues = base_hw_issues_t76x_r0p1_50rel0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
+				issues = base_hw_issues_t76x_r0p2;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
+				issues = base_hw_issues_t76x_r0p3;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
+				issues = base_hw_issues_t76x_r1p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0):
+			case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1):
+			case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 2):
+				issues = base_hw_issues_t72x_r0p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0):
+				issues = base_hw_issues_t72x_r1p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 1, 0):
+				issues = base_hw_issues_t72x_r1p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 1, 2):
+				issues = base_hw_issues_tFRx_r0p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0):
+				issues = base_hw_issues_tFRx_r0p2;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 0):
+			case GPU_ID_MAKE(GPU_ID_PI_TFRX, 1, 0, 8):
+				issues = base_hw_issues_tFRx_r1p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_TFRX, 2, 0, 0):
+				issues = base_hw_issues_tFRx_r2p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 2, 0):
+				issues = base_hw_issues_t86x_r0p2;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 0):
+			case GPU_ID_MAKE(GPU_ID_PI_T86X, 1, 0, 8):
+				issues = base_hw_issues_t86x_r1p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T86X, 2, 0, 0):
+				issues = base_hw_issues_t86x_r2p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T83X, 0, 1, 0):
+				issues = base_hw_issues_t83x_r0p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 0):
+			case GPU_ID_MAKE(GPU_ID_PI_T83X, 1, 0, 8):
+				issues = base_hw_issues_t83x_r1p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 0, 0):
+				issues = base_hw_issues_t82x_r0p0;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T82X, 0, 1, 0):
+				issues = base_hw_issues_t82x_r0p1;
+				break;
+			case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 0):
+			case GPU_ID_MAKE(GPU_ID_PI_T82X, 1, 0, 8):
+				issues = base_hw_issues_t82x_r1p0;
+				break;
+			default:
+				dev_err(kbdev->dev,
+					"Unknown GPU ID %x", gpu_id);
+				return -EINVAL;
+			}
+		}
+	} else {
+		/* Software model */
+		if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+			switch (gpu_id & GPU_ID2_PRODUCT_MODEL) {
+			case GPU_ID2_PRODUCT_TMIX:
+				issues = base_hw_issues_model_tMIx;
+				break;
+			case GPU_ID2_PRODUCT_THEX:
+				issues = base_hw_issues_model_tHEx;
+				break;
+			case GPU_ID2_PRODUCT_TSIX:
+				issues = base_hw_issues_model_tSIx;
+				break;
+			case GPU_ID2_PRODUCT_TDVX:
+				issues = base_hw_issues_model_tDVx;
+				break;
+			case GPU_ID2_PRODUCT_TNOX:
+				issues = base_hw_issues_model_tNOx;
+				break;
+			case GPU_ID2_PRODUCT_TGOX:
+				issues = base_hw_issues_model_tGOx;
+				break;
+			case GPU_ID2_PRODUCT_TKAX:
+				issues = base_hw_issues_model_tKAx;
+				break;
+			case GPU_ID2_PRODUCT_TEGX:
+				issues = base_hw_issues_model_tEGx;
+				break;
+			case GPU_ID2_PRODUCT_TTRX:
+				issues = base_hw_issues_model_tTRx;
+				break;
+			case GPU_ID2_PRODUCT_TNAX:
+				issues = base_hw_issues_model_tNAx;
+				break;
+			case GPU_ID2_PRODUCT_TBEX:
+				issues = base_hw_issues_model_tBEx;
+				break;
+			case GPU_ID2_PRODUCT_TULX:
+				issues = base_hw_issues_model_tULx;
+				break;
+			case GPU_ID2_PRODUCT_TBOX:
+				issues = base_hw_issues_model_tBOx;
+				break;
+			case GPU_ID2_PRODUCT_TIDX:
+				issues = base_hw_issues_model_tIDx;
+				break;
+			case GPU_ID2_PRODUCT_TVAX:
+				issues = base_hw_issues_model_tVAx;
+				break;
+			default:
+				dev_err(kbdev->dev,
+					"Unknown GPU ID %x", gpu_id);
+				return -EINVAL;
+			}
+		} else {
+			switch (product_id) {
+			case GPU_ID_PI_T60X:
+				issues = base_hw_issues_model_t60x;
+				break;
+			case GPU_ID_PI_T62X:
+				issues = base_hw_issues_model_t62x;
+				break;
+			case GPU_ID_PI_T72X:
+				issues = base_hw_issues_model_t72x;
+				break;
+			case GPU_ID_PI_T76X:
+				issues = base_hw_issues_model_t76x;
+				break;
+			case GPU_ID_PI_TFRX:
+				issues = base_hw_issues_model_tFRx;
+				break;
+			case GPU_ID_PI_T86X:
+				issues = base_hw_issues_model_t86x;
+				break;
+			case GPU_ID_PI_T83X:
+				issues = base_hw_issues_model_t83x;
+				break;
+			case GPU_ID_PI_T82X:
+				issues = base_hw_issues_model_t82x;
+				break;
+			default:
+				dev_err(kbdev->dev, "Unknown GPU ID %x",
+					gpu_id);
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (GPU_ID_IS_NEW_FORMAT(product_id)) {
+		dev_info(kbdev->dev,
+			"GPU identified as 0x%x arch %d.%d.%d r%dp%d status %d",
+			(gpu_id & GPU_ID2_PRODUCT_MAJOR) >>
+				GPU_ID2_PRODUCT_MAJOR_SHIFT,
+			(gpu_id & GPU_ID2_ARCH_MAJOR) >>
+				GPU_ID2_ARCH_MAJOR_SHIFT,
+			(gpu_id & GPU_ID2_ARCH_MINOR) >>
+				GPU_ID2_ARCH_MINOR_SHIFT,
+			(gpu_id & GPU_ID2_ARCH_REV) >>
+				GPU_ID2_ARCH_REV_SHIFT,
+			(gpu_id & GPU_ID2_VERSION_MAJOR) >>
+				GPU_ID2_VERSION_MAJOR_SHIFT,
+			(gpu_id & GPU_ID2_VERSION_MINOR) >>
+				GPU_ID2_VERSION_MINOR_SHIFT,
+			(gpu_id & GPU_ID2_VERSION_STATUS) >>
+				GPU_ID2_VERSION_STATUS_SHIFT);
+	} else {
+		dev_info(kbdev->dev,
+			"GPU identified as 0x%04x r%dp%d status %d",
+			(gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
+				GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+			(gpu_id & GPU_ID_VERSION_MAJOR) >>
+				GPU_ID_VERSION_MAJOR_SHIFT,
+			(gpu_id & GPU_ID_VERSION_MINOR) >>
+				GPU_ID_VERSION_MINOR_SHIFT,
+			(gpu_id & GPU_ID_VERSION_STATUS) >>
+				GPU_ID_VERSION_STATUS_SHIFT);
+	}
+
+	for (; *issues != BASE_HW_ISSUE_END; issues++)
+		set_bit(*issues, &kbdev->hw_issues_mask[0]);
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hw.h b/drivers/gpu/arm/midgard/mali_kbase_hw.h
new file mode 100644
index 0000000..f386b16
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hw.h
@@ -0,0 +1,70 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file
+ * Run-time work-arounds helpers
+ */
+
+#ifndef _KBASE_HW_H_
+#define _KBASE_HW_H_
+
+#include "mali_kbase_defs.h"
+
+/**
+ * @brief Tell whether a work-around should be enabled
+ */
+#define kbase_hw_has_issue(kbdev, issue)\
+	test_bit(issue, &(kbdev)->hw_issues_mask[0])
+
+/**
+ * @brief Tell whether a feature is supported
+ */
+#define kbase_hw_has_feature(kbdev, feature)\
+	test_bit(feature, &(kbdev)->hw_features_mask[0])
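+
+/*
+ * Typical usage (illustrative): callers gate optional behaviour on these
+ * bits once the masks have been populated, e.g.
+ *   if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_THREAD_GROUP_SPLIT))
+ * as done when deriving thread properties in mali_kbase_gpuprops.c.
+ */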
+
+/**
+ * kbase_hw_set_issues_mask - Set the hardware issues mask based on the GPU ID
+ * @kbdev: Device pointer
+ *
+ * Return: 0 if the GPU ID was recognized, otherwise -EINVAL.
+ *
+ * The GPU ID is read from the @kbdev.
+ *
+ * In debugging versions of the driver, unknown versions of a known GPU with a
+ * new-format ID will be treated as the most recent known version not later
+ * than the actual version. In such circumstances, the GPU ID in @kbdev will
+ * also be replaced with the most recent known version.
+ *
+ * Note: The GPU configuration must have been read by
+ * kbase_gpuprops_get_props() before calling this function.
+ */
+int kbase_hw_set_issues_mask(struct kbase_device *kbdev);
+
+/**
+ * @brief Set the features mask depending on the GPU ID
+ */
+void kbase_hw_set_features_mask(struct kbase_device *kbdev);
+
+#endif				/* _KBASE_HW_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h
new file mode 100644
index 0000000..dde4965
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_backend.h
@@ -0,0 +1,59 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * HW access backend common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_BACKEND_H_
+#define _KBASE_HWACCESS_BACKEND_H_
+
+/**
+ * kbase_backend_early_init - Perform any backend-specific initialization.
+ * @kbdev:	Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_early_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_init - Perform any backend-specific initialization.
+ * @kbdev:	Device pointer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int kbase_backend_late_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_early_term - Perform any backend-specific termination.
+ * @kbdev:	Device pointer
+ */
+void kbase_backend_early_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_late_term - Perform any backend-specific termination.
+ * @kbdev:	Device pointer
+ */
+void kbase_backend_late_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_BACKEND_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h
new file mode 100644
index 0000000..124a2d9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * @file mali_kbase_hwaccess_defs.h
+ * HW access common definitions
+ */
+
+#ifndef _KBASE_HWACCESS_DEFS_H_
+#define _KBASE_HWACCESS_DEFS_H_
+
+#include <mali_kbase_jm_defs.h>
+
+/**
+ * struct kbase_hwaccess_data - object encapsulating the GPU backend specific
+ *                              data for the HW access layer.
+ *                              hwaccess_lock (a spinlock) must be held when
+ *                              accessing this structure.
+ * @active_kctx:     array, indexed by job slot, of pointers to the kbase
+ *                   contexts that last submitted an atom to the GPU. While a
+ *                   context is active it can also submit new atoms to the GPU
+ *                   from irq context, without going through the bottom half of
+ *                   the job completion path.
+ * @backend:         GPU backend specific data for HW access layer
+ */
+struct kbase_hwaccess_data {
+	struct kbase_context *active_kctx[BASE_JM_MAX_NR_SLOTS];
+
+	struct kbase_backend_data backend;
+};
+
+#endif /* _KBASE_HWACCESS_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h
new file mode 100644
index 0000000..63844d9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h
@@ -0,0 +1,54 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * Base kernel property query backend APIs
+ */
+
+#ifndef _KBASE_HWACCESS_GPUPROPS_H_
+#define _KBASE_HWACCESS_GPUPROPS_H_
+
+/**
+ * kbase_backend_gpuprops_get() - Fill @regdump with GPU properties read from
+ *				  GPU
+ * @kbdev:	Device pointer
+ * @regdump:	Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * The caller should ensure that the GPU remains powered on for the duration of
+ * this call.
+ */
+void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump);
+
+/**
+ * kbase_backend_gpuprops_get_features - Fill @regdump with GPU properties read
+ *					   from GPU
+ * @kbdev:   Device pointer
+ * @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * This function reads GPU properties that depend on the hardware features
+ * bitmask. It will power on the GPU if required.
+ */
+void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+					struct kbase_gpuprops_regdump *regdump);
+
+
+#endif /* _KBASE_HWACCESS_GPUPROPS_H_ */
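
A short sketch of how the two calls above pair up; the wrapper name is hypothetical and the power-state remarks simply restate the kerneldoc:

	static void example_read_gpu_props(struct kbase_device *kbdev,
			struct kbase_gpuprops_regdump *regdump)
	{
		/* The GPU must already be powered for the raw register dump. */
		kbase_backend_gpuprops_get(kbdev, regdump);

		/* Feature-dependent properties; powers the GPU on if needed. */
		kbase_backend_gpuprops_get_features(kbdev, regdump);
	}
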
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
new file mode 100644
index 0000000..d5b9099
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
@@ -0,0 +1,142 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * HW Access instrumentation common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_INSTR_H_
+#define _KBASE_HWACCESS_INSTR_H_
+
+#include <mali_kbase_instr_defs.h>
+
+/**
+ * struct kbase_instr_hwcnt_enable - Hardware counter collection enable
+ *                                   parameters.
+ * @dump_buffer:       GPU address to write counters to.
+ * @dump_buffer_bytes: Size in bytes of the buffer pointed to by dump_buffer.
+ * @jm_bm:             counters selection bitmask (JM).
+ * @shader_bm:         counters selection bitmask (Shader).
+ * @tiler_bm:          counters selection bitmask (Tiler).
+ * @mmu_l2_bm:         counters selection bitmask (MMU_L2).
+ * @use_secondary:     use secondary performance counters set for applicable
+ *                     counter blocks.
+ */
+struct kbase_instr_hwcnt_enable {
+	u64 dump_buffer;
+	u64 dump_buffer_bytes;
+	u32 jm_bm;
+	u32 shader_bm;
+	u32 tiler_bm;
+	u32 mmu_l2_bm;
+	bool use_secondary;
+};
+
+/**
+ * kbase_instr_hwcnt_enable_internal() - Enable HW counters collection
+ * @kbdev:	Kbase device
+ * @kctx:	Kbase context
+ * @enable:	HW counter setup parameters
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				struct kbase_instr_hwcnt_enable *enable);
+
+/**
+ * kbase_instr_hwcnt_disable_internal() - Disable HW counters collection
+ * @kctx: Kbase context
+ *
+ * Context: might sleep, waiting for an ongoing dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_request_dump() - Request HW counter dump from GPU
+ * @kctx:	Kbase context
+ *
+ * The caller must either wait for kbase_instr_hwcnt_dump_complete() to return
+ * true, or call kbase_instr_hwcnt_wait_for_dump().
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_wait_for_dump() - Wait until pending HW counter dump has
+ *				       completed.
+ * @kctx:	Kbase context
+ *
+ * Context: will sleep, waiting for dump to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_wait_for_dump(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_hwcnt_dump_complete - Tell whether the HW counters dump has
+ *				     completed
+ * @kctx:	Kbase context
+ * @success:	Set to true if the dump was successful, false otherwise
+ *
+ * Context: does not sleep.
+ *
+ * Return: true if the dump is complete
+ */
+bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+						bool * const success);
+
+/**
+ * kbase_instr_hwcnt_clear() - Clear HW counters
+ * @kctx:	Kbase context
+ *
+ * Context: might sleep, waiting for reset to complete
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_hwcnt_clear(struct kbase_context *kctx);
+
+/**
+ * kbase_instr_backend_init() - Initialise the instrumentation backend
+ * @kbdev:	Kbase device
+ *
+ * This function should be called during driver initialization.
+ *
+ * Return: 0 on success
+ */
+int kbase_instr_backend_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_instr_backend_term() - Terminate the instrumentation backend
+ * @kbdev:	Kbase device
+ *
+ * This function should be called during driver termination.
+ */
+void kbase_instr_backend_term(struct kbase_device *kbdev);
+
+#endif /* _KBASE_HWACCESS_INSTR_H_ */
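
A minimal sketch of one counter dump cycle using the API above. The wrapper name, buffer address, size and all-ones bitmask values are illustrative assumptions; only the call sequence follows the declarations in this header:

	static int example_hwcnt_dump_once(struct kbase_device *kbdev,
			struct kbase_context *kctx, u64 gpu_va, u64 dump_size)
	{
		struct kbase_instr_hwcnt_enable enable = {
			.dump_buffer       = gpu_va,
			.dump_buffer_bytes = dump_size,
			.jm_bm             = 0xffffffff,
			.shader_bm         = 0xffffffff,
			.tiler_bm          = 0xffffffff,
			.mmu_l2_bm         = 0xffffffff,
			.use_secondary     = false,
		};
		int err;

		err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &enable);
		if (err)
			return err;

		err = kbase_instr_hwcnt_request_dump(kctx);
		if (!err)
			err = kbase_instr_hwcnt_wait_for_dump(kctx);

		kbase_instr_hwcnt_disable_internal(kctx);
		return err;
	}
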
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h
new file mode 100644
index 0000000..e2798eb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h
@@ -0,0 +1,377 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_JM_H_
+#define _KBASE_HWACCESS_JM_H_
+
+/**
+ * kbase_backend_run_atom() - Run an atom on the GPU
+ * @kbdev:	Device pointer
+ * @katom:	Atom to run
+ *
+ * Caller must hold the HW access lock
+ */
+void kbase_backend_run_atom(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_slot_update - Update state based on slot ringbuffers
+ *
+ * @kbdev:  Device pointer
+ *
+ * Inspect the jobs in the slot ringbuffers and update state.
+ *
+ * This will cause jobs to be submitted to hardware if they are unblocked
+ */
+void kbase_backend_slot_update(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_find_and_release_free_address_space() - Release a free AS
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer
+ *
+ * This function can evict an idle context from the runpool, freeing up the
+ * address space it was using.
+ *
+ * The address space is marked as in use. The caller must either assign a
+ * context using kbase_gpu_use_ctx(), or release it using
+ * kbase_ctx_sched_release()
+ *
+ * Return: Number of the free address space, or KBASEP_AS_NR_INVALID if none
+ *	   is available
+ */
+int kbase_backend_find_and_release_free_address_space(
+		struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
+ *			     provided address space.
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer. May be NULL
+ * @as_nr:	Free address space to use
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * Return: true if successful, false if ASID not assigned.
+ */
+bool kbase_backend_use_ctx(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				int as_nr);
+
+/**
+ * kbase_backend_use_ctx_sched() - Activate a context.
+ * @kbdev:	Device pointer
+ * @kctx:	Context pointer
+ * @js:         Job slot to activate context on
+ *
+ * kbase_gpu_next_job() will pull atoms from the active context.
+ *
+ * The context must already be scheduled and assigned to an address space. If
+ * the context is not scheduled, then kbase_gpu_use_ctx() should be used
+ * instead.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if context is now active, false otherwise (i.e. if the context
+ *	   does not have an address space assigned)
+ */
+bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
+					struct kbase_context *kctx, int js);
+
+/**
+ * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
+ *                                 de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx:  Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock
+ */
+void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
+				struct kbase_context *kctx);
+
+/**
+ * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
+ *                                   de-assign the assigned address space.
+ * @kbdev: Device pointer
+ * @kctx:  Context pointer
+ *
+ * Caller must hold kbase_device->mmu_hw_mutex
+ *
+ * This function must perform any operations that could not be performed in IRQ
+ * context by kbase_backend_release_ctx_irq().
+ */
+void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
+						struct kbase_context *kctx);
+
+/**
+ * kbase_backend_cache_clean - Perform a cache clean if the given atom requires
+ *                            one
+ * @kbdev:	Device pointer
+ * @katom:	Pointer to the failed atom
+ *
+ * On some GPUs, the GPU cache must be cleaned following a failed atom. This
+ * function performs a clean if it is required by @katom.
+ */
+void kbase_backend_cache_clean(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom);
+
+
+/**
+ * kbase_backend_complete_wq() - Perform backend-specific actions required on
+ *				 completing an atom.
+ * @kbdev:	Device pointer
+ * @katom:	Pointer to the atom to complete
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ */
+void kbase_backend_complete_wq(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+/**
+ * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
+ *                                        required on completing an atom, after
+ *                                        any scheduling has taken place.
+ * @kbdev:         Device pointer
+ * @core_req:      Core requirements of atom
+ *
+ * This function should only be called from kbase_jd_done_worker() or
+ * js_return_worker().
+ */
+void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
+		base_jd_core_req core_req);
+
+/**
+ * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
+ *			   and remove any others from the ringbuffers.
+ * @kbdev:		Device pointer
+ * @end_timestamp:	Timestamp of reset
+ */
+void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);
+
+/**
+ * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
+ *                              @js
+ * @kbdev: Device pointer
+ * @js:    Job slot to inspect
+ *
+ * Return: Atom currently at the tail of slot @js, or NULL
+ */
+struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
+					int js);
+
+/**
+ * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
+ *				      slot.
+ * @kbdev:	Device pointer
+ * @js:		Job slot to inspect
+ *
+ * Return: Number of atoms currently on slot @js
+ */
+int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
+ *					that are currently on the GPU.
+ * @kbdev:	Device pointer
+ * @js:		Job slot to inspect
+ *
+ * Return: Number of atoms on slot @js that are currently on the GPU.
+ */
+int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
+ *				       has changed.
+ * @kbdev:	Device pointer
+ *
+ * Perform any required backend-specific actions (eg starting/stopping
+ * scheduling timers).
+ */
+void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed.
+ * @kbdev:	Device pointer
+ *
+ * Perform any required backend-specific actions (eg updating timeouts of
+ * currently running atoms).
+ */
+void kbase_backend_timeouts_changed(struct kbase_device *kbdev);
+
+/**
+ * kbase_backend_slot_free() - Return the number of jobs that can be currently
+ *			       submitted to slot @js.
+ * @kbdev:	Device pointer
+ * @js:		Job slot to inspect
+ *
+ * Return: Number of jobs that can be submitted.
+ */
+int kbase_backend_slot_free(struct kbase_device *kbdev, int js);
+
+/**
+ * kbase_job_check_leave_disjoint - potentially leave disjoint state
+ * @kbdev: kbase device
+ * @target_katom: atom which is finishing
+ *
+ * Work out whether to leave disjoint state when finishing an atom that was
+ * originated by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+		struct kbase_jd_atom *target_katom);
+
+/**
+ * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
+ *                                        running from a context
+ * @kctx: Context pointer
+ *
+ * This is used in response to a page fault to remove all jobs from the faulting
+ * context from the hardware.
+ */
+void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);
+
+/**
+ * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
+ *                               to be descheduled.
+ * @kctx: Context pointer
+ *
+ * This should be called following kbase_js_zap_context(), to ensure the context
+ * can be safely destroyed.
+ */
+void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
+
+/**
+ * kbase_backend_get_current_flush_id - Return the current flush ID
+ *
+ * @kbdev: Device pointer
+ *
+ * Return: the current flush ID to be recorded for each job chain
+ */
+u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);
+
+/**
+ * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true  - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ *                not be called.
+ */
+bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu if it returns
+ * true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for kbdev->reset_waitq to be
+ * signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu(struct kbase_device *kbdev);
+
+/**
+ * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
+ * @kbdev: Device pointer
+ *
+ * This function just soft-stops all the slots to ensure that as many jobs as
+ * possible are saved.
+ *
+ * Return: a boolean which should be interpreted as follows:
+ * - true  - Prepared for reset, kbase_reset_gpu should be called.
+ * - false - Another thread is performing a reset, kbase_reset_gpu should
+ *                not be called.
+ */
+bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_locked - Reset the GPU
+ * @kbdev: Device pointer
+ *
+ * This function should be called after kbase_prepare_to_reset_gpu if it
+ * returns true. It should never be called without a corresponding call to
+ * kbase_prepare_to_reset_gpu.
+ *
+ * After this function is called (or not called if kbase_prepare_to_reset_gpu
+ * returned false), the caller should wait for kbdev->reset_waitq to be
+ * signalled to know when the reset has completed.
+ */
+void kbase_reset_gpu_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_silent - Reset the GPU silently
+ * @kbdev: Device pointer
+ *
+ * Reset the GPU without trying to cancel jobs and don't emit messages into
+ * the kernel log while doing the reset.
+ *
+ * This function should be used in cases where we are doing a controlled reset
+ * of the GPU as part of normal processing (e.g. exiting protected mode) where
+ * the driver will have ensured the scheduler has been idled and all other
+ * users of the GPU (e.g. instrumentation) have been suspended.
+ *
+ * Return: 0 if the reset was started successfully
+ *         -EAGAIN if another reset is currently in progress
+ */
+int kbase_reset_gpu_silent(struct kbase_device *kbdev);
+
+/**
+ * kbase_reset_gpu_active - Reports if the GPU is being reset
+ * @kbdev: Device pointer
+ *
+ * Return: True if the GPU is in the process of being reset.
+ */
+bool kbase_reset_gpu_active(struct kbase_device *kbdev);
+
+/**
+ * kbase_job_slot_hardstop - Hard-stop the specified job slot
+ * @kctx:         The kbase context that contains the job(s) that should
+ *                be hard-stopped
+ * @js:           The job slot to hard-stop
+ * @target_katom: The job that should be hard-stopped (or NULL for all
+ *                jobs from the context)
+ * Context:
+ *   The job slot lock must be held when calling this function.
+ */
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+				struct kbase_jd_atom *target_katom);
+
+/* Object containing callbacks for enabling/disabling protected mode, used
+ * on GPUs which support protected mode switching natively.
+ */
+extern struct protected_mode_ops kbase_native_protected_ops;
+
+#endif /* _KBASE_HWACCESS_JM_H_ */
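
A sketch of the reset sequence described by kbase_prepare_to_reset_gpu() and kbase_reset_gpu() above. The wrapper name is hypothetical, and waiting on kbdev->reset_waitq with kbase_reset_gpu_active() as the condition is an assumption inferred from the comments, not a confirmed idiom of this driver:

	static void example_trigger_gpu_reset(struct kbase_device *kbdev)
	{
		if (kbase_prepare_to_reset_gpu(kbdev))
			kbase_reset_gpu(kbdev);

		/* Whether or not this thread started the reset, wait for it
		 * to complete before touching the GPU again.
		 */
		wait_event(kbdev->reset_waitq, !kbase_reset_gpu_active(kbdev));
	}
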
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h
new file mode 100644
index 0000000..5bb3887
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_pm.h
@@ -0,0 +1,233 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * @file mali_kbase_hwaccess_pm.h
+ * HW access power manager common APIs
+ */
+
+#ifndef _KBASE_HWACCESS_PM_H_
+#define _KBASE_HWACCESS_PM_H_
+
+#include <mali_midg_regmap.h>
+#include <linux/atomic.h>
+
+#include <mali_kbase_pm_defs.h>
+
+/* Forward definition - see mali_kbase.h */
+struct kbase_device;
+
+/* Functions common to all HW access backends */
+
+/**
+ * Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 if the power management framework was successfully initialized.
+ */
+int kbase_hwaccess_pm_early_init(struct kbase_device *kbdev);
+
+/**
+ * Initialize the power management framework.
+ *
+ * Must be called before any other power management function (except
+ * @ref kbase_hwaccess_pm_early_init)
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Return: 0 if the power management framework was successfully initialized.
+ */
+int kbase_hwaccess_pm_late_init(struct kbase_device *kbdev);
+
+/**
+ * Terminate the power management framework.
+ *
+ * No power management functions may be called after this (except
+ * @ref kbase_hwaccess_pm_early_init)
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_hwaccess_pm_early_term(struct kbase_device *kbdev);
+
+/**
+ * Terminate the power management framework.
+ *
+ * No power management functions may be called after this (except
+ * @ref kbase_hwaccess_pm_early_term or @ref kbase_hwaccess_pm_late_init)
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_hwaccess_pm_late_term(struct kbase_device *kbdev);
+
+/**
+ * kbase_hwaccess_pm_powerup - Power up the GPU.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @flags: Flags to pass on to kbase_pm_init_hw
+ *
+ * Power up GPU after all modules have been initialized and interrupt handlers
+ * installed.
+ *
+ * Return: 0 if powerup was successful.
+ */
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+		unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ *
+ * Should ensure that no new interrupts are generated, but allow any currently
+ * running interrupt handlers to complete successfully. The GPU is forced off by
+ * the time this function returns, regardless of whether or not the active power
+ * policy asks for the GPU to be powered off.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_halt(struct kbase_device *kbdev);
+
+/**
+ * Perform any backend-specific actions to suspend the GPU
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Perform any backend-specific actions to resume the GPU from a suspend
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for activating the GPU. Called when the first
+ * context goes active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev);
+
+/**
+ * Perform any required actions for idling the GPU. Called when the last
+ * context goes idle.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ */
+void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev);
+
+
+/**
+ * Set the debug core mask.
+ *
+ * This determines which cores the power manager is allowed to use.
+ *
+ * @param kbdev         The kbase device structure for the device (must be a
+ *                      valid pointer)
+ * @param new_core_mask_js0 The core mask to use for job slot 0
+ * @param new_core_mask_js1 The core mask to use for job slot 1
+ * @param new_core_mask_js2 The core mask to use for job slot 2
+ */
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+		u64 new_core_mask_js0, u64 new_core_mask_js1,
+		u64 new_core_mask_js2);
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_ca_policy
+*kbase_pm_ca_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev  The kbase device structure for the device (must be a valid
+ *               pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ *               @ref kbase_pm_ca_list_policies)
+ */
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
+				const struct kbase_pm_ca_policy *policy);
+
+/**
+ * Retrieve a static list of the available policies.
+ *
+ * @param[out] policies An array pointer to take the list of policies. This may
+ *                      be NULL. The contents of this array must not be
+ *                      modified.
+ *
+ * @return The number of policies
+ */
+int
+kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **policies);
+
+
+/**
+ * Get the current policy.
+ *
+ * Returns the policy that is currently active.
+ *
+ * @param kbdev The kbase device structure for the device (must be a valid
+ *              pointer)
+ *
+ * @return The current policy
+ */
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev);
+
+/**
+ * Change the policy to the one specified.
+ *
+ * @param kbdev  The kbase device structure for the device (must be a valid
+ *               pointer)
+ * @param policy The policy to change to (valid pointer returned from
+ *               @ref kbase_pm_list_policies)
+ */
+void kbase_pm_set_policy(struct kbase_device *kbdev,
+					const struct kbase_pm_policy *policy);
+
+/**
+ * Retrieve a static list of the available policies.
+ *
+ * @param[out] policies An array pointer to take the list of policies. This may
+ *                      be NULL. The contents of this array must not be
+ *                      modified.
+ *
+ * @return The number of policies
+ */
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **policies);
+
+#endif /* _KBASE_HWACCESS_PM_H_ */
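
A sketch of how the policy query and set calls above compose. The wrapper name and the switch-to-a-different-policy loop are purely illustrative; only the function signatures come from this header:

	static void example_cycle_pm_policy(struct kbase_device *kbdev)
	{
		const struct kbase_pm_policy *const *policies;
		const struct kbase_pm_policy *cur = kbase_pm_get_policy(kbdev);
		int count = kbase_pm_list_policies(&policies);
		int i;

		/* Switch to the first policy that is not the current one. */
		for (i = 0; i < count; i++) {
			if (policies[i] != cur) {
				kbase_pm_set_policy(kbdev, policies[i]);
				break;
			}
		}
	}
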
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h
new file mode 100644
index 0000000..1c6d232
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h
@@ -0,0 +1,62 @@
+/*
+ *
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/**
+ * @file mali_kbase_hwaccess_time.h
+ * HW access time-related common APIs
+ */
+
+#ifndef _KBASE_BACKEND_TIME_H_
+#define _KBASE_BACKEND_TIME_H_
+
+/**
+ * kbase_backend_get_gpu_time() - Get current GPU time
+ * @kbdev:		Device pointer
+ * @cycle_counter:	Pointer to u64 to store cycle counter in
+ * @system_time:	Pointer to u64 to store system time in
+ * @ts:			Pointer to struct timespec64 to store current monotonic
+ *			time in
+ */
+void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
+				u64 *system_time, struct timespec64 *ts);
+
+/**
+ * kbase_wait_write_flush() -  Wait for GPU write flush
+ * @kbdev:	Kbase device
+ *
+ * Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
+ * its write buffer.
+ *
+ * If a GPU reset occurs then the counters are reset to zero and the delay may
+ * not be as expected.
+ *
+ * This function is only in use for BASE_HW_ISSUE_6367
+ */
+#ifdef CONFIG_MALI_NO_MALI
+static inline void kbase_wait_write_flush(struct kbase_device *kbdev)
+{
+}
+#else
+void kbase_wait_write_flush(struct kbase_device *kbdev);
+#endif
+
+#endif /* _KBASE_BACKEND_TIME_H_ */
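
A small usage sketch for kbase_backend_get_gpu_time(); the surrounding call site is hypothetical:

	static void example_sample_gpu_time(struct kbase_device *kbdev)
	{
		u64 cycle_counter, system_time;
		struct timespec64 ts;

		kbase_backend_get_gpu_time(kbdev, &cycle_counter, &system_time,
				&ts);
		/* cycle_counter, system_time and ts form one coherent sample. */
	}
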
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt.c
new file mode 100644
index 0000000..efbac6f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt.c
@@ -0,0 +1,796 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Implementation of hardware counter context and accumulator APIs.
+ */
+
+#include "mali_kbase_hwcnt_context.h"
+#include "mali_kbase_hwcnt_accumulator.h"
+#include "mali_kbase_hwcnt_backend.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_malisw.h"
+#include "mali_kbase_debug.h"
+#include "mali_kbase_linux.h"
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+/**
+ * enum kbase_hwcnt_accum_state - Hardware counter accumulator states.
+ * @ACCUM_STATE_ERROR:    Error state, where all accumulator operations fail.
+ * @ACCUM_STATE_DISABLED: Disabled state, where dumping is always disabled.
+ * @ACCUM_STATE_ENABLED:  Enabled state, where dumping is enabled if there are
+ *                        any enabled counters.
+ */
+enum kbase_hwcnt_accum_state {
+	ACCUM_STATE_ERROR,
+	ACCUM_STATE_DISABLED,
+	ACCUM_STATE_ENABLED
+};
+
+/**
+ * struct kbase_hwcnt_accumulator - Hardware counter accumulator structure.
+ * @backend:                Pointer to created counter backend.
+ * @state:                  The current state of the accumulator.
+ *                           - State transition from disabled->enabled or
+ *                             disabled->error requires state_lock.
+ *                           - State transition from enabled->disabled or
+ *                             enabled->error requires both accum_lock and
+ *                             state_lock.
+ *                           - Error state persists until next disable.
+ * @enable_map:             The current set of enabled counters.
+ *                           - Must only be modified while holding both
+ *                             accum_lock and state_lock.
+ *                           - Can be read while holding either lock.
+ *                           - Must stay in sync with enable_map_any_enabled.
+ * @enable_map_any_enabled: True if any counters in the map are enabled, else
+ *                          false. If true, and state is ACCUM_STATE_ENABLED,
+ *                          then the counter backend will be enabled.
+ *                           - Must only be modified while holding both
+ *                             accum_lock and state_lock.
+ *                           - Can be read while holding either lock.
+ *                           - Must stay in sync with enable_map.
+ * @scratch_map:            Scratch enable map, used as temporary enable map
+ *                          storage during dumps.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ * @accum_buf:              Accumulation buffer, where dumps will be accumulated
+ *                          into on transition to a disabled state.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ * @accumulated:            True if the accumulation buffer has been accumulated
+ *                          into and not subsequently read from yet, else false.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ * @ts_last_dump_ns:        Timestamp (ns) of the end time of the most recent
+ *                          dump that was requested by the user.
+ *                           - Must only be read or modified while holding
+ *                             accum_lock.
+ */
+struct kbase_hwcnt_accumulator {
+	struct kbase_hwcnt_backend *backend;
+	enum kbase_hwcnt_accum_state state;
+	struct kbase_hwcnt_enable_map enable_map;
+	bool enable_map_any_enabled;
+	struct kbase_hwcnt_enable_map scratch_map;
+	struct kbase_hwcnt_dump_buffer accum_buf;
+	bool accumulated;
+	u64 ts_last_dump_ns;
+};
+
+/**
+ * struct kbase_hwcnt_context - Hardware counter context structure.
+ * @iface:         Pointer to hardware counter backend interface.
+ * @state_lock:    Spinlock protecting state.
+ * @disable_count: Disable count of the context. Initialised to 1.
+ *                 Decremented when the accumulator is acquired, and incremented
+ *                 on release. Incremented on calls to
+ *                 kbase_hwcnt_context_disable[_atomic], and decremented on
+ *                 calls to kbase_hwcnt_context_enable.
+ *                  - Must only be read or modified while holding state_lock.
+ * @accum_lock:    Mutex protecting accumulator.
+ * @accum_inited:  Flag to prevent concurrent accumulator initialisation and/or
+ *                 termination. Set to true before accumulator initialisation,
+ *                 and false after accumulator termination.
+ *                  - Must only be modified while holding both accum_lock and
+ *                    state_lock.
+ *                  - Can be read while holding either lock.
+ * @accum:         Hardware counter accumulator structure.
+ */
+struct kbase_hwcnt_context {
+	const struct kbase_hwcnt_backend_interface *iface;
+	spinlock_t state_lock;
+	size_t disable_count;
+	struct mutex accum_lock;
+	bool accum_inited;
+	struct kbase_hwcnt_accumulator accum;
+};
+
+int kbase_hwcnt_context_init(
+	const struct kbase_hwcnt_backend_interface *iface,
+	struct kbase_hwcnt_context **out_hctx)
+{
+	struct kbase_hwcnt_context *hctx = NULL;
+
+	if (!iface || !out_hctx)
+		return -EINVAL;
+
+	hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);
+	if (!hctx)
+		return -ENOMEM;
+
+	hctx->iface = iface;
+	spin_lock_init(&hctx->state_lock);
+	hctx->disable_count = 1;
+	mutex_init(&hctx->accum_lock);
+	hctx->accum_inited = false;
+
+	*out_hctx = hctx;
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_init);
+
+void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx)
+{
+	if (!hctx)
+		return;
+
+	/* Make sure we didn't leak the accumulator */
+	WARN_ON(hctx->accum_inited);
+	kfree(hctx);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_term);
+
+/**
+ * kbasep_hwcnt_accumulator_term() - Terminate the accumulator for the context.
+ * @hctx: Non-NULL pointer to hardware counter context.
+ */
+static void kbasep_hwcnt_accumulator_term(struct kbase_hwcnt_context *hctx)
+{
+	WARN_ON(!hctx);
+	WARN_ON(!hctx->accum_inited);
+
+	kbase_hwcnt_enable_map_free(&hctx->accum.scratch_map);
+	kbase_hwcnt_dump_buffer_free(&hctx->accum.accum_buf);
+	kbase_hwcnt_enable_map_free(&hctx->accum.enable_map);
+	hctx->iface->term(hctx->accum.backend);
+	memset(&hctx->accum, 0, sizeof(hctx->accum));
+}
+
+/**
+ * kbasep_hwcnt_accumulator_init() - Initialise the accumulator for the context.
+ * @hctx: Non-NULL pointer to hardware counter context.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_accumulator_init(struct kbase_hwcnt_context *hctx)
+{
+	int errcode;
+
+	WARN_ON(!hctx);
+	WARN_ON(!hctx->accum_inited);
+
+	errcode = hctx->iface->init(
+		hctx->iface->info, &hctx->accum.backend);
+	if (errcode)
+		goto error;
+
+	hctx->accum.state = ACCUM_STATE_ERROR;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		hctx->iface->metadata, &hctx->accum.enable_map);
+	if (errcode)
+		goto error;
+
+	hctx->accum.enable_map_any_enabled = false;
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(
+		hctx->iface->metadata, &hctx->accum.accum_buf);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		hctx->iface->metadata, &hctx->accum.scratch_map);
+	if (errcode)
+		goto error;
+
+	hctx->accum.accumulated = false;
+
+	hctx->accum.ts_last_dump_ns =
+		hctx->iface->timestamp_ns(hctx->accum.backend);
+
+	return 0;
+
+error:
+	kbasep_hwcnt_accumulator_term(hctx);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_accumulator_disable() - Transition the accumulator into the
+ *                                      disabled state, from the enabled or
+ *                                      error states.
+ * @hctx:       Non-NULL pointer to hardware counter context.
+ * @accumulate: True if we should accumulate before disabling, else false.
+ */
+static void kbasep_hwcnt_accumulator_disable(
+	struct kbase_hwcnt_context *hctx, bool accumulate)
+{
+	int errcode = 0;
+	bool backend_enabled = false;
+	struct kbase_hwcnt_accumulator *accum;
+	unsigned long flags;
+
+	WARN_ON(!hctx);
+	lockdep_assert_held(&hctx->accum_lock);
+	WARN_ON(!hctx->accum_inited);
+
+	accum = &hctx->accum;
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	WARN_ON(hctx->disable_count != 0);
+	WARN_ON(hctx->accum.state == ACCUM_STATE_DISABLED);
+
+	if ((hctx->accum.state == ACCUM_STATE_ENABLED) &&
+	    (accum->enable_map_any_enabled))
+		backend_enabled = true;
+
+	if (!backend_enabled)
+		hctx->accum.state = ACCUM_STATE_DISABLED;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	/* Early out if the backend is not already enabled */
+	if (!backend_enabled)
+		return;
+
+	if (!accumulate)
+		goto disable;
+
+	/* Try and accumulate before disabling */
+	errcode = hctx->iface->dump_request(accum->backend);
+	if (errcode)
+		goto disable;
+
+	errcode = hctx->iface->dump_wait(accum->backend);
+	if (errcode)
+		goto disable;
+
+	errcode = hctx->iface->dump_get(accum->backend,
+		&accum->accum_buf, &accum->enable_map, accum->accumulated);
+	if (errcode)
+		goto disable;
+
+	accum->accumulated = true;
+
+disable:
+	hctx->iface->dump_disable(accum->backend);
+
+	/* Regardless of any errors during the accumulate, put the accumulator
+	 * in the disabled state.
+	 */
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	hctx->accum.state = ACCUM_STATE_DISABLED;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+}
+
+/**
+ * kbasep_hwcnt_accumulator_enable() - Transition the accumulator into the
+ *                                     enabled state, from the disabled state.
+ * @hctx: Non-NULL pointer to hardware counter context.
+ */
+static void kbasep_hwcnt_accumulator_enable(struct kbase_hwcnt_context *hctx)
+{
+	int errcode = 0;
+	struct kbase_hwcnt_accumulator *accum;
+
+	WARN_ON(!hctx);
+	lockdep_assert_held(&hctx->state_lock);
+	WARN_ON(!hctx->accum_inited);
+	WARN_ON(hctx->accum.state != ACCUM_STATE_DISABLED);
+
+	accum = &hctx->accum;
+
+	/* The backend only needs enabling if any counters are enabled */
+	if (accum->enable_map_any_enabled)
+		errcode = hctx->iface->dump_enable_nolock(
+			accum->backend, &accum->enable_map);
+
+	if (!errcode)
+		accum->state = ACCUM_STATE_ENABLED;
+	else
+		accum->state = ACCUM_STATE_ERROR;
+}
+
+/**
+ * kbasep_hwcnt_accumulator_dump() - Perform a dump with the most up-to-date
+ *                                   values of enabled counters possible, and
+ *                                   optionally update the set of enabled
+ *                                   counters.
+ * @hctx:        Non-NULL pointer to the hardware counter context
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ * @new_map:     Pointer to the new counter enable map. If non-NULL, must have
+ *               the same metadata as the accumulator. If NULL, the set of
+ *               enabled counters will be unchanged.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_accumulator_dump(
+	struct kbase_hwcnt_context *hctx,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf,
+	const struct kbase_hwcnt_enable_map *new_map)
+{
+	int errcode = 0;
+	unsigned long flags;
+	enum kbase_hwcnt_accum_state state;
+	bool dump_requested = false;
+	bool dump_written = false;
+	bool cur_map_any_enabled;
+	struct kbase_hwcnt_enable_map *cur_map;
+	bool new_map_any_enabled = false;
+	u64 dump_time_ns;
+	struct kbase_hwcnt_accumulator *accum;
+
+	WARN_ON(!hctx);
+	WARN_ON(!ts_start_ns);
+	WARN_ON(!ts_end_ns);
+	WARN_ON(dump_buf && (dump_buf->metadata != hctx->iface->metadata));
+	WARN_ON(new_map && (new_map->metadata != hctx->iface->metadata));
+	WARN_ON(!hctx->accum_inited);
+	lockdep_assert_held(&hctx->accum_lock);
+
+	accum = &hctx->accum;
+	cur_map = &accum->scratch_map;
+
+	/* Save out info about the current enable map */
+	cur_map_any_enabled = accum->enable_map_any_enabled;
+	kbase_hwcnt_enable_map_copy(cur_map, &accum->enable_map);
+
+	if (new_map)
+		new_map_any_enabled =
+			kbase_hwcnt_enable_map_any_enabled(new_map);
+
+	/*
+	 * We're holding accum_lock, so the accumulator state might transition
+	 * from disabled to enabled during this function (as enabling is lock
+	 * free), but it will never disable (as disabling needs to hold the
+	 * accum_lock), nor will it ever transition from enabled to error (as
+	 * an enable while we're already enabled is impossible).
+	 *
+	 * If we're already disabled, we'll only look at the accumulation buffer
+	 * rather than do a real dump, so a concurrent enable does not affect
+	 * us.
+	 *
+	 * If a concurrent enable fails, we might transition to the error
+	 * state, but again, as we're only looking at the accumulation buffer,
+	 * it's not an issue.
+	 */
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	state = accum->state;
+
+	/*
+	 * Update the new map now, such that if an enable occurs during this
+	 * dump then that enable will set the new map. If we're already enabled,
+	 * then we'll do it ourselves after the dump.
+	 */
+	if (new_map) {
+		kbase_hwcnt_enable_map_copy(
+			&accum->enable_map, new_map);
+		accum->enable_map_any_enabled = new_map_any_enabled;
+	}
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	/* Error state, so early out. No need to roll back any map updates */
+	if (state == ACCUM_STATE_ERROR)
+		return -EIO;
+
+	/* Initiate the dump if the backend is enabled. */
+	if ((state == ACCUM_STATE_ENABLED) && cur_map_any_enabled) {
+		/* Disable pre-emption, to make the timestamp as accurate as
+		 * possible.
+		 */
+		preempt_disable();
+		{
+			dump_time_ns = hctx->iface->timestamp_ns(
+				accum->backend);
+			if (dump_buf) {
+				errcode = hctx->iface->dump_request(
+					accum->backend);
+				dump_requested = true;
+			} else {
+				errcode = hctx->iface->dump_clear(
+					accum->backend);
+			}
+		}
+		preempt_enable();
+		if (errcode)
+			goto error;
+	} else {
+		dump_time_ns = hctx->iface->timestamp_ns(accum->backend);
+	}
+
+	/* Copy any accumulation into the dest buffer */
+	if (accum->accumulated && dump_buf) {
+		kbase_hwcnt_dump_buffer_copy(
+			dump_buf, &accum->accum_buf, cur_map);
+		dump_written = true;
+	}
+
+	/* Wait for any requested dumps to complete */
+	if (dump_requested) {
+		WARN_ON(state != ACCUM_STATE_ENABLED);
+		errcode = hctx->iface->dump_wait(accum->backend);
+		if (errcode)
+			goto error;
+	}
+
+	/* If we're enabled and there's a new enable map, change the enabled set
+	 * as soon after the dump has completed as possible.
+	 */
+	if ((state == ACCUM_STATE_ENABLED) && new_map) {
+		/* Backend is only enabled if there were any enabled counters */
+		if (cur_map_any_enabled)
+			hctx->iface->dump_disable(accum->backend);
+
+		/* (Re-)enable the backend if the new map has enabled counters.
+		 * No need to acquire the spinlock, as concurrent enable while
+		 * we're already enabled and holding accum_lock is impossible.
+		 */
+		if (new_map_any_enabled) {
+			errcode = hctx->iface->dump_enable(
+				accum->backend, new_map);
+			if (errcode)
+				goto error;
+		}
+	}
+
+	/* Copy, accumulate, or zero into the dest buffer to finish */
+	if (dump_buf) {
+		/* If we dumped, copy or accumulate it into the destination */
+		if (dump_requested) {
+			WARN_ON(state != ACCUM_STATE_ENABLED);
+			errcode = hctx->iface->dump_get(
+				accum->backend,
+				dump_buf,
+				cur_map,
+				dump_written);
+			if (errcode)
+				goto error;
+			dump_written = true;
+		}
+
+		/* If we've not written anything into the dump buffer so far, it
+		 * means there was nothing to write. Zero any enabled counters.
+		 */
+		if (!dump_written)
+			kbase_hwcnt_dump_buffer_zero(dump_buf, cur_map);
+	}
+
+	/* Write out timestamps */
+	*ts_start_ns = accum->ts_last_dump_ns;
+	*ts_end_ns = dump_time_ns;
+
+	accum->accumulated = false;
+	accum->ts_last_dump_ns = dump_time_ns;
+
+	return 0;
+error:
+	/* An error was only physically possible if the backend was enabled */
+	WARN_ON(state != ACCUM_STATE_ENABLED);
+
+	/* Disable the backend, and transition to the error state */
+	hctx->iface->dump_disable(accum->backend);
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	accum->state = ACCUM_STATE_ERROR;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_context_disable() - Increment the disable count of the context.
+ * @hctx:       Non-NULL pointer to hardware counter context.
+ * @accumulate: True if we should accumulate before disabling, else false.
+ */
+static void kbasep_hwcnt_context_disable(
+	struct kbase_hwcnt_context *hctx, bool accumulate)
+{
+	unsigned long flags;
+
+	WARN_ON(!hctx);
+	lockdep_assert_held(&hctx->accum_lock);
+
+	if (!kbase_hwcnt_context_disable_atomic(hctx)) {
+		kbasep_hwcnt_accumulator_disable(hctx, accumulate);
+
+		spin_lock_irqsave(&hctx->state_lock, flags);
+
+		/* Atomic disable failed and we're holding the mutex, so current
+		 * disable count must be 0.
+		 */
+		WARN_ON(hctx->disable_count != 0);
+		hctx->disable_count++;
+
+		spin_unlock_irqrestore(&hctx->state_lock, flags);
+	}
+}
+
+int kbase_hwcnt_accumulator_acquire(
+	struct kbase_hwcnt_context *hctx,
+	struct kbase_hwcnt_accumulator **accum)
+{
+	int errcode = 0;
+	unsigned long flags;
+
+	if (!hctx || !accum)
+		return -EINVAL;
+
+	mutex_lock(&hctx->accum_lock);
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	if (!hctx->accum_inited)
+		/* Set accum initing now to prevent concurrent init */
+		hctx->accum_inited = true;
+	else
+		/* Already have an accum, or already being inited */
+		errcode = -EBUSY;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+	mutex_unlock(&hctx->accum_lock);
+
+	if (errcode)
+		return errcode;
+
+	errcode = kbasep_hwcnt_accumulator_init(hctx);
+
+	if (errcode) {
+		mutex_lock(&hctx->accum_lock);
+		spin_lock_irqsave(&hctx->state_lock, flags);
+
+		hctx->accum_inited = false;
+
+		spin_unlock_irqrestore(&hctx->state_lock, flags);
+		mutex_unlock(&hctx->accum_lock);
+
+		return errcode;
+	}
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	WARN_ON(hctx->disable_count == 0);
+	WARN_ON(hctx->accum.enable_map_any_enabled);
+
+	/* Decrement the disable count to allow the accumulator to be accessible
+	 * now that it's fully constructed.
+	 */
+	hctx->disable_count--;
+
+	/*
+	 * Make sure the accumulator is initialised to the correct state.
+	 * Regardless of initial state, counters don't need to be enabled via
+	 * the backend, as the initial enable map has no enabled counters.
+	 */
+	hctx->accum.state = (hctx->disable_count == 0) ?
+		ACCUM_STATE_ENABLED :
+		ACCUM_STATE_DISABLED;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	*accum = &hctx->accum;
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_acquire);
+
+void kbase_hwcnt_accumulator_release(struct kbase_hwcnt_accumulator *accum)
+{
+	unsigned long flags;
+	struct kbase_hwcnt_context *hctx;
+
+	if (!accum)
+		return;
+
+	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
+
+	mutex_lock(&hctx->accum_lock);
+
+	/* Double release is a programming error */
+	WARN_ON(!hctx->accum_inited);
+
+	/* Disable the context to ensure the accumulator is inaccessible while
+	 * we're destroying it. This performs the corresponding disable count
+	 * increment to the decrement done during acquisition.
+	 */
+	kbasep_hwcnt_context_disable(hctx, false);
+
+	mutex_unlock(&hctx->accum_lock);
+
+	kbasep_hwcnt_accumulator_term(hctx);
+
+	mutex_lock(&hctx->accum_lock);
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	hctx->accum_inited = false;
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+	mutex_unlock(&hctx->accum_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_release);
+
+void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx)
+{
+	if (WARN_ON(!hctx))
+		return;
+
+	/* Try and atomically disable first, so we can avoid locking the mutex
+	 * if we don't need to.
+	 */
+	if (kbase_hwcnt_context_disable_atomic(hctx))
+		return;
+
+	mutex_lock(&hctx->accum_lock);
+
+	kbasep_hwcnt_context_disable(hctx, true);
+
+	mutex_unlock(&hctx->accum_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_disable);
+
+bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx)
+{
+	unsigned long flags;
+	bool atomic_disabled = false;
+
+	if (WARN_ON(!hctx))
+		return false;
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	if (!WARN_ON(hctx->disable_count == SIZE_MAX)) {
+		/*
+		 * If disable count is non-zero or no counters are enabled, we
+		 * can just bump the disable count.
+		 *
+		 * Otherwise, we can't disable in an atomic context.
+		 */
+		if (hctx->disable_count != 0) {
+			hctx->disable_count++;
+			atomic_disabled = true;
+		} else {
+			WARN_ON(!hctx->accum_inited);
+			if (!hctx->accum.enable_map_any_enabled) {
+				hctx->disable_count++;
+				hctx->accum.state = ACCUM_STATE_DISABLED;
+				atomic_disabled = true;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+
+	return atomic_disabled;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_disable_atomic);
+
+void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!hctx))
+		return;
+
+	spin_lock_irqsave(&hctx->state_lock, flags);
+
+	if (!WARN_ON(hctx->disable_count == 0)) {
+		if (hctx->disable_count == 1)
+			kbasep_hwcnt_accumulator_enable(hctx);
+
+		hctx->disable_count--;
+	}
+
+	spin_unlock_irqrestore(&hctx->state_lock, flags);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_enable);
+
+const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(
+	struct kbase_hwcnt_context *hctx)
+{
+	if (!hctx)
+		return NULL;
+
+	return hctx->iface->metadata;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_context_metadata);
+
+int kbase_hwcnt_accumulator_set_counters(
+	struct kbase_hwcnt_accumulator *accum,
+	const struct kbase_hwcnt_enable_map *new_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_context *hctx;
+
+	if (!accum || !new_map || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
+
+	if ((new_map->metadata != hctx->iface->metadata) ||
+	    (dump_buf && (dump_buf->metadata != hctx->iface->metadata)))
+		return -EINVAL;
+
+	mutex_lock(&hctx->accum_lock);
+
+	errcode = kbasep_hwcnt_accumulator_dump(
+		hctx, ts_start_ns, ts_end_ns, dump_buf, new_map);
+
+	mutex_unlock(&hctx->accum_lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_set_counters);
+
+int kbase_hwcnt_accumulator_dump(
+	struct kbase_hwcnt_accumulator *accum,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_context *hctx;
+
+	if (!accum || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hctx = container_of(accum, struct kbase_hwcnt_context, accum);
+
+	if (dump_buf && (dump_buf->metadata != hctx->iface->metadata))
+		return -EINVAL;
+
+	mutex_lock(&hctx->accum_lock);
+
+	errcode = kbasep_hwcnt_accumulator_dump(
+		hctx, ts_start_ns, ts_end_ns, dump_buf, NULL);
+
+	mutex_unlock(&hctx->accum_lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_accumulator_dump);
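
A sketch of the accumulator lifecycle implemented above, from a single client's point of view. The wrapper name is hypothetical, and the backend interface pointer, enable map and dump buffer are assumed to have been created via the hwcnt backend/types APIs, which are outside this hunk:

	static int example_hwcnt_client(
			const struct kbase_hwcnt_backend_interface *iface,
			struct kbase_hwcnt_enable_map *enable_map,
			struct kbase_hwcnt_dump_buffer *dump_buf)
	{
		struct kbase_hwcnt_context *hctx;
		struct kbase_hwcnt_accumulator *accum;
		u64 ts_start_ns, ts_end_ns;
		int err;

		err = kbase_hwcnt_context_init(iface, &hctx);
		if (err)
			return err;

		err = kbase_hwcnt_accumulator_acquire(hctx, &accum);
		if (err)
			goto term_ctx;

		/* Enable the desired counters; this also dumps and discards the
		 * previously enabled set (empty right after acquire).
		 */
		err = kbase_hwcnt_accumulator_set_counters(accum, enable_map,
				&ts_start_ns, &ts_end_ns, NULL);

		/* Collect the accumulated values into dump_buf. */
		if (!err)
			err = kbase_hwcnt_accumulator_dump(accum, &ts_start_ns,
					&ts_end_ns, dump_buf);

		kbase_hwcnt_accumulator_release(accum);
	term_ctx:
		kbase_hwcnt_context_term(hctx);
		return err;
	}
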
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_accumulator.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_accumulator.h
new file mode 100644
index 0000000..fc45743
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_accumulator.h
@@ -0,0 +1,137 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter accumulator API.
+ */
+
+#ifndef _KBASE_HWCNT_ACCUMULATOR_H_
+#define _KBASE_HWCNT_ACCUMULATOR_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_context;
+struct kbase_hwcnt_accumulator;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/**
+ * kbase_hwcnt_accumulator_acquire() - Acquire the hardware counter accumulator
+ *                                     for a hardware counter context.
+ * @hctx:  Non-NULL pointer to a hardware counter context.
+ * @accum: Non-NULL pointer to where the pointer to the created accumulator
+ *         will be stored on success.
+ *
+ * There can exist at most one instance of the hardware counter accumulator per
+ * context at a time.
+ *
+ * If multiple clients need access to the hardware counters at the same time,
+ * then an abstraction built on top of the single instance of the hardware
+ * counter accumulator is required.
+ *
+ * No counters will be enabled with the returned accumulator. A subsequent call
+ * to kbase_hwcnt_accumulator_set_counters must be used to turn them on.
+ *
+ * There are four components to a hardware counter dump:
+ *  - A set of enabled counters
+ *  - A start time
+ *  - An end time
+ *  - A dump buffer containing the accumulated counter values for all enabled
+ *    counters between the start and end times.
+ *
+ * For each dump, it is guaranteed that all enabled counters were active for the
+ * entirety of the period between the start and end times.
+ *
+ * It is also guaranteed that the start time of dump "n" is always equal to the
+ * end time of dump "n - 1".
+ *
+ * For all dumps, the values of any counters that were not enabled are undefined.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_accumulator_acquire(
+	struct kbase_hwcnt_context *hctx,
+	struct kbase_hwcnt_accumulator **accum);
+
+/**
+ * kbase_hwcnt_accumulator_release() - Release a hardware counter accumulator.
+ * @accum: Non-NULL pointer to the hardware counter accumulator.
+ *
+ * The accumulator must be released before the context the accumulator was
+ * created from is terminated.
+ */
+void kbase_hwcnt_accumulator_release(struct kbase_hwcnt_accumulator *accum);
+
+/**
+ * kbase_hwcnt_accumulator_set_counters() - Perform a dump of the currently
+ *                                          enabled counters, and enable a new
+ *                                          set of counters that will be used
+ *                                          for subsequent dumps.
+ * @accum:       Non-NULL pointer to the hardware counter accumulator.
+ * @new_map:     Non-NULL pointer to the new counter enable map. Must have the
+ *               same metadata as the accumulator.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * If this function fails for some unexpected reason (i.e. anything other than
+ * invalid args), then the accumulator will be put into the error state until
+ * the parent context is next disabled.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_accumulator_set_counters(
+	struct kbase_hwcnt_accumulator *accum,
+	const struct kbase_hwcnt_enable_map *new_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_accumulator_dump() - Perform a dump of the currently enabled
+ *                                  counters.
+ * @accum:       Non-NULL pointer to the hardware counter accumulator.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * If this function fails for some unexpected reason (i.e. anything other than
+ * invalid args), then the accumulator will be put into the error state until
+ * the parent context is next disabled.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_accumulator_dump(
+	struct kbase_hwcnt_accumulator *accum,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+#endif /* _KBASE_HWCNT_ACCUMULATOR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend.h
new file mode 100644
index 0000000..b7aa0e1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend.h
@@ -0,0 +1,217 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Virtual interface for hardware counter backends.
+ */
+
+#ifndef _KBASE_HWCNT_BACKEND_H_
+#define _KBASE_HWCNT_BACKEND_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_metadata;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/*
+ * struct kbase_hwcnt_backend_info - Opaque pointer to information used to
+ *                                   create an instance of a hardware counter
+ *                                   backend.
+ */
+struct kbase_hwcnt_backend_info;
+
+/*
+ * struct kbase_hwcnt_backend - Opaque pointer to a hardware counter backend,
+ *                              used to perform dumps.
+ */
+struct kbase_hwcnt_backend;
+
+/**
+ * typedef kbase_hwcnt_backend_init_fn - Initialise a counter backend.
+ * @info:        Non-NULL pointer to backend info.
+ * @out_backend: Non-NULL pointer to where backend is stored on success.
+ *
+ * All uses of the created hardware counter backend must be externally
+ * synchronised.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_init_fn)(
+	const struct kbase_hwcnt_backend_info *info,
+	struct kbase_hwcnt_backend **out_backend);
+
+/**
+ * typedef kbase_hwcnt_backend_term_fn - Terminate a counter backend.
+ * @backend: Pointer to backend to be terminated.
+ */
+typedef void (*kbase_hwcnt_backend_term_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_timestamp_ns_fn - Get the current backend
+ *                                               timestamp.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * Return: Backend timestamp in nanoseconds.
+ */
+typedef u64 (*kbase_hwcnt_backend_timestamp_ns_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_enable_fn - Start counter dumping with the
+ *                                              backend.
+ * @backend:    Non-NULL pointer to backend.
+ * @enable_map: Non-NULL pointer to enable map specifying enabled counters.
+ *
+ * The enable_map must have been created using the interface's metadata.
+ * If the backend has already been enabled, an error is returned.
+ *
+ * May be called in an atomic context.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_enable_fn)(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_enable_nolock_fn - Start counter dumping
+ *                                                     with the backend.
+ * @backend:    Non-NULL pointer to backend.
+ * @enable_map: Non-NULL pointer to enable map specifying enabled counters.
+ *
+ * Exactly the same as kbase_hwcnt_backend_dump_enable_fn(), except must be
+ * called in an atomic context with the spinlock documented by the specific
+ * backend interface held.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_enable_nolock_fn)(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_disable_fn - Disable counter dumping with
+ *                                               the backend.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is already disabled, does nothing.
+ * Any counter values not yet dumped since the last dump get will be lost.
+ */
+typedef void (*kbase_hwcnt_backend_dump_disable_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_clear_fn - Reset all the current undumped
+ *                                             counters.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is not enabled, returns an error.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_clear_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_request_fn - Request an asynchronous counter
+ *                                               dump.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is not enabled or another dump is already in progress,
+ * returns an error.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_request_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_wait_fn - Wait until the last requested
+ *                                            counter dump has completed.
+ * @backend: Non-NULL pointer to backend.
+ *
+ * If the backend is not enabled, returns an error.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_wait_fn)(
+	struct kbase_hwcnt_backend *backend);
+
+/**
+ * typedef kbase_hwcnt_backend_dump_get_fn - Copy or accumulate the enabled
+ *                                           counters dumped after the last dump
+ *                                           request into the dump buffer.
+ * @backend:     Non-NULL pointer to backend.
+ * @dump_buffer: Non-NULL pointer to destination dump buffer.
+ * @enable_map:  Non-NULL pointer to enable map specifying enabled values.
+ * @accumulate:  True if counters should be accumulated into dump_buffer, rather
+ *               than copied.
+ *
+ * If the backend is not enabled, returns an error.
+ * If a dump is in progress (i.e. dump_wait has not yet returned successfully)
+ * then the resultant contents of the dump buffer will be undefined.
+ *
+ * Return: 0 on success, else error code.
+ */
+typedef int (*kbase_hwcnt_backend_dump_get_fn)(
+	struct kbase_hwcnt_backend *backend,
+	struct kbase_hwcnt_dump_buffer *dump_buffer,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	bool accumulate);
+
+/**
+ * struct kbase_hwcnt_backend_interface - Hardware counter backend virtual
+ *                                        interface.
+ * @metadata:           Immutable hardware counter metadata.
+ * @info:               Immutable info used to initialise an instance of the
+ *                      backend.
+ * @init:               Function ptr to initialise an instance of the backend.
+ * @term:               Function ptr to terminate an instance of the backend.
+ * @timestamp_ns:       Function ptr to get the current backend timestamp.
+ * @dump_enable:        Function ptr to enable dumping.
+ * @dump_enable_nolock: Function ptr to enable dumping while the
+ *                      backend-specific spinlock is already held.
+ * @dump_disable:       Function ptr to disable dumping.
+ * @dump_clear:         Function ptr to clear counters.
+ * @dump_request:       Function ptr to request a dump.
+ * @dump_wait:          Function ptr to wait for a dump to complete.
+ * @dump_get:           Function ptr to copy or accumulate dump into a dump
+ *                      buffer.
+ */
+struct kbase_hwcnt_backend_interface {
+	const struct kbase_hwcnt_metadata *metadata;
+	const struct kbase_hwcnt_backend_info *info;
+	kbase_hwcnt_backend_init_fn init;
+	kbase_hwcnt_backend_term_fn term;
+	kbase_hwcnt_backend_timestamp_ns_fn timestamp_ns;
+	kbase_hwcnt_backend_dump_enable_fn dump_enable;
+	kbase_hwcnt_backend_dump_enable_nolock_fn dump_enable_nolock;
+	kbase_hwcnt_backend_dump_disable_fn dump_disable;
+	kbase_hwcnt_backend_dump_clear_fn dump_clear;
+	kbase_hwcnt_backend_dump_request_fn dump_request;
+	kbase_hwcnt_backend_dump_wait_fn dump_wait;
+	kbase_hwcnt_backend_dump_get_fn dump_get;
+};
+
+#endif /* _KBASE_HWCNT_BACKEND_H_ */
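The typedefs above compose into the following rough usage sketch for a backend client performing one synchronous dump; iface is assumed to have already been populated by a concrete backend (for example the GPU backend added below), and enable_map/dump_buf to have been built from iface->metadata. It is illustrative only and not part of the patch.

/* Illustrative sketch: one synchronous dump through the backend vtable. */
static int example_backend_dump(const struct kbase_hwcnt_backend_interface *iface,
				const struct kbase_hwcnt_enable_map *enable_map,
				struct kbase_hwcnt_dump_buffer *dump_buf)
{
	struct kbase_hwcnt_backend *backend;
	int err;

	err = iface->init(iface->info, &backend);
	if (err)
		return err;

	err = iface->dump_enable(backend, enable_map);
	if (!err) {
		err = iface->dump_request(backend);
		if (!err)
			err = iface->dump_wait(backend);
		if (!err)
			err = iface->dump_get(backend, dump_buf,
					      enable_map, false);
		iface->dump_disable(backend);
	}

	iface->term(backend);
	return err;
}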
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.c
new file mode 100644
index 0000000..3e93443
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.c
@@ -0,0 +1,538 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_backend_gpu.h"
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase.h"
+#include "mali_kbase_pm_policy.h"
+#include "mali_kbase_hwaccess_instr.h"
+#include "mali_kbase_tlstream.h"
+#ifdef CONFIG_MALI_NO_MALI
+#include "backend/gpu/mali_kbase_model_dummy.h"
+#endif
+
+/**
+ * struct kbase_hwcnt_backend_gpu_info - Information used to create an instance
+ *                                       of a GPU hardware counter backend.
+ * @kbdev:         KBase device.
+ * @use_secondary: True if secondary performance counters should be used,
+ *                 else false. Ignored if secondary counters are not supported.
+ * @metadata:      Hardware counter metadata.
+ * @dump_bytes:    Bytes of GPU memory required to perform a
+ *                 hardware counter dump.
+ */
+struct kbase_hwcnt_backend_gpu_info {
+	struct kbase_device *kbdev;
+	bool use_secondary;
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t dump_bytes;
+};
+
+/**
+ * struct kbase_hwcnt_backend_gpu - Instance of a GPU hardware counter backend.
+ * @info:         Info used to create the backend.
+ * @kctx:         KBase context used for GPU memory allocation and
+ *                counter dumping.
+ * @kctx_element: List element used to add kctx to device context list.
+ * @gpu_dump_va:  GPU hardware counter dump buffer virtual address.
+ * @cpu_dump_va:  CPU mapping of gpu_dump_va.
+ * @vmap:         Dump buffer vmap.
+ * @enabled:      True if dumping has been enabled, else false.
+ */
+struct kbase_hwcnt_backend_gpu {
+	const struct kbase_hwcnt_backend_gpu_info *info;
+	struct kbase_context *kctx;
+	struct kbasep_kctx_list_element *kctx_element;
+	u64 gpu_dump_va;
+	void *cpu_dump_va;
+	struct kbase_vmap_struct *vmap;
+	bool enabled;
+};
+
+/* GPU backend implementation of kbase_hwcnt_backend_timestamp_ns_fn */
+static u64 kbasep_hwcnt_backend_gpu_timestamp_ns(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct timespec64 ts;
+
+	(void)backend;
+	ktime_get_raw_ts64(&ts);
+	return (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_enable_nolock_fn */
+static int kbasep_hwcnt_backend_gpu_dump_enable_nolock(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct kbase_hwcnt_physical_enable_map phys;
+	struct kbase_instr_hwcnt_enable enable;
+
+	if (!backend_gpu || !enable_map || backend_gpu->enabled ||
+	    (enable_map->metadata != backend_gpu->info->metadata))
+		return -EINVAL;
+
+	kctx = backend_gpu->kctx;
+	kbdev = backend_gpu->kctx->kbdev;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_hwcnt_gpu_enable_map_to_physical(&phys, enable_map);
+
+	enable.jm_bm = phys.jm_bm;
+	enable.shader_bm = phys.shader_bm;
+	enable.tiler_bm = phys.tiler_bm;
+	enable.mmu_l2_bm = phys.mmu_l2_bm;
+	enable.use_secondary = backend_gpu->info->use_secondary;
+	enable.dump_buffer = backend_gpu->gpu_dump_va;
+	enable.dump_buffer_bytes = backend_gpu->info->dump_bytes;
+
+	errcode = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &enable);
+	if (errcode)
+		goto error;
+
+	backend_gpu->enabled = true;
+
+	return 0;
+error:
+	return errcode;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_enable_fn */
+static int kbasep_hwcnt_backend_gpu_dump_enable(
+	struct kbase_hwcnt_backend *backend,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	unsigned long flags;
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+	struct kbase_device *kbdev;
+
+	if (!backend_gpu)
+		return -EINVAL;
+
+	kbdev = backend_gpu->kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	errcode = kbasep_hwcnt_backend_gpu_dump_enable_nolock(
+		backend, enable_map);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return errcode;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_disable_fn */
+static void kbasep_hwcnt_backend_gpu_dump_disable(
+	struct kbase_hwcnt_backend *backend)
+{
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (WARN_ON(!backend_gpu) || !backend_gpu->enabled)
+		return;
+
+	errcode = kbase_instr_hwcnt_disable_internal(backend_gpu->kctx);
+	WARN_ON(errcode);
+
+	backend_gpu->enabled = false;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_clear_fn */
+static int kbasep_hwcnt_backend_gpu_dump_clear(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !backend_gpu->enabled)
+		return -EINVAL;
+
+	return kbase_instr_hwcnt_clear(backend_gpu->kctx);
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_request_fn */
+static int kbasep_hwcnt_backend_gpu_dump_request(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !backend_gpu->enabled)
+		return -EINVAL;
+
+	return kbase_instr_hwcnt_request_dump(backend_gpu->kctx);
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_wait_fn */
+static int kbasep_hwcnt_backend_gpu_dump_wait(
+	struct kbase_hwcnt_backend *backend)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !backend_gpu->enabled)
+		return -EINVAL;
+
+	return kbase_instr_hwcnt_wait_for_dump(backend_gpu->kctx);
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_dump_get_fn */
+static int kbasep_hwcnt_backend_gpu_dump_get(
+	struct kbase_hwcnt_backend *backend,
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map,
+	bool accumulate)
+{
+	struct kbase_hwcnt_backend_gpu *backend_gpu =
+		(struct kbase_hwcnt_backend_gpu *)backend;
+
+	if (!backend_gpu || !dst || !dst_enable_map ||
+	    (backend_gpu->info->metadata != dst->metadata) ||
+	    (dst_enable_map->metadata != dst->metadata))
+		return -EINVAL;
+
+	/* Invalidate the kernel buffer before reading from it. */
+	kbase_sync_mem_regions(
+		backend_gpu->kctx, backend_gpu->vmap, KBASE_SYNC_TO_CPU);
+
+	return kbase_hwcnt_gpu_dump_get(
+		dst, backend_gpu->cpu_dump_va, dst_enable_map, accumulate);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_dump_alloc() - Allocate a GPU dump buffer.
+ * @info:        Non-NULL pointer to GPU backend info.
+ * @kctx:        Non-NULL pointer to kbase context.
+ * @gpu_dump_va: Non-NULL pointer to where GPU dump buffer virtual address
+ *               is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_dump_alloc(
+	const struct kbase_hwcnt_backend_gpu_info *info,
+	struct kbase_context *kctx,
+	u64 *gpu_dump_va)
+{
+	struct kbase_va_region *reg;
+	u64 flags;
+	u64 nr_pages;
+
+	WARN_ON(!info);
+	WARN_ON(!kctx);
+	WARN_ON(!gpu_dump_va);
+
+	flags = BASE_MEM_PROT_CPU_RD |
+		BASE_MEM_PROT_GPU_WR |
+		BASE_MEM_PERMANENT_KERNEL_MAPPING |
+		BASE_MEM_CACHED_CPU;
+
+	if (kctx->kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE)
+		flags |= BASE_MEM_UNCACHED_GPU;
+
+	nr_pages = PFN_UP(info->dump_bytes);
+
+	reg = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags, gpu_dump_va);
+
+	if (!reg)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_dump_free() - Free an allocated GPU dump buffer.
+ * @kctx:        Non-NULL pointer to kbase context.
+ * @gpu_dump_va: GPU dump buffer virtual address.
+ */
+static void kbasep_hwcnt_backend_gpu_dump_free(
+	struct kbase_context *kctx,
+	u64 gpu_dump_va)
+{
+	WARN_ON(!kctx);
+	if (gpu_dump_va)
+		kbase_mem_free(kctx, gpu_dump_va);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_destroy() - Destroy a GPU backend.
+ * @backend: Pointer to GPU backend to destroy.
+ *
+ * Can be safely called on a backend in any state of partial construction.
+ */
+static void kbasep_hwcnt_backend_gpu_destroy(
+	struct kbase_hwcnt_backend_gpu *backend)
+{
+	if (!backend)
+		return;
+
+	if (backend->kctx) {
+		struct kbase_context *kctx = backend->kctx;
+		struct kbase_device *kbdev = kctx->kbdev;
+
+		if (backend->cpu_dump_va)
+			kbase_phy_alloc_mapping_put(kctx, backend->vmap);
+
+		if (backend->gpu_dump_va)
+			kbasep_hwcnt_backend_gpu_dump_free(
+				kctx, backend->gpu_dump_va);
+
+		if (backend->kctx_element) {
+			mutex_lock(&kbdev->kctx_list_lock);
+
+			KBASE_TLSTREAM_TL_DEL_CTX(kctx);
+			list_del(&backend->kctx_element->link);
+
+			mutex_unlock(&kbdev->kctx_list_lock);
+			kfree(backend->kctx_element);
+		}
+
+		kbasep_js_release_privileged_ctx(kbdev, kctx);
+		kbase_destroy_context(kctx);
+	}
+
+	kfree(backend);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_create() - Create a GPU backend.
+ * @info:        Non-NULL pointer to backend info.
+ * @out_backend: Non-NULL pointer to where backend is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_create(
+	const struct kbase_hwcnt_backend_gpu_info *info,
+	struct kbase_hwcnt_backend_gpu **out_backend)
+{
+	int errcode;
+	struct kbase_device *kbdev;
+	struct kbase_hwcnt_backend_gpu *backend = NULL;
+
+	WARN_ON(!info);
+	WARN_ON(!out_backend);
+
+	kbdev = info->kbdev;
+
+	backend = kzalloc(sizeof(*backend), GFP_KERNEL);
+	if (!backend)
+		goto alloc_error;
+
+	backend->info = info;
+
+	backend->kctx = kbase_create_context(kbdev, true);
+	if (!backend->kctx)
+		goto alloc_error;
+
+	kbasep_js_schedule_privileged_ctx(kbdev, backend->kctx);
+
+	backend->kctx_element = kzalloc(
+		sizeof(*backend->kctx_element), GFP_KERNEL);
+	if (!backend->kctx_element)
+		goto alloc_error;
+
+	backend->kctx_element->kctx = backend->kctx;
+
+	/* Add kernel context to list of contexts associated with device. */
+	mutex_lock(&kbdev->kctx_list_lock);
+
+	list_add(&backend->kctx_element->link, &kbdev->kctx_list);
+	/* Fire tracepoint while lock is held, to ensure tracepoint is not
+	 * created in both body and summary stream
+	 */
+	KBASE_TLSTREAM_TL_NEW_CTX(
+		backend->kctx, backend->kctx->id, (u32)(backend->kctx->tgid));
+
+	mutex_unlock(&kbdev->kctx_list_lock);
+
+	errcode = kbasep_hwcnt_backend_gpu_dump_alloc(
+		info, backend->kctx, &backend->gpu_dump_va);
+	if (errcode)
+		goto error;
+
+	backend->cpu_dump_va = kbase_phy_alloc_mapping_get(backend->kctx,
+		backend->gpu_dump_va, &backend->vmap);
+	if (!backend->cpu_dump_va)
+		goto alloc_error;
+
+#ifdef CONFIG_MALI_NO_MALI
+	/* The dummy model needs the CPU mapping. */
+	gpu_model_set_dummy_prfcnt_base_cpu(backend->cpu_dump_va);
+#endif
+
+	*out_backend = backend;
+	return 0;
+
+alloc_error:
+	errcode = -ENOMEM;
+error:
+	kbasep_hwcnt_backend_gpu_destroy(backend);
+	return errcode;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_init_fn */
+static int kbasep_hwcnt_backend_gpu_init(
+	const struct kbase_hwcnt_backend_info *info,
+	struct kbase_hwcnt_backend **out_backend)
+{
+	int errcode;
+	struct kbase_hwcnt_backend_gpu *backend = NULL;
+
+	if (!info || !out_backend)
+		return -EINVAL;
+
+	errcode = kbasep_hwcnt_backend_gpu_create(
+		(const struct kbase_hwcnt_backend_gpu_info *) info, &backend);
+	if (errcode)
+		return errcode;
+
+	*out_backend = (struct kbase_hwcnt_backend *)backend;
+
+	return 0;
+}
+
+/* GPU backend implementation of kbase_hwcnt_backend_term_fn */
+static void kbasep_hwcnt_backend_gpu_term(struct kbase_hwcnt_backend *backend)
+{
+	if (!backend)
+		return;
+
+	kbasep_hwcnt_backend_gpu_dump_disable(backend);
+	kbasep_hwcnt_backend_gpu_destroy(
+		(struct kbase_hwcnt_backend_gpu *)backend);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_info_destroy() - Destroy a GPU backend info.
+ * @info: Pointer to info to destroy.
+ *
+ * Can be safely called on a backend info in any state of partial construction.
+ */
+static void kbasep_hwcnt_backend_gpu_info_destroy(
+	const struct kbase_hwcnt_backend_gpu_info *info)
+{
+	if (!info)
+		return;
+
+	kbase_hwcnt_gpu_metadata_destroy(info->metadata);
+	kfree(info);
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_info_create() - Create a GPU backend info.
+ * @kbdev: Non-NULL pointer to kbase device.
+ * @out_info: Non-NULL pointer to where info is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_info_create(
+	struct kbase_device *kbdev,
+	const struct kbase_hwcnt_backend_gpu_info **out_info)
+{
+	int errcode = -ENOMEM;
+	struct kbase_hwcnt_gpu_info hwcnt_gpu_info;
+	struct kbase_hwcnt_backend_gpu_info *info = NULL;
+
+	WARN_ON(!kbdev);
+	WARN_ON(!out_info);
+
+	errcode = kbase_hwcnt_gpu_info_init(kbdev, &hwcnt_gpu_info);
+	if (errcode)
+		return errcode;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto error;
+
+	info->kbdev = kbdev;
+
+#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY
+	info->use_secondary = true;
+#else
+	info->use_secondary = false;
+#endif
+
+	errcode = kbase_hwcnt_gpu_metadata_create(
+		&hwcnt_gpu_info, info->use_secondary,
+		&info->metadata,
+		&info->dump_bytes);
+	if (errcode)
+		goto error;
+
+	*out_info = info;
+
+	return 0;
+error:
+	kbasep_hwcnt_backend_gpu_info_destroy(info);
+	return errcode;
+}
+
+int kbase_hwcnt_backend_gpu_create(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_backend_interface *iface)
+{
+	int errcode;
+	const struct kbase_hwcnt_backend_gpu_info *info = NULL;
+
+	if (!kbdev || !iface)
+		return -EINVAL;
+
+	errcode = kbasep_hwcnt_backend_gpu_info_create(kbdev, &info);
+
+	if (errcode)
+		return errcode;
+
+	iface->metadata = info->metadata;
+	iface->info = (struct kbase_hwcnt_backend_info *)info;
+	iface->init = kbasep_hwcnt_backend_gpu_init;
+	iface->term = kbasep_hwcnt_backend_gpu_term;
+	iface->timestamp_ns = kbasep_hwcnt_backend_gpu_timestamp_ns;
+	iface->dump_enable = kbasep_hwcnt_backend_gpu_dump_enable;
+	iface->dump_enable_nolock = kbasep_hwcnt_backend_gpu_dump_enable_nolock;
+	iface->dump_disable = kbasep_hwcnt_backend_gpu_dump_disable;
+	iface->dump_clear = kbasep_hwcnt_backend_gpu_dump_clear;
+	iface->dump_request = kbasep_hwcnt_backend_gpu_dump_request;
+	iface->dump_wait = kbasep_hwcnt_backend_gpu_dump_wait;
+	iface->dump_get = kbasep_hwcnt_backend_gpu_dump_get;
+
+	return 0;
+}
+
+void kbase_hwcnt_backend_gpu_destroy(
+	struct kbase_hwcnt_backend_interface *iface)
+{
+	if (!iface)
+		return;
+
+	kbasep_hwcnt_backend_gpu_info_destroy(
+		(const struct kbase_hwcnt_backend_gpu_info *)iface->info);
+	memset(iface, 0, sizeof(*iface));
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.h
new file mode 100644
index 0000000..7712f14
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.h
@@ -0,0 +1,61 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Concrete implementation of the mali_kbase_hwcnt_backend interface for the
+ * GPU backend.
+ */
+
+#ifndef _KBASE_HWCNT_BACKEND_GPU_H_
+#define _KBASE_HWCNT_BACKEND_GPU_H_
+
+#include "mali_kbase_hwcnt_backend.h"
+
+struct kbase_device;
+
+/**
+ * kbase_hwcnt_backend_gpu_create() - Create a GPU hardware counter backend
+ *                                    interface.
+ * @kbdev: Non-NULL pointer to kbase device.
+ * @iface: Non-NULL pointer to backend interface structure that is filled in
+ *         on creation success.
+ *
+ * Calls to iface->dump_enable_nolock() require kbdev->hwaccess_lock held.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_backend_gpu_create(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_backend_interface *iface);
+
+/**
+ * kbase_hwcnt_backend_gpu_destroy() - Destroy a GPU hardware counter backend
+ *                                     interface.
+ * @iface: Pointer to interface to destroy.
+ *
+ * Can be safely called on an all-zeroed interface, or on an already destroyed
+ * interface.
+ */
+void kbase_hwcnt_backend_gpu_destroy(
+	struct kbase_hwcnt_backend_interface *iface);
+
+#endif /* _KBASE_HWCNT_BACKEND_GPU_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_context.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_context.h
new file mode 100644
index 0000000..bc50ad1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_context.h
@@ -0,0 +1,119 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter context API.
+ */
+
+#ifndef _KBASE_HWCNT_CONTEXT_H_
+#define _KBASE_HWCNT_CONTEXT_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_backend_interface;
+struct kbase_hwcnt_context;
+
+/**
+ * kbase_hwcnt_context_init() - Initialise a hardware counter context.
+ * @iface:    Non-NULL pointer to a hardware counter backend interface.
+ * @out_hctx: Non-NULL pointer to where the pointer to the created context will
+ *            be stored on success.
+ *
+ * On creation, the disable count of the context will be 0.
+ * A hardware counter accumulator can be acquired using a created context.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_context_init(
+	const struct kbase_hwcnt_backend_interface *iface,
+	struct kbase_hwcnt_context **out_hctx);
+
+/**
+ * kbase_hwcnt_context_term() - Terminate a hardware counter context.
+ * @hctx: Pointer to context to be terminated.
+ */
+void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_metadata() - Get the hardware counter metadata used by
+ *                                  the context, so related counter data
+ *                                  structures can be created.
+ * @hctx: Non-NULL pointer to the hardware counter context.
+ *
+ * Return: Non-NULL pointer to metadata, or NULL on error.
+ */
+const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(
+	struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_disable() - Increment the disable count of the context.
+ * @hctx: Pointer to the hardware counter context.
+ *
+ * If a call to this function increments the disable count from 0 to 1, and
+ * an accumulator has been acquired, then a counter dump will be performed
+ * before counters are disabled via the backend interface.
+ *
+ * Subsequent dumps via the accumulator while counters are disabled will first
+ * return the accumulated dump, then will return dumps with zeroed counters.
+ *
+ * After this function call returns, it is guaranteed that counters will not be
+ * enabled via the backend interface.
+ */
+void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_disable_atomic() - Increment the disable count of the
+ *                                        context if possible in an atomic
+ *                                        context.
+ * @hctx: Pointer to the hardware counter context.
+ *
+ * This function will only succeed if hardware counters are effectively already
+ * disabled, i.e. there is no accumulator, the disable count is already
+ * non-zero, or the accumulator has no counters set.
+ *
+ * After this function call returns true, it is guaranteed that counters will
+ * not be enabled via the backend interface.
+ *
+ * Return: True if the disable count was incremented, else False.
+ */
+bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx);
+
+/**
+ * kbase_hwcnt_context_enable() - Decrement the disable count of the context.
+ * @hctx: Pointer to the hardware counter context.
+ *
+ * If a call to this function decrements the disable count from 1 to 0, and
+ * an accumulator has been acquired, then counters will be re-enabled via the
+ * backend interface.
+ *
+ * If an accumulator has been acquired and enabling counters fails for some
+ * reason, the accumulator will be placed into an error state.
+ *
+ * It is only valid to call this function once for each prior call to
+ * kbase_hwcnt_context_disable() that has returned.
+ *
+ * The spinlock documented in the backend interface that was passed in to
+ * kbase_hwcnt_context_init() must be held before calling this function.
+ */
+void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx);
+
+#endif /* _KBASE_HWCNT_CONTEXT_H_ */
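Taken together with the backend interface, a plausible bring-up sequence on a kbase device is sketched below (illustrative only, not from the patch); the hwaccess_lock usage around kbase_hwcnt_context_enable() follows the spinlock requirement documented above for the GPU backend.

/* Illustrative sketch: create the GPU backend, wrap it in a context, and
 * pair kbase_hwcnt_context_disable() with kbase_hwcnt_context_enable().
 */
static int example_hwcnt_context_bringup(struct kbase_device *kbdev)
{
	struct kbase_hwcnt_backend_interface iface;
	struct kbase_hwcnt_context *hctx;
	unsigned long flags;
	int err;

	err = kbase_hwcnt_backend_gpu_create(kbdev, &iface);
	if (err)
		return err;

	err = kbase_hwcnt_context_init(&iface, &hctx);
	if (err) {
		kbase_hwcnt_backend_gpu_destroy(&iface);
		return err;
	}

	/* Counters are guaranteed to stay off until the matching enable. */
	kbase_hwcnt_context_disable(hctx);
	/* ... work that must run with hardware counters disabled ... */

	/* Re-enable with the backend's documented spinlock held. */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_hwcnt_context_enable(hctx);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	kbase_hwcnt_context_term(hctx);
	kbase_hwcnt_backend_gpu_destroy(&iface);
	return 0;
}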
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.c
new file mode 100644
index 0000000..647d3ec
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.c
@@ -0,0 +1,716 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase.h"
+#ifdef CONFIG_MALI_NO_MALI
+#include "backend/gpu/mali_kbase_model_dummy.h"
+#endif
+
+#define KBASE_HWCNT_V4_BLOCKS_PER_GROUP 8
+#define KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP 4
+#define KBASE_HWCNT_V4_MAX_GROUPS \
+	(KBASE_HWCNT_AVAIL_MASK_BITS / KBASE_HWCNT_V4_BLOCKS_PER_GROUP)
+#define KBASE_HWCNT_V4_HEADERS_PER_BLOCK 4
+#define KBASE_HWCNT_V4_COUNTERS_PER_BLOCK 60
+#define KBASE_HWCNT_V4_VALUES_PER_BLOCK \
+	(KBASE_HWCNT_V4_HEADERS_PER_BLOCK + KBASE_HWCNT_V4_COUNTERS_PER_BLOCK)
+/* Index of the PRFCNT_EN header into a V4 counter block */
+#define KBASE_HWCNT_V4_PRFCNT_EN_HEADER 2
+
+#define KBASE_HWCNT_V5_BLOCK_TYPE_COUNT 4
+#define KBASE_HWCNT_V5_HEADERS_PER_BLOCK 4
+#define KBASE_HWCNT_V5_COUNTERS_PER_BLOCK 60
+#define KBASE_HWCNT_V5_VALUES_PER_BLOCK \
+	(KBASE_HWCNT_V5_HEADERS_PER_BLOCK + KBASE_HWCNT_V5_COUNTERS_PER_BLOCK)
+/* Index of the PRFCNT_EN header into a V5 counter block */
+#define KBASE_HWCNT_V5_PRFCNT_EN_HEADER 2
+
+/**
+ * kbasep_hwcnt_backend_gpu_metadata_v4_create() - Create hardware counter
+ *                                                 metadata for a v4 GPU.
+ * @v4_info:  Non-NULL pointer to hwcnt info for a v4 GPU.
+ * @metadata: Non-NULL pointer to where created metadata is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_metadata_v4_create(
+	const struct kbase_hwcnt_gpu_v4_info *v4_info,
+	const struct kbase_hwcnt_metadata **metadata)
+{
+	size_t grp;
+	int errcode = -ENOMEM;
+	struct kbase_hwcnt_description desc;
+	struct kbase_hwcnt_group_description *grps;
+	size_t avail_mask_bit;
+
+	WARN_ON(!v4_info);
+	WARN_ON(!metadata);
+
+	/* Check if there are enough bits in the availability mask to represent
+	 * all the hardware counter blocks in the system.
+	 */
+	if (v4_info->cg_count > KBASE_HWCNT_V4_MAX_GROUPS)
+		return -EINVAL;
+
+	grps = kcalloc(v4_info->cg_count, sizeof(*grps), GFP_KERNEL);
+	if (!grps)
+		goto clean_up;
+
+	desc.grp_cnt = v4_info->cg_count;
+	desc.grps = grps;
+
+	for (grp = 0; grp < v4_info->cg_count; grp++) {
+		size_t blk;
+		size_t sc;
+		const u64 core_mask = v4_info->cgs[grp].core_mask;
+		struct kbase_hwcnt_block_description *blks = kcalloc(
+			KBASE_HWCNT_V4_BLOCKS_PER_GROUP,
+			sizeof(*blks),
+			GFP_KERNEL);
+
+		if (!blks)
+			goto clean_up;
+
+		grps[grp].type = KBASE_HWCNT_GPU_GROUP_TYPE_V4;
+		grps[grp].blk_cnt = KBASE_HWCNT_V4_BLOCKS_PER_GROUP;
+		grps[grp].blks = blks;
+
+		for (blk = 0; blk < KBASE_HWCNT_V4_BLOCKS_PER_GROUP; blk++) {
+			blks[blk].inst_cnt = 1;
+			blks[blk].hdr_cnt =
+				KBASE_HWCNT_V4_HEADERS_PER_BLOCK;
+			blks[blk].ctr_cnt =
+				KBASE_HWCNT_V4_COUNTERS_PER_BLOCK;
+		}
+
+		for (sc = 0; sc < KBASE_HWCNT_V4_SC_BLOCKS_PER_GROUP; sc++) {
+			blks[sc].type = core_mask & (1ull << sc) ?
+				KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER :
+				KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED;
+		}
+
+		blks[4].type = KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER;
+		blks[5].type = KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2;
+		blks[6].type = KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED;
+		blks[7].type = (grp == 0) ?
+			KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM :
+			KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED;
+
+		WARN_ON(KBASE_HWCNT_V4_BLOCKS_PER_GROUP != 8);
+	}
+
+	/* Initialise the availability mask */
+	desc.avail_mask = 0;
+	avail_mask_bit = 0;
+
+	for (grp = 0; grp < desc.grp_cnt; grp++) {
+		size_t blk;
+		const struct kbase_hwcnt_block_description *blks =
+			desc.grps[grp].blks;
+		for (blk = 0; blk < desc.grps[grp].blk_cnt; blk++) {
+			WARN_ON(blks[blk].inst_cnt != 1);
+			if (blks[blk].type !=
+			    KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED)
+				desc.avail_mask |= (1ull << avail_mask_bit);
+
+			avail_mask_bit++;
+		}
+	}
+
+	errcode = kbase_hwcnt_metadata_create(&desc, metadata);
+
+	/* Always clean up, as metadata will make a copy of the input args */
+clean_up:
+	if (grps) {
+		for (grp = 0; grp < v4_info->cg_count; grp++)
+			kfree(grps[grp].blks);
+		kfree(grps);
+	}
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_v4_dump_bytes() - Get the raw dump buffer size for a
+ *                                            V4 GPU.
+ * @v4_info: Non-NULL pointer to hwcnt info for a v4 GPU.
+ *
+ * Return: Size of buffer the V4 GPU needs to perform a counter dump.
+ */
+static size_t kbasep_hwcnt_backend_gpu_v4_dump_bytes(
+	const struct kbase_hwcnt_gpu_v4_info *v4_info)
+{
+	return v4_info->cg_count *
+		KBASE_HWCNT_V4_BLOCKS_PER_GROUP *
+		KBASE_HWCNT_V4_VALUES_PER_BLOCK *
+		KBASE_HWCNT_VALUE_BYTES;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_metadata_v5_create() - Create hardware counter
+ *                                                 metadata for a v5 GPU.
+ * @v5_info:       Non-NULL pointer to hwcnt info for a v5 GPU.
+ * @use_secondary: True if secondary performance counters should be used, else
+ *                 false. Ignored if secondary counters are not supported.
+ * @metadata:      Non-NULL pointer to where created metadata is stored
+ *                 on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_backend_gpu_metadata_v5_create(
+	const struct kbase_hwcnt_gpu_v5_info *v5_info,
+	bool use_secondary,
+	const struct kbase_hwcnt_metadata **metadata)
+{
+	struct kbase_hwcnt_description desc;
+	struct kbase_hwcnt_group_description group;
+	struct kbase_hwcnt_block_description
+		blks[KBASE_HWCNT_V5_BLOCK_TYPE_COUNT];
+	size_t non_sc_block_count;
+	size_t sc_block_count;
+
+	WARN_ON(!v5_info);
+	WARN_ON(!metadata);
+
+	/* Calculate number of block instances that aren't shader cores */
+	non_sc_block_count = 2 + v5_info->l2_count;
+	/* Calculate number of block instances that are shader cores */
+	sc_block_count = fls64(v5_info->core_mask);
+
+	/*
+	 * A system can have up to 64 shader cores, but the 64-bit
+	 * availability mask can't physically represent that many cores as well
+	 * as the other hardware blocks.
+	 * Error out if there are more blocks than our implementation can
+	 * support.
+	 */
+	if ((sc_block_count + non_sc_block_count) > KBASE_HWCNT_AVAIL_MASK_BITS)
+		return -EINVAL;
+
+	/* One Job Manager block */
+	blks[0].type = KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM;
+	blks[0].inst_cnt = 1;
+	blks[0].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[0].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	/* One Tiler block */
+	blks[1].type = KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER;
+	blks[1].inst_cnt = 1;
+	blks[1].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[1].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	/* l2_count memsys blks */
+	blks[2].type = use_secondary ?
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2 :
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS;
+	blks[2].inst_cnt = v5_info->l2_count;
+	blks[2].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[2].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	/*
+	 * There are as many shader cores in the system as there are bits set in
+	 * the core mask. However, the dump buffer memory requirements need to
+	 * take into account the fact that the core mask may be non-contiguous.
+	 *
+	 * For example, a system with a core mask of 0b1011 has the same dump
+	 * buffer memory requirements as a system with 0b1111, but requires more
+	 * memory than a system with 0b0111. However, core 2 of the system with
+	 * 0b1011 doesn't physically exist, and the dump buffer memory that
+	 * accounts for that core will never be written to when we do a counter
+	 * dump.
+	 *
+	 * We find the core mask's last set bit to determine the memory
+	 * requirements, and embed the core mask into the availability mask so
+	 * we can determine later which shader cores physically exist.
+	 */
+	blks[3].type = use_secondary ?
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2 :
+		KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC;
+	blks[3].inst_cnt = sc_block_count;
+	blks[3].hdr_cnt = KBASE_HWCNT_V5_HEADERS_PER_BLOCK;
+	blks[3].ctr_cnt = KBASE_HWCNT_V5_COUNTERS_PER_BLOCK;
+
+	WARN_ON(KBASE_HWCNT_V5_BLOCK_TYPE_COUNT != 4);
+
+	group.type = KBASE_HWCNT_GPU_GROUP_TYPE_V5;
+	group.blk_cnt = KBASE_HWCNT_V5_BLOCK_TYPE_COUNT;
+	group.blks = blks;
+
+	desc.grp_cnt = 1;
+	desc.grps = &group;
+
+	/* The JM, Tiler, and L2s are always available, and are before cores */
+	desc.avail_mask = (1ull << non_sc_block_count) - 1;
+	/* Embed the core mask directly in the availability mask */
+	desc.avail_mask |= (v5_info->core_mask << non_sc_block_count);
+
+	return kbase_hwcnt_metadata_create(&desc, metadata);
+}
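As a worked example of the availability mask layout (hypothetical values): with l2_count = 2 and core_mask = 0b1011, non_sc_block_count is 4 and sc_block_count is fls64(0b1011) = 4, so the JM, Tiler and both MEMSYS instances occupy availability bits 0-3 (0xf) and the core mask is shifted in above them, giving desc.avail_mask = 0xf | (0xb << 4) = 0xbf; bit 6 stays clear because core 2 does not physically exist.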
+
+/**
+ * kbasep_hwcnt_backend_gpu_v5_dump_bytes() - Get the raw dump buffer size for a
+ *                                            V5 GPU.
+ * @v5_info: Non-NULL pointer to hwcnt info for a v5 GPU.
+ *
+ * Return: Size of buffer the V5 GPU needs to perform a counter dump.
+ */
+static size_t kbasep_hwcnt_backend_gpu_v5_dump_bytes(
+	const struct kbase_hwcnt_gpu_v5_info *v5_info)
+{
+	WARN_ON(!v5_info);
+	return (2 + v5_info->l2_count + fls64(v5_info->core_mask)) *
+		KBASE_HWCNT_V5_VALUES_PER_BLOCK *
+		KBASE_HWCNT_VALUE_BYTES;
+}
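Continuing the same hypothetical example (l2_count = 2, core_mask = 0b1011), this formula gives (2 + 2 + fls64(0b1011)) * 64 * KBASE_HWCNT_VALUE_BYTES = 8 * 64 * 4 = 2048 bytes, assuming the 4-byte counter values that KBASE_HWCNT_VALUE_BYTES is expected to define in mali_kbase_hwcnt_types.h.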
+
+int kbase_hwcnt_gpu_info_init(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_gpu_info *info)
+{
+	if (!kbdev || !info)
+		return -EINVAL;
+
+#ifdef CONFIG_MALI_NO_MALI
+	/* NO_MALI uses V5 layout, regardless of the underlying platform. */
+	info->type = KBASE_HWCNT_GPU_GROUP_TYPE_V5;
+	info->v5.l2_count = KBASE_DUMMY_MODEL_MAX_MEMSYS_BLOCKS;
+	info->v5.core_mask = (1ull << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1;
+#else
+	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_V4)) {
+		info->type = KBASE_HWCNT_GPU_GROUP_TYPE_V4;
+		info->v4.cg_count = kbdev->gpu_props.num_core_groups;
+		info->v4.cgs = kbdev->gpu_props.props.coherency_info.group;
+	} else {
+		const struct base_gpu_props *props = &kbdev->gpu_props.props;
+		const size_t l2_count = props->l2_props.num_l2_slices;
+		const size_t core_mask =
+			props->coherency_info.group[0].core_mask;
+
+		info->type = KBASE_HWCNT_GPU_GROUP_TYPE_V5;
+		info->v5.l2_count = l2_count;
+		info->v5.core_mask = core_mask;
+	}
+#endif
+	return 0;
+}
+
+int kbase_hwcnt_gpu_metadata_create(
+	const struct kbase_hwcnt_gpu_info *info,
+	bool use_secondary,
+	const struct kbase_hwcnt_metadata **out_metadata,
+	size_t *out_dump_bytes)
+{
+	int errcode;
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t dump_bytes;
+
+	if (!info || !out_metadata || !out_dump_bytes)
+		return -EINVAL;
+
+	switch (info->type) {
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+		dump_bytes = kbasep_hwcnt_backend_gpu_v4_dump_bytes(&info->v4);
+		errcode = kbasep_hwcnt_backend_gpu_metadata_v4_create(
+			&info->v4, &metadata);
+		break;
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+		dump_bytes = kbasep_hwcnt_backend_gpu_v5_dump_bytes(&info->v5);
+		errcode = kbasep_hwcnt_backend_gpu_metadata_v5_create(
+			&info->v5, use_secondary, &metadata);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (errcode)
+		return errcode;
+
+	/*
+	 * Dump abstraction size should be exactly the same size and layout as
+	 * the physical dump size, for backwards compatibility.
+	 */
+	WARN_ON(dump_bytes != metadata->dump_buf_bytes);
+
+	*out_metadata = metadata;
+	*out_dump_bytes = dump_bytes;
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_metadata_create);
+
+void kbase_hwcnt_gpu_metadata_destroy(
+	const struct kbase_hwcnt_metadata *metadata)
+{
+	if (!metadata)
+		return;
+
+	kbase_hwcnt_metadata_destroy(metadata);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_metadata_destroy);
+
+int kbase_hwcnt_gpu_dump_get(
+	struct kbase_hwcnt_dump_buffer *dst,
+	void *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map,
+	bool accumulate)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	const u32 *dump_src;
+	size_t src_offset, grp, blk, blk_inst;
+
+	if (!dst || !src || !dst_enable_map ||
+	    (dst_enable_map->metadata != dst->metadata))
+		return -EINVAL;
+
+	metadata = dst->metadata;
+	dump_src = (const u32 *)src;
+	src_offset = 0;
+
+	kbase_hwcnt_metadata_for_each_block(
+		metadata, grp, blk, blk_inst) {
+		const size_t hdr_cnt =
+			kbase_hwcnt_metadata_block_headers_count(
+				metadata, grp, blk);
+		const size_t ctr_cnt =
+			kbase_hwcnt_metadata_block_counters_count(
+				metadata, grp, blk);
+
+		/* Early out if no values in the dest block are enabled */
+		if (kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst)) {
+			u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+				dst, grp, blk, blk_inst);
+			const u32 *src_blk = dump_src + src_offset;
+
+			if (accumulate) {
+				kbase_hwcnt_dump_buffer_block_accumulate(
+					dst_blk, src_blk, hdr_cnt, ctr_cnt);
+			} else {
+				kbase_hwcnt_dump_buffer_block_copy(
+					dst_blk, src_blk, (hdr_cnt + ctr_cnt));
+			}
+		}
+
+		src_offset += (hdr_cnt + ctr_cnt);
+	}
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_dump_get);
+
+/**
+ * kbasep_hwcnt_backend_gpu_block_map_to_physical() - Convert from a block
+ *                                                    enable map abstraction to
+ *                                                    a physical block enable
+ *                                                    map.
+ * @lo: Low 64 bits of block enable map abstraction.
+ * @hi: High 64 bits of block enable map abstraction.
+ *
+ * The abstraction uses 128 bits to enable 128 block values, whereas the
+ * physical uses just 32 bits, as bit n enables values [n*4, n*4+3].
+ * Therefore, this conversion is lossy.
+ *
+ * Return: 32-bit physical block enable map.
+ */
+static inline u32 kbasep_hwcnt_backend_gpu_block_map_to_physical(
+	u64 lo,
+	u64 hi)
+{
+	u32 phys = 0;
+	u64 dwords[2] = {lo, hi};
+	size_t dword_idx;
+
+	for (dword_idx = 0; dword_idx < 2; dword_idx++) {
+		const u64 dword = dwords[dword_idx];
+		u16 packed = 0;
+
+		size_t hword_bit;
+
+		for (hword_bit = 0; hword_bit < 16; hword_bit++) {
+			const size_t dword_bit = hword_bit * 4;
+			const u16 mask =
+				((dword >> (dword_bit + 0)) & 0x1) |
+				((dword >> (dword_bit + 1)) & 0x1) |
+				((dword >> (dword_bit + 2)) & 0x1) |
+				((dword >> (dword_bit + 3)) & 0x1);
+			packed |= (mask << hword_bit);
+		}
+		phys |= ((u32)packed) << (16 * dword_idx);
+	}
+	return phys;
+}
+
+/**
+ * kbasep_hwcnt_backend_gpu_block_map_from_physical() - Convert from a physical
+ *                                                      block enable map to a
+ *                                                      block enable map
+ *                                                      abstraction.
+ * @phys: Physical 32-bit block enable map
+ * @lo:   Non-NULL pointer to where low 64 bits of block enable map abstraction
+ *        will be stored.
+ * @hi:   Non-NULL pointer to where high 64 bits of block enable map abstraction
+ *        will be stored.
+ */
+static inline void kbasep_hwcnt_backend_gpu_block_map_from_physical(
+	u32 phys,
+	u64 *lo,
+	u64 *hi)
+{
+	u64 dwords[2] = {0, 0};
+
+	size_t dword_idx;
+
+	for (dword_idx = 0; dword_idx < 2; dword_idx++) {
+		const u16 packed = phys >> (16 * dword_idx);
+		u64 dword = 0;
+
+		size_t hword_bit;
+
+		for (hword_bit = 0; hword_bit < 16; hword_bit++) {
+			const size_t dword_bit = hword_bit * 4;
+			const u64 mask = (packed >> (hword_bit)) & 0x1;
+
+			dword |= mask << (dword_bit + 0);
+			dword |= mask << (dword_bit + 1);
+			dword |= mask << (dword_bit + 2);
+			dword |= mask << (dword_bit + 3);
+		}
+		dwords[dword_idx] = dword;
+	}
+	*lo = dwords[0];
+	*hi = dwords[1];
+}
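To make the loss of precision concrete: an abstraction map with lo = 0x1e (values 1-4 enabled) packs down to phys = 0x3, and expanding 0x3 again with kbasep_hwcnt_backend_gpu_block_map_from_physical() yields lo = 0xff (values 0-7 enabled), so individual values within a group of four cannot be round-tripped.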
+
+void kbase_hwcnt_gpu_enable_map_to_physical(
+	struct kbase_hwcnt_physical_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+
+	u64 jm_bm = 0;
+	u64 shader_bm = 0;
+	u64 tiler_bm = 0;
+	u64 mmu_l2_bm = 0;
+
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!src) || WARN_ON(!dst))
+		return;
+
+	metadata = src->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(
+		metadata, grp, blk, blk_inst) {
+		const u64 grp_type = kbase_hwcnt_metadata_group_type(
+			metadata, grp);
+		const u64 blk_type = kbase_hwcnt_metadata_block_type(
+			metadata, grp, blk);
+		const size_t blk_val_cnt =
+			kbase_hwcnt_metadata_block_values_count(
+				metadata, grp, blk);
+		const u64 *blk_map = kbase_hwcnt_enable_map_block_instance(
+			src, grp, blk, blk_inst);
+
+		switch ((enum kbase_hwcnt_gpu_group_type)grp_type) {
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V4_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v4_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER:
+				shader_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER:
+				tiler_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2:
+				mmu_l2_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM:
+				jm_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED:
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V5_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v5_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM:
+				jm_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER:
+				tiler_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2:
+				shader_bm |= *blk_map;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2:
+				mmu_l2_bm |= *blk_map;
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		default:
+			WARN_ON(true);
+		}
+	}
+
+	dst->jm_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(jm_bm, 0);
+	dst->shader_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(shader_bm, 0);
+	dst->tiler_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(tiler_bm, 0);
+	dst->mmu_l2_bm =
+		kbasep_hwcnt_backend_gpu_block_map_to_physical(mmu_l2_bm, 0);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_enable_map_to_physical);
+
+void kbase_hwcnt_gpu_enable_map_from_physical(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_physical_enable_map *src)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+
+	u64 ignored_hi;
+	u64 jm_bm;
+	u64 shader_bm;
+	u64 tiler_bm;
+	u64 mmu_l2_bm;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!src) || WARN_ON(!dst))
+		return;
+
+	metadata = dst->metadata;
+
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->jm_bm, &jm_bm, &ignored_hi);
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->shader_bm, &shader_bm, &ignored_hi);
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->tiler_bm, &tiler_bm, &ignored_hi);
+	kbasep_hwcnt_backend_gpu_block_map_from_physical(
+		src->mmu_l2_bm, &mmu_l2_bm, &ignored_hi);
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		const u64 grp_type = kbase_hwcnt_metadata_group_type(
+			metadata, grp);
+		const u64 blk_type = kbase_hwcnt_metadata_block_type(
+			metadata, grp, blk);
+		const size_t blk_val_cnt =
+			kbase_hwcnt_metadata_block_values_count(
+				metadata, grp, blk);
+		u64 *blk_map = kbase_hwcnt_enable_map_block_instance(
+			dst, grp, blk, blk_inst);
+
+		switch ((enum kbase_hwcnt_gpu_group_type)grp_type) {
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V4_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v4_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER:
+				*blk_map = shader_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER:
+				*blk_map = tiler_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2:
+				*blk_map = mmu_l2_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM:
+				*blk_map = jm_bm;
+				break;
+			case KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED:
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+			WARN_ON(blk_val_cnt != KBASE_HWCNT_V5_VALUES_PER_BLOCK);
+			switch ((enum kbase_hwcnt_gpu_v5_block_type)blk_type) {
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM:
+				*blk_map = jm_bm;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER:
+				*blk_map = tiler_bm;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2:
+				*blk_map = shader_bm;
+				break;
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS:
+			case KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2:
+				*blk_map = mmu_l2_bm;
+				break;
+			default:
+				WARN_ON(true);
+			}
+			break;
+		default:
+			WARN_ON(true);
+		}
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_enable_map_from_physical);
+
+void kbase_hwcnt_gpu_patch_dump_headers(
+	struct kbase_hwcnt_dump_buffer *buf,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!buf) || WARN_ON(!enable_map) ||
+	    WARN_ON(buf->metadata != enable_map->metadata))
+		return;
+
+	metadata = buf->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		const u64 grp_type =
+			kbase_hwcnt_metadata_group_type(metadata, grp);
+		u32 *buf_blk = kbase_hwcnt_dump_buffer_block_instance(
+			buf, grp, blk, blk_inst);
+		const u64 *blk_map = kbase_hwcnt_enable_map_block_instance(
+			enable_map, grp, blk, blk_inst);
+		const u32 prfcnt_en =
+			kbasep_hwcnt_backend_gpu_block_map_to_physical(
+				blk_map[0], 0);
+
+		switch ((enum kbase_hwcnt_gpu_group_type)grp_type) {
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+			buf_blk[KBASE_HWCNT_V4_PRFCNT_EN_HEADER] = prfcnt_en;
+			break;
+		case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+			buf_blk[KBASE_HWCNT_V5_PRFCNT_EN_HEADER] = prfcnt_en;
+			break;
+		default:
+			WARN_ON(true);
+		}
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_gpu_patch_dump_headers);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.h
new file mode 100644
index 0000000..509608a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_gpu.h
@@ -0,0 +1,249 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_HWCNT_GPU_H_
+#define _KBASE_HWCNT_GPU_H_
+
+#include <linux/types.h>
+
+struct kbase_device;
+struct kbase_hwcnt_metadata;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/**
+ * enum kbase_hwcnt_gpu_group_type - GPU hardware counter group types, used to
+ *                                   identify metadata groups.
+ * @KBASE_HWCNT_GPU_GROUP_TYPE_V4: GPU V4 group type.
+ * @KBASE_HWCNT_GPU_GROUP_TYPE_V5: GPU V5 group type.
+ */
+enum kbase_hwcnt_gpu_group_type {
+	KBASE_HWCNT_GPU_GROUP_TYPE_V4 = 0x10,
+	KBASE_HWCNT_GPU_GROUP_TYPE_V5,
+};
+
+/**
+ * enum kbase_hwcnt_gpu_v4_block_type - GPU V4 hardware counter block types,
+ *                                      used to identify metadata blocks.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER:   Shader block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER:    Tiler block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2:   MMU/L2 block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM:       Job Manager block.
+ * @KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED: Reserved block.
+ */
+enum kbase_hwcnt_gpu_v4_block_type {
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_SHADER = 0x20,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_TILER,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_MMU_L2,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_JM,
+	KBASE_HWCNT_GPU_V4_BLOCK_TYPE_RESERVED,
+};
+
+/**
+ * enum kbase_hwcnt_gpu_v5_block_type - GPU V5 hardware counter block types,
+ *                                      used to identify metadata blocks.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM:      Job Manager block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER:   Tiler block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC:      Shader Core block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2:     Secondary Shader Core block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS:  Memsys block.
+ * @KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2: Secondary Memsys block.
+ */
+enum kbase_hwcnt_gpu_v5_block_type {
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_JM = 0x40,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_TILER,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_SC2,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS,
+	KBASE_HWCNT_GPU_V5_BLOCK_TYPE_PERF_MEMSYS2,
+};
+
+/**
+ * struct kbase_hwcnt_physical_enable_map - Representation of enable map
+ *                                          directly used by GPU.
+ * @jm_bm:     Job Manager counters selection bitmask.
+ * @shader_bm: Shader counters selection bitmask.
+ * @tiler_bm:  Tiler counters selection bitmask.
+ * @mmu_l2_bm: MMU_L2 counters selection bitmask.
+ */
+struct kbase_hwcnt_physical_enable_map {
+	u32 jm_bm;
+	u32 shader_bm;
+	u32 tiler_bm;
+	u32 mmu_l2_bm;
+};
+
+/**
+ * struct kbase_hwcnt_gpu_v4_info - Information about hwcnt blocks on v4 GPUs.
+ * @cg_count: Core group count.
+ * @cgs:      Non-NULL pointer to array of cg_count coherent group structures.
+ *
+ * V4 devices are Mali-T6xx or Mali-T72x, and have one or more core groups,
+ * where each core group may have a physically different layout.
+ */
+struct kbase_hwcnt_gpu_v4_info {
+	size_t cg_count;
+	const struct mali_base_gpu_coherent_group *cgs;
+};
+
+/**
+ * struct kbase_hwcnt_gpu_v5_info - Information about hwcnt blocks on v5 GPUs.
+ * @l2_count:   L2 cache count.
+ * @core_mask:  Shader core mask. May be sparse.
+ */
+struct kbase_hwcnt_gpu_v5_info {
+	size_t l2_count;
+	u64 core_mask;
+};
+
+/**
+ * struct kbase_hwcnt_gpu_info - Tagged union with information about the current
+ *                               GPU's hwcnt blocks.
+ * @type: GPU type.
+ * @v4:   Info filled in if a v4 GPU.
+ * @v5:   Info filled in if a v5 GPU.
+ */
+struct kbase_hwcnt_gpu_info {
+	enum kbase_hwcnt_gpu_group_type type;
+	union {
+		struct kbase_hwcnt_gpu_v4_info v4;
+		struct kbase_hwcnt_gpu_v5_info v5;
+	};
+};
+
+/**
+ * kbase_hwcnt_gpu_info_init() - Initialise an info structure used to create the
+ *                               hwcnt metadata.
+ * @kbdev: Non-NULL pointer to kbase device.
+ * @info:  Non-NULL pointer to data structure to be filled in.
+ *
+ * The initialised info struct will only be valid for use while kbdev is valid.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_gpu_info_init(
+	struct kbase_device *kbdev,
+	struct kbase_hwcnt_gpu_info *info);
+
+/**
+ * kbase_hwcnt_gpu_metadata_create() - Create hardware counter metadata for the
+ *                                     current GPU.
+ * @info:           Non-NULL pointer to info struct initialised by
+ *                  kbase_hwcnt_gpu_info_init.
+ * @use_secondary:  True if secondary performance counters should be used, else
+ *                  false. Ignored if secondary counters are not supported.
+ * @out_metadata:   Non-NULL pointer to where created metadata is stored on
+ *                  success.
+ * @out_dump_bytes: Non-NULL pointer to where the size of the GPU counter dump
+ *                  buffer is stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_gpu_metadata_create(
+	const struct kbase_hwcnt_gpu_info *info,
+	bool use_secondary,
+	const struct kbase_hwcnt_metadata **out_metadata,
+	size_t *out_dump_bytes);
+
+/**
+ * kbase_hwcnt_gpu_metadata_destroy() - Destroy GPU hardware counter metadata.
+ * @metadata: Pointer to metadata to destroy.
+ */
+void kbase_hwcnt_gpu_metadata_destroy(
+	const struct kbase_hwcnt_metadata *metadata);
+
+/**
+ * kbase_hwcnt_gpu_dump_get() - Copy or accumulate enabled counters from the raw
+ *                              dump buffer in src into the dump buffer
+ *                              abstraction in dst.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src raw dump buffer, of same length
+ *                  as returned in out_dump_bytes parameter of
+ *                  kbase_hwcnt_gpu_metadata_create.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ * @accumulate:     True if counters in src should be accumulated into dst,
+ *                  rather than copied.
+ *
+ * The dst and dst_enable_map MUST have been created from the same metadata as
+ * returned from the call to kbase_hwcnt_gpu_metadata_create as was used to get
+ * the length of src.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_gpu_dump_get(
+	struct kbase_hwcnt_dump_buffer *dst,
+	void *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map,
+	bool accumulate);
+
+/**
+ * kbase_hwcnt_gpu_enable_map_to_physical() - Convert an enable map abstraction
+ *                                            into a physical enable map.
+ * @dst: Non-NULL pointer to dst physical enable map.
+ * @src: Non-NULL pointer to src enable map abstraction.
+ *
+ * The src must have been created from a metadata returned from a call to
+ * kbase_hwcnt_gpu_metadata_create.
+ *
+ * This is a lossy conversion, as the enable map abstraction has one bit per
+ * individual counter block value, but the physical enable map uses 1 bit for
+ * every 4 counters, shared over all instances of a block.
+ */
+void kbase_hwcnt_gpu_enable_map_to_physical(
+	struct kbase_hwcnt_physical_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src);
+
+/**
+ * kbase_hwcnt_gpu_enable_map_from_physical() - Convert a physical enable map to
+ *                                              an enable map abstraction.
+ * @dst: Non-NULL pointer to dst enable map abstraction.
+ * @src: Non-NULL pointer to src physical enable map.
+ *
+ * The dst must have been created from a metadata returned from a call to
+ * kbase_hwcnt_gpu_metadata_create.
+ *
+ * This is a lossy conversion, as the physical enable map can technically
+ * support counter blocks with 128 counters each, but no hardware actually uses
+ * more than 64, so the enable map abstraction has nowhere to store the enable
+ * information for the 64 non-existent counters.
+ */
+void kbase_hwcnt_gpu_enable_map_from_physical(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_physical_enable_map *src);
+
+/**
+ * kbase_hwcnt_gpu_patch_dump_headers() - Patch all the performance counter
+ *                                        enable headers in a dump buffer to
+ *                                        reflect the specified enable map.
+ * @buf:        Non-NULL pointer to dump buffer to patch.
+ * @enable_map: Non-NULL pointer to enable map.
+ *
+ * The buf and enable_map must have been created from a metadata returned from
+ * a call to kbase_hwcnt_gpu_metadata_create.
+ *
+ * This function should be used before handing off a dump buffer over the
+ * kernel-user boundary, to ensure the header is accurate for the enable map
+ * used by the user.
+ */
+void kbase_hwcnt_gpu_patch_dump_headers(
+	struct kbase_hwcnt_dump_buffer *buf,
+	const struct kbase_hwcnt_enable_map *enable_map);
+
+#endif /* _KBASE_HWCNT_GPU_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.c
new file mode 100644
index 0000000..b0e6aee
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.c
@@ -0,0 +1,152 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_legacy.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_ioctl.h"
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/**
+ * struct kbase_hwcnt_legacy_client - Legacy hardware counter client.
+ * @user_dump_buf: Pointer to a non-NULL user buffer, where dumps are returned.
+ * @enable_map:    Counter enable map.
+ * @dump_buf:      Dump buffer used to manipulate dumps before they are copied
+ *                 to user space.
+ * @hvcli:         Hardware counter virtualizer client.
+ */
+struct kbase_hwcnt_legacy_client {
+	void __user *user_dump_buf;
+	struct kbase_hwcnt_enable_map enable_map;
+	struct kbase_hwcnt_dump_buffer dump_buf;
+	struct kbase_hwcnt_virtualizer_client *hvcli;
+};
+
+int kbase_hwcnt_legacy_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_ioctl_hwcnt_enable *enable,
+	struct kbase_hwcnt_legacy_client **out_hlcli)
+{
+	int errcode;
+	struct kbase_hwcnt_legacy_client *hlcli;
+	const struct kbase_hwcnt_metadata *metadata;
+	struct kbase_hwcnt_physical_enable_map phys_em;
+
+	if (!hvirt || !enable || !enable->dump_buffer || !out_hlcli)
+		return -EINVAL;
+
+	metadata = kbase_hwcnt_virtualizer_metadata(hvirt);
+
+	hlcli = kzalloc(sizeof(*hlcli), GFP_KERNEL);
+	if (!hlcli)
+		return -ENOMEM;
+
+	hlcli->user_dump_buf = (void __user *)(uintptr_t)enable->dump_buffer;
+
+	errcode = kbase_hwcnt_enable_map_alloc(metadata, &hlcli->enable_map);
+	if (errcode)
+		goto error;
+
+	/* Translate from the ioctl enable map to the internal one */
+	phys_em.jm_bm = enable->jm_bm;
+	phys_em.shader_bm = enable->shader_bm;
+	phys_em.tiler_bm = enable->tiler_bm;
+	phys_em.mmu_l2_bm = enable->mmu_l2_bm;
+	kbase_hwcnt_gpu_enable_map_from_physical(&hlcli->enable_map, &phys_em);
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(metadata, &hlcli->dump_buf);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_virtualizer_client_create(
+		hvirt, &hlcli->enable_map, &hlcli->hvcli);
+	if (errcode)
+		goto error;
+
+	*out_hlcli = hlcli;
+	return 0;
+
+error:
+	kbase_hwcnt_legacy_client_destroy(hlcli);
+	return errcode;
+}
+
+void kbase_hwcnt_legacy_client_destroy(struct kbase_hwcnt_legacy_client *hlcli)
+{
+	if (!hlcli)
+		return;
+
+	kbase_hwcnt_virtualizer_client_destroy(hlcli->hvcli);
+	kbase_hwcnt_dump_buffer_free(&hlcli->dump_buf);
+	kbase_hwcnt_enable_map_free(&hlcli->enable_map);
+	kfree(hlcli);
+}
+
+int kbase_hwcnt_legacy_client_dump(struct kbase_hwcnt_legacy_client *hlcli)
+{
+	int errcode;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	if (!hlcli)
+		return -EINVAL;
+
+	/* Dump into the kernel buffer */
+	errcode = kbase_hwcnt_virtualizer_client_dump(hlcli->hvcli,
+		&ts_start_ns, &ts_end_ns, &hlcli->dump_buf);
+	if (errcode)
+		return errcode;
+
+	/* Patch the dump buf headers, to hide the counters that other hwcnt
+	 * clients are using.
+	 */
+	kbase_hwcnt_gpu_patch_dump_headers(
+		&hlcli->dump_buf, &hlcli->enable_map);
+
+	/* Zero all non-enabled counters (current values are undefined) */
+	kbase_hwcnt_dump_buffer_zero_non_enabled(
+		&hlcli->dump_buf, &hlcli->enable_map);
+
+	/* Copy into the user's buffer */
+	errcode = copy_to_user(hlcli->user_dump_buf, hlcli->dump_buf.dump_buf,
+		hlcli->dump_buf.metadata->dump_buf_bytes);
+	/* copy_to_user() returns the number of bytes not copied; non-zero
+	 * means the user buffer was invalid or too small.
+	 */
+	if (errcode)
+		return -EFAULT;
+
+	return 0;
+}
+
+int kbase_hwcnt_legacy_client_clear(struct kbase_hwcnt_legacy_client *hlcli)
+{
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	if (!hlcli)
+		return -EINVAL;
+
+	/* Dump with a NULL buffer to clear this client's counters */
+	return kbase_hwcnt_virtualizer_client_dump(hlcli->hvcli,
+		&ts_start_ns, &ts_end_ns, NULL);
+}
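A hedged sketch of how this file's API is driven from elsewhere in the driver: the virtualizer handle and the userspace buffer address are assumed to be supplied by the caller, and only the struct kbase_ioctl_hwcnt_enable fields referenced above are filled in.

static int example_legacy_dump(struct kbase_hwcnt_virtualizer *hvirt,
			       u64 user_buf)
{
	struct kbase_hwcnt_legacy_client *hlcli;
	struct kbase_ioctl_hwcnt_enable enable = {
		.dump_buffer = user_buf, /* userspace address, as a u64 */
		.jm_bm = 0xFFFFFFFF,
		.shader_bm = 0xFFFFFFFF,
		.tiler_bm = 0xFFFFFFFF,
		.mmu_l2_bm = 0xFFFFFFFF,
	};
	int errcode;

	errcode = kbase_hwcnt_legacy_client_create(hvirt, &enable, &hlcli);
	if (errcode)
		return errcode;

	/* Counters accumulate from client creation; dump them to user_buf. */
	errcode = kbase_hwcnt_legacy_client_dump(hlcli);

	kbase_hwcnt_legacy_client_destroy(hlcli);
	return errcode;
}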
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.h
new file mode 100644
index 0000000..7a610ae
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_legacy.h
@@ -0,0 +1,94 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Legacy hardware counter interface, giving userspace clients simple,
+ * synchronous access to hardware counters.
+ *
+ * Any functions operating on a single legacy hardware counter client instance
+ * must be externally synchronised.
+ * Different clients may safely be used concurrently.
+ */
+
+#ifndef _KBASE_HWCNT_LEGACY_H_
+#define _KBASE_HWCNT_LEGACY_H_
+
+struct kbase_hwcnt_legacy_client;
+struct kbase_ioctl_hwcnt_enable;
+struct kbase_hwcnt_virtualizer;
+
+/**
+ * kbase_hwcnt_legacy_client_create() - Create a legacy hardware counter client.
+ * @hvirt:     Non-NULL pointer to hardware counter virtualizer the client
+ *             should be attached to.
+ * @enable:    Non-NULL pointer to hwcnt_enable structure, containing a valid
+ *             pointer to a user dump buffer large enough to hold a dump, and
+ *             the counters that should be enabled.
+ * @out_hlcli: Non-NULL pointer to where the pointer to the created client will
+ *             be stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_legacy_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_ioctl_hwcnt_enable *enable,
+	struct kbase_hwcnt_legacy_client **out_hlcli);
+
+/**
+ * kbase_hwcnt_legacy_client_destroy() - Destroy a legacy hardware counter
+ *                                       client.
+ * @hlcli: Pointer to the legacy hardware counter client.
+ *
+ * Will safely destroy a client in any partial state of construction.
+ */
+void kbase_hwcnt_legacy_client_destroy(struct kbase_hwcnt_legacy_client *hlcli);
+
+/**
+ * kbase_hwcnt_legacy_client_dump() - Perform a hardware counter dump into the
+ *                                    client's user buffer.
+ * @hlcli: Non-NULL pointer to the legacy hardware counter client.
+ *
+ * This function will synchronously dump hardware counters into the user buffer
+ * specified on client creation, with the counters specified on client creation.
+ *
+ * The counters are automatically cleared after each dump, such that the next
+ * dump performed will return the counter values accumulated between the time of
+ * this function call and the next dump.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_legacy_client_dump(struct kbase_hwcnt_legacy_client *hlcli);
+
+/**
+ * kbase_hwcnt_legacy_client_clear() - Perform and discard a hardware counter
+ *                                     dump.
+ * @hlcli: Non-NULL pointer to the legacy hardware counter client.
+ *
+ * This function will synchronously clear the hardware counters, such that the
+ * next dump performed will return the counter values accumulated between the
+ * time of this function call and the next dump.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_legacy_client_clear(struct kbase_hwcnt_legacy_client *hlcli);
+
+#endif /* _KBASE_HWCNT_LEGACY_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h
new file mode 100644
index 0000000..10706b8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_reader.h
@@ -0,0 +1,71 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_HWCNT_READER_H_
+#define _KBASE_HWCNT_READER_H_
+
+/* The ids of ioctl commands. */
+#define KBASE_HWCNT_READER 0xBE
+#define KBASE_HWCNT_READER_GET_HWVER       _IOR(KBASE_HWCNT_READER, 0x00, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER_SIZE _IOR(KBASE_HWCNT_READER, 0x01, u32)
+#define KBASE_HWCNT_READER_DUMP            _IOW(KBASE_HWCNT_READER, 0x10, u32)
+#define KBASE_HWCNT_READER_CLEAR           _IOW(KBASE_HWCNT_READER, 0x11, u32)
+#define KBASE_HWCNT_READER_GET_BUFFER      _IOR(KBASE_HWCNT_READER, 0x20,\
+		struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_PUT_BUFFER      _IOW(KBASE_HWCNT_READER, 0x21,\
+		struct kbase_hwcnt_reader_metadata)
+#define KBASE_HWCNT_READER_SET_INTERVAL    _IOW(KBASE_HWCNT_READER, 0x30, u32)
+#define KBASE_HWCNT_READER_ENABLE_EVENT    _IOW(KBASE_HWCNT_READER, 0x40, u32)
+#define KBASE_HWCNT_READER_DISABLE_EVENT   _IOW(KBASE_HWCNT_READER, 0x41, u32)
+#define KBASE_HWCNT_READER_GET_API_VERSION _IOW(KBASE_HWCNT_READER, 0xFF, u32)
+
+/**
+ * struct kbase_hwcnt_reader_metadata - hwcnt reader sample buffer metadata
+ * @timestamp:  time when sample was collected
+ * @event_id:   id of an event that triggered sample collection
+ * @buffer_idx: position in sampling area where sample buffer was stored
+ */
+struct kbase_hwcnt_reader_metadata {
+	u64 timestamp;
+	u32 event_id;
+	u32 buffer_idx;
+};
+
+/**
+ * enum base_hwcnt_reader_event - hwcnt dumping events
+ * @BASE_HWCNT_READER_EVENT_MANUAL:   manual request for dump
+ * @BASE_HWCNT_READER_EVENT_PERIODIC: periodic dump
+ * @BASE_HWCNT_READER_EVENT_PREJOB:   prejob dump request
+ * @BASE_HWCNT_READER_EVENT_POSTJOB:  postjob dump request
+ * @BASE_HWCNT_READER_EVENT_COUNT:    number of supported events
+ */
+enum base_hwcnt_reader_event {
+	BASE_HWCNT_READER_EVENT_MANUAL,
+	BASE_HWCNT_READER_EVENT_PERIODIC,
+	BASE_HWCNT_READER_EVENT_PREJOB,
+	BASE_HWCNT_READER_EVENT_POSTJOB,
+
+	BASE_HWCNT_READER_EVENT_COUNT
+};
+
+#endif /* _KBASE_HWCNT_READER_H_ */
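A heavily hedged userspace sketch of how these ioctls are typically sequenced. It assumes a hwcnt reader file descriptor has already been obtained from the kbase driver (that setup ioctl is not part of this header), that the sample area has been mmap()ed separately, and that the u32/u64 types resolve to the usual fixed-width integers when this header is consumed from userspace.

#include <sys/ioctl.h>

static int example_read_one_sample(int fd)
{
	struct kbase_hwcnt_reader_metadata meta;

	/* Request a manual dump, then take ownership of the sample buffer. */
	if (ioctl(fd, KBASE_HWCNT_READER_DUMP, 0))
		return -1;
	if (ioctl(fd, KBASE_HWCNT_READER_GET_BUFFER, &meta))
		return -1;

	/* meta.buffer_idx locates the sample within the mmap()ed area. */

	/* Hand the buffer back so it can be reused for later samples. */
	return ioctl(fd, KBASE_HWCNT_READER_PUT_BUFFER, &meta);
}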
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.c
new file mode 100644
index 0000000..1e9efde
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.c
@@ -0,0 +1,538 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase.h"
+
+/* Minimum alignment of each block of hardware counters */
+#define KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT \
+	(KBASE_HWCNT_BITFIELD_BITS * KBASE_HWCNT_VALUE_BYTES)
+
+/**
+ * KBASE_HWCNT_ALIGN_UPWARDS() - Align a value to an alignment.
+ * @value:     The value to align upwards.
+ * @alignment: The alignment.
+ *
+ * Return: A number greater than or equal to value that is aligned to alignment.
+ */
+#define KBASE_HWCNT_ALIGN_UPWARDS(value, alignment) \
+	((value) + (((alignment) - ((value) % (alignment))) % (alignment)))
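A quick numerical check of the macro, with illustrative values (64 is the block alignment in values, i.e. KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT / KBASE_HWCNT_VALUE_BYTES):

	KBASE_HWCNT_ALIGN_UPWARDS(60, 64) == 60 + ((64 - (60 % 64)) % 64) == 64
	KBASE_HWCNT_ALIGN_UPWARDS(64, 64) == 64 + ((64 - (64 % 64)) % 64) == 64
	KBASE_HWCNT_ALIGN_UPWARDS(65, 64) == 65 + ((64 - (65 % 64)) % 64) == 128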
+
+int kbase_hwcnt_metadata_create(
+	const struct kbase_hwcnt_description *desc,
+	const struct kbase_hwcnt_metadata **out_metadata)
+{
+	char *buf;
+	struct kbase_hwcnt_metadata *metadata;
+	struct kbase_hwcnt_group_metadata *grp_mds;
+	size_t grp;
+	size_t enable_map_count; /* Number of u64 bitfields (inc padding) */
+	size_t dump_buf_count; /* Number of u32 values (inc padding) */
+	size_t avail_mask_bits; /* Number of availability mask bits */
+
+	size_t size;
+	size_t offset;
+
+	if (!desc || !out_metadata)
+		return -EINVAL;
+
+	/* Calculate the bytes needed to tightly pack the metadata */
+
+	/* Top level metadata */
+	size = 0;
+	size += sizeof(struct kbase_hwcnt_metadata);
+
+	/* Group metadata */
+	size += sizeof(struct kbase_hwcnt_group_metadata) * desc->grp_cnt;
+
+	/* Block metadata */
+	for (grp = 0; grp < desc->grp_cnt; grp++) {
+		size += sizeof(struct kbase_hwcnt_block_metadata) *
+			desc->grps[grp].blk_cnt;
+	}
+
+	/* Single allocation for the entire metadata */
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Use the allocated memory for the metadata and its members */
+
+	/* Bump allocate the top level metadata */
+	offset = 0;
+	metadata = (struct kbase_hwcnt_metadata *)(buf + offset);
+	offset += sizeof(struct kbase_hwcnt_metadata);
+
+	/* Bump allocate the group metadata */
+	grp_mds = (struct kbase_hwcnt_group_metadata *)(buf + offset);
+	offset += sizeof(struct kbase_hwcnt_group_metadata) * desc->grp_cnt;
+
+	enable_map_count = 0;
+	dump_buf_count = 0;
+	avail_mask_bits = 0;
+
+	for (grp = 0; grp < desc->grp_cnt; grp++) {
+		size_t blk;
+
+		const struct kbase_hwcnt_group_description *grp_desc =
+			desc->grps + grp;
+		struct kbase_hwcnt_group_metadata *grp_md = grp_mds + grp;
+
+		size_t group_enable_map_count = 0;
+		size_t group_dump_buffer_count = 0;
+		size_t group_avail_mask_bits = 0;
+
+		/* Bump allocate this group's block metadata */
+		struct kbase_hwcnt_block_metadata *blk_mds =
+			(struct kbase_hwcnt_block_metadata *)(buf + offset);
+		offset += sizeof(struct kbase_hwcnt_block_metadata) *
+			grp_desc->blk_cnt;
+
+		/* Fill in each block in the group's information */
+		for (blk = 0; blk < grp_desc->blk_cnt; blk++) {
+			const struct kbase_hwcnt_block_description *blk_desc =
+				grp_desc->blks + blk;
+			struct kbase_hwcnt_block_metadata *blk_md =
+				blk_mds + blk;
+			const size_t n_values =
+				blk_desc->hdr_cnt + blk_desc->ctr_cnt;
+
+			blk_md->type = blk_desc->type;
+			blk_md->inst_cnt = blk_desc->inst_cnt;
+			blk_md->hdr_cnt = blk_desc->hdr_cnt;
+			blk_md->ctr_cnt = blk_desc->ctr_cnt;
+			blk_md->enable_map_index = group_enable_map_count;
+			blk_md->enable_map_stride =
+				kbase_hwcnt_bitfield_count(n_values);
+			blk_md->dump_buf_index = group_dump_buffer_count;
+			blk_md->dump_buf_stride =
+				KBASE_HWCNT_ALIGN_UPWARDS(
+					n_values,
+					(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+					 KBASE_HWCNT_VALUE_BYTES));
+			blk_md->avail_mask_index = group_avail_mask_bits;
+
+			group_enable_map_count +=
+				blk_md->enable_map_stride * blk_md->inst_cnt;
+			group_dump_buffer_count +=
+				blk_md->dump_buf_stride * blk_md->inst_cnt;
+			group_avail_mask_bits += blk_md->inst_cnt;
+		}
+
+		/* Fill in the group's information */
+		grp_md->type = grp_desc->type;
+		grp_md->blk_cnt = grp_desc->blk_cnt;
+		grp_md->blk_metadata = blk_mds;
+		grp_md->enable_map_index = enable_map_count;
+		grp_md->dump_buf_index = dump_buf_count;
+		grp_md->avail_mask_index = avail_mask_bits;
+
+		enable_map_count += group_enable_map_count;
+		dump_buf_count += group_dump_buffer_count;
+		avail_mask_bits += group_avail_mask_bits;
+	}
+
+	/* Fill in the top level metadata's information */
+	metadata->grp_cnt = desc->grp_cnt;
+	metadata->grp_metadata = grp_mds;
+	metadata->enable_map_bytes =
+		enable_map_count * KBASE_HWCNT_BITFIELD_BYTES;
+	metadata->dump_buf_bytes = dump_buf_count * KBASE_HWCNT_VALUE_BYTES;
+	metadata->avail_mask = desc->avail_mask;
+
+	WARN_ON(size != offset);
+	/* Due to the block alignment, there should be exactly one enable map
+	 * bit per 4 bytes in the dump buffer.
+	 */
+	WARN_ON(metadata->dump_buf_bytes !=
+		(metadata->enable_map_bytes *
+		 BITS_PER_BYTE * KBASE_HWCNT_VALUE_BYTES));
+
+	*out_metadata = metadata;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_metadata_create);
+
+void kbase_hwcnt_metadata_destroy(const struct kbase_hwcnt_metadata *metadata)
+{
+	kfree(metadata);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_metadata_destroy);
+
+int kbase_hwcnt_enable_map_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_enable_map *enable_map)
+{
+	u64 *enable_map_buf;
+
+	if (!metadata || !enable_map)
+		return -EINVAL;
+
+	enable_map_buf = kzalloc(metadata->enable_map_bytes, GFP_KERNEL);
+	if (!enable_map_buf)
+		return -ENOMEM;
+
+	enable_map->metadata = metadata;
+	enable_map->enable_map = enable_map_buf;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_enable_map_alloc);
+
+void kbase_hwcnt_enable_map_free(struct kbase_hwcnt_enable_map *enable_map)
+{
+	if (!enable_map)
+		return;
+
+	kfree(enable_map->enable_map);
+	enable_map->enable_map = NULL;
+	enable_map->metadata = NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_enable_map_free);
+
+int kbase_hwcnt_dump_buffer_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	u32 *buf;
+
+	if (!metadata || !dump_buf)
+		return -EINVAL;
+
+	buf = kmalloc(metadata->dump_buf_bytes, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	dump_buf->metadata = metadata;
+	dump_buf->dump_buf = buf;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_alloc);
+
+void kbase_hwcnt_dump_buffer_free(struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	if (!dump_buf)
+		return;
+
+	kfree(dump_buf->dump_buf);
+	memset(dump_buf, 0, sizeof(*dump_buf));
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_free);
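A minimal sketch of the lifecycle implemented by the functions above, assuming a description struct has been filled in elsewhere (see the header for the description types):

static int example_setup(const struct kbase_hwcnt_description *desc)
{
	const struct kbase_hwcnt_metadata *md;
	struct kbase_hwcnt_enable_map em;
	struct kbase_hwcnt_dump_buffer db;
	int errcode;

	errcode = kbase_hwcnt_metadata_create(desc, &md);
	if (errcode)
		return errcode;

	errcode = kbase_hwcnt_enable_map_alloc(md, &em);
	if (errcode)
		goto destroy_md;

	errcode = kbase_hwcnt_dump_buffer_alloc(md, &db);
	if (errcode)
		goto free_em;

	/* ... enable counters in em, dump into db, etc ... */

	kbase_hwcnt_dump_buffer_free(&db);
free_em:
	kbase_hwcnt_enable_map_free(&em);
destroy_md:
	kbase_hwcnt_metadata_destroy(md);
	return errcode;
}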
+
+int kbase_hwcnt_dump_buffer_array_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t n,
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs)
+{
+	struct kbase_hwcnt_dump_buffer *buffers;
+	size_t buf_idx;
+	unsigned int order;
+	unsigned long addr;
+
+	if (!metadata || !dump_bufs)
+		return -EINVAL;
+
+	/* Allocate memory for the dump buffer struct array */
+	buffers = kmalloc_array(n, sizeof(*buffers), GFP_KERNEL);
+	if (!buffers)
+		return -ENOMEM;
+
+	/* Allocate pages for the actual dump buffers, as they tend to be fairly
+	 * large.
+	 */
+	order = get_order(metadata->dump_buf_bytes * n);
+	addr = __get_free_pages(GFP_KERNEL, order);
+
+	if (!addr) {
+		kfree(buffers);
+		return -ENOMEM;
+	}
+
+	dump_bufs->page_addr = addr;
+	dump_bufs->page_order = order;
+	dump_bufs->buf_cnt = n;
+	dump_bufs->bufs = buffers;
+
+	/* Set the buffer of each dump buf */
+	for (buf_idx = 0; buf_idx < n; buf_idx++) {
+		const size_t offset = metadata->dump_buf_bytes * buf_idx;
+
+		buffers[buf_idx].metadata = metadata;
+		buffers[buf_idx].dump_buf = (u32 *)(addr + offset);
+	}
+
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_array_alloc);
+
+void kbase_hwcnt_dump_buffer_array_free(
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs)
+{
+	if (!dump_bufs)
+		return;
+
+	kfree(dump_bufs->bufs);
+	free_pages(dump_bufs->page_addr, dump_bufs->page_order);
+	memset(dump_bufs, 0, sizeof(*dump_bufs));
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_array_free);
+
+void kbase_hwcnt_dump_buffer_zero(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk;
+		size_t val_cnt;
+
+		if (!kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst))
+			continue;
+
+		dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+
+		kbase_hwcnt_dump_buffer_block_zero(dst_blk, val_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_zero);
+
+void kbase_hwcnt_dump_buffer_zero_strict(
+	struct kbase_hwcnt_dump_buffer *dst)
+{
+	if (WARN_ON(!dst))
+		return;
+
+	memset(dst->dump_buf, 0, dst->metadata->dump_buf_bytes);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_zero_strict);
+
+void kbase_hwcnt_dump_buffer_zero_non_enabled(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		const u64 *blk_em = kbase_hwcnt_enable_map_block_instance(
+			dst_enable_map, grp, blk, blk_inst);
+		size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+
+		/* Align upwards to include padding bytes */
+		val_cnt = KBASE_HWCNT_ALIGN_UPWARDS(val_cnt,
+			(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+			 KBASE_HWCNT_VALUE_BYTES));
+
+		if (kbase_hwcnt_metadata_block_instance_avail(
+			metadata, grp, blk, blk_inst)) {
+			/* Block available, so only zero non-enabled values */
+			kbase_hwcnt_dump_buffer_block_zero_non_enabled(
+				dst_blk, blk_em, val_cnt);
+		} else {
+			/* Block not available, so zero the entire thing */
+			kbase_hwcnt_dump_buffer_block_zero(dst_blk, val_cnt);
+		}
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_zero_non_enabled);
+
+void kbase_hwcnt_dump_buffer_copy(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk;
+		const u32 *src_blk;
+		size_t val_cnt;
+
+		if (!kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst))
+			continue;
+
+		dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+
+		kbase_hwcnt_dump_buffer_block_copy(dst_blk, src_blk, val_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_copy);
+
+void kbase_hwcnt_dump_buffer_copy_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		const u32 *src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		const u64 *blk_em = kbase_hwcnt_enable_map_block_instance(
+			dst_enable_map, grp, blk, blk_inst);
+		size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+			metadata, grp, blk);
+		/* Align upwards to include padding bytes */
+		val_cnt = KBASE_HWCNT_ALIGN_UPWARDS(val_cnt,
+			(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+			 KBASE_HWCNT_VALUE_BYTES));
+
+		kbase_hwcnt_dump_buffer_block_copy_strict(
+			dst_blk, src_blk, blk_em, val_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_copy_strict);
+
+void kbase_hwcnt_dump_buffer_accumulate(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk;
+		const u32 *src_blk;
+		size_t hdr_cnt;
+		size_t ctr_cnt;
+
+		if (!kbase_hwcnt_enable_map_block_enabled(
+			dst_enable_map, grp, blk, blk_inst))
+			continue;
+
+		dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		hdr_cnt = kbase_hwcnt_metadata_block_headers_count(
+			metadata, grp, blk);
+		ctr_cnt = kbase_hwcnt_metadata_block_counters_count(
+			metadata, grp, blk);
+
+		kbase_hwcnt_dump_buffer_block_accumulate(
+			dst_blk, src_blk, hdr_cnt, ctr_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_accumulate);
+
+void kbase_hwcnt_dump_buffer_accumulate_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map)
+{
+	const struct kbase_hwcnt_metadata *metadata;
+	size_t grp, blk, blk_inst;
+
+	if (WARN_ON(!dst) ||
+	    WARN_ON(!src) ||
+	    WARN_ON(!dst_enable_map) ||
+	    WARN_ON(dst == src) ||
+	    WARN_ON(dst->metadata != src->metadata) ||
+	    WARN_ON(dst->metadata != dst_enable_map->metadata))
+		return;
+
+	metadata = dst->metadata;
+
+	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+		u32 *dst_blk = kbase_hwcnt_dump_buffer_block_instance(
+			dst, grp, blk, blk_inst);
+		const u32 *src_blk = kbase_hwcnt_dump_buffer_block_instance(
+			src, grp, blk, blk_inst);
+		const u64 *blk_em = kbase_hwcnt_enable_map_block_instance(
+			dst_enable_map, grp, blk, blk_inst);
+		size_t hdr_cnt = kbase_hwcnt_metadata_block_headers_count(
+			metadata, grp, blk);
+		size_t ctr_cnt = kbase_hwcnt_metadata_block_counters_count(
+			metadata, grp, blk);
+		/* Align upwards to include padding bytes */
+		ctr_cnt = KBASE_HWCNT_ALIGN_UPWARDS(hdr_cnt + ctr_cnt,
+			(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
+			 KBASE_HWCNT_VALUE_BYTES)) - hdr_cnt;
+
+		kbase_hwcnt_dump_buffer_block_accumulate_strict(
+			dst_blk, src_blk, blk_em, hdr_cnt, ctr_cnt);
+	}
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_dump_buffer_accumulate_strict);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.h
new file mode 100644
index 0000000..4d78c84
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_types.h
@@ -0,0 +1,1087 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter types.
+ * Contains structures for describing the physical layout of hardware counter
+ * dump buffers and enable maps within a system.
+ *
+ * Also contains helper functions for manipulation of these dump buffers and
+ * enable maps.
+ *
+ * Through use of these structures and functions, hardware counters can be
+ * enabled, copied, accumulated, and generally manipulated in a generic way,
+ * regardless of the physical counter dump layout.
+ *
+ * Terminology:
+ *
+ * Hardware Counter System:
+ *   A collection of hardware counter groups, making a full hardware counter
+ *   system.
+ * Hardware Counter Group:
+ *   A group of Hardware Counter Blocks (e.g. a t62x might have more than one
+ *   core group, so has one counter group per core group, where each group
+ *   may have a different number and layout of counter blocks).
+ * Hardware Counter Block:
+ *   A block of hardware counters (e.g. shader block, tiler block).
+ * Hardware Counter Block Instance:
+ *   An instance of a Hardware Counter Block (e.g. an MP4 GPU might have
+ *   4 shader block instances).
+ *
+ * Block Header:
+ *   A header value inside a counter block. Headers don't count anything,
+ *   so it is only valid to copy or zero them. Headers are always the first
+ *   values in the block.
+ * Block Counter:
+ *   A counter value inside a counter block. Counters can be zeroed, copied,
+ *   or accumulated. Counters are always immediately after the headers in the
+ *   block.
+ * Block Value:
+ *   A catch-all term for block headers and block counters.
+ *
+ * Enable Map:
+ *   An array of u64 bitfields, where each bit either enables exactly one
+ *   block value, or is unused (padding).
+ * Dump Buffer:
+ *   An array of u32 values, where each u32 corresponds either to one block
+ *   value, or is unused (padding).
+ * Availability Mask:
+ *   A bitfield, where each bit corresponds to whether a block instance is
+ *   physically available (e.g. an MP3 GPU may have a sparse core mask of
+ *   0b1011, meaning it only has 3 cores but for hardware counter dumps has the
+ *   same dump buffer layout as an MP4 GPU with a core mask of 0b1111. In this
+ *   case, the availability mask might be 0b1011111 (the exact layout will
+ *   depend on the specific hardware architecture), with the 3 extra early bits
+ *   corresponding to other block instances in the hardware counter system).
+ * Metadata:
+ *   Structure describing the physical layout of the enable map and dump buffers
+ *   for a specific hardware counter system.
+ *
+ */
+
+#ifndef _KBASE_HWCNT_TYPES_H_
+#define _KBASE_HWCNT_TYPES_H_
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include "mali_malisw.h"
+
+/* Number of bytes in each bitfield */
+#define KBASE_HWCNT_BITFIELD_BYTES (sizeof(u64))
+
+/* Number of bits in each bitfield */
+#define KBASE_HWCNT_BITFIELD_BITS (KBASE_HWCNT_BITFIELD_BYTES * BITS_PER_BYTE)
+
+/* Number of bytes for each counter value */
+#define KBASE_HWCNT_VALUE_BYTES (sizeof(u32))
+
+/* Number of bits in an availability mask (i.e. max total number of block
+ * instances supported in a Hardware Counter System)
+ */
+#define KBASE_HWCNT_AVAIL_MASK_BITS (sizeof(u64) * BITS_PER_BYTE)
+
+/**
+ * struct kbase_hwcnt_block_description - Description of one or more identical,
+ *                                        contiguous, Hardware Counter Blocks.
+ * @type:     The arbitrary identifier used to identify the type of the block.
+ * @inst_cnt: The number of Instances of the block.
+ * @hdr_cnt:  The number of 32-bit Block Headers in the block.
+ * @ctr_cnt:  The number of 32-bit Block Counters in the block.
+ */
+struct kbase_hwcnt_block_description {
+	u64 type;
+	size_t inst_cnt;
+	size_t hdr_cnt;
+	size_t ctr_cnt;
+};
+
+/**
+ * struct kbase_hwcnt_group_description - Description of one or more identical,
+ *                                        contiguous Hardware Counter Groups.
+ * @type:    The arbitrary identifier used to identify the type of the group.
+ * @blk_cnt: The number of types of Hardware Counter Block in the group.
+ * @blks:    Non-NULL pointer to an array of blk_cnt block descriptions,
+ *           describing each type of Hardware Counter Block in the group.
+ */
+struct kbase_hwcnt_group_description {
+	u64 type;
+	size_t blk_cnt;
+	const struct kbase_hwcnt_block_description *blks;
+};
+
+/**
+ * struct kbase_hwcnt_description - Description of a Hardware Counter System.
+ * @grp_cnt:    The number of Hardware Counter Groups.
+ * @grps:       Non-NULL pointer to an array of grp_cnt group descriptions,
+ *              describing each Hardware Counter Group in the system.
+ * @avail_mask: Flat Availability Mask for all block instances in the system.
+ */
+struct kbase_hwcnt_description {
+	size_t grp_cnt;
+	const struct kbase_hwcnt_group_description *grps;
+	u64 avail_mask;
+};
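A hypothetical single-group layout, shown only to illustrate how the description structs nest. The type identifiers, counts and availability mask below are made up; real layouts are built by kbase_hwcnt_gpu_metadata_create() from the GPU's properties.

static const struct kbase_hwcnt_block_description example_blks[] = {
	/* One front-end style block: 4 headers + 60 counters, 1 instance. */
	{ .type = 0x1, .inst_cnt = 1, .hdr_cnt = 4, .ctr_cnt = 60 },
	/* One shader-core style block type with 4 instances. */
	{ .type = 0x2, .inst_cnt = 4, .hdr_cnt = 4, .ctr_cnt = 60 },
};

static const struct kbase_hwcnt_group_description example_grp = {
	.type = 0x10,
	.blk_cnt = ARRAY_SIZE(example_blks),
	.blks = example_blks,
};

static const struct kbase_hwcnt_description example_desc = {
	.grp_cnt = 1,
	.grps = &example_grp,
	.avail_mask = 0x1F, /* all 1 + 4 = 5 block instances present */
};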
+
+/**
+ * struct kbase_hwcnt_block_metadata - Metadata describing the physical layout
+ *                                     of a block in a Hardware Counter System's
+ *                                     Dump Buffers and Enable Maps.
+ * @type:              The arbitrary identifier used to identify the type of the
+ *                     block.
+ * @inst_cnt:          The number of Instances of the block.
+ * @hdr_cnt:           The number of 32-bit Block Headers in the block.
+ * @ctr_cnt:           The number of 32-bit Block Counters in the block.
+ * @enable_map_index:  Index in u64s into the parent's Enable Map where the
+ *                     Enable Map bitfields of the Block Instances described by
+ *                     this metadata start.
+ * @enable_map_stride: Stride in u64s between the Enable Maps of each of the
+ *                     Block Instances described by this metadata.
+ * @dump_buf_index:    Index in u32s into the parent's Dump Buffer where the
+ *                     Dump Buffers of the Block Instances described by this
+ *                     metadata start.
+ * @dump_buf_stride:   Stride in u32s between the Dump Buffers of each of the
+ *                     Block Instances described by this metadata.
+ * @avail_mask_index:  Index in bits into the parent's Availability Mask where
+ *                     the Availability Masks of the Block Instances described
+ *                     by this metadata start.
+ */
+struct kbase_hwcnt_block_metadata {
+	u64 type;
+	size_t inst_cnt;
+	size_t hdr_cnt;
+	size_t ctr_cnt;
+	size_t enable_map_index;
+	size_t enable_map_stride;
+	size_t dump_buf_index;
+	size_t dump_buf_stride;
+	size_t avail_mask_index;
+};
+
+/**
+ * struct kbase_hwcnt_group_metadata - Metadata describing the physical layout
+ *                                     of a group of blocks in a Hardware
+ *                                     Counter System's Dump Buffers and Enable
+ *                                     Maps.
+ * @type:             The arbitrary identifier used to identify the type of the
+ *                    group.
+ * @blk_cnt:          The number of types of Hardware Counter Block in the
+ *                    group.
+ * @blk_metadata:     Non-NULL pointer to an array of blk_cnt block metadata,
+ *                    describing the physical layout of each type of Hardware
+ *                    Counter Block in the group.
+ * @enable_map_index: Index in u64s into the parent's Enable Map where the
+ *                    Enable Maps of the blocks within the group described by
+ *                    this metadata start.
+ * @dump_buf_index:   Index in u32s into the parent's Dump Buffer where the
+ *                    Dump Buffers of the blocks within the group described by
+ *                    this metadata start.
+ * @avail_mask_index: Index in bits into the parent's Availability Mask where
+ *                    the Availability Masks of the blocks within the group
+ *                    described by this metadata start.
+ */
+struct kbase_hwcnt_group_metadata {
+	u64 type;
+	size_t blk_cnt;
+	const struct kbase_hwcnt_block_metadata *blk_metadata;
+	size_t enable_map_index;
+	size_t dump_buf_index;
+	size_t avail_mask_index;
+};
+
+/**
+ * struct kbase_hwcnt_metadata - Metadata describing the physical layout
+ *                               of Dump Buffers and Enable Maps within a
+ *                               Hardware Counter System.
+ * @grp_cnt:          The number of Hardware Counter Groups.
+ * @grp_metadata:     Non-NULL pointer to an array of grp_cnt group metadata,
+ *                    describing the physical layout of each Hardware Counter
+ *                    Group in the system.
+ * @enable_map_bytes: The size in bytes of an Enable Map needed for the system.
+ * @dump_buf_bytes:   The size in bytes of a Dump Buffer needed for the system.
+ * @avail_mask:       The Availability Mask for the system.
+ */
+struct kbase_hwcnt_metadata {
+	size_t grp_cnt;
+	const struct kbase_hwcnt_group_metadata *grp_metadata;
+	size_t enable_map_bytes;
+	size_t dump_buf_bytes;
+	u64 avail_mask;
+};
+
+/**
+ * struct kbase_hwcnt_enable_map - Hardware Counter Enable Map. Array of u64
+ *                                 bitfields.
+ * @metadata:   Non-NULL pointer to metadata used to identify, and to describe
+ *              the layout of the enable map.
+ * @enable_map: Non-NULL pointer of size metadata->enable_map_bytes to an array
+ *              of u64 bitfields, each bit of which enables one hardware
+ *              counter.
+ */
+struct kbase_hwcnt_enable_map {
+	const struct kbase_hwcnt_metadata *metadata;
+	u64 *enable_map;
+};
+
+/**
+ * struct kbase_hwcnt_dump_buffer - Hardware Counter Dump Buffer. Array of u32
+ *                                  values.
+ * @metadata: Non-NULL pointer to metadata used to identify, and to describe
+ *            the layout of the Dump Buffer.
+ * @dump_buf: Non-NULL pointer of size metadata->dump_buf_bytes to an array
+ *            of u32 values.
+ */
+struct kbase_hwcnt_dump_buffer {
+	const struct kbase_hwcnt_metadata *metadata;
+	u32 *dump_buf;
+};
+
+/**
+ * struct kbase_hwcnt_dump_buffer_array - Hardware Counter Dump Buffer array.
+ * @page_addr:  Address of allocated pages. A single allocation is used for all
+ *              Dump Buffers in the array.
+ * @page_order: The allocation order of the pages.
+ * @buf_cnt:    The number of allocated Dump Buffers.
+ * @bufs:       Non-NULL pointer to the array of Dump Buffers.
+ */
+struct kbase_hwcnt_dump_buffer_array {
+	unsigned long page_addr;
+	unsigned int page_order;
+	size_t buf_cnt;
+	struct kbase_hwcnt_dump_buffer *bufs;
+};
+
+/**
+ * kbase_hwcnt_metadata_create() - Create a hardware counter metadata object
+ *                                 from a description.
+ * @desc:     Non-NULL pointer to a hardware counter description.
+ * @metadata: Non-NULL pointer to where created metadata will be stored on
+ *            success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_metadata_create(
+	const struct kbase_hwcnt_description *desc,
+	const struct kbase_hwcnt_metadata **metadata);
+
+/**
+ * kbase_hwcnt_metadata_destroy() - Destroy a hardware counter metadata object.
+ * @metadata: Pointer to hardware counter metadata
+ */
+void kbase_hwcnt_metadata_destroy(const struct kbase_hwcnt_metadata *metadata);
+
+/**
+ * kbase_hwcnt_metadata_group_count() - Get the number of groups.
+ * @metadata: Non-NULL pointer to metadata.
+ *
+ * Return: Number of hardware counter groups described by metadata.
+ */
+#define kbase_hwcnt_metadata_group_count(metadata) \
+	((metadata)->grp_cnt)
+
+/**
+ * kbase_hwcnt_metadata_group_type() - Get the arbitrary type of a group.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ *
+ * Return: Type of the group grp.
+ */
+#define kbase_hwcnt_metadata_group_type(metadata, grp) \
+	((metadata)->grp_metadata[(grp)].type)
+
+/**
+ * kbase_hwcnt_metadata_block_count() - Get the number of blocks in a group.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ *
+ * Return: Number of blocks in group grp.
+ */
+#define kbase_hwcnt_metadata_block_count(metadata, grp) \
+	((metadata)->grp_metadata[(grp)].blk_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_type() - Get the arbitrary type of a block.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Type of the block blk in group grp.
+ */
+#define kbase_hwcnt_metadata_block_type(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].type)
+
+/**
+ * kbase_hwcnt_metadata_block_instance_count() - Get the number of instances of
+ *                                               a block.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of instances of block blk in group grp.
+ */
+#define kbase_hwcnt_metadata_block_instance_count(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].inst_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_headers_count() - Get the number of counter
+ *                                              headers.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of u32 counter headers in each instance of block blk in
+ *         group grp.
+ */
+#define kbase_hwcnt_metadata_block_headers_count(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].hdr_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_counters_count() - Get the number of counters.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of u32 counters in each instance of block blk in group
+ *         grp.
+ */
+#define kbase_hwcnt_metadata_block_counters_count(metadata, grp, blk) \
+	((metadata)->grp_metadata[(grp)].blk_metadata[(blk)].ctr_cnt)
+
+/**
+ * kbase_hwcnt_metadata_block_values_count() - Get the number of values.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: Number of u32 headers plus counters in each instance of block blk
+ *         in group grp.
+ */
+#define kbase_hwcnt_metadata_block_values_count(metadata, grp, blk) \
+	(kbase_hwcnt_metadata_block_counters_count((metadata), (grp), (blk)) \
+	+ kbase_hwcnt_metadata_block_headers_count((metadata), (grp), (blk)))
+
+/**
+ * kbase_hwcnt_metadata_for_each_block() - Iterate over each block instance in
+ *                                         the metadata.
+ * @md:       Non-NULL pointer to metadata.
+ * @grp:      size_t variable used as group iterator.
+ * @blk:      size_t variable used as block iterator.
+ * @blk_inst: size_t variable used as block instance iterator.
+ *
+ * Iteration order is group, then block, then block instance (i.e. linearly
+ * through memory).
+ */
+#define kbase_hwcnt_metadata_for_each_block(md, grp, blk, blk_inst) \
+	for ((grp) = 0; (grp) < kbase_hwcnt_metadata_group_count((md)); (grp)++) \
+		for ((blk) = 0; (blk) < kbase_hwcnt_metadata_block_count((md), (grp)); (blk)++) \
+			for ((blk_inst) = 0; (blk_inst) < kbase_hwcnt_metadata_block_instance_count((md), (grp), (blk)); (blk_inst)++)
+
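+/*
+ * Usage sketch (illustrative only, not part of the driver): count how many
+ * block instances described by some metadata are actually available. The
+ * "metadata" and "avail" variables here are hypothetical.
+ *
+ *	size_t grp, blk, blk_inst;
+ *	size_t avail = 0;
+ *
+ *	kbase_hwcnt_metadata_for_each_block(metadata, grp, blk, blk_inst) {
+ *		if (kbase_hwcnt_metadata_block_instance_avail(
+ *			metadata, grp, blk, blk_inst))
+ *			avail++;
+ *	}
+ */
+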
+/**
+ * kbase_hwcnt_metadata_block_avail_bit() - Get the bit index into the avail
+ *                                          mask corresponding to the block.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ *
+ * Return: The bit index into the avail mask for the block.
+ */
+static inline size_t kbase_hwcnt_metadata_block_avail_bit(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t grp,
+	size_t blk)
+{
+	const size_t bit =
+		metadata->grp_metadata[grp].avail_mask_index +
+		metadata->grp_metadata[grp].blk_metadata[blk].avail_mask_index;
+
+	return bit;
+}
+
+/**
+ * kbase_hwcnt_metadata_block_instance_avail() - Check if a block instance is
+ *                                               available.
+ * @metadata: Non-NULL pointer to metadata.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ *
+ * Return: true if the block instance is available, else false.
+ */
+static inline bool kbase_hwcnt_metadata_block_instance_avail(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	const size_t bit = kbase_hwcnt_metadata_block_avail_bit(
+		metadata, grp, blk) + blk_inst;
+	const u64 mask = 1ull << bit;
+
+	return (metadata->avail_mask & mask) != 0;
+}
+
+/**
+ * kbase_hwcnt_enable_map_alloc() - Allocate an enable map.
+ * @metadata:   Non-NULL pointer to metadata describing the system.
+ * @enable_map: Non-NULL pointer to enable map to be initialised. Will be
+ *              initialised to all zeroes (i.e. all counters disabled).
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_enable_map_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * kbase_hwcnt_enable_map_free() - Free an enable map.
+ * @enable_map: Enable map to be freed.
+ *
+ * Can be safely called on an all-zeroed enable map structure, or on an already
+ * freed enable map.
+ */
+void kbase_hwcnt_enable_map_free(struct kbase_hwcnt_enable_map *enable_map);
+
+/**
+ * kbase_hwcnt_enable_map_block_instance() - Get the pointer to a block
+ *                                           instance's enable map.
+ * @map:      Non-NULL pointer to (const) enable map.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ *
+ * Return: (const) u64* to the bitfield(s) used as the enable map for the
+ *         block instance.
+ */
+#define kbase_hwcnt_enable_map_block_instance(map, grp, blk, blk_inst) \
+	((map)->enable_map + \
+	 (map)->metadata->grp_metadata[(grp)].enable_map_index + \
+	 (map)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].enable_map_index + \
+	 (map)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].enable_map_stride * (blk_inst))
+
+/**
+ * kbase_hwcnt_bitfield_count() - Calculate the number of u64 bitfields required
+ *                                to have at minimum one bit per value.
+ * @val_cnt: Number of values.
+ *
+ * Return: Number of required bitfields.
+ */
+static inline size_t kbase_hwcnt_bitfield_count(size_t val_cnt)
+{
+	return (val_cnt + KBASE_HWCNT_BITFIELD_BITS - 1) /
+		KBASE_HWCNT_BITFIELD_BITS;
+}
+
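+/*
+ * For example (assuming KBASE_HWCNT_BITFIELD_BITS is 64, i.e. one u64 per
+ * bitfield): a block instance with 64 values needs exactly one bitfield,
+ * while one with 65 values needs two.
+ */
+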
+/**
+ * kbase_hwcnt_enable_map_block_disable_all() - Disable all values in a block.
+ * @dst:      Non-NULL pointer to enable map.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ */
+static inline void kbase_hwcnt_enable_map_block_disable_all(
+	struct kbase_hwcnt_enable_map *dst,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	const size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+		dst->metadata, grp, blk);
+	const size_t bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);
+	u64 *block_enable_map = kbase_hwcnt_enable_map_block_instance(
+		dst, grp, blk, blk_inst);
+
+	memset(block_enable_map, 0, bitfld_cnt * KBASE_HWCNT_BITFIELD_BYTES);
+}
+
+/**
+ * kbase_hwcnt_enable_map_disable_all() - Disable all values in the enable map.
+ * @dst: Non-NULL pointer to enable map to zero.
+ */
+static inline void kbase_hwcnt_enable_map_disable_all(
+	struct kbase_hwcnt_enable_map *dst)
+{
+	memset(dst->enable_map, 0, dst->metadata->enable_map_bytes);
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_enable_all() - Enable all values in a block.
+ * @dst:      Non-NULL pointer to enable map.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ */
+static inline void kbase_hwcnt_enable_map_block_enable_all(
+	struct kbase_hwcnt_enable_map *dst,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	const size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+		dst->metadata, grp, blk);
+	const size_t bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);
+	u64 *block_enable_map = kbase_hwcnt_enable_map_block_instance(
+		dst, grp, blk, blk_inst);
+
+	size_t bitfld_idx;
+
+	for (bitfld_idx = 0; bitfld_idx < bitfld_cnt; bitfld_idx++) {
+		const u64 remaining_values = val_cnt -
+			(bitfld_idx * KBASE_HWCNT_BITFIELD_BITS);
+		u64 block_enable_map_mask = U64_MAX;
+
+		if (remaining_values < KBASE_HWCNT_BITFIELD_BITS)
+			block_enable_map_mask = (1ull << remaining_values) - 1;
+
+		block_enable_map[bitfld_idx] = block_enable_map_mask;
+	}
+}
+
+/**
+ * kbase_hwcnt_enable_map_enable_all() - Enable all values in an enable map.
+ * @dst: Non-NULL pointer to enable map.
+ */
+static inline void kbase_hwcnt_enable_map_enable_all(
+	struct kbase_hwcnt_enable_map *dst)
+{
+	size_t grp, blk, blk_inst;
+
+	kbase_hwcnt_metadata_for_each_block(dst->metadata, grp, blk, blk_inst)
+		kbase_hwcnt_enable_map_block_enable_all(
+			dst, grp, blk, blk_inst);
+}
+
+/**
+ * kbase_hwcnt_enable_map_copy() - Copy an enable map to another.
+ * @dst: Non-NULL pointer to destination enable map.
+ * @src: Non-NULL pointer to source enable map.
+ *
+ * The dst and src MUST have been created from the same metadata.
+ */
+static inline void kbase_hwcnt_enable_map_copy(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src)
+{
+	memcpy(dst->enable_map,
+	       src->enable_map,
+	       dst->metadata->enable_map_bytes);
+}
+
+/**
+ * kbase_hwcnt_enable_map_union() - Union dst and src enable maps into dst.
+ * @dst: Non-NULL pointer to destination enable map.
+ * @src: Non-NULL pointer to source enable map.
+ *
+ * The dst and src MUST have been created from the same metadata.
+ */
+static inline void kbase_hwcnt_enable_map_union(
+	struct kbase_hwcnt_enable_map *dst,
+	const struct kbase_hwcnt_enable_map *src)
+{
+	const size_t bitfld_count =
+		dst->metadata->enable_map_bytes / KBASE_HWCNT_BITFIELD_BYTES;
+	size_t i;
+
+	for (i = 0; i < bitfld_count; i++)
+		dst->enable_map[i] |= src->enable_map[i];
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_enabled() - Check if any values in a block
+ *                                          instance are enabled.
+ * @enable_map: Non-NULL pointer to enable map.
+ * @grp:        Index of the group in the metadata.
+ * @blk:        Index of the block in the group.
+ * @blk_inst:   Index of the block instance in the block.
+ *
+ * Return: true if any values in the block are enabled, else false.
+ */
+static inline bool kbase_hwcnt_enable_map_block_enabled(
+	const struct kbase_hwcnt_enable_map *enable_map,
+	size_t grp,
+	size_t blk,
+	size_t blk_inst)
+{
+	bool any_enabled = false;
+	const size_t val_cnt = kbase_hwcnt_metadata_block_values_count(
+		enable_map->metadata, grp, blk);
+	const size_t bitfld_cnt = kbase_hwcnt_bitfield_count(val_cnt);
+	const u64 *block_enable_map = kbase_hwcnt_enable_map_block_instance(
+		enable_map, grp, blk, blk_inst);
+
+	size_t bitfld_idx;
+
+	for (bitfld_idx = 0; bitfld_idx < bitfld_cnt; bitfld_idx++) {
+		const u64 remaining_values = val_cnt -
+			(bitfld_idx * KBASE_HWCNT_BITFIELD_BITS);
+		u64 block_enable_map_mask = U64_MAX;
+
+		if (remaining_values < KBASE_HWCNT_BITFIELD_BITS)
+			block_enable_map_mask = (1ull << remaining_values) - 1;
+
+		any_enabled = any_enabled ||
+			(block_enable_map[bitfld_idx] & block_enable_map_mask);
+	}
+
+	return any_enabled;
+}
+
+/**
+ * kbase_hwcnt_enable_map_any_enabled() - Check if any values are enabled.
+ * @enable_map: Non-NULL pointer to enable map.
+ *
+ * Return: true if any values are enabled, else false.
+ */
+static inline bool kbase_hwcnt_enable_map_any_enabled(
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	size_t grp, blk, blk_inst;
+
+	kbase_hwcnt_metadata_for_each_block(
+		enable_map->metadata, grp, blk, blk_inst) {
+		if (kbase_hwcnt_enable_map_block_enabled(
+			enable_map, grp, blk, blk_inst))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_value_enabled() - Check if a value in a block
+ *                                                instance is enabled.
+ * @bitfld:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_idx: Index of the value to check in the block instance.
+ *
+ * Return: true if the value was enabled, else false.
+ */
+static inline bool kbase_hwcnt_enable_map_block_value_enabled(
+	const u64 *bitfld,
+	size_t val_idx)
+{
+	const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
+	const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
+	const u64 mask = 1ull << bit;
+
+	return (bitfld[idx] & mask) != 0;
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_enable_value() - Enable a value in a block
+ *                                               instance.
+ * @bitfld:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_idx: Index of the value to enable in the block instance.
+ */
+static inline void kbase_hwcnt_enable_map_block_enable_value(
+	u64 *bitfld,
+	size_t val_idx)
+{
+	const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
+	const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
+	const u64 mask = 1ull << bit;
+
+	bitfld[idx] |= mask;
+}
+
+/**
+ * kbase_hwcnt_enable_map_block_disable_value() - Disable a value in a block
+ *                                                instance.
+ * @bitfld:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_idx: Index of the value to disable in the block instance.
+ */
+static inline void kbase_hwcnt_enable_map_block_disable_value(
+	u64 *bitfld,
+	size_t val_idx)
+{
+	const size_t idx = val_idx / KBASE_HWCNT_BITFIELD_BITS;
+	const size_t bit = val_idx % KBASE_HWCNT_BITFIELD_BITS;
+	const u64 mask = 1ull << bit;
+
+	bitfld[idx] &= ~mask;
+}
+
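+/*
+ * Usage sketch (illustrative only): toggling a single value in one block
+ * instance of an enable map. The indices used here are hypothetical.
+ *
+ *	u64 *blk_em = kbase_hwcnt_enable_map_block_instance(
+ *		map, grp, blk, blk_inst);
+ *
+ *	kbase_hwcnt_enable_map_block_enable_value(blk_em, 4);
+ *	WARN_ON(!kbase_hwcnt_enable_map_block_value_enabled(blk_em, 4));
+ *	kbase_hwcnt_enable_map_block_disable_value(blk_em, 4);
+ */
+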
+/**
+ * kbase_hwcnt_dump_buffer_alloc() - Allocate a dump buffer.
+ * @metadata: Non-NULL pointer to metadata describing the system.
+ * @dump_buf: Non-NULL pointer to dump buffer to be initialised. Will be
+ *            initialised to undefined values, so must be used as a copy dest,
+ *            or cleared before use.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_dump_buffer_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_dump_buffer_free() - Free a dump buffer.
+ * @dump_buf: Dump buffer to be freed.
+ *
+ * Can be safely called on an all-zeroed dump buffer structure, or on an already
+ * freed dump buffer.
+ */
+void kbase_hwcnt_dump_buffer_free(struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_dump_buffer_array_alloc() - Allocate an array of dump buffers.
+ * @metadata:  Non-NULL pointer to metadata describing the system.
+ * @n:         Number of dump buffers to allocate
+ * @dump_bufs: Non-NULL pointer to dump buffer array to be initialised. Each
+ *             dump buffer in the array will be initialised to undefined values,
+ *             so must be used as a copy dest, or cleared before use.
+ *
+ * A single contiguous page allocation will be used for all of the buffers
+ * inside the array, where:
+ * dump_bufs[n].dump_buf == page_addr + n * metadata.dump_buf_bytes
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_dump_buffer_array_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	size_t n,
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs);
+
+/**
+ * kbase_hwcnt_dump_buffer_array_free() - Free a dump buffer array.
+ * @dump_bufs: Dump buffer array to be freed.
+ *
+ * Can be safely called on an all-zeroed dump buffer array structure, or on an
+ * already freed dump buffer array.
+ */
+void kbase_hwcnt_dump_buffer_array_free(
+	struct kbase_hwcnt_dump_buffer_array *dump_bufs);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_instance() - Get the pointer to a block
+ *                                            instance's dump buffer.
+ * @buf:      Non-NULL pointer to (const) dump buffer.
+ * @grp:      Index of the group in the metadata.
+ * @blk:      Index of the block in the group.
+ * @blk_inst: Index of the block instance in the block.
+ *
+ * Return: (const) u32* to the dump buffer for the block instance.
+ */
+#define kbase_hwcnt_dump_buffer_block_instance(buf, grp, blk, blk_inst) \
+	((buf)->dump_buf + \
+	 (buf)->metadata->grp_metadata[(grp)].dump_buf_index + \
+	 (buf)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].dump_buf_index + \
+	 (buf)->metadata->grp_metadata[(grp)].blk_metadata[(blk)].dump_buf_stride * (blk_inst))
+
+/**
+ * kbase_hwcnt_dump_buffer_zero() - Zero all enabled values in dst.
+ *                                  After the operation, all non-enabled values
+ *                                  will be undefined.
+ * @dst:            Non-NULL pointer to dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst and dst_enable_map MUST have been created from the same metadata.
+ */
+void kbase_hwcnt_dump_buffer_zero(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_zero() - Zero all values in a block.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @val_cnt: Number of values in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_zero(
+	u32 *dst_blk,
+	size_t val_cnt)
+{
+	memset(dst_blk, 0, (val_cnt * KBASE_HWCNT_VALUE_BYTES));
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_zero_strict() - Zero all values in dst.
+ *                                         After the operation, all values
+ *                                         (including padding bytes) will be
+ *                                         zero.
+ *                                         Slower than the non-strict variant.
+ * @dst: Non-NULL pointer to dump buffer.
+ */
+void kbase_hwcnt_dump_buffer_zero_strict(
+	struct kbase_hwcnt_dump_buffer *dst);
+
+/**
+ * kbase_hwcnt_dump_buffer_zero_non_enabled() - Zero all non-enabled values in
+ *                                              dst (including padding bytes and
+ *                                              unavailable blocks).
+ *                                              After the operation, all enabled
+ *                                              values will be unchanged.
+ * @dst:            Non-NULL pointer to dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst and dst_enable_map MUST have been created from the same metadata.
+ */
+void kbase_hwcnt_dump_buffer_zero_non_enabled(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_zero_non_enabled() - Zero all non-enabled
+ *                                                    values in a block.
+ *                                                    After the operation, all
+ *                                                    enabled values will be
+ *                                                    unchanged.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @blk_em:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_cnt: Number of values in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_zero_non_enabled(
+	u32 *dst_blk,
+	const u64 *blk_em,
+	size_t val_cnt)
+{
+	size_t val;
+
+	for (val = 0; val < val_cnt; val++) {
+		if (!kbase_hwcnt_enable_map_block_value_enabled(blk_em, val))
+			dst_blk[val] = 0;
+	}
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_copy() - Copy all enabled values from src to dst.
+ *                                  After the operation, all non-enabled values
+ *                                  will be undefined.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_copy(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_copy() - Copy all block values from src to dst.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @val_cnt: Number of values in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_copy(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	size_t val_cnt)
+{
+	/* Copy all the counters in the block instance.
+	 * Values of non-enabled counters are undefined.
+	 */
+	memcpy(dst_blk, src_blk, (val_cnt * KBASE_HWCNT_VALUE_BYTES));
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_copy_strict() - Copy all enabled values from src to
+ *                                         dst.
+ *                                         After the operation, all non-enabled
+ *                                         values (including padding bytes) will
+ *                                         be zero.
+ *                                         Slower than the non-strict variant.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_copy_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_copy_strict() - Copy all enabled block values
+ *                                               from src to dst.
+ *                                               After the operation, all
+ *                                               non-enabled values will be
+ *                                               zero.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @blk_em:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @val_cnt: Number of values in the block.
+ *
+ * After the copy, any disabled values in dst will be zero.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_copy_strict(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	const u64 *blk_em,
+	size_t val_cnt)
+{
+	size_t val;
+
+	for (val = 0; val < val_cnt; val++) {
+		bool val_enabled = kbase_hwcnt_enable_map_block_value_enabled(
+			blk_em, val);
+
+		dst_blk[val] = val_enabled ? src_blk[val] : 0;
+	}
+}
+
+/**
+ * kbase_hwcnt_dump_buffer_accumulate() - Copy all enabled headers and
+ *                                        accumulate all enabled counters from
+ *                                        src to dst.
+ *                                        After the operation, all non-enabled
+ *                                        values will be undefined.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_accumulate(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_accumulate() - Copy all block headers and
+ *                                              accumulate all block counters
+ *                                              from src to dst.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @hdr_cnt: Number of headers in the block.
+ * @ctr_cnt: Number of counters in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_accumulate(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	size_t hdr_cnt,
+	size_t ctr_cnt)
+{
+	size_t ctr;
+	/* Copy all the headers in the block instance.
+	 * Values of non-enabled headers are undefined.
+	 */
+	memcpy(dst_blk, src_blk, hdr_cnt * KBASE_HWCNT_VALUE_BYTES);
+
+	/* Accumulate all the counters in the block instance.
+	 * Values of non-enabled counters are undefined.
+	 */
+	for (ctr = hdr_cnt; ctr < ctr_cnt + hdr_cnt; ctr++) {
+		u32 *dst_ctr = dst_blk + ctr;
+		const u32 *src_ctr = src_blk + ctr;
+
+		const u32 src_counter = *src_ctr;
+		const u32 dst_counter = *dst_ctr;
+
+		/* Saturating add */
+		u32 accumulated = src_counter + dst_counter;
+
+		if (accumulated < src_counter)
+			accumulated = U32_MAX;
+
+		*dst_ctr = accumulated;
+	}
+}
+
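+/*
+ * Worked example of the saturating add above (illustrative): if the dst
+ * counter holds 0xFFFFFFF0 and the src counter is 0x20, the 32-bit sum wraps
+ * to 0x10, which is smaller than the src value, so the result is clamped to
+ * U32_MAX rather than stored as the wrapped value.
+ */
+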
+/**
+ * kbase_hwcnt_dump_buffer_accumulate_strict() - Copy all enabled headers and
+ *                                               accumulate all enabled counters
+ *                                               from src to dst.
+ *                                               After the operation, all
+ *                                               non-enabled values (including
+ *                                               padding bytes) will be zero.
+ *                                               Slower than the non-strict
+ *                                               variant.
+ * @dst:            Non-NULL pointer to dst dump buffer.
+ * @src:            Non-NULL pointer to src dump buffer.
+ * @dst_enable_map: Non-NULL pointer to enable map specifying enabled values.
+ *
+ * The dst, src, and dst_enable_map MUST have been created from the same
+ * metadata.
+ */
+void kbase_hwcnt_dump_buffer_accumulate_strict(
+	struct kbase_hwcnt_dump_buffer *dst,
+	const struct kbase_hwcnt_dump_buffer *src,
+	const struct kbase_hwcnt_enable_map *dst_enable_map);
+
+/**
+ * kbase_hwcnt_dump_buffer_block_accumulate_strict() - Copy all enabled block
+ *                                                     headers and accumulate
+ *                                                     all block counters from
+ *                                                     src to dst.
+ *                                                     After the operation, all
+ *                                                     non-enabled values will
+ *                                                     be zero.
+ * @dst_blk: Non-NULL pointer to dst block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @src_blk: Non-NULL pointer to src block obtained from a call to
+ *           kbase_hwcnt_dump_buffer_block_instance.
+ * @blk_em:  Non-NULL pointer to the block bitfield(s) obtained from a call to
+ *           kbase_hwcnt_enable_map_block_instance.
+ * @hdr_cnt: Number of headers in the block.
+ * @ctr_cnt: Number of counters in the block.
+ */
+static inline void kbase_hwcnt_dump_buffer_block_accumulate_strict(
+	u32 *dst_blk,
+	const u32 *src_blk,
+	const u64 *blk_em,
+	size_t hdr_cnt,
+	size_t ctr_cnt)
+{
+	size_t ctr;
+
+	kbase_hwcnt_dump_buffer_block_copy_strict(
+		dst_blk, src_blk, blk_em, hdr_cnt);
+
+	for (ctr = hdr_cnt; ctr < ctr_cnt + hdr_cnt; ctr++) {
+		bool ctr_enabled = kbase_hwcnt_enable_map_block_value_enabled(
+			blk_em, ctr);
+
+		u32 *dst_ctr = dst_blk + ctr;
+		const u32 *src_ctr = src_blk + ctr;
+
+		const u32 src_counter = *src_ctr;
+		const u32 dst_counter = *dst_ctr;
+
+		/* Saturating add */
+		u32 accumulated = src_counter + dst_counter;
+
+		if (accumulated < src_counter)
+			accumulated = U32_MAX;
+
+		*dst_ctr = ctr_enabled ? accumulated : 0;
+	}
+}
+
+#endif /* _KBASE_HWCNT_TYPES_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.c b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.c
new file mode 100644
index 0000000..26e98521
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.c
@@ -0,0 +1,688 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_accumulator.h"
+#include "mali_kbase_hwcnt_context.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_malisw.h"
+#include "mali_kbase_debug.h"
+#include "mali_kbase_linux.h"
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+/**
+ * struct kbase_hwcnt_virtualizer - Hardware counter virtualizer structure.
+ * @hctx:         Hardware counter context being virtualized.
+ * @metadata:     Hardware counter metadata.
+ * @lock:         Lock acquired at all entrypoints, to protect mutable state.
+ * @client_count: Current number of virtualizer clients.
+ * @clients:      List of virtualizer clients.
+ * @accum:        Hardware counter accumulator. NULL if no clients.
+ * @scratch_map:  Enable map used as scratch space during counter changes.
+ * @scratch_buf:  Dump buffer used as scratch space during dumps.
+ */
+struct kbase_hwcnt_virtualizer {
+	struct kbase_hwcnt_context *hctx;
+	const struct kbase_hwcnt_metadata *metadata;
+	struct mutex lock;
+	size_t client_count;
+	struct list_head clients;
+	struct kbase_hwcnt_accumulator *accum;
+	struct kbase_hwcnt_enable_map scratch_map;
+	struct kbase_hwcnt_dump_buffer scratch_buf;
+};
+
+/**
+ * struct kbase_hwcnt_virtualizer_client - Virtualizer client structure.
+ * @node:        List node used for virtualizer client list.
+ * @hvirt:       Hardware counter virtualizer.
+ * @enable_map:  Enable map with client's current enabled counters.
+ * @accum_buf:   Dump buffer with client's current accumulated counters.
+ * @has_accum:   True if accum_buf contains any accumulated counters.
+ * @ts_start_ns: Counter collection start time of current dump.
+ */
+struct kbase_hwcnt_virtualizer_client {
+	struct list_head node;
+	struct kbase_hwcnt_virtualizer *hvirt;
+	struct kbase_hwcnt_enable_map enable_map;
+	struct kbase_hwcnt_dump_buffer accum_buf;
+	bool has_accum;
+	u64 ts_start_ns;
+};
+
+const struct kbase_hwcnt_metadata *kbase_hwcnt_virtualizer_metadata(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	if (!hvirt)
+		return NULL;
+
+	return hvirt->metadata;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_metadata);
+
+/**
+ * kbasep_hwcnt_virtualizer_client_free - Free a virtualizer client's memory.
+ * @hvcli: Pointer to virtualizer client.
+ *
+ * Will safely free a client in any partial state of construction.
+ */
+static void kbasep_hwcnt_virtualizer_client_free(
+	struct kbase_hwcnt_virtualizer_client *hvcli)
+{
+	if (!hvcli)
+		return;
+
+	kbase_hwcnt_dump_buffer_free(&hvcli->accum_buf);
+	kbase_hwcnt_enable_map_free(&hvcli->enable_map);
+	kfree(hvcli);
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_alloc - Allocate memory for a virtualizer
+ *                                         client.
+ * @metadata:  Non-NULL pointer to counter metadata.
+ * @out_hvcli: Non-NULL pointer to where created client will be stored on
+ *             success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_alloc(
+	const struct kbase_hwcnt_metadata *metadata,
+	struct kbase_hwcnt_virtualizer_client **out_hvcli)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *hvcli = NULL;
+
+	WARN_ON(!metadata);
+	WARN_ON(!out_hvcli);
+
+	hvcli = kzalloc(sizeof(*hvcli), GFP_KERNEL);
+	if (!hvcli)
+		return -ENOMEM;
+
+	errcode = kbase_hwcnt_enable_map_alloc(metadata, &hvcli->enable_map);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(metadata, &hvcli->accum_buf);
+	if (errcode)
+		goto error;
+
+	*out_hvcli = hvcli;
+	return 0;
+error:
+	kbasep_hwcnt_virtualizer_client_free(hvcli);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_accumulate - Accumulate a dump buffer into a
+ *                                              client's accumulation buffer.
+ * @hvcli:    Non-NULL pointer to virtualizer client.
+ * @dump_buf: Non-NULL pointer to dump buffer to accumulate from.
+ */
+static void kbasep_hwcnt_virtualizer_client_accumulate(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	WARN_ON(!hvcli);
+	WARN_ON(!dump_buf);
+	lockdep_assert_held(&hvcli->hvirt->lock);
+
+	if (hvcli->has_accum) {
+		/* If already some accumulation, accumulate */
+		kbase_hwcnt_dump_buffer_accumulate(
+			&hvcli->accum_buf, dump_buf, &hvcli->enable_map);
+	} else {
+		/* If no accumulation, copy */
+		kbase_hwcnt_dump_buffer_copy(
+			&hvcli->accum_buf, dump_buf, &hvcli->enable_map);
+	}
+	hvcli->has_accum = true;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_accumulator_term - Terminate the hardware counter
+ *                                             accumulator after final client
+ *                                             removal.
+ * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
+ *
+ * Will safely terminate the accumulator in any partial state of initialisation.
+ */
+static void kbasep_hwcnt_virtualizer_accumulator_term(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	WARN_ON(!hvirt);
+	lockdep_assert_held(&hvirt->lock);
+	WARN_ON(hvirt->client_count);
+
+	kbase_hwcnt_dump_buffer_free(&hvirt->scratch_buf);
+	kbase_hwcnt_enable_map_free(&hvirt->scratch_map);
+	kbase_hwcnt_accumulator_release(hvirt->accum);
+	hvirt->accum = NULL;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_accumulator_init - Initialise the hardware counter
+ *                                             accumulator before first client
+ *                                             addition.
+ * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_virtualizer_accumulator_init(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	int errcode;
+
+	WARN_ON(!hvirt);
+	lockdep_assert_held(&hvirt->lock);
+	WARN_ON(hvirt->client_count);
+	WARN_ON(hvirt->accum);
+
+	errcode = kbase_hwcnt_accumulator_acquire(
+		hvirt->hctx, &hvirt->accum);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		hvirt->metadata, &hvirt->scratch_map);
+	if (errcode)
+		goto error;
+
+	errcode = kbase_hwcnt_dump_buffer_alloc(
+		hvirt->metadata, &hvirt->scratch_buf);
+	if (errcode)
+		goto error;
+
+	return 0;
+error:
+	kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_add - Add a newly allocated client to the
+ *                                       virtualizer.
+ * @hvirt:      Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:      Non-NULL pointer to the virtualizer client to add.
+ * @enable_map: Non-NULL pointer to client's initial enable map.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_add(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map)
+{
+	int errcode = 0;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	WARN_ON(!enable_map);
+	lockdep_assert_held(&hvirt->lock);
+
+	if (hvirt->client_count == 0)
+		/* First client added, so initialise the accumulator */
+		errcode = kbasep_hwcnt_virtualizer_accumulator_init(hvirt);
+	if (errcode)
+		return errcode;
+
+	hvirt->client_count += 1;
+
+	if (hvirt->client_count == 1) {
+		/* First client, so just pass the enable map onwards as is */
+		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+			enable_map, &ts_start_ns, &ts_end_ns, NULL);
+	} else {
+		struct kbase_hwcnt_virtualizer_client *pos;
+
+		/* Make the scratch enable map the union of all enable maps */
+		kbase_hwcnt_enable_map_copy(
+			&hvirt->scratch_map, enable_map);
+		list_for_each_entry(pos, &hvirt->clients, node)
+			kbase_hwcnt_enable_map_union(
+				&hvirt->scratch_map, &pos->enable_map);
+
+		/* Set the counters with the new union enable map */
+		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+			&hvirt->scratch_map,
+			&ts_start_ns, &ts_end_ns,
+			&hvirt->scratch_buf);
+		/* Accumulate into only existing clients' accumulation bufs */
+		if (!errcode)
+			list_for_each_entry(pos, &hvirt->clients, node)
+				kbasep_hwcnt_virtualizer_client_accumulate(
+					pos, &hvirt->scratch_buf);
+	}
+	if (errcode)
+		goto error;
+
+	list_add(&hvcli->node, &hvirt->clients);
+	hvcli->hvirt = hvirt;
+	kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);
+	hvcli->has_accum = false;
+	hvcli->ts_start_ns = ts_end_ns;
+
+	return 0;
+error:
+	hvirt->client_count -= 1;
+	if (hvirt->client_count == 0)
+		kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
+	return errcode;
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_remove - Remove a client from the
+ *                                          virtualizer.
+ * @hvirt:      Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:      Non-NULL pointer to the virtualizer client to remove.
+ */
+static void kbasep_hwcnt_virtualizer_client_remove(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli)
+{
+	int errcode = 0;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	lockdep_assert_held(&hvirt->lock);
+
+	list_del(&hvcli->node);
+	hvirt->client_count -= 1;
+
+	if (hvirt->client_count == 0) {
+		/* Last client removed, so terminate the accumulator */
+		kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
+	} else {
+		struct kbase_hwcnt_virtualizer_client *pos;
+		/* Make the scratch enable map the union of all enable maps */
+		kbase_hwcnt_enable_map_disable_all(&hvirt->scratch_map);
+		list_for_each_entry(pos, &hvirt->clients, node)
+			kbase_hwcnt_enable_map_union(
+				&hvirt->scratch_map, &pos->enable_map);
+		/* Set the counters with the new union enable map */
+		errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+			&hvirt->scratch_map,
+			&ts_start_ns, &ts_end_ns,
+			&hvirt->scratch_buf);
+		/* Accumulate into remaining clients' accumulation bufs */
+		if (!errcode)
+			list_for_each_entry(pos, &hvirt->clients, node)
+				kbasep_hwcnt_virtualizer_client_accumulate(
+					pos, &hvirt->scratch_buf);
+	}
+	WARN_ON(errcode);
+}
+
+/**
+ * kbasep_hwcnt_virtualizer_client_set_counters - Perform a dump of the client's
+ *                                                currently enabled counters,
+ *                                                and enable a new set of
+ *                                                counters that will be used for
+ *                                                subsequent dumps.
+ * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @enable_map:  Non-NULL pointer to the new counter enable map for the client.
+ *               Must have the same metadata as the virtualizer.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_set_counters(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *pos;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	WARN_ON(!enable_map);
+	WARN_ON(!ts_start_ns);
+	WARN_ON(!ts_end_ns);
+	WARN_ON(enable_map->metadata != hvirt->metadata);
+	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
+	lockdep_assert_held(&hvirt->lock);
+
+	/* Make the scratch enable map the union of all enable maps */
+	kbase_hwcnt_enable_map_copy(&hvirt->scratch_map, enable_map);
+	list_for_each_entry(pos, &hvirt->clients, node)
+		/* Ignore the enable map of the selected client */
+		if (pos != hvcli)
+			kbase_hwcnt_enable_map_union(
+				&hvirt->scratch_map, &pos->enable_map);
+
+	/* Set the counters with the new union enable map */
+	errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
+		&hvirt->scratch_map, ts_start_ns, ts_end_ns,
+		&hvirt->scratch_buf);
+	if (errcode)
+		return errcode;
+
+	/* Accumulate into all accumulation bufs except the selected client's */
+	list_for_each_entry(pos, &hvirt->clients, node)
+		if (pos != hvcli)
+			kbasep_hwcnt_virtualizer_client_accumulate(
+				pos, &hvirt->scratch_buf);
+
+	/* Finally, write into the dump buf */
+	if (dump_buf) {
+		const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;
+
+		if (hvcli->has_accum) {
+			kbase_hwcnt_dump_buffer_accumulate(
+				&hvcli->accum_buf, src, &hvcli->enable_map);
+			src = &hvcli->accum_buf;
+		}
+		kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
+	}
+	hvcli->has_accum = false;
+
+	/* Update the selected client's enable map */
+	kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);
+
+	/* Fix up the timestamps */
+	*ts_start_ns = hvcli->ts_start_ns;
+	hvcli->ts_start_ns = *ts_end_ns;
+
+	return errcode;
+}
+
+int kbase_hwcnt_virtualizer_client_set_counters(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer *hvirt;
+
+	if (!hvcli || !enable_map || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hvirt = hvcli->hvirt;
+
+	if ((enable_map->metadata != hvirt->metadata) ||
+	    (dump_buf && (dump_buf->metadata != hvirt->metadata)))
+		return -EINVAL;
+
+	mutex_lock(&hvirt->lock);
+
+	if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
+		/*
+		 * If there's only one client with no prior accumulation, we can
+		 * completely skip the virtualize and just pass through the call
+		 * to the accumulator, saving a fair few copies and
+		 * accumulations.
+		 */
+		errcode = kbase_hwcnt_accumulator_set_counters(
+			hvirt->accum, enable_map,
+			ts_start_ns, ts_end_ns, dump_buf);
+
+		if (!errcode) {
+			/* Update the selected client's enable map */
+			kbase_hwcnt_enable_map_copy(
+				&hvcli->enable_map, enable_map);
+
+			/* Fix up the timestamps */
+			*ts_start_ns = hvcli->ts_start_ns;
+			hvcli->ts_start_ns = *ts_end_ns;
+		}
+	} else {
+		/* Otherwise, do the full virtualize */
+		errcode = kbasep_hwcnt_virtualizer_client_set_counters(
+			hvirt, hvcli, enable_map,
+			ts_start_ns, ts_end_ns, dump_buf);
+	}
+
+	mutex_unlock(&hvirt->lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_set_counters);
+
+/**
+ * kbasep_hwcnt_virtualizer_client_dump - Perform a dump of the client's
+ *                                        currently enabled counters.
+ * @hvirt:       Non-NULL pointer to the hardware counter virtualizer.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+static int kbasep_hwcnt_virtualizer_client_dump(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *pos;
+
+	WARN_ON(!hvirt);
+	WARN_ON(!hvcli);
+	WARN_ON(!ts_start_ns);
+	WARN_ON(!ts_end_ns);
+	WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
+	lockdep_assert_held(&hvirt->lock);
+
+	/* Perform the dump */
+	errcode = kbase_hwcnt_accumulator_dump(hvirt->accum,
+		ts_start_ns, ts_end_ns, &hvirt->scratch_buf);
+	if (errcode)
+		return errcode;
+
+	/* Accumulate into all accumulation bufs except the selected client's */
+	list_for_each_entry(pos, &hvirt->clients, node)
+		if (pos != hvcli)
+			kbasep_hwcnt_virtualizer_client_accumulate(
+				pos, &hvirt->scratch_buf);
+
+	/* Finally, write into the dump buf */
+	if (dump_buf) {
+		const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;
+
+		if (hvcli->has_accum) {
+			kbase_hwcnt_dump_buffer_accumulate(
+				&hvcli->accum_buf, src, &hvcli->enable_map);
+			src = &hvcli->accum_buf;
+		}
+		kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
+	}
+	hvcli->has_accum = false;
+
+	/* Fix up the timestamps */
+	*ts_start_ns = hvcli->ts_start_ns;
+	hvcli->ts_start_ns = *ts_end_ns;
+
+	return errcode;
+}
+
+int kbase_hwcnt_virtualizer_client_dump(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer *hvirt;
+
+	if (!hvcli || !ts_start_ns || !ts_end_ns)
+		return -EINVAL;
+
+	hvirt = hvcli->hvirt;
+
+	if (dump_buf && (dump_buf->metadata != hvirt->metadata))
+		return -EINVAL;
+
+	mutex_lock(&hvirt->lock);
+
+	if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
+		/*
+		 * If there's only one client with no prior accumulation, we can
+		 * completely skip the virtualize and just pass through the call
+		 * to the accumulator, saving a fair few copies and
+		 * accumulations.
+		 */
+		errcode = kbase_hwcnt_accumulator_dump(
+			hvirt->accum, ts_start_ns, ts_end_ns, dump_buf);
+
+		if (!errcode) {
+			/* Fix up the timestamps */
+			*ts_start_ns = hvcli->ts_start_ns;
+			hvcli->ts_start_ns = *ts_end_ns;
+		}
+	} else {
+		/* Otherwise, do the full virtualize */
+		errcode = kbasep_hwcnt_virtualizer_client_dump(
+			hvirt, hvcli, ts_start_ns, ts_end_ns, dump_buf);
+	}
+
+	mutex_unlock(&hvirt->lock);
+
+	return errcode;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_dump);
+
+int kbase_hwcnt_virtualizer_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	struct kbase_hwcnt_virtualizer_client **out_hvcli)
+{
+	int errcode;
+	struct kbase_hwcnt_virtualizer_client *hvcli;
+
+	if (!hvirt || !enable_map || !out_hvcli ||
+	    (enable_map->metadata != hvirt->metadata))
+		return -EINVAL;
+
+	errcode = kbasep_hwcnt_virtualizer_client_alloc(
+		hvirt->metadata, &hvcli);
+	if (errcode)
+		return errcode;
+
+	mutex_lock(&hvirt->lock);
+
+	errcode = kbasep_hwcnt_virtualizer_client_add(hvirt, hvcli, enable_map);
+
+	mutex_unlock(&hvirt->lock);
+
+	if (errcode) {
+		kbasep_hwcnt_virtualizer_client_free(hvcli);
+		return errcode;
+	}
+
+	*out_hvcli = hvcli;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_create);
+
+void kbase_hwcnt_virtualizer_client_destroy(
+	struct kbase_hwcnt_virtualizer_client *hvcli)
+{
+	if (!hvcli)
+		return;
+
+	mutex_lock(&hvcli->hvirt->lock);
+
+	kbasep_hwcnt_virtualizer_client_remove(hvcli->hvirt, hvcli);
+
+	mutex_unlock(&hvcli->hvirt->lock);
+
+	kbasep_hwcnt_virtualizer_client_free(hvcli);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_client_destroy);
+
+int kbase_hwcnt_virtualizer_init(
+	struct kbase_hwcnt_context *hctx,
+	struct kbase_hwcnt_virtualizer **out_hvirt)
+{
+	struct kbase_hwcnt_virtualizer *virt;
+	const struct kbase_hwcnt_metadata *metadata;
+
+	if (!hctx || !out_hvirt)
+		return -EINVAL;
+
+	metadata = kbase_hwcnt_context_metadata(hctx);
+	if (!metadata)
+		return -EINVAL;
+
+	virt = kzalloc(sizeof(*virt), GFP_KERNEL);
+	if (!virt)
+		return -ENOMEM;
+
+	virt->hctx = hctx;
+	virt->metadata = metadata;
+
+	mutex_init(&virt->lock);
+	INIT_LIST_HEAD(&virt->clients);
+
+	*out_hvirt = virt;
+	return 0;
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_init);
+
+void kbase_hwcnt_virtualizer_term(
+	struct kbase_hwcnt_virtualizer *hvirt)
+{
+	if (!hvirt)
+		return;
+
+	/* Non-zero client count implies client leak */
+	if (WARN_ON(hvirt->client_count != 0)) {
+		struct kbase_hwcnt_virtualizer_client *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &hvirt->clients, node)
+			kbase_hwcnt_virtualizer_client_destroy(pos);
+	}
+
+	WARN_ON(hvirt->client_count != 0);
+	WARN_ON(hvirt->accum);
+
+	kfree(hvirt);
+}
+KBASE_EXPORT_TEST_API(kbase_hwcnt_virtualizer_term);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.h b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.h
new file mode 100644
index 0000000..1efa81d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwcnt_virtualizer.h
@@ -0,0 +1,139 @@
+/*
+ *
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Hardware counter virtualizer API.
+ *
+ * Virtualizes a hardware counter context, so multiple clients can access
+ * a single hardware counter resource as though each was the exclusive user.
+ */
+
+#ifndef _KBASE_HWCNT_VIRTUALIZER_H_
+#define _KBASE_HWCNT_VIRTUALIZER_H_
+
+#include <linux/types.h>
+
+struct kbase_hwcnt_context;
+struct kbase_hwcnt_virtualizer;
+struct kbase_hwcnt_virtualizer_client;
+struct kbase_hwcnt_enable_map;
+struct kbase_hwcnt_dump_buffer;
+
+/**
+ * kbase_hwcnt_virtualizer_init - Initialise a hardware counter virtualizer.
+ * @hctx:      Non-NULL pointer to the hardware counter context to virtualize.
+ * @out_hvirt: Non-NULL pointer to where the pointer to the created virtualizer
+ *             will be stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_virtualizer_init(
+	struct kbase_hwcnt_context *hctx,
+	struct kbase_hwcnt_virtualizer **out_hvirt);
+
+/**
+ * kbase_hwcnt_virtualizer_term - Terminate a hardware counter virtualizer.
+ * @hvirt: Pointer to virtualizer to be terminated.
+ */
+void kbase_hwcnt_virtualizer_term(
+	struct kbase_hwcnt_virtualizer *hvirt);
+
+/**
+ * kbase_hwcnt_virtualizer_metadata - Get the hardware counter metadata used by
+ *                                    the virtualizer, so related counter data
+ *                                    structures can be created.
+ * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
+ *
+ * Return: Non-NULL pointer to metadata, or NULL on error.
+ */
+const struct kbase_hwcnt_metadata *kbase_hwcnt_virtualizer_metadata(
+	struct kbase_hwcnt_virtualizer *hvirt);
+
+/**
+ * kbase_hwcnt_virtualizer_client_create - Create a new virtualizer client.
+ * @hvirt:      Non-NULL pointer to the hardware counter virtualizer.
+ * @enable_map: Non-NULL pointer to the enable map for the client. Must have the
+ *              same metadata as the virtualizer.
+ * @out_hvcli:  Non-NULL pointer to where the pointer to the created client will
+ *              be stored on success.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_hwcnt_virtualizer_client_create(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	struct kbase_hwcnt_virtualizer_client **out_hvcli);
+
+/**
+ * kbase_hwcnt_virtualizer_client_destroy() - Destroy a virtualizer client.
+ * @hvcli: Pointer to the hardware counter client.
+ */
+void kbase_hwcnt_virtualizer_client_destroy(
+	struct kbase_hwcnt_virtualizer_client *hvcli);
+
+/**
+ * kbase_hwcnt_virtualizer_client_set_counters - Perform a dump of the client's
+ *                                               currently enabled counters, and
+ *                                               enable a new set of counters
+ *                                               that will be used for
+ *                                               subsequent dumps.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @enable_map:  Non-NULL pointer to the new counter enable map for the client.
+ *               Must have the same metadata as the virtualizer.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_virtualizer_client_set_counters(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	const struct kbase_hwcnt_enable_map *enable_map,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
+/**
+ * kbase_hwcnt_virtualizer_client_dump - Perform a dump of the client's
+ *                                       currently enabled counters.
+ * @hvcli:       Non-NULL pointer to the virtualizer client.
+ * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
+ *               be written out to on success.
+ * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
+ *               be written out to on success.
+ * @dump_buf:    Pointer to the buffer where the dump will be written out to on
+ *               success. If non-NULL, must have the same metadata as the
+ *               accumulator. If NULL, the dump will be discarded.
+ *
+ * Return: 0 on success or error code.
+ */
+int kbase_hwcnt_virtualizer_client_dump(
+	struct kbase_hwcnt_virtualizer_client *hvcli,
+	u64 *ts_start_ns,
+	u64 *ts_end_ns,
+	struct kbase_hwcnt_dump_buffer *dump_buf);
+
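+/*
+ * Typical client lifecycle (illustrative sketch only; error handling omitted,
+ * and all variables other than the API calls are hypothetical):
+ *
+ *	struct kbase_hwcnt_virtualizer_client *hvcli;
+ *	u64 ts_start_ns, ts_end_ns;
+ *
+ *	kbase_hwcnt_virtualizer_client_create(hvirt, &enable_map, &hvcli);
+ *	kbase_hwcnt_virtualizer_client_dump(hvcli, &ts_start_ns, &ts_end_ns,
+ *		&dump_buf);
+ *	kbase_hwcnt_virtualizer_client_destroy(hvcli);
+ */
+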
+#endif /* _KBASE_HWCNT_VIRTUALIZER_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ioctl.h b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h
new file mode 100644
index 0000000..ccf67df
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h
@@ -0,0 +1,885 @@
+/*
+ *
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_IOCTL_H_
+#define _KBASE_IOCTL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <asm-generic/ioctl.h>
+#include <linux/types.h>
+
+#define KBASE_IOCTL_TYPE 0x80
+
+/*
+ * 11.1:
+ * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
+ * 11.2:
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_SECURE,
+ *   on which some user-side clients prior to 11.2 might fault if they
+ *   receive them
+ * 11.3:
+ * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and
+ *   KBASE_IOCTL_STICKY_RESOURCE_UNMAP
+ * 11.4:
+ * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ *   specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7:
+ * - Removed UMP support
+ * 11.8:
+ * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags
+ * 11.9:
+ * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY
+ *   under base_mem_alloc_flags
+ * 11.10:
+ * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for
+ *   JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations
+ *   with one softjob.
+ * 11.11:
+ * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags
+ * 11.12:
+ * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS
+ * 11.13:
+ * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT
+ */
+#define BASE_UK_VERSION_MAJOR 11
+#define BASE_UK_VERSION_MINOR 13
+
+/**
+ * struct kbase_ioctl_version_check - Check version compatibility with kernel
+ *
+ * @major: Major version number
+ * @minor: Minor version number
+ */
+struct kbase_ioctl_version_check {
+	__u16 major;
+	__u16 minor;
+};
+
+#define KBASE_IOCTL_VERSION_CHECK \
+	_IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
+
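+/*
+ * Illustrative userspace sketch (the "fd" variable is hypothetical): the
+ * caller fills in the version it supports; the _IOWR encoding suggests the
+ * kernel may write back the version it agrees to use.
+ *
+ *	struct kbase_ioctl_version_check vc = {
+ *		.major = BASE_UK_VERSION_MAJOR,
+ *		.minor = BASE_UK_VERSION_MINOR,
+ *	};
+ *	ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc);
+ */
+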
+/**
+ * struct kbase_ioctl_set_flags - Set kernel context creation flags
+ *
+ * @create_flags: Flags - see base_context_create_flags
+ */
+struct kbase_ioctl_set_flags {
+	__u32 create_flags;
+};
+
+#define KBASE_IOCTL_SET_FLAGS \
+	_IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags)
+
+/**
+ * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel
+ *
+ * @addr: Memory address of an array of struct base_jd_atom_v2
+ * @nr_atoms: Number of entries in the array
+ * @stride: sizeof(struct base_jd_atom_v2)
+ */
+struct kbase_ioctl_job_submit {
+	__u64 addr;
+	__u32 nr_atoms;
+	__u32 stride;
+};
+
+#define KBASE_IOCTL_JOB_SUBMIT \
+	_IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)
+
+/**
+ * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel
+ *
+ * @buffer: Pointer to the buffer to store properties into
+ * @size: Size of the buffer
+ * @flags: Flags - must be zero for now
+ *
+ * The ioctl will return the number of bytes stored into @buffer or an error
+ * on failure (e.g. @size is too small). If @size is specified as 0 then no
+ * data will be written but the return value will be the number of bytes needed
+ * for all the properties.
+ *
+ * @flags may be used in the future to request a different format for the
+ * buffer. With @flags == 0 the following format is used.
+ *
+ * The buffer will be filled with pairs of values, a u32 key identifying the
+ * property followed by the value. The size of the value is identified using
+ * the bottom bits of the key. The value then immediately follows the key and
+ * is tightly packed (there is no padding). All keys and values are
+ * little-endian.
+ *
+ * 00 = u8
+ * 01 = u16
+ * 10 = u32
+ * 11 = u64
+ */
+struct kbase_ioctl_get_gpuprops {
+	__u64 buffer;
+	__u32 size;
+	__u32 flags;
+};
+
+#define KBASE_IOCTL_GET_GPUPROPS \
+	_IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops)
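+
+/*
+ * Illustrative only: a hedged sketch of reading and decoding the property
+ * blob in the @flags == 0 format described above. The two-call pattern
+ * (size probe, then read), the handle_property() helper and the assumption
+ * of a little-endian host are all part of the example, not this interface;
+ * error handling is omitted.
+ *
+ *	struct kbase_ioctl_get_gpuprops gp = { 0 };
+ *	int needed = ioctl(fd, KBASE_IOCTL_GET_GPUPROPS, &gp);
+ *	__u8 *buf = malloc(needed);
+ *	size_t pos = 0;
+ *
+ *	gp.buffer = (__u64)(uintptr_t)buf;
+ *	gp.size = needed;
+ *	ioctl(fd, KBASE_IOCTL_GET_GPUPROPS, &gp);
+ *
+ *	while (pos + sizeof(__u32) <= (size_t)needed) {
+ *		__u32 key;
+ *		__u64 value = 0;
+ *		unsigned int vsize;
+ *
+ *		memcpy(&key, buf + pos, sizeof(key));
+ *		pos += sizeof(key);
+ *		vsize = 1u << (key & 0x3);
+ *		memcpy(&value, buf + pos, vsize);
+ *		pos += vsize;
+ *		handle_property(key, value);
+ *	}
+ */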
+
+#define KBASE_IOCTL_POST_TERM \
+	_IO(KBASE_IOCTL_TYPE, 4)
+
+/**
+ * union kbase_ioctl_mem_alloc - Allocate memory on the GPU
+ *
+ * @va_pages: The number of pages of virtual address space to reserve
+ * @commit_pages: The number of physical pages to allocate
+ * @extent: The number of extra pages to allocate on each GPU fault which grows
+ *          the region
+ * @flags: Flags
+ * @gpu_va: The GPU virtual address which is allocated
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_alloc {
+	struct {
+		__u64 va_pages;
+		__u64 commit_pages;
+		__u64 extent;
+		__u64 flags;
+	} in;
+	struct {
+		__u64 flags;
+		__u64 gpu_va;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_ALLOC \
+	_IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc)
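+
+/*
+ * Illustrative only: the in/out convention shared by the ioctl unions in
+ * this file. The same storage is filled in as @in before the call and read
+ * back as @out afterwards; the flags value below is a placeholder for
+ * whatever base_mem_alloc_flags the caller actually needs.
+ *
+ *	union kbase_ioctl_mem_alloc ma = { 0 };
+ *
+ *	ma.in.va_pages = 16;
+ *	ma.in.commit_pages = 16;
+ *	ma.in.flags = chosen_base_mem_alloc_flags;
+ *
+ *	if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, &ma) == 0)
+ *		gpu_va = ma.out.gpu_va;
+ */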
+
+/**
+ * struct kbase_ioctl_mem_query - Query properties of a GPU memory region
+ * @gpu_addr: A GPU address contained within the region
+ * @query: The type of query
+ * @value: The result of the query
+ *
+ * Use a %KBASE_MEM_QUERY_xxx flag as input for @query.
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_query {
+	struct {
+		__u64 gpu_addr;
+		__u64 query;
+	} in;
+	struct {
+		__u64 value;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_QUERY \
+	_IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query)
+
+#define KBASE_MEM_QUERY_COMMIT_SIZE	((u64)1)
+#define KBASE_MEM_QUERY_VA_SIZE		((u64)2)
+#define KBASE_MEM_QUERY_FLAGS		((u64)3)
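+
+/*
+ * Illustrative only: querying the flags of a region, assuming gpu_va was
+ * returned by a previous KBASE_IOCTL_MEM_ALLOC as sketched above.
+ *
+ *	union kbase_ioctl_mem_query mq = { 0 };
+ *
+ *	mq.in.gpu_addr = gpu_va;
+ *	mq.in.query = KBASE_MEM_QUERY_FLAGS;
+ *	if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, &mq) == 0)
+ *		region_flags = mq.out.value;
+ */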
+
+/**
+ * struct kbase_ioctl_mem_free - Free a memory region
+ * @gpu_addr: Handle to the region to free
+ */
+struct kbase_ioctl_mem_free {
+	__u64 gpu_addr;
+};
+
+#define KBASE_IOCTL_MEM_FREE \
+	_IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free)
+
+/**
+ * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader
+ * @buffer_count: requested number of dumping buffers
+ * @jm_bm:        counters selection bitmask (JM)
+ * @shader_bm:    counters selection bitmask (Shader)
+ * @tiler_bm:     counters selection bitmask (Tiler)
+ * @mmu_l2_bm:    counters selection bitmask (MMU_L2)
+ *
+ * A fd is returned from the ioctl if successful, or a negative value on error
+ */
+struct kbase_ioctl_hwcnt_reader_setup {
+	__u32 buffer_count;
+	__u32 jm_bm;
+	__u32 shader_bm;
+	__u32 tiler_bm;
+	__u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_READER_SETUP \
+	_IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup)
+
+/**
+ * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection
+ * @dump_buffer:  GPU address to write counters to
+ * @jm_bm:        counters selection bitmask (JM)
+ * @shader_bm:    counters selection bitmask (Shader)
+ * @tiler_bm:     counters selection bitmask (Tiler)
+ * @mmu_l2_bm:    counters selection bitmask (MMU_L2)
+ */
+struct kbase_ioctl_hwcnt_enable {
+	__u64 dump_buffer;
+	__u32 jm_bm;
+	__u32 shader_bm;
+	__u32 tiler_bm;
+	__u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_ENABLE \
+	_IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable)
+
+#define KBASE_IOCTL_HWCNT_DUMP \
+	_IO(KBASE_IOCTL_TYPE, 10)
+
+#define KBASE_IOCTL_HWCNT_CLEAR \
+	_IO(KBASE_IOCTL_TYPE, 11)
+
+/**
+ * struct kbase_ioctl_hwcnt_values - Values to set the dummy counters to.
+ * @data:    Counter samples for the dummy model.
+ * @size:    Size of the counter sample data.
+ * @padding: Padding.
+ */
+struct kbase_ioctl_hwcnt_values {
+	__u64 data;
+	__u32 size;
+	__u32 padding;
+};
+
+#define KBASE_IOCTL_HWCNT_SET \
+	_IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values)
+
+/**
+ * struct kbase_ioctl_disjoint_query - Query the disjoint counter
+ * @counter:   A counter of disjoint events in the kernel
+ */
+struct kbase_ioctl_disjoint_query {
+	__u32 counter;
+};
+
+#define KBASE_IOCTL_DISJOINT_QUERY \
+	_IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query)
+
+/**
+ * struct kbase_ioctl_get_ddk_version - Query the kernel version
+ * @version_buffer: Buffer to receive the kernel version string
+ * @size: Size of the buffer
+ * @padding: Padding
+ *
+ * The ioctl will return the number of bytes written into version_buffer
+ * (which includes a NULL byte) or a negative error code
+ *
+ * The ioctl request code has to be _IOW because the data in ioctl struct is
+ * being copied to the kernel, even though the kernel then writes out the
+ * version info to the buffer specified in the ioctl.
+ */
+struct kbase_ioctl_get_ddk_version {
+	__u64 version_buffer;
+	__u32 size;
+	__u32 padding;
+};
+
+#define KBASE_IOCTL_GET_DDK_VERSION \
+	_IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)
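+
+/*
+ * Illustrative only: reading the kernel version string. The 64-byte buffer
+ * is an arbitrary size chosen for the example; the ioctl returns how many
+ * bytes (including the terminating NULL byte) it actually wrote.
+ *
+ *	char ver[64] = { 0 };
+ *	struct kbase_ioctl_get_ddk_version dv = {
+ *		.version_buffer = (__u64)(uintptr_t)ver,
+ *		.size = sizeof(ver),
+ *	};
+ *	int written = ioctl(fd, KBASE_IOCTL_GET_DDK_VERSION, &dv);
+ */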
+
+/**
+ * struct kbase_ioctl_mem_jit_init_old - Initialise the JIT memory allocator
+ *
+ * @va_pages: Number of VA pages to reserve for JIT
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ *
+ * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
+ * backwards compatibility.
+ */
+struct kbase_ioctl_mem_jit_init_old {
+	__u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT_OLD \
+	_IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_old)
+
+/**
+ * struct kbase_ioctl_mem_jit_init - Initialise the JIT memory allocator
+ *
+ * @va_pages: Number of VA pages to reserve for JIT
+ * @max_allocations: Maximum number of concurrent allocations
+ * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @padding: Currently unused, must be zero
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ */
+struct kbase_ioctl_mem_jit_init {
+	__u64 va_pages;
+	__u8 max_allocations;
+	__u8 trim_level;
+	__u8 padding[6];
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT \
+	_IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init)
+
+/**
+ * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory
+ *
+ * @handle: GPU memory handle (GPU VA)
+ * @user_addr: The address where it is mapped in user space
+ * @size: The number of bytes to synchronise
+ * @type: The direction to synchronise: 0 is sync to memory (clean),
+ * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants.
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_mem_sync {
+	__u64 handle;
+	__u64 user_addr;
+	__u64 size;
+	__u8 type;
+	__u8 padding[7];
+};
+
+#define KBASE_IOCTL_MEM_SYNC \
+	_IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync)
+
+/**
+ * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer
+ *
+ * @gpu_addr: The GPU address of the memory region
+ * @cpu_addr: The CPU address to locate
+ * @size: A size in bytes to validate is contained within the region
+ * @offset: The offset from the start of the memory region to @cpu_addr
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_find_cpu_offset {
+	struct {
+		__u64 gpu_addr;
+		__u64 cpu_addr;
+		__u64 size;
+	} in;
+	struct {
+		__u64 offset;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
+	_IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset)
+
+/**
+ * struct kbase_ioctl_get_context_id - Get the kernel context ID
+ *
+ * @id: The kernel context ID
+ */
+struct kbase_ioctl_get_context_id {
+	__u32 id;
+};
+
+#define KBASE_IOCTL_GET_CONTEXT_ID \
+	_IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id)
+
+/**
+ * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd
+ *
+ * @flags: Flags
+ *
+ * The ioctl returns a file descriptor when successful
+ */
+struct kbase_ioctl_tlstream_acquire {
+	__u32 flags;
+};
+
+#define KBASE_IOCTL_TLSTREAM_ACQUIRE \
+	_IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire)
+
+#define KBASE_IOCTL_TLSTREAM_FLUSH \
+	_IO(KBASE_IOCTL_TYPE, 19)
+
+/**
+ * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region
+ *
+ * @gpu_addr: The memory region to modify
+ * @pages:    The number of physical pages that should be present
+ *
+ * The ioctl may return on the following error codes or 0 for success:
+ *   -ENOMEM: Out of memory
+ *   -EINVAL: Invalid arguments
+ */
+struct kbase_ioctl_mem_commit {
+	__u64 gpu_addr;
+	__u64 pages;
+};
+
+#define KBASE_IOCTL_MEM_COMMIT \
+	_IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit)
+
+/**
+ * union kbase_ioctl_mem_alias - Create an alias of memory regions
+ * @flags: Flags, see BASE_MEM_xxx
+ * @stride: Bytes between start of each memory region
+ * @nents: The number of regions to pack together into the alias
+ * @aliasing_info: Pointer to an array of struct base_mem_aliasing_info
+ * @gpu_va: Address of the new alias
+ * @va_pages: Size of the new alias
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_alias {
+	struct {
+		__u64 flags;
+		__u64 stride;
+		__u64 nents;
+		__u64 aliasing_info;
+	} in;
+	struct {
+		__u64 flags;
+		__u64 gpu_va;
+		__u64 va_pages;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_ALIAS \
+	_IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias)
+
+/**
+ * union kbase_ioctl_mem_import - Import memory for use by the GPU
+ * @flags: Flags, see BASE_MEM_xxx
+ * @phandle: Handle to the external memory
+ * @type: Type of external memory, see base_mem_import_type
+ * @padding: Amount of extra VA pages to append to the imported buffer
+ * @gpu_va: Address of the new alias
+ * @va_pages: Size of the new alias
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_import {
+	struct {
+		__u64 flags;
+		__u64 phandle;
+		__u32 type;
+		__u32 padding;
+	} in;
+	struct {
+		__u64 flags;
+		__u64 gpu_va;
+		__u64 va_pages;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_IMPORT \
+	_IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import)
+
+/**
+ * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region
+ * @gpu_va: The GPU region to modify
+ * @flags: The new flags to set
+ * @mask: Mask of the flags to modify
+ */
+struct kbase_ioctl_mem_flags_change {
+	__u64 gpu_va;
+	__u64 flags;
+	__u64 mask;
+};
+
+#define KBASE_IOCTL_MEM_FLAGS_CHANGE \
+	_IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change)
+
+/**
+ * struct kbase_ioctl_stream_create - Create a synchronisation stream
+ * @name: A name to identify this stream. Must be NULL-terminated.
+ *
+ * Note that this is also called a "timeline", but is named stream to avoid
+ * confusion with other uses of the word.
+ *
+ * Unused bytes in @name (after the first NULL byte) must also be NULL bytes.
+ *
+ * The ioctl returns a file descriptor.
+ */
+struct kbase_ioctl_stream_create {
+	char name[32];
+};
+
+#define KBASE_IOCTL_STREAM_CREATE \
+	_IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create)
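+
+/*
+ * Illustrative only: creating a stream. Designated initialisation zero-fills
+ * the unused bytes of @name, which satisfies the NULL-termination rule
+ * above; the stream name itself is a placeholder.
+ *
+ *	struct kbase_ioctl_stream_create sc = { .name = "example-timeline" };
+ *	int stream_fd = ioctl(fd, KBASE_IOCTL_STREAM_CREATE, &sc);
+ */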
+
+/**
+ * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence
+ * @fd: The file descriptor to validate
+ */
+struct kbase_ioctl_fence_validate {
+	int fd;
+};
+
+#define KBASE_IOCTL_FENCE_VALIDATE \
+	_IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate)
+
+/**
+ * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel
+ * @buffer: Pointer to the information
+ * @len: Length
+ * @padding: Padding
+ *
+ * The data provided is accessible through a debugfs file
+ */
+struct kbase_ioctl_mem_profile_add {
+	__u64 buffer;
+	__u32 len;
+	__u32 padding;
+};
+
+#define KBASE_IOCTL_MEM_PROFILE_ADD \
+	_IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add)
+
+/**
+ * struct kbase_ioctl_soft_event_update - Update the status of a soft-event
+ * @event: GPU address of the event which has been updated
+ * @new_status: The new status to set
+ * @flags: Flags for future expansion
+ */
+struct kbase_ioctl_soft_event_update {
+	__u64 event;
+	__u32 new_status;
+	__u32 flags;
+};
+
+#define KBASE_IOCTL_SOFT_EVENT_UPDATE \
+	_IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update)
+
+/**
+ * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource
+ * @count: Number of resources
+ * @address: Array of u64 GPU addresses of the external resources to map
+ */
+struct kbase_ioctl_sticky_resource_map {
+	__u64 count;
+	__u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_MAP \
+	_IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map)
+
+/**
+ * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource which was
+ *                                            previously permanently mapped
+ * @count: Number of resources
+ * @address: Array of u64 GPU addresses of the external resources to unmap
+ */
+struct kbase_ioctl_sticky_resource_unmap {
+	__u64 count;
+	__u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \
+	_IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap)
+
+/**
+ * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of
+ *                                                   the GPU memory region for
+ *                                                   the given gpu address and
+ *                                                   the offset of that address
+ *                                                   into the region
+ *
+ * @gpu_addr: GPU virtual address
+ * @size: Size in bytes within the region
+ * @start: Address of the beginning of the memory region enclosing @gpu_addr
+ *         for the length of @offset bytes
+ * @offset: The offset from the start of the memory region to @gpu_addr
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_mem_find_gpu_start_and_offset {
+	struct {
+		__u64 gpu_addr;
+		__u64 size;
+	} in;
+	struct {
+		__u64 start;
+		__u64 offset;
+	} out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
+	_IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset)
+
+
+#define KBASE_IOCTL_CINSTR_GWT_START \
+	_IO(KBASE_IOCTL_TYPE, 33)
+
+#define KBASE_IOCTL_CINSTR_GWT_STOP \
+	_IO(KBASE_IOCTL_TYPE, 34)
+
+/**
+ * union kbase_ioctl_cinstr_gwt_dump - Used to collect all GPU write fault
+ *                                     addresses.
+ * @addr_buffer: Address of buffer to hold addresses of GPU modified areas.
+ * @size_buffer: Address of buffer to hold size of modified areas (in pages)
+ * @len: Number of addresses the buffers can hold.
+ * @padding: Padding.
+ * @more_data_available: Status indicating if more addresses are available.
+ * @no_of_addr_collected: Number of addresses collected into addr_buffer.
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ *
+ * This structure is used when performing a call to dump GPU write fault
+ * addresses.
+ */
+union kbase_ioctl_cinstr_gwt_dump {
+	struct {
+		__u64 addr_buffer;
+		__u64 size_buffer;
+		__u32 len;
+		__u32 padding;
+	} in;
+	struct {
+		__u32 no_of_addr_collected;
+		__u8 more_data_available;
+		__u8 padding[27];
+	} out;
+};
+
+#define KBASE_IOCTL_CINSTR_GWT_DUMP \
+	_IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump)
+
+
+/**
+ * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone
+ *
+ * @va_pages: Number of VA pages to reserve for EXEC_VA
+ */
+struct kbase_ioctl_mem_exec_init {
+	__u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_EXEC_INIT \
+	_IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init)
+
+
+/***************
+ * test ioctls *
+ ***************/
+#if MALI_UNIT_TEST
+/* These ioctls are purely for test purposes and are not used in the
+ * production driver; they may therefore change without notice.
+ */
+
+#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1)
+
+/**
+ * struct kbase_ioctl_tlstream_test - Start a timeline stream test
+ *
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay between tracepoints from one writer in milliseconds
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg:   if non-zero aux messages will be included
+ */
+struct kbase_ioctl_tlstream_test {
+	__u32 tpw_count;
+	__u32 msg_delay;
+	__u32 msg_count;
+	__u32 aux_msg;
+};
+
+#define KBASE_IOCTL_TLSTREAM_TEST \
+	_IOW(KBASE_IOCTL_TEST_TYPE, 1, struct kbase_ioctl_tlstream_test)
+
+/**
+ * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ */
+struct kbase_ioctl_tlstream_stats {
+	__u32 bytes_collected;
+	__u32 bytes_generated;
+};
+
+#define KBASE_IOCTL_TLSTREAM_STATS \
+	_IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats)
+
+/**
+ * struct kbase_ioctl_cs_event_memory_write - Write an event memory address
+ * @cpu_addr: Memory address to write
+ * @value: Value to write
+ * @padding: Currently unused, must be zero
+ */
+struct kbase_ioctl_cs_event_memory_write {
+	__u64 cpu_addr;
+	__u8 value;
+	__u8 padding[7];
+};
+
+/**
+ * union kbase_ioctl_cs_event_memory_read - Read an event memory address
+ * @cpu_addr: Memory address to read
+ * @value: Value read
+ * @padding: Currently unused, must be zero
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_cs_event_memory_read {
+	struct {
+		__u64 cpu_addr;
+	} in;
+	struct {
+		__u8 value;
+		__u8 padding[7];
+	} out;
+};
+
+#endif
+
+/* Customer extension range */
+#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2)
+
+/* If the integration needs extra ioctls, add them here
+ * like this:
+ *
+ * struct my_ioctl_args {
+ *  ....
+ * }
+ *
+ * #define KBASE_IOCTL_MY_IOCTL \
+ *         _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args)
+ */
+
+
+/**********************************
+ * Definitions for GPU properties *
+ **********************************/
+#define KBASE_GPUPROP_VALUE_SIZE_U8	(0x0)
+#define KBASE_GPUPROP_VALUE_SIZE_U16	(0x1)
+#define KBASE_GPUPROP_VALUE_SIZE_U32	(0x2)
+#define KBASE_GPUPROP_VALUE_SIZE_U64	(0x3)
+
+#define KBASE_GPUPROP_PRODUCT_ID			1
+#define KBASE_GPUPROP_VERSION_STATUS			2
+#define KBASE_GPUPROP_MINOR_REVISION			3
+#define KBASE_GPUPROP_MAJOR_REVISION			4
+/* 5 previously used for GPU speed */
+#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX			6
+/* 7 previously used for minimum GPU speed */
+#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE		8
+#define KBASE_GPUPROP_TEXTURE_FEATURES_0		9
+#define KBASE_GPUPROP_TEXTURE_FEATURES_1		10
+#define KBASE_GPUPROP_TEXTURE_FEATURES_2		11
+#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE		12
+
+#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE			13
+#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE		14
+#define KBASE_GPUPROP_L2_NUM_L2_SLICES			15
+
+#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES		16
+#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS		17
+
+#define KBASE_GPUPROP_MAX_THREADS			18
+#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE		19
+#define KBASE_GPUPROP_MAX_BARRIER_SIZE			20
+#define KBASE_GPUPROP_MAX_REGISTERS			21
+#define KBASE_GPUPROP_MAX_TASK_QUEUE			22
+#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT		23
+#define KBASE_GPUPROP_IMPL_TECH				24
+
+#define KBASE_GPUPROP_RAW_SHADER_PRESENT		25
+#define KBASE_GPUPROP_RAW_TILER_PRESENT			26
+#define KBASE_GPUPROP_RAW_L2_PRESENT			27
+#define KBASE_GPUPROP_RAW_STACK_PRESENT			28
+#define KBASE_GPUPROP_RAW_L2_FEATURES			29
+#define KBASE_GPUPROP_RAW_CORE_FEATURES			30
+#define KBASE_GPUPROP_RAW_MEM_FEATURES			31
+#define KBASE_GPUPROP_RAW_MMU_FEATURES			32
+#define KBASE_GPUPROP_RAW_AS_PRESENT			33
+#define KBASE_GPUPROP_RAW_JS_PRESENT			34
+#define KBASE_GPUPROP_RAW_JS_FEATURES_0			35
+#define KBASE_GPUPROP_RAW_JS_FEATURES_1			36
+#define KBASE_GPUPROP_RAW_JS_FEATURES_2			37
+#define KBASE_GPUPROP_RAW_JS_FEATURES_3			38
+#define KBASE_GPUPROP_RAW_JS_FEATURES_4			39
+#define KBASE_GPUPROP_RAW_JS_FEATURES_5			40
+#define KBASE_GPUPROP_RAW_JS_FEATURES_6			41
+#define KBASE_GPUPROP_RAW_JS_FEATURES_7			42
+#define KBASE_GPUPROP_RAW_JS_FEATURES_8			43
+#define KBASE_GPUPROP_RAW_JS_FEATURES_9			44
+#define KBASE_GPUPROP_RAW_JS_FEATURES_10		45
+#define KBASE_GPUPROP_RAW_JS_FEATURES_11		46
+#define KBASE_GPUPROP_RAW_JS_FEATURES_12		47
+#define KBASE_GPUPROP_RAW_JS_FEATURES_13		48
+#define KBASE_GPUPROP_RAW_JS_FEATURES_14		49
+#define KBASE_GPUPROP_RAW_JS_FEATURES_15		50
+#define KBASE_GPUPROP_RAW_TILER_FEATURES		51
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0		52
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1		53
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2		54
+#define KBASE_GPUPROP_RAW_GPU_ID			55
+#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS		56
+#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE	57
+#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE	58
+#define KBASE_GPUPROP_RAW_THREAD_FEATURES		59
+#define KBASE_GPUPROP_RAW_COHERENCY_MODE		60
+
+#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS		61
+#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS		62
+#define KBASE_GPUPROP_COHERENCY_COHERENCY		63
+#define KBASE_GPUPROP_COHERENCY_GROUP_0			64
+#define KBASE_GPUPROP_COHERENCY_GROUP_1			65
+#define KBASE_GPUPROP_COHERENCY_GROUP_2			66
+#define KBASE_GPUPROP_COHERENCY_GROUP_3			67
+#define KBASE_GPUPROP_COHERENCY_GROUP_4			68
+#define KBASE_GPUPROP_COHERENCY_GROUP_5			69
+#define KBASE_GPUPROP_COHERENCY_GROUP_6			70
+#define KBASE_GPUPROP_COHERENCY_GROUP_7			71
+#define KBASE_GPUPROP_COHERENCY_GROUP_8			72
+#define KBASE_GPUPROP_COHERENCY_GROUP_9			73
+#define KBASE_GPUPROP_COHERENCY_GROUP_10		74
+#define KBASE_GPUPROP_COHERENCY_GROUP_11		75
+#define KBASE_GPUPROP_COHERENCY_GROUP_12		76
+#define KBASE_GPUPROP_COHERENCY_GROUP_13		77
+#define KBASE_GPUPROP_COHERENCY_GROUP_14		78
+#define KBASE_GPUPROP_COHERENCY_GROUP_15		79
+
+#define KBASE_GPUPROP_TEXTURE_FEATURES_3		80
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3		81
+
+#define KBASE_GPUPROP_NUM_EXEC_ENGINES                  82
+
+#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC		83
+#define KBASE_GPUPROP_TLS_ALLOC				84
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/midgard/mali_kbase_jd.c
new file mode 100644
index 0000000..0a01614
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd.c
@@ -0,0 +1,1621 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <mali_kbase.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/ratelimit.h>
+
+#include <mali_kbase_jm.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_tlstream.h>
+
+#include "mali_kbase_dma_fence.h"
+
+#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+/* random32 was renamed to prandom_u32 in 3.8 */
+#define prandom_u32 random32
+#endif
+
+/* Return whether katom will run on the GPU or not. Currently only soft jobs and
+ * dependency-only atoms do not run on the GPU */
+#define IS_GPU_ATOM(katom) (!((katom->core_req & BASE_JD_REQ_SOFT_JOB) ||  \
+			((katom->core_req & BASE_JD_REQ_ATOM_TYPE) ==    \
+							BASE_JD_REQ_DEP)))
+/*
+ * This is the kernel side of the API. Only entry points are:
+ * - kbase_jd_submit(): Called from userspace to submit a single bag
+ * - kbase_jd_done(): Called from interrupt context to track the
+ *   completion of a job.
+ * Callouts:
+ * - to the job manager (enqueue a job)
+ * - to the event subsystem (signals the completion/failure of bag/job-chains).
+ */
+
+static void __user *
+get_compat_pointer(struct kbase_context *kctx, const u64 p)
+{
+#ifdef CONFIG_COMPAT
+	if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+		return compat_ptr(p);
+#endif
+	return u64_to_user_ptr(p);
+}
+
+/* Runs an atom, either by handing it to the JS or by immediately running it
+ * in the case of soft-jobs.
+ *
+ * Returns whether the JS needs a reschedule.
+ *
+ * Note that the caller must also check the atom status and
+ * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock
+ */
+static int jd_run_atom(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+	if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
+		/* Dependency only atom */
+		katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+		return 0;
+	} else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+		/* Soft-job */
+		if (katom->will_fail_event_code) {
+			kbase_finish_soft_job(katom);
+			katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			return 0;
+		}
+		if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+						  == BASE_JD_REQ_SOFT_REPLAY) {
+			if (!kbase_replay_process(katom))
+				katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+		} else if (kbase_process_soft_job(katom) == 0) {
+			kbase_finish_soft_job(katom);
+			katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+		}
+		return 0;
+	}
+
+	katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+	/* Queue an action about whether we should try scheduling a context */
+	return kbasep_js_add_job(kctx, katom);
+}
+
+#if defined(CONFIG_MALI_DMA_FENCE)
+void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
+{
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(katom);
+	kbdev = katom->kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Check whether the atom's other dependencies were already met. If
+	 * katom is a GPU atom then the job scheduler may be able to represent
+	 * the dependencies, hence we may attempt to submit it before they are
+	 * met. Other atoms must have had both dependencies resolved.
+	 */
+	if (IS_GPU_ATOM(katom) ||
+			(!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
+			!kbase_jd_katom_dep_atom(&katom->dep[1]))) {
+		/* katom dep complete, attempt to run it */
+		bool resched = false;
+
+		resched = jd_run_atom(katom);
+
+		if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+			/* The atom has already finished */
+			resched |= jd_done_nolock(katom, NULL);
+		}
+
+		if (resched)
+			kbase_js_sched_all(kbdev);
+	}
+}
+#endif
+
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
+{
+#ifdef CONFIG_MALI_DMA_FENCE
+	/* Flush dma-fence workqueue to ensure that any callbacks that may have
+	 * been queued are done before continuing.
+	 * Any successfully completed atom would have had all its callbacks
+	 * completed before the atom was run, so only flush for failed atoms.
+	 */
+	if (katom->event_code != BASE_JD_EVENT_DONE)
+		flush_workqueue(katom->kctx->dma_fence.wq);
+#endif /* CONFIG_MALI_DMA_FENCE */
+}
+
+static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
+{
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	kbase_dma_fence_signal(katom);
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	kbase_gpu_vm_lock(katom->kctx);
+	/* only roll back if extres is non-NULL */
+	if (katom->extres) {
+		u32 res_no;
+
+		res_no = katom->nr_extres;
+		while (res_no-- > 0) {
+			struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+			struct kbase_va_region *reg;
+
+			reg = kbase_region_tracker_find_region_base_address(
+					katom->kctx,
+					katom->extres[res_no].gpu_address);
+			kbase_unmap_external_resource(katom->kctx, reg, alloc);
+		}
+		kfree(katom->extres);
+		katom->extres = NULL;
+	}
+	kbase_gpu_vm_unlock(katom->kctx);
+}
+
+/*
+ * Set up external resources needed by this job.
+ *
+ * jctx.lock must be held when this is called.
+ */
+
+static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom)
+{
+	int err_ret_val = -EINVAL;
+	u32 res_no;
+#ifdef CONFIG_MALI_DMA_FENCE
+	struct kbase_dma_fence_resv_info info = {
+		.dma_fence_resv_count = 0,
+	};
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	/*
+	 * When both dma-buf fence and Android native sync are enabled, we
+	 * disable dma-buf fence for contexts that are using Android native
+	 * fences.
+	 */
+	const bool implicit_sync = !kbase_ctx_flag(katom->kctx,
+						   KCTX_NO_IMPLICIT_SYNC);
+#else /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+	const bool implicit_sync = true;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#endif /* CONFIG_MALI_DMA_FENCE */
+	struct base_external_resource *input_extres;
+
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
+
+	/* no resources encoded, early out */
+	if (!katom->nr_extres)
+		return -EINVAL;
+
+	katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL);
+	if (NULL == katom->extres) {
+		err_ret_val = -ENOMEM;
+		goto early_err_out;
+	}
+
+	/* copy user buffer to the end of our real buffer.
+	 * Make sure the struct sizes haven't changed in a way
+	 * we don't support */
+	BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
+	input_extres = (struct base_external_resource *)
+			(((unsigned char *)katom->extres) +
+			(sizeof(*katom->extres) - sizeof(*input_extres)) *
+			katom->nr_extres);
+
+	if (copy_from_user(input_extres,
+			get_compat_pointer(katom->kctx, user_atom->extres_list),
+			sizeof(*input_extres) * katom->nr_extres) != 0) {
+		err_ret_val = -EINVAL;
+		goto early_err_out;
+	}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (implicit_sync) {
+		info.resv_objs = kmalloc_array(katom->nr_extres,
+					sizeof(struct dma_resv *),
+					GFP_KERNEL);
+		if (!info.resv_objs) {
+			err_ret_val = -ENOMEM;
+			goto early_err_out;
+		}
+
+		info.dma_fence_excl_bitmap =
+				kcalloc(BITS_TO_LONGS(katom->nr_extres),
+					sizeof(unsigned long), GFP_KERNEL);
+		if (!info.dma_fence_excl_bitmap) {
+			err_ret_val = -ENOMEM;
+			goto early_err_out;
+		}
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	/* Take the process's mmap lock */
+	mmap_read_lock(current->mm);
+
+	/* need to keep the GPU VM locked while we set up UMM buffers */
+	kbase_gpu_vm_lock(katom->kctx);
+	for (res_no = 0; res_no < katom->nr_extres; res_no++) {
+		struct base_external_resource *res;
+		struct kbase_va_region *reg;
+		struct kbase_mem_phy_alloc *alloc;
+		bool exclusive;
+
+		res = &input_extres[res_no];
+		exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
+				? true : false;
+		reg = kbase_region_tracker_find_region_enclosing_address(
+				katom->kctx,
+				res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+		/* did we find a matching region object? */
+		if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
+			/* roll back */
+			goto failed_loop;
+		}
+
+		if (!(katom->core_req & BASE_JD_REQ_SOFT_JOB) &&
+				(reg->flags & KBASE_REG_SECURE)) {
+			katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
+		}
+
+		alloc = kbase_map_external_resource(katom->kctx, reg,
+				current->mm);
+		if (!alloc) {
+			err_ret_val = -EINVAL;
+			goto failed_loop;
+		}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+		if (implicit_sync &&
+		    reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+			struct dma_resv *resv;
+
+			resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
+			if (resv)
+				kbase_dma_fence_add_reservation(resv, &info,
+								exclusive);
+		}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+		/* Finish by updating our array with the data we found. */
+		/* NOTE: It is important that this is the last thing we do (or
+		 * at least not before the first write) as we overwrite elements
+		 * as we loop and could be overwriting ourselves, so no writes
+		 * until the last read for an element.
+		 */
+		/* Save the start_pfn (as an address, not a pfn) so we can use
+		 * the fast lookup later. */
+		katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT;
+		katom->extres[res_no].alloc = alloc;
+	}
+	/* successfully parsed the extres array */
+	/* drop the vm lock now */
+	kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+	mmap_read_unlock(current->mm);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (implicit_sync) {
+		if (info.dma_fence_resv_count) {
+			int ret;
+
+			ret = kbase_dma_fence_wait(katom, &info);
+			if (ret < 0)
+				goto failed_dma_fence_setup;
+		}
+
+		kfree(info.resv_objs);
+		kfree(info.dma_fence_excl_bitmap);
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	/* all done OK */
+	return 0;
+
+/* error handling section */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+failed_dma_fence_setup:
+	/* Lock the process's mmap lock */
+	mmap_read_lock(current->mm);
+
+	/* lock before we unmap */
+	kbase_gpu_vm_lock(katom->kctx);
+#endif
+
+ failed_loop:
+	/* undo the loop work */
+	while (res_no-- > 0) {
+		struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+
+		kbase_unmap_external_resource(katom->kctx, NULL, alloc);
+	}
+	kbase_gpu_vm_unlock(katom->kctx);
+
+	/* Release the process's mmap lock */
+	mmap_read_unlock(current->mm);
+
+ early_err_out:
+	kfree(katom->extres);
+	katom->extres = NULL;
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (implicit_sync) {
+		kfree(info.resv_objs);
+		kfree(info.dma_fence_excl_bitmap);
+	}
+#endif
+	return err_ret_val;
+}
+
+static inline void jd_resolve_dep(struct list_head *out_list,
+					struct kbase_jd_atom *katom,
+					u8 d, bool ctx_is_dying)
+{
+	u8 other_d = !d;
+
+	while (!list_empty(&katom->dep_head[d])) {
+		struct kbase_jd_atom *dep_atom;
+		struct kbase_jd_atom *other_dep_atom;
+		u8 dep_type;
+
+		dep_atom = list_entry(katom->dep_head[d].next,
+				struct kbase_jd_atom, dep_item[d]);
+		list_del(katom->dep_head[d].next);
+
+		dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
+		kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
+
+		if (katom->event_code != BASE_JD_EVENT_DONE &&
+			(dep_type != BASE_JD_DEP_TYPE_ORDER)) {
+#ifdef CONFIG_MALI_DMA_FENCE
+			kbase_dma_fence_cancel_callbacks(dep_atom);
+#endif
+
+			dep_atom->event_code = katom->event_code;
+			KBASE_DEBUG_ASSERT(dep_atom->status !=
+						KBASE_JD_ATOM_STATE_UNUSED);
+
+			if ((dep_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+					!= BASE_JD_REQ_SOFT_REPLAY) {
+				dep_atom->will_fail_event_code =
+					dep_atom->event_code;
+			} else {
+				dep_atom->status =
+					KBASE_JD_ATOM_STATE_COMPLETED;
+			}
+		}
+		other_dep_atom = (struct kbase_jd_atom *)
+			kbase_jd_katom_dep_atom(&dep_atom->dep[other_d]);
+
+		if (!dep_atom->in_jd_list && (!other_dep_atom ||
+				(IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
+				!dep_atom->will_fail_event_code &&
+				!other_dep_atom->will_fail_event_code))) {
+			bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+			int dep_count;
+
+			dep_count = kbase_fence_dep_count_read(dep_atom);
+			if (likely(dep_count == -1)) {
+				dep_satisfied = true;
+			} else {
+				/*
+				 * There are either still active callbacks, or
+				 * all fences for this @dep_atom have signaled,
+				 * but the worker that will queue the atom has
+				 * not yet run.
+				 *
+				 * Wait for the fences to signal and the fence
+				 * worker to run and handle @dep_atom. If
+				 * @dep_atom was completed due to error on
+				 * @katom, then the fence worker will pick up
+				 * the complete status and error code set on
+				 * @dep_atom above.
+				 */
+				dep_satisfied = false;
+			}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+			if (dep_satisfied) {
+				dep_atom->in_jd_list = true;
+				list_add_tail(&dep_atom->jd_item, out_list);
+			}
+		}
+	}
+}
+
+KBASE_EXPORT_TEST_API(jd_resolve_dep);
+
+#if MALI_CUSTOMER_RELEASE == 0
+static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+	kbdev->force_replay_count++;
+
+	if (kbdev->force_replay_count >= kbdev->force_replay_limit) {
+		kbdev->force_replay_count = 0;
+		katom->event_code = BASE_JD_EVENT_FORCE_REPLAY;
+
+		if (kbdev->force_replay_random)
+			kbdev->force_replay_limit =
+			   (prandom_u32() % KBASEP_FORCE_REPLAY_RANDOM_LIMIT) + 1;
+
+		dev_info(kbdev->dev, "force_replay : promoting to error\n");
+	}
+}
+
+/** Test to see if atom should be forced to fail.
+ *
+ * This function will check if an atom has a replay job as a dependent. If so
+ * then it will be considered for forced failure. */
+static void jd_check_force_failure(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	int i;
+
+	if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) ||
+	    (katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
+		return;
+
+	for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
+		if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
+		    kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
+			struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
+
+			if ((dep_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+						     BASE_JD_REQ_SOFT_REPLAY &&
+			    (dep_atom->core_req & kbdev->force_replay_core_req)
+					     == kbdev->force_replay_core_req) {
+				jd_force_failure(kbdev, katom);
+				return;
+			}
+		}
+	}
+}
+#endif
+
+/**
+ * is_dep_valid - Validate that a dependency is valid for early dependency
+ *                submission
+ * @katom: Dependency atom to validate
+ *
+ * A dependency is valid if any of the following are true :
+ * - It does not exist (a non-existent dependency does not block submission)
+ * - It is in the job scheduler
+ * - It has completed, does not have a failure event code, and has not been
+ *   marked to fail in the future
+ *
+ * Return: true if valid, false otherwise
+ */
+static bool is_dep_valid(struct kbase_jd_atom *katom)
+{
+	/* If there's no dependency then this is 'valid' from the perspective of
+	 * early dependency submission */
+	if (!katom)
+		return true;
+
+	/* Dependency must have reached the job scheduler */
+	if (katom->status < KBASE_JD_ATOM_STATE_IN_JS)
+		return false;
+
+	/* If dependency has completed and has failed or will fail then it is
+	 * not valid */
+	if (katom->status >= KBASE_JD_ATOM_STATE_HW_COMPLETED &&
+			(katom->event_code != BASE_JD_EVENT_DONE ||
+			katom->will_fail_event_code))
+		return false;
+
+	return true;
+}
+
+static void jd_try_submitting_deps(struct list_head *out_list,
+		struct kbase_jd_atom *node)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct list_head *pos;
+
+		list_for_each(pos, &node->dep_head[i]) {
+			struct kbase_jd_atom *dep_atom = list_entry(pos,
+					struct kbase_jd_atom, dep_item[i]);
+
+			if (IS_GPU_ATOM(dep_atom) && !dep_atom->in_jd_list) {
+				/* Check if atom deps look sane */
+				bool dep0_valid = is_dep_valid(
+						dep_atom->dep[0].atom);
+				bool dep1_valid = is_dep_valid(
+						dep_atom->dep[1].atom);
+				bool dep_satisfied = true;
+#ifdef CONFIG_MALI_DMA_FENCE
+				int dep_count;
+
+				dep_count = kbase_fence_dep_count_read(
+								dep_atom);
+				if (likely(dep_count == -1)) {
+					dep_satisfied = true;
+				} else {
+				/*
+				 * There are either still active callbacks, or
+				 * all fences for this @dep_atom have signaled,
+				 * but the worker that will queue the atom has
+				 * not yet run.
+				 *
+				 * Wait for the fences to signal and the fence
+				 * worker to run and handle @dep_atom. If
+				 * @dep_atom was completed due to error on
+				 * @katom, then the fence worker will pick up
+				 * the complete status and error code set on
+				 * @dep_atom above.
+				 */
+					dep_satisfied = false;
+				}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+				if (dep0_valid && dep1_valid && dep_satisfied) {
+					dep_atom->in_jd_list = true;
+					list_add(&dep_atom->jd_item, out_list);
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Perform the necessary handling of an atom that has finished running
+ * on the GPU.
+ *
+ * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
+ * is responsible for calling kbase_finish_soft_job *before* calling this function.
+ *
+ * The caller must hold the kbase_jd_context.lock.
+ */
+bool jd_done_nolock(struct kbase_jd_atom *katom,
+		struct list_head *completed_jobs_ctx)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct list_head completed_jobs;
+	struct list_head runnable_jobs;
+	bool need_to_try_schedule_context = false;
+	int i;
+
+	INIT_LIST_HEAD(&completed_jobs);
+	INIT_LIST_HEAD(&runnable_jobs);
+
+	KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+#if MALI_CUSTOMER_RELEASE == 0
+	jd_check_force_failure(katom);
+#endif
+
+	/* This is needed in case an atom is failed due to being invalid; this
+	 * can happen *before* the jobs that the atom depends on have completed */
+	for (i = 0; i < 2; i++) {
+		if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
+			list_del(&katom->dep_item[i]);
+			kbase_jd_katom_dep_clear(&katom->dep[i]);
+		}
+	}
+
+	/* With PRLAM-10817 or PRLAM-10959, the last tile of a fragment job
+	 * that is being soft-stopped can fail with
+	 * BASE_JD_EVENT_TILE_RANGE_FAULT.
+	 *
+	 * So here, if the fragment job failed with TILE_RANGE_FAULT and has
+	 * been soft-stopped, we promote the error code to BASE_JD_EVENT_DONE.
+	 */
+
+	if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) &&
+		  katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
+		if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
+			/* Promote the failure to job done */
+			katom->event_code = BASE_JD_EVENT_DONE;
+			katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED);
+		}
+	}
+
+	katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+	list_add_tail(&katom->jd_item, &completed_jobs);
+
+	while (!list_empty(&completed_jobs)) {
+		katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, jd_item);
+		list_del(completed_jobs.prev);
+		KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
+
+		for (i = 0; i < 2; i++)
+			jd_resolve_dep(&runnable_jobs, katom, i,
+					kbase_ctx_flag(kctx, KCTX_DYING));
+
+		if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
+			kbase_jd_post_external_resources(katom);
+
+		while (!list_empty(&runnable_jobs)) {
+			struct kbase_jd_atom *node;
+
+			node = list_entry(runnable_jobs.next,
+					struct kbase_jd_atom, jd_item);
+			list_del(runnable_jobs.next);
+			node->in_jd_list = false;
+
+			KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
+
+			if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
+					!kbase_ctx_flag(kctx, KCTX_DYING)) {
+				need_to_try_schedule_context |= jd_run_atom(node);
+			} else {
+				node->event_code = katom->event_code;
+
+				if ((node->core_req &
+					BASE_JD_REQ_SOFT_JOB_TYPE) ==
+					BASE_JD_REQ_SOFT_REPLAY) {
+					if (kbase_replay_process(node))
+						/* Don't complete this atom */
+						continue;
+				} else if (node->core_req &
+							BASE_JD_REQ_SOFT_JOB) {
+					WARN_ON(!list_empty(&node->queue));
+					kbase_finish_soft_job(node);
+				}
+				node->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			}
+
+			if (node->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+				list_add_tail(&node->jd_item, &completed_jobs);
+			} else if (node->status == KBASE_JD_ATOM_STATE_IN_JS &&
+					!node->will_fail_event_code) {
+				/* Node successfully submitted, try submitting
+				 * dependencies as they may now be representable
+				 * in JS */
+				jd_try_submitting_deps(&runnable_jobs, node);
+			}
+		}
+
+		/* Register a completed job as a disjoint event when the GPU
+		 * is in a disjoint state (ie. being reset or replaying jobs).
+		 */
+		kbase_disjoint_event_potential(kctx->kbdev);
+		if (completed_jobs_ctx)
+			list_add_tail(&katom->jd_item, completed_jobs_ctx);
+		else
+			kbase_event_post(kctx, katom);
+
+		/* Decrement and check the TOTAL number of jobs. This includes
+		 * those not tracked by the scheduler: 'not ready to run' and
+		 * 'dependency-only' jobs.
+		 *
+		 * All events are safely queued now, and we can signal any
+		 * waiter that we've got no more jobs (so we can be safely
+		 * terminated).
+		 */
+		if (--kctx->jctx.job_nr == 0)
+			wake_up(&kctx->jctx.zero_jobs_wait);
+	}
+
+	return need_to_try_schedule_context;
+}
+
+KBASE_EXPORT_TEST_API(jd_done_nolock);
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+enum {
+	CORE_REQ_DEP_ONLY,
+	CORE_REQ_SOFT,
+	CORE_REQ_COMPUTE,
+	CORE_REQ_FRAGMENT,
+	CORE_REQ_VERTEX,
+	CORE_REQ_TILER,
+	CORE_REQ_FRAGMENT_VERTEX,
+	CORE_REQ_FRAGMENT_VERTEX_TILER,
+	CORE_REQ_FRAGMENT_TILER,
+	CORE_REQ_VERTEX_TILER,
+	CORE_REQ_UNKNOWN
+};
+static const char * const core_req_strings[] = {
+	"Dependency Only Job",
+	"Soft Job",
+	"Compute Shader Job",
+	"Fragment Shader Job",
+	"Vertex/Geometry Shader Job",
+	"Tiler Job",
+	"Fragment Shader + Vertex/Geometry Shader Job",
+	"Fragment Shader + Vertex/Geometry Shader Job + Tiler Job",
+	"Fragment Shader + Tiler Job",
+	"Vertex/Geometry Shader Job + Tiler Job",
+	"Unknown Job"
+};
+static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
+{
+	if (core_req & BASE_JD_REQ_SOFT_JOB)
+		return core_req_strings[CORE_REQ_SOFT];
+	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+		return core_req_strings[CORE_REQ_COMPUTE];
+	switch (core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) {
+	case BASE_JD_REQ_DEP:
+		return core_req_strings[CORE_REQ_DEP_ONLY];
+	case BASE_JD_REQ_FS:
+		return core_req_strings[CORE_REQ_FRAGMENT];
+	case BASE_JD_REQ_CS:
+		return core_req_strings[CORE_REQ_VERTEX];
+	case BASE_JD_REQ_T:
+		return core_req_strings[CORE_REQ_TILER];
+	case (BASE_JD_REQ_FS | BASE_JD_REQ_CS):
+		return core_req_strings[CORE_REQ_FRAGMENT_VERTEX];
+	case (BASE_JD_REQ_FS | BASE_JD_REQ_T):
+		return core_req_strings[CORE_REQ_FRAGMENT_TILER];
+	case (BASE_JD_REQ_CS | BASE_JD_REQ_T):
+		return core_req_strings[CORE_REQ_VERTEX_TILER];
+	case (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T):
+		return core_req_strings[CORE_REQ_FRAGMENT_VERTEX_TILER];
+	}
+	return core_req_strings[CORE_REQ_UNKNOWN];
+}
+#endif
+
+bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom)
+{
+	struct kbase_jd_context *jctx = &kctx->jctx;
+	int queued = 0;
+	int i;
+	int sched_prio;
+	bool ret;
+	bool will_fail = false;
+
+	/* Update the TOTAL number of jobs. This includes those not tracked by
+	 * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
+	jctx->job_nr++;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+	katom->start_timestamp.tv64 = 0;
+#else
+	katom->start_timestamp = 0;
+#endif
+	katom->udata = user_atom->udata;
+	katom->kctx = kctx;
+	katom->nr_extres = user_atom->nr_extres;
+	katom->extres = NULL;
+	katom->device_nr = user_atom->device_nr;
+	katom->jc = user_atom->jc;
+	katom->core_req = user_atom->core_req;
+	katom->atom_flags = 0;
+	katom->retry_count = 0;
+	katom->need_cache_flush_cores_retained = 0;
+	katom->pre_dep = NULL;
+	katom->post_dep = NULL;
+	katom->x_pre_dep = NULL;
+	katom->x_post_dep = NULL;
+	katom->will_fail_event_code = BASE_JD_EVENT_NOT_STARTED;
+	katom->softjob_data = NULL;
+
+	/* Implicitly sets katom->protected_state.enter as well. */
+	katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+
+	katom->age = kctx->age_count++;
+
+	INIT_LIST_HEAD(&katom->queue);
+	INIT_LIST_HEAD(&katom->jd_item);
+#ifdef CONFIG_MALI_DMA_FENCE
+	kbase_fence_dep_count_set(katom, -1);
+#endif
+
+	/* Don't do anything if the dependencies are messed up.
+	 * This is done in a separate pass so that both dependencies are
+	 * checked at once; otherwise it adds extra complexity to handle the
+	 * 1st dependency (just added to the list) when only the 2nd one has
+	 * an invalid config.
+	 */
+	for (i = 0; i < 2; i++) {
+		int dep_atom_number = user_atom->pre_dep[i].atom_id;
+		base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+		if (dep_atom_number) {
+			if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
+					dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
+				katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
+				katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+
+				/* Wrong dependency setup. Atom will be sent
+				 * back to user space. Do not record any
+				 * dependencies. */
+				KBASE_TLSTREAM_TL_NEW_ATOM(
+						katom,
+						kbase_jd_atom_id(kctx, katom));
+				KBASE_TLSTREAM_TL_RET_ATOM_CTX(
+						katom, kctx);
+				KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom,
+						TL_ATOM_STATE_IDLE);
+
+				ret = jd_done_nolock(katom, NULL);
+				goto out;
+			}
+		}
+	}
+
+	/* Add dependencies */
+	for (i = 0; i < 2; i++) {
+		int dep_atom_number = user_atom->pre_dep[i].atom_id;
+		base_jd_dep_type dep_atom_type;
+		struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
+
+		dep_atom_type = user_atom->pre_dep[i].dependency_type;
+		kbase_jd_katom_dep_clear(&katom->dep[i]);
+
+		if (!dep_atom_number)
+			continue;
+
+		if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED ||
+				dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
+
+			if (dep_atom->event_code == BASE_JD_EVENT_DONE)
+				continue;
+			/* Don't stop this atom if it only has an order
+			 * dependency on the failed one; try to submit it
+			 * through the normal path.
+			 */
+			if (dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
+					dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
+				continue;
+			}
+
+			/* Atom has completed, propagate the error code if any */
+			katom->event_code = dep_atom->event_code;
+			katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+			/* This atom is going through soft replay or
+			 * will be sent back to user space. Do not record any
+			 * dependencies. */
+			KBASE_TLSTREAM_TL_NEW_ATOM(
+					katom,
+					kbase_jd_atom_id(kctx, katom));
+			KBASE_TLSTREAM_TL_RET_ATOM_CTX(katom, kctx);
+			KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom,
+					TL_ATOM_STATE_IDLE);
+
+			if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+					 == BASE_JD_REQ_SOFT_REPLAY) {
+				if (kbase_replay_process(katom)) {
+					ret = false;
+					goto out;
+				}
+			}
+			will_fail = true;
+
+		} else {
+			/* Atom is in progress, add this atom to the list */
+			list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
+			kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
+			queued = 1;
+		}
+	}
+
+	if (will_fail) {
+		if (!queued) {
+			if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+				/* This softjob has failed due to a previous
+				 * dependency, however we should still run the
+				 * prepare & finish functions
+				 */
+				int err = kbase_prepare_soft_job(katom);
+
+				if (err >= 0)
+					kbase_finish_soft_job(katom);
+			}
+
+			ret = jd_done_nolock(katom, NULL);
+
+			goto out;
+		} else {
+
+			if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+				/* This softjob has failed due to a previous
+				 * dependency, however we should still run the
+				 * prepare & finish functions
+				 */
+				if (kbase_prepare_soft_job(katom) != 0) {
+					katom->event_code =
+						BASE_JD_EVENT_JOB_INVALID;
+					ret = jd_done_nolock(katom, NULL);
+					goto out;
+				}
+			}
+
+			katom->will_fail_event_code = katom->event_code;
+			ret = false;
+
+			goto out;
+		}
+	} else {
+		/* These must occur after the above loop to ensure that an atom
+		 * that depends on a previous atom with the same number behaves
+		 * as expected */
+		katom->event_code = BASE_JD_EVENT_DONE;
+		katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+	}
+
+	/* For invalid priority, be most lenient and choose the default */
+	sched_prio = kbasep_js_atom_prio_to_sched_prio(user_atom->prio);
+	if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
+		sched_prio = KBASE_JS_ATOM_SCHED_PRIO_DEFAULT;
+	katom->sched_priority = sched_prio;
+
+	/* Create a new atom. */
+	KBASE_TLSTREAM_TL_NEW_ATOM(
+			katom,
+			kbase_jd_atom_id(kctx, katom));
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, TL_ATOM_STATE_IDLE);
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(katom, katom->sched_priority);
+	KBASE_TLSTREAM_TL_RET_ATOM_CTX(katom, kctx);
+
+	/* Reject atoms with job chain = NULL, as these cause issues with soft-stop */
+	if (!katom->jc && (katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+		dev_warn(kctx->kbdev->dev, "Rejecting atom with jc = NULL");
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	/* Reject atoms with an invalid device_nr */
+	if ((katom->core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) &&
+	    (katom->device_nr >= kctx->kbdev->gpu_props.num_core_groups)) {
+		dev_warn(kctx->kbdev->dev,
+				"Rejecting atom with invalid device_nr %d",
+				katom->device_nr);
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	/* Reject atoms with invalid core requirements */
+	if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
+			(katom->core_req & BASE_JD_REQ_EVENT_COALESCE)) {
+		dev_warn(kctx->kbdev->dev,
+				"Rejecting atom with invalid core requirements");
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	/* Reject soft-job atom of certain types from accessing external resources */
+	if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
+			(((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_FENCE_WAIT) ||
+			 ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_JIT_ALLOC) ||
+			 ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_JIT_FREE))) {
+		dev_warn(kctx->kbdev->dev,
+				"Rejecting soft-job atom accessing external resources");
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		ret = jd_done_nolock(katom, NULL);
+		goto out;
+	}
+
+	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+		/* handle what we need to do to access the external resources */
+		if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
+			/* setup failed (no access, bad resource, unknown resource types, etc.) */
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+	}
+
+	/* Validate the atom. Function will return error if the atom is
+	 * malformed.
+	 *
+	 * Soft-jobs never enter the job scheduler but have their own initialize method.
+	 *
+	 * If either fail then we immediately complete the atom with an error.
+	 */
+	if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
+		if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+	} else {
+		/* Soft-job */
+		if (kbase_prepare_soft_job(katom) != 0) {
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+	}
+
+#ifdef CONFIG_GPU_TRACEPOINTS
+	katom->work_id = atomic_inc_return(&jctx->work_id);
+	trace_gpu_job_enqueue(kctx->id, katom->work_id,
+			kbasep_map_core_reqs_to_string(katom->core_req));
+#endif
+
+	if (queued && !IS_GPU_ATOM(katom)) {
+		ret = false;
+		goto out;
+	}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (kbase_fence_dep_count_read(katom) != -1) {
+		ret = false;
+		goto out;
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+	if ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+						  == BASE_JD_REQ_SOFT_REPLAY) {
+		if (kbase_replay_process(katom))
+			ret = false;
+		else
+			ret = jd_done_nolock(katom, NULL);
+
+		goto out;
+	} else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+		if (kbase_process_soft_job(katom) == 0) {
+			kbase_finish_soft_job(katom);
+			ret = jd_done_nolock(katom, NULL);
+			goto out;
+		}
+
+		ret = false;
+	} else if ((katom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_DEP) {
+		katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+		ret = kbasep_js_add_job(kctx, katom);
+		/* If job was cancelled then resolve immediately */
+		if (katom->event_code == BASE_JD_EVENT_JOB_CANCELLED)
+			ret = jd_done_nolock(katom, NULL);
+	} else {
+		/* This is a pure dependency. Resolve it immediately */
+		ret = jd_done_nolock(katom, NULL);
+	}
+
+ out:
+	return ret;
+}
+
+int kbase_jd_submit(struct kbase_context *kctx,
+		void __user *user_addr, u32 nr_atoms, u32 stride,
+		bool uk6_atom)
+{
+	struct kbase_jd_context *jctx = &kctx->jctx;
+	int err = 0;
+	int i;
+	bool need_to_try_schedule_context = false;
+	struct kbase_device *kbdev;
+	u32 latest_flush;
+
+	/*
+	 * kbase_jd_submit isn't expected to fail and so all errors with the
+	 * jobs are reported by immediately failing them (through event system)
+	 */
+	kbdev = kctx->kbdev;
+
+	beenthere(kctx, "%s", "Enter");
+
+	if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+		dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
+		return -EINVAL;
+	}
+
+	if (stride != sizeof(base_jd_atom_v2)) {
+		dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel");
+		return -EINVAL;
+	}
+
+	/* All atoms submitted in this call have the same flush ID */
+	latest_flush = kbase_backend_get_current_flush_id(kbdev);
+
+	for (i = 0; i < nr_atoms; i++) {
+		struct base_jd_atom_v2 user_atom;
+		struct kbase_jd_atom *katom;
+
+		if (copy_from_user(&user_atom, user_addr,
+					sizeof(user_atom)) != 0) {
+			err = -EINVAL;
+			break;
+		}
+
+		user_addr = (void __user *)((uintptr_t) user_addr + stride);
+
+		mutex_lock(&jctx->lock);
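+		/* Define compiletime_assert() locally if the kernel doesn't
+		 * provide it, so the atom-number field widths can be checked
+		 * below.
+		 */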
+#ifndef compiletime_assert
+#define compiletime_assert_defined
+#define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \
+while (false)
+#endif
+		compiletime_assert((1 << (8*sizeof(user_atom.atom_number))) ==
+					BASE_JD_ATOM_COUNT,
+			"BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+		compiletime_assert(sizeof(user_atom.pre_dep[0].atom_id) ==
+					sizeof(user_atom.atom_number),
+			"BASE_JD_ATOM_COUNT and base_atom_id type out of sync");
+#ifdef compiletime_assert_defined
+#undef compiletime_assert
+#undef compiletime_assert_defined
+#endif
+		katom = &jctx->atoms[user_atom.atom_number];
+
+		/* Record the flush ID for the cache flush optimisation */
+		katom->flush_id = latest_flush;
+
+		while (katom->status != KBASE_JD_ATOM_STATE_UNUSED) {
+			/* Atom number is already in use, wait for the atom to
+			 * complete
+			 */
+			mutex_unlock(&jctx->lock);
+
+			/* This thread will wait for the atom to complete. Due
+			 * to thread scheduling we are not sure that the other
+			 * thread that owns the atom will also schedule the
+			 * context, so we force the scheduler to be active and
+			 * hence eventually schedule this context at some point
+			 * later.
+			 */
+			kbase_js_sched_all(kbdev);
+
+			if (wait_event_killable(katom->completed,
+					katom->status ==
+					KBASE_JD_ATOM_STATE_UNUSED) != 0) {
+				/* We're being killed so the result code
+				 * doesn't really matter
+				 */
+				return 0;
+			}
+			mutex_lock(&jctx->lock);
+		}
+
+		need_to_try_schedule_context |=
+				       jd_submit_atom(kctx, &user_atom, katom);
+
+		/* Register a completed job as a disjoint event when the GPU is
+		 * in a disjoint state (i.e. being reset or replaying jobs).
+		 */
+		kbase_disjoint_event_potential(kbdev);
+
+		mutex_unlock(&jctx->lock);
+	}
+
+	if (need_to_try_schedule_context)
+		kbase_js_sched_all(kbdev);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_submit);
+
+void kbase_jd_done_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+	struct kbase_jd_context *jctx;
+	struct kbase_context *kctx;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+	u64 cache_jc = katom->jc;
+	struct kbasep_js_atom_retained_state katom_retained_state;
+	bool context_idle;
+	base_jd_core_req core_req = katom->core_req;
+
+	/* Soft jobs should never reach this function */
+	KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+	kctx = katom->kctx;
+	jctx = &kctx->jctx;
+	kbdev = kctx->kbdev;
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
+
+	kbase_backend_complete_wq(kbdev, katom);
+
+	/*
+	 * Begin transaction on JD context and JS context
+	 */
+	mutex_lock(&jctx->lock);
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, TL_ATOM_STATE_DONE);
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* This worker only gets called on contexts that are scheduled *in*. This is
+	 * because it only happens in response to an IRQ from a job that was
+	 * running.
+	 */
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (katom->event_code == BASE_JD_EVENT_STOPPED) {
+		/* Atom has been promoted to stopped */
+		unsigned long flags;
+
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		katom->status = KBASE_JD_ATOM_STATE_IN_JS;
+		kbase_js_unpull(kctx, katom);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&jctx->lock);
+
+		return;
+	}
+
+	if ((katom->event_code != BASE_JD_EVENT_DONE) &&
+			(!kbase_ctx_flag(katom->kctx, KCTX_DYING)))
+		dev_err(kbdev->dev,
+			"t6xx: GPU fault 0x%02lx from job slot %d\n",
+					(unsigned long)katom->event_code,
+								katom->slot_nr);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+		kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+	/* Retain state before the katom disappears */
+	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+
+	context_idle = kbase_js_complete_atom_wq(kctx, katom);
+
+	KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));
+
+	kbasep_js_remove_job(kbdev, kctx, katom);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_unlock(&js_devdata->queue_mutex);
+	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+	/* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
+	jd_done_nolock(katom, &kctx->completed_jobs);
+
+	/* katom may have been freed now, do not use! */
+
+	if (context_idle) {
+		unsigned long flags;
+
+		context_idle = false;
+		mutex_lock(&js_devdata->queue_mutex);
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		/* If kbase_sched() has scheduled this context back in then
+		 * KCTX_ACTIVE will have been set after we marked it as
+		 * inactive, and another pm reference will have been taken, so
+		 * drop our reference. But do not call kbase_jm_idle_ctx(), as
+		 * the context is active and fast-starting is allowed.
+		 *
+		 * If an atom has been fast-started then kctx->atoms_pulled will
+		 * be non-zero but KCTX_ACTIVE will still be false (as the
+		 * previous pm reference has been inherited). Do NOT drop our
+		 * reference, as it has been re-used, and leave the context as
+		 * active.
+		 *
+		 * If no new atoms have been started then KCTX_ACTIVE will still
+		 * be false and atoms_pulled will be zero, so drop the reference
+		 * and call kbase_jm_idle_ctx().
+		 *
+		 * As the checks are done under both the queue_mutex and
+		 * hwaccess_lock it should be impossible for this to race
+		 * with the scheduler code.
+		 */
+		if (kbase_ctx_flag(kctx, KCTX_ACTIVE) ||
+		    !atomic_read(&kctx->atoms_pulled)) {
+			/* Calling kbase_jm_idle_ctx() here will ensure that
+			 * atoms are not fast-started when we drop the
+			 * hwaccess_lock. This is not performed if
+			 * KCTX_ACTIVE is set as in that case another pm
+			 * reference has been taken and a fast-start would be
+			 * valid.
+			 */
+			if (!kbase_ctx_flag(kctx, KCTX_ACTIVE))
+				kbase_jm_idle_ctx(kbdev, kctx);
+			context_idle = true;
+		} else {
+			kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+		}
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&js_devdata->queue_mutex);
+	}
+
+	/*
+	 * Transaction complete
+	 */
+	mutex_unlock(&jctx->lock);
+
+	/* The job is no longer running, so we can now safely release the
+	 * context reference and handle any actions that were logged against
+	 * the atom's retained state.
+	 */
+
+	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx, &katom_retained_state);
+
+	kbase_js_sched_all(kbdev);
+
+	if (!atomic_dec_return(&kctx->work_count)) {
+		/* If worker now idle then post all events that jd_done_nolock()
+		 * has queued */
+		mutex_lock(&jctx->lock);
+		while (!list_empty(&kctx->completed_jobs)) {
+			struct kbase_jd_atom *atom = list_entry(
+					kctx->completed_jobs.next,
+					struct kbase_jd_atom, jd_item);
+			list_del(kctx->completed_jobs.next);
+
+			kbase_event_post(kctx, atom);
+		}
+		mutex_unlock(&jctx->lock);
+	}
+
+	kbase_backend_complete_wq_post_sched(kbdev, core_req);
+
+	if (context_idle)
+		kbase_pm_context_idle(kbdev);
+
+	KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER_END, kctx, NULL, cache_jc, 0);
+}
+
+/**
+ * jd_cancel_worker - Work queue job cancel function.
+ * @data: a &struct work_struct
+ *
+ * Only called as part of 'Zapping' a context (which occurs on termination).
+ * Operates serially with the kbase_jd_done_worker() on the work queue.
+ *
+ * This can only be called on contexts that aren't scheduled.
+ *
+ * We don't need to release most of the resources that kbase_jd_done() or
+ * kbase_jd_done_worker() would release, because the atoms here must not be
+ * running (by virtue of only being called on contexts that aren't
+ * scheduled).
+ */
+static void jd_cancel_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+	struct kbase_jd_context *jctx;
+	struct kbase_context *kctx;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool need_to_try_schedule_context;
+	bool attr_state_changed;
+	struct kbase_device *kbdev;
+
+	/* Soft jobs should never reach this function */
+	KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
+
+	kctx = katom->kctx;
+	kbdev = kctx->kbdev;
+	jctx = &kctx->jctx;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	KBASE_TRACE_ADD(kbdev, JD_CANCEL_WORKER, kctx, katom, katom->jc, 0);
+
+	/* This only gets called on contexts that are scheduled out. Hence, we must
+	 * make sure we don't decrement the count of running jobs (there aren't
+	 * any), nor must we try to schedule out the context (it's already
+	 * scheduled out).
+	 */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	/* Scheduler: Remove the job from the system */
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	mutex_lock(&jctx->lock);
+
+	need_to_try_schedule_context = jd_done_nolock(katom, NULL);
+	/* Because we're zapping, we're not adding any more jobs to this ctx,
+	 * so there is no need to schedule the context. There is also no need
+	 * for the jsctx_mutex to be held around this call.
+	 */
+	KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);
+
+	/* katom may have been freed now, do not use! */
+	mutex_unlock(&jctx->lock);
+
+	if (attr_state_changed)
+		kbase_js_sched_all(kbdev);
+}
+
+/**
+ * kbase_jd_done - Complete a job that has been removed from the Hardware
+ * @katom: atom which has been completed
+ * @slot_nr: slot the atom was on
+ * @end_timestamp: completion time
+ * @done_code: completion code
+ *
+ * This must be used whenever a job has been removed from the Hardware, e.g.:
+ * An IRQ indicates that the job finished (for both error and 'done' codes), or
+ * the job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop.
+ *
+ * Some work is carried out immediately, and the rest is deferred onto a
+ * workqueue
+ *
+ * Context:
+ *   This can be called safely from atomic context.
+ *   The caller must hold kbdev->hwaccess_lock
+ */
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
+		ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
+{
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(katom);
+	kctx = katom->kctx;
+	KBASE_DEBUG_ASSERT(kctx);
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
+		katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
+
+	KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);
+
+	kbase_job_check_leave_disjoint(kbdev, katom);
+
+	katom->slot_nr = slot_nr;
+
+	atomic_inc(&kctx->work_count);
+
+#ifdef CONFIG_DEBUG_FS
+	/* A failed job has occurred and is waiting for dumping */
+	if (!katom->will_fail_event_code &&
+			kbase_debug_job_fault_process(katom, katom->event_code))
+		return;
+#endif
+
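+	/* Defer the remainder of the completion handling to the per-context
+	 * job_done workqueue.
+	 */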
+	WARN_ON(work_pending(&katom->work));
+	INIT_WORK(&katom->work, kbase_jd_done_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_done);
+
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx;
+
+	KBASE_DEBUG_ASSERT(NULL != kbdev);
+	KBASE_DEBUG_ASSERT(NULL != katom);
+	kctx = katom->kctx;
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+
+	KBASE_TRACE_ADD(kbdev, JD_CANCEL, kctx, katom, katom->jc, 0);
+
+	/* This should only be done from a context that is not scheduled */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	WARN_ON(work_pending(&katom->work));
+
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	INIT_WORK(&katom->work, jd_cancel_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+
+void kbase_jd_zap_context(struct kbase_context *kctx)
+{
+	struct kbase_jd_atom *katom;
+	struct list_head *entry, *tmp;
+	struct kbase_device *kbdev;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	kbdev = kctx->kbdev;
+
+	KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
+
+	kbase_js_zap_context(kctx);
+
+	mutex_lock(&kctx->jctx.lock);
+
+	/*
+	 * While holding the struct kbase_jd_context lock, clean up jobs that
+	 * are known to kbase but are queued outside the job scheduler.
+	 */
+
+	del_timer_sync(&kctx->soft_job_timeout);
+	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+		katom = list_entry(entry, struct kbase_jd_atom, queue);
+		kbase_cancel_soft_job(katom);
+	}
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	kbase_dma_fence_cancel_all_atoms(kctx);
+#endif
+
+	mutex_unlock(&kctx->jctx.lock);
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	/* Flush dma-fence workqueue to ensure that any callbacks that may have
+	 * been queued are done before continuing.
+	 */
+	flush_workqueue(kctx->dma_fence.wq);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+	kbase_debug_job_fault_kctx_unblock(kctx);
+#endif
+
+	kbase_jm_wait_for_zero_jobs(kctx);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_zap_context);
+
+int kbase_jd_init(struct kbase_context *kctx)
+{
+	int i;
+	int mali_err = 0;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	kctx->jctx.job_done_wq = alloc_workqueue("mali_jd",
+			WQ_HIGHPRI | WQ_UNBOUND, 1);
+	if (NULL == kctx->jctx.job_done_wq) {
+		mali_err = -ENOMEM;
+		goto out1;
+	}
+
+	for (i = 0; i < BASE_JD_ATOM_COUNT; i++) {
+		init_waitqueue_head(&kctx->jctx.atoms[i].completed);
+
+		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[0]);
+		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dep_head[1]);
+
+		/* Catch userspace attempting to use an atom which doesn't exist as a pre-dependency */
+		kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
+		kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
+
+#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+		kctx->jctx.atoms[i].dma_fence.context =
+						dma_fence_context_alloc(1);
+		atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
+		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
+#endif
+	}
+
+	mutex_init(&kctx->jctx.lock);
+
+	init_waitqueue_head(&kctx->jctx.zero_jobs_wait);
+
+	spin_lock_init(&kctx->jctx.tb_lock);
+
+	kctx->jctx.job_nr = 0;
+	INIT_LIST_HEAD(&kctx->completed_jobs);
+	atomic_set(&kctx->work_count, 0);
+
+	return 0;
+
+ out1:
+	return mali_err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_init);
+
+void kbase_jd_exit(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx);
+
+	/* Work queue is emptied by this */
+	destroy_workqueue(kctx->jctx.job_done_wq);
+}
+
+KBASE_EXPORT_TEST_API(kbase_jd_exit);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
new file mode 100644
index 0000000..7b15d8a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
@@ -0,0 +1,240 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <mali_kbase.h>
+#include <mali_kbase_jd_debugfs.h>
+#include <mali_kbase_dma_fence.h>
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif
+#include <mali_kbase_ioctl.h>
+
+struct kbase_jd_debugfs_depinfo {
+	u8 id;
+	char type;
+};
+
+static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom,
+					struct seq_file *sfile)
+{
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	struct kbase_sync_fence_info info;
+	int res;
+
+	switch (atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		res = kbase_sync_fence_out_info_get(atom, &info);
+		if (0 == res) {
+			seq_printf(sfile, "Sa([%p]%d) ",
+				   info.fence, info.status);
+			break;
+		}
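+		/* Fall through */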
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		res = kbase_sync_fence_in_info_get(atom, &info);
+		if (0 == res) {
+			seq_printf(sfile, "Wa([%p]%d) ",
+				   info.fence, info.status);
+			break;
+		}
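+		/* Fall through */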
+	default:
+		break;
+	}
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+
+#ifdef CONFIG_MALI_DMA_FENCE
+	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+		struct kbase_fence_cb *cb;
+
+		if (atom->dma_fence.fence) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+			struct fence *fence = atom->dma_fence.fence;
+#else
+			struct dma_fence *fence = atom->dma_fence.fence;
+#endif
+
+			seq_printf(sfile,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+					"Sd(%u#%u: %s) ",
+#else
+					"Sd(%llu#%u: %s) ",
+#endif
+					fence->context,
+					fence->seqno,
+					dma_fence_is_signaled(fence) ?
+						"signaled" : "active");
+		}
+
+		list_for_each_entry(cb, &atom->dma_fence.callbacks,
+				    node) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+			struct fence *fence = cb->fence;
+#else
+			struct dma_fence *fence = cb->fence;
+#endif
+
+			seq_printf(sfile,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+					"Wd(%u#%u: %s) ",
+#else
+					"Wd(%llu#%u: %s) ",
+#endif
+					fence->context,
+					fence->seqno,
+					dma_fence_is_signaled(fence) ?
+						"signaled" : "active");
+		}
+	}
+#endif /* CONFIG_MALI_DMA_FENCE */
+
+}
+
+static void kbasep_jd_debugfs_atom_deps(
+		struct kbase_jd_debugfs_depinfo *deps,
+		struct kbase_jd_atom *atom)
+{
+	struct kbase_context *kctx = atom->kctx;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		deps[i].id = (unsigned)(atom->dep[i].atom ?
+				kbase_jd_atom_id(kctx, atom->dep[i].atom) : 0);
+
+		switch (atom->dep[i].dep_type) {
+		case BASE_JD_DEP_TYPE_INVALID:
+			deps[i].type = ' ';
+			break;
+		case BASE_JD_DEP_TYPE_DATA:
+			deps[i].type = 'D';
+			break;
+		case BASE_JD_DEP_TYPE_ORDER:
+			deps[i].type = '>';
+			break;
+		default:
+			deps[i].type = '?';
+			break;
+		}
+	}
+}
+/**
+ * kbasep_jd_debugfs_atoms_show - Show callback for the JD atoms debugfs file.
+ * @sfile: The debugfs entry
+ * @data:  Data associated with the entry
+ *
+ * This function is called to get the contents of the JD atoms debugfs file.
+ * This is a report of all atoms managed by kbase_jd_context.atoms
+ *
+ * Return: 0 if the data was successfully printed to the debugfs entry file,
+ * failure otherwise
+ */
+static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_context *kctx = sfile->private;
+	struct kbase_jd_atom *atoms;
+	unsigned long irq_flags;
+	int i;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	/* Print version */
+	seq_printf(sfile, "v%u\n", MALI_JD_DEBUGFS_VERSION);
+
+	/* Print U/K API version */
+	seq_printf(sfile, "ukv%u.%u\n", BASE_UK_VERSION_MAJOR,
+			BASE_UK_VERSION_MINOR);
+
+	/* Print table heading */
+	seq_puts(sfile, " ID, Core req, St, CR,   Predeps,           Start time, Additional info...\n");
+
+	atoms = kctx->jctx.atoms;
+	/* General atom states */
+	mutex_lock(&kctx->jctx.lock);
+	/* JS-related states */
+	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
+	for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) {
+		struct kbase_jd_atom *atom = &atoms[i];
+		s64 start_timestamp = 0;
+		struct kbase_jd_debugfs_depinfo deps[2];
+
+		if (atom->status == KBASE_JD_ATOM_STATE_UNUSED)
+			continue;
+
+		/* start_timestamp is cleared as soon as the atom leaves the
+		 * UNUSED state and is set before a job is submitted to the
+		 * hardware; a non-zero value means it is valid.
+		 */
+		if (ktime_to_ns(atom->start_timestamp))
+			start_timestamp = ktime_to_ns(
+					ktime_sub(ktime_get(), atom->start_timestamp));
+
+		kbasep_jd_debugfs_atom_deps(deps, atom);
+
+		seq_printf(sfile,
+				"%3u, %8x, %2u, %c%3u %c%3u, %20lld, ",
+				i, atom->core_req, atom->status,
+				deps[0].type, deps[0].id,
+				deps[1].type, deps[1].id,
+				start_timestamp);
+
+		kbase_jd_debugfs_fence_info(atom, sfile);
+
+		seq_puts(sfile, "\n");
+	}
+	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
+	mutex_unlock(&kctx->jctx.lock);
+
+	return 0;
+}
+
+
+/**
+ * kbasep_jd_debugfs_atoms_open - open operation for atom debugfs file
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int kbasep_jd_debugfs_atoms_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbasep_jd_debugfs_atoms_show, in->i_private);
+}
+
+static const struct file_operations kbasep_jd_debugfs_atoms_fops = {
+	.open = kbasep_jd_debugfs_atoms_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	/* Expose all atoms */
+	debugfs_create_file("atoms", S_IRUGO, kctx->kctx_dentry, kctx,
+			&kbasep_jd_debugfs_atoms_fops);
+
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h
new file mode 100644
index 0000000..697bdef
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_jd_debugfs.h
+ * Header file for job dispatcher-related entries in debugfs
+ */
+
+#ifndef _KBASE_JD_DEBUGFS_H
+#define _KBASE_JD_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#define MALI_JD_DEBUGFS_VERSION 3
+
+/* Forward declarations */
+struct kbase_context;
+
+/**
+ * kbasep_jd_debugfs_ctx_init() - Add debugfs entries for JD system
+ *
+ * @kctx: Pointer to kbase_context
+ */
+void kbasep_jd_debugfs_ctx_init(struct kbase_context *kctx);
+
+#endif  /*_KBASE_JD_DEBUGFS_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.c b/drivers/gpu/arm/midgard/mali_kbase_jm.c
new file mode 100644
index 0000000..da78a16
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jm.c
@@ -0,0 +1,140 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * HW access job manager common APIs
+ */
+
+#include <mali_kbase.h>
+#include "mali_kbase_hwaccess_jm.h"
+#include "mali_kbase_jm.h"
+
+/**
+ * kbase_jm_next_job() - Attempt to run the next @nr_jobs_to_submit jobs on
+ *			 slot @js from the active context.
+ * @kbdev:		Device pointer
+ * @js:			Job slot to run on
+ * @nr_jobs_to_submit:	Number of jobs to attempt to submit
+ *
+ * Return: true if slot can still be submitted on, false if slot is now full.
+ */
+static bool kbase_jm_next_job(struct kbase_device *kbdev, int js,
+				int nr_jobs_to_submit)
+{
+	struct kbase_context *kctx;
+	int i;
+
+	kctx = kbdev->hwaccess.active_kctx[js];
+
+	if (!kctx)
+		return true;
+
+	for (i = 0; i < nr_jobs_to_submit; i++) {
+		struct kbase_jd_atom *katom = kbase_js_pull(kctx, js);
+
+		if (!katom)
+			return true; /* Context has no jobs on this slot */
+
+		kbase_backend_run_atom(kbdev, katom);
+	}
+
+	return false; /* Slot ringbuffer should now be full */
+}
+
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+	u32 ret_mask = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
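+	/* Submit to each slot in js_mask; record in the returned mask the
+	 * slots that can still accept more jobs.
+	 */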
+	while (js_mask) {
+		int js = ffs(js_mask) - 1;
+		int nr_jobs_to_submit = kbase_backend_slot_free(kbdev, js);
+
+		if (kbase_jm_next_job(kbdev, js, nr_jobs_to_submit))
+			ret_mask |= (1 << js);
+
+		js_mask &= ~(1 << js);
+	}
+
+	return ret_mask;
+}
+
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!down_trylock(&js_devdata->schedule_sem)) {
+		kbase_jm_kick(kbdev, js_mask);
+		up(&js_devdata->schedule_sem);
+	}
+}
+
+void kbase_jm_try_kick_all(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!down_trylock(&js_devdata->schedule_sem)) {
+		kbase_jm_kick_all(kbdev);
+		up(&js_devdata->schedule_sem);
+	}
+}
+
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		if (kbdev->hwaccess.active_kctx[js] == kctx)
+			kbdev->hwaccess.active_kctx[js] = NULL;
+	}
+}
+
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (katom->event_code != BASE_JD_EVENT_STOPPED &&
+			katom->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT) {
+		return kbase_js_complete_atom(katom, NULL);
+	} else {
+		kbase_js_unpull(katom->kctx, katom);
+		return NULL;
+	}
+}
+
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom, ktime_t *end_timestamp)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	return kbase_js_complete_atom(katom, end_timestamp);
+}
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.h b/drivers/gpu/arm/midgard/mali_kbase_jm.h
new file mode 100644
index 0000000..c468ea4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_jm.h
@@ -0,0 +1,115 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+/*
+ * Job manager common APIs
+ */
+
+#ifndef _KBASE_JM_H_
+#define _KBASE_JM_H_
+
+/**
+ * kbase_jm_kick() - Indicate that there are jobs ready to run.
+ * @kbdev:	Device pointer
+ * @js_mask:	Mask of the job slots that can be pulled from.
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+u32 kbase_jm_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_kick_all() - Indicate that there are jobs ready to run on all job
+ *			 slots.
+ * @kbdev:	Device pointer
+ *
+ * Caller must hold the hwaccess_lock and schedule_sem semaphore
+ *
+ * Return: Mask of the job slots that can still be submitted to.
+ */
+static inline u32 kbase_jm_kick_all(struct kbase_device *kbdev)
+{
+	return kbase_jm_kick(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
+
+/**
+ * kbase_jm_try_kick - Attempt to call kbase_jm_kick
+ * @kbdev:   Device pointer
+ * @js_mask: Mask of the job slots that can be pulled from
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick(struct kbase_device *kbdev, u32 js_mask);
+
+/**
+ * kbase_jm_try_kick_all() - Attempt to call kbase_jm_kick_all
+ * @kbdev:  Device pointer
+ * Context: Caller must hold hwaccess_lock
+ *
+ * If schedule_sem can be immediately obtained then this function will call
+ * kbase_jm_kick_all() otherwise it will do nothing.
+ */
+void kbase_jm_try_kick_all(struct kbase_device *kbdev);
+
+/**
+ * kbase_jm_idle_ctx() - Mark a context as idle.
+ * @kbdev:	Device pointer
+ * @kctx:	Context to mark as idle
+ *
+ * No more atoms will be pulled from this context until it is marked as active
+ * by kbase_js_use_ctx().
+ *
+ * The context should have no atoms currently pulled from it
+ * (kctx->atoms_pulled == 0).
+ *
+ * Caller must hold the hwaccess_lock
+ */
+void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * kbase_jm_return_atom_to_js() - Return an atom to the job scheduler that has
+ *				  been soft-stopped or will fail due to a
+ *				  dependency
+ * @kbdev:	Device pointer
+ * @katom:	Atom that has been stopped or will be failed
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
+			struct kbase_jd_atom *katom);
+
+/**
+ * kbase_jm_complete() - Complete an atom
+ * @kbdev:		Device pointer
+ * @katom:		Atom that has completed
+ * @end_timestamp:	Timestamp of atom completion
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_jm_complete(struct kbase_device *kbdev,
+		struct kbase_jd_atom *katom, ktime_t *end_timestamp);
+
+#endif /* _KBASE_JM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.c b/drivers/gpu/arm/midgard/mali_kbase_js.c
new file mode 100644
index 0000000..80b6d77
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js.c
@@ -0,0 +1,2893 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/*
+ * Job Scheduler Implementation
+ */
+#include <mali_kbase.h>
+#include <mali_kbase_js.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_ctx_sched.h>
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
+
+#include "mali_kbase_jm.h"
+#include "mali_kbase_hwaccess_jm.h"
+
+/*
+ * Private types
+ */
+
+/* Bitpattern indicating the result of releasing a context */
+enum {
+	/* The context was descheduled - caller should try scheduling in a new
+	 * one to keep the runpool full */
+	KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED = (1u << 0),
+	/* Ctx attributes were changed - caller should try scheduling all
+	 * contexts */
+	KBASEP_JS_RELEASE_RESULT_SCHED_ALL = (1u << 1)
+};
+
+typedef u32 kbasep_js_release_result;
+
+const int kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS] = {
+	KBASE_JS_ATOM_SCHED_PRIO_MED, /* BASE_JD_PRIO_MEDIUM */
+	KBASE_JS_ATOM_SCHED_PRIO_HIGH, /* BASE_JD_PRIO_HIGH */
+	KBASE_JS_ATOM_SCHED_PRIO_LOW  /* BASE_JD_PRIO_LOW */
+};
+
+const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT] = {
+	BASE_JD_PRIO_HIGH,   /* KBASE_JS_ATOM_SCHED_PRIO_HIGH */
+	BASE_JD_PRIO_MEDIUM, /* KBASE_JS_ATOM_SCHED_PRIO_MED */
+	BASE_JD_PRIO_LOW     /* KBASE_JS_ATOM_SCHED_PRIO_LOW */
+};
+
+
+/*
+ * Private function prototypes
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+		struct kbase_device *kbdev, struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state);
+
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+		kbasep_js_ctx_job_cb callback);
+
+/* Helper for trace subcodes */
+#if KBASE_TRACE_ENABLE
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	return atomic_read(&kctx->refcount);
+}
+#else				/* KBASE_TRACE_ENABLE  */
+static int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	CSTD_UNUSED(kbdev);
+	CSTD_UNUSED(kctx);
+	return 0;
+}
+#endif				/* KBASE_TRACE_ENABLE  */
+
+/*
+ * Private functions
+ */
+
+/**
+ * core_reqs_from_jsn_features - Convert JSn_FEATURES to core requirements
+ * @features: JSn_FEATURE register value
+ *
+ * Given a JSn_FEATURE register value returns the core requirements that match
+ *
+ * Return: Core requirement bit mask
+ */
+static base_jd_core_req core_reqs_from_jsn_features(u16 features)
+{
+	base_jd_core_req core_req = 0u;
+
+	if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
+		core_req |= BASE_JD_REQ_V;
+
+	if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
+		core_req |= BASE_JD_REQ_CF;
+
+	if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
+		core_req |= BASE_JD_REQ_CS;
+
+	if ((features & JS_FEATURE_TILER_JOB) != 0)
+		core_req |= BASE_JD_REQ_T;
+
+	if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
+		core_req |= BASE_JD_REQ_FS;
+
+	return core_req;
+}
+
+static void kbase_js_sync_timers(struct kbase_device *kbdev)
+{
+	mutex_lock(&kbdev->js_data.runpool_mutex);
+	kbase_backend_ctx_count_changed(kbdev);
+	mutex_unlock(&kbdev->js_data.runpool_mutex);
+}
+
+/* Hold the mmu_hw_mutex and hwaccess_lock for this */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	bool result = false;
+	int as_nr;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	as_nr = kctx->as_nr;
+	if (atomic_read(&kctx->refcount) > 0) {
+		KBASE_DEBUG_ASSERT(as_nr >= 0);
+
+		kbase_ctx_sched_retain_ctx_refcount(kctx);
+		KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RETAIN_CTX_NOLOCK, kctx,
+				NULL, 0u, atomic_read(&kctx->refcount));
+		result = true;
+	}
+
+	return result;
+}
+
+/**
+ * jsctx_rb_none_to_pull_prio(): - Check if there are no pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ * @prio: Priority to check.
+ *
+ * Return true if there are no atoms to pull. There may be running atoms in the
+ * ring buffer even if there are no atoms to pull. It is also possible for the
+ * ring buffer to be full (with running atoms) when this function returns
+ * true.
+ *
+ * Return: true if there are no atoms to pull, false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull_prio(struct kbase_context *kctx, int js, int prio)
+{
+	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	return RB_EMPTY_ROOT(&rb->runnable_tree);
+}
+
+/**
+ * jsctx_rb_none_to_pull(): - Check if all priority ring buffers have no
+ * pullable atoms
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if the ring buffers for all priorities have no pullable atoms,
+ *	   false otherwise.
+ */
+static inline bool
+jsctx_rb_none_to_pull(struct kbase_context *kctx, int js)
+{
+	int prio;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+		if (!jsctx_rb_none_to_pull_prio(kctx, js, prio))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * jsctx_queue_foreach_prio(): - Execute callback for each entry in the queue.
+ * @kctx:     Pointer to kbase context with the queue.
+ * @js:       Job slot id to iterate.
+ * @prio:     Priority id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over a queue and invoke @callback for each entry in the queue, and
+ * remove the entry from the queue.
+ *
+ * If entries are added to the queue while this is running, those entries may
+ * or may not be covered. To ensure that all entries in the buffer have been
+ * enumerated when this function returns, jsctx->lock must be held when calling
+ * this function.
+ *
+ * The HW access lock must always be held when calling this function.
+ */
+static void
+jsctx_queue_foreach_prio(struct kbase_context *kctx, int js, int prio,
+		kbasep_js_ctx_job_cb callback)
+{
+	struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	while (!RB_EMPTY_ROOT(&queue->runnable_tree)) {
+		struct rb_node *node = rb_first(&queue->runnable_tree);
+		struct kbase_jd_atom *entry = rb_entry(node,
+				struct kbase_jd_atom, runnable_tree_node);
+
+		rb_erase(node, &queue->runnable_tree);
+		callback(kctx->kbdev, entry);
+	}
+
+	while (!list_empty(&queue->x_dep_head)) {
+		struct kbase_jd_atom *entry = list_entry(queue->x_dep_head.next,
+				struct kbase_jd_atom, queue);
+
+		list_del(queue->x_dep_head.next);
+
+		callback(kctx->kbdev, entry);
+	}
+}
+
+/**
+ * jsctx_queue_foreach(): - Execute callback for each entry in every queue
+ * @kctx:     Pointer to kbase context with queue.
+ * @js:       Job slot id to iterate.
+ * @callback: Function pointer to callback.
+ *
+ * Iterate over all the different priorities, and for each call
+ * jsctx_queue_foreach_prio() to iterate over the queue and invoke @callback
+ * for each entry, and remove the entry from the queue.
+ */
+static inline void
+jsctx_queue_foreach(struct kbase_context *kctx, int js,
+		kbasep_js_ctx_job_cb callback)
+{
+	int prio;
+
+	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++)
+		jsctx_queue_foreach_prio(kctx, js, prio, callback);
+}
+
+/**
+ * jsctx_rb_peek_prio(): - Check buffer and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ * @prio: Priority id to check.
+ *
+ * Check the ring buffer for the specified @js and @prio and return a pointer to
+ * the next atom, unless the ring buffer is empty.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek_prio(struct kbase_context *kctx, int js, int prio)
+{
+	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+	struct rb_node *node;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	node = rb_first(&rb->runnable_tree);
+	if (!node)
+		return NULL;
+
+	return rb_entry(node, struct kbase_jd_atom, runnable_tree_node);
+}
+
+/**
+ * jsctx_rb_peek(): - Check all priority buffers and get next atom
+ * @kctx: Pointer to kbase context with ring buffer.
+ * @js:   Job slot id to check.
+ *
+ * Check the ring buffers for all priorities, starting from
+ * KBASE_JS_ATOM_SCHED_PRIO_HIGH, for the specified @js and return a pointer
+ * to the next atom, unless all of the priorities' ring buffers are empty.
+ *
+ * Caller must hold the hwaccess_lock.
+ *
+ * Return: Pointer to next atom in buffer, or NULL if there is no atom.
+ */
+static inline struct kbase_jd_atom *
+jsctx_rb_peek(struct kbase_context *kctx, int js)
+{
+	int prio;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+		struct kbase_jd_atom *katom;
+
+		katom = jsctx_rb_peek_prio(kctx, js, prio);
+		if (katom)
+			return katom;
+	}
+
+	return NULL;
+}
+
+/**
+ * jsctx_rb_pull(): - Mark atom in list as running
+ * @kctx:  Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to pull.
+ *
+ * Mark an atom previously obtained from jsctx_rb_peek() as running.
+ *
+ * @katom must currently be at the head of the ring buffer.
+ */
+static inline void
+jsctx_rb_pull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	int prio = katom->sched_priority;
+	int js = katom->slot_nr;
+	struct jsctx_queue *rb = &kctx->jsctx_queue[prio][js];
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	/* Atoms must be pulled in the correct order. */
+	WARN_ON(katom != jsctx_rb_peek_prio(kctx, js, prio));
+
+	rb_erase(&katom->runnable_tree_node, &rb->runnable_tree);
+}
+
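+/* Wrap-safe ordering comparison: true if @a comes before @b, even if the
+ * underlying counter has wrapped.
+ */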
+#define LESS_THAN_WRAP(a, b) ((s32)(a - b) < 0)
+
+static void
+jsctx_tree_add(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	int prio = katom->sched_priority;
+	int js = katom->slot_nr;
+	struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+	struct rb_node **new = &(queue->runnable_tree.rb_node), *parent = NULL;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
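+	/* Walk the tree to find the insertion point, keeping atoms ordered by
+	 * age (oldest first).
+	 */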
+	while (*new) {
+		struct kbase_jd_atom *entry = container_of(*new,
+				struct kbase_jd_atom, runnable_tree_node);
+
+		parent = *new;
+		if (LESS_THAN_WRAP(katom->age, entry->age))
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&katom->runnable_tree_node, parent, new);
+	rb_insert_color(&katom->runnable_tree_node, &queue->runnable_tree);
+
+	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(katom, TL_ATOM_STATE_READY);
+}
+
+/**
+ * jsctx_rb_unpull(): - Undo marking of atom in list as running
+ * @kctx:  Pointer to kbase context with ring buffer.
+ * @katom: Pointer to katom to unpull.
+ *
+ * Undo jsctx_rb_pull() and put @katom back in the queue.
+ *
+ * jsctx_rb_unpull() must be called on atoms in the same order the atoms were
+ * pulled.
+ */
+static inline void
+jsctx_rb_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	jsctx_tree_add(kctx, katom);
+}
+
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx,
+					int js,
+					bool is_scheduled);
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js);
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js);
+
+/*
+ * Functions private to KBase ('Protected' functions)
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev)
+{
+	struct kbasep_js_device_data *jsdd;
+	int i, j;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	jsdd = &kbdev->js_data;
+
+#ifdef CONFIG_MALI_DEBUG
+	/* Soft-stop will be disabled on a single context by default unless
+	 * softstop_always is set */
+	jsdd->softstop_always = false;
+#endif				/* CONFIG_MALI_DEBUG */
+	jsdd->nr_all_contexts_running = 0;
+	jsdd->nr_user_contexts_running = 0;
+	jsdd->nr_contexts_pullable = 0;
+	atomic_set(&jsdd->nr_contexts_runnable, 0);
+	/* No ctx allowed to submit */
+	jsdd->runpool_irq.submit_allowed = 0u;
+	memset(jsdd->runpool_irq.ctx_attr_ref_count, 0,
+			sizeof(jsdd->runpool_irq.ctx_attr_ref_count));
+	memset(jsdd->runpool_irq.slot_affinities, 0,
+			sizeof(jsdd->runpool_irq.slot_affinities));
+	memset(jsdd->runpool_irq.slot_affinity_refcount, 0,
+			sizeof(jsdd->runpool_irq.slot_affinity_refcount));
+	INIT_LIST_HEAD(&jsdd->suspended_soft_jobs_list);
+
+	/* Config attributes */
+	jsdd->scheduling_period_ns = DEFAULT_JS_SCHEDULING_PERIOD_NS;
+	jsdd->soft_stop_ticks = DEFAULT_JS_SOFT_STOP_TICKS;
+	jsdd->soft_stop_ticks_cl = DEFAULT_JS_SOFT_STOP_TICKS_CL;
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS_8408;
+	else
+		jsdd->hard_stop_ticks_ss = DEFAULT_JS_HARD_STOP_TICKS_SS;
+	jsdd->hard_stop_ticks_cl = DEFAULT_JS_HARD_STOP_TICKS_CL;
+	jsdd->hard_stop_ticks_dumping = DEFAULT_JS_HARD_STOP_TICKS_DUMPING;
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
+		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS_8408;
+	else
+		jsdd->gpu_reset_ticks_ss = DEFAULT_JS_RESET_TICKS_SS;
+	jsdd->gpu_reset_ticks_cl = DEFAULT_JS_RESET_TICKS_CL;
+	jsdd->gpu_reset_ticks_dumping = DEFAULT_JS_RESET_TICKS_DUMPING;
+	jsdd->ctx_timeslice_ns = DEFAULT_JS_CTX_TIMESLICE_NS;
+	atomic_set(&jsdd->soft_job_timeout_ms, DEFAULT_JS_SOFT_JOB_TIMEOUT);
+
+	dev_dbg(kbdev->dev, "JS Config Attribs: ");
+	dev_dbg(kbdev->dev, "\tscheduling_period_ns:%u",
+			jsdd->scheduling_period_ns);
+	dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u",
+			jsdd->soft_stop_ticks);
+	dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u",
+			jsdd->soft_stop_ticks_cl);
+	dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u",
+			jsdd->hard_stop_ticks_ss);
+	dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u",
+			jsdd->hard_stop_ticks_cl);
+	dev_dbg(kbdev->dev, "\thard_stop_ticks_dumping:%u",
+			jsdd->hard_stop_ticks_dumping);
+	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u",
+			jsdd->gpu_reset_ticks_ss);
+	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u",
+			jsdd->gpu_reset_ticks_cl);
+	dev_dbg(kbdev->dev, "\tgpu_reset_ticks_dumping:%u",
+			jsdd->gpu_reset_ticks_dumping);
+	dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u",
+			jsdd->ctx_timeslice_ns);
+	dev_dbg(kbdev->dev, "\tsoft_job_timeout:%i",
+		atomic_read(&jsdd->soft_job_timeout_ms));
+
+	if (!(jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_ss &&
+			jsdd->hard_stop_ticks_ss < jsdd->gpu_reset_ticks_ss &&
+			jsdd->soft_stop_ticks < jsdd->hard_stop_ticks_dumping &&
+			jsdd->hard_stop_ticks_dumping <
+			jsdd->gpu_reset_ticks_dumping)) {
+		dev_err(kbdev->dev, "Job scheduler timeouts invalid; soft/hard/reset tick counts should be in increasing order\n");
+		return -EINVAL;
+	}
+
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+	dev_dbg(kbdev->dev, "Job Scheduling Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.",
+			jsdd->soft_stop_ticks,
+			jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS
+	dev_dbg(kbdev->dev, "Job Scheduling Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_dumping==%u at %uns per tick. Other hard-stops may still occur.",
+			jsdd->hard_stop_ticks_ss,
+			jsdd->hard_stop_ticks_dumping,
+			jsdd->scheduling_period_ns);
+#endif
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
+	dev_dbg(kbdev->dev, "Note: The JS tick timer (if coded) will still be run, but do nothing.");
+#endif
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i)
+		jsdd->js_reqs[i] = core_reqs_from_jsn_features(
+			kbdev->gpu_props.props.raw_props.js_features[i]);
+
+	/* On error, we could continue on, provided that none of the resources
+	 * below rely on the ones above */
+
+	mutex_init(&jsdd->runpool_mutex);
+	mutex_init(&jsdd->queue_mutex);
+	spin_lock_init(&kbdev->hwaccess_lock);
+	sema_init(&jsdd->schedule_sem, 1);
+
+	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
+		for (j = 0; j < KBASE_JS_ATOM_SCHED_PRIO_COUNT; ++j) {
+			INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i][j]);
+			INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i][j]);
+		}
+	}
+
+	return 0;
+}
+
+void kbasep_js_devdata_halt(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+void kbasep_js_devdata_term(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata;
+	s8 zero_ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT] = { 0, };
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	js_devdata = &kbdev->js_data;
+
+	/* The caller must de-register all contexts before calling this */
+	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
+	KBASE_DEBUG_ASSERT(memcmp(
+				  js_devdata->runpool_irq.ctx_attr_ref_count,
+				  zero_ctx_attr_ref_count,
+				  sizeof(zero_ctx_attr_ref_count)) == 0);
+	CSTD_UNUSED(zero_ctx_attr_ref_count);
+}
+
+int kbasep_js_kctx_init(struct kbase_context *const kctx)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	int i, j;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
+		INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	js_kctx_info->ctx.nr_jobs = 0;
+	kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+	kbase_ctx_flag_clear(kctx, KCTX_DYING);
+	memset(js_kctx_info->ctx.ctx_attr_ref_count, 0,
+			sizeof(js_kctx_info->ctx.ctx_attr_ref_count));
+
+	/* Initially, the context is disabled from submission until the create
+	 * flags are set */
+	kbase_ctx_flag_set(kctx, KCTX_SUBMIT_DISABLED);
+
+	/* On error, we could continue on, provided that none of the resources
+	 * below rely on the ones above */
+	mutex_init(&js_kctx_info->ctx.jsctx_mutex);
+
+	init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
+
+	for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
+		for (j = 0; j < BASE_JM_MAX_NR_SLOTS; j++) {
+			INIT_LIST_HEAD(&kctx->jsctx_queue[i][j].x_dep_head);
+			kctx->jsctx_queue[i][j].runnable_tree = RB_ROOT;
+		}
+	}
+
+	return 0;
+}
+
+void kbasep_js_kctx_term(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	int js;
+	bool update_ctx_count = false;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	kbdev = kctx->kbdev;
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* The caller must de-register all jobs before calling this */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
+
+	mutex_lock(&kbdev->js_data.queue_mutex);
+	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)) {
+		WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0);
+		atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+		update_ctx_count = true;
+		kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+	}
+
+	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	mutex_unlock(&kbdev->js_data.queue_mutex);
+
+	if (update_ctx_count) {
+		mutex_lock(&kbdev->js_data.runpool_mutex);
+		kbase_backend_ctx_count_changed(kbdev);
+		mutex_unlock(&kbdev->js_data.runpool_mutex);
+	}
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_nolock - Variant of
+ *                                         kbase_jd_ctx_list_add_pullable()
+ *                                         where the caller must hold
+ *                                         hwaccess_lock
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return: true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+	list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+			&kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
+
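+	/* If this context was not previously pullable on any slot, update the
+	 * device-wide counts.
+	 */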
+	if (!kctx->slots_pullable) {
+		kbdev->js_data.nr_contexts_pullable++;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable |= (1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head_nolock - Variant of
+ *                                              kbase_js_ctx_list_add_pullable_head()
+ *                                              where the caller must hold
+ *                                              hwaccess_lock
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head_nolock(
+		struct kbase_device *kbdev, struct kbase_context *kctx, int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+	list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+			&kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
+
+	if (!kctx->slots_pullable) {
+		kbdev->js_data.nr_contexts_pullable++;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+			atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable |= (1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_pullable_head - Add context to the head of the
+ *                                       per-slot pullable context queue
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * If the context is on either the pullable or unpullable queues, then it is
+ * removed before being added to the head.
+ *
+ * This function should be used when a context has been scheduled, but no jobs
+ * can currently be pulled from it.
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_pullable_head(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	ret = kbase_js_ctx_list_add_pullable_head_nolock(kbdev, kctx, js);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_add_unpullable_nolock - Add context to the tail of the
+ *                                           per-slot unpullable context queue
+ * @kbdev:  Device pointer
+ * @kctx:   Context to add to queue
+ * @js:     Job slot to use
+ *
+ * The context must already be on the per-slot pullable queue. It will be
+ * removed from the pullable queue before being added to the unpullable queue.
+ *
+ * This function should be used when a context has been pulled from, and there
+ * are no jobs remaining on the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+		&kbdev->js_data.ctx_list_unpullable[js][kctx->priority]);
+
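+	/* If this was the only pullable slot, the context is no longer
+	 * pullable, and, with no atoms in flight, no longer runnable either.
+	 */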
+	if (kctx->slots_pullable == (1 << js)) {
+		kbdev->js_data.nr_contexts_pullable--;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable &= ~(1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_remove_nolock - Remove context from the per-slot pullable
+ *                                   or unpullable context queues
+ * @kbdev:  Device pointer
+ * @kctx:   Context to remove from queue
+ * @js:     Job slot to use
+ *
+ * The context must already be on one of the queues.
+ *
+ * This function should be used when a context has no jobs on the GPU, and no
+ * jobs remaining for the specified slot.
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  true if caller should call kbase_backend_ctx_count_changed()
+ */
+static bool kbase_js_ctx_list_remove_nolock(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	WARN_ON(list_empty(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]));
+
+	list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+	if (kctx->slots_pullable == (1 << js)) {
+		kbdev->js_data.nr_contexts_pullable--;
+		ret = true;
+		if (!atomic_read(&kctx->atoms_pulled)) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+		}
+	}
+	kctx->slots_pullable &= ~(1 << js);
+
+	return ret;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head_nolock - Variant of kbase_js_ctx_list_pop_head()
+ *                                     where the caller must hold
+ *                                     hwaccess_lock
+ * @kbdev:  Device pointer
+ * @js:     Job slot to use
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:  Context to use for specified slot.
+ *          NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head_nolock(
+						struct kbase_device *kbdev,
+						int js)
+{
+	struct kbase_context *kctx;
+	int i;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
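+	/* Walk the per-priority pullable queues in priority order (lower index
+	 * means higher priority) and return the first context found.
+	 */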
+	for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
+		if (list_empty(&kbdev->js_data.ctx_list_pullable[js][i]))
+			continue;
+
+		kctx = list_entry(kbdev->js_data.ctx_list_pullable[js][i].next,
+				struct kbase_context,
+				jctx.sched_info.ctx.ctx_list_entry[js]);
+
+		list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+
+		return kctx;
+	}
+	return NULL;
+}
+
+/**
+ * kbase_js_ctx_list_pop_head - Pop the head context off the per-slot pullable
+ *                              queue.
+ * @kbdev:  Device pointer
+ * @js:     Job slot to use
+ *
+ * Return:  Context to use for specified slot.
+ *          NULL if no contexts present for specified slot
+ */
+static struct kbase_context *kbase_js_ctx_list_pop_head(
+		struct kbase_device *kbdev, int js)
+{
+	struct kbase_context *kctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kctx = kbase_js_ctx_list_pop_head_nolock(kbdev, js);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return kctx;
+}
+
+/**
+ * kbase_js_ctx_pullable - Return if a context can be pulled from on the
+ *                         specified slot
+ * @kctx:          Context pointer
+ * @js:            Job slot to use
+ * @is_scheduled:  true if the context is currently scheduled
+ *
+ * Caller must hold hwaccess_lock
+ *
+ * Return:         true if context can be pulled from on specified slot
+ *                 false otherwise
+ */
+static bool kbase_js_ctx_pullable(struct kbase_context *kctx, int js,
+					bool is_scheduled)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_jd_atom *katom;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	js_devdata = &kctx->kbdev->js_data;
+
+	if (is_scheduled) {
+		if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+			return false;
+	}
+	katom = jsctx_rb_peek(kctx, js);
+	if (!katom)
+		return false; /* No pullable atoms */
+	if (kctx->blocked_js[js][katom->sched_priority])
+		return false;
+	if (atomic_read(&katom->blocked))
+		return false; /* next atom blocked */
+	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
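+		/* The atom's cross-slot pre-dependency must already be in a
+		 * slot ringbuffer and must not be doomed to fail before this
+		 * atom can be pulled.
+		 */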
+		if (katom->x_pre_dep->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+					katom->x_pre_dep->will_fail_event_code)
+			return false;
+		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+				kbase_backend_nr_atoms_on_slot(kctx->kbdev, js))
+			return false;
+	}
+
+	return true;
+}
+
+static bool kbase_js_dep_validate(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	bool ret = true;
+	bool has_dep = false, has_x_dep = false;
+	int js = kbase_js_get_slot(kbdev, katom);
+	int prio = katom->sched_priority;
+	int i;
+
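+	/* An atom has at most two dependencies; check that each of them can be
+	 * represented in the per-context ringbuffer/tree.
+	 */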
+	for (i = 0; i < 2; i++) {
+		struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+		if (dep_atom) {
+			int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+			int dep_prio = dep_atom->sched_priority;
+
+			/* Dependent atom must already have been submitted */
+			if (!(dep_atom->atom_flags &
+					KBASE_KATOM_FLAG_JSCTX_IN_TREE)) {
+				ret = false;
+				break;
+			}
+
+			/* Dependencies with different priorities can't
+			 * be represented in the ringbuffer */
+			if (prio != dep_prio) {
+				ret = false;
+				break;
+			}
+
+			if (js == dep_js) {
+				/* Only one same-slot dependency can be
+				 * represented in the ringbuffer */
+				if (has_dep) {
+					ret = false;
+					break;
+				}
+				/* Each dependee atom can only have one
+				 * same-slot dependent */
+				if (dep_atom->post_dep) {
+					ret = false;
+					break;
+				}
+				has_dep = true;
+			} else {
+				/* Only one cross-slot dependency can be
+				 * represented in the ringbuffer */
+				if (has_x_dep) {
+					ret = false;
+					break;
+				}
+				/* Each dependee atom can only have one
+				 * cross-slot dependent */
+				if (dep_atom->x_post_dep) {
+					ret = false;
+					break;
+				}
+				/* The dependee atom must not already be in the
+				 * HW access ringbuffer */
+				if (dep_atom->gpu_rb_state !=
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+					ret = false;
+					break;
+				}
+				/* The dependee atom must not already have
+				 * completed */
+				if (dep_atom->status !=
+						KBASE_JD_ATOM_STATE_IN_JS) {
+					ret = false;
+					break;
+				}
+				/* Cross-slot dependencies must not violate
+				 * PRLAM-8987 affinity restrictions */
+				if (kbase_hw_has_issue(kbdev,
+							BASE_HW_ISSUE_8987) &&
+						(js == 2 || dep_js == 2)) {
+					ret = false;
+					break;
+				}
+				has_x_dep = true;
+			}
+
+			/* Dependency can be represented in ringbuffers */
+		}
+	}
+
+	/* If the dependencies can be represented in the ringbuffer then clear
+	 * them from the atom structure */
+	if (ret) {
+		for (i = 0; i < 2; i++) {
+			struct kbase_jd_atom *dep_atom = katom->dep[i].atom;
+
+			if (dep_atom) {
+				int dep_js = kbase_js_get_slot(kbdev, dep_atom);
+
+				if ((js != dep_js) &&
+					(dep_atom->status !=
+						KBASE_JD_ATOM_STATE_COMPLETED)
+					&& (dep_atom->status !=
+					KBASE_JD_ATOM_STATE_HW_COMPLETED)
+					&& (dep_atom->status !=
+						KBASE_JD_ATOM_STATE_UNUSED)) {
+
+					katom->atom_flags |=
+						KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+					katom->x_pre_dep = dep_atom;
+					dep_atom->x_post_dep = katom;
+					if (kbase_jd_katom_dep_type(
+							&katom->dep[i]) ==
+							BASE_JD_DEP_TYPE_DATA)
+						katom->atom_flags |=
+						KBASE_KATOM_FLAG_FAIL_BLOCKER;
+				}
+				if ((kbase_jd_katom_dep_type(&katom->dep[i])
+						== BASE_JD_DEP_TYPE_DATA) &&
+						(js == dep_js)) {
+					katom->pre_dep = dep_atom;
+					dep_atom->post_dep = katom;
+				}
+
+				list_del(&katom->dep_item[i]);
+				kbase_jd_katom_dep_clear(&katom->dep[i]);
+			}
+		}
+	}
+
+	return ret;
+}
+
+void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int js;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Move kctx to the pullable/unpullable list as per the new priority */
+	if (new_priority != kctx->priority) {
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (kctx->slots_pullable & (1 << js))
+				list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+					&kbdev->js_data.ctx_list_pullable[js][new_priority]);
+			else
+				list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+					&kbdev->js_data.ctx_list_unpullable[js][new_priority]);
+		}
+
+		kctx->priority = new_priority;
+	}
+}
+
+void kbase_js_update_ctx_priority(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int new_priority = KBASE_JS_ATOM_SCHED_PRIO_LOW;
+	int prio;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbdev->js_ctx_scheduling_mode == KBASE_JS_SYSTEM_PRIORITY_MODE) {
+		/* Determine the new priority for context, as per the priority
+		 * of currently in-use atoms.
+		 */
+		for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+			if (kctx->atoms_count[prio]) {
+				new_priority = prio;
+				break;
+			}
+		}
+	}
+
+	kbase_js_set_ctx_priority(kctx, new_priority);
+}
+
+bool kbasep_js_add_job(struct kbase_context *kctx,
+		struct kbase_jd_atom *atom)
+{
+	unsigned long flags;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+
+	bool enqueue_required = false;
+	bool timer_sync = false;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(atom != NULL);
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	kbdev = kctx->kbdev;
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/*
+	 * Begin Runpool transaction
+	 */
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	/* Refcount ctx.nr_jobs */
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs < U32_MAX);
+	++(js_kctx_info->ctx.nr_jobs);
+
+	/* Lock for state available during IRQ */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (++kctx->atoms_count[atom->sched_priority] == 1)
+		kbase_js_update_ctx_priority(kctx);
+
+	if (!kbase_js_dep_validate(kctx, atom)) {
+		/* Dependencies could not be represented */
+		--(js_kctx_info->ctx.nr_jobs);
+
+		/* Setting atom status back to queued as it still has unresolved
+		 * dependencies */
+		atom->status = KBASE_JD_ATOM_STATE_QUEUED;
+
+		/* Undo the count, as the atom will get added again later, but
+		 * leave the context priority adjusted (boosted), in case this
+		 * was the first higher priority atom received for this
+		 * context.
+		 * This prevents a priority-inversion scenario where another
+		 * context with only medium priority atoms keeps getting
+		 * scheduled over this context, which has both lower and higher
+		 * priority atoms, but whose higher priority atoms are blocked
+		 * by dependencies on the lower priority atoms. With the
+		 * priority boost the high priority atom will get to run at the
+		 * earliest opportunity.
+		 */
+		kctx->atoms_count[atom->sched_priority]--;
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&js_devdata->runpool_mutex);
+
+		goto out_unlock;
+	}
+
+	enqueue_required = kbase_js_dep_resolved_submit(kctx, atom);
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_ADD_JOB, kctx, atom, atom->jc,
+				kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+	/* Context Attribute Refcounting */
+	kbasep_js_ctx_attr_ctx_retain_atom(kbdev, kctx, atom);
+
+	if (enqueue_required) {
+		if (kbase_js_ctx_pullable(kctx, atom->slot_nr, false))
+			timer_sync = kbase_js_ctx_list_add_pullable_nolock(
+					kbdev, kctx, atom->slot_nr);
+		else
+			timer_sync = kbase_js_ctx_list_add_unpullable_nolock(
+					kbdev, kctx, atom->slot_nr);
+	}
+	/* If this context is active and the atom is the first on its slot,
+	 * kick the job manager to attempt to fast-start the atom */
+	if (enqueue_required && kctx ==
+			kbdev->hwaccess.active_kctx[atom->slot_nr])
+		kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	if (timer_sync)
+		kbase_backend_ctx_count_changed(kbdev);
+	mutex_unlock(&js_devdata->runpool_mutex);
+	/* End runpool transaction */
+
+	if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+		if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+			/* A job got added while/after kbase_job_zap_context()
+			 * was called on a non-scheduled context. Kill that job
+			 * by killing the context. */
+			kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx,
+					false);
+		} else if (js_kctx_info->ctx.nr_jobs == 1) {
+			/* Handle Refcount going from 0 to 1: schedule the
+			 * context on the Queue */
+			KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+			dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);
+
+			/* Queue was updated - caller must try to
+			 * schedule the head context */
+			WARN_ON(!enqueue_required);
+		}
+	}
+out_unlock:
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	return enqueue_required;
+}
+
+void kbasep_js_remove_job(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_jd_atom *atom)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(atom != NULL);
+
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_REMOVE_JOB, kctx, atom, atom->jc,
+			kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+	/* De-refcount ctx.nr_jobs */
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
+	--(js_kctx_info->ctx.nr_jobs);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (--kctx->atoms_count[atom->sched_priority] == 0)
+		kbase_js_update_ctx_priority(kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	unsigned long flags;
+	struct kbasep_js_atom_retained_state katom_retained_state;
+	bool attr_state_changed;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(katom != NULL);
+
+	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
+	kbasep_js_remove_job(kbdev, kctx, katom);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* The atom has 'finished' (will not be re-run), so no need to call
+	 * kbasep_js_has_atom_finished().
+	 *
+	 * This is because it returns false for soft-stopped atoms, but we
+	 * want to override that, because we're cancelling an atom regardless of
+	 * whether it was soft-stopped or not */
+	attr_state_changed = kbasep_js_ctx_attr_ctx_release_atom(kbdev, kctx,
+			&katom_retained_state);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return attr_state_changed;
+}
+
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	unsigned long flags;
+	bool result;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	result = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	return result;
+}
+
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev,
+		int as_nr)
+{
+	unsigned long flags;
+	struct kbase_context *found_kctx = NULL;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	found_kctx = kbdev->as_to_kctx[as_nr];
+
+	if (found_kctx != NULL)
+		kbase_ctx_sched_retain_ctx_refcount(found_kctx);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return found_kctx;
+}
+
+/**
+ * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs after
+ *                           releasing a context and/or atom
+ * @kbdev:                   The kbase_device to operate on
+ * @kctx:                    The kbase_context to operate on
+ * @katom_retained_state:    Retained state from the atom
+ * @runpool_ctx_attr_change: True if the runpool context attributes have changed
+ *
+ * This collates a set of actions that must happen whilst hwaccess_lock is held.
+ *
+ * This includes running more jobs when:
+ * - The previously released kctx caused a ctx attribute change,
+ * - The released atom caused a ctx attribute change,
+ * - Slots were previously blocked due to affinity restrictions,
+ * - Submission during IRQ handling failed.
+ *
+ * Return: %KBASEP_JS_RELEASE_RESULT_SCHED_ALL if context attributes were
+ *         changed. The caller should try scheduling all contexts
+ */
+static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
+		struct kbase_device *kbdev,
+		struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state,
+		bool runpool_ctx_attr_change)
+{
+	struct kbasep_js_device_data *js_devdata;
+	kbasep_js_release_result result = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(katom_retained_state != NULL);
+	js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (js_devdata->nr_user_contexts_running != 0 && runpool_ctx_attr_change) {
+		/* A change in runpool ctx attributes might mean we can
+		 * run more jobs than before  */
+		result = KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+
+		KBASE_TRACE_ADD_SLOT(kbdev, JD_DONE_TRY_RUN_NEXT_JOB,
+					kctx, NULL, 0u, 0);
+	}
+	return result;
+}
+
+/**
+ * kbasep_js_runpool_release_ctx_internal - Internal function to release the reference
+ *                                          on a ctx and an atom's "retained state", only
+ *                                          taking the runpool and as transaction mutexes
+ * @kbdev:                   The kbase_device to operate on
+ * @kctx:                    The kbase_context to operate on
+ * @katom_retained_state:    Retained state from the atom
+ *
+ * This also starts more jobs running in the case of a ctx-attribute state change
+ *
+ * This does none of the follow-up actions for scheduling:
+ * - It does not schedule in a new context
+ * - It does not requeue or handle dying contexts
+ *
+ * For those tasks, just call kbasep_js_runpool_release_ctx() instead
+ *
+ * Has the following requirements:
+ * - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
+ * - Context has a non-zero refcount
+ * - Caller holds js_kctx_info->ctx.jsctx_mutex
+ * - Caller holds js_devdata->runpool_mutex
+ *
+ * Return: A bit pattern of KBASEP_JS_RELEASE_RESULT_* flags indicating the
+ *         result of releasing the context: whether the caller should try
+ *         scheduling a new context and/or try scheduling all contexts.
+ */
+static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
+		struct kbase_device *kbdev,
+		struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	unsigned long flags;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	kbasep_js_release_result release_result = 0u;
+	bool runpool_ctx_attr_change = false;
+	int kctx_as_nr;
+	int new_ref_count;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	kctx_as_nr = kctx->as_nr;
+	KBASE_DEBUG_ASSERT(kctx_as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+	/*
+	 * Transaction begins on AS and runpool_irq
+	 *
+	 * Assert about our calling contract
+	 */
+	mutex_lock(&kbdev->pm.lock);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	KBASE_DEBUG_ASSERT(kctx_as_nr == kctx->as_nr);
+	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+	/* Update refcount */
+	kbase_ctx_sched_release_ctx(kctx);
+	new_ref_count = atomic_read(&kctx->refcount);
+
+	/* Release the atom if it finished (i.e. wasn't soft-stopped) */
+	if (kbasep_js_has_atom_finished(katom_retained_state))
+		runpool_ctx_attr_change |= kbasep_js_ctx_attr_ctx_release_atom(
+				kbdev, kctx, katom_retained_state);
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u,
+			new_ref_count);
+
+	if (new_ref_count == 2 && kbase_ctx_flag(kctx, KCTX_PRIVILEGED) &&
+			!kbase_pm_is_suspending(kbdev)) {
+		/* Context is kept scheduled into an address space even when
+		 * there are no jobs. In this case we have to handle the
+		 * situation where all jobs have been evicted from the GPU and
+		 * submission is disabled.
+		 *
+		 * At this point we re-enable submission to allow further jobs
+		 * to be executed.
+		 */
+		kbasep_js_set_submit_allowed(js_devdata, kctx);
+	}
+
+	/* Make a set of checks to see if the context should be scheduled out.
+	 * Note that there'll always be at least 1 reference to the context
+	 * which was previously acquired by kbasep_js_schedule_ctx(). */
+	if (new_ref_count == 1 &&
+		(!kbasep_js_is_submit_allowed(js_devdata, kctx) ||
+							kbdev->pm.suspending)) {
+		int num_slots = kbdev->gpu_props.num_job_slots;
+		int slot;
+
+		/* Last reference, and we've been told to remove this context
+		 * from the Run Pool */
+		dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because refcount=%d, jobs=%d, allowed=%d",
+				kctx, new_ref_count, js_kctx_info->ctx.nr_jobs,
+				kbasep_js_is_submit_allowed(js_devdata, kctx));
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+		kbase_trace_mali_mmu_as_released(kctx->as_nr);
+#endif
+		KBASE_TLSTREAM_TL_NRET_AS_CTX(&kbdev->as[kctx->as_nr], kctx);
+
+		kbase_backend_release_ctx_irq(kbdev, kctx);
+
+		for (slot = 0; slot < num_slots; slot++) {
+			if (kbdev->hwaccess.active_kctx[slot] == kctx)
+				kbdev->hwaccess.active_kctx[slot] = NULL;
+		}
+
+		/* Ctx Attribute handling
+		 *
+		 * Releasing the atom's attributes must either happen before
+		 * this, or after the KCTX_SCHEDULED flag is changed, otherwise
+		 * we would decrement the attributes twice.
+		 */
+		runpool_ctx_attr_change |=
+			kbasep_js_ctx_attr_runpool_release_ctx(kbdev, kctx);
+
+		/* Releasing the context and katom retained state can allow
+		 * more jobs to run */
+		release_result |=
+			kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev,
+						kctx, katom_retained_state,
+						runpool_ctx_attr_change);
+
+		/*
+		 * Transaction ends on AS and runpool_irq:
+		 *
+		 * By this point, the AS-related data is now clear and ready
+		 * for re-use.
+		 *
+		 * Since releases only occur once for each previous successful
+		 * retain, and no more retains are allowed on this context, no
+		 * other thread will be operating in this code whilst we are.
+		 */
+
+		/* Recalculate pullable status for all slots */
+		for (slot = 0; slot < num_slots; slot++) {
+			if (kbase_js_ctx_pullable(kctx, slot, false))
+				kbase_js_ctx_list_add_pullable_nolock(kbdev,
+						kctx, slot);
+		}
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		kbase_backend_release_ctx_noirq(kbdev, kctx);
+
+		mutex_unlock(&kbdev->pm.lock);
+
+		/* Note: Don't reuse kctx_as_nr now */
+
+		/* Synchronize with any timers */
+		kbase_backend_ctx_count_changed(kbdev);
+
+		/* update book-keeping info */
+		kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+		/* Signal any waiter that the context is not scheduled, so it
+		 * is safe for termination - once the jsctx_mutex is also
+		 * dropped, and jobs have finished. */
+		wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+		/* Queue an action to occur after we've dropped the lock */
+		release_result |= KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED |
+			KBASEP_JS_RELEASE_RESULT_SCHED_ALL;
+	} else {
+		kbasep_js_run_jobs_after_ctx_and_atom_release(kbdev, kctx,
+				katom_retained_state, runpool_ctx_attr_change);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&kbdev->pm.lock);
+	}
+
+	return release_result;
+}
+
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	struct kbasep_js_atom_retained_state katom_retained_state;
+
+	/* Set up a dummy katom_retained_state */
+	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+	kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+							&katom_retained_state);
+}
+
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx, bool has_pm_ref)
+{
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	/* This is called if and only if you've detached the context from the
+	 * Runpool Queue, and not added it back to the Runpool.
+	 */
+	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+		/* Dying: don't requeue, but kill all jobs on the context. This
+		 * happens asynchronously */
+		dev_dbg(kbdev->dev,
+			"JS: ** Killing Context %p on RunPool Remove **", kctx);
+		kbase_js_foreach_ctx_job(kctx, &kbase_jd_cancel);
+	}
+}
+
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(
+		struct kbase_device *kbdev, struct kbase_context *kctx,
+		struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	kbasep_js_release_result release_result;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+			katom_retained_state);
+
+	/* Drop the runpool mutex to allow requeuing kctx */
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+	/* Drop the jsctx_mutex to allow scheduling in a new context */
+
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
+		kbase_js_sched_all(kbdev);
+}
+
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	struct kbasep_js_atom_retained_state katom_retained_state;
+
+	kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
+
+	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+			&katom_retained_state);
+}
+
+/* Variant of kbasep_js_runpool_release_ctx() that doesn't call into
+ * kbase_js_sched_all() */
+static void kbasep_js_runpool_release_ctx_no_schedule(
+		struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	kbasep_js_release_result release_result;
+	struct kbasep_js_atom_retained_state katom_retained_state_struct;
+	struct kbasep_js_atom_retained_state *katom_retained_state =
+		&katom_retained_state_struct;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+	kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
+
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+
+	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
+			katom_retained_state);
+
+	/* Drop the runpool mutex to allow requeuing kctx */
+	mutex_unlock(&js_devdata->runpool_mutex);
+	if ((release_result & KBASEP_JS_RELEASE_RESULT_WAS_DESCHEDULED) != 0u)
+		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
+
+	/* Drop the jsctx_mutex to allow scheduling in a new context */
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* NOTE: could return release_result if the caller would like to know
+	 * whether it should schedule a new context, but currently no callers do
+	 */
+}
+
+void kbase_js_set_timeouts(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	kbase_backend_timeouts_changed(kbdev);
+}
+
+static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
+					struct kbase_context *kctx,
+					int js)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	unsigned long flags;
+	bool kctx_suspended = false;
+	int as_nr;
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* Pick available address space for this context */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	as_nr = kbase_ctx_sched_retain_ctx(kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+	if (as_nr == KBASEP_AS_NR_INVALID) {
+		as_nr = kbase_backend_find_and_release_free_address_space(
+				kbdev, kctx);
+		if (as_nr != KBASEP_AS_NR_INVALID) {
+			/* Attempt to retain the context again; this should
+			 * succeed */
+			mutex_lock(&kbdev->mmu_hw_mutex);
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+			as_nr = kbase_ctx_sched_retain_ctx(kctx);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			mutex_unlock(&kbdev->mmu_hw_mutex);
+
+			WARN_ON(as_nr == KBASEP_AS_NR_INVALID);
+		}
+	}
+	if (as_nr == KBASEP_AS_NR_INVALID)
+		return false; /* No address spaces currently available */
+
+	/*
+	 * Atomic transaction on the Context and Run Pool begins
+	 */
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* Check to see if context is dying due to kbase_job_zap_context() */
+	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+		/* Roll back the transaction so far and return */
+		kbase_ctx_sched_release_ctx(kctx);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		mutex_unlock(&js_devdata->runpool_mutex);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+		return false;
+	}
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, kctx, NULL,
+				0u,
+				kbasep_js_trace_get_refcnt(kbdev, kctx));
+
+	kbase_ctx_flag_set(kctx, KCTX_SCHEDULED);
+
+	/* Assign context to previously chosen address space */
+	if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
+		/* Roll back the transaction so far and return */
+		kbase_ctx_sched_release_ctx(kctx);
+		kbase_ctx_flag_clear(kctx, KCTX_SCHEDULED);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		mutex_unlock(&js_devdata->runpool_mutex);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+		return false;
+	}
+
+	kbdev->hwaccess.active_kctx[js] = kctx;
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+	kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
+#endif
+	KBASE_TLSTREAM_TL_RET_AS_CTX(&kbdev->as[kctx->as_nr], kctx);
+
+	/* Cause any future waiter-on-termination to wait until the context is
+	 * descheduled */
+	wake_up(&js_kctx_info->ctx.is_scheduled_wait);
+
+	/* Re-check for suspending: a suspend could've occurred, and all the
+	 * contexts could've been removed from the runpool before we took this
+	 * lock. In this case, we don't want to allow this context to run jobs;
+	 * we just want it out immediately.
+	 *
+	 * The DMB required to read the suspend flag was issued recently as part
+	 * of the hwaccess_lock locking. If a suspend occurs *after* that lock
+	 * was taken (i.e. this condition doesn't execute), then the
+	 * kbasep_js_suspend() code will clean up this context instead (by virtue
+	 * of it being called strictly after the suspend flag is set, and will
+	 * wait for this lock to drop) */
+	if (kbase_pm_is_suspending(kbdev)) {
+		/* Cause it to leave at some later point */
+		bool retained;
+
+		retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+		KBASE_DEBUG_ASSERT(retained);
+
+		kbasep_js_clear_submit_allowed(js_devdata, kctx);
+		kctx_suspended = true;
+	}
+
+	kbase_ctx_flag_clear(kctx, KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
+
+	/* Transaction complete */
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	/* Synchronize with any timers */
+	kbase_backend_ctx_count_changed(kbdev);
+
+	mutex_unlock(&js_devdata->runpool_mutex);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	/* Note: after this point, the context could potentially get scheduled
+	 * out immediately */
+
+	if (kctx_suspended) {
+		/* Finish forcing out the context due to a suspend. Use a
+		 * variant of kbasep_js_runpool_release_ctx() that doesn't
+		 * schedule a new context, to prevent a risk of recursion back
+		 * into this function */
+		kbasep_js_runpool_release_ctx_no_schedule(kbdev, kctx);
+		return false;
+	}
+	return true;
+}
+
+static bool kbase_js_use_ctx(struct kbase_device *kbdev,
+				struct kbase_context *kctx,
+				int js)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+			kbase_backend_use_ctx_sched(kbdev, kctx, js)) {
+		/* Context already has ASID - mark as active */
+		if (kbdev->hwaccess.active_kctx[js] != kctx) {
+			kbdev->hwaccess.active_kctx[js] = kctx;
+			kbase_ctx_flag_clear(kctx,
+					KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
+		}
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return true; /* Context already scheduled */
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	return kbasep_js_schedule_ctx(kbdev, kctx, js);
+}
+
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbasep_js_device_data *js_devdata;
+	bool is_scheduled;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* This must never be attempted whilst suspending - i.e. it should only
+	 * happen in response to a syscall from a user-space thread */
+	BUG_ON(kbase_pm_is_suspending(kbdev));
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* Mark the context as privileged */
+	kbase_ctx_flag_set(kctx, KCTX_PRIVILEGED);
+
+	is_scheduled = kbase_ctx_flag(kctx, KCTX_SCHEDULED);
+	if (!is_scheduled) {
+		/* Add the context to the pullable list */
+		if (kbase_js_ctx_list_add_pullable_head(kbdev, kctx, 0))
+			kbase_js_sync_timers(kbdev);
+
+		/* Fast-starting requires the jsctx_mutex to be dropped,
+		 * because it works on multiple ctxs */
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+
+		/* Try to schedule the context in */
+		kbase_js_sched_all(kbdev);
+
+		/* Wait for the context to be scheduled in */
+		wait_event(kctx->jctx.sched_info.ctx.is_scheduled_wait,
+			   kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+	} else {
+		/* Already scheduled in - We need to retain it to keep the
+		 * corresponding address space */
+		kbasep_js_runpool_retain_ctx(kbdev, kctx);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+	}
+}
+KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
+
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
+		struct kbase_context *kctx)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* We don't need to use the address space anymore */
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	kbase_ctx_flag_clear(kctx, KCTX_PRIVILEGED);
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+	/* Release the context - it will be scheduled out */
+	kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+	kbase_js_sched_all(kbdev);
+}
+KBASE_EXPORT_TEST_API(kbasep_js_release_privileged_ctx);
+
+void kbasep_js_suspend(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	struct kbasep_js_device_data *js_devdata;
+	int i;
+	u16 retained = 0u;
+	int nr_privileged_ctx = 0;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(kbase_pm_is_suspending(kbdev));
+	js_devdata = &kbdev->js_data;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	/* Prevent all contexts from submitting */
+	js_devdata->runpool_irq.submit_allowed = 0;
+
+	/* Retain each of the contexts, so we can cause them to leave even if
+	 * they had no refcount to begin with */
+	for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
+		struct kbase_context *kctx = kbdev->as_to_kctx[i];
+
+		retained = retained << 1;
+
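+		/* Bit i of 'retained' ends up recording whether the context in
+		 * address space i was retained, since this loop runs from the
+		 * highest AS downwards.
+		 */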
+		if (kctx && !(kbdev->as_free & (1u << i))) {
+			kbase_ctx_sched_retain_ctx_refcount(kctx);
+			retained |= 1u;
+			/* We can only cope with up to 1 privileged context -
+			 * the instrumented context. It'll be suspended by
+			 * disabling instrumentation */
+			if (kbase_ctx_flag(kctx, KCTX_PRIVILEGED)) {
+				++nr_privileged_ctx;
+				WARN_ON(nr_privileged_ctx != 1);
+			}
+		}
+	}
+	CSTD_UNUSED(nr_privileged_ctx);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* De-ref the previous retain to ensure each context gets pulled out
+	 * sometime later. */
+	for (i = 0;
+		 i < BASE_MAX_NR_AS;
+		 ++i, retained = retained >> 1) {
+		struct kbase_context *kctx = kbdev->as_to_kctx[i];
+
+		if (retained & 1u)
+			kbasep_js_runpool_release_ctx(kbdev, kctx);
+	}
+
+	/* Caller must wait for all Power Manager active references to be
+	 * dropped */
+}
+
+void kbasep_js_resume(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata;
+	int js, prio;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	js_devdata = &kbdev->js_data;
+	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+
+	mutex_lock(&js_devdata->queue_mutex);
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+		for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+			struct kbase_context *kctx, *n;
+			unsigned long flags;
+
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+			list_for_each_entry_safe(kctx, n,
+				 &kbdev->js_data.ctx_list_unpullable[js][prio],
+				 jctx.sched_info.ctx.ctx_list_entry[js]) {
+				struct kbasep_js_kctx_info *js_kctx_info;
+				bool timer_sync = false;
+
+				/* Drop lock so we can take kctx mutexes */
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+						flags);
+
+				js_kctx_info = &kctx->jctx.sched_info;
+
+				mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+				mutex_lock(&js_devdata->runpool_mutex);
+				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+				if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+					kbase_js_ctx_pullable(kctx, js, false))
+					timer_sync =
+						kbase_js_ctx_list_add_pullable_nolock(
+								kbdev, kctx, js);
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+				if (timer_sync)
+					kbase_backend_ctx_count_changed(kbdev);
+				mutex_unlock(&js_devdata->runpool_mutex);
+				mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+				/* Take lock before accessing list again */
+				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+			}
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		}
+	}
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	/* Restart atom processing */
+	kbase_js_sched_all(kbdev);
+
+	/* JS Resume complete */
+}
+
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	if ((katom->core_req & BASE_JD_REQ_FS) &&
+	    (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE |
+								BASE_JD_REQ_T)))
+		return false;
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987) &&
+	    (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) &&
+	    (katom->core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_T)))
+		return false;
+
+	return true;
+}
+
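+/* Map an atom's core requirements onto a job slot: fragment jobs use slot 0,
+ * compute-only jobs may be routed to slot 2, and everything else uses slot 1.
+ */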
+static int kbase_js_get_slot(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom)
+{
+	if (katom->core_req & BASE_JD_REQ_FS)
+		return 0;
+
+	if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+		if (katom->device_nr == 1 &&
+				kbdev->gpu_props.num_core_groups == 2)
+			return 2;
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
+			return 2;
+	}
+
+	return 1;
+}
+
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+					struct kbase_jd_atom *katom)
+{
+	bool enqueue_required;
+
+	katom->slot_nr = kbase_js_get_slot(kctx->kbdev, katom);
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	/* If the slot will transition from unpullable to pullable then the
+	 * context must be added to the pullable list */
+	enqueue_required = jsctx_rb_none_to_pull(kctx, katom->slot_nr);
+	if ((katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) ||
+			(katom->pre_dep && (katom->pre_dep->atom_flags &
+			KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+		int prio = katom->sched_priority;
+		int js = katom->slot_nr;
+		struct jsctx_queue *queue = &kctx->jsctx_queue[prio][js];
+
+		list_add_tail(&katom->queue, &queue->x_dep_head);
+		katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+		enqueue_required = false;
+	} else {
+		/* Check if there are lower priority jobs to soft stop */
+		kbase_job_slot_ctx_priority_check_locked(kctx, katom);
+
+		/* Add atom to ring buffer. */
+		jsctx_tree_add(kctx, katom);
+		katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+	}
+
+	return enqueue_required;
+}
+
+/**
+ * kbase_js_move_to_tree - Move atom (and any dependent atoms) to the
+ *                         runnable_tree, ready for execution
+ * @katom: Atom to submit
+ *
+ * It is assumed that @katom does not have KBASE_KATOM_FLAG_X_DEP_BLOCKED set,
+ * but is still present in the x_dep list. If @katom has a same-slot dependent
+ * atom then that atom (and any dependents) will also be moved.
+ */
+static void kbase_js_move_to_tree(struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&katom->kctx->kbdev->hwaccess_lock);
+
+	while (katom) {
+		WARN_ON(!(katom->atom_flags &
+				KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST));
+
+		if (!(katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+			list_del(&katom->queue);
+			katom->atom_flags &=
+					~KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST;
+			jsctx_tree_add(katom->kctx, katom);
+			katom->atom_flags |= KBASE_KATOM_FLAG_JSCTX_IN_TREE;
+		} else {
+			break;
+		}
+
+		katom = katom->post_dep;
+	}
+}
+
+
+/**
+ * kbase_js_evict_deps - Evict dependencies of a failed atom.
+ * @kctx:       Context pointer
+ * @katom:      Pointer to the atom that has failed.
+ * @js:         The job slot the katom was run on.
+ * @prio:       Priority of the katom.
+ *
+ * Remove all post dependencies of an atom from the context ringbuffers.
+ *
+ * The original atom's event_code will be propagated to all dependent atoms.
+ *
+ * Context: Caller must hold the HW access lock
+ */
+static void kbase_js_evict_deps(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom, int js, int prio)
+{
+	struct kbase_jd_atom *x_dep = katom->x_post_dep;
+	struct kbase_jd_atom *next_katom = katom->post_dep;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	if (next_katom) {
+		KBASE_DEBUG_ASSERT(next_katom->status !=
+				KBASE_JD_ATOM_STATE_HW_COMPLETED);
+		next_katom->will_fail_event_code = katom->event_code;
+
+	}
+
+	/* Has a cross-slot dependency. */
+	if (x_dep && (x_dep->atom_flags & (KBASE_KATOM_FLAG_JSCTX_IN_TREE |
+				KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST))) {
+		/* Remove the dependency. */
+		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+
+		/* Fail if it had a data dependency. */
+		if (x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) {
+			x_dep->will_fail_event_code = katom->event_code;
+		}
+		if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_X_DEP_LIST)
+			kbase_js_move_to_tree(x_dep);
+	}
+}
+
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
+{
+	struct kbase_jd_atom *katom;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_device *kbdev;
+	int pulled;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	kbdev = kctx->kbdev;
+
+	js_devdata = &kbdev->js_data;
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+		return NULL;
+	if (kbase_pm_is_suspending(kbdev))
+		return NULL;
+
+	katom = jsctx_rb_peek(kctx, js);
+	if (!katom)
+		return NULL;
+	if (kctx->blocked_js[js][katom->sched_priority])
+		return NULL;
+	if (atomic_read(&katom->blocked))
+		return NULL;
+
+	/* Due to ordering restrictions when unpulling atoms on failure, we do
+	 * not allow multiple runs of fail-dep atoms from the same context to be
+	 * present on the same slot */
+	if (katom->pre_dep && atomic_read(&kctx->atoms_pulled_slot[js])) {
+		struct kbase_jd_atom *prev_atom =
+				kbase_backend_inspect_tail(kbdev, js);
+
+		if (prev_atom && prev_atom->kctx != kctx)
+			return NULL;
+	}
+
+	if (katom->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED) {
+		if (katom->x_pre_dep->gpu_rb_state ==
+					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB ||
+					katom->x_pre_dep->will_fail_event_code)
+			return NULL;
+		if ((katom->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER) &&
+				kbase_backend_nr_atoms_on_slot(kbdev, js))
+			return NULL;
+	}
+
+	kbase_ctx_flag_set(kctx, KCTX_PULLED);
+	kbase_ctx_flag_set(kctx, (KCTX_PULLED_SINCE_ACTIVE_JS0 << js));
+
+	pulled = atomic_inc_return(&kctx->atoms_pulled);
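+	/* The first pull from a context with no pullable slots makes the
+	 * context count as runnable again.
+	 */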
+	if (pulled == 1 && !kctx->slots_pullable) {
+		WARN_ON(kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+		kbase_ctx_flag_set(kctx, KCTX_RUNNABLE_REF);
+		atomic_inc(&kbdev->js_data.nr_contexts_runnable);
+	}
+	atomic_inc(&kctx->atoms_pulled_slot[katom->slot_nr]);
+	kctx->atoms_pulled_slot_pri[katom->slot_nr][katom->sched_priority]++;
+	jsctx_rb_pull(kctx, katom);
+
+	kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+	katom->atom_flags |= KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+
+	katom->ticks = 0;
+
+	return katom;
+}
+
+
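+/* js_return_worker - Deferred work run when an atom has been returned to the
+ * job scheduler without completing (unpulled).
+ *
+ * Undoes the accounting performed by kbase_js_pull(), updates the per-slot
+ * pullable queues, and releases the context reference taken when the atom was
+ * pulled.
+ */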
+static void js_return_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
+									work);
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+	struct kbasep_js_atom_retained_state retained_state;
+	int js = katom->slot_nr;
+	int prio = katom->sched_priority;
+	bool timer_sync = false;
+	bool context_idle = false;
+	unsigned long flags;
+	base_jd_core_req core_req = katom->core_req;
+
+	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(katom);
+
+	kbase_backend_complete_wq(kbdev, katom);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
+		kbase_as_poking_timer_release_atom(kbdev, kctx, katom);
+
+	kbasep_js_atom_retained_state_copy(&retained_state, katom);
+
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+
+	atomic_dec(&kctx->atoms_pulled);
+	atomic_dec(&kctx->atoms_pulled_slot[js]);
+
+	atomic_dec(&katom->blocked);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	kctx->atoms_pulled_slot_pri[js][katom->sched_priority]--;
+
+	if (!atomic_read(&kctx->atoms_pulled_slot[js]) &&
+			jsctx_rb_none_to_pull(kctx, js))
+		timer_sync |= kbase_js_ctx_list_remove_nolock(kbdev, kctx, js);
+
+	/* If this slot has been blocked due to soft-stopped atoms, and all
+	 * atoms have now been processed, then unblock the slot */
+	if (!kctx->atoms_pulled_slot_pri[js][prio] &&
+			kctx->blocked_js[js][prio]) {
+		kctx->blocked_js[js][prio] = false;
+
+		/* Only mark the slot as pullable if the context is not idle -
+		 * that case is handled below */
+		if (atomic_read(&kctx->atoms_pulled) &&
+				kbase_js_ctx_pullable(kctx, js, true))
+			timer_sync |= kbase_js_ctx_list_add_pullable_nolock(
+					kbdev, kctx, js);
+	}
+
+	if (!atomic_read(&kctx->atoms_pulled)) {
+		if (!kctx->slots_pullable) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+			timer_sync = true;
+		}
+
+		if (kctx->as_nr != KBASEP_AS_NR_INVALID &&
+				!kbase_ctx_flag(kctx, KCTX_DYING)) {
+			int num_slots = kbdev->gpu_props.num_job_slots;
+			int slot;
+
+			if (!kbasep_js_is_submit_allowed(js_devdata, kctx))
+				kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+			for (slot = 0; slot < num_slots; slot++) {
+				if (kbase_js_ctx_pullable(kctx, slot, true))
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kbdev, kctx, slot);
+			}
+		}
+
+		kbase_jm_idle_ctx(kbdev, kctx);
+
+		context_idle = true;
+	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (context_idle) {
+		WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+		kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+		kbase_pm_context_idle(kbdev);
+	}
+
+	if (timer_sync)
+		kbase_js_sync_timers(kbdev);
+
+	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	mutex_unlock(&js_devdata->queue_mutex);
+
+	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
+	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+							&retained_state);
+
+	kbase_js_sched_all(kbdev);
+
+	kbase_backend_complete_wq_post_sched(kbdev, core_req);
+}
+
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	jsctx_rb_unpull(kctx, katom);
+
+	WARN_ON(work_pending(&katom->work));
+
+	/* Block re-submission until workqueue has run */
+	atomic_inc(&katom->blocked);
+
+	kbase_job_check_leave_disjoint(kctx->kbdev, katom);
+
+	INIT_WORK(&katom->work, js_return_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+						struct kbase_jd_atom *katom)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	bool timer_sync = false;
+	int atom_slot;
+	bool context_idle = false;
+	int prio = katom->sched_priority;
+
+	kbdev = kctx->kbdev;
+	atom_slot = katom->slot_nr;
+
+	js_kctx_info = &kctx->jctx.sched_info;
+	js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	if (katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE) {
+		context_idle = !atomic_dec_return(&kctx->atoms_pulled);
+		atomic_dec(&kctx->atoms_pulled_slot[atom_slot]);
+		kctx->atoms_pulled_slot_pri[atom_slot][prio]--;
+
+		if (!atomic_read(&kctx->atoms_pulled) &&
+				!kctx->slots_pullable) {
+			WARN_ON(!kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF));
+			kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
+			atomic_dec(&kbdev->js_data.nr_contexts_runnable);
+			timer_sync = true;
+		}
+
+		/* If this slot has been blocked due to soft-stopped atoms, and
+		 * all atoms have now been processed, then unblock the slot */
+		if (!kctx->atoms_pulled_slot_pri[atom_slot][prio]
+				&& kctx->blocked_js[atom_slot][prio]) {
+			kctx->blocked_js[atom_slot][prio] = false;
+			if (kbase_js_ctx_pullable(kctx, atom_slot, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+						kbdev, kctx, atom_slot);
+		}
+	}
+	WARN_ON(!(katom->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE));
+
+	if (!atomic_read(&kctx->atoms_pulled_slot[atom_slot]) &&
+			jsctx_rb_none_to_pull(kctx, atom_slot)) {
+		if (!list_empty(
+			&kctx->jctx.sched_info.ctx.ctx_list_entry[atom_slot]))
+			timer_sync |= kbase_js_ctx_list_remove_nolock(
+					kctx->kbdev, kctx, atom_slot);
+	}
+
+	/*
+	 * If submission is disabled on this context (most likely due to an
+	 * atom failure) and there are now no atoms left in the system then
+	 * re-enable submission so that context can be scheduled again.
+	 */
+	if (!kbasep_js_is_submit_allowed(js_devdata, kctx) &&
+					!atomic_read(&kctx->atoms_pulled) &&
+					!kbase_ctx_flag(kctx, KCTX_DYING)) {
+		int js;
+
+		kbasep_js_set_submit_allowed(js_devdata, kctx);
+
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (kbase_js_ctx_pullable(kctx, js, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kbdev, kctx, js);
+		}
+	} else if (katom->x_post_dep &&
+			kbasep_js_is_submit_allowed(js_devdata, kctx)) {
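+		/* The completed atom had a cross-slot dependent, so other
+		 * slots may now have become pullable.
+		 */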
+		int js;
+
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (kbase_js_ctx_pullable(kctx, js, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kbdev, kctx, js);
+		}
+	}
+
+	/* Mark context as inactive. The pm reference will be dropped later in
+	 * jd_done_worker().
+	 */
+	if (context_idle)
+		kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	if (timer_sync)
+		kbase_backend_ctx_count_changed(kbdev);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	return context_idle;
+}
+
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+		ktime_t *end_timestamp)
+{
+	struct kbase_device *kbdev;
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_jd_atom *x_dep = katom->x_post_dep;
+
+	kbdev = kctx->kbdev;
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
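+	/* If an earlier dependency failure marked this atom as doomed, report
+	 * that failure code instead of the hardware result.
+	 */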
+	if (katom->will_fail_event_code)
+		katom->event_code = katom->will_fail_event_code;
+
+	katom->status = KBASE_JD_ATOM_STATE_HW_COMPLETED;
+
+	if (katom->event_code != BASE_JD_EVENT_DONE) {
+		kbase_js_evict_deps(kctx, katom, katom->slot_nr,
+				katom->sched_priority);
+	}
+
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+	kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_STOP,
+				katom->slot_nr), NULL, 0);
+#endif
+
+	kbase_jd_done(katom, katom->slot_nr, end_timestamp, 0);
+
+	/* Unblock cross dependency if present */
+	if (x_dep && (katom->event_code == BASE_JD_EVENT_DONE ||
+			!(x_dep->atom_flags & KBASE_KATOM_FLAG_FAIL_BLOCKER)) &&
+			(x_dep->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)) {
+		bool was_pullable = kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+				false);
+		x_dep->atom_flags &= ~KBASE_KATOM_FLAG_X_DEP_BLOCKED;
+		kbase_js_move_to_tree(x_dep);
+		if (!was_pullable && kbase_js_ctx_pullable(kctx, x_dep->slot_nr,
+				false))
+			kbase_js_ctx_list_add_pullable_nolock(kbdev, kctx,
+					x_dep->slot_nr);
+
+		if (x_dep->atom_flags & KBASE_KATOM_FLAG_JSCTX_IN_TREE)
+			return x_dep;
+	}
+
+	return NULL;
+}
+
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbase_context *last_active[BASE_JM_MAX_NR_SLOTS];
+	bool timer_sync = false;
+	bool ctx_waiting[BASE_JM_MAX_NR_SLOTS];
+	int js;
+
+	js_devdata = &kbdev->js_data;
+
+	down(&js_devdata->schedule_sem);
+	mutex_lock(&js_devdata->queue_mutex);
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		last_active[js] = kbdev->hwaccess.active_kctx[js];
+		ctx_waiting[js] = false;
+	}
+
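+	/* Iterate over the job slots requested in js_mask, pulling contexts
+	 * off the pullable queues and kicking atoms until no more can be
+	 * submitted on each slot.
+	 */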
+	while (js_mask) {
+		js = ffs(js_mask) - 1;
+
+		while (1) {
+			struct kbase_context *kctx;
+			unsigned long flags;
+			bool context_idle = false;
+
+			kctx = kbase_js_ctx_list_pop_head(kbdev, js);
+
+			if (!kctx) {
+				js_mask &= ~(1 << js);
+				break; /* No contexts on pullable list */
+			}
+
+			if (!kbase_ctx_flag(kctx, KCTX_ACTIVE)) {
+				context_idle = true;
+
+				if (kbase_pm_context_active_handle_suspend(
+									kbdev,
+				      KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
+					/* Suspend pending - return context to
+					 * queue and stop scheduling */
+					mutex_lock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					if (kbase_js_ctx_list_add_pullable_head(
+						kctx->kbdev, kctx, js))
+						kbase_js_sync_timers(kbdev);
+					mutex_unlock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					mutex_unlock(&js_devdata->queue_mutex);
+					up(&js_devdata->schedule_sem);
+					return;
+				}
+				kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
+			}
+
+			if (!kbase_js_use_ctx(kbdev, kctx, js)) {
+				mutex_lock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+				/* Context cannot be used at this time */
+				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+				if (kbase_js_ctx_pullable(kctx, js, false)
+				    || kbase_ctx_flag(kctx, KCTX_PRIVILEGED))
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_head_nolock(
+							kctx->kbdev, kctx, js);
+				else
+					timer_sync |=
+					kbase_js_ctx_list_add_unpullable_nolock(
+							kctx->kbdev, kctx, js);
+				spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+						flags);
+				mutex_unlock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+				if (context_idle) {
+					WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+					kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+					kbase_pm_context_idle(kbdev);
+				}
+
+				/* No more jobs can be submitted on this slot */
+				js_mask &= ~(1 << js);
+				break;
+			}
+			mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+			kbase_ctx_flag_clear(kctx, KCTX_PULLED);
+
+			if (!kbase_jm_kick(kbdev, 1 << js))
+				/* No more jobs can be submitted on this slot */
+				js_mask &= ~(1 << js);
+
+			if (!kbase_ctx_flag(kctx, KCTX_PULLED)) {
+				bool pullable = kbase_js_ctx_pullable(kctx, js,
+						true);
+
+				/* Failed to pull jobs - push to head of list.
+				 * Unless this context is already 'active', in
+				 * which case it's effectively already scheduled
+				 * so push it to the back of the list. */
+				if (pullable && kctx == last_active[js] &&
+						kbase_ctx_flag(kctx,
+						(KCTX_PULLED_SINCE_ACTIVE_JS0 <<
+						js)))
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kctx->kbdev,
+							kctx, js);
+				else if (pullable)
+					timer_sync |=
+					kbase_js_ctx_list_add_pullable_head_nolock(
+							kctx->kbdev,
+							kctx, js);
+				else
+					timer_sync |=
+					kbase_js_ctx_list_add_unpullable_nolock(
+								kctx->kbdev,
+								kctx, js);
+
+				/* If this context is not the active context,
+				 * but the active context is pullable on this
+				 * slot, then we need to remove the active
+				 * marker to prevent it from submitting atoms in
+				 * the IRQ handler, which would prevent this
+				 * context from making progress. */
+				if (last_active[js] && kctx != last_active[js]
+						&& kbase_js_ctx_pullable(
+						last_active[js], js, true))
+					ctx_waiting[js] = true;
+
+				if (context_idle) {
+					kbase_jm_idle_ctx(kbdev, kctx);
+					spin_unlock_irqrestore(
+							&kbdev->hwaccess_lock,
+							flags);
+					WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
+					kbase_ctx_flag_clear(kctx, KCTX_ACTIVE);
+					kbase_pm_context_idle(kbdev);
+				} else {
+					spin_unlock_irqrestore(
+							&kbdev->hwaccess_lock,
+							flags);
+				}
+				mutex_unlock(
+					&kctx->jctx.sched_info.ctx.jsctx_mutex);
+
+				js_mask &= ~(1 << js);
+				break; /* Could not run atoms on this slot */
+			}
+
+			/* Push to back of list */
+			if (kbase_js_ctx_pullable(kctx, js, true))
+				timer_sync |=
+					kbase_js_ctx_list_add_pullable_nolock(
+							kctx->kbdev, kctx, js);
+			else
+				timer_sync |=
+					kbase_js_ctx_list_add_unpullable_nolock(
+							kctx->kbdev, kctx, js);
+
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+			mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+		}
+	}
+
+	if (timer_sync)
+		kbase_js_sync_timers(kbdev);
+
+	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+		if (kbdev->hwaccess.active_kctx[js] == last_active[js] &&
+				ctx_waiting[js])
+			kbdev->hwaccess.active_kctx[js] = NULL;
+	}
+
+	mutex_unlock(&js_devdata->queue_mutex);
+	up(&js_devdata->schedule_sem);
+}
+
+void kbase_js_zap_context(struct kbase_context *kctx)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
+	int js;
+
+	/*
+	 * Critical assumption: No more submission is possible outside of the
+	 * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
+	 * whilst the struct kbase_context is terminating.
+	 */
+
+	/* First, atomically do the following:
+	 * - mark the context as dying
+	 * - try to evict it from the queue */
+	mutex_lock(&kctx->jctx.lock);
+	mutex_lock(&js_devdata->queue_mutex);
+	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	kbase_ctx_flag_set(kctx, KCTX_DYING);
+
+	dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
+
+	/*
+	 * At this point we know:
+	 * - If eviction succeeded, it was in the queue, but now no
+	 *   longer is
+	 *  - We must cancel the jobs here. No Power Manager active reference to
+	 *    release.
+	 *  - This happens asynchronously - kbase_jd_zap_context() will wait for
+	 *    those jobs to be killed.
+	 * - If eviction failed, then it wasn't in the queue. It is one
+	 *   of the following:
+	 *  - a. it didn't have any jobs, and so is not in the Queue or
+	 *       the Run Pool (not scheduled)
+	 *   - Hence, no more work required to cancel jobs. No Power Manager
+	 *     active reference to release.
+	 *  - b. it was in the middle of a scheduling transaction (and thus must
+	 *       have at least 1 job). This can happen from a syscall or a
+	 *       kernel thread. We still hold the jsctx_mutex, and so the thread
+	 *       must be waiting inside kbasep_js_try_schedule_head_ctx(),
+	 *       before checking whether the runpool is full. That thread will
+	 *       continue after we drop the mutex, and will notice the context
+	 *       is dying. It will rollback the transaction, killing all jobs at
+	 *       the same time. kbase_jd_zap_context() will wait for those jobs
+	 *       to be killed.
+	 *   - Hence, no more work required to cancel jobs, or to release the
+	 *     Power Manager active reference.
+	 *  - c. it is scheduled, and may or may not be running jobs
+	 *   - We must cause it to leave the runpool by stopping it from
+	 *     submitting any more jobs. When it finally does leave,
+	 *     kbasep_js_runpool_requeue_or_kill_ctx() will kill all remaining
+	 *     jobs (because it is dying), release the Power Manager active
+	 *     reference, and will not requeue the context in the queue.
+	 *     kbase_jd_zap_context() will wait for those jobs to be killed.
+	 *   - Hence, work required just to make it leave the runpool. Cancelling
+	 *     jobs and releasing the Power Manager active reference will be
+	 *     handled when it leaves the runpool.
+	 */
+	if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+			if (!list_empty(
+				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
+				list_del_init(
+				&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+		}
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		/* The following events require us to kill off remaining jobs
+		 * and update PM book-keeping:
+		 * - we evicted it correctly (it must have jobs to be in the
+		 *   Queue)
+		 *
+		 * These events need no action, but take this path anyway:
+		 * - Case a: it didn't have any jobs, and was never in the Queue
+		 * - Case b: scheduling transaction will be partially rolled-
+		 *           back (this already cancels the jobs)
+		 */
+
+		KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u,
+						kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+		dev_dbg(kbdev->dev, "Zap: Ctx %p scheduled=0", kctx);
+
+		/* Only cancel jobs when we evicted from the
+		 * queue. No Power Manager active reference was held.
+		 *
+		 * Having is_dying set ensures that this kills, and
+		 * doesn't requeue */
+		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);
+
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+		mutex_unlock(&kctx->jctx.lock);
+	} else {
+		unsigned long flags;
+		bool was_retained;
+
+		/* Case c: didn't evict, but it is scheduled - it's in the Run
+		 * Pool */
+		KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u,
+						kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+		dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
+
+		/* Disable the ctx from submitting any more jobs */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+		/* Retain and (later) release the context whilst it is now
+		 * disallowed from submitting jobs - ensures that someone
+		 * somewhere will be removing the context later on */
+		was_retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);
+
+		/* Since it's scheduled and we have the jsctx_mutex, it must be
+		 * retained successfully */
+		KBASE_DEBUG_ASSERT(was_retained);
+
+		dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
+
+		/* Cancel any remaining running jobs for this kctx - if any.
+		 * Submit is disallowed which takes effect immediately, so no
+		 * more new jobs will appear after we do this. */
+		for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+			kbase_job_slot_hardstop(kctx, js, NULL);
+
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		mutex_unlock(&js_devdata->queue_mutex);
+		mutex_unlock(&kctx->jctx.lock);
+
+		dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)",
+									kctx);
+
+		kbasep_js_runpool_release_ctx(kbdev, kctx);
+	}
+
+	KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
+
+	/* After this, you must wait on both the
+	 * kbase_jd_context::zero_jobs_wait and the
+	 * kbasep_js_kctx_info::ctx::is_scheduled_waitq - to wait for the jobs
+	 * to be destroyed, and the context to be de-scheduled (if it was on the
+	 * runpool).
+	 *
+	 * kbase_jd_zap_context() will do this. */
+}
+
+static inline int trace_get_refcnt(struct kbase_device *kbdev,
+					struct kbase_context *kctx)
+{
+	return atomic_read(&kctx->refcount);
+}
+
+/**
+ * kbase_js_foreach_ctx_job(): - Call a function on all jobs in context
+ * @kctx:     Pointer to context.
+ * @callback: Pointer to function to call for each job.
+ *
+ * Call a function on all jobs belonging to a non-queued, non-running
+ * context, and detach the jobs from the context as it goes.
+ *
+ * Due to the locks that might be held at the time of the call, the callback
+ * may need to defer work on a workqueue to complete its actions (e.g. when
+ * cancelling jobs)
+ *
+ * Atoms will be removed from the queue, so this must only be called when
+ * cancelling jobs (which occurs as part of context destruction).
+ *
+ * The locking conditions on the caller are as follows:
+ * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
+ */
+static void kbase_js_foreach_ctx_job(struct kbase_context *kctx,
+		kbasep_js_ctx_job_cb callback)
+{
+	struct kbase_device *kbdev;
+	unsigned long flags;
+	u32 js;
+
+	kbdev = kctx->kbdev;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL,
+					0u, trace_get_refcnt(kbdev, kctx));
+
+	/* Invoke callback on jobs on each slot in turn */
+	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
+		jsctx_queue_foreach(kctx, js, callback);
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.h b/drivers/gpu/arm/midgard/mali_kbase_js.h
new file mode 100644
index 0000000..355da27
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js.h
@@ -0,0 +1,912 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js.h
+ * Job Scheduler APIs.
+ */
+
+#ifndef _KBASE_JS_H_
+#define _KBASE_JS_H_
+
+#include "mali_kbase_js_defs.h"
+#include "mali_kbase_context.h"
+#include "mali_kbase_defs.h"
+#include "mali_kbase_debug.h"
+
+#include "mali_kbase_js_ctx_attr.h"
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js Job Scheduler Internal APIs
+ * @{
+ *
+ * These APIs are Internal to KBase.
+ */
+
+/**
+ * @brief Initialize the Job Scheduler
+ *
+ * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero
+ * initialized before passing to the kbasep_js_devdata_init() function. This is
+ * to give efficient error path code.
+ */
+int kbasep_js_devdata_init(struct kbase_device * const kbdev);
+
+/**
+ * @brief Halt the Job Scheduler.
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized/failed initialization, to give efficient
+ * error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ *
+ */
+void kbasep_js_devdata_halt(struct kbase_device *kbdev);
+
+/**
+ * @brief Terminate the Job Scheduler
+ *
+ * It is safe to call this on \a kbdev even if the kbasep_js_device_data
+ * sub-structure was never initialized/failed initialization, to give efficient
+ * error-path code.
+ *
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
+ * be zero initialized before passing to the kbasep_js_devdata_init()
+ * function. This is to give efficient error path code.
+ *
+ * It is a Programming Error to call this whilst there are still kbase_context
+ * structures registered with this scheduler.
+ */
+void kbasep_js_devdata_term(struct kbase_device *kbdev);
+
+/**
+ * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
+ *
+ * This effectively registers a struct kbase_context with a Job Scheduler.
+ *
+ * It does not register any jobs owned by the struct kbase_context with the scheduler.
+ * Those must be separately registered by kbasep_js_add_job().
+ *
+ * The struct kbase_context must be zero initialized before passing to the
+ * kbase_js_init() function. This is to give efficient error path code.
+ */
+int kbasep_js_kctx_init(struct kbase_context * const kctx);
+
+/**
+ * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
+ *
+ * This effectively de-registers a struct kbase_context from its Job Scheduler
+ *
+ * It is safe to call this on a struct kbase_context that has never had or failed
+ * initialization of its jctx.sched_info member, to give efficient error-path
+ * code.
+ *
+ * For this to work, the struct kbase_context must be zero initialized before passing
+ * to the kbase_js_init() function.
+ *
+ * It is a Programming Error to call this whilst there are still jobs
+ * registered with this context.
+ */
+void kbasep_js_kctx_term(struct kbase_context *kctx);
+
+/**
+ * @brief Add a job chain to the Job Scheduler, and take necessary actions to
+ * schedule the context/run the job.
+ *
+ * This atomically does the following:
+ * - Update the numbers of jobs information
+ * - Add the job to the run pool if necessary (part of init_job)
+ *
+ * Once this is done, then an appropriate action is taken:
+ * - If the ctx is scheduled, it attempts to start the next job (which might be
+ * this added job)
+ * - Otherwise, and if this is the first job on the context, it enqueues it on
+ * the Policy Queue
+ *
+ * The Policy's Queue can be updated by this in the following ways:
+ * - In the above case that this is the first job on the context
+ * - If the context is high priority and the context is not scheduled, then it
+ * could cause the Policy to schedule out a low-priority context, allowing
+ * this context to be scheduled in.
+ *
+ * If the context is already scheduled on the RunPool, then adding a job to it
+ * is guaranteed not to update the Policy Queue. And so, the caller is
+ * guaranteed to not need to try scheduling a context from the Run Pool - it
+ * can safely assert that the result is false.
+ *
+ * It is a programming error to have more than U32_MAX jobs in flight at a time.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold hwaccess_lock (as this will be obtained internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ *
+ * @return true indicates that the Policy Queue was updated, and so the
+ * caller will need to try scheduling a context onto the Run Pool.
+ * @return false indicates that no updates were made to the Policy Queue,
+ * so no further action is required from the caller. This is \b always returned
+ * when the context is currently scheduled.
+ */
+bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
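+
+/*
+ * A minimal caller sketch for the return value described above (illustrative
+ * only; 'kctx' and 'katom' stand for the caller's own context and atom):
+ *
+ *	if (kbasep_js_add_job(kctx, katom))
+ *		kbase_js_sched_all(kctx->kbdev);
+ */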
+
+/**
+ * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'.
+ *
+ * Completely removing a job requires several calls:
+ * - kbasep_js_copy_atom_retained_state(), to capture the 'retained state' of
+ *   the atom
+ * - kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler
+ * - kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
+ *   remaining state held as part of the job having been run.
+ *
+ * In the common case of atoms completing normally, this set of actions is
+ * more optimal for spinlock purposes than having kbasep_js_remove_job()
+ * handle all of the actions.
+ *
+ * In the case of cancelling atoms, it is easier to call
+ * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool
+ *
+ * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
+ * kbasep_js_remove_cancelled_job() instead.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ *
+ */
+void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom);
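+
+/*
+ * A minimal sketch of the full removal sequence described above, using the
+ * retained-state helpers declared later in this header ('kbdev', 'kctx' and
+ * 'katom' are the caller's own objects):
+ *
+ *	struct kbasep_js_atom_retained_state retained;
+ *
+ *	kbasep_js_atom_retained_state_copy(&retained, katom);
+ *	kbasep_js_remove_job(kbdev, kctx, katom);
+ *	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
+ *			&retained);
+ */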
+
+/**
+ * @brief Completely remove a job chain from the Job Scheduler, in the case
+ * where the job chain was cancelled.
+ *
+ * This is a variant of kbasep_js_remove_job() that takes care of removing all
+ * of the retained state too. This is generally useful for cancelled atoms,
+ * which need not be handled in an optimal way.
+ *
+ * It is a programming error to call this when:
+ * - \a atom is not a job belonging to kctx.
+ * - \a atom has already been removed from the Job Scheduler.
+ * - \a atom is still in the runpool:
+ *  - it is not being killed with kbasep_jd_cancel()
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold the hwaccess_lock, (as this will be obtained
+ *   internally)
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
+ * obtained internally)
+ *
+ * @return true indicates that ctx attributes have changed and the caller
+ * should call kbase_js_sched_all() to try to run more jobs
+ * @return false otherwise
+ */
+bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
+						struct kbase_context *kctx,
+						struct kbase_jd_atom *katom);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold mmu_hw_mutex and hwaccess_lock, because they will be
+ *   used internally.
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Refcount a context as being busy, preventing it from being scheduled
+ * out.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locks must be held by the caller:
+ * - mmu_hw_mutex, hwaccess_lock
+ *
+ * @return value != false if the retain succeeded, and the context will not be scheduled out.
+ * @return false if the retain failed (because the context is being/has been scheduled out).
+ */
+bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Lookup a context in the Run Pool based upon its current address space
+ * and ensure that it stays scheduled in.
+ *
+ * The context is refcounted as being busy to prevent it from scheduling
+ * out. It must be released with kbasep_js_runpool_release_ctx() when it is no
+ * longer required to stay scheduled in.
+ *
+ * @note This function can safely be called from IRQ context.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ *   If the hwaccess_lock is already held, then the caller should use
+ *   kbasep_js_runpool_lookup_ctx_nolock() instead.
+ *
+ * @return a valid struct kbase_context on success, which has been refcounted as being busy.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
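+
+/*
+ * A minimal sketch of the lookup/release pairing described above ('as_nr' is
+ * the caller's address space number):
+ *
+ *	struct kbase_context *kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
+ *
+ *	if (kctx) {
+ *		... use kctx while it cannot be scheduled out ...
+ *		kbasep_js_runpool_release_ctx(kbdev, kctx);
+ *	}
+ */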
+
+/**
+ * @brief Handle the requeuing/killing of a context that was evicted from the
+ * policy queue or runpool.
+ *
+ * This should be used whenever handing off a context that has been evicted
+ * from the policy queue or the runpool:
+ * - If the context is not dying and has jobs, it gets re-added to the policy
+ * queue
+ * - Otherwise, it is not added
+ *
+ * In addition, if the context is dying the jobs are killed asynchronously.
+ *
+ * In all cases, the Power Manager active reference is released
+ * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true.  \a
+ * has_pm_ref must be set to false whenever the context was not previously in
+ * the runpool and does not hold a Power Manager active refcount. Note that
+ * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
+ * active refcount even though they weren't in the runpool.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ */
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, bool has_pm_ref);
+
+/**
+ * @brief Release a refcount of a context being busy, allowing it to be
+ * scheduled out.
+ *
+ * When the refcount reaches zero, the context \em might be scheduled out
+ * (depending on whether the Scheduling Policy has deemed it so, or if it has
+ * run out of jobs).
+ *
+ * If the context does get scheduled out, then the following actions will be
+ * taken as part of descheduling a context:
+ * - For the context being descheduled:
+ *  - If the context is in the processing of dying (all the jobs are being
+ * removed from it), then descheduling also kills off any jobs remaining in the
+ * context.
+ *  - If the context is not dying, and any jobs remain after descheduling the
+ * context then it is re-enqueued to the Policy's Queue.
+ *  - Otherwise, the context is still known to the scheduler, but remains absent
+ * from the Policy Queue until a job is next added to it.
+ *  - In all descheduling cases, the Power Manager active reference (obtained
+ * during kbasep_js_try_schedule_head_ctx()) is released (kbase_pm_context_idle()).
+ *
+ * Whilst the context is being descheduled, this also handles actions that
+ * cause more atoms to be run:
+ * - Attempt submitting atoms when the Context Attributes on the Runpool have
+ * changed. This is because the context being scheduled out could mean that
+ * there are more opportunities to run atoms.
+ * - Attempt submitting to a slot that was previously blocked due to affinity
+ * restrictions. This is usually only necessary when releasing a context
+ * happens as part of completing a previous job, but is harmless nonetheless.
+ * - Attempt scheduling in a new context (if one is available), and if necessary,
+ * running a job from that new context.
+ *
+ * Unlike retaining a context in the runpool, this function \b cannot be called
+ * from IRQ context.
+ *
+ * It is a programming error to call this on a \a kctx that is not currently
+ * scheduled, or that already has a zero refcount.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
+ * actions from completing an atom.
+ *
+ * This is usually called as part of completing an atom and releasing the
+ * refcount on the context held by the atom.
+ *
+ * Therefore, the extra actions carried out are part of handling actions queued
+ * on a completed atom, namely:
+ * - Releasing the atom's context attributes
+ * - Retrying the submission on a particular slot, because we couldn't submit
+ * on that slot from an IRQ handler.
+ *
+ * The locking conditions of this function are the same as those for
+ * kbasep_js_runpool_release_ctx()
+ */
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * @brief Variant of kbase_js_runpool_release_ctx() that assumes that
+ * kbasep_js_device_data::runpool_mutex and
+ * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not
+ * attempt to schedule new contexts.
+ */
+void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
+						struct kbase_context *kctx);
+
+/**
+ * @brief Schedule in a privileged context
+ *
+ * This schedules a context in regardless of the context priority.
+ * If the runpool is full, a context will be forced out of the runpool and the
+ * function will wait for the new context to be scheduled in.
+ * The context will be kept scheduled in (and the corresponding address space
+ * reserved) until kbasep_js_release_privileged_ctx() is called.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used internally).
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
+ * be used internally.
+ *
+ */
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * @brief Release a privileged context, allowing it to be scheduled out.
+ *
+ * See kbasep_js_runpool_release_ctx for potential side effects.
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ * - it must \em not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this will be
+ * obtained internally)
+ * - it must \em not hold the kbase_device::mmu_hw_mutex (as this will be
+ * obtained internally)
+ *
+ */
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
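+
+/*
+ * A minimal sketch of the privileged schedule/release pairing: the context
+ * (and its address space) stays scheduled in between the two calls.
+ *
+ *	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
+ *	... work that requires kctx to remain scheduled in ...
+ *	kbasep_js_release_privileged_ctx(kbdev, kctx);
+ */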
+
+/**
+ * @brief Try to submit the next job on each slot
+ *
+ * The following locks may be used:
+ * - kbasep_js_device_data::runpool_mutex
+ * - hwaccess_lock
+ */
+void kbase_js_try_run_jobs(struct kbase_device *kbdev);
+
+/**
+ * @brief Suspend the job scheduler during a Power Management Suspend event.
+ *
+ * Causes all contexts to be removed from the runpool, and prevents any
+ * contexts from (re)entering the runpool.
+ *
+ * This does not handle suspending the one privileged context: the caller must
+ * instead do this by by suspending the GPU HW Counter Instrumentation.
+ *
+ * This will eventually cause all Power Management active references held by
+ * contexts on the runpool to be released, without running any more atoms.
+ *
+ * The caller must then wait for all Power Management active refcounts to become
+ * zero before completing the suspend.
+ *
+ * The emptying mechanism may take some time to complete, since it can wait for
+ * jobs to complete naturally instead of forcing them to end quickly. However,
+ * this is bounded by the Job Scheduler's Job Timeouts. Hence, this
+ * function is guaranteed to complete in a finite time.
+ */
+void kbasep_js_suspend(struct kbase_device *kbdev);
+
+/**
+ * @brief Resume the Job Scheduler after a Power Management Resume event.
+ *
+ * This restores the actions from kbasep_js_suspend():
+ * - Schedules contexts back into the runpool
+ * - Resumes running atoms on the GPU
+ */
+void kbasep_js_resume(struct kbase_device *kbdev);
+
+/**
+ * @brief Submit an atom to the job scheduler.
+ *
+ * The atom is enqueued on the context's ringbuffer. The caller must have
+ * ensured that all dependencies can be represented in the ringbuffer.
+ *
+ * Caller must hold jctx->lock
+ *
+ * @param[in] kctx  Context pointer
+ * @param[in] atom  Pointer to the atom to submit
+ *
+ * @return Whether the context needs to be enqueued. */
+bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
+					struct kbase_jd_atom *katom);
+
+/**
+ * jsctx_ll_flush_to_rb() - Pushes atoms from the linked list to ringbuffer.
+ * @kctx:  Context Pointer
+ * @prio:  Priority (specifies the queue together with js).
+ * @js:    Job slot (specifies the queue together with prio).
+ *
+ * Pushes all possible atoms from the linked list to the ringbuffer. The
+ * number of atoms pushed is limited by the free space in the ringbuffer and
+ * the number of atoms available in the linked list.
+ */
+void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js);
+
+/**
+ * @brief Pull an atom from a context in the job scheduler for execution.
+ *
+ * The atom will not be removed from the ringbuffer at this stage.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx  Context to pull from
+ * @param[in] js    Job slot to pull from
+ * @return          Pointer to an atom, or NULL if there are no atoms for this
+ *                  slot that can be currently run.
+ */
+struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js);
+
+/**
+ * @brief Return an atom to the job scheduler ringbuffer.
+ *
+ * An atom is 'unpulled' if execution is stopped but intended to be returned to
+ * later. The most common reason for this is that the atom has been
+ * soft-stopped.
+ *
+ * Note that if multiple atoms are to be 'unpulled', they must be returned in
+ * the reverse order to which they were originally pulled. It is a programming
+ * error to return atoms in any other order.
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] kctx  Context pointer
+ * @param[in] atom  Pointer to the atom to unpull
+ */
+void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom);
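+
+/*
+ * A minimal pull/unpull sketch under the HW access lock; 'cannot_run_now' is
+ * a placeholder for whatever condition stops the caller running the atom:
+ *
+ *	struct kbase_jd_atom *katom = kbase_js_pull(kctx, js);
+ *
+ *	if (katom && cannot_run_now)
+ *		kbase_js_unpull(kctx, katom);
+ */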
+
+/**
+ * @brief Complete an atom from jd_done_worker(), removing it from the job
+ * scheduler ringbuffer.
+ *
+ * If the atom failed then all dependee atoms marked for failure propagation
+ * will also fail.
+ *
+ * @param[in] kctx  Context pointer
+ * @param[in] katom Pointer to the atom to complete
+ * @return true if the context is now idle (no jobs pulled)
+ *         false otherwise
+ */
+bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
+				struct kbase_jd_atom *katom);
+
+/**
+ * @brief Complete an atom.
+ *
+ * Most of the work required to complete an atom will be performed by
+ * jd_done_worker().
+ *
+ * The HW access lock must be held when calling this function.
+ *
+ * @param[in] katom         Pointer to the atom to complete
+ * @param[in] end_timestamp The time that the atom completed (may be NULL)
+ *
+ * Return: Atom that has now been unblocked and can now be run, or NULL if none
+ */
+struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
+		ktime_t *end_timestamp);
+
+/**
+ * @brief Submit atoms from all available contexts.
+ *
+ * This will attempt to submit as many jobs as possible to the provided job
+ * slots. It will exit when either all job slots are full, or all contexts have
+ * been used.
+ *
+ * @param[in] kbdev    Device pointer
+ * @param[in] js_mask  Mask of job slots to submit to
+ */
+void kbase_js_sched(struct kbase_device *kbdev, int js_mask);
+
+/**
+ * kbase_js_zap_context - Attempt to deschedule a context that is being
+ *                        destroyed
+ * @kctx: Context pointer
+ *
+ * This will attempt to remove a context from any internal job scheduler queues
+ * and perform any other actions to ensure that no further jobs can be
+ * submitted from the context.
+ *
+ * If the context is currently scheduled, then the caller must wait for all
+ * pending jobs to complete before taking any further action.
+ */
+void kbase_js_zap_context(struct kbase_context *kctx);
+
+/**
+ * @brief Validate an atom
+ *
+ * This will determine whether the atom can be scheduled onto the GPU. Atoms
+ * with invalid combinations of core requirements will be rejected.
+ *
+ * @param[in] kbdev  Device pointer
+ * @param[in] katom  Atom to validate
+ * @return           true if atom is valid
+ *                   false otherwise
+ */
+bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
+				struct kbase_jd_atom *katom);
+
+/**
+ * kbase_js_set_timeouts - update all JS timeouts with user specified data
+ * @kbdev: Device pointer
+ *
+ * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
+ * set to a positive number then that becomes the new value used, if a timeout
+ * is negative then the default is set.
+ */
+void kbase_js_set_timeouts(struct kbase_device *kbdev);
+
+/**
+ * kbase_js_set_ctx_priority - set the context priority
+ * @kctx: Context pointer
+ * @new_priority: New priority value for the Context
+ *
+ * The context priority is set to a new value and it is moved to the
+ * pullable/unpullable list as per the new priority.
+ */
+void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority);
+
+
+/**
+ * kbase_js_update_ctx_priority - update the context priority
+ * @kctx: Context pointer
+ *
+ * The context priority gets updated as per the priority of atoms currently in
+ * use for that context, but only if system priority mode for context scheduling
+ * is being used.
+ */
+void kbase_js_update_ctx_priority(struct kbase_context *kctx);
+
+/*
+ * Helpers follow
+ */
+
+/**
+ * @brief Check that a context is allowed to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and wrap up
+ * the long repeated line of code.
+ *
+ * As with any bool, never compare the return value against true.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+	u16 test_bit;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	test_bit = (u16) (1u << kctx->as_nr);
+
+	return (bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
+}
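+
+/*
+ * Illustrative usage (with hwaccess_lock held): test the result directly
+ * rather than comparing it against true.
+ *
+ *	if (kbasep_js_is_submit_allowed(js_devdata, kctx))
+ *		... this context may submit jobs ...
+ */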
+
+/**
+ * @brief Allow a context to submit jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and wrap up
+ * the long repeated line of code.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+	u16 set_bit;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	set_bit = (u16) (1u << kctx->as_nr);
+
+	dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)",
+			kctx, kctx->as_nr);
+
+	js_devdata->runpool_irq.submit_allowed |= set_bit;
+}
+
+/**
+ * @brief Prevent a context from submitting more jobs on this policy
+ *
+ * The purpose of this abstraction is to hide the underlying data size, and wrap up
+ * the long repeated line of code.
+ *
+ * The caller must hold hwaccess_lock.
+ */
+static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
+{
+	u16 clear_bit;
+	u16 clear_mask;
+
+	/* Ensure context really is scheduled in */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	clear_bit = (u16) (1u << kctx->as_nr);
+	clear_mask = ~clear_bit;
+
+	dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)",
+			kctx, kctx->as_nr);
+
+	js_devdata->runpool_irq.submit_allowed &= clear_mask;
+}
+
+/**
+ * Create an initial 'invalid' atom retained state, that requires no
+ * atom-related work to be done on releasing with
+ * kbasep_js_runpool_release_ctx_and_katom_retained_state()
+ */
+static inline void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state)
+{
+	retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
+	retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
+}
+
+/**
+ * Copy atom state that can be made available after jd_done_nolock() is called
+ * on that atom.
+ */
+static inline void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom)
+{
+	retained_state->event_code = katom->event_code;
+	retained_state->core_req = katom->core_req;
+	retained_state->sched_priority = katom->sched_priority;
+	retained_state->device_nr = katom->device_nr;
+}
+
+/**
+ * @brief Determine whether an atom has finished (given its retained state),
+ * and so should be given back to userspace/removed from the system.
+ *
+ * Reasons for an atom not finishing include:
+ * - Being soft-stopped (and so, the atom should be resubmitted sometime later)
+ *
+ * @param[in] katom_retained_state the retained state of the atom to check
+ * @return    false if the atom has not finished
+ * @return    !=false if the atom has finished
+ */
+static inline bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	return (bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
+}
+
+/**
+ * @brief Determine whether a struct kbasep_js_atom_retained_state is valid
+ *
+ * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the
+ * code should just ignore it.
+ *
+ * @param[in] katom_retained_state the atom's retained state to check
+ * @return    false if the retained state is invalid, and can be ignored
+ * @return    !=false if the retained state is valid
+ */
+static inline bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	return (bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
+}
+
+/**
+ * @brief Variant of kbasep_js_runpool_lookup_ctx() that can be used when the
+ * context is guaranteed to be already previously retained.
+ *
+ * It is a programming error to supply the \a as_nr of a context that has not
+ * been previously retained/has a busy refcount of zero. The only exception is
+ * when there is no ctx in \a as_nr (NULL returned).
+ *
+ * The following locking conditions are made on the caller:
+ * - it must \em not hold the hwaccess_lock, because it will be used internally.
+ *
+ * @return a valid struct kbase_context on success, with a refcount that is guaranteed
+ * to be non-zero and unmodified by this function.
+ * @return NULL on failure, indicating that no context was found in \a as_nr
+ */
+static inline struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)
+{
+	struct kbase_context *found_kctx;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
+
+	found_kctx = kbdev->as_to_kctx[as_nr];
+	KBASE_DEBUG_ASSERT(found_kctx == NULL ||
+			atomic_read(&found_kctx->refcount) > 0);
+
+	return found_kctx;
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_inc_context_count(
+						struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+
+	/* Track total contexts */
+	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
+	++(js_devdata->nr_all_contexts_running);
+
+	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+		/* Track contexts that can submit jobs */
+		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
+									S8_MAX);
+		++(js_devdata->nr_user_contexts_running);
+	}
+}
+
+/*
+ * The following locking conditions are made on the caller:
+ * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
+ * - The caller must hold the kbasep_js_device_data::runpool_mutex
+ */
+static inline void kbase_js_runpool_dec_context_count(
+						struct kbase_device *kbdev,
+						struct kbase_context *kctx)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&js_devdata->runpool_mutex);
+
+	/* Track total contexts */
+	--(js_devdata->nr_all_contexts_running);
+	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);
+
+	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
+		/* Track contexts that can submit jobs */
+		--(js_devdata->nr_user_contexts_running);
+		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
+	}
+}
+
+
+/**
+ * @brief Submit atoms from all available contexts to all job slots.
+ *
+ * This will attempt to submit as many jobs as possible. It will exit when
+ * either all job slots are full, or all contexts have been used.
+ *
+ * @param[in] kbdev    Device pointer
+ */
+static inline void kbase_js_sched_all(struct kbase_device *kbdev)
+{
+	kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
+}
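+
+/*
+ * Illustrative variant: submission can be restricted to a single job slot
+ * 'js' by passing a one-bit mask to kbase_js_sched() directly.
+ *
+ *	kbase_js_sched(kbdev, 1 << js);
+ */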
+
+extern const int
+kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS];
+
+extern const base_jd_prio
+kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+/**
+ * kbasep_js_atom_prio_to_sched_prio(): - Convert atom priority (base_jd_prio)
+ *                                        to relative ordering
+ * @atom_prio: Priority ID to translate.
+ *
+ * Atom priority values for @ref base_jd_prio cannot be compared directly to
+ * find out which are higher or lower.
+ *
+ * This function will convert base_jd_prio values for successively lower
+ * priorities into a monotonically increasing sequence. That is, the lower the
+ * base_jd_prio priority, the higher the value produced by this function. This
+ * is in accordance with how the rest of the kernel treats priority.
+ *
+ * The mapping is 1:1 and the size of the valid input range is the same as the
+ * size of the valid output range, i.e.
+ * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
+ *
+ * Note: This must be kept in sync with the BASE_JD_PRIO_<...> definitions.
+ *
+ * Return: On success: a value in the inclusive range
+ *         0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure:
+ *         KBASE_JS_ATOM_SCHED_PRIO_INVALID
+ */
+static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
+{
+	if (atom_prio >= BASE_JD_NR_PRIO_LEVELS)
+		return KBASE_JS_ATOM_SCHED_PRIO_INVALID;
+
+	return kbasep_js_atom_priority_to_relative[atom_prio];
+}
+
+static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
+{
+	unsigned int prio_idx;
+
+	KBASE_DEBUG_ASSERT(0 <= sched_prio
+			&& sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);
+
+	prio_idx = (unsigned int)sched_prio;
+
+	return kbasep_js_relative_priority_to_atom[prio_idx];
+}
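+
+/*
+ * Illustrative round trip (assuming the usual BASE_JD_PRIO_<...> levels):
+ * because the mapping is 1:1, converting an atom priority to its scheduler
+ * ordering and back yields the original value.
+ *
+ *	int sched_prio = kbasep_js_atom_prio_to_sched_prio(BASE_JD_PRIO_MEDIUM);
+ *	base_jd_prio prio = kbasep_js_sched_prio_to_atom_prio(sched_prio);
+ *
+ * after which prio is BASE_JD_PRIO_MEDIUM again.
+ */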
+
+	  /** @} *//* end group kbase_js */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_JS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c
new file mode 100644
index 0000000..1ff230c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c
@@ -0,0 +1,283 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config.h>
+
+/*
+ * Private functions follow
+ */
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, retain that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+		KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] < S8_MAX);
+		++(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+		if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 1) {
+			/* First refcount indicates a state change */
+			runpool_state_changed = true;
+			KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_RUNPOOL, kctx, NULL, 0u, attribute);
+		}
+	}
+
+	return runpool_state_changed;
+}
+
+/**
+ * @brief Check whether a ctx has a certain attribute, and if so, release that
+ * attribute on the runpool.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx is scheduled on the runpool
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_device_data *js_devdata;
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_devdata = &kbdev->js_data;
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));
+
+	if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, attribute) != false) {
+		KBASE_DEBUG_ASSERT(js_devdata->runpool_irq.ctx_attr_ref_count[attribute] > 0);
+		--(js_devdata->runpool_irq.ctx_attr_ref_count[attribute]);
+
+		if (js_devdata->runpool_irq.ctx_attr_ref_count[attribute] == 0) {
+			/* Last de-refcount indicates a state change */
+			runpool_state_changed = true;
+			KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_RUNPOOL, kctx, NULL, 0u, attribute);
+		}
+	}
+
+	return runpool_state_changed;
+}
+
+/**
+ * @brief Retain a certain attribute on a ctx, also retaining it on the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] < U32_MAX);
+
+	++(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+		/* Only ref-count the attribute on the runpool for the first time this context sees this attribute */
+		KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_ON_CTX, kctx, NULL, 0u, attribute);
+		runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, attribute);
+	}
+
+	return runpool_state_changed;
+}
+
+/**
+ * @brief Release a certain attribute on a ctx, also releasing it from the runpool
+ * if the context is scheduled.
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * This may allow the scheduler to submit more jobs than previously.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+static bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+	bool runpool_state_changed = false;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
+	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.ctx_attr_ref_count[attribute] > 0);
+
+	if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) && js_kctx_info->ctx.ctx_attr_ref_count[attribute] == 1) {
+		lockdep_assert_held(&kbdev->hwaccess_lock);
+		/* Only de-ref-count the attribute on the runpool when this is the last ctx-reference to it */
+		runpool_state_changed = kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, attribute);
+		KBASE_TRACE_ADD(kbdev, JS_CTX_ATTR_NOW_OFF_CTX, kctx, NULL, 0u, attribute);
+	}
+
+	/* De-ref must happen afterwards, because kbasep_js_ctx_attr_runpool_release() needs to check it too */
+	--(js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+
+	return runpool_state_changed;
+}
+
+/*
+ * More commonly used public functions
+ */
+
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	bool runpool_state_changed;
+	int i;
+
+	/* Retain any existing attributes */
+	for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+		if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+			/* The context is being scheduled in, so update the runpool with the new attributes */
+			runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+
+			/* We don't need to know about state changed, because retaining a
+			 * context occurs on scheduling it, and that itself will also try
+			 * to run new atoms */
+			CSTD_UNUSED(runpool_state_changed);
+		}
+	}
+}
+
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
+{
+	bool runpool_state_changed = false;
+	int i;
+
+	/* Release any existing attributes */
+	for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
+		if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != false) {
+			/* The context is being scheduled out, so update the runpool on the removed attributes */
+			runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
+		}
+	}
+
+	return runpool_state_changed;
+}
+
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	bool runpool_state_changed = false;
+	base_jd_core_req core_req;
+
+	KBASE_DEBUG_ASSERT(katom);
+	core_req = katom->core_req;
+
+	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+	else
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+	if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+		/* Atom that can run on slot1 or slot2, and can use all cores */
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_retain_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+	}
+
+	/* We don't need to know about state changed, because retaining an
+	 * atom occurs on adding it, and that itself will also try to run
+	 * new atoms */
+	CSTD_UNUSED(runpool_state_changed);
+}
+
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
+{
+	bool runpool_state_changed = false;
+	base_jd_core_req core_req;
+
+	KBASE_DEBUG_ASSERT(katom_retained_state);
+	core_req = katom_retained_state->core_req;
+
+	/* No-op for invalid atoms */
+	if (kbasep_js_atom_retained_state_is_valid(katom_retained_state) == false)
+		return false;
+
+	if (core_req & BASE_JD_REQ_ONLY_COMPUTE)
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
+	else
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
+
+	if ((core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T)) != 0 && (core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)) == 0) {
+		/* Atom that can run on slot1 or slot2, and can use all cores */
+		runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES);
+	}
+
+	return runpool_state_changed;
+}
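
The retain/release helpers above classify an atom purely from its core_req bits. The standalone sketch below mirrors that classification with placeholder flag values; they are illustrative only, not the real base_jd_core_req encodings from the Mali UAPI headers.

/* Illustrative sketch only: placeholder flag values, not the real
 * base_jd_core_req encodings. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_ONLY_COMPUTE            (1u << 0)
#define REQ_CS                      (1u << 1)
#define REQ_T                       (1u << 2)
#define REQ_COHERENT_GROUP          (1u << 3)
#define REQ_SPECIFIC_COHERENT_GROUP (1u << 4)

static bool is_compute(unsigned int core_req)
{
	return core_req & REQ_ONLY_COMPUTE;
}

static bool needs_all_cores_attr(unsigned int core_req)
{
	/* Runs on slot 1 or slot 2 and is not restricted to a coherent group */
	return (core_req & (REQ_CS | REQ_ONLY_COMPUTE | REQ_T)) &&
	       !(core_req & (REQ_COHERENT_GROUP | REQ_SPECIFIC_COHERENT_GROUP));
}

int main(void)
{
	unsigned int req = REQ_ONLY_COMPUTE;

	printf("compute=%d all_cores=%d\n",
	       is_compute(req), needs_all_cores_attr(req));
	return 0;
}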
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h
new file mode 100644
index 0000000..25fd397
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h
@@ -0,0 +1,155 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_ctx_attr.h
+ * Job Scheduler Context Attribute APIs
+ */
+
+#ifndef _KBASE_JS_CTX_ATTR_H_
+#define _KBASE_JS_CTX_ATTR_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+
+/**
+ * Retain all attributes of a context
+ *
+ * This occurs when the context is scheduled onto the runpool (but after
+ * is_scheduled is set)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ */
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Release all attributes of a context
+ *
+ * This occurs when the context is scheduled out of the runpool (but before
+ * is_scheduled is cleared)
+ *
+ * Requires:
+ * - jsctx mutex
+ * - runpool_irq spinlock
+ * - ctx->is_scheduled is true
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
+
+/**
+ * Retain all attributes of an atom
+ *
+ * This occurs on adding an atom to a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ */
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * Release all attributes of an atom, given its retained state.
+ *
+ * This occurs after (permanently) removing an atom from a context
+ *
+ * Requires:
+ * - jsctx mutex
+ * - If the context is scheduled, then runpool_irq spinlock must also be held
+ *
+ * This is a no-op when \a katom_retained_state is invalid.
+ *
+ * @return true indicates a change in ctx attributes state of the runpool.
+ * In this state, the scheduler might be able to submit more jobs than
+ * previously, and so the caller should ensure kbasep_js_try_run_next_job_nolock()
+ * or similar is called sometime later.
+ * @return false indicates no change in ctx attributes state of the runpool.
+ */
+bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline s8 kbasep_js_ctx_attr_count_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_device_data *js_devdata;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_devdata = &kbdev->js_data;
+
+	return js_devdata->runpool_irq.ctx_attr_ref_count[attribute];
+}
+
+/**
+ * Requires:
+ * - runpool_irq spinlock
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
+{
+	/* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */
+	return (bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute);
+}
+
+/**
+ * Requires:
+ * - jsctx mutex
+ */
+static inline bool kbasep_js_ctx_attr_is_attr_on_ctx(struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
+{
+	struct kbasep_js_kctx_info *js_kctx_info;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
+	js_kctx_info = &kctx->jctx.sched_info;
+
+	/* In general, attributes are 'on' when they have a refcount (which should never be < 0) */
+	return (bool) (js_kctx_info->ctx.ctx_attr_ref_count[attribute]);
+}
+
+	  /** @} *//* end group kbase_js */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_JS_CTX_ATTR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_defs.h b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
new file mode 100644
index 0000000..052a0b3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
@@ -0,0 +1,416 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_js_defs.h
+ * Job Scheduler Type Definitions
+ */
+
+#ifndef _KBASE_JS_DEFS_H_
+#define _KBASE_JS_DEFS_H_
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup kbase_js
+ * @{
+ */
+/* Forward decls */
+struct kbase_device;
+struct kbase_jd_atom;
+
+
+typedef u32 kbase_context_flags;
+
+struct kbasep_atom_req {
+	base_jd_core_req core_req;
+	kbase_context_flags ctx_req;
+	u32 device_nr;
+};
+
+/** Callback function run on all of a context's jobs registered with the Job
+ * Scheduler */
+typedef void (*kbasep_js_ctx_job_cb)(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+
+/**
+ * @brief Maximum number of jobs that can be submitted to a job slot whilst
+ * inside the IRQ handler.
+ *
+ * This is important because GPU NULL jobs can complete whilst the IRQ handler
+ * is running. Otherwise, it potentially allows an unlimited number of GPU NULL
+ * jobs to be submitted inside the IRQ handler, which increases IRQ latency.
+ */
+#define KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ 2
+
+/**
+ * @brief Context attributes
+ *
+ * Each context attribute can be thought of as a boolean value that caches some
+ * state information about either the runpool, or the context:
+ * - In the case of the runpool, it is a cache of "Do any contexts owned by
+ * the runpool have attribute X?"
+ * - In the case of a context, it is a cache of "Do any atoms owned by the
+ * context have attribute X?"
+ *
+ * The boolean value of the context attributes often affect scheduling
+ * decisions, such as affinities to use and job slots to use.
+ *
+ * To accommodate changes of state in the context, each attribute is refcounted
+ * in the context, and in the runpool for all running contexts. Specifically:
+ * - The runpool holds a refcount of how many contexts in the runpool have this
+ * attribute.
+ * - The context holds a refcount of how many atoms have this attribute.
+ */
+enum kbasep_js_ctx_attr {
+	/** Attribute indicating a context that contains Compute jobs. That is,
+	 * the context has jobs of type @ref BASE_JD_REQ_ONLY_COMPUTE
+	 *
+	 * @note A context can be both 'Compute' and 'Non Compute' if it contains
+	 * both types of jobs.
+	 */
+	KBASEP_JS_CTX_ATTR_COMPUTE,
+
+	/** Attribute indicating a context that contains Non-Compute jobs. That is,
+	 * the context has some jobs that are \b not of type @ref
+	 * BASE_JD_REQ_ONLY_COMPUTE.
+	 *
+	 * @note A context can be both 'Compute' and 'Non Compute' if it contains
+	 * both types of jobs.
+	 */
+	KBASEP_JS_CTX_ATTR_NON_COMPUTE,
+
+	/** Attribute indicating that a context contains compute-job atoms that
+	 * aren't restricted to a coherent group, and can run on all cores.
+	 *
+	 * Specifically, this is when the atom's \a core_req satisfies:
+	 * - (\a core_req & (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE | BASE_JD_REQ_T) // uses slot 1 or slot 2
+	 * - && !(\a core_req & BASE_JD_REQ_COHERENT_GROUP) // not restricted to coherent groups
+	 *
+	 * Such atoms could be blocked from running if one of the coherent groups
+	 * is being used by another job slot, so tracking this context attribute
+	 * allows us to prevent such situations.
+	 *
+	 * @note This doesn't take into account the 1-coregroup case, where all
+	 * compute atoms would effectively be able to run on 'all cores', but
+	 * contexts will still not always get marked with this attribute. Instead,
+	 * it is the caller's responsibility to take into account the number of
+	 * coregroups when interpreting this attribute.
+	 *
+	 * @note Whilst Tiler atoms are normally combined with
+	 * BASE_JD_REQ_COHERENT_GROUP, it is possible to send such atoms without
+	 * BASE_JD_REQ_COHERENT_GROUP set. This is an unlikely case, but it's easy
+	 * enough to handle anyway.
+	 */
+	KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES,
+
+	/** Must be the last in the enum */
+	KBASEP_JS_CTX_ATTR_COUNT
+};
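
As a rough, self-contained model of the refcount semantics described above (not kbase code): an attribute is "on" while its count is non-zero, so only the 0 -> 1 and 1 -> 0 transitions report a state change.

/* Toy model of the 0->1 / 1->0 attribute transitions. */
#include <stdbool.h>
#include <stdio.h>

static int attr_count;

/* Returns true when the attribute just switched on (0 -> 1). */
static bool attr_retain(void)
{
	return ++attr_count == 1;
}

/* Returns true when the attribute just switched off (1 -> 0). */
static bool attr_release(void)
{
	return --attr_count == 0;
}

int main(void)
{
	printf("retain:  changed=%d\n", attr_retain());  /* 1 */
	printf("retain:  changed=%d\n", attr_retain());  /* 0 */
	printf("release: changed=%d\n", attr_release()); /* 0 */
	printf("release: changed=%d\n", attr_release()); /* 1 */
	return 0;
}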
+
+enum {
+	/** Bit indicating that new atom should be started because this atom completed */
+	KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0),
+	/** Bit indicating that the atom was evicted from the JS_NEXT registers */
+	KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1)
+};
+
+/** Combination of KBASE_JS_ATOM_DONE_<...> bits */
+typedef u32 kbasep_js_atom_done_code;
+
+/*
+ * Context scheduling mode defines for kbase_device::js_ctx_scheduling_mode
+ */
+enum {
+	/*
+	 * In this mode, higher priority atoms will be scheduled first,
+	 * regardless of the context they belong to. Newly-runnable higher
+	 * priority atoms can preempt lower priority atoms currently running on
+	 * the GPU, even if they belong to a different context.
+	 */
+	KBASE_JS_SYSTEM_PRIORITY_MODE = 0,
+
+	/*
+	 * In this mode, the highest-priority atom will be chosen from each
+	 * context in turn using a round-robin algorithm, so priority only has
+	 * an effect within the context an atom belongs to. Newly-runnable
+	 * higher priority atoms can preempt the lower priority atoms currently
+	 * running on the GPU, but only if they belong to the same context.
+	 */
+	KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE,
+
+	/* Must be the last in the enum */
+	KBASE_JS_PRIORITY_MODE_COUNT,
+};
+
+/*
+ * Internal atom priority defines for kbase_jd_atom::sched_prio
+ */
+enum {
+	KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0,
+	KBASE_JS_ATOM_SCHED_PRIO_MED,
+	KBASE_JS_ATOM_SCHED_PRIO_LOW,
+	KBASE_JS_ATOM_SCHED_PRIO_COUNT,
+};
+
+/* Invalid priority for kbase_jd_atom::sched_prio */
+#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1
+
+/* Default priority in the case of contexts with no atoms, or being lenient
+ * about invalid priorities from userspace.
+ */
+#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED
+
+/**
+ * @brief KBase Device Data Job Scheduler sub-structure
+ *
+ * This encapsulates the current context of the Job Scheduler on a particular
+ * device. This context is global to the device, and is not tied to any
+ * particular struct kbase_context running on the device.
+ *
+ * nr_contexts_running and as_free are optimized for packing together (by making
+ * them smaller types than u32). The operations on them should rarely involve
+ * masking. The use of signed types for arithmetic indicates to the compiler that
+ * the value will not rollover (which would be undefined behavior), and so the
+ * compiler is free to make optimizations based on that (i.e. to remove
+ * masking).
+ */
+struct kbasep_js_device_data {
+	/* Sub-structure to collect together Job Scheduling data used in IRQ
+	 * context. The hwaccess_lock must be held when accessing. */
+	struct runpool_irq {
+		/** Bitvector indicating whether a currently scheduled context is allowed to submit jobs.
+		 * When bit 'N' is set in this, it indicates whether the context bound to address space
+		 * 'N' is allowed to submit jobs.
+		 */
+		u16 submit_allowed;
+
+		/** Context Attributes:
+		 * Each is large enough to hold a refcount of the number of contexts
+		 * that can fit into the runpool. This is currently BASE_MAX_NR_AS
+		 *
+		 * Note that when BASE_MAX_NR_AS==16 we need 5 bits (not 4) to store
+		 * the refcount. Hence, it's not worthwhile reducing this to
+		 * bit-manipulation on u32s to save space (where in contrast, 4 bit
+		 * sub-fields would be easy to do and would save space).
+		 *
+		 * Whilst this must not become negative, the sign bit is used for:
+		 * - error detection in debug builds
+		 * - Optimization: it is undefined for a signed int to overflow, and so
+		 * the compiler can optimize for that never happening (thus, no masking
+		 * is required on updating the variable) */
+		s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+		/*
+		 * Affinity management and tracking
+		 */
+		/** Bitvector to aid affinity checking. Element 'n' bit 'i' indicates
+		 * that slot 'n' is using core i (i.e. slot_affinity_refcount[n][i] > 0) */
+		u64 slot_affinities[BASE_JM_MAX_NR_SLOTS];
+		/** Refcount for each core owned by each slot. Used to generate the
+		 * slot_affinities array of bitvectors
+		 *
+		 * The value of the refcount will not exceed BASE_JM_SUBMIT_SLOTS,
+		 * because it is refcounted only when a job is definitely about to be
+		 * submitted to a slot, and is de-refcounted immediately after a job
+		 * finishes */
+		s8 slot_affinity_refcount[BASE_JM_MAX_NR_SLOTS][64];
+	} runpool_irq;
+
+	/**
+	 * Run Pool mutex, for managing contexts within the runpool.
+	 * Unless otherwise specified, you must hold this lock whilst accessing any
+	 * members that follow
+	 *
+	 * In addition, this is used to access:
+	 * - the kbasep_js_kctx_info::runpool substructure
+	 */
+	struct mutex runpool_mutex;
+
+	/**
+	 * Queue Lock, used to access the Policy's queue of contexts independently
+	 * of the Run Pool.
+	 *
+	 * Of course, you don't need the Run Pool lock to access this.
+	 */
+	struct mutex queue_mutex;
+
+	/**
+	 * Scheduling semaphore. This must be held when calling
+	 * kbase_jm_kick()
+	 */
+	struct semaphore schedule_sem;
+
+	/**
+	 * List of contexts that can currently be pulled from
+	 */
+	struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+	/**
+	 * List of contexts that can not currently be pulled from, but have
+	 * jobs currently running.
+	 */
+	struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+
+	/** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */
+	s8 nr_user_contexts_running;
+	/** Number of currently scheduled contexts (including ones that are not submitting jobs) */
+	s8 nr_all_contexts_running;
+
+	/** Core Requirements to match up with base_js_atom's core_req member
+	 * @note This is a write-once member, and so no locking is required to read */
+	base_jd_core_req js_reqs[BASE_JM_MAX_NR_SLOTS];
+
+	u32 scheduling_period_ns;    /*< Value for JS_SCHEDULING_PERIOD_NS */
+	u32 soft_stop_ticks;	     /*< Value for JS_SOFT_STOP_TICKS */
+	u32 soft_stop_ticks_cl;	     /*< Value for JS_SOFT_STOP_TICKS_CL */
+	u32 hard_stop_ticks_ss;	     /*< Value for JS_HARD_STOP_TICKS_SS */
+	u32 hard_stop_ticks_cl;	     /*< Value for JS_HARD_STOP_TICKS_CL */
+	u32 hard_stop_ticks_dumping; /*< Value for JS_HARD_STOP_TICKS_DUMPING */
+	u32 gpu_reset_ticks_ss;	     /*< Value for JS_RESET_TICKS_SS */
+	u32 gpu_reset_ticks_cl;	     /*< Value for JS_RESET_TICKS_CL */
+	u32 gpu_reset_ticks_dumping; /*< Value for JS_RESET_TICKS_DUMPING */
+	u32 ctx_timeslice_ns;		 /**< Value for JS_CTX_TIMESLICE_NS */
+
+	/**< Value for JS_SOFT_JOB_TIMEOUT */
+	atomic_t soft_job_timeout_ms;
+
+	/** List of suspended soft jobs */
+	struct list_head suspended_soft_jobs_list;
+
+#ifdef CONFIG_MALI_DEBUG
+	/* Support soft-stop on a single context */
+	bool softstop_always;
+#endif				/* CONFIG_MALI_DEBUG */
+
+	/** The initialized-flag is placed at the end, to avoid cache-pollution (we should
+	 * only be using this during init/term paths).
+	 * @note This is a write-once member, and so no locking is required to read */
+	int init_status;
+
+	/* Number of contexts that can currently be pulled from */
+	u32 nr_contexts_pullable;
+
+	/* Number of contexts that can either be pulled from or are currently
+	 * running */
+	atomic_t nr_contexts_runnable;
+};
+
+/**
+ * @brief KBase Context Job Scheduling information structure
+ *
+ * This is a substructure in the struct kbase_context that encapsulates all the
+ * scheduling information.
+ */
+struct kbasep_js_kctx_info {
+
+	/**
+	 * Job Scheduler Context information sub-structure. These members are
+	 * accessed regardless of whether the context is:
+	 * - In the Policy's Run Pool
+	 * - In the Policy's Queue
+	 * - Not queued nor in the Run Pool.
+	 *
+	 * You must obtain the jsctx_mutex before accessing any other members of
+	 * this substructure.
+	 *
+	 * You may not access any of these members from IRQ context.
+	 */
+	struct kbase_jsctx {
+		struct mutex jsctx_mutex;		    /**< Job Scheduler Context lock */
+
+		/** Number of jobs <b>ready to run</b> - does \em not include the jobs waiting in
+		 * the dispatcher, and dependency-only jobs. See kbase_jd_context::job_nr
+		 * for such jobs */
+		u32 nr_jobs;
+
+		/** Context Attributes:
+		 * Each is large enough to hold a refcount of the number of atoms on
+		 * the context. */
+		u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
+
+		/**
+		 * Wait queue to wait for KCTX_SCHEDULED flag state changes.
+		 */
+		wait_queue_head_t is_scheduled_wait;
+
+		/** Link implementing JS queues. Context can be present on one
+		 * list per job slot
+		 */
+		struct list_head ctx_list_entry[BASE_JM_MAX_NR_SLOTS];
+	} ctx;
+
+	/* The initialized-flag is placed at the end, to avoid cache-pollution (we should
+	 * only be using this during init/term paths) */
+	int init_status;
+};
+
+/** Subset of atom state that can be available after jd_done_nolock() is called
+ * on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(),
+ * because the original atom could disappear. */
+struct kbasep_js_atom_retained_state {
+	/** Event code - to determine whether the atom has finished */
+	enum base_jd_event_code event_code;
+	/** core requirements */
+	base_jd_core_req core_req;
+	/* priority */
+	int sched_priority;
+	/* Core group atom was executed on */
+	u32 device_nr;
+
+};
+
+/**
+ * Value signifying 'no retry on a slot required' for:
+ * - kbase_js_atom_retained_state::retry_submit_on_slot
+ * - kbase_jd_atom::retry_submit_on_slot
+ */
+#define KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID (-1)
+
+/**
+ * base_jd_core_req value signifying 'invalid' for a kbase_jd_atom_retained_state.
+ *
+ * @see kbase_atom_retained_state_is_valid()
+ */
+#define KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID BASE_JD_REQ_DEP
+
+/**
+ * @brief The JS timer resolution, in microseconds
+ *
+ * Any non-zero difference in time will be at least this size.
+ */
+#define KBASEP_JS_TICK_RESOLUTION_US 1
+
+
+	  /** @} *//* end group kbase_js */
+	  /** @} *//* end group base_kbase_api */
+	  /** @} *//* end group base_api */
+
+#endif				/* _KBASE_JS_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_linux.h b/drivers/gpu/arm/midgard/mali_kbase_linux.h
new file mode 100644
index 0000000..003ac9e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_linux.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_linux.h
+ * Base kernel APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_LINUX_H_
+#define _KBASE_LINUX_H_
+
+/* All things that are needed for the Linux port. */
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+#if (defined(MALI_KERNEL_TEST_API) && (1 == MALI_KERNEL_TEST_API))
+	#define KBASE_EXPORT_TEST_API(func) EXPORT_SYMBOL(func)
+#else
+	#define KBASE_EXPORT_TEST_API(func)
+#endif
+
+#define KBASE_EXPORT_SYMBOL(func) EXPORT_SYMBOL(func)
+
+#endif /* _KBASE_LINUX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.c b/drivers/gpu/arm/midgard/mali_kbase_mem.c
new file mode 100644
index 0000000..04015e1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.c
@@ -0,0 +1,3825 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem.c
+ * Base kernel memory APIs
+ */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif				/* CONFIG_DMA_SHARED_BUFFER */
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+#include <linux/log2.h>
+
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_cache_policy.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_tlstream.h>
+
+/* Forward declarations */
+static void free_partial_locked(struct kbase_context *kctx,
+		struct kbase_mem_pool *pool, struct tagged_addr tp);
+
+static size_t kbase_get_num_cpu_va_bits(struct kbase_context *kctx)
+{
+#if defined(CONFIG_ARM64)
+	/* VA_BITS can be as high as 48 bits, but all bits are available for
+	 * both user and kernel.
+	 */
+	size_t cpu_va_bits = VA_BITS;
+#elif defined(CONFIG_X86_64)
+	/* x86_64 can access 48 bits of VA, but the 48th is used to denote
+	 * kernel (1) vs userspace (0), so the max here is 47.
+	 */
+	size_t cpu_va_bits = 47;
+#elif defined(CONFIG_ARM) || defined(CONFIG_X86_32)
+	size_t cpu_va_bits = sizeof(void *) * BITS_PER_BYTE;
+#else
+#error "Unknown CPU VA width for this architecture"
+#endif
+
+#ifdef CONFIG_64BIT
+	if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+		cpu_va_bits = 32;
+#endif
+
+	return cpu_va_bits;
+}
+
+/* This function finds out which RB tree the given pfn from the GPU VA belongs
+ * to based on the memory zone the pfn refers to */
+static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
+								    u64 gpu_pfn)
+{
+	struct rb_root *rbtree = NULL;
+
+	/* The gpu_pfn can only be greater than the starting pfn of the EXEC_VA
+	 * zone if this has been initialized.
+	 */
+	if (gpu_pfn >= kctx->exec_va_start)
+		rbtree = &kctx->reg_rbtree_exec;
+	else {
+		u64 same_va_end;
+
+#ifdef CONFIG_64BIT
+		if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+#endif /* CONFIG_64BIT */
+			same_va_end = KBASE_REG_ZONE_CUSTOM_VA_BASE;
+#ifdef CONFIG_64BIT
+		else
+			same_va_end = kctx->same_va_end;
+#endif /* CONFIG_64BIT */
+
+		if (gpu_pfn >= same_va_end)
+			rbtree = &kctx->reg_rbtree_custom;
+		else
+			rbtree = &kctx->reg_rbtree_same;
+	}
+
+	return rbtree;
+}
+
+/* This function inserts a region into the tree. */
+static void kbase_region_tracker_insert(struct kbase_va_region *new_reg)
+{
+	u64 start_pfn = new_reg->start_pfn;
+	struct rb_node **link = NULL;
+	struct rb_node *parent = NULL;
+	struct rb_root *rbtree = NULL;
+
+	rbtree = new_reg->rbtree;
+
+	link = &(rbtree->rb_node);
+	/* Find the right place in the tree using tree search */
+	while (*link) {
+		struct kbase_va_region *old_reg;
+
+		parent = *link;
+		old_reg = rb_entry(parent, struct kbase_va_region, rblink);
+
+		/* RBTree requires no duplicate entries. */
+		KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);
+
+		if (old_reg->start_pfn > start_pfn)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+
+	/* Put the new node there, and rebalance tree */
+	rb_link_node(&(new_reg->rblink), parent, link);
+
+	rb_insert_color(&(new_reg->rblink), rbtree);
+}
+
+static struct kbase_va_region *find_region_enclosing_range_rbtree(
+		struct rb_root *rbtree, u64 start_pfn, size_t nr_pages)
+{
+	struct rb_node *rbnode;
+	struct kbase_va_region *reg;
+	u64 end_pfn = start_pfn + nr_pages;
+
+	rbnode = rbtree->rb_node;
+
+	while (rbnode) {
+		u64 tmp_start_pfn, tmp_end_pfn;
+
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		tmp_start_pfn = reg->start_pfn;
+		tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+		/* If start is lower than this, go left. */
+		if (start_pfn < tmp_start_pfn)
+			rbnode = rbnode->rb_left;
+		/* If end is higher than this, then go right. */
+		else if (end_pfn > tmp_end_pfn)
+			rbnode = rbnode->rb_right;
+		else	/* Enclosing */
+			return reg;
+	}
+
+	return NULL;
+}
+
+struct kbase_va_region *kbase_find_region_enclosing_address(
+		struct rb_root *rbtree, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_node *rbnode;
+	struct kbase_va_region *reg;
+
+	rbnode = rbtree->rb_node;
+
+	while (rbnode) {
+		u64 tmp_start_pfn, tmp_end_pfn;
+
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		tmp_start_pfn = reg->start_pfn;
+		tmp_end_pfn = reg->start_pfn + reg->nr_pages;
+
+		/* If start is lower than this, go left. */
+		if (gpu_pfn < tmp_start_pfn)
+			rbnode = rbnode->rb_left;
+		/* If end is higher than this, then go right. */
+		else if (gpu_pfn >= tmp_end_pfn)
+			rbnode = rbnode->rb_right;
+		else	/* Enclosing */
+			return reg;
+	}
+
+	return NULL;
+}
+
+/* Find region enclosing given address. */
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
+		struct kbase_context *kctx, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_root *rbtree = NULL;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+
+	return kbase_find_region_enclosing_address(rbtree, gpu_addr);
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address);
+
+struct kbase_va_region *kbase_find_region_base_address(
+		struct rb_root *rbtree, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_node *rbnode = NULL;
+	struct kbase_va_region *reg = NULL;
+
+	rbnode = rbtree->rb_node;
+
+	while (rbnode) {
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		if (reg->start_pfn > gpu_pfn)
+			rbnode = rbnode->rb_left;
+		else if (reg->start_pfn < gpu_pfn)
+			rbnode = rbnode->rb_right;
+		else
+			return reg;
+	}
+
+	return NULL;
+}
+
+/* Find region with given base address */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(
+		struct kbase_context *kctx, u64 gpu_addr)
+{
+	u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+	struct rb_root *rbtree = NULL;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+
+	return kbase_find_region_base_address(rbtree, gpu_addr);
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address);
+
+/* Find region meeting given requirements */
+static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
+		struct kbase_va_region *reg_reqs,
+		size_t nr_pages, size_t align_offset, size_t align_mask,
+		u64 *out_start_pfn)
+{
+	struct rb_node *rbnode = NULL;
+	struct kbase_va_region *reg = NULL;
+	struct rb_root *rbtree = NULL;
+
+	/* Note that this search is a linear search, as we do not have a target
+	   address in mind, so it does not benefit from the rbtree search */
+	rbtree = reg_reqs->rbtree;
+
+	for (rbnode = rb_first(rbtree); rbnode; rbnode = rb_next(rbnode)) {
+		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+		if ((reg->nr_pages >= nr_pages) &&
+				(reg->flags & KBASE_REG_FREE)) {
+			/* Check alignment */
+			u64 start_pfn = reg->start_pfn;
+
+			/* When align_offset == align, this sequence is
+			 * equivalent to:
+			 *   (start_pfn + align_mask) & ~(align_mask)
+			 *
+			 * Otherwise, it aligns to n*align + offset, for the
+			 * lowest value n that makes this still >start_pfn */
+			start_pfn += align_mask;
+			start_pfn -= (start_pfn - align_offset) & (align_mask);
+
+			if (!(reg_reqs->flags & KBASE_REG_GPU_NX)) {
+				/* Can't end at 4GB boundary */
+				if (0 == ((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB))
+					start_pfn += align_offset;
+
+				/* Can't start at 4GB boundary */
+				if (0 == (start_pfn & BASE_MEM_PFN_MASK_4GB))
+					start_pfn += align_offset;
+
+				if (!((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB) ||
+				    !(start_pfn & BASE_MEM_PFN_MASK_4GB))
+					continue;
+			} else if (reg_reqs->flags &
+					KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
+				u64 end_pfn = start_pfn + nr_pages - 1;
+
+				if ((start_pfn & ~BASE_MEM_PFN_MASK_4GB) !=
+				    (end_pfn & ~BASE_MEM_PFN_MASK_4GB))
+					start_pfn = end_pfn & ~BASE_MEM_PFN_MASK_4GB;
+			}
+
+			if ((start_pfn >= reg->start_pfn) &&
+					(start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
+					((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1))) {
+				*out_start_pfn = start_pfn;
+				return reg;
+			}
+		}
+	}
+
+	return NULL;
+}
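
The alignment arithmetic in kbase_region_tracker_find_region_meeting_reqs() is compact; the standalone check below, with arbitrary example values (not taken from a real region), shows it yields the lowest pfn >= start_pfn of the form n*align + align_offset.

/* Standalone check of the start_pfn alignment arithmetic; values are
 * arbitrary examples. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start_pfn = 0x1005;
	uint64_t align = 0x100;          /* power of two */
	uint64_t align_mask = align - 1;
	uint64_t align_offset = 0x40;

	start_pfn += align_mask;
	start_pfn -= (start_pfn - align_offset) & align_mask;

	/* Prints 0x1040: lowest pfn >= 0x1005 of the form n*0x100 + 0x40 */
	printf("0x%" PRIx64 "\n", start_pfn);
	return 0;
}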
+
+/**
+ * @brief Remove a region object from the global list.
+ *
+ * The region reg is removed, possibly by merging with other free and
+ * compatible adjacent regions.  It must be called with the context
+ * region lock held. The associated memory is not released (see
+ * kbase_free_alloced_region). Internal use only.
+ */
+int kbase_remove_va_region(struct kbase_va_region *reg)
+{
+	struct rb_node *rbprev;
+	struct kbase_va_region *prev = NULL;
+	struct rb_node *rbnext;
+	struct kbase_va_region *next = NULL;
+	struct rb_root *reg_rbtree = NULL;
+
+	int merged_front = 0;
+	int merged_back = 0;
+	int err = 0;
+
+	reg_rbtree = reg->rbtree;
+
+	/* Try to merge with the previous block first */
+	rbprev = rb_prev(&(reg->rblink));
+	if (rbprev) {
+		prev = rb_entry(rbprev, struct kbase_va_region, rblink);
+		if (prev->flags & KBASE_REG_FREE) {
+			/* We're compatible with the previous VMA,
+			 * merge with it */
+			WARN_ON((prev->flags & KBASE_REG_ZONE_MASK) !=
+					    (reg->flags & KBASE_REG_ZONE_MASK));
+			prev->nr_pages += reg->nr_pages;
+			rb_erase(&(reg->rblink), reg_rbtree);
+			reg = prev;
+			merged_front = 1;
+		}
+	}
+
+	/* Try to merge with the next block second */
+	/* Note we do the lookup here as the tree may have been rebalanced. */
+	rbnext = rb_next(&(reg->rblink));
+	if (rbnext) {
+		/* We're compatible with the next VMA, merge with it */
+		next = rb_entry(rbnext, struct kbase_va_region, rblink);
+		if (next->flags & KBASE_REG_FREE) {
+			WARN_ON((next->flags & KBASE_REG_ZONE_MASK) !=
+					    (reg->flags & KBASE_REG_ZONE_MASK));
+			next->start_pfn = reg->start_pfn;
+			next->nr_pages += reg->nr_pages;
+			rb_erase(&(reg->rblink), reg_rbtree);
+			merged_back = 1;
+			if (merged_front) {
+				/* We already merged with prev, free it */
+				kbase_free_alloced_region(reg);
+			}
+		}
+	}
+
+	/* If we failed to merge then we need to add a new block */
+	if (!(merged_front || merged_back)) {
+		/*
+		 * We didn't merge anything. Add a new free
+		 * placeholder and remove the original one.
+		 */
+		struct kbase_va_region *free_reg;
+
+		free_reg = kbase_alloc_free_region(reg_rbtree,
+				reg->start_pfn, reg->nr_pages,
+				reg->flags & KBASE_REG_ZONE_MASK);
+		if (!free_reg) {
+			err = -ENOMEM;
+			goto out;
+		}
+		rb_replace_node(&(reg->rblink), &(free_reg->rblink), reg_rbtree);
+	}
+
+ out:
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_remove_va_region);
+
+/**
+ * kbase_insert_va_region_nolock - Insert a VA region to the list,
+ * replacing the existing one.
+ *
+ * @new_reg: The new region to insert
+ * @at_reg: The region to replace
+ * @start_pfn: The Page Frame Number to insert at
+ * @nr_pages: The number of pages of the region
+ */
+static int kbase_insert_va_region_nolock(struct kbase_va_region *new_reg,
+		struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
+{
+	struct rb_root *reg_rbtree = NULL;
+	int err = 0;
+
+	reg_rbtree = at_reg->rbtree;
+
+	/* Must be a free region */
+	KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
+	/* start_pfn should be contained within at_reg */
+	KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages));
+	/* at least nr_pages from start_pfn should be contained within at_reg */
+	KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages);
+
+	new_reg->start_pfn = start_pfn;
+	new_reg->nr_pages = nr_pages;
+
+	/* New region exactly covers the old one, so swap in the new node and delete the old. */
+	if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
+		rb_replace_node(&(at_reg->rblink), &(new_reg->rblink),
+								reg_rbtree);
+		kbase_free_alloced_region(at_reg);
+	}
+	/* New region replaces the start of the old one, so insert before. */
+	else if (at_reg->start_pfn == start_pfn) {
+		at_reg->start_pfn += nr_pages;
+		KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
+		at_reg->nr_pages -= nr_pages;
+
+		kbase_region_tracker_insert(new_reg);
+	}
+	/* New region replaces the end of the old one, so insert after. */
+	else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
+		at_reg->nr_pages -= nr_pages;
+
+		kbase_region_tracker_insert(new_reg);
+	}
+	/* New region splits the old one, so insert and create new */
+	else {
+		struct kbase_va_region *new_front_reg;
+
+		new_front_reg = kbase_alloc_free_region(reg_rbtree,
+				at_reg->start_pfn,
+				start_pfn - at_reg->start_pfn,
+				at_reg->flags & KBASE_REG_ZONE_MASK);
+
+		if (new_front_reg) {
+			at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
+			at_reg->start_pfn = start_pfn + nr_pages;
+
+			kbase_region_tracker_insert(new_front_reg);
+			kbase_region_tracker_insert(new_reg);
+		} else {
+			err = -ENOMEM;
+		}
+	}
+
+	return err;
+}
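
The split case in kbase_insert_va_region_nolock() is easiest to see with concrete numbers; the pfn values below are invented purely for illustration: inserting [130, 150) into a free region [100, 200) leaves a front free region [100, 130) and a back free region [150, 200).

/* Illustration of the "new region splits the old one" case with made-up
 * pfn values: free region [100, 200) receives a new region [130, 150). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t at_start = 100, at_pages = 100;   /* existing free region */
	uint64_t new_start = 130, new_pages = 20;  /* region being inserted */

	uint64_t front_start = at_start;
	uint64_t front_pages = new_start - at_start;                /* 30 */
	uint64_t back_start  = new_start + new_pages;               /* 150 */
	uint64_t back_pages  = at_pages - new_pages - front_pages;  /* 50 */

	printf("front free: [%llu, %llu)\n",
	       (unsigned long long)front_start,
	       (unsigned long long)(front_start + front_pages));
	printf("new region: [%llu, %llu)\n",
	       (unsigned long long)new_start,
	       (unsigned long long)(new_start + new_pages));
	printf("back free:  [%llu, %llu)\n",
	       (unsigned long long)back_start,
	       (unsigned long long)(back_start + back_pages));
	return 0;
}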
+
+/**
+ * kbase_add_va_region - Add a VA region to the region list for a context.
+ *
+ * @kctx: kbase context containing the region
+ * @reg: the region to add
+ * @addr: the address to insert the region at
+ * @nr_pages: the number of pages in the region
+ * @align: the minimum alignment in pages
+ */
+int kbase_add_va_region(struct kbase_context *kctx,
+		struct kbase_va_region *reg, u64 addr,
+		size_t nr_pages, size_t align)
+{
+	int err = 0;
+	struct kbase_device *kbdev = kctx->kbdev;
+	int cpu_va_bits = kbase_get_num_cpu_va_bits(kctx);
+	int gpu_pc_bits =
+		kbdev->gpu_props.props.core_props.log2_program_counter_size;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(NULL != reg);
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* The executable allocation from the SAME_VA zone would already have an
+	 * appropriately aligned GPU VA chosen for it.
+	 */
+	if (!(reg->flags & KBASE_REG_GPU_NX) && !addr) {
+		if (cpu_va_bits > gpu_pc_bits) {
+			align = max(align, (size_t)((1ULL << gpu_pc_bits)
+						>> PAGE_SHIFT));
+		}
+	}
+
+	do {
+		err = kbase_add_va_region_rbtree(kbdev, reg, addr, nr_pages,
+				align);
+		if (err != -ENOMEM)
+			break;
+
+		/*
+		 * If the allocation is not from the same zone as JIT
+		 * then don't retry, we're out of VA and there is
+		 * nothing which can be done about it.
+		 */
+		if ((reg->flags & KBASE_REG_ZONE_MASK) !=
+				KBASE_REG_ZONE_CUSTOM_VA)
+			break;
+	} while (kbase_jit_evict(kctx));
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_add_va_region);
+
+/**
+ * kbase_add_va_region_rbtree - Insert a region into its corresponding rbtree
+ *
+ * Insert a region into the rbtree that was specified when the region was
+ * created. If addr is 0 a free area in the rbtree is used, otherwise the
+ * specified address is used.
+ *
+ * @kbdev: The kbase device
+ * @reg: The region to add
+ * @addr: The address to add the region at, or 0 to map at any available address
+ * @nr_pages: The size of the region in pages
+ * @align: The minimum alignment in pages
+ */
+int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
+		struct kbase_va_region *reg,
+		u64 addr, size_t nr_pages, size_t align)
+{
+	struct rb_root *rbtree = NULL;
+	struct kbase_va_region *tmp;
+	u64 gpu_pfn = addr >> PAGE_SHIFT;
+	int err = 0;
+
+	rbtree = reg->rbtree;
+
+	if (!align)
+		align = 1;
+
+	/* must be a power of 2 */
+	KBASE_DEBUG_ASSERT(is_power_of_2(align));
+	KBASE_DEBUG_ASSERT(nr_pages > 0);
+
+	/* Path 1: Map a specific address. Find the enclosing region,
+	 * which *must* be free.
+	 */
+	if (gpu_pfn) {
+		struct device *dev = kbdev->dev;
+
+		KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
+
+		tmp = find_region_enclosing_range_rbtree(rbtree, gpu_pfn,
+				nr_pages);
+		if (!tmp) {
+			dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
+			err = -ENOMEM;
+			goto exit;
+		}
+		if (!(tmp->flags & KBASE_REG_FREE)) {
+			dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n",
+					tmp->start_pfn, tmp->flags,
+					tmp->nr_pages, gpu_pfn, nr_pages);
+			err = -ENOMEM;
+			goto exit;
+		}
+
+		err = kbase_insert_va_region_nolock(reg, tmp, gpu_pfn,
+				nr_pages);
+		if (err) {
+			dev_warn(dev, "Failed to insert va region");
+			err = -ENOMEM;
+		}
+	} else {
+		/* Path 2: Map any free address which meets the requirements. */
+		u64 start_pfn;
+		size_t align_offset = align;
+		size_t align_mask = align - 1;
+
+		if ((reg->flags & KBASE_REG_TILER_ALIGN_TOP)) {
+			WARN(align > 1, "%s with align %lx might not be honored for KBASE_REG_TILER_ALIGN_TOP memory",
+					__func__,
+					(unsigned long)align);
+			align_mask  = reg->extent - 1;
+			align_offset = reg->extent - reg->initial_commit;
+		}
+
+		tmp = kbase_region_tracker_find_region_meeting_reqs(reg,
+				nr_pages, align_offset, align_mask,
+				&start_pfn);
+		if (tmp) {
+			err = kbase_insert_va_region_nolock(reg, tmp,
+							start_pfn, nr_pages);
+		} else {
+			err = -ENOMEM;
+		}
+	}
+
+exit:
+	return err;
+}
+
+/**
+ * @brief Initialize the internal region tracker data structure.
+ */
+static void kbase_region_tracker_ds_init(struct kbase_context *kctx,
+		struct kbase_va_region *same_va_reg,
+		struct kbase_va_region *custom_va_reg)
+{
+	kctx->reg_rbtree_same = RB_ROOT;
+	kbase_region_tracker_insert(same_va_reg);
+
+	/* Although custom_va_reg and exec_va_reg don't always exist,
+	 * initialize unconditionally because of the mem_view debugfs
+	 * implementation which relies on them being empty.
+	 *
+	 * The difference between the two is that the EXEC_VA region
+	 * is never initialized at this stage.
+	 */
+	kctx->reg_rbtree_custom = RB_ROOT;
+	kctx->reg_rbtree_exec = RB_ROOT;
+
+	if (custom_va_reg)
+		kbase_region_tracker_insert(custom_va_reg);
+}
+
+static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
+{
+	struct rb_node *rbnode;
+	struct kbase_va_region *reg;
+
+	do {
+		rbnode = rb_first(rbtree);
+		if (rbnode) {
+			rb_erase(rbnode, rbtree);
+			reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+			kbase_free_alloced_region(reg);
+		}
+	} while (rbnode);
+}
+
+void kbase_region_tracker_term(struct kbase_context *kctx)
+{
+	kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_same);
+	kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_custom);
+	kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_exec);
+}
+
+void kbase_region_tracker_term_rbtree(struct rb_root *rbtree)
+{
+	kbase_region_tracker_erase_rbtree(rbtree);
+}
+
+static size_t kbase_get_same_va_bits(struct kbase_context *kctx)
+{
+	return min(kbase_get_num_cpu_va_bits(kctx),
+			(size_t) kctx->kbdev->gpu_props.mmu.va_bits);
+}
+
+int kbase_region_tracker_init(struct kbase_context *kctx)
+{
+	struct kbase_va_region *same_va_reg;
+	struct kbase_va_region *custom_va_reg = NULL;
+	size_t same_va_bits = kbase_get_same_va_bits(kctx);
+	u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
+	u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
+	u64 same_va_pages;
+	int err;
+
+	/* Take the lock as kbase_free_alloced_region requires it */
+	kbase_gpu_vm_lock(kctx);
+
+	same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
+	/* all have SAME_VA */
+	same_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 1,
+			same_va_pages,
+			KBASE_REG_ZONE_SAME_VA);
+
+	if (!same_va_reg) {
+		err = -ENOMEM;
+		goto fail_unlock;
+	}
+
+#ifdef CONFIG_64BIT
+	/* 32-bit clients have custom VA zones */
+	if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+#endif
+		if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) {
+			err = -EINVAL;
+			goto fail_free_same_va;
+		}
+		/* If the current size of TMEM is out of range of the
+		 * virtual address space addressable by the MMU then
+		 * we should shrink it to fit
+		 */
+		if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
+			custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
+
+		custom_va_reg = kbase_alloc_free_region(
+				&kctx->reg_rbtree_custom,
+				KBASE_REG_ZONE_CUSTOM_VA_BASE,
+				custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
+
+		if (!custom_va_reg) {
+			err = -ENOMEM;
+			goto fail_free_same_va;
+		}
+#ifdef CONFIG_64BIT
+	} else {
+		custom_va_size = 0;
+	}
+#endif
+
+	kbase_region_tracker_ds_init(kctx, same_va_reg, custom_va_reg);
+
+	kctx->same_va_end = same_va_pages + 1;
+	kctx->gpu_va_end = kctx->same_va_end + custom_va_size;
+	kctx->exec_va_start = U64_MAX;
+	kctx->jit_va = false;
+
+
+	kbase_gpu_vm_unlock(kctx);
+	return 0;
+
+fail_free_same_va:
+	kbase_free_alloced_region(same_va_reg);
+fail_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return err;
+}
+
+#ifdef CONFIG_64BIT
+static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
+		u64 jit_va_pages)
+{
+	struct kbase_va_region *same_va;
+	struct kbase_va_region *custom_va_reg;
+	u64 same_va_bits = kbase_get_same_va_bits(kctx);
+	u64 total_va_size;
+
+	total_va_size = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
+
+	/* First verify that a JIT_VA zone has not been created already. */
+	if (kctx->jit_va)
+		return -EINVAL;
+
+	/*
+	 * Modify the same VA free region after creation. Be careful to ensure
+	 * that allocations haven't been made as they could cause an overlap
+	 * to happen with existing same VA allocations and the custom VA zone.
+	 */
+	same_va = kbase_region_tracker_find_region_base_address(kctx,
+			PAGE_SIZE);
+	if (!same_va)
+		return -ENOMEM;
+
+	if (same_va->nr_pages < jit_va_pages || kctx->same_va_end < jit_va_pages)
+		return -ENOMEM;
+
+	/* It's safe to adjust the same VA zone now */
+	same_va->nr_pages -= jit_va_pages;
+	kctx->same_va_end -= jit_va_pages;
+
+	/*
+	 * Create a custom VA zone at the end of the VA for allocations which
+	 * JIT can use so it doesn't have to allocate VA from the kernel.
+	 */
+	custom_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+				kctx->same_va_end,
+				jit_va_pages,
+				KBASE_REG_ZONE_CUSTOM_VA);
+
+	/*
+	 * The context will be destroyed if we fail here so no point
+	 * reverting the change we made to same_va.
+	 */
+	if (!custom_va_reg)
+		return -ENOMEM;
+
+	kbase_region_tracker_insert(custom_va_reg);
+	return 0;
+}
+#endif
+
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
+		u8 max_allocations, u8 trim_level)
+{
+	int err = 0;
+
+	if (trim_level > 100)
+		return -EINVAL;
+
+	kbase_gpu_vm_lock(kctx);
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+		err = kbase_region_tracker_init_jit_64(kctx, jit_va_pages);
+#endif
+	/*
+	 * Nothing to do for 32-bit clients, JIT uses the existing
+	 * custom VA zone.
+	 */
+
+	if (!err) {
+		kctx->jit_max_allocations = max_allocations;
+		kctx->trim_level = trim_level;
+		kctx->jit_va = true;
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return err;
+}
+
+int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages)
+{
+	struct kbase_va_region *shrinking_va_reg;
+	struct kbase_va_region *exec_va_reg;
+	u64 exec_va_start, exec_va_base_addr;
+	int err;
+
+	/* The EXEC_VA zone shall be created by making space at the end of the
+	 * address space. Firstly, verify that the number of EXEC_VA pages
+	 * requested by the client is reasonable and then make sure that it is
+	 * not greater than the address space itself before calculating the base
+	 * address of the new zone.
+	 */
+	if (exec_va_pages == 0 || exec_va_pages > KBASE_REG_ZONE_EXEC_VA_MAX_PAGES)
+		return -EINVAL;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* First verify that a JIT_VA zone has not been created already. */
+	if (kctx->jit_va) {
+		err = -EPERM;
+		goto exit_unlock;
+	}
+
+	if (exec_va_pages > kctx->gpu_va_end) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	exec_va_start = kctx->gpu_va_end - exec_va_pages;
+	exec_va_base_addr = exec_va_start << PAGE_SHIFT;
+
+	shrinking_va_reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+			exec_va_base_addr);
+	if (!shrinking_va_reg) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	/* Make sure that the EXEC_VA region is still uninitialized */
+	if ((shrinking_va_reg->flags & KBASE_REG_ZONE_MASK) ==
+			KBASE_REG_ZONE_EXEC_VA) {
+		err = -EPERM;
+		goto exit_unlock;
+	}
+
+	if (shrinking_va_reg->nr_pages <= exec_va_pages) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	exec_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_exec,
+			exec_va_start,
+			exec_va_pages,
+			KBASE_REG_ZONE_EXEC_VA);
+	if (!exec_va_reg) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
+	shrinking_va_reg->nr_pages -= exec_va_pages;
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+		kctx->same_va_end -= exec_va_pages;
+#endif
+	kctx->exec_va_start = exec_va_start;
+
+	kbase_region_tracker_insert(exec_va_reg);
+	err = 0;
+
+exit_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return err;
+}
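
A quick numeric check of the EXEC_VA placement above, with invented sizes (4 KiB pages assumed): the zone is carved from the top of the GPU VA range, so its base is simply gpu_va_end minus the requested page count, shifted into bytes.

/* Invented sizes: gpu_va_end of 0x100000 pages, 0x1000 EXEC_VA pages. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t gpu_va_end = 0x100000;  /* end of GPU VA, in pages */
	uint64_t exec_va_pages = 0x1000;

	uint64_t exec_va_start = gpu_va_end - exec_va_pages;       /* 0xff000 */
	uint64_t exec_va_base_addr = exec_va_start << PAGE_SHIFT;  /* 0xff000000 */

	printf("EXEC_VA zone: pfns [0x%llx, 0x%llx), base addr 0x%llx\n",
	       (unsigned long long)exec_va_start,
	       (unsigned long long)gpu_va_end,
	       (unsigned long long)exec_va_base_addr);
	return 0;
}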
+
+
+int kbase_mem_init(struct kbase_device *kbdev)
+{
+	struct kbasep_mem_device *memdev;
+	int ret;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	memdev = &kbdev->memdev;
+	kbdev->mem_pool_max_size_default = KBASE_MEM_POOL_MAX_SIZE_KCTX;
+
+	/* Initialize memory usage */
+	atomic_set(&memdev->used_pages, 0);
+
+	ret = kbase_mem_pool_init(&kbdev->mem_pool,
+			KBASE_MEM_POOL_MAX_SIZE_KBDEV,
+			KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
+			kbdev,
+			NULL);
+	if (ret)
+		return ret;
+
+	ret = kbase_mem_pool_init(&kbdev->lp_mem_pool,
+			(KBASE_MEM_POOL_MAX_SIZE_KBDEV >> 9),
+			KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER,
+			kbdev,
+			NULL);
+	if (ret)
+		kbase_mem_pool_term(&kbdev->mem_pool);
+
+	return ret;
+}
+
+void kbase_mem_halt(struct kbase_device *kbdev)
+{
+	CSTD_UNUSED(kbdev);
+}
+
+void kbase_mem_term(struct kbase_device *kbdev)
+{
+	struct kbasep_mem_device *memdev;
+	int pages;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	memdev = &kbdev->memdev;
+
+	pages = atomic_read(&memdev->used_pages);
+	if (pages != 0)
+		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
+
+	kbase_mem_pool_term(&kbdev->mem_pool);
+	kbase_mem_pool_term(&kbdev->lp_mem_pool);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_term);
+
+
+
+
+/**
+ * @brief Allocate a free region object.
+ *
+ * The allocated object is not part of any list yet, and is flagged as
+ * KBASE_REG_FREE. No mapping is allocated yet.
+ *
+ * zone is KBASE_REG_ZONE_CUSTOM_VA or KBASE_REG_ZONE_SAME_VA.
+ *
+ */
+struct kbase_va_region *kbase_alloc_free_region(struct rb_root *rbtree,
+		u64 start_pfn, size_t nr_pages, int zone)
+{
+	struct kbase_va_region *new_reg;
+
+	KBASE_DEBUG_ASSERT(rbtree != NULL);
+
+	/* zone argument should only contain zone related region flags */
+	KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0);
+	KBASE_DEBUG_ASSERT(nr_pages > 0);
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE));
+
+	new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL);
+
+	if (!new_reg)
+		return NULL;
+
+	new_reg->cpu_alloc = NULL; /* no alloc bound yet */
+	new_reg->gpu_alloc = NULL; /* no alloc bound yet */
+	new_reg->rbtree = rbtree;
+	new_reg->flags = zone | KBASE_REG_FREE;
+
+	new_reg->flags |= KBASE_REG_GROWABLE;
+
+	new_reg->start_pfn = start_pfn;
+	new_reg->nr_pages = nr_pages;
+
+	INIT_LIST_HEAD(&new_reg->jit_node);
+
+	return new_reg;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_free_region);
+
+static struct kbase_context *kbase_reg_flags_to_kctx(
+		struct kbase_va_region *reg)
+{
+	struct kbase_context *kctx = NULL;
+	struct rb_root *rbtree = reg->rbtree;
+
+	switch (reg->flags & KBASE_REG_ZONE_MASK) {
+	case KBASE_REG_ZONE_CUSTOM_VA:
+		kctx = container_of(rbtree, struct kbase_context,
+				reg_rbtree_custom);
+		break;
+	case KBASE_REG_ZONE_SAME_VA:
+		kctx = container_of(rbtree, struct kbase_context,
+				reg_rbtree_same);
+		break;
+	case KBASE_REG_ZONE_EXEC_VA:
+		kctx = container_of(rbtree, struct kbase_context,
+				reg_rbtree_exec);
+		break;
+	default:
+		WARN(1, "Unknown zone in region: flags=0x%lx\n", reg->flags);
+		break;
+	}
+
+	return kctx;
+}
+
+/**
+ * @brief Free a region object.
+ *
+ * The described region must be freed of any mapping.
+ *
+ * If the region is not flagged as KBASE_REG_FREE, the region's
+ * alloc object will be released.
+ * It is a bug if no alloc object exists for non-free regions.
+ *
+ */
+void kbase_free_alloced_region(struct kbase_va_region *reg)
+{
+	if (!(reg->flags & KBASE_REG_FREE)) {
+		struct kbase_context *kctx = kbase_reg_flags_to_kctx(reg);
+
+		if (WARN_ON(!kctx))
+			return;
+
+
+		mutex_lock(&kctx->jit_evict_lock);
+
+		/*
+		 * The physical allocation should have been removed from the
+		 * eviction list before this function is called. However, in the
+		 * case of abnormal process termination or the app leaking the
+		 * memory, kbase_mem_free_region is not called, so it can still be
+		 * on the list at termination time of the region tracker.
+		 */
+		if (!list_empty(&reg->gpu_alloc->evict_node)) {
+			mutex_unlock(&kctx->jit_evict_lock);
+
+			/*
+			 * Unlink the physical allocation before unmaking it
+			 * evictable so that the allocation isn't grown back to
+			 * its last backed size as we're going to unmap it
+			 * anyway.
+			 */
+			reg->cpu_alloc->reg = NULL;
+			if (reg->cpu_alloc != reg->gpu_alloc)
+				reg->gpu_alloc->reg = NULL;
+
+			/*
+			 * If a region has been made evictable then we must
+			 * unmake it before trying to free it.
+			 * If the memory hasn't been reclaimed it will be
+			 * unmapped and freed below, if it has been reclaimed
+			 * then the operations below are no-ops.
+			 */
+			if (reg->flags & KBASE_REG_DONT_NEED) {
+				KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+						   KBASE_MEM_TYPE_NATIVE);
+				kbase_mem_evictable_unmake(reg->gpu_alloc);
+			}
+		} else {
+			mutex_unlock(&kctx->jit_evict_lock);
+		}
+
+		/*
+		 * Remove the region from the sticky resource metadata
+		 * list should it be there.
+		 */
+		kbase_sticky_resource_release(kctx, NULL,
+				reg->start_pfn << PAGE_SHIFT);
+
+		kbase_mem_phy_alloc_put(reg->cpu_alloc);
+		kbase_mem_phy_alloc_put(reg->gpu_alloc);
+		/* To detect use-after-free in debug builds */
+		KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+	}
+	kfree(reg);
+}
+
+KBASE_EXPORT_TEST_API(kbase_free_alloced_region);
+
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)
+{
+	int err;
+	size_t i = 0;
+	unsigned long attr;
+	unsigned long mask = ~KBASE_REG_MEMATTR_MASK;
+	unsigned long gwt_mask = ~0;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	if (kctx->gwt_enabled)
+		gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+	if ((kctx->kbdev->system_coherency == COHERENCY_ACE) &&
+		(reg->flags & KBASE_REG_SHARE_BOTH))
+		attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA);
+	else
+		attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC);
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(NULL != reg);
+
+	err = kbase_add_va_region(kctx, reg, addr, nr_pages, align);
+	if (err)
+		return err;
+
+	if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+		u64 stride;
+		struct kbase_mem_phy_alloc *alloc;
+
+		alloc = reg->gpu_alloc;
+		stride = alloc->imported.alias.stride;
+		KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
+		for (i = 0; i < alloc->imported.alias.nents; i++) {
+			if (alloc->imported.alias.aliased[i].alloc) {
+				err = kbase_mmu_insert_pages(kctx->kbdev,
+						&kctx->mmu,
+						reg->start_pfn + (i * stride),
+						alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset,
+						alloc->imported.alias.aliased[i].length,
+						reg->flags & gwt_mask,
+						kctx->as_nr);
+				if (err)
+					goto bad_insert;
+
+				kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
+			} else {
+				err = kbase_mmu_insert_single_page(kctx,
+					reg->start_pfn + i * stride,
+					kctx->aliasing_sink_page,
+					alloc->imported.alias.aliased[i].length,
+					(reg->flags & mask & gwt_mask) | attr);
+
+				if (err)
+					goto bad_insert;
+			}
+		}
+	} else {
+		err = kbase_mmu_insert_pages(kctx->kbdev,
+				&kctx->mmu,
+				reg->start_pfn,
+				kbase_get_gpu_phy_pages(reg),
+				kbase_reg_current_backed_size(reg),
+				reg->flags & gwt_mask,
+				kctx->as_nr);
+		if (err)
+			goto bad_insert;
+		kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc);
+	}
+
+	return err;
+
+bad_insert:
+	if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+		u64 stride;
+
+		stride = reg->gpu_alloc->imported.alias.stride;
+		KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+		while (i--)
+			if (reg->gpu_alloc->imported.alias.aliased[i].alloc) {
+				kbase_mmu_teardown_pages(kctx->kbdev,
+					&kctx->mmu,
+					reg->start_pfn + (i * stride),
+					reg->gpu_alloc->imported.alias.aliased[i].length,
+					kctx->as_nr);
+				kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+			}
+	}
+
+	kbase_remove_va_region(reg);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_mmap);
+
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc, bool writeable);
+
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+	int err;
+
+	if (reg->start_pfn == 0)
+		return 0;
+
+	if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+		size_t i;
+
+		err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+				reg->start_pfn, reg->nr_pages, kctx->as_nr);
+		KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
+		for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
+			if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
+				kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+	} else {
+		err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+			reg->start_pfn, kbase_reg_current_backed_size(reg),
+			kctx->as_nr);
+		kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
+	}
+
+	if (reg->gpu_alloc && reg->gpu_alloc->type ==
+			KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+		struct kbase_alloc_import_user_buf *user_buf =
+			&reg->gpu_alloc->imported.user_buf;
+
+		if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
+			user_buf->current_mapping_usage_count &=
+				~PINNED_ON_IMPORT;
+
+			kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc,
+					(reg->flags & KBASE_REG_GPU_WR));
+		}
+	}
+
+	if (err)
+		return err;
+
+	err = kbase_remove_va_region(reg);
+	return err;
+}
+
+static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(
+		struct kbase_context *kctx,
+		unsigned long uaddr, size_t size, u64 *offset)
+{
+	struct vm_area_struct *vma;
+	struct kbase_cpu_mapping *map;
+	unsigned long vm_pgoff_in_region;
+	unsigned long vm_off_in_region;
+	unsigned long map_start;
+	size_t map_size;
+
+	lockdep_assert_held(&current->mm->mmap_lock);
+
+	if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
+		return NULL;
+
+	vma = find_vma_intersection(current->mm, uaddr, uaddr+size);
+
+	if (!vma || vma->vm_start > uaddr)
+		return NULL;
+	if (vma->vm_ops != &kbase_vm_ops)
+		/* Not ours! */
+		return NULL;
+
+	map = vma->vm_private_data;
+
+	if (map->kctx != kctx)
+		/* Not from this context! */
+		return NULL;
+
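+	/*
+	 * Work out where this VMA lies within the whole region so that uaddr
+	 * can be converted into an offset from the start of the region.
+	 */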
+	vm_pgoff_in_region = vma->vm_pgoff - map->region->start_pfn;
+	vm_off_in_region = vm_pgoff_in_region << PAGE_SHIFT;
+	map_start = vma->vm_start - vm_off_in_region;
+	map_size = map->region->nr_pages << PAGE_SHIFT;
+
+	if ((uaddr + size) > (map_start + map_size))
+		/* Not within the CPU mapping */
+		return NULL;
+
+	*offset = (uaddr - vma->vm_start) + vm_off_in_region;
+
+	return map;
+}
+
+int kbasep_find_enclosing_cpu_mapping_offset(
+		struct kbase_context *kctx,
+		unsigned long uaddr, size_t size, u64 *offset)
+{
+	struct kbase_cpu_mapping *map;
+
+	kbase_os_mem_map_lock(kctx);
+
+	map = kbasep_find_enclosing_cpu_mapping(kctx, uaddr, size, offset);
+
+	kbase_os_mem_map_unlock(kctx);
+
+	if (!map)
+		return -EINVAL;
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset);
+
+int kbasep_find_enclosing_gpu_mapping_start_and_offset(struct kbase_context *kctx,
+		u64 gpu_addr, size_t size, u64 *start, u64 *offset)
+{
+	struct kbase_va_region *region;
+
+	kbase_gpu_vm_lock(kctx);
+
+	region = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+
+	if (!region) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EINVAL;
+	}
+
+	*start = region->start_pfn << PAGE_SHIFT;
+
+	*offset = gpu_addr - *start;
+
+	if (((region->start_pfn + region->nr_pages) << PAGE_SHIFT) < (gpu_addr + size)) {
+		kbase_gpu_vm_unlock(kctx);
+		return -EINVAL;
+	}
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbasep_find_enclosing_gpu_mapping_start_and_offset);
+
+void kbase_sync_single(struct kbase_context *kctx,
+		struct tagged_addr t_cpu_pa, struct tagged_addr t_gpu_pa,
+		off_t offset, size_t size, enum kbase_sync_type sync_fn)
+{
+	struct page *cpu_page;
+	phys_addr_t cpu_pa = as_phys_addr_t(t_cpu_pa);
+	phys_addr_t gpu_pa = as_phys_addr_t(t_gpu_pa);
+
+	cpu_page = pfn_to_page(PFN_DOWN(cpu_pa));
+
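+	/*
+	 * When CPU and GPU share the same physical page a DMA sync of that
+	 * page is enough; with separate backing pages the data is copied
+	 * between them, with DMA syncs around the copy as required.
+	 */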
+	if (likely(cpu_pa == gpu_pa)) {
+		dma_addr_t dma_addr;
+
+		BUG_ON(!cpu_page);
+		BUG_ON(offset + size > PAGE_SIZE);
+
+		dma_addr = kbase_dma_addr(cpu_page) + offset;
+		if (sync_fn == KBASE_SYNC_TO_CPU)
+			dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr,
+					size, DMA_BIDIRECTIONAL);
+		else if (sync_fn == KBASE_SYNC_TO_DEVICE)
+			dma_sync_single_for_device(kctx->kbdev->dev, dma_addr,
+					size, DMA_BIDIRECTIONAL);
+	} else {
+		void *src = NULL;
+		void *dst = NULL;
+		struct page *gpu_page;
+
+		if (WARN(!gpu_pa, "No GPU PA found for infinite cache op"))
+			return;
+
+		gpu_page = pfn_to_page(PFN_DOWN(gpu_pa));
+
+		if (sync_fn == KBASE_SYNC_TO_DEVICE) {
+			src = ((unsigned char *)kmap(cpu_page)) + offset;
+			dst = ((unsigned char *)kmap(gpu_page)) + offset;
+		} else if (sync_fn == KBASE_SYNC_TO_CPU) {
+			dma_sync_single_for_cpu(kctx->kbdev->dev,
+					kbase_dma_addr(gpu_page) + offset,
+					size, DMA_BIDIRECTIONAL);
+			src = ((unsigned char *)kmap(gpu_page)) + offset;
+			dst = ((unsigned char *)kmap(cpu_page)) + offset;
+		}
+		memcpy(dst, src, size);
+		kunmap(gpu_page);
+		kunmap(cpu_page);
+		if (sync_fn == KBASE_SYNC_TO_DEVICE)
+			dma_sync_single_for_device(kctx->kbdev->dev,
+					kbase_dma_addr(gpu_page) + offset,
+					size, DMA_BIDIRECTIONAL);
+	}
+}
+
+static int kbase_do_syncset(struct kbase_context *kctx,
+		struct basep_syncset *sset, enum kbase_sync_type sync_fn)
+{
+	int err = 0;
+	struct kbase_va_region *reg;
+	struct kbase_cpu_mapping *map;
+	unsigned long start;
+	size_t size;
+	struct tagged_addr *cpu_pa;
+	struct tagged_addr *gpu_pa;
+	u64 page_off, page_count;
+	u64 i;
+	u64 offset;
+
+	kbase_os_mem_map_lock(kctx);
+	kbase_gpu_vm_lock(kctx);
+
+	/* find the region where the virtual address is contained */
+	reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+			sset->mem_handle.basep.handle);
+	if (!reg) {
+		dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX",
+				sset->mem_handle.basep.handle);
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (!(reg->flags & KBASE_REG_CPU_CACHED) ||
+			kbase_mem_is_imported(reg->gpu_alloc->type))
+		goto out_unlock;
+
+	start = (uintptr_t)sset->user_addr;
+	size = (size_t)sset->size;
+
+	map = kbasep_find_enclosing_cpu_mapping(kctx, start, size, &offset);
+	if (!map) {
+		dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX",
+				start, sset->mem_handle.basep.handle);
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	page_off = offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
+	page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	cpu_pa = kbase_get_cpu_phy_pages(reg);
+	gpu_pa = kbase_get_gpu_phy_pages(reg);
+
+	if (page_off > reg->nr_pages ||
+			page_off + page_count > reg->nr_pages) {
+		/* Sync overflows the region */
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
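+	/*
+	 * The range is synced in up to three parts: a possibly partial first
+	 * page, whole middle pages, and a possibly partial last page.
+	 */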
+	/* Sync first page */
+	if (as_phys_addr_t(cpu_pa[page_off])) {
+		size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+
+		kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off],
+				offset, sz, sync_fn);
+	}
+
+	/* Sync middle pages (if any) */
+	for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+		/* we grow upwards, so bail on first non-present page */
+		if (!as_phys_addr_t(cpu_pa[page_off + i]))
+			break;
+
+		kbase_sync_single(kctx, cpu_pa[page_off + i],
+				gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn);
+	}
+
+	/* Sync last page (if any) */
+	if (page_count > 1 &&
+	    as_phys_addr_t(cpu_pa[page_off + page_count - 1])) {
+		size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1;
+
+		kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1],
+				gpu_pa[page_off + page_count - 1], 0, sz,
+				sync_fn);
+	}
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	kbase_os_mem_map_unlock(kctx);
+	return err;
+}
+
+int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset)
+{
+	int err = -EINVAL;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	KBASE_DEBUG_ASSERT(sset != NULL);
+
+	if (sset->mem_handle.basep.handle & ~PAGE_MASK) {
+		dev_warn(kctx->kbdev->dev,
+				"mem_handle: passed parameter is invalid");
+		return -EINVAL;
+	}
+
+	switch (sset->type) {
+	case BASE_SYNCSET_OP_MSYNC:
+		err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_DEVICE);
+		break;
+
+	case BASE_SYNCSET_OP_CSYNC:
+		err = kbase_do_syncset(kctx, sset, KBASE_SYNC_TO_CPU);
+		break;
+
+	default:
+		dev_warn(kctx->kbdev->dev, "Unknown msync op %d\n", sset->type);
+		break;
+	}
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_sync_now);
+
+/* vm lock must be held */
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+	int err;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(NULL != reg);
+	lockdep_assert_held(&kctx->reg_lock);
+
+	if (reg->flags & KBASE_REG_JIT) {
+		dev_warn(kctx->kbdev->dev, "Attempt to free JIT memory!\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Unlink the physical allocation before unmaking it evictable so
+	 * that the allocation isn't grown back to its last backed size
+	 * as we're going to unmap it anyway.
+	 */
+	reg->cpu_alloc->reg = NULL;
+	if (reg->cpu_alloc != reg->gpu_alloc)
+		reg->gpu_alloc->reg = NULL;
+
+	/*
+	 * If a region has been made evictable then we must unmake it
+	 * before trying to free it.
+	 * If the memory hasn't been reclaimed it will be unmapped and freed
+	 * below, if it has been reclaimed then the operations below are no-ops.
+	 */
+	if (reg->flags & KBASE_REG_DONT_NEED) {
+		KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
+				   KBASE_MEM_TYPE_NATIVE);
+		kbase_mem_evictable_unmake(reg->gpu_alloc);
+	}
+
+	err = kbase_gpu_munmap(kctx, reg);
+	if (err) {
+		dev_warn(kctx->kbdev->dev, "Could not unmap from the GPU...\n");
+		goto out;
+	}
+
+	/* This will also free the physical pages */
+	kbase_free_alloced_region(reg);
+
+ out:
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free_region);
+
+/**
+ * @brief Free the region from the GPU and unregister it.
+ *
+ * This function implements the free operation on a memory segment.
+ * It will loudly fail if called with outstanding mappings.
+ */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
+{
+	int err = 0;
+	struct kbase_va_region *reg;
+
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+
+	if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE)) {
+		dev_warn(kctx->kbdev->dev, "kbase_mem_free: gpu_addr parameter is invalid");
+		return -EINVAL;
+	}
+
+	if (0 == gpu_addr) {
+		dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n");
+		return -EINVAL;
+	}
+	kbase_gpu_vm_lock(kctx);
+
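+	/*
+	 * Addresses in the cookie range refer to allocations the user has not
+	 * mmapped yet: unlink the pending region, return the cookie and free
+	 * the region directly.
+	 */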
+	if (gpu_addr >= BASE_MEM_COOKIE_BASE &&
+	    gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
+		int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
+
+		reg = kctx->pending_regions[cookie];
+		if (!reg) {
+			err = -EINVAL;
+			goto out_unlock;
+		}
+
+		/* ask to unlink the cookie as we'll free it */
+
+		kctx->pending_regions[cookie] = NULL;
+		kctx->cookies |= (1UL << cookie);
+
+		kbase_free_alloced_region(reg);
+	} else {
+		/* A real GPU va */
+		/* Validate the region */
+		reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+		if (!reg || (reg->flags & KBASE_REG_FREE)) {
+			dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
+					gpu_addr);
+			err = -EINVAL;
+			goto out_unlock;
+		}
+
+		if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) {
+			/* SAME_VA must be freed through munmap */
+			dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__,
+					gpu_addr);
+			err = -EINVAL;
+			goto out_unlock;
+		}
+		err = kbase_mem_free_region(kctx, reg);
+	}
+
+ out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_free);
+
+int kbase_update_region_flags(struct kbase_context *kctx,
+		struct kbase_va_region *reg, unsigned long flags)
+{
+	KBASE_DEBUG_ASSERT(NULL != reg);
+	KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
+
+	reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
+	/* all memory is now growable */
+	reg->flags |= KBASE_REG_GROWABLE;
+
+	if (flags & BASE_MEM_GROW_ON_GPF)
+		reg->flags |= KBASE_REG_PF_GROW;
+
+	if (flags & BASE_MEM_PROT_CPU_WR)
+		reg->flags |= KBASE_REG_CPU_WR;
+
+	if (flags & BASE_MEM_PROT_CPU_RD)
+		reg->flags |= KBASE_REG_CPU_RD;
+
+	if (flags & BASE_MEM_PROT_GPU_WR)
+		reg->flags |= KBASE_REG_GPU_WR;
+
+	if (flags & BASE_MEM_PROT_GPU_RD)
+		reg->flags |= KBASE_REG_GPU_RD;
+
+	if (0 == (flags & BASE_MEM_PROT_GPU_EX))
+		reg->flags |= KBASE_REG_GPU_NX;
+
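+	/*
+	 * System coherency can only be honoured on coherent hardware: reject
+	 * allocations that require it on a non-coherent device (unless the
+	 * GPU mapping is uncached), otherwise mark the region shareable.
+	 */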
+	if (!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED &&
+				!(flags & BASE_MEM_UNCACHED_GPU))
+			return -EINVAL;
+	} else if (flags & (BASE_MEM_COHERENT_SYSTEM |
+			BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
+		reg->flags |= KBASE_REG_SHARE_BOTH;
+	}
+
+	if (!(reg->flags & KBASE_REG_SHARE_BOTH) &&
+			flags & BASE_MEM_COHERENT_LOCAL) {
+		reg->flags |= KBASE_REG_SHARE_IN;
+	}
+
+	if (flags & BASE_MEM_TILER_ALIGN_TOP)
+		reg->flags |= KBASE_REG_TILER_ALIGN_TOP;
+
+
+	/* Set up default MEMATTR usage */
+	if (!(reg->flags & KBASE_REG_GPU_CACHED)) {
+		if (kctx->kbdev->mmu_mode->flags &
+				KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+			/* Override shareability, and MEMATTR for uncached */
+			reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+			reg->flags |= KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_NON_CACHEABLE);
+		} else {
+			dev_warn(kctx->kbdev->dev,
+				"Can't allocate GPU uncached memory due to MMU in Legacy Mode\n");
+			return -EINVAL;
+		}
+	} else if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
+		(reg->flags & KBASE_REG_SHARE_BOTH)) {
+		reg->flags |=
+			KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE);
+	} else {
+		reg->flags |=
+			KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
+	}
+
+	if (flags & BASE_MEM_PERMANENT_KERNEL_MAPPING)
+		reg->flags |= KBASE_REG_PERMANENT_KERNEL_MAPPING;
+
+	if (flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE)
+		reg->flags |= KBASE_REG_GPU_VA_SAME_4GB_PAGE;
+
+	return 0;
+}
+
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
+		size_t nr_pages_requested)
+{
+	int new_page_count __maybe_unused;
+	size_t nr_left = nr_pages_requested;
+	int res;
+	struct kbase_context *kctx;
+	struct tagged_addr *tp;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+	KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+
+	if (alloc->reg) {
+		if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents)
+			goto invalid_request;
+	}
+
+	kctx = alloc->imported.native.kctx;
+
+	if (nr_pages_requested == 0)
+		goto done; /*nothing to do*/
+
+	new_page_count = kbase_atomic_add_pages(
+			nr_pages_requested, &kctx->used_pages);
+	kbase_atomic_add_pages(nr_pages_requested,
+			       &kctx->kbdev->memdev.used_pages);
+
+	/* Increase mm counters before we allocate pages so that this
+	 * allocation is visible to the OOM killer */
+	kbase_process_page_usage_inc(kctx, nr_pages_requested);
+
+	tp = alloc->pages + alloc->nents;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	/* Check whether enough pages were requested to allocate at least one
+	 * large page (512 * 4KB = 2MB).
+	 */
+	if (nr_left >= (SZ_2M / SZ_4K)) {
+		int nr_lp = nr_left / (SZ_2M / SZ_4K);
+
+		res = kbase_mem_pool_alloc_pages(&kctx->lp_mem_pool,
+						 nr_lp * (SZ_2M / SZ_4K),
+						 tp,
+						 true);
+
+		if (res > 0) {
+			nr_left -= res;
+			tp += res;
+		}
+
+		if (nr_left) {
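+			/*
+			 * Satisfy as much of the remainder as possible from
+			 * 2MB pages that are already partially sub-allocated.
+			 */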
+			struct kbase_sub_alloc *sa, *temp_sa;
+
+			spin_lock(&kctx->mem_partials_lock);
+
+			list_for_each_entry_safe(sa, temp_sa,
+						 &kctx->mem_partials, link) {
+				int pidx = 0;
+
+				while (nr_left) {
+					pidx = find_next_zero_bit(sa->sub_pages,
+								  SZ_2M / SZ_4K,
+								  pidx);
+					bitmap_set(sa->sub_pages, pidx, 1);
+					*tp++ = as_tagged_tag(page_to_phys(sa->page +
+									   pidx),
+							      FROM_PARTIAL);
+					nr_left--;
+
+					if (bitmap_full(sa->sub_pages, SZ_2M / SZ_4K)) {
+						/* unlink from partial list when full */
+						list_del_init(&sa->link);
+						break;
+					}
+				}
+			}
+			spin_unlock(&kctx->mem_partials_lock);
+		}
+
+		/* Only if we actually have a chunk of fewer than 512 pages
+		 * left. If more are left it means the 2MB allocation above
+		 * failed, so there is no point in retrying here.
+		 */
+		if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+			/* create a new partial and suballocate the rest from it */
+			struct page *np = NULL;
+
+			do {
+				int err;
+
+				np = kbase_mem_pool_alloc(&kctx->lp_mem_pool);
+				if (np)
+					break;
+				err = kbase_mem_pool_grow(&kctx->lp_mem_pool, 1);
+				if (err)
+					break;
+			} while (1);
+
+			if (np) {
+				int i;
+				struct kbase_sub_alloc *sa;
+				struct page *p;
+
+				sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+				if (!sa) {
+					kbase_mem_pool_free(&kctx->lp_mem_pool, np, false);
+					goto no_new_partial;
+				}
+
+				/* store pointers back to the control struct */
+				np->lru.next = (void *)sa;
+				for (p = np; p < np + SZ_2M / SZ_4K; p++)
+					p->lru.prev = (void *)np;
+				INIT_LIST_HEAD(&sa->link);
+				bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+				sa->page = np;
+
+				for (i = 0; i < nr_left; i++)
+					*tp++ = as_tagged_tag(page_to_phys(np + i), FROM_PARTIAL);
+
+				bitmap_set(sa->sub_pages, 0, nr_left);
+				nr_left = 0;
+
+				/* expose for later use */
+				spin_lock(&kctx->mem_partials_lock);
+				list_add(&sa->link, &kctx->mem_partials);
+				spin_unlock(&kctx->mem_partials_lock);
+			}
+		}
+	}
+no_new_partial:
+#endif
+
+	if (nr_left) {
+		res = kbase_mem_pool_alloc_pages(&kctx->mem_pool,
+						 nr_left,
+						 tp,
+						 false);
+		if (res <= 0)
+			goto alloc_failed;
+	}
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kctx->id,
+			(u64)new_page_count);
+
+	alloc->nents += nr_pages_requested;
+done:
+	return 0;
+
+alloc_failed:
+	/* Roll back if we got one or more 2MB pages but failed later */
+	if (nr_left != nr_pages_requested) {
+		size_t nr_pages_to_free = nr_pages_requested - nr_left;
+
+		alloc->nents += nr_pages_to_free;
+
+		kbase_process_page_usage_inc(kctx, nr_pages_to_free);
+		kbase_atomic_add_pages(nr_pages_to_free, &kctx->used_pages);
+		kbase_atomic_add_pages(nr_pages_to_free,
+			       &kctx->kbdev->memdev.used_pages);
+
+		kbase_free_phy_pages_helper(alloc, nr_pages_to_free);
+	}
+
+	kbase_process_page_usage_dec(kctx, nr_pages_requested);
+	kbase_atomic_sub_pages(nr_pages_requested, &kctx->used_pages);
+	kbase_atomic_sub_pages(nr_pages_requested,
+			       &kctx->kbdev->memdev.used_pages);
+
+invalid_request:
+	return -ENOMEM;
+}
+
+struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
+		struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
+		size_t nr_pages_requested,
+		struct kbase_sub_alloc **prealloc_sa)
+{
+	int new_page_count __maybe_unused;
+	size_t nr_left = nr_pages_requested;
+	int res;
+	struct kbase_context *kctx;
+	struct tagged_addr *tp;
+	struct tagged_addr *new_pages = NULL;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+	KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+
+	lockdep_assert_held(&pool->pool_lock);
+
+#if !defined(CONFIG_MALI_2MB_ALLOC)
+	WARN_ON(pool->order);
+#endif
+
+	if (alloc->reg) {
+		if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents)
+			goto invalid_request;
+	}
+
+	kctx = alloc->imported.native.kctx;
+
+	lockdep_assert_held(&kctx->mem_partials_lock);
+
+	if (nr_pages_requested == 0)
+		goto done; /*nothing to do*/
+
+	new_page_count = kbase_atomic_add_pages(
+			nr_pages_requested, &kctx->used_pages);
+	kbase_atomic_add_pages(nr_pages_requested,
+			       &kctx->kbdev->memdev.used_pages);
+
+	/* Increase mm counters before we allocate pages so that this
+	 * allocation is visible to the OOM killer
+	 */
+	kbase_process_page_usage_inc(kctx, nr_pages_requested);
+
+	tp = alloc->pages + alloc->nents;
+	new_pages = tp;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	if (pool->order) {
+		int nr_lp = nr_left / (SZ_2M / SZ_4K);
+
+		res = kbase_mem_pool_alloc_pages_locked(pool,
+						 nr_lp * (SZ_2M / SZ_4K),
+						 tp);
+
+		if (res > 0) {
+			nr_left -= res;
+			tp += res;
+		}
+
+		if (nr_left) {
+			struct kbase_sub_alloc *sa, *temp_sa;
+
+			list_for_each_entry_safe(sa, temp_sa,
+						 &kctx->mem_partials, link) {
+				int pidx = 0;
+
+				while (nr_left) {
+					pidx = find_next_zero_bit(sa->sub_pages,
+								  SZ_2M / SZ_4K,
+								  pidx);
+					bitmap_set(sa->sub_pages, pidx, 1);
+					*tp++ = as_tagged_tag(page_to_phys(
+							sa->page + pidx),
+							FROM_PARTIAL);
+					nr_left--;
+
+					if (bitmap_full(sa->sub_pages,
+							SZ_2M / SZ_4K)) {
+						/* unlink from partial list when
+						 * full
+						 */
+						list_del_init(&sa->link);
+						break;
+					}
+				}
+			}
+		}
+
+		/* Only if we actually have a chunk of fewer than 512 pages
+		 * left. If more are left it means the 2MB allocation above
+		 * failed, so there is no point in retrying here.
+		 */
+		if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+			/* create a new partial and suballocate the rest from it
+			 */
+			struct page *np = NULL;
+
+			np = kbase_mem_pool_alloc_locked(pool);
+
+			if (np) {
+				int i;
+				struct kbase_sub_alloc *const sa = *prealloc_sa;
+				struct page *p;
+
+				/* store pointers back to the control struct */
+				np->lru.next = (void *)sa;
+				for (p = np; p < np + SZ_2M / SZ_4K; p++)
+					p->lru.prev = (void *)np;
+				INIT_LIST_HEAD(&sa->link);
+				bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+				sa->page = np;
+
+				for (i = 0; i < nr_left; i++)
+					*tp++ = as_tagged_tag(
+							page_to_phys(np + i),
+							FROM_PARTIAL);
+
+				bitmap_set(sa->sub_pages, 0, nr_left);
+				nr_left = 0;
+				/* Indicate to user that we'll free this memory
+				 * later.
+				 */
+				*prealloc_sa = NULL;
+
+				/* expose for later use */
+				list_add(&sa->link, &kctx->mem_partials);
+			}
+		}
+		if (nr_left)
+			goto alloc_failed;
+	} else {
+#endif
+		res = kbase_mem_pool_alloc_pages_locked(pool,
+						 nr_left,
+						 tp);
+		if (res <= 0)
+			goto alloc_failed;
+#ifdef CONFIG_MALI_2MB_ALLOC
+	}
+#endif
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kctx->id,
+			(u64)new_page_count);
+
+	alloc->nents += nr_pages_requested;
+done:
+	return new_pages;
+
+alloc_failed:
+	/* Roll back if we got one or more 2MB pages but failed later */
+	if (nr_left != nr_pages_requested) {
+		size_t nr_pages_to_free = nr_pages_requested - nr_left;
+
+		struct tagged_addr *start_free = alloc->pages + alloc->nents;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+		if (pool->order) {
+			while (nr_pages_to_free) {
+				if (is_huge_head(*start_free)) {
+					kbase_mem_pool_free_pages_locked(
+						pool, 512,
+						start_free,
+						false, /* not dirty */
+						true); /* return to pool */
+					nr_pages_to_free -= 512;
+					start_free += 512;
+				} else if (is_partial(*start_free)) {
+					free_partial_locked(kctx, pool,
+							*start_free);
+					nr_pages_to_free--;
+					start_free++;
+				}
+			}
+		} else {
+#endif
+			kbase_mem_pool_free_pages_locked(pool,
+					nr_pages_to_free,
+					start_free,
+					false, /* not dirty */
+					true); /* return to pool */
+#ifdef CONFIG_MALI_2MB_ALLOC
+		}
+#endif
+	}
+
+	kbase_process_page_usage_dec(kctx, nr_pages_requested);
+	kbase_atomic_sub_pages(nr_pages_requested, &kctx->used_pages);
+	kbase_atomic_sub_pages(nr_pages_requested,
+			       &kctx->kbdev->memdev.used_pages);
+
+invalid_request:
+	return NULL;
+}
+
+static void free_partial(struct kbase_context *kctx, struct tagged_addr tp)
+{
+	struct page *p, *head_page;
+	struct kbase_sub_alloc *sa;
+
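+	/*
+	 * The 2MB head page's lru.next holds the kbase_sub_alloc descriptor
+	 * and every page's lru.prev points back at the head page (set up when
+	 * the partial was created), so both can be recovered from the page.
+	 */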
+	p = as_page(tp);
+	head_page = (struct page *)p->lru.prev;
+	sa = (struct kbase_sub_alloc *)head_page->lru.next;
+	spin_lock(&kctx->mem_partials_lock);
+	clear_bit(p - head_page, sa->sub_pages);
+	if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
+		list_del(&sa->link);
+		kbase_mem_pool_free(&kctx->lp_mem_pool, head_page, true);
+		kfree(sa);
+	} else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
+		   SZ_2M / SZ_4K - 1) {
+		/* expose the partial again */
+		list_add(&sa->link, &kctx->mem_partials);
+	}
+	spin_unlock(&kctx->mem_partials_lock);
+}
+
+int kbase_free_phy_pages_helper(
+	struct kbase_mem_phy_alloc *alloc,
+	size_t nr_pages_to_free)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	bool syncback;
+	bool reclaimed = (alloc->evicted != 0);
+	struct tagged_addr *start_free;
+	int new_page_count __maybe_unused;
+	size_t freed = 0;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+	KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+	KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
+
+	/* early out if nothing to do */
+	if (0 == nr_pages_to_free)
+		return 0;
+
+	start_free = alloc->pages + alloc->nents - nr_pages_to_free;
+
+	syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+	/* pad start_free to a valid start location */
+	while (nr_pages_to_free && is_huge(*start_free) &&
+	       !is_huge_head(*start_free)) {
+		nr_pages_to_free--;
+		start_free++;
+	}
+
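+	/*
+	 * Free in three ways: a 2MB head entry releases all 512 of its pages
+	 * at once, partial entries go back to their sub-allocation, and runs
+	 * of ordinary 4KB pages are batched into a single pool free.
+	 */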
+	while (nr_pages_to_free) {
+		if (is_huge_head(*start_free)) {
+			/* This is a 2MB entry, so free all the 512 pages that
+			 * it points to
+			 */
+			kbase_mem_pool_free_pages(&kctx->lp_mem_pool,
+					512,
+					start_free,
+					syncback,
+					reclaimed);
+			nr_pages_to_free -= 512;
+			start_free += 512;
+			freed += 512;
+		} else if (is_partial(*start_free)) {
+			free_partial(kctx, *start_free);
+			nr_pages_to_free--;
+			start_free++;
+			freed++;
+		} else {
+			struct tagged_addr *local_end_free;
+
+			local_end_free = start_free;
+			while (nr_pages_to_free &&
+			       !is_huge(*local_end_free) &&
+			       !is_partial(*local_end_free)) {
+				local_end_free++;
+				nr_pages_to_free--;
+			}
+			kbase_mem_pool_free_pages(&kctx->mem_pool,
+					local_end_free - start_free,
+					start_free,
+					syncback,
+					reclaimed);
+			freed += local_end_free - start_free;
+			start_free += local_end_free - start_free;
+		}
+	}
+
+	alloc->nents -= freed;
+
+	/*
+	 * If the allocation was not evicted (i.e. evicted == 0) then
+	 * the page accounting needs to be done.
+	 */
+	if (!reclaimed) {
+		kbase_process_page_usage_dec(kctx, freed);
+		new_page_count = kbase_atomic_sub_pages(freed,
+							&kctx->used_pages);
+		kbase_atomic_sub_pages(freed,
+				       &kctx->kbdev->memdev.used_pages);
+
+		KBASE_TLSTREAM_AUX_PAGESALLOC(
+				kctx->id,
+				(u64)new_page_count);
+	}
+
+	return 0;
+}
+
+static void free_partial_locked(struct kbase_context *kctx,
+		struct kbase_mem_pool *pool, struct tagged_addr tp)
+{
+	struct page *p, *head_page;
+	struct kbase_sub_alloc *sa;
+
+	lockdep_assert_held(&pool->pool_lock);
+	lockdep_assert_held(&kctx->mem_partials_lock);
+
+	p = as_page(tp);
+	head_page = (struct page *)p->lru.prev;
+	sa = (struct kbase_sub_alloc *)head_page->lru.next;
+	clear_bit(p - head_page, sa->sub_pages);
+	if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
+		list_del(&sa->link);
+		kbase_mem_pool_free_locked(pool, head_page, true);
+		kfree(sa);
+	} else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
+		   SZ_2M / SZ_4K - 1) {
+		/* expose the partial again */
+		list_add(&sa->link, &kctx->mem_partials);
+	}
+}
+
+void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
+		struct kbase_mem_pool *pool, struct tagged_addr *pages,
+		size_t nr_pages_to_free)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	bool syncback;
+	bool reclaimed = (alloc->evicted != 0);
+	struct tagged_addr *start_free;
+	size_t freed = 0;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+	KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+	KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
+
+	lockdep_assert_held(&pool->pool_lock);
+	lockdep_assert_held(&kctx->mem_partials_lock);
+
+	/* early out if nothing to do */
+	if (!nr_pages_to_free)
+		return;
+
+	start_free = pages;
+
+	syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+	/* pad start_free to a valid start location */
+	while (nr_pages_to_free && is_huge(*start_free) &&
+	       !is_huge_head(*start_free)) {
+		nr_pages_to_free--;
+		start_free++;
+	}
+
+	while (nr_pages_to_free) {
+		if (is_huge_head(*start_free)) {
+			/* This is a 2MB entry, so free all the 512 pages that
+			 * it points to
+			 */
+			WARN_ON(!pool->order);
+			kbase_mem_pool_free_pages_locked(pool,
+					512,
+					start_free,
+					syncback,
+					reclaimed);
+			nr_pages_to_free -= 512;
+			start_free += 512;
+			freed += 512;
+		} else if (is_partial(*start_free)) {
+			WARN_ON(!pool->order);
+			free_partial_locked(kctx, pool, *start_free);
+			nr_pages_to_free--;
+			start_free++;
+			freed++;
+		} else {
+			struct tagged_addr *local_end_free;
+
+			WARN_ON(pool->order);
+			local_end_free = start_free;
+			while (nr_pages_to_free &&
+			       !is_huge(*local_end_free) &&
+			       !is_partial(*local_end_free)) {
+				local_end_free++;
+				nr_pages_to_free--;
+			}
+			kbase_mem_pool_free_pages_locked(pool,
+					local_end_free - start_free,
+					start_free,
+					syncback,
+					reclaimed);
+			freed += local_end_free - start_free;
+			start_free += local_end_free - start_free;
+		}
+	}
+
+	alloc->nents -= freed;
+
+	/*
+	 * If the allocation was not evicted (i.e. evicted == 0) then
+	 * the page accounting needs to be done.
+	 */
+	if (!reclaimed) {
+		int new_page_count;
+
+		kbase_process_page_usage_dec(kctx, freed);
+		new_page_count = kbase_atomic_sub_pages(freed,
+							&kctx->used_pages);
+		kbase_atomic_sub_pages(freed,
+				       &kctx->kbdev->memdev.used_pages);
+
+		KBASE_TLSTREAM_AUX_PAGESALLOC(
+				kctx->id,
+				(u64)new_page_count);
+	}
+}
+
+void kbase_mem_kref_free(struct kref *kref)
+{
+	struct kbase_mem_phy_alloc *alloc;
+
+	alloc = container_of(kref, struct kbase_mem_phy_alloc, kref);
+
+	switch (alloc->type) {
+	case KBASE_MEM_TYPE_NATIVE: {
+
+		if (!WARN_ON(!alloc->imported.native.kctx)) {
+			if (alloc->permanent_map)
+				kbase_phy_alloc_mapping_term(
+						alloc->imported.native.kctx,
+						alloc);
+
+			/*
+			 * The physical allocation must have been removed from
+			 * the eviction list before trying to free it.
+			 */
+			mutex_lock(
+				&alloc->imported.native.kctx->jit_evict_lock);
+			WARN_ON(!list_empty(&alloc->evict_node));
+			mutex_unlock(
+				&alloc->imported.native.kctx->jit_evict_lock);
+
+			kbase_process_page_usage_dec(
+					alloc->imported.native.kctx,
+					alloc->imported.native.nr_struct_pages);
+		}
+		kbase_free_phy_pages_helper(alloc, alloc->nents);
+		break;
+	}
+	case KBASE_MEM_TYPE_ALIAS: {
+		/* just call put on the underlying phy allocs */
+		size_t i;
+		struct kbase_aliased *aliased;
+
+		aliased = alloc->imported.alias.aliased;
+		if (aliased) {
+			for (i = 0; i < alloc->imported.alias.nents; i++)
+				if (aliased[i].alloc)
+					kbase_mem_phy_alloc_put(aliased[i].alloc);
+			vfree(aliased);
+		}
+		break;
+	}
+	case KBASE_MEM_TYPE_RAW:
+		/* raw pages, external cleanup */
+		break;
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	case KBASE_MEM_TYPE_IMPORTED_UMM:
+		dma_buf_detach(alloc->imported.umm.dma_buf,
+			       alloc->imported.umm.dma_attachment);
+		dma_buf_put(alloc->imported.umm.dma_buf);
+		break;
+#endif
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+		if (alloc->imported.user_buf.mm)
+			mmdrop(alloc->imported.user_buf.mm);
+		if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+			vfree(alloc->imported.user_buf.pages);
+		else
+			kfree(alloc->imported.user_buf.pages);
+		break;
+	default:
+		WARN(1, "Unexpected free of type %d\n", alloc->type);
+		break;
+	}
+
+	/* Free based on allocation type */
+	if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+		vfree(alloc);
+	else
+		kfree(alloc);
+}
+
+KBASE_EXPORT_TEST_API(kbase_mem_kref_free);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+	KBASE_DEBUG_ASSERT(NULL != reg);
+	KBASE_DEBUG_ASSERT(vsize > 0);
+
+	/* validate user provided arguments */
+	if (size > vsize || vsize > reg->nr_pages)
+		goto out_term;
+
+	/* Prevent vsize*sizeof from wrapping around.
+	 * For instance, if vsize is 2**29+1, we'll allocate 1 byte and the alloc won't fail.
+	 */
+	if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
+		goto out_term;
+
+	KBASE_DEBUG_ASSERT(0 != vsize);
+
+	if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0)
+		goto out_term;
+
+	reg->cpu_alloc->reg = reg;
+	if (reg->cpu_alloc != reg->gpu_alloc) {
+		if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, size) != 0)
+			goto out_rollback;
+		reg->gpu_alloc->reg = reg;
+	}
+
+	return 0;
+
+out_rollback:
+	kbase_free_phy_pages_helper(reg->cpu_alloc, size);
+out_term:
+	return -1;
+}
+
+KBASE_EXPORT_TEST_API(kbase_alloc_phy_pages);
+
+bool kbase_check_alloc_flags(unsigned long flags)
+{
+	/* Only known input flags should be set. */
+	if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+		return false;
+
+	/* At least one flag should be set */
+	if (flags == 0)
+		return false;
+
+	/* Either the GPU or CPU must be reading from the allocated memory */
+	if ((flags & (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD)) == 0)
+		return false;
+
+	/* Either the GPU or CPU must be writing to the allocated memory */
+	if ((flags & (BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR)) == 0)
+		return false;
+
+	/* GPU executable memory cannot:
+	 * - Be written by the GPU
+	 * - Be grown on GPU page fault
+	 * - Have the top of its initial commit aligned to 'extent' */
+	if ((flags & BASE_MEM_PROT_GPU_EX) && (flags &
+			(BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
+			BASE_MEM_TILER_ALIGN_TOP)))
+		return false;
+
+	/* An allocation is only required to lie within a 4GB chunk for TLS
+	 * memory, which will never be used to contain executable code, and
+	 * for the Tiler heap.
+	 */
+	if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (flags &
+			(BASE_MEM_PROT_GPU_EX | BASE_MEM_TILER_ALIGN_TOP)))
+		return false;
+
+	/* GPU should have at least read or write access, otherwise there is
+	 * no reason for allocating.
+	 */
+	if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+		return false;
+
+	/* BASE_MEM_IMPORT_SHARED is only valid for imported memory */
+	if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED)
+		return false;
+
+	/* Should not combine BASE_MEM_COHERENT_LOCAL with
+	 * BASE_MEM_COHERENT_SYSTEM */
+	if ((flags & (BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM)) ==
+			(BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM))
+		return false;
+
+	return true;
+}
+
+bool kbase_check_import_flags(unsigned long flags)
+{
+	/* Only known input flags should be set. */
+	if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
+		return false;
+
+	/* At least one flag should be set */
+	if (flags == 0)
+		return false;
+
+	/* Imported memory cannot be GPU executable */
+	if (flags & BASE_MEM_PROT_GPU_EX)
+		return false;
+
+	/* Imported memory cannot grow on page fault */
+	if (flags & BASE_MEM_GROW_ON_GPF)
+		return false;
+
+	/* Imported memory cannot be aligned to the end of its initial commit */
+	if (flags & BASE_MEM_TILER_ALIGN_TOP)
+		return false;
+
+	/* GPU should have at least read or write access, otherwise there is
+	 * no reason for importing.
+	 */
+	if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
+		return false;
+
+	/* Secure memory cannot be read by the CPU */
+	if ((flags & BASE_MEM_SECURE) && (flags & BASE_MEM_PROT_CPU_RD))
+		return false;
+
+	return true;
+}
+
+int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
+		u64 va_pages, u64 commit_pages, u64 large_extent)
+{
+	struct device *dev = kctx->kbdev->dev;
+	int gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+	u64 gpu_pc_pages_max = 1ULL << gpu_pc_bits >> PAGE_SHIFT;
+	struct kbase_va_region test_reg;
+
+	/* kbase_va_region's extent member can be of variable size, so check against that type */
+	test_reg.extent = large_extent;
+
+#define KBASE_MSG_PRE "GPU allocation attempted with "
+
+	if (0 == va_pages) {
+		dev_warn(dev, KBASE_MSG_PRE "0 va_pages!");
+		return -EINVAL;
+	}
+
+	if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
+		dev_warn(dev, KBASE_MSG_PRE "va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",
+				(unsigned long long)va_pages);
+		return -ENOMEM;
+	}
+
+	/* Note: commit_pages is checked against va_pages during
+	 * kbase_alloc_phy_pages() */
+
+	/* Limit GPU executable allocs to GPU PC size */
+	if ((flags & BASE_MEM_PROT_GPU_EX) && (va_pages > gpu_pc_pages_max)) {
+		dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_PROT_GPU_EX and va_pages==%lld larger than GPU PC range %lld",
+				(unsigned long long)va_pages,
+				(unsigned long long)gpu_pc_pages_max);
+
+		return -EINVAL;
+	}
+
+	if ((flags & (BASE_MEM_GROW_ON_GPF | BASE_MEM_TILER_ALIGN_TOP)) &&
+			test_reg.extent == 0) {
+		dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GROW_ON_GPF or BASE_MEM_TILER_ALIGN_TOP but extent == 0\n");
+		return -EINVAL;
+	}
+
+	if (!(flags & (BASE_MEM_GROW_ON_GPF | BASE_MEM_TILER_ALIGN_TOP)) &&
+			test_reg.extent != 0) {
+		dev_warn(dev, KBASE_MSG_PRE "neither BASE_MEM_GROW_ON_GPF nor BASE_MEM_TILER_ALIGN_TOP set but extent != 0\n");
+		return -EINVAL;
+	}
+
+	/* BASE_MEM_TILER_ALIGN_TOP memory has a number of restrictions */
+	if (flags & BASE_MEM_TILER_ALIGN_TOP) {
+#define KBASE_MSG_PRE_FLAG KBASE_MSG_PRE "BASE_MEM_TILER_ALIGN_TOP and "
+		unsigned long small_extent;
+
+		if (large_extent > BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES) {
+			dev_warn(dev, KBASE_MSG_PRE_FLAG "extent==%lld pages exceeds limit %lld",
+					(unsigned long long)large_extent,
+					BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES);
+			return -EINVAL;
+		}
+		/* is_power_of_2() takes an unsigned long, so make sure the
+		 * extent fits in that type, e.g. on a 32-bit kernel.
+		 */
+		small_extent = (unsigned long)large_extent;
+
+		if (!is_power_of_2(small_extent)) {
+			dev_warn(dev, KBASE_MSG_PRE_FLAG "extent==%ld not a non-zero power of 2",
+					small_extent);
+			return -EINVAL;
+		}
+
+		if (commit_pages > large_extent) {
+			dev_warn(dev, KBASE_MSG_PRE_FLAG "commit_pages==%ld exceeds extent==%ld",
+					(unsigned long)commit_pages,
+					(unsigned long)large_extent);
+			return -EINVAL;
+		}
+#undef KBASE_MSG_PRE_FLAG
+	}
+
+	if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) &&
+	    (va_pages > (BASE_MEM_PFN_MASK_4GB + 1))) {
+		dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GPU_VA_SAME_4GB_PAGE and va_pages==%lld greater than that needed for 4GB space",
+				(unsigned long long)va_pages);
+		return -EINVAL;
+	}
+
+	return 0;
+#undef KBASE_MSG_PRE
+}
+
+/**
+ * @brief Acquire the per-context region list lock
+ */
+void kbase_gpu_vm_lock(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	mutex_lock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock);
+
+/**
+ * @brief Release the per-context region list lock
+ */
+void kbase_gpu_vm_unlock(struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(kctx != NULL);
+	mutex_unlock(&kctx->reg_lock);
+}
+
+KBASE_EXPORT_TEST_API(kbase_gpu_vm_unlock);
+
+#ifdef CONFIG_DEBUG_FS
+struct kbase_jit_debugfs_data {
+	int (*func)(struct kbase_jit_debugfs_data *);
+	struct mutex lock;
+	struct kbase_context *kctx;
+	u64 active_value;
+	u64 pool_value;
+	u64 destroy_value;
+	char buffer[50];
+};
+
+static int kbase_jit_debugfs_common_open(struct inode *inode,
+		struct file *file, int (*func)(struct kbase_jit_debugfs_data *))
+{
+	struct kbase_jit_debugfs_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->func = func;
+	mutex_init(&data->lock);
+	data->kctx = (struct kbase_context *) inode->i_private;
+
+	file->private_data = data;
+
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t kbase_jit_debugfs_common_read(struct file *file,
+		char __user *buf, size_t len, loff_t *ppos)
+{
+	struct kbase_jit_debugfs_data *data;
+	size_t size;
+	int ret;
+
+	data = (struct kbase_jit_debugfs_data *) file->private_data;
+	mutex_lock(&data->lock);
+
+	if (*ppos) {
+		size = strnlen(data->buffer, sizeof(data->buffer));
+	} else {
+		if (!data->func) {
+			ret = -EACCES;
+			goto out_unlock;
+		}
+
+		if (data->func(data)) {
+			ret = -EACCES;
+			goto out_unlock;
+		}
+
+		size = scnprintf(data->buffer, sizeof(data->buffer),
+				"%llu,%llu,%llu", data->active_value,
+				data->pool_value, data->destroy_value);
+	}
+
+	ret = simple_read_from_buffer(buf, len, ppos, data->buffer, size);
+
+out_unlock:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static int kbase_jit_debugfs_common_release(struct inode *inode,
+		struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+#define KBASE_JIT_DEBUGFS_DECLARE(__fops, __func) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+	return kbase_jit_debugfs_common_open(inode, file, __func); \
+} \
+static const struct file_operations __fops = { \
+	.owner = THIS_MODULE, \
+	.open = __fops ## _open, \
+	.release = kbase_jit_debugfs_common_release, \
+	.read = kbase_jit_debugfs_common_read, \
+	.write = NULL, \
+	.llseek = generic_file_llseek, \
+}
+
+static int kbase_jit_debugfs_count_get(struct kbase_jit_debugfs_data *data)
+{
+	struct kbase_context *kctx = data->kctx;
+	struct list_head *tmp;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each(tmp, &kctx->jit_active_head) {
+		data->active_value++;
+	}
+
+	list_for_each(tmp, &kctx->jit_pool_head) {
+		data->pool_value++;
+	}
+
+	list_for_each(tmp, &kctx->jit_destroy_head) {
+		data->destroy_value++;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_count_fops,
+		kbase_jit_debugfs_count_get);
+
+static int kbase_jit_debugfs_vm_get(struct kbase_jit_debugfs_data *data)
+{
+	struct kbase_context *kctx = data->kctx;
+	struct kbase_va_region *reg;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+		data->active_value += reg->nr_pages;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+		data->pool_value += reg->nr_pages;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+		data->destroy_value += reg->nr_pages;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_vm_fops,
+		kbase_jit_debugfs_vm_get);
+
+static int kbase_jit_debugfs_phys_get(struct kbase_jit_debugfs_data *data)
+{
+	struct kbase_context *kctx = data->kctx;
+	struct kbase_va_region *reg;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
+		data->active_value += reg->gpu_alloc->nents;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_pool_head, jit_node) {
+		data->pool_value += reg->gpu_alloc->nents;
+	}
+
+	list_for_each_entry(reg, &kctx->jit_destroy_head, jit_node) {
+		data->destroy_value += reg->gpu_alloc->nents;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return 0;
+}
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops,
+		kbase_jit_debugfs_phys_get);
+
+void kbase_jit_debugfs_init(struct kbase_context *kctx)
+{
+	/* Debugfs entry for getting the number of JIT allocations. */
+	debugfs_create_file("mem_jit_count", S_IRUGO, kctx->kctx_dentry,
+			kctx, &kbase_jit_debugfs_count_fops);
+
+	/*
+	 * Debugfs entry for getting the total number of virtual pages
+	 * used by JIT allocations.
+	 */
+	debugfs_create_file("mem_jit_vm", S_IRUGO, kctx->kctx_dentry,
+			kctx, &kbase_jit_debugfs_vm_fops);
+
+	/*
+	 * Debugfs entry for getting the number of physical pages used
+	 * by JIT allocations.
+	 */
+	debugfs_create_file("mem_jit_phys", S_IRUGO, kctx->kctx_dentry,
+			kctx, &kbase_jit_debugfs_phys_fops);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * kbase_jit_destroy_worker - Deferred worker which frees JIT allocations
+ * @work: Work item
+ *
+ * This function does the work of freeing JIT allocations whose physical
+ * backing has been released.
+ */
+static void kbase_jit_destroy_worker(struct work_struct *work)
+{
+	struct kbase_context *kctx;
+	struct kbase_va_region *reg;
+
+	kctx = container_of(work, struct kbase_context, jit_work);
+	do {
+		mutex_lock(&kctx->jit_evict_lock);
+		if (list_empty(&kctx->jit_destroy_head)) {
+			mutex_unlock(&kctx->jit_evict_lock);
+			break;
+		}
+
+		reg = list_first_entry(&kctx->jit_destroy_head,
+				struct kbase_va_region, jit_node);
+
+		list_del(&reg->jit_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+
+		kbase_gpu_vm_lock(kctx);
+		reg->flags &= ~KBASE_REG_JIT;
+		kbase_mem_free_region(kctx, reg);
+		kbase_gpu_vm_unlock(kctx);
+	} while (1);
+}
+
+int kbase_jit_init(struct kbase_context *kctx)
+{
+	mutex_lock(&kctx->jit_evict_lock);
+	INIT_LIST_HEAD(&kctx->jit_active_head);
+	INIT_LIST_HEAD(&kctx->jit_pool_head);
+	INIT_LIST_HEAD(&kctx->jit_destroy_head);
+	INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
+
+	INIT_LIST_HEAD(&kctx->jit_pending_alloc);
+	INIT_LIST_HEAD(&kctx->jit_atoms_head);
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	kctx->jit_max_allocations = 0;
+	kctx->jit_current_allocations = 0;
+	kctx->trim_level = 0;
+
+	return 0;
+}
+
+/* Check whether an allocation from the JIT pool has the same size as the new
+ * JIT allocation and, if BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP is set, whether
+ * it also meets the alignment requirements.
+ */
+static bool meet_size_and_tiler_align_top_requirements(struct kbase_context *kctx,
+	struct kbase_va_region *walker, struct base_jit_alloc_info *info)
+{
+	bool meet_reqs = true;
+
+	if (walker->nr_pages != info->va_pages)
+		meet_reqs = false;
+	else if (info->flags & BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP) {
+		size_t align = info->extent;
+		size_t align_mask = align - 1;
+
+		if ((walker->start_pfn + info->commit_pages) & align_mask)
+			meet_reqs = false;
+	}
+
+	return meet_reqs;
+}
+
+static int kbase_jit_grow(struct kbase_context *kctx,
+		struct base_jit_alloc_info *info, struct kbase_va_region *reg)
+{
+	size_t delta;
+	size_t pages_required;
+	size_t old_size;
+	struct kbase_mem_pool *pool;
+	int ret = -ENOMEM;
+	struct tagged_addr *gpu_pages;
+	struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
+	int i;
+
+	if (info->commit_pages > reg->nr_pages) {
+		/* Attempted to grow larger than maximum size */
+		return -EINVAL;
+	}
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* Make the physical backing no longer reclaimable */
+	if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
+		goto update_failed;
+
+	if (reg->gpu_alloc->nents >= info->commit_pages)
+		goto done;
+
+	/* Grow the backing */
+	old_size = reg->gpu_alloc->nents;
+
+	/* Allocate some more pages */
+	delta = info->commit_pages - reg->gpu_alloc->nents;
+	pages_required = delta;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	/* Preallocate memory for the sub-allocation structs */
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i) {
+		prealloc_sas[i] = kmalloc(sizeof(*prealloc_sas[i]),
+				GFP_KERNEL);
+		if (!prealloc_sas[i])
+			goto update_failed;
+	}
+
+	if (pages_required >= (SZ_2M / SZ_4K)) {
+		pool = &kctx->lp_mem_pool;
+		/* Round up to number of 2 MB pages required */
+		pages_required += ((SZ_2M / SZ_4K) - 1);
+		pages_required /= (SZ_2M / SZ_4K);
+	} else {
+#endif
+		pool = &kctx->mem_pool;
+#ifdef CONFIG_MALI_2MB_ALLOC
+	}
+#endif
+
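+	/* Separate CPU and GPU allocations each need their own backing pages */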
+	if (reg->cpu_alloc != reg->gpu_alloc)
+		pages_required *= 2;
+
+	spin_lock(&kctx->mem_partials_lock);
+	kbase_mem_pool_lock(pool);
+
+	/* As we cannot allocate memory from the kernel with the vm_lock held,
+	 * grow the pool to the required size with the lock dropped. We hold
+	 * the pool lock to prevent another thread from allocating from the
+	 * pool between the grow and allocation.
+	 */
+	while (kbase_mem_pool_size(pool) < pages_required) {
+		int pool_delta = pages_required - kbase_mem_pool_size(pool);
+
+		kbase_mem_pool_unlock(pool);
+		spin_unlock(&kctx->mem_partials_lock);
+		kbase_gpu_vm_unlock(kctx);
+
+		if (kbase_mem_pool_grow(pool, pool_delta))
+			goto update_failed_unlocked;
+
+		kbase_gpu_vm_lock(kctx);
+		spin_lock(&kctx->mem_partials_lock);
+		kbase_mem_pool_lock(pool);
+	}
+
+	gpu_pages = kbase_alloc_phy_pages_helper_locked(reg->gpu_alloc, pool,
+			delta, &prealloc_sas[0]);
+	if (!gpu_pages) {
+		kbase_mem_pool_unlock(pool);
+		spin_unlock(&kctx->mem_partials_lock);
+		goto update_failed;
+	}
+
+	if (reg->cpu_alloc != reg->gpu_alloc) {
+		struct tagged_addr *cpu_pages;
+
+		cpu_pages = kbase_alloc_phy_pages_helper_locked(reg->cpu_alloc,
+				pool, delta, &prealloc_sas[1]);
+		if (!cpu_pages) {
+			kbase_free_phy_pages_helper_locked(reg->gpu_alloc,
+					pool, gpu_pages, delta);
+			kbase_mem_pool_unlock(pool);
+			spin_unlock(&kctx->mem_partials_lock);
+			goto update_failed;
+		}
+	}
+	kbase_mem_pool_unlock(pool);
+	spin_unlock(&kctx->mem_partials_lock);
+
+	ret = kbase_mem_grow_gpu_mapping(kctx, reg, info->commit_pages,
+			old_size);
+	/*
+	 * The grow failed so put the allocation back in the
+	 * pool and return failure.
+	 */
+	if (ret)
+		goto update_failed;
+
+done:
+	ret = 0;
+
+	/* Update attributes of JIT allocation taken from the pool */
+	reg->initial_commit = info->commit_pages;
+	reg->extent = info->extent;
+
+update_failed:
+	kbase_gpu_vm_unlock(kctx);
+update_failed_unlocked:
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i)
+		kfree(prealloc_sas[i]);
+
+	return ret;
+}
+
+static void trace_jit_stats(struct kbase_context *kctx,
+		u32 bin_id, u32 max_allocations)
+{
+	const u32 alloc_count =
+		kctx->jit_current_allocations_per_bin[bin_id];
+
+	struct kbase_va_region *walker;
+	u32 va_pages = 0;
+	u32 ph_pages = 0;
+
+	mutex_lock(&kctx->jit_evict_lock);
+	list_for_each_entry(walker, &kctx->jit_active_head, jit_node) {
+		if (walker->jit_bin_id != bin_id)
+			continue;
+
+		va_pages += walker->nr_pages;
+		ph_pages += walker->gpu_alloc->nents;
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	KBASE_TLSTREAM_AUX_JIT_STATS(kctx->id, bin_id, max_allocations,
+		alloc_count, va_pages, ph_pages);
+}
+
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+		struct base_jit_alloc_info *info)
+{
+	struct kbase_va_region *reg = NULL;
+
+	if (kctx->jit_current_allocations >= kctx->jit_max_allocations) {
+		/* Too many current allocations */
+		return NULL;
+	}
+	if (info->max_allocations > 0 &&
+			kctx->jit_current_allocations_per_bin[info->bin_id] >=
+			info->max_allocations) {
+		/* Too many current allocations in this bin */
+		return NULL;
+	}
+
+	mutex_lock(&kctx->jit_evict_lock);
+
+	/*
+	 * Scan the pool for an existing allocation which meets our
+	 * requirements and remove it.
+	 */
+	if (info->usage_id != 0) {
+		/* First scan for an allocation with the same usage ID */
+		struct kbase_va_region *walker;
+		size_t current_diff = SIZE_MAX;
+
+		list_for_each_entry(walker, &kctx->jit_pool_head, jit_node) {
+
+			if (walker->jit_usage_id == info->usage_id &&
+					walker->jit_bin_id == info->bin_id &&
+					meet_size_and_tiler_align_top_requirements(
+							kctx, walker, info)) {
+				size_t min_size, max_size, diff;
+
+				/*
+				 * The JIT allocation's VA requirements have
+				 * been met; it is suitable, but another
+				 * allocation might be an even better fit.
+				 */
+				min_size = min_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				max_size = max_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				diff = max_size - min_size;
+
+				if (current_diff > diff) {
+					current_diff = diff;
+					reg = walker;
+				}
+
+				/* The allocation is an exact match */
+				if (current_diff == 0)
+					break;
+			}
+		}
+	}
+
+	if (!reg) {
+		/* No allocation with the same usage ID, or usage IDs not in
+		 * use. Search for an allocation we can reuse.
+		 */
+		struct kbase_va_region *walker;
+		size_t current_diff = SIZE_MAX;
+
+		list_for_each_entry(walker, &kctx->jit_pool_head, jit_node) {
+
+			if (walker->jit_bin_id == info->bin_id &&
+					meet_size_and_tiler_align_top_requirements(
+							kctx, walker, info)) {
+				size_t min_size, max_size, diff;
+
+				/*
+				 * The JIT allocation's VA requirements have
+				 * been met; it is suitable, but another
+				 * allocation might be an even better fit.
+				 */
+				min_size = min_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				max_size = max_t(size_t,
+						walker->gpu_alloc->nents,
+						info->commit_pages);
+				diff = max_size - min_size;
+
+				if (current_diff > diff) {
+					current_diff = diff;
+					reg = walker;
+				}
+
+				/* The allocation is an exact match, so stop
+				 * looking.
+				 */
+				if (current_diff == 0)
+					break;
+			}
+		}
+	}
+
+	if (reg) {
+		/*
+		 * Remove the found region from the pool and add it to the
+		 * active list.
+		 */
+		list_move(&reg->jit_node, &kctx->jit_active_head);
+
+		/*
+		 * Remove the allocation from the eviction list as it's no
+		 * longer eligible for eviction. This must be done before
+		 * dropping the jit_evict_lock
+		 */
+		list_del_init(&reg->gpu_alloc->evict_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+
+		if (kbase_jit_grow(kctx, info, reg) < 0) {
+			/*
+			 * Growing an allocation from the pool failed; the
+			 * chances are slim that a new allocation would fare
+			 * any better, so return the region to the pool and
+			 * fail.
+			 */
+			goto update_failed_unlocked;
+		}
+	} else {
+		/* No suitable JIT allocation was found so create a new one */
+		u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
+				BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
+				BASE_MEM_COHERENT_LOCAL;
+		u64 gpu_addr;
+
+		mutex_unlock(&kctx->jit_evict_lock);
+
+		if (info->flags & BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP)
+			flags |= BASE_MEM_TILER_ALIGN_TOP;
+
+		reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
+				info->extent, &flags, &gpu_addr);
+		if (!reg)
+			goto out_unlocked;
+
+		reg->flags |= KBASE_REG_JIT;
+
+		mutex_lock(&kctx->jit_evict_lock);
+		list_add(&reg->jit_node, &kctx->jit_active_head);
+		mutex_unlock(&kctx->jit_evict_lock);
+	}
+
+	kctx->jit_current_allocations++;
+	kctx->jit_current_allocations_per_bin[info->bin_id]++;
+
+	trace_jit_stats(kctx, info->bin_id, info->max_allocations);
+
+	reg->jit_usage_id = info->usage_id;
+	reg->jit_bin_id = info->bin_id;
+
+	return reg;
+
+update_failed_unlocked:
+	mutex_lock(&kctx->jit_evict_lock);
+	list_move(&reg->jit_node, &kctx->jit_pool_head);
+	mutex_unlock(&kctx->jit_evict_lock);
+out_unlocked:
+	return NULL;
+}
+
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+	u64 old_pages;
+
+	/* Get current size of JIT region */
+	old_pages = kbase_reg_current_backed_size(reg);
+	if (reg->initial_commit < old_pages) {
+		/* Free trim_level % of region, but don't go below initial
+		 * commit size
+		 */
+		u64 new_size = MAX(reg->initial_commit,
+			div_u64(old_pages * (100 - kctx->trim_level), 100));
+		u64 delta = old_pages - new_size;
+
+		if (delta) {
+			kbase_mem_shrink_cpu_mapping(kctx, reg, old_pages-delta,
+					old_pages);
+			kbase_mem_shrink_gpu_mapping(kctx, reg, old_pages-delta,
+					old_pages);
+
+			kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+			if (reg->cpu_alloc != reg->gpu_alloc)
+				kbase_free_phy_pages_helper(reg->gpu_alloc,
+						delta);
+		}
+	}
+
+	kctx->jit_current_allocations--;
+	kctx->jit_current_allocations_per_bin[reg->jit_bin_id]--;
+
+	trace_jit_stats(kctx, reg->jit_bin_id, UINT_MAX);
+
+	kbase_mem_evictable_mark_reclaim(reg->gpu_alloc);
+
+	kbase_gpu_vm_lock(kctx);
+	reg->flags |= KBASE_REG_DONT_NEED;
+	kbase_mem_shrink_cpu_mapping(kctx, reg, 0, reg->gpu_alloc->nents);
+	kbase_gpu_vm_unlock(kctx);
+
+	/*
+	 * Add the allocation to the eviction list and the jit pool; after
+	 * this point the shrinker can reclaim it, or it may be reused.
+	 */
+	mutex_lock(&kctx->jit_evict_lock);
+
+	/* This allocation can't already be on a list. */
+	WARN_ON(!list_empty(&reg->gpu_alloc->evict_node));
+	list_add(&reg->gpu_alloc->evict_node, &kctx->evict_list);
+
+	list_move(&reg->jit_node, &kctx->jit_pool_head);
+
+	mutex_unlock(&kctx->jit_evict_lock);
+}
+
+void kbase_jit_backing_lost(struct kbase_va_region *reg)
+{
+	struct kbase_context *kctx = kbase_reg_flags_to_kctx(reg);
+
+	if (WARN_ON(!kctx))
+		return;
+
+	lockdep_assert_held(&kctx->jit_evict_lock);
+
+	/*
+	 * JIT allocations will always be on a list; if the region
+	 * is not on a list then it is not a JIT allocation.
+	 */
+	if (list_empty(&reg->jit_node))
+		return;
+
+	/*
+	 * Freeing the allocation requires locks we might not be able
+	 * to take now, so move the allocation to the free list and kick
+	 * the worker which will do the freeing.
+	 */
+	list_move(&reg->jit_node, &kctx->jit_destroy_head);
+
+	schedule_work(&kctx->jit_work);
+}
+
+bool kbase_jit_evict(struct kbase_context *kctx)
+{
+	struct kbase_va_region *reg = NULL;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* Free the oldest allocation from the pool */
+	mutex_lock(&kctx->jit_evict_lock);
+	if (!list_empty(&kctx->jit_pool_head)) {
+		reg = list_entry(kctx->jit_pool_head.prev,
+				struct kbase_va_region, jit_node);
+		list_del(&reg->jit_node);
+		list_del_init(&reg->gpu_alloc->evict_node);
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	if (reg) {
+		reg->flags &= ~KBASE_REG_JIT;
+		kbase_mem_free_region(kctx, reg);
+	}
+
+	return (reg != NULL);
+}
+
+void kbase_jit_term(struct kbase_context *kctx)
+{
+	struct kbase_va_region *walker;
+
+	/* Free all allocations for this context */
+
+	kbase_gpu_vm_lock(kctx);
+	mutex_lock(&kctx->jit_evict_lock);
+	/* Free all allocations from the pool */
+	while (!list_empty(&kctx->jit_pool_head)) {
+		walker = list_first_entry(&kctx->jit_pool_head,
+				struct kbase_va_region, jit_node);
+		list_del(&walker->jit_node);
+		list_del_init(&walker->gpu_alloc->evict_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+		walker->flags &= ~KBASE_REG_JIT;
+		kbase_mem_free_region(kctx, walker);
+		mutex_lock(&kctx->jit_evict_lock);
+	}
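+	/*
+	 * Note: in this loop and the one below, jit_evict_lock is dropped
+	 * around each kbase_mem_free_region() call and re-taken before the
+	 * list is looked at again, which is why every iteration restarts
+	 * from the head of the list.
+	 */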
+
+	/* Free all allocations from active list */
+	while (!list_empty(&kctx->jit_active_head)) {
+		walker = list_first_entry(&kctx->jit_active_head,
+				struct kbase_va_region, jit_node);
+		list_del(&walker->jit_node);
+		list_del_init(&walker->gpu_alloc->evict_node);
+		mutex_unlock(&kctx->jit_evict_lock);
+		walker->flags &= ~KBASE_REG_JIT;
+		kbase_mem_free_region(kctx, walker);
+		mutex_lock(&kctx->jit_evict_lock);
+	}
+	mutex_unlock(&kctx->jit_evict_lock);
+	kbase_gpu_vm_unlock(kctx);
+
+	/*
+	 * Flush the freeing of allocations whose backing has been freed
+	 * (i.e. everything in jit_destroy_head).
+	 */
+	cancel_work_sync(&kctx->jit_work);
+}
+
+bool kbase_has_exec_va_zone(struct kbase_context *kctx)
+{
+	bool has_exec_va_zone;
+
+	kbase_gpu_vm_lock(kctx);
+	has_exec_va_zone = (kctx->exec_va_start != U64_MAX);
+	kbase_gpu_vm_unlock(kctx);
+
+	return has_exec_va_zone;
+}
+
+static int kbase_jd_user_buf_map(struct kbase_context *kctx,
+		struct kbase_va_region *reg)
+{
+	long pinned_pages;
+	struct kbase_mem_phy_alloc *alloc;
+	struct page **pages;
+	struct tagged_addr *pa;
+	long i;
+	int err = -ENOMEM;
+	unsigned long address;
+	struct mm_struct *mm;
+	struct device *dev;
+	unsigned long offset;
+	unsigned long local_size;
+	unsigned long gwt_mask = ~0;
+
+	alloc = reg->gpu_alloc;
+	pa = kbase_get_gpu_phy_pages(reg);
+	address = alloc->imported.user_buf.address;
+	mm = alloc->imported.user_buf.mm;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+
+	pages = alloc->imported.user_buf.pages;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+	pinned_pages = get_user_pages(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR,
+			0, pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+	pinned_pages = get_user_pages_remote(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR,
+			0, pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+	pinned_pages = get_user_pages_remote(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
+	pinned_pages = get_user_pages_remote(NULL, mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL, NULL);
+#else
+	pinned_pages = get_user_pages_remote(mm,
+			address,
+			alloc->imported.user_buf.nr_pages,
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL, NULL);
+#endif
+
+	if (pinned_pages <= 0)
+		return pinned_pages;
+
+	if (pinned_pages != alloc->imported.user_buf.nr_pages) {
+		for (i = 0; i < pinned_pages; i++)
+			put_page(pages[i]);
+		return -ENOMEM;
+	}
+
+	dev = kctx->kbdev->dev;
+	offset = address & ~PAGE_MASK;
+	local_size = alloc->imported.user_buf.size;
+
+	for (i = 0; i < pinned_pages; i++) {
+		dma_addr_t dma_addr;
+		unsigned long min;
+
+		min = MIN(PAGE_SIZE - offset, local_size);
+		dma_addr = dma_map_page(dev, pages[i],
+				offset, min,
+				DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, dma_addr))
+			goto unwind;
+
+		alloc->imported.user_buf.dma_addrs[i] = dma_addr;
+		pa[i] = as_tagged(page_to_phys(pages[i]));
+
+		local_size -= min;
+		offset = 0;
+	}
+
+	alloc->nents = pinned_pages;
+#ifdef CONFIG_MALI_CINSTR_GWT
+	if (kctx->gwt_enabled)
+		gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+	err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+			pa, kbase_reg_current_backed_size(reg),
+			reg->flags & gwt_mask, kctx->as_nr);
+	if (err == 0)
+		return 0;
+
+	alloc->nents = 0;
+	/* fall through to the unwind path below */
+unwind:
+	while (i--) {
+		dma_unmap_page(kctx->kbdev->dev,
+				alloc->imported.user_buf.dma_addrs[i],
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
+
+	while (++i < pinned_pages) {
+		put_page(pages[i]);
+		pages[i] = NULL;
+	}
+
+	return err;
+}
+
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc, bool writeable)
+{
+	long i;
+	struct page **pages;
+	unsigned long size = alloc->imported.user_buf.size;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+	pages = alloc->imported.user_buf.pages;
+	for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
+		unsigned long local_size;
+		dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
+
+		local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK));
+		dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size,
+				DMA_BIDIRECTIONAL);
+		if (writeable)
+			set_page_dirty_lock(pages[i]);
+		put_page(pages[i]);
+		pages[i] = NULL;
+
+		size -= local_size;
+	}
+	alloc->nents = 0;
+}
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static int kbase_jd_umm_map(struct kbase_context *kctx,
+		struct kbase_va_region *reg)
+{
+	struct sg_table *sgt;
+	struct scatterlist *s;
+	int i;
+	struct tagged_addr *pa;
+	int err;
+	size_t count = 0;
+	struct kbase_mem_phy_alloc *alloc;
+	unsigned long gwt_mask = ~0;
+
+	alloc = reg->gpu_alloc;
+
+	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM);
+	KBASE_DEBUG_ASSERT(NULL == alloc->imported.umm.sgt);
+	sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment,
+			DMA_BIDIRECTIONAL);
+
+	if (IS_ERR_OR_NULL(sgt))
+		return -EINVAL;
+
+	/* save for later */
+	alloc->imported.umm.sgt = sgt;
+
+	pa = kbase_get_gpu_phy_pages(reg);
+	KBASE_DEBUG_ASSERT(pa);
+
+	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+		size_t j, pages = PFN_UP(sg_dma_len(s));
+
+		WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
+		"sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
+		sg_dma_len(s));
+
+		WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
+		"sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
+		(unsigned long long) sg_dma_address(s));
+
+		for (j = 0; (j < pages) && (count < reg->nr_pages); j++,
+				count++)
+			*pa++ = as_tagged(sg_dma_address(s) +
+				(j << PAGE_SHIFT));
+		WARN_ONCE(j < pages,
+			  "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
+		alloc->imported.umm.dma_buf->size);
+	}
+
+	if (!(reg->flags & KBASE_REG_IMPORT_PAD) &&
+			WARN_ONCE(count < reg->nr_pages,
+			"sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
+			alloc->imported.umm.dma_buf->size)) {
+		err = -EINVAL;
+		goto err_unmap_attachment;
+	}
+
+	/* Update nents as we now have pages to map */
+	alloc->nents = reg->nr_pages;
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+	if (kctx->gwt_enabled)
+		gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+	err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+			kbase_get_gpu_phy_pages(reg),
+			count,
+			(reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD) &
+			 gwt_mask,
+			kctx->as_nr);
+	if (err)
+		goto err_unmap_attachment;
+
+	if (reg->flags & KBASE_REG_IMPORT_PAD) {
+		err = kbase_mmu_insert_single_page(kctx,
+				reg->start_pfn + count,
+				kctx->aliasing_sink_page,
+				reg->nr_pages - count,
+				(reg->flags | KBASE_REG_GPU_RD) &
+				~KBASE_REG_GPU_WR);
+		if (err)
+			goto err_teardown_orig_pages;
+	}
+
+	return 0;
+
+err_teardown_orig_pages:
+	kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+			count, kctx->as_nr);
+err_unmap_attachment:
+	dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+			alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+	alloc->imported.umm.sgt = NULL;
+
+	return err;
+}
+
+static void kbase_jd_umm_unmap(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc)
+{
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(alloc);
+	KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
+	KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
+	dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+	    alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+	alloc->imported.umm.sgt = NULL;
+	alloc->nents = 0;
+}
+#endif				/* CONFIG_DMA_SHARED_BUFFER */
+
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+		struct kbase_context *kctx, struct kbase_va_region *reg,
+		struct mm_struct *locked_mm)
+{
+	int err;
+
+	/* decide what needs to happen for this resource */
+	switch (reg->gpu_alloc->type) {
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+		if (reg->gpu_alloc->imported.user_buf.mm != locked_mm)
+			goto exit;
+
+		reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
+		if (1 == reg->gpu_alloc->imported.user_buf.current_mapping_usage_count) {
+			err = kbase_jd_user_buf_map(kctx, reg);
+			if (err) {
+				reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
+				goto exit;
+			}
+		}
+	}
+	break;
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	case KBASE_MEM_TYPE_IMPORTED_UMM: {
+		reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
+		if (1 == reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
+			err = kbase_jd_umm_map(kctx, reg);
+			if (err) {
+				reg->gpu_alloc->imported.umm.current_mapping_usage_count--;
+				goto exit;
+			}
+		}
+		break;
+	}
+#endif
+	default:
+		goto exit;
+	}
+
+	return kbase_mem_phy_alloc_get(reg->gpu_alloc);
+exit:
+	return NULL;
+}
+
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
+{
+	switch (alloc->type) {
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	case KBASE_MEM_TYPE_IMPORTED_UMM: {
+		alloc->imported.umm.current_mapping_usage_count--;
+
+		if (0 == alloc->imported.umm.current_mapping_usage_count) {
+			if (reg && reg->gpu_alloc == alloc) {
+				int err;
+
+				err = kbase_mmu_teardown_pages(
+						kctx->kbdev,
+						&kctx->mmu,
+						reg->start_pfn,
+						alloc->nents,
+						kctx->as_nr);
+				WARN_ON(err);
+			}
+
+			kbase_jd_umm_unmap(kctx, alloc);
+		}
+	}
+	break;
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
+		alloc->imported.user_buf.current_mapping_usage_count--;
+
+		if (0 == alloc->imported.user_buf.current_mapping_usage_count) {
+			bool writeable = true;
+
+			if (reg && reg->gpu_alloc == alloc)
+				kbase_mmu_teardown_pages(
+						kctx->kbdev,
+						&kctx->mmu,
+						reg->start_pfn,
+						kbase_reg_current_backed_size(reg),
+						kctx->as_nr);
+
+			if (reg && ((reg->flags & KBASE_REG_GPU_WR) == 0))
+				writeable = false;
+
+			kbase_jd_user_buf_unmap(kctx, alloc, writeable);
+		}
+	}
+	break;
+	default:
+	break;
+	}
+	kbase_mem_phy_alloc_put(alloc);
+}
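+/*
+ * Illustrative pairing (editorial sketch; the caller, @reg and the mm it has
+ * pinned are assumed): every successful map must be balanced by an unmap on
+ * the same allocation, along the lines of:
+ *
+ *	alloc = kbase_map_external_resource(kctx, reg, locked_mm);
+ *	if (alloc) {
+ *		... use the imported memory on the GPU ...
+ *		kbase_unmap_external_resource(kctx, reg, alloc);
+ *	}
+ */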
+
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+		struct kbase_context *kctx, u64 gpu_addr)
+{
+	struct kbase_ctx_ext_res_meta *meta = NULL;
+	struct kbase_ctx_ext_res_meta *walker;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/*
+	 * Walk the per context external resource metadata list for the
+	 * metadata which matches the region which is being acquired.
+	 */
+	list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) {
+		if (walker->gpu_addr == gpu_addr) {
+			meta = walker;
+			break;
+		}
+	}
+
+	/* No metadata exists so create one. */
+	if (!meta) {
+		struct kbase_va_region *reg;
+
+		/* Find the region */
+		reg = kbase_region_tracker_find_region_enclosing_address(
+				kctx, gpu_addr);
+		if (NULL == reg || (reg->flags & KBASE_REG_FREE))
+			goto failed;
+
+		/* Allocate the metadata object */
+		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+		if (!meta)
+			goto failed;
+
+		/*
+		 * Fill in the metadata object and acquire a reference
+		 * for the physical resource.
+		 */
+		meta->alloc = kbase_map_external_resource(kctx, reg, NULL);
+
+		if (!meta->alloc)
+			goto fail_map;
+
+		meta->gpu_addr = reg->start_pfn << PAGE_SHIFT;
+
+		list_add(&meta->ext_res_node, &kctx->ext_res_meta_head);
+	}
+
+	return meta;
+
+fail_map:
+	kfree(meta);
+failed:
+	return NULL;
+}
+
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+		struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
+{
+	struct kbase_ctx_ext_res_meta *walker;
+	struct kbase_va_region *reg;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* Search for the metadata if one isn't provided. */
+	if (!meta) {
+		/*
+		 * Walk the per context external resource metadata list for the
+		 * metadata which matches the region which is being released.
+		 */
+		list_for_each_entry(walker, &kctx->ext_res_meta_head,
+				ext_res_node) {
+			if (walker->gpu_addr == gpu_addr) {
+				meta = walker;
+				break;
+			}
+		}
+	}
+
+	/* No metadata so just return. */
+	if (!meta)
+		return false;
+
+	/* Drop the physical memory reference and free the metadata. */
+	reg = kbase_region_tracker_find_region_enclosing_address(
+			kctx,
+			meta->gpu_addr);
+
+	kbase_unmap_external_resource(kctx, reg, meta->alloc);
+	list_del(&meta->ext_res_node);
+	kfree(meta);
+
+	return true;
+}
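+/*
+ * Illustrative pairing (editorial sketch; gpu_addr is assumed, and
+ * kbase_gpu_vm_lock() is assumed to take kctx->reg_lock as elsewhere in
+ * this file):
+ *
+ *	kbase_gpu_vm_lock(kctx);
+ *	meta = kbase_sticky_resource_acquire(kctx, gpu_addr);
+ *	...
+ *	if (meta)
+ *		kbase_sticky_resource_release(kctx, meta, 0);
+ *	kbase_gpu_vm_unlock(kctx);
+ */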
+
+int kbase_sticky_resource_init(struct kbase_context *kctx)
+{
+	INIT_LIST_HEAD(&kctx->ext_res_meta_head);
+
+	return 0;
+}
+
+void kbase_sticky_resource_term(struct kbase_context *kctx)
+{
+	struct kbase_ctx_ext_res_meta *walker;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/*
+	 * Free any sticky resources which haven't been unmapped.
+	 *
+	 * Note:
+	 * We don't care about refcounts at this point as no future
+	 * references to the meta data will be made.
+	 * Region termination would find these if we didn't free them
+	 * here, but it's more efficient if we do the clean up here.
+	 */
+	while (!list_empty(&kctx->ext_res_meta_head)) {
+		walker = list_first_entry(&kctx->ext_res_meta_head,
+				struct kbase_ctx_ext_res_meta, ext_res_node);
+
+		kbase_sticky_resource_release(kctx, walker, 0);
+	}
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.h b/drivers/gpu/arm/midgard/mali_kbase_mem.h
new file mode 100644
index 0000000..a873bb1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.h
@@ -0,0 +1,1488 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem.h
+ * Base kernel memory APIs
+ */
+
+#ifndef _KBASE_MEM_H_
+#define _KBASE_MEM_H_
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/kref.h>
+#include "mali_base_kernel.h"
+#include <mali_kbase_hw.h>
+#include "mali_kbase_pm.h"
+#include "mali_kbase_defs.h"
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include "mali_kbase_gator.h"
+#endif
+/* Required for kbase_mem_evictable_unmake */
+#include "mali_kbase_mem_linux.h"
+
+static inline void kbase_process_page_usage_inc(struct kbase_context *kctx,
+		int pages);
+
+/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2)	/* round to 4 pages */
+
+/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by
+ * 8 pages. The MMU reads in 8 page table entries from memory at a time; if we
+ * have more than one page fault within the same 8 pages and the page tables
+ * are updated accordingly, the MMU does not re-read the page table entries
+ * from memory for the subsequent page table updates and generates duplicate
+ * page faults, as the page table information used by the MMU is not valid.
+ */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3)	/* round to 8 pages */
+
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0)	/* round to 1 page */
+
+/* This must always be a power of 2 */
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
+#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
+/**
+ * A CPU mapping
+ */
+struct kbase_cpu_mapping {
+	struct   list_head mappings_list;
+	struct   kbase_mem_phy_alloc *alloc;
+	struct   kbase_context *kctx;
+	struct   kbase_va_region *region;
+	int      count;
+	int      free_on_close;
+};
+
+enum kbase_memory_type {
+	KBASE_MEM_TYPE_NATIVE,
+	KBASE_MEM_TYPE_IMPORTED_UMM,
+	KBASE_MEM_TYPE_IMPORTED_USER_BUF,
+	KBASE_MEM_TYPE_ALIAS,
+	KBASE_MEM_TYPE_RAW
+};
+
+/* internal structure, mirroring base_mem_aliasing_info,
+ * but with alloc instead of a gpu va (handle) */
+struct kbase_aliased {
+	struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
+	u64 offset; /* in pages */
+	u64 length; /* in pages */
+};
+
+/**
+ * @brief Physical pages tracking object properties
+  */
+#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED  (1ul << 0)
+#define KBASE_MEM_PHY_ALLOC_LARGE            (1ul << 1)
+
+/* physical pages tracking object.
+ * Set up to track N pages.
+ * N not stored here, the creator holds that info.
+ * This object only tracks how many elements are actually valid (present).
+ * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc is not
+ * shared with another region or client. CPU mappings are OK to exist when changing, as
+ * long as the tracked mappings objects are updated as part of the change.
+ */
+struct kbase_mem_phy_alloc {
+	struct kref           kref; /* number of users of this alloc */
+	atomic_t              gpu_mappings;
+	size_t                nents; /* 0..N */
+	struct tagged_addr    *pages; /* N elements, only 0..nents are valid */
+
+	/* kbase_cpu_mappings */
+	struct list_head      mappings;
+
+	/* Node used to store this allocation on the eviction list */
+	struct list_head      evict_node;
+	/* Physical backing size when the pages were evicted */
+	size_t                evicted;
+	/*
+	 * Back reference to the region structure which created this
+	 * allocation, or NULL if it has been freed.
+	 */
+	struct kbase_va_region *reg;
+
+	/* type of buffer */
+	enum kbase_memory_type type;
+
+	/* Kernel-side mapping of the alloc; never refer to it directly. The
+	 * kbase_phy_alloc_mapping_get() & kbase_phy_alloc_mapping_put() pair
+	 * should be used around access to the kernel-side CPU mapping so that
+	 * mapping doesn't disappear whilst it is being accessed.
+	 */
+	struct kbase_vmap_struct *permanent_map;
+
+	unsigned long properties;
+
+	/* member in union valid based on @a type */
+	union {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+		struct {
+			struct dma_buf *dma_buf;
+			struct dma_buf_attachment *dma_attachment;
+			unsigned int current_mapping_usage_count;
+			struct sg_table *sgt;
+		} umm;
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+		struct {
+			u64 stride;
+			size_t nents;
+			struct kbase_aliased *aliased;
+		} alias;
+		struct {
+			struct kbase_context *kctx;
+			/* Number of pages in this structure, including *pages.
+			 * Used for kernel memory tracking.
+			 */
+			size_t nr_struct_pages;
+		} native;
+		struct kbase_alloc_import_user_buf {
+			unsigned long address;
+			unsigned long size;
+			unsigned long nr_pages;
+			struct page **pages;
+			/* top bit (1<<31) of current_mapping_usage_count
+			 * specifies that this import was pinned on import
+			 * See PINNED_ON_IMPORT
+			 */
+			u32 current_mapping_usage_count;
+			struct mm_struct *mm;
+			dma_addr_t *dma_addrs;
+		} user_buf;
+	} imported;
+};
+
+/* The top bit of kbase_alloc_import_user_buf::current_mapping_usage_count is
+ * used to signify that a buffer was pinned when it was imported. Since the
+ * reference count is limited by the number of atoms that can be submitted at
+ * once there should be no danger of overflowing into this bit.
+ * Stealing the top bit also has the benefit that
+ * current_mapping_usage_count != 0 if and only if the buffer is mapped.
+ */
+#define PINNED_ON_IMPORT	(1<<31)
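+/*
+ * Illustrative decomposition (editorial; 'count' stands for a
+ * current_mapping_usage_count value):
+ *
+ *	bool pinned = (count & PINNED_ON_IMPORT) != 0;
+ *	u32 map_count = count & ~PINNED_ON_IMPORT;
+ */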
+
+static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
+{
+	KBASE_DEBUG_ASSERT(alloc);
+	/* we only track mappings of NATIVE buffers */
+	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+		atomic_inc(&alloc->gpu_mappings);
+}
+
+static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
+{
+	KBASE_DEBUG_ASSERT(alloc);
+	/* we only track mappings of NATIVE buffers */
+	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
+		if (0 > atomic_dec_return(&alloc->gpu_mappings)) {
+			pr_err("Mismatched %s:\n", __func__);
+			dump_stack();
+		}
+}
+
+/**
+ * kbase_mem_is_imported - Indicate whether a memory type is imported
+ *
+ * @type: the memory type
+ *
+ * Return: true if the memory type is imported, false otherwise
+ */
+static inline bool kbase_mem_is_imported(enum kbase_memory_type type)
+{
+	return (type == KBASE_MEM_TYPE_IMPORTED_UMM) ||
+		(type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+}
+
+void kbase_mem_kref_free(struct kref *kref);
+
+int kbase_mem_init(struct kbase_device *kbdev);
+void kbase_mem_halt(struct kbase_device *kbdev);
+void kbase_mem_term(struct kbase_device *kbdev);
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
+{
+	kref_get(&alloc->kref);
+	return alloc;
+}
+
+static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
+{
+	kref_put(&alloc->kref, kbase_mem_kref_free);
+	return NULL;
+}
+
+/**
+ * A GPU memory region, and attributes for CPU mappings.
+ */
+struct kbase_va_region {
+	struct rb_node rblink;
+	struct list_head link;
+
+	struct rb_root *rbtree;	/* Backlink to rb tree */
+
+	u64 start_pfn;		/* The PFN in GPU space */
+	size_t nr_pages;
+	/* Initial commit, for aligning the start address and correctly growing
+	 * KBASE_REG_TILER_ALIGN_TOP regions */
+	size_t initial_commit;
+
+/* Free region */
+#define KBASE_REG_FREE              (1ul << 0)
+/* CPU write access */
+#define KBASE_REG_CPU_WR            (1ul << 1)
+/* GPU write access */
+#define KBASE_REG_GPU_WR            (1ul << 2)
+/* No eXecute flag */
+#define KBASE_REG_GPU_NX            (1ul << 3)
+/* Is CPU cached? */
+#define KBASE_REG_CPU_CACHED        (1ul << 4)
+/* Is GPU cached?
+ * Some components within the GPU might only be able to access memory that is
+ * GPU cacheable. Refer to the specific GPU implementation for more details.
+ */
+#define KBASE_REG_GPU_CACHED        (1ul << 5)
+
+#define KBASE_REG_GROWABLE          (1ul << 6)
+/* Can grow on pf? */
+#define KBASE_REG_PF_GROW           (1ul << 7)
+
+/* Allocation doesn't straddle the 4GB boundary in GPU virtual space */
+#define KBASE_REG_GPU_VA_SAME_4GB_PAGE (1ul << 8)
+
+/* inner shareable coherency */
+#define KBASE_REG_SHARE_IN          (1ul << 9)
+/* inner & outer shareable coherency */
+#define KBASE_REG_SHARE_BOTH        (1ul << 10)
+
+/* Space for 4 different zones */
+#define KBASE_REG_ZONE_MASK         (3ul << 11)
+#define KBASE_REG_ZONE(x)           (((x) & 3) << 11)
+
+/* GPU read access */
+#define KBASE_REG_GPU_RD            (1ul<<13)
+/* CPU read access */
+#define KBASE_REG_CPU_RD            (1ul<<14)
+
+/* Index of chosen MEMATTR for this region (0..7) */
+#define KBASE_REG_MEMATTR_MASK      (7ul << 16)
+#define KBASE_REG_MEMATTR_INDEX(x)  (((x) & 7) << 16)
+#define KBASE_REG_MEMATTR_VALUE(x)  (((x) & KBASE_REG_MEMATTR_MASK) >> 16)
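+/*
+ * Worked example (editorial): KBASE_REG_MEMATTR_INDEX(3) evaluates to
+ * (3 << 16) == 0x30000, and KBASE_REG_MEMATTR_VALUE(flags) recovers the
+ * index 3 from a flags word carrying that value.
+ */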
+
+#define KBASE_REG_SECURE            (1ul << 19)
+
+#define KBASE_REG_DONT_NEED         (1ul << 20)
+
+/* Imported buffer is padded? */
+#define KBASE_REG_IMPORT_PAD        (1ul << 21)
+
+/* Bit 22 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags */
+#define KBASE_REG_RESERVED_BIT_22   (1ul << 22)
+
+/* The top of the initial commit is aligned to extent pages.
+ * Extent must be a power of 2 */
+#define KBASE_REG_TILER_ALIGN_TOP   (1ul << 23)
+
+/* Memory is handled by JIT - user space should not be able to free it */
+#define KBASE_REG_JIT               (1ul << 24)
+
+/* Memory has permanent kernel side mapping */
+#define KBASE_REG_PERMANENT_KERNEL_MAPPING (1ul << 25)
+
+#define KBASE_REG_ZONE_SAME_VA      KBASE_REG_ZONE(0)
+
+/* only used with 32-bit clients */
+/*
+ * On a 32bit platform, custom VA should be wired from 4GB
+ * to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
+ * limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
+ * So we put the default limit to the maximum possible on Linux and shrink
+ * it down, if required by the GPU, during initialization.
+ */
+
+#define KBASE_REG_ZONE_CUSTOM_VA         KBASE_REG_ZONE(1)
+#define KBASE_REG_ZONE_CUSTOM_VA_BASE    (0x100000000ULL >> PAGE_SHIFT)
+#define KBASE_REG_ZONE_CUSTOM_VA_SIZE    (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
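+/*
+ * Worked example (editorial, assuming 4 kB pages, i.e. PAGE_SHIFT == 12):
+ * the base is 0x100000000 >> 12 = 0x100000 pages (the 4 GB mark) and the
+ * size is (2^44 >> 12) - 0x100000 pages, so the zone spans GPU VA from 4 GB
+ * up to 16 TB before any shrinking required by the GPU.
+ */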
+/* end 32-bit clients only */
+
+/* The starting address and size of the GPU-executable zone are dynamic
+ * and depend on the platform and the number of pages requested by the
+ * user process, with an upper limit of 4 GB.
+ */
+#define KBASE_REG_ZONE_EXEC_VA           KBASE_REG_ZONE(2)
+#define KBASE_REG_ZONE_EXEC_VA_MAX_PAGES ((1ULL << 32) >> PAGE_SHIFT) /* 4 GB */
+
+
+	unsigned long flags;
+
+	size_t extent; /* nr of pages alloc'd on PF */
+
+	struct kbase_mem_phy_alloc *cpu_alloc; /* the one alloc object we mmap to the CPU when mapping this region */
+	struct kbase_mem_phy_alloc *gpu_alloc; /* the one alloc object we mmap to the GPU when mapping this region */
+
+	/* List head used to store the region in the JIT allocation pool */
+	struct list_head jit_node;
+	/* The last JIT usage ID for this region */
+	u16 jit_usage_id;
+	/* The JIT bin this allocation came from */
+	u8 jit_bin_id;
+};
+
+/* Common functions */
+static inline struct tagged_addr *kbase_get_cpu_phy_pages(
+		struct kbase_va_region *reg)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+	return reg->cpu_alloc->pages;
+}
+
+static inline struct tagged_addr *kbase_get_gpu_phy_pages(
+		struct kbase_va_region *reg)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+	return reg->gpu_alloc->pages;
+}
+
+static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	/* if no alloc object the backed size naturally is 0 */
+	if (!reg->cpu_alloc)
+		return 0;
+
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
+
+	return reg->cpu_alloc->nents;
+}
+
+#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
+
+static inline struct kbase_mem_phy_alloc *kbase_alloc_create(
+		struct kbase_context *kctx, size_t nr_pages,
+		enum kbase_memory_type type)
+{
+	struct kbase_mem_phy_alloc *alloc;
+	size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
+	size_t per_page_size = sizeof(*alloc->pages);
+
+	/* Imported pages may have page private data already in use */
+	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+		alloc_size += nr_pages *
+				sizeof(*alloc->imported.user_buf.dma_addrs);
+		per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
+	}
+
+	/*
+	 * Prevent nr_pages*per_page_size + sizeof(*alloc) from
+	 * wrapping around.
+	 */
+	if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
+			/ per_page_size))
+		return ERR_PTR(-ENOMEM);
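+	/*
+	 * Illustrative numbers (editorial, assuming a 64-bit kernel where
+	 * both struct tagged_addr and dma_addr_t are 8 bytes): a USER_BUF
+	 * import has per_page_size == 16, so nr_pages larger than roughly
+	 * (SIZE_MAX - sizeof(*alloc)) / 16 is rejected by the check above.
+	 */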
+
+	/* Allocate based on the size to reduce internal fragmentation of vmem */
+	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+		alloc = vzalloc(alloc_size);
+	else
+		alloc = kzalloc(alloc_size, GFP_KERNEL);
+
+	if (!alloc)
+		return ERR_PTR(-ENOMEM);
+
+	if (type == KBASE_MEM_TYPE_NATIVE) {
+		alloc->imported.native.nr_struct_pages =
+				(alloc_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		kbase_process_page_usage_inc(kctx,
+				alloc->imported.native.nr_struct_pages);
+	}
+
+	/* Store allocation method */
+	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+		alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;
+
+	kref_init(&alloc->kref);
+	atomic_set(&alloc->gpu_mappings, 0);
+	alloc->nents = 0;
+	alloc->pages = (void *)(alloc + 1);
+	INIT_LIST_HEAD(&alloc->mappings);
+	alloc->type = type;
+
+	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
+		alloc->imported.user_buf.dma_addrs =
+				(void *) (alloc->pages + nr_pages);
+
+	return alloc;
+}
+
+static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
+		struct kbase_context *kctx)
+{
+	KBASE_DEBUG_ASSERT(reg);
+	KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);
+
+	reg->cpu_alloc = kbase_alloc_create(kctx, reg->nr_pages,
+			KBASE_MEM_TYPE_NATIVE);
+	if (IS_ERR(reg->cpu_alloc))
+		return PTR_ERR(reg->cpu_alloc);
+	else if (!reg->cpu_alloc)
+		return -ENOMEM;
+
+	reg->cpu_alloc->imported.native.kctx = kctx;
+	if (kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE)
+	    && (reg->flags & KBASE_REG_CPU_CACHED)) {
+		reg->gpu_alloc = kbase_alloc_create(kctx, reg->nr_pages,
+				KBASE_MEM_TYPE_NATIVE);
+		if (IS_ERR_OR_NULL(reg->gpu_alloc)) {
+			kbase_mem_phy_alloc_put(reg->cpu_alloc);
+			return -ENOMEM;
+		}
+		reg->gpu_alloc->imported.native.kctx = kctx;
+	} else {
+		reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+	}
+
+	mutex_lock(&kctx->jit_evict_lock);
+	INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
+	INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	reg->flags &= ~KBASE_REG_FREE;
+
+	return 0;
+}
+
+static inline u32 kbase_atomic_add_pages(u32 num_pages, atomic_t *used_pages)
+{
+	int new_val = atomic_add_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+	return new_val;
+}
+
+static inline u32 kbase_atomic_sub_pages(u32 num_pages, atomic_t *used_pages)
+{
+	int new_val = atomic_sub_return(num_pages, used_pages);
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+	kbase_trace_mali_total_alloc_pages_change((long long int)new_val);
+#endif
+	return new_val;
+}
+
+/*
+ * Max size for kbdev memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * Max size for kctx memory pool (in pages)
+ */
+#define KBASE_MEM_POOL_MAX_SIZE_KCTX  (SZ_64M >> PAGE_SHIFT)
+
+/*
+ * The order required for a 2MB page allocation (2^order * 4KB = 2MB)
+ */
+#define KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER	9
+
+/*
+ * The order required for a 4KB page allocation
+ */
+#define KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER	0
+
+/**
+ * kbase_mem_pool_init - Create a memory pool for a kbase device
+ * @pool:      Memory pool to initialize
+ * @max_size:  Maximum number of free pages the pool can hold
+ * @order:     Page order for physical page size (order=0=>4kB, order=9=>2MB)
+ * @kbdev:     Kbase device where memory is used
+ * @next_pool: Pointer to the next pool or NULL.
+ *
+ * Allocations from @pool are in whole pages. Each @pool has a free list where
+ * pages can be quickly allocated from. The free list is initially empty and
+ * filled whenever pages are freed back to the pool. The number of free pages
+ * in the pool will in general not exceed @max_size, but the pool may in
+ * certain corner cases grow above @max_size.
+ *
+ * If @next_pool is not NULL, we will allocate from @next_pool before going to
+ * the kernel allocator. Similarly, pages can spill over to @next_pool when
+ * @pool is full. Pages are zeroed before they spill over to another pool, to
+ * prevent leaking information between applications.
+ *
+ * A shrinker is registered so that Linux mm can reclaim pages from the pool as
+ * needed.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+		size_t max_size,
+		size_t order,
+		struct kbase_device *kbdev,
+		struct kbase_mem_pool *next_pool);
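+/*
+ * Minimal usage sketch (editorial; the kctx/kbdev pool members and error
+ * handling are assumed):
+ *
+ *	err = kbase_mem_pool_init(&kctx->mem_pool,
+ *			KBASE_MEM_POOL_MAX_SIZE_KCTX,
+ *			KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
+ *			kbdev, &kbdev->mem_pool);
+ *	if (!err) {
+ *		p = kbase_mem_pool_alloc(&kctx->mem_pool);
+ *		...
+ *		kbase_mem_pool_free(&kctx->mem_pool, p, false);
+ *		kbase_mem_pool_term(&kctx->mem_pool);
+ *	}
+ */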
+
+/**
+ * kbase_mem_pool_term - Destroy a memory pool
+ * @pool:  Memory pool to destroy
+ *
+ * Pages in the pool will spill over to @next_pool (if available) or freed to
+ * the kernel.
+ */
+void kbase_mem_pool_term(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_alloc - Allocate a page from memory pool
+ * @pool:  Memory pool to allocate from
+ *
+ * Allocations from the pool are made as follows:
+ * 1. If there are free pages in the pool, allocate a page from @pool.
+ * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
+ *    from @next_pool.
+ * 3. Return NULL if there is no memory in the pool.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ *
+ * Note : This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_alloc_locked() instead.
+ */
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_alloc_locked - Allocate a page from memory pool
+ * @pool:  Memory pool to allocate from
+ *
+ * If there are free pages in the pool, this function allocates a page from
+ * @pool. This function does not use @next_pool.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ *
+ * Note : Caller must hold the pool lock.
+ */
+struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_pool_free - Free a page to memory pool
+ * @pool:  Memory pool where page should be freed
+ * @page:  Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * Pages are freed to the pool as follows:
+ * 1. If @pool is not full, add @page to @pool.
+ * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
+ *    @next_pool.
+ * 3. Finally, free @page to the kernel.
+ *
+ * Note : This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_free_locked() instead.
+ */
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
+		bool dirty);
+
+/**
+ * kbase_mem_pool_free_locked - Free a page to memory pool
+ * @pool:  Memory pool where page should be freed
+ * @p:     Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * If @pool is not full, this function adds @page to @pool. Otherwise, @page is
+ * freed to the kernel. This function does not use @next_pool.
+ *
+ * Note : Caller must hold the pool lock.
+ */
+void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
+		bool dirty);
+
+/**
+ * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
+ * @pool:     Memory pool to allocate from
+ * @nr_pages: Number of pages to allocate
+ * @pages:    Pointer to array where the physical address of the allocated
+ *            pages will be stored.
+ * @partial_allowed: Whether allocating fewer pages than requested is allowed
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
+ *
+ * Return:
+ * On success number of pages allocated (could be less than nr_pages if
+ * partial_allowed).
+ * On error an error code.
+ *
+ * Note : This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_alloc_pages_locked() instead.
+ *
+ * The caller must not hold vm_lock, as this could cause a deadlock if
+ * the kernel OoM killer runs. If the caller must allocate pages while holding
+ * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
+ */
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+		struct tagged_addr *pages, bool partial_allowed);
+
+/**
+ * kbase_mem_pool_alloc_pages_locked - Allocate pages from memory pool
+ * @pool:        Memory pool to allocate from
+ * @nr_4k_pages: Number of pages to allocate
+ * @pages:       Pointer to array where the physical address of the allocated
+ *               pages will be stored.
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages. This
+ * version does not allocate new pages from the kernel, and therefore will never
+ * trigger the OoM killer. Therefore, it can be run while the vm_lock is held.
+ *
+ * As new pages can not be allocated, the caller must ensure there are
+ * sufficient pages in the pool. Usage of this function should look like :
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   kbase_mem_pool_lock(pool)
+ *   while (kbase_mem_pool_size(pool) < pages_required) {
+ *     kbase_mem_pool_unlock(pool)
+ *     kbase_gpu_vm_unlock(kctx);
+ *     kbase_mem_pool_grow(pool)
+ *     kbase_gpu_vm_lock(kctx);
+ *     kbase_mem_pool_lock(pool)
+ *   }
+ *   kbase_mem_pool_alloc_pages_locked(pool)
+ *   kbase_mem_pool_unlock(pool)
+ *   Perform other processing that requires vm_lock...
+ *   kbase_gpu_vm_unlock(kctx);
+ *
+ * This ensures that the pool can be grown to the required size and that the
+ * allocation can complete without another thread using the newly grown pages.
+ *
+ * Return:
+ * On success number of pages allocated.
+ * On error an error code.
+ *
+ * Note : Caller must hold the pool lock.
+ */
+int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_4k_pages, struct tagged_addr *pages);
+
+/**
+ * kbase_mem_pool_free_pages - Free pages to memory pool
+ * @pool:     Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages:    Pointer to array holding the physical addresses of the pages to
+ *            free.
+ * @dirty:    Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ *             the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+		struct tagged_addr *pages, bool dirty, bool reclaimed);
+
+/**
+ * kbase_mem_pool_free_pages_locked - Free pages to memory pool
+ * @pool:     Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages:    Pointer to array holding the physical addresses of the pages to
+ *            free.
+ * @dirty:    Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ *             the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_pages, struct tagged_addr *pages, bool dirty,
+		bool reclaimed);
+
+/**
+ * kbase_mem_pool_size - Get number of free pages in memory pool
+ * @pool:  Memory pool to inspect
+ *
+ * Note: the size of the pool may in certain corner cases exceed @max_size!
+ *
+ * Return: Number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
+{
+	return READ_ONCE(pool->cur_size);
+}
+
+/**
+ * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
+ * @pool:  Memory pool to inspect
+ *
+ * Return: Maximum number of free pages in the pool
+ */
+static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
+{
+	return pool->max_size;
+}
+
+
+/**
+ * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
+ * @pool:     Memory pool to inspect
+ * @max_size: Maximum number of free pages the pool can hold
+ *
+ * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
+ * For details see kbase_mem_pool_shrink().
+ */
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
+
+/**
+ * kbase_mem_pool_grow - Grow the pool
+ * @pool:       Memory pool to grow
+ * @nr_to_grow: Number of pages to add to the pool
+ *
+ * Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
+ * become larger than the maximum size specified.
+ *
+ * Returns: 0 on success, -ENOMEM if unable to allocate sufficient pages
+ */
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+
+/**
+ * kbase_mem_pool_trim - Grow or shrink the pool to a new size
+ * @pool:     Memory pool to trim
+ * @new_size: New number of pages in the pool
+ *
+ * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
+ * not above the max_size for the pool.
+ * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
+ */
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
+
+/**
+ * kbase_mem_pool_mark_dying - Mark that this pool is dying
+ * @pool:     Memory pool
+ *
+ * This will cause any ongoing allocation operations (e.g. growing on page fault)
+ * to be terminated.
+ */
+void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_mem_alloc_page - Allocate a new page for a device
+ * @pool:  Memory pool to allocate a page from
+ *
+ * Most uses should use kbase_mem_pool_alloc to allocate a page. However that
+ * function can fail in the event the pool is empty.
+ *
+ * Return: A new page or NULL if no memory
+ */
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool);
+
+/**
+ * kbase_region_tracker_init - Initialize the region tracker data structure
+ * @kctx: kbase context
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_region_tracker_init(struct kbase_context *kctx);
+
+/**
+ * kbase_region_tracker_init_jit - Initialize the JIT region
+ * @kctx: kbase context
+ * @jit_va_pages: Size of the JIT region in pages
+ * @max_allocations: Maximum number of allocations allowed for the JIT region
+ * @trim_level: Trim level for the JIT region
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
+		u8 max_allocations, u8 trim_level);
+
+/**
+ * kbase_region_tracker_init_exec - Initialize the EXEC_VA region
+ * @kctx: kbase context
+ * @exec_va_pages: Size of the EXEC_VA region in pages.
+ *                 It must not be greater than 4 GB.
+ *
+ * Return: 0 if success, negative error code otherwise.
+ */
+int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages);
+
+/**
+ * kbase_region_tracker_term - Terminate the region tracker for the context
+ * @kctx: kbase context
+ */
+void kbase_region_tracker_term(struct kbase_context *kctx);
+
+/**
+ * kbase_region_tracker_term_rbtree - Free memory for a region tracker
+ *
+ * This will free all the regions within the region tracker
+ *
+ * @rbtree: Region tracker tree root
+ */
+void kbase_region_tracker_term_rbtree(struct rb_root *rbtree);
+
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
+		struct kbase_context *kctx, u64 gpu_addr);
+struct kbase_va_region *kbase_find_region_enclosing_address(
+		struct rb_root *rbtree, u64 gpu_addr);
+
+/**
+ * @brief Check that a pointer is actually a valid region.
+ *
+ * Must be called with context lock held.
+ */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(
+		struct kbase_context *kctx, u64 gpu_addr);
+struct kbase_va_region *kbase_find_region_base_address(struct rb_root *rbtree,
+		u64 gpu_addr);
+
+struct kbase_va_region *kbase_alloc_free_region(struct rb_root *rbtree,
+		u64 start_pfn, size_t nr_pages, int zone);
+void kbase_free_alloced_region(struct kbase_va_region *reg);
+int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg,
+		u64 addr, size_t nr_pages, size_t align);
+int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
+		struct kbase_va_region *reg, u64 addr, size_t nr_pages,
+		size_t align);
+int kbase_remove_va_region(struct kbase_va_region *reg);
+
+bool kbase_check_alloc_flags(unsigned long flags);
+bool kbase_check_import_flags(unsigned long flags);
+
+/**
+ * kbase_check_alloc_sizes - check user space sizes parameters for an
+ *                           allocation
+ *
+ * @kctx:         kbase context
+ * @flags:        The flags passed from user space
+ * @va_pages:     The size of the requested region, in pages.
+ * @commit_pages: Number of pages to commit initially.
+ * @extent:       Number of pages to grow by on GPU page fault and/or alignment
+ *                (depending on flags)
+ *
+ * Makes checks on the size parameters passed in from user space for a memory
+ * allocation call, with respect to the flags requested.
+ *
+ * Return: 0 if sizes are valid for these flags, negative error code otherwise
+ */
+int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
+		u64 va_pages, u64 commit_pages, u64 extent);
+
+/**
+ * kbase_update_region_flags - Convert user space flags to kernel region flags
+ *
+ * @kctx:  kbase context
+ * @reg:   The region to update the flags on
+ * @flags: The flags passed from user space
+ *
+ * The user space flag BASE_MEM_COHERENT_SYSTEM_REQUIRED will be rejected and
+ * this function will fail if the system does not support system coherency.
+ *
+ * Return: 0 if successful, -EINVAL if the flags are not supported
+ */
+int kbase_update_region_flags(struct kbase_context *kctx,
+		struct kbase_va_region *reg, unsigned long flags);
+
+void kbase_gpu_vm_lock(struct kbase_context *kctx);
+void kbase_gpu_vm_unlock(struct kbase_context *kctx);
+
+int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
+
+/**
+ * kbase_mmu_init - Initialise an object representing GPU page tables
+ *
+ * The structure should be terminated using kbase_mmu_term()
+ *
+ * @kbdev: kbase device
+ * @mmut:  structure to initialise
+ * @kctx:  optional kbase context, may be NULL if this set of MMU tables is not
+ *         associated with a context
+ */
+int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+		struct kbase_context *kctx);
+/**
+ * kbase_mmu_term - Terminate an object representing GPU page tables
+ *
+ * This will free any page tables that have been allocated
+ *
+ * @kbdev: kbase device
+ * @mmut:  kbase_mmu_table to be destroyed
+ */
+void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
+
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
+				    struct kbase_mmu_table *mmut,
+				    const u64 start_vpfn,
+				    struct tagged_addr *phys, size_t nr,
+				    unsigned long flags);
+int kbase_mmu_insert_pages(struct kbase_device *kbdev,
+			   struct kbase_mmu_table *mmut, u64 vpfn,
+			   struct tagged_addr *phys, size_t nr,
+			   unsigned long flags, int as_nr);
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr phys, size_t nr,
+					unsigned long flags);
+
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
+			     struct kbase_mmu_table *mmut, u64 vpfn,
+			     size_t nr, int as_nr);
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
+			   struct tagged_addr *phys, size_t nr,
+			   unsigned long flags);
+
+/**
+ * @brief Register region and map it on the GPU.
+ *
+ * Call kbase_add_va_region() and map the region on the GPU.
+ */
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+
+/**
+ * @brief Remove the region from the GPU and unregister it.
+ *
+ * Must be called with context lock held.
+ */
+int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * kbase_mmu_update - Configure an address space on the GPU to the specified
+ *                    MMU tables
+ *
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ *
+ * @kbdev: Kbase device structure
+ * @mmut:  The set of MMU tables to be configured on the address space
+ * @as_nr: The address space to be configured
+ */
+void kbase_mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+		int as_nr);
+
+/**
+ * kbase_mmu_disable() - Disable the MMU for a previously active kbase context.
+ * @kctx:	Kbase context
+ *
+ * Disable the MMU and perform the required cache maintenance to remove all
+ * data belonging to the provided kbase context from the GPU caches.
+ *
+ * The caller has the following locking conditions:
+ * - It must hold kbase_device->mmu_hw_mutex
+ * - It must hold the hwaccess_lock
+ */
+void kbase_mmu_disable(struct kbase_context *kctx);
+
+/**
+ * kbase_mmu_disable_as() - Set the MMU to unmapped mode for the specified
+ * address space.
+ * @kbdev:	Kbase device
+ * @as_nr:	The address space number to set to unmapped.
+ *
+ * This function must only be called during reset/power-up, and it is used to
+ * ensure the registers are in a known state.
+ *
+ * The caller must hold kbdev->mmu_hw_mutex.
+ */
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+/** Dump the MMU tables to a buffer
+ *
+ * This function allocates a buffer (of @c nr_pages pages) to hold a dump of the MMU tables and fills it. If the
+ * buffer is too small then the return value will be NULL.
+ *
+ * The GPU vm lock must be held when calling this function.
+ *
+ * The buffer returned should be freed with @ref vfree when it is no longer required.
+ *
+ * @param[in]   kctx        The kbase context to dump
+ * @param[in]   nr_pages    The number of pages to allocate for the buffer.
+ *
+ * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too
+ * small)
+ */
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
+
+/**
+ * kbase_sync_now - Perform cache maintenance on a memory region
+ *
+ * @kctx: The kbase context of the region
+ * @sset: A syncset structure describing the region and direction of the
+ *        synchronisation required
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset);
+void kbase_sync_single(struct kbase_context *kctx, struct tagged_addr cpu_pa,
+		struct tagged_addr gpu_pa, off_t offset, size_t size,
+		enum kbase_sync_type sync_fn);
+void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+
+/* OS specific functions */
+int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
+int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
+void kbase_os_mem_map_lock(struct kbase_context *kctx);
+void kbase_os_mem_map_unlock(struct kbase_context *kctx);
+
+/**
+ * @brief Update the memory allocation counters for the current process
+ *
+ * OS specific call to update the current memory allocation counters for the current process with
+ * the supplied delta.
+ *
+ * @param[in] kctx  The kbase context
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);
+
+/**
+ * @brief Add to the memory allocation counters for the current process
+ *
+ * OS specific call to add to the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx  The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
+{
+	kbasep_os_process_page_usage_update(kctx, pages);
+}
+
+/**
+ * @brief Subtract from the memory allocation counters for the current process
+ *
+ * OS specific call to subtract from the current memory allocation counters for the current process by
+ * the supplied amount.
+ *
+ * @param[in] kctx  The kernel base context used for the allocation.
+ * @param[in] pages The desired delta to apply to the memory usage counters.
+ */
+
+static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
+{
+	kbasep_os_process_page_usage_update(kctx, 0 - pages);
+}
+
+/**
+ * kbasep_find_enclosing_cpu_mapping_offset() - Find the offset of the CPU
+ * mapping of a memory allocation containing a given address range
+ *
+ * Searches for a CPU mapping of any part of any region that fully encloses the
+ * CPU virtual address range specified by @uaddr and @size. Returns a failure
+ * indication if only part of the address range lies within a CPU mapping.
+ *
+ * @kctx:      The kernel base context used for the allocation.
+ * @uaddr:     Start of the CPU virtual address range.
+ * @size:      Size of the CPU virtual address range (in bytes).
+ * @offset:    The offset from the start of the allocation to the specified CPU
+ *             virtual address.
+ *
+ * Return: 0 if offset was obtained successfully. Error code otherwise.
+ */
+int kbasep_find_enclosing_cpu_mapping_offset(
+		struct kbase_context *kctx,
+		unsigned long uaddr, size_t size, u64 *offset);
+
+/**
+ * kbasep_find_enclosing_gpu_mapping_start_and_offset() - Find the address of
+ * the start of GPU virtual memory region which encloses @gpu_addr for the
+ * @size length in bytes
+ *
+ * Searches for the memory region in GPU virtual memory space which contains
+ * the region defined by the @gpu_addr and @size, where @gpu_addr is the
+ * beginning and @size the length in bytes of the provided region. If found,
+ * the location of the start address of the GPU virtual memory region is
+ * passed in @start pointer and the location of the offset of the region into
+ * the GPU virtual memory region is passed in @offset pointer.
+ *
+ * @kctx:	The kernel base context within which the memory is searched.
+ * @gpu_addr:	GPU virtual address for which the region is sought; defines
+ *              the beginning of the provided region.
+ * @size:       The length (in bytes) of the provided region for which the
+ *              GPU virtual memory region is sought.
+ * @start:      Pointer to the location where the address of the start of
+ *              the found GPU virtual memory region is.
+ * @offset:     Pointer to the location where the offset of @gpu_addr into
+ *              the found GPU virtual memory region is.
+ */
+int kbasep_find_enclosing_gpu_mapping_start_and_offset(
+		struct kbase_context *kctx,
+		u64 gpu_addr, size_t size, u64 *start, u64 *offset);
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+
+/**
+ * kbase_alloc_phy_pages_helper - Allocates physical pages.
+ * @alloc:              allocation object to add pages to
+ * @nr_pages_requested: number of physical pages to allocate
+ *
+ * Allocates \a nr_pages_requested and updates the alloc object.
+ *
+ * Return: 0 if all pages have been successfully allocated. Error code otherwise
+ *
+ * Note : The caller must not hold vm_lock, as this could cause a deadlock if
+ * the kernel OoM killer runs. If the caller must allocate pages while holding
+ * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
+ *
+ * This function cannot be used from interrupt context
+ */
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
+		size_t nr_pages_requested);
+
+/**
+ * kbase_alloc_phy_pages_helper_locked - Allocates physical pages.
+ * @alloc:              allocation object to add pages to
+ * @pool:               Memory pool to allocate from
+ * @nr_pages_requested: number of physical pages to allocate
+ * @prealloc_sa:        Information about the partial allocation if the amount
+ *                      of memory requested is not a multiple of 2MB. One
+ *                      instance of struct kbase_sub_alloc must be allocated by
+ *                      the caller iff CONFIG_MALI_2MB_ALLOC is enabled.
+ *
+ * Allocates \a nr_pages_requested and updates the alloc object. This function
+ * does not allocate new pages from the kernel, and therefore will never trigger
+ * the OoM killer. Therefore, it can be run while the vm_lock is held.
+ *
+ * As new pages cannot be allocated, the caller must ensure there are
+ * sufficient pages in the pool. Usage of this function should look like:
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   kbase_mem_pool_lock(pool)
+ *   while (kbase_mem_pool_size(pool) < pages_required) {
+ *     kbase_mem_pool_unlock(pool)
+ *     kbase_gpu_vm_unlock(kctx);
+ *     kbase_mem_pool_grow(pool)
+ *     kbase_gpu_vm_lock(kctx);
+ *     kbase_mem_pool_lock(pool)
+ *   }
+ *   kbase_alloc_phy_pages_helper_locked(pool)
+ *   kbase_mem_pool_unlock(pool)
+ *   Perform other processing that requires vm_lock...
+ *   kbase_gpu_vm_unlock(kctx);
+ *
+ * This ensures that the pool can be grown to the required size and that the
+ * allocation can complete without another thread using the newly grown pages.
+ *
+ * If CONFIG_MALI_2MB_ALLOC is defined and the allocation is >= 2MB, then
+ * @pool must be alloc->imported.native.kctx->lp_mem_pool. Otherwise it must be
+ * alloc->imported.native.kctx->mem_pool.
+ * @prealloc_sa is used to manage the non-2MB sub-allocation. It has to be
+ * pre-allocated because we must not sleep (due to the usage of kmalloc())
+ * whilst holding pool->pool_lock.
+ * @prealloc_sa shall be set to NULL if it has been consumed by this function
+ * to indicate that the caller must not free it.
+ *
+ * Return: Pointer to array of allocated pages. NULL on failure.
+ *
+ * Note: The caller must hold pool->pool_lock.
+ */
+struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
+		struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
+		size_t nr_pages_requested,
+		struct kbase_sub_alloc **prealloc_sa);
+
+/**
+ * kbase_free_phy_pages_helper - Free physical pages.
+ * @alloc:            Allocation object to free pages from
+ * @nr_pages_to_free: Number of physical pages to free
+ *
+ * Frees @nr_pages_to_free pages and updates the alloc object.
+ *
+ * Return: 0 on success, otherwise an error code.
+ */
+int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
+
+/**
+ * kbase_free_phy_pages_helper_locked - Free pages allocated with
+ *                                      kbase_alloc_phy_pages_helper_locked()
+ * @alloc:            Allocation object to free pages from
+ * @pool:             Memory pool to return freed pages to
+ * @pages:            Pages allocated by kbase_alloc_phy_pages_helper_locked()
+ * @nr_pages_to_free: Number of physical pages to free
+ *
+ * This function atomically frees pages allocated with
+ * kbase_alloc_phy_pages_helper_locked(). @pages is the pointer to the page
+ * array that is returned by that function. @pool must be the pool that the
+ * pages were originally allocated from.
+ *
+ * If the mem_pool has been unlocked since the allocation then
+ * kbase_free_phy_pages_helper() should be used instead.
+ */
+void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
+		struct kbase_mem_pool *pool, struct tagged_addr *pages,
+		size_t nr_pages_to_free);
+
+static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
+{
+	SetPagePrivate(p);
+	if (sizeof(dma_addr_t) > sizeof(p->private)) {
+		/* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
+		 * private field stays the same. So we have to be clever and
+		 * use the fact that we only store DMA addresses of whole pages,
+		 * so the low bits should be zero */
+		KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
+		set_page_private(p, dma_addr >> PAGE_SHIFT);
+	} else {
+		set_page_private(p, dma_addr);
+	}
+}
+
+static inline dma_addr_t kbase_dma_addr(struct page *p)
+{
+	if (sizeof(dma_addr_t) > sizeof(p->private))
+		return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;
+
+	return (dma_addr_t)page_private(p);
+}
+
+static inline void kbase_clear_dma_addr(struct page *p)
+{
+	ClearPagePrivate(p);
+}
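+
+/*
+ * Illustrative example (an editorial sketch, not part of the original driver,
+ * assuming 4 KiB pages so PAGE_SHIFT == 12): on 32-bit ARM with LPAE, a
+ * dma_addr_t such as 0x123456000 does not fit in the 32-bit page->private
+ * field. Because only whole, page-aligned pages are stored,
+ * kbase_set_dma_addr() keeps 0x123456 (the address shifted right by
+ * PAGE_SHIFT) and kbase_dma_addr() shifts it back, so the round trip is
+ * lossless.
+ */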
+
+/**
+ * kbase_mmu_interrupt_process - Process a bus or page fault.
+ * @kbdev:  The kbase_device the fault happened on
+ * @kctx:   The kbase_context for the faulting address space, if one was found
+ * @as:     The address space that has the fault
+ * @fault:  Data relating to the fault
+ *
+ * This function will process a fault on a specific address space.
+ */
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_as *as,
+		struct kbase_fault *fault);
+
+/**
+ * page_fault_worker - Process a page fault.
+ * @data: work_struct passed by queue_work()
+ */
+void page_fault_worker(struct work_struct *data);
+
+/**
+ * bus_fault_worker - Process a bus fault.
+ * @data: work_struct passed by queue_work()
+ */
+void bus_fault_worker(struct work_struct *data);
+
+/**
+ * kbase_flush_mmu_wqs - Flush MMU workqueues.
+ * @kbdev: Device pointer
+ *
+ * This function will cause any outstanding page or bus faults to be processed.
+ * It should be called prior to powering off the GPU.
+ */
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev);
+
+/**
+ * kbase_sync_single_for_device - update physical memory and give GPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir:  DMA data direction
+ */
+void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir);
+
+/**
+ * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
+ * @kbdev: Device pointer
+ * @handle: DMA address of region
+ * @size: Size of region to sync
+ * @dir:  DMA data direction
+ */
+void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir);
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * kbase_jit_debugfs_init - Add per context debugfs entry for JIT.
+ * @kctx: kbase context
+ */
+void kbase_jit_debugfs_init(struct kbase_context *kctx);
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * kbase_jit_init - Initialize the JIT memory pool management
+ * @kctx: kbase context
+ *
+ * Returns zero on success or negative error number on failure.
+ */
+int kbase_jit_init(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_allocate - Allocate JIT memory
+ * @kctx: kbase context
+ * @info: JIT allocation information
+ *
+ * Return: JIT allocation on success or NULL on failure.
+ */
+struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
+		struct base_jit_alloc_info *info);
+
+/**
+ * kbase_jit_free - Free a JIT allocation
+ * @kctx: kbase context
+ * @reg: JIT allocation
+ *
+ * Frees a JIT allocation and places it into the free pool for later reuse.
+ */
+void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing
+ * @reg: JIT allocation
+ */
+void kbase_jit_backing_lost(struct kbase_va_region *reg);
+
+/**
+ * kbase_jit_evict - Evict a JIT allocation from the pool
+ * @kctx: kbase context
+ *
+ * Evict the least recently used JIT allocation from the pool. This can be
+ * required if normal VA allocations are failing due to VA exhaustion.
+ *
+ * Return: True if a JIT allocation was freed, false otherwise.
+ */
+bool kbase_jit_evict(struct kbase_context *kctx);
+
+/**
+ * kbase_jit_term - Terminate the JIT memory pool management
+ * @kctx: kbase context
+ */
+void kbase_jit_term(struct kbase_context *kctx);
+
+/**
+ * kbase_has_exec_va_zone - EXEC_VA zone predicate
+ * @kctx: kbase context
+ *
+ * Determine whether an EXEC_VA zone has been created for the GPU address space
+ * of the given kbase context.
+ *
+ * Return: True if the kbase context has an EXEC_VA zone.
+ */
+bool kbase_has_exec_va_zone(struct kbase_context *kctx);
+
+/**
+ * kbase_map_external_resource - Map an external resource to the GPU.
+ * @kctx:              kbase context.
+ * @reg:               The region to map.
+ * @locked_mm:         The mm_struct which has been locked for this operation.
+ *
+ * Return: The physical allocation which backs the region on success or NULL
+ * on failure.
+ */
+struct kbase_mem_phy_alloc *kbase_map_external_resource(
+		struct kbase_context *kctx, struct kbase_va_region *reg,
+		struct mm_struct *locked_mm);
+
+/**
+ * kbase_unmap_external_resource - Unmap an external resource from the GPU.
+ * @kctx:  kbase context.
+ * @reg:   The region to unmap or NULL if it has already been released.
+ * @alloc: The physical allocation being unmapped.
+ */
+void kbase_unmap_external_resource(struct kbase_context *kctx,
+		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_sticky_resource_init - Initialize sticky resource management.
+ * @kctx: kbase context
+ *
+ * Returns zero on success or negative error number on failure.
+ */
+int kbase_sticky_resource_init(struct kbase_context *kctx);
+
+/**
+ * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource.
+ * @kctx:     kbase context.
+ * @gpu_addr: The GPU address of the external resource.
+ *
+ * Return: The metadata object which represents the binding between the
+ * external resource and the kbase context on success or NULL on failure.
+ */
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
+		struct kbase_context *kctx, u64 gpu_addr);
+
+/**
+ * kbase_sticky_resource_release - Release a reference on a sticky resource.
+ * @kctx:     kbase context.
+ * @meta:     Binding metadata.
+ * @gpu_addr: GPU address of the external resource.
+ *
+ * If meta is NULL then gpu_addr will be used to scan the metadata list and
+ * find the matching metadata (if any), otherwise the provided meta will be
+ * used and gpu_addr will be ignored.
+ *
+ * Return: True if the release found the metadata and the reference was dropped.
+ */
+bool kbase_sticky_resource_release(struct kbase_context *kctx,
+		struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr);
+
+/**
+ * kbase_sticky_resource_term - Terminate sticky resource management.
+ * @kctx: kbase context
+ */
+void kbase_sticky_resource_term(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_pool_lock - Lock a memory pool
+ * @pool: Memory pool to lock
+ */
+static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
+{
+	spin_lock(&pool->pool_lock);
+}
+
+/**
+ * kbase_mem_pool_unlock - Release a memory pool
+ * @pool: Memory pool to unlock
+ */
+static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
+{
+	spin_unlock(&pool->pool_lock);
+}
+
+/**
+ * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
+ * @alloc: The physical allocation
+ */
+void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc);
+
+
+#endif				/* _KBASE_MEM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
new file mode 100644
index 0000000..f16fb75
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
@@ -0,0 +1,2493 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_linux.c
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#include <linux/dma-attrs.h>
+#endif /* LINUX_VERSION_CODE >= 3.5.0 && < 4.8.0 */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) */
+#include <linux/shrinker.h>
+#include <linux/cache.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_ioctl.h>
+
+
+static int kbase_vmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_va_region *reg, u64 offset_bytes, size_t size,
+		struct kbase_vmap_struct *map);
+static void kbase_vunmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map);
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
+
+/* Retrieve the associated region pointer if the GPU address corresponds to
+ * one of the event memory pages. The enclosing region, if found, shouldn't
+ * have been marked as free.
+ */
+static struct kbase_va_region *kbase_find_event_mem_region(
+			struct kbase_context *kctx, u64 gpu_addr)
+{
+
+	return NULL;
+}
+
+/**
+ * kbase_phy_alloc_mapping_init - Initialize the kernel side permanent mapping
+ *                                of the physical allocation belonging to a
+ *                                region
+ * @kctx:  The kernel base context @reg belongs to.
+ * @reg:   The region whose physical allocation is to be mapped
+ * @vsize: The size of the requested region, in pages
+ * @size:  The size in pages initially committed to the region
+ *
+ * Return: 0 on success, otherwise an error code indicating failure
+ *
+ * Maps the physical allocation backing a non-free @reg, so it may be
+ * accessed directly from the kernel. This is only supported for physical
+ * allocations of type KBASE_MEM_TYPE_NATIVE, and will fail for other types of
+ * physical allocation.
+ *
+ * The mapping is stored directly in the allocation that backs @reg. The
+ * refcount is not incremented at this point. Instead, use of the mapping should
+ * be surrounded by kbase_phy_alloc_mapping_get() and
+ * kbase_phy_alloc_mapping_put() to ensure it does not disappear whilst the
+ * client is accessing it.
+ *
+ * Both cached and uncached regions are allowed, but any sync operations are the
+ * responsibility of the client using the permanent mapping.
+ *
+ * A number of checks are made to ensure that a region that needs a permanent
+ * mapping can actually be supported:
+ * - The region must be created as fully backed
+ * - The region must not be growable
+ *
+ * This function will fail if those checks are not satisfied.
+ *
+ * On success, the region will also be forced into a certain kind:
+ * - It will no longer be growable
+ */
+static int kbase_phy_alloc_mapping_init(struct kbase_context *kctx,
+		struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+	size_t size_bytes = (size << PAGE_SHIFT);
+	struct kbase_vmap_struct *kern_mapping;
+	int err = 0;
+
+	/* Can only map in regions that are always fully committed
+	 * Don't setup the mapping twice
+	 * Only support KBASE_MEM_TYPE_NATIVE allocations
+	 */
+	if (vsize != size || reg->cpu_alloc->permanent_map != NULL ||
+			reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+		return -EINVAL;
+
+	if (size > (KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES -
+			kctx->permanent_mapped_pages)) {
+		dev_warn(kctx->kbdev->dev, "Request for %llu more pages of memory needing a permanent mapping would breach limit %lu, currently at %lu pages",
+				(u64)size,
+				KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES,
+				kctx->permanent_mapped_pages);
+		return -ENOMEM;
+	}
+
+	kern_mapping = kzalloc(sizeof(*kern_mapping), GFP_KERNEL);
+	if (!kern_mapping)
+		return -ENOMEM;
+
+	err = kbase_vmap_phy_pages(kctx, reg, 0u, size_bytes, kern_mapping);
+	if (err < 0)
+		goto vmap_fail;
+
+	/* No support for growing or shrinking mapped regions */
+	reg->flags &= ~KBASE_REG_GROWABLE;
+
+	reg->cpu_alloc->permanent_map = kern_mapping;
+	kctx->permanent_mapped_pages += size;
+
+	return 0;
+vmap_fail:
+	kfree(kern_mapping);
+	return err;
+}
+
+void kbase_phy_alloc_mapping_term(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc)
+{
+	WARN_ON(!alloc->permanent_map);
+	kbase_vunmap_phy_pages(kctx, alloc->permanent_map);
+	kfree(alloc->permanent_map);
+
+	alloc->permanent_map = NULL;
+
+	/* Mappings are only done on cpu_alloc, so don't need to worry about
+	 * this being reduced a second time if a separate gpu_alloc is
+	 * freed
+	 */
+	WARN_ON(alloc->nents > kctx->permanent_mapped_pages);
+	kctx->permanent_mapped_pages -= alloc->nents;
+}
+
+void *kbase_phy_alloc_mapping_get(struct kbase_context *kctx,
+		u64 gpu_addr,
+		struct kbase_vmap_struct **out_kern_mapping)
+{
+	struct kbase_va_region *reg;
+	void *kern_mem_ptr = NULL;
+	struct kbase_vmap_struct *kern_mapping;
+	u64 mapping_offset;
+
+	WARN_ON(!kctx);
+	WARN_ON(!out_kern_mapping);
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* First do a quick lookup in the list of event memory regions */
+	reg = kbase_find_event_mem_region(kctx, gpu_addr);
+
+	if (!reg) {
+		reg = kbase_region_tracker_find_region_enclosing_address(
+			kctx, gpu_addr);
+	}
+
+	if (reg == NULL || (reg->flags & KBASE_REG_FREE) != 0)
+		goto out_unlock;
+
+	kern_mapping = reg->cpu_alloc->permanent_map;
+	if (kern_mapping == NULL)
+		goto out_unlock;
+
+	mapping_offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
+
+	/* Refcount the allocations to prevent them disappearing */
+	WARN_ON(reg->cpu_alloc != kern_mapping->cpu_alloc);
+	WARN_ON(reg->gpu_alloc != kern_mapping->gpu_alloc);
+	(void)kbase_mem_phy_alloc_get(kern_mapping->cpu_alloc);
+	(void)kbase_mem_phy_alloc_get(kern_mapping->gpu_alloc);
+
+	kern_mem_ptr = (void *)(uintptr_t)((uintptr_t)kern_mapping->addr + mapping_offset);
+	*out_kern_mapping = kern_mapping;
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return kern_mem_ptr;
+}
+
+void kbase_phy_alloc_mapping_put(struct kbase_context *kctx,
+		struct kbase_vmap_struct *kern_mapping)
+{
+	WARN_ON(!kctx);
+	WARN_ON(!kern_mapping);
+
+	WARN_ON(kctx != kern_mapping->cpu_alloc->imported.native.kctx);
+	WARN_ON(kern_mapping != kern_mapping->cpu_alloc->permanent_map);
+
+	kbase_mem_phy_alloc_put(kern_mapping->cpu_alloc);
+	kbase_mem_phy_alloc_put(kern_mapping->gpu_alloc);
+
+	/* kern_mapping and the gpu/cpu phy allocs backing it must not be used
+	 * from now on
+	 */
+}
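+
+/*
+ * Illustrative usage of the permanent-mapping helpers above (an editorial
+ * sketch under assumed values, not a real caller): the pointer returned by
+ * kbase_phy_alloc_mapping_get() is only valid until the matching _put().
+ *
+ *   struct kbase_vmap_struct *mapping;
+ *   u32 *cpu_ptr = kbase_phy_alloc_mapping_get(kctx, gpu_addr, &mapping);
+ *
+ *   if (cpu_ptr) {
+ *           cpu_ptr[0] = 0;                              // access from kernel
+ *           kbase_phy_alloc_mapping_put(kctx, mapping);  // drop the refcount
+ *   }
+ */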
+
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
+		u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
+		u64 *gpu_va)
+{
+	int zone;
+	struct kbase_va_region *reg;
+	struct rb_root *rbtree;
+	struct device *dev;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(flags);
+	KBASE_DEBUG_ASSERT(gpu_va);
+
+	dev = kctx->kbdev->dev;
+	*gpu_va = 0; /* return 0 on failure */
+
+	if (!kbase_check_alloc_flags(*flags)) {
+		dev_warn(dev,
+				"kbase_mem_alloc called with bad flags (%llx)",
+				(unsigned long long)*flags);
+		goto bad_flags;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	if (unlikely(kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE))) {
+		/* Mask coherency flags if infinite cache is enabled to prevent
+		 * the skipping of syncs from BASE side.
+		 */
+		*flags &= ~(BASE_MEM_COHERENT_SYSTEM_REQUIRED |
+			    BASE_MEM_COHERENT_SYSTEM);
+	}
+#endif
+
+	if ((*flags & BASE_MEM_UNCACHED_GPU) != 0 &&
+			(*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0) {
+		/* Remove COHERENT_SYSTEM_REQUIRED flag if uncached GPU mapping is requested */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		dev_warn(dev, "kbase_mem_alloc call required coherent mem when unavailable");
+		goto bad_flags;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		/* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM;
+	}
+
+	if (kbase_check_alloc_sizes(kctx, *flags, va_pages, commit_pages, extent))
+		goto bad_sizes;
+
+	/* find out which VA zone to use */
+	if (*flags & BASE_MEM_SAME_VA) {
+		rbtree = &kctx->reg_rbtree_same;
+		zone = KBASE_REG_ZONE_SAME_VA;
+	} else if ((*flags & BASE_MEM_PROT_GPU_EX) && kbase_has_exec_va_zone(kctx)) {
+		rbtree = &kctx->reg_rbtree_exec;
+		zone = KBASE_REG_ZONE_EXEC_VA;
+	} else {
+		rbtree = &kctx->reg_rbtree_custom;
+		zone = KBASE_REG_ZONE_CUSTOM_VA;
+	}
+
+	reg = kbase_alloc_free_region(rbtree, 0, va_pages, zone);
+	if (!reg) {
+		dev_err(dev, "Failed to allocate free region");
+		goto no_region;
+	}
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto invalid_flags;
+
+	if (kbase_reg_prepare_native(reg, kctx) != 0) {
+		dev_err(dev, "Failed to prepare region");
+		goto prepare_failed;
+	}
+
+	if (*flags & (BASE_MEM_GROW_ON_GPF|BASE_MEM_TILER_ALIGN_TOP)) {
+		/* kbase_check_alloc_sizes() already checks extent is valid for
+		 * assigning to reg->extent */
+		reg->extent = extent;
+	} else {
+		reg->extent = 0;
+	}
+
+	if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {
+		dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
+				(unsigned long long)commit_pages,
+				(unsigned long long)va_pages);
+		goto no_mem;
+	}
+	reg->initial_commit = commit_pages;
+
+	kbase_gpu_vm_lock(kctx);
+
+	if (reg->flags & KBASE_REG_PERMANENT_KERNEL_MAPPING) {
+		/* Permanent kernel mappings must happen as soon as
+		 * reg->cpu_alloc->pages is ready. Currently this happens after
+		 * kbase_alloc_phy_pages(). If we move that to setup pages
+		 * earlier, also move this call too
+		 */
+		int err = kbase_phy_alloc_mapping_init(kctx, reg, va_pages,
+				commit_pages);
+		if (err < 0) {
+			kbase_gpu_vm_unlock(kctx);
+			goto no_kern_mapping;
+		}
+	}
+
+
+	/* mmap needed to setup VA? */
+	if (*flags & BASE_MEM_SAME_VA) {
+		unsigned long prot = PROT_NONE;
+		unsigned long va_size = va_pages << PAGE_SHIFT;
+		unsigned long va_map = va_size;
+		unsigned long cookie, cookie_nr;
+		unsigned long cpu_addr;
+
+		/* Bind to a cookie */
+		if (!kctx->cookies) {
+			dev_err(dev, "No cookies available for allocation!");
+			kbase_gpu_vm_unlock(kctx);
+			goto no_cookie;
+		}
+		/* return a cookie */
+		cookie_nr = __ffs(kctx->cookies);
+		kctx->cookies &= ~(1UL << cookie_nr);
+		BUG_ON(kctx->pending_regions[cookie_nr]);
+		kctx->pending_regions[cookie_nr] = reg;
+
+		kbase_gpu_vm_unlock(kctx);
+
+		/* relocate to correct base */
+		cookie = cookie_nr + PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		cookie <<= PAGE_SHIFT;
+
+		/*
+		 * 10.1-10.4 UKU userland relies on the kernel to call mmap.
+		 * For all other versions we can just return the cookie
+		 */
+		if (kctx->api_version < KBASE_API_VERSION(10, 1) ||
+		    kctx->api_version > KBASE_API_VERSION(10, 4)) {
+			*gpu_va = (u64) cookie;
+			return reg;
+		}
+		if (*flags & BASE_MEM_PROT_CPU_RD)
+			prot |= PROT_READ;
+		if (*flags & BASE_MEM_PROT_CPU_WR)
+			prot |= PROT_WRITE;
+
+		cpu_addr = vm_mmap(kctx->filp, 0, va_map, prot,
+				MAP_SHARED, cookie);
+
+		if (IS_ERR_VALUE(cpu_addr)) {
+			kbase_gpu_vm_lock(kctx);
+			kctx->pending_regions[cookie_nr] = NULL;
+			kctx->cookies |= (1UL << cookie_nr);
+			kbase_gpu_vm_unlock(kctx);
+			goto no_mmap;
+		}
+
+		*gpu_va = (u64) cpu_addr;
+	} else /* we control the VA */ {
+		if (kbase_gpu_mmap(kctx, reg, 0, va_pages, 1) != 0) {
+			dev_warn(dev, "Failed to map memory on GPU");
+			kbase_gpu_vm_unlock(kctx);
+			goto no_mmap;
+		}
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+
+		kbase_gpu_vm_unlock(kctx);
+	}
+
+	return reg;
+
+no_mmap:
+no_cookie:
+no_kern_mapping:
+no_mem:
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+invalid_flags:
+prepare_failed:
+	kfree(reg);
+no_region:
+bad_sizes:
+bad_flags:
+	return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mem_alloc);
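+
+/*
+ * Illustrative call into kbase_mem_alloc() (an editorial sketch under assumed
+ * flag values, not a real ioctl path): for SAME_VA allocations the value
+ * returned through gpu_va is either the CPU address (10.1-10.4 UKU clients)
+ * or a cookie that userspace must mmap() to finalise the mapping.
+ *
+ *   u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
+ *               BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+ *               BASE_MEM_SAME_VA;
+ *   u64 gpu_va;
+ *   struct kbase_va_region *reg =
+ *           kbase_mem_alloc(kctx, 16, 16, 0, &flags, &gpu_va);
+ */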
+
+int kbase_mem_query(struct kbase_context *kctx,
+		u64 gpu_addr, u64 query, u64 * const out)
+{
+	struct kbase_va_region *reg;
+	int ret = -EINVAL;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(out);
+
+	if (gpu_addr & ~PAGE_MASK) {
+		dev_warn(kctx->kbdev->dev, "mem_query: gpu_addr: passed parameter is invalid");
+		return -EINVAL;
+	}
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* Validate the region */
+	reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+	if (!reg || (reg->flags & KBASE_REG_FREE))
+		goto out_unlock;
+
+	switch (query) {
+	case KBASE_MEM_QUERY_COMMIT_SIZE:
+		if (reg->cpu_alloc->type != KBASE_MEM_TYPE_ALIAS) {
+			*out = kbase_reg_current_backed_size(reg);
+		} else {
+			size_t i;
+			struct kbase_aliased *aliased;
+			*out = 0;
+			aliased = reg->cpu_alloc->imported.alias.aliased;
+			for (i = 0; i < reg->cpu_alloc->imported.alias.nents; i++)
+				*out += aliased[i].length;
+		}
+		break;
+	case KBASE_MEM_QUERY_VA_SIZE:
+		*out = reg->nr_pages;
+		break;
+	case KBASE_MEM_QUERY_FLAGS:
+	{
+		*out = 0;
+		if (KBASE_REG_CPU_WR & reg->flags)
+			*out |= BASE_MEM_PROT_CPU_WR;
+		if (KBASE_REG_CPU_RD & reg->flags)
+			*out |= BASE_MEM_PROT_CPU_RD;
+		if (KBASE_REG_CPU_CACHED & reg->flags)
+			*out |= BASE_MEM_CACHED_CPU;
+		if (KBASE_REG_GPU_WR & reg->flags)
+			*out |= BASE_MEM_PROT_GPU_WR;
+		if (KBASE_REG_GPU_RD & reg->flags)
+			*out |= BASE_MEM_PROT_GPU_RD;
+		if (!(KBASE_REG_GPU_NX & reg->flags))
+			*out |= BASE_MEM_PROT_GPU_EX;
+		if (KBASE_REG_SHARE_BOTH & reg->flags)
+			*out |= BASE_MEM_COHERENT_SYSTEM;
+		if (KBASE_REG_SHARE_IN & reg->flags)
+			*out |= BASE_MEM_COHERENT_LOCAL;
+		if (kctx->api_version >= KBASE_API_VERSION(11, 2)) {
+			/* Prior to 11.2, these were known about by user-side
+			 * but we did not return them. Returning some of these
+			 * caused certain clients that were not expecting them
+			 * to fail, so we omit all of them as a special-case
+			 * for compatibility reasons */
+			if (KBASE_REG_PF_GROW & reg->flags)
+				*out |= BASE_MEM_GROW_ON_GPF;
+			if (KBASE_REG_SECURE & reg->flags)
+				*out |= BASE_MEM_SECURE;
+		}
+		if (KBASE_REG_TILER_ALIGN_TOP & reg->flags)
+			*out |= BASE_MEM_TILER_ALIGN_TOP;
+		if (!(KBASE_REG_GPU_CACHED & reg->flags))
+			*out |= BASE_MEM_UNCACHED_GPU;
+		if (KBASE_REG_GPU_VA_SAME_4GB_PAGE & reg->flags)
+			*out |= BASE_MEM_GPU_VA_SAME_4GB_PAGE;
+
+		WARN(*out & ~BASE_MEM_FLAGS_QUERYABLE,
+				"BASE_MEM_FLAGS_QUERYABLE needs updating\n");
+		*out &= BASE_MEM_FLAGS_QUERYABLE;
+		break;
+	}
+	default:
+		*out = 0;
+		goto out_unlock;
+	}
+
+	ret = 0;
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return ret;
+}
+
+/**
+ * kbase_mem_evictable_reclaim_count_objects - Count number of pages in the
+ * ephemeral memory eviction list.
+ * @s:        Shrinker
+ * @sc:       Shrinker control
+ *
+ * Return: Number of pages which can be freed.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_count_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_context *kctx;
+	struct kbase_mem_phy_alloc *alloc;
+	unsigned long pages = 0;
+
+	kctx = container_of(s, struct kbase_context, reclaim);
+
+	mutex_lock(&kctx->jit_evict_lock);
+
+	list_for_each_entry(alloc, &kctx->evict_list, evict_node)
+		pages += alloc->nents;
+
+	mutex_unlock(&kctx->jit_evict_lock);
+	return pages;
+}
+
+/**
+ * kbase_mem_evictable_reclaim_scan_objects - Scan the ephemeral memory eviction
+ * list for pages and try to reclaim them.
+ * @s:        Shrinker
+ * @sc:       Shrinker control
+ *
+ * Return: Number of pages freed (can be less than requested) or -1 if the
+ * shrinker failed to free pages in its pool.
+ *
+ * Note:
+ * This function accesses region structures without taking the region lock;
+ * this is required as the OOM killer can call the shrinker after the region
+ * lock has already been held.
+ * This is safe as we can guarantee that a region on the eviction list will
+ * not be freed (kbase_mem_free_region removes the allocation from the list
+ * before destroying it), or modified by other parts of the driver.
+ * The eviction list itself is guarded by the eviction lock and the MMU updates
+ * are protected by their own lock.
+ */
+static
+unsigned long kbase_mem_evictable_reclaim_scan_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_context *kctx;
+	struct kbase_mem_phy_alloc *alloc;
+	struct kbase_mem_phy_alloc *tmp;
+	unsigned long freed = 0;
+
+	kctx = container_of(s, struct kbase_context, reclaim);
+	mutex_lock(&kctx->jit_evict_lock);
+
+	list_for_each_entry_safe(alloc, tmp, &kctx->evict_list, evict_node) {
+		int err;
+
+		err = kbase_mem_shrink_gpu_mapping(kctx, alloc->reg,
+				0, alloc->nents);
+		if (err != 0) {
+			/*
+			 * Failed to remove GPU mapping, tell the shrinker
+			 * to stop trying to shrink our slab even though we
+			 * have pages in it.
+			 */
+			freed = -1;
+			goto out_unlock;
+		}
+
+		/*
+		 * Update alloc->evicted before freeing the backing so the
+		 * helper can determine that it needs to bypass the accounting
+		 * and memory pool.
+		 */
+		alloc->evicted = alloc->nents;
+
+		kbase_free_phy_pages_helper(alloc, alloc->evicted);
+		freed += alloc->evicted;
+		list_del_init(&alloc->evict_node);
+
+		/*
+		 * Inform the JIT allocator this region has lost backing
+		 * as it might need to free the allocation.
+		 */
+		kbase_jit_backing_lost(alloc->reg);
+
+		/* Enough pages have been freed so stop now */
+		if (freed > sc->nr_to_scan)
+			break;
+	}
+out_unlock:
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_evictable_reclaim_shrink(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	if (sc->nr_to_scan == 0)
+		return kbase_mem_evictable_reclaim_count_objects(s, sc);
+
+	return kbase_mem_evictable_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_evictable_init(struct kbase_context *kctx)
+{
+	INIT_LIST_HEAD(&kctx->evict_list);
+	mutex_init(&kctx->jit_evict_lock);
+
+	/* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	kctx->reclaim.shrink = kbase_mem_evictable_reclaim_shrink;
+#else
+	kctx->reclaim.count_objects = kbase_mem_evictable_reclaim_count_objects;
+	kctx->reclaim.scan_objects = kbase_mem_evictable_reclaim_scan_objects;
+#endif
+	kctx->reclaim.seeks = DEFAULT_SEEKS;
+	/* Kernel versions prior to 3.1:
+	 * struct shrinker does not define batch */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+	kctx->reclaim.batch = 0;
+#endif
+	register_shrinker(&kctx->reclaim);
+	return 0;
+}
+
+void kbase_mem_evictable_deinit(struct kbase_context *kctx)
+{
+	unregister_shrinker(&kctx->reclaim);
+}
+
+/**
+ * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
+ * @alloc: The physical allocation
+ */
+void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	int __maybe_unused new_page_count;
+
+	kbase_process_page_usage_dec(kctx, alloc->nents);
+	new_page_count = kbase_atomic_sub_pages(alloc->nents,
+						&kctx->used_pages);
+	kbase_atomic_sub_pages(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kctx->id,
+			(u64)new_page_count);
+}
+
+/**
+ * kbase_mem_evictable_unmark_reclaim - Mark the pages as no longer reclaimable.
+ * @alloc: The physical allocation
+ */
+static
+void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
+{
+	struct kbase_context *kctx = alloc->imported.native.kctx;
+	int __maybe_unused new_page_count;
+
+	new_page_count = kbase_atomic_add_pages(alloc->nents,
+						&kctx->used_pages);
+	kbase_atomic_add_pages(alloc->nents, &kctx->kbdev->memdev.used_pages);
+
+	/* Increase mm counters so that the allocation is accounted for
+	 * against the process and thus is visible to the OOM killer.
+	 */
+	kbase_process_page_usage_inc(kctx, alloc->nents);
+
+	KBASE_TLSTREAM_AUX_PAGESALLOC(
+			kctx->id,
+			(u64)new_page_count);
+}
+
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+	struct kbase_context *kctx = gpu_alloc->imported.native.kctx;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	kbase_mem_shrink_cpu_mapping(kctx, gpu_alloc->reg,
+			0, gpu_alloc->nents);
+
+	mutex_lock(&kctx->jit_evict_lock);
+	/* This allocation can't already be on a list. */
+	WARN_ON(!list_empty(&gpu_alloc->evict_node));
+
+	/*
+	 * Add the allocation to the eviction list; after this point the
+	 * shrinker can reclaim it.
+	 */
+	list_add(&gpu_alloc->evict_node, &kctx->evict_list);
+	mutex_unlock(&kctx->jit_evict_lock);
+	kbase_mem_evictable_mark_reclaim(gpu_alloc);
+
+	gpu_alloc->reg->flags |= KBASE_REG_DONT_NEED;
+	return 0;
+}
+
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *gpu_alloc)
+{
+	struct kbase_context *kctx = gpu_alloc->imported.native.kctx;
+	int err = 0;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	mutex_lock(&kctx->jit_evict_lock);
+	/*
+	 * First remove the allocation from the eviction list as it's no
+	 * longer eligible for eviction.
+	 */
+	list_del_init(&gpu_alloc->evict_node);
+	mutex_unlock(&kctx->jit_evict_lock);
+
+	if (gpu_alloc->evicted == 0) {
+		/*
+		 * The backing is still present, update the VM stats as it's
+		 * in use again.
+		 */
+		kbase_mem_evictable_unmark_reclaim(gpu_alloc);
+	} else {
+		/* If the region is still alive ... */
+		if (gpu_alloc->reg) {
+			/* ... allocate replacement backing ... */
+			err = kbase_alloc_phy_pages_helper(gpu_alloc,
+					gpu_alloc->evicted);
+
+			/*
+			 * ... and grow the mapping back to its
+			 * pre-eviction size.
+			 */
+			if (!err)
+				err = kbase_mem_grow_gpu_mapping(kctx,
+						gpu_alloc->reg,
+						gpu_alloc->evicted, 0);
+
+			gpu_alloc->evicted = 0;
+		}
+	}
+
+	/* If the region is still alive remove the DONT_NEED attribute. */
+	if (gpu_alloc->reg)
+		gpu_alloc->reg->flags &= ~KBASE_REG_DONT_NEED;
+
+	return (err == 0);
+}
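+
+/*
+ * Illustrative lifecycle of the two helpers above (an editorial sketch, not a
+ * real caller): marking an allocation as not needed places it on the eviction
+ * list where the shrinker may reclaim its backing; unmaking it later restores
+ * any evicted pages. Both calls expect the region lock to be held.
+ *
+ *   kbase_gpu_vm_lock(kctx);
+ *   kbase_mem_evictable_make(reg->gpu_alloc);     // now reclaimable
+ *   ...
+ *   kbase_mem_evictable_unmake(reg->gpu_alloc);   // backing restored if evicted
+ *   kbase_gpu_vm_unlock(kctx);
+ */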
+
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask)
+{
+	struct kbase_va_region *reg;
+	int ret = -EINVAL;
+	unsigned int real_flags = 0;
+	unsigned int prev_flags = 0;
+	bool prev_needed, new_needed;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	if (!gpu_addr)
+		return -EINVAL;
+
+	if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE))
+		return -EINVAL;
+
+	/* nuke other bits */
+	flags &= mask;
+
+	/* check for only supported flags */
+	if (flags & ~(BASE_MEM_FLAGS_MODIFIABLE))
+		goto out;
+
+	/* mask covers bits we don't support? */
+	if (mask & ~(BASE_MEM_FLAGS_MODIFIABLE))
+		goto out;
+
+	/* convert flags */
+	if (BASE_MEM_COHERENT_SYSTEM & flags)
+		real_flags |= KBASE_REG_SHARE_BOTH;
+	else if (BASE_MEM_COHERENT_LOCAL & flags)
+		real_flags |= KBASE_REG_SHARE_IN;
+
+	/* now we can lock down the context, and find the region */
+	mmap_write_lock(current->mm);
+	kbase_gpu_vm_lock(kctx);
+
+	/* Validate the region */
+	reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+	if (!reg || (reg->flags & KBASE_REG_FREE))
+		goto out_unlock;
+
+	/* Is the region transitioning between not needed and needed? */
+	prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
+	new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
+	if (prev_needed != new_needed) {
+		/* Aliased allocations can't be made ephemeral */
+		if (atomic_read(&reg->cpu_alloc->gpu_mappings) > 1)
+			goto out_unlock;
+
+		if (new_needed) {
+			/* Only native allocations can be marked not needed */
+			if (reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+			ret = kbase_mem_evictable_make(reg->gpu_alloc);
+			if (ret)
+				goto out_unlock;
+		} else {
+			kbase_mem_evictable_unmake(reg->gpu_alloc);
+		}
+	}
+
+	/* limit to imported memory */
+	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)
+		goto out_unlock;
+
+	/* shareability flags are ignored for GPU uncached memory */
+	if (!(reg->flags & KBASE_REG_GPU_CACHED)) {
+		ret = 0;
+		goto out_unlock;
+	}
+
+	/* no change? */
+	if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) {
+		ret = 0;
+		goto out_unlock;
+	}
+
+	/* save for roll back */
+	prev_flags = reg->flags;
+	reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+	reg->flags |= real_flags;
+
+	/* Currently supporting only imported memory */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+		/* Future use will use the new flags; the existing mapping will
+		 * NOT be updated, as memory should not be in use by the GPU
+		 * when the flags are updated.
+		 */
+		ret = 0;
+		WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count);
+	}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+	/* roll back on error */
+	if (ret)
+		reg->flags = prev_flags;
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	mmap_write_unlock(current->mm);
+out:
+	return ret;
+}
+
+#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS)
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
+		int fd, u64 *va_pages, u64 *flags, u32 padding)
+{
+	struct kbase_va_region *reg;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *dma_attachment;
+	bool shared_zone = false;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf))
+		goto no_buf;
+
+	dma_attachment = dma_buf_attach(dma_buf, kctx->kbdev->dev);
+	if (!dma_attachment)
+		goto no_attachment;
+
+	*va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
+	if (!*va_pages)
+		goto bad_size;
+
+	if (*va_pages > (U64_MAX / PAGE_SIZE))
+		/* 64-bit address range is the max */
+		goto bad_size;
+
+	/* ignore SAME_VA */
+	*flags &= ~BASE_MEM_SAME_VA;
+
+	if (*flags & BASE_MEM_IMPORT_SHARED)
+		shared_zone = true;
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/*
+		 * 64-bit tasks require us to reserve VA on the CPU that we use
+		 * on the GPU.
+		 */
+		shared_zone = true;
+	}
+#endif
+
+	if (shared_zone) {
+		*flags |= BASE_MEM_NEED_MMAP;
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_same,
+				0, *va_pages, KBASE_REG_ZONE_SAME_VA);
+	} else {
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+				0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
+	}
+
+	if (!reg)
+		goto no_region;
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto invalid_flags;
+
+	reg->gpu_alloc = kbase_alloc_create(kctx, *va_pages,
+			KBASE_MEM_TYPE_IMPORTED_UMM);
+	if (IS_ERR_OR_NULL(reg->gpu_alloc))
+		goto no_alloc_obj;
+
+	reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	/* No pages to map yet */
+	reg->gpu_alloc->nents = 0;
+
+	reg->flags &= ~KBASE_REG_FREE;
+	reg->flags |= KBASE_REG_GPU_NX;	/* UMM is always No eXecute */
+	reg->flags &= ~KBASE_REG_GROWABLE;	/* UMM cannot be grown */
+
+	if (*flags & BASE_MEM_SECURE)
+		reg->flags |= KBASE_REG_SECURE;
+
+	if (padding)
+		reg->flags |= KBASE_REG_IMPORT_PAD;
+
+	reg->gpu_alloc->type = KBASE_MEM_TYPE_IMPORTED_UMM;
+	reg->gpu_alloc->imported.umm.sgt = NULL;
+	reg->gpu_alloc->imported.umm.dma_buf = dma_buf;
+	reg->gpu_alloc->imported.umm.dma_attachment = dma_attachment;
+	reg->gpu_alloc->imported.umm.current_mapping_usage_count = 0;
+	reg->extent = 0;
+
+	return reg;
+
+no_alloc_obj:
+invalid_flags:
+	kfree(reg);
+no_region:
+bad_size:
+	dma_buf_detach(dma_buf, dma_attachment);
+no_attachment:
+	dma_buf_put(dma_buf);
+no_buf:
+	return NULL;
+}
+#endif  /* CONFIG_DMA_SHARED_BUFFER */
+
+u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev)
+{
+	u32 cpu_cache_line_size = cache_line_size();
+	u32 gpu_cache_line_size =
+		(1UL << kbdev->gpu_props.props.l2_props.log2_line_size);
+
+	return ((cpu_cache_line_size > gpu_cache_line_size) ?
+				cpu_cache_line_size :
+				gpu_cache_line_size);
+}
+
+static struct kbase_va_region *kbase_mem_from_user_buffer(
+		struct kbase_context *kctx, unsigned long address,
+		unsigned long size, u64 *va_pages, u64 *flags)
+{
+	long i;
+	struct kbase_va_region *reg;
+	struct rb_root *rbtree;
+	long faulted_pages;
+	int zone = KBASE_REG_ZONE_CUSTOM_VA;
+	bool shared_zone = false;
+	u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx->kbdev);
+	struct kbase_alloc_import_user_buf *user_buf;
+	struct page **pages = NULL;
+
+	if ((address & (cache_line_alignment - 1)) != 0 ||
+			(size & (cache_line_alignment - 1)) != 0) {
+		if (*flags & BASE_MEM_UNCACHED_GPU) {
+			dev_warn(kctx->kbdev->dev,
+					"User buffer is not cache line aligned and marked as GPU uncached\n");
+			goto bad_size;
+		}
+
+		/* Coherency must be enabled to handle partial cache lines */
+		if (*flags & (BASE_MEM_COHERENT_SYSTEM |
+			BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
+			/* Force coherent system required flag, import will
+			 * then fail if coherency isn't available
+			 */
+			*flags |= BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+		} else {
+			dev_warn(kctx->kbdev->dev,
+					"User buffer is not cache line aligned and no coherency enabled\n");
+			goto bad_size;
+		}
+	}
+
+	*va_pages = (PAGE_ALIGN(address + size) >> PAGE_SHIFT) -
+		PFN_DOWN(address);
+	if (!*va_pages)
+		goto bad_size;
+
+	if (*va_pages > (UINT64_MAX / PAGE_SIZE))
+		/* 64-bit address range is the max */
+		goto bad_size;
+
+	/* SAME_VA generally not supported with imported memory (no known use cases) */
+	*flags &= ~BASE_MEM_SAME_VA;
+
+	if (*flags & BASE_MEM_IMPORT_SHARED)
+		shared_zone = true;
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/*
+		 * 64-bit tasks require us to reserve VA on the CPU that we use
+		 * on the GPU.
+		 */
+		shared_zone = true;
+	}
+#endif
+
+	if (shared_zone) {
+		*flags |= BASE_MEM_NEED_MMAP;
+		zone = KBASE_REG_ZONE_SAME_VA;
+		rbtree = &kctx->reg_rbtree_same;
+	} else
+		rbtree = &kctx->reg_rbtree_custom;
+
+	reg = kbase_alloc_free_region(rbtree, 0, *va_pages, zone);
+
+	if (!reg)
+		goto no_region;
+
+	reg->gpu_alloc = kbase_alloc_create(kctx, *va_pages,
+			KBASE_MEM_TYPE_IMPORTED_USER_BUF);
+	if (IS_ERR_OR_NULL(reg->gpu_alloc))
+		goto no_alloc_obj;
+
+	reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto invalid_flags;
+
+	reg->flags &= ~KBASE_REG_FREE;
+	reg->flags |= KBASE_REG_GPU_NX; /* User-buffers are always No eXecute */
+	reg->flags &= ~KBASE_REG_GROWABLE; /* Cannot be grown */
+
+	user_buf = &reg->gpu_alloc->imported.user_buf;
+
+	user_buf->size = size;
+	user_buf->address = address;
+	user_buf->nr_pages = *va_pages;
+	user_buf->mm = current->mm;
+#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+	atomic_inc(&current->mm->mm_count);
+#else
+	mmgrab(current->mm);
+#endif
+	if (reg->gpu_alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+		user_buf->pages = vmalloc(*va_pages * sizeof(struct page *));
+	else
+		user_buf->pages = kmalloc_array(*va_pages,
+				sizeof(struct page *), GFP_KERNEL);
+
+	if (!user_buf->pages)
+		goto no_page_array;
+
+	/* If the region is coherent with the CPU then the memory is imported
+	 * and mapped onto the GPU immediately.
+	 * Otherwise get_user_pages is called as a sanity check, but with
+	 * NULL as the pages argument which will fault the pages, but not
+	 * pin them. The memory will then be pinned only around the jobs that
+	 * specify the region as an external resource.
+	 */
+	if (reg->flags & KBASE_REG_SHARE_BOTH) {
+		pages = user_buf->pages;
+		*flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
+	}
+
+	mmap_read_lock(current->mm);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+	faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
+			reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+	faulted_pages = get_user_pages(address, *va_pages,
+			reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+#else
+	faulted_pages = get_user_pages(address, *va_pages,
+			reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+			pages, NULL);
+#endif
+
+	mmap_read_unlock(current->mm);
+
+	if (faulted_pages != *va_pages)
+		goto fault_mismatch;
+
+	reg->gpu_alloc->nents = 0;
+	reg->extent = 0;
+
+	if (pages) {
+		struct device *dev = kctx->kbdev->dev;
+		unsigned long local_size = user_buf->size;
+		unsigned long offset = user_buf->address & ~PAGE_MASK;
+		struct tagged_addr *pa = kbase_get_gpu_phy_pages(reg);
+
+		/* Top bit signifies that this was pinned on import */
+		user_buf->current_mapping_usage_count |= PINNED_ON_IMPORT;
+
+		for (i = 0; i < faulted_pages; i++) {
+			dma_addr_t dma_addr;
+			unsigned long min;
+
+			min = MIN(PAGE_SIZE - offset, local_size);
+			dma_addr = dma_map_page(dev, pages[i],
+					offset, min,
+					DMA_BIDIRECTIONAL);
+			if (dma_mapping_error(dev, dma_addr))
+				goto unwind_dma_map;
+
+			user_buf->dma_addrs[i] = dma_addr;
+			pa[i] = as_tagged(page_to_phys(pages[i]));
+
+			local_size -= min;
+			offset = 0;
+		}
+
+		reg->gpu_alloc->nents = faulted_pages;
+	}
+
+	return reg;
+
+unwind_dma_map:
+	while (i--) {
+		dma_unmap_page(kctx->kbdev->dev,
+				user_buf->dma_addrs[i],
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
+fault_mismatch:
+	if (pages) {
+		for (i = 0; i < faulted_pages; i++)
+			put_page(pages[i]);
+	}
+no_page_array:
+invalid_flags:
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+	kfree(reg);
+no_region:
+bad_size:
+	return NULL;
+
+}
+
+
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
+		    u64 nents, struct base_mem_aliasing_info *ai,
+		    u64 *num_pages)
+{
+	struct kbase_va_region *reg;
+	u64 gpu_va;
+	size_t i;
+	bool coherent;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(flags);
+	KBASE_DEBUG_ASSERT(ai);
+	KBASE_DEBUG_ASSERT(num_pages);
+
+	/* mask to only allowed flags */
+	*flags &= (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+		   BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL |
+		   BASE_MEM_PROT_CPU_RD | BASE_MEM_COHERENT_SYSTEM_REQUIRED);
+
+	if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
+		dev_warn(kctx->kbdev->dev,
+				"kbase_mem_alias called with bad flags (%llx)",
+				(unsigned long long)*flags);
+		goto bad_flags;
+	}
+	coherent = (*flags & BASE_MEM_COHERENT_SYSTEM) != 0 ||
+			(*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0;
+
+	if (!stride)
+		goto bad_stride;
+
+	if (!nents)
+		goto bad_nents;
+
+	if ((nents * stride) > (U64_MAX / PAGE_SIZE))
+		/* 64-bit address range is the max */
+		goto bad_size;
+
+	/* calculate the number of pages this alias will cover */
+	*num_pages = nents * stride;
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/* 64-bit tasks must MMAP anyway, but not expose this address to
+		 * clients */
+		*flags |= BASE_MEM_NEED_MMAP;
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 0,
+				*num_pages,
+				KBASE_REG_ZONE_SAME_VA);
+	} else {
+#else
+	if (1) {
+#endif
+		reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+				0, *num_pages,
+				KBASE_REG_ZONE_CUSTOM_VA);
+	}
+
+	if (!reg)
+		goto no_reg;
+
+	/* zero-sized page array, as we don't need one (and can't support one) */
+	reg->gpu_alloc = kbase_alloc_create(kctx, 0, KBASE_MEM_TYPE_ALIAS);
+	if (IS_ERR_OR_NULL(reg->gpu_alloc))
+		goto no_alloc_obj;
+
+	reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	if (kbase_update_region_flags(kctx, reg, *flags) != 0)
+		goto invalid_flags;
+
+	reg->gpu_alloc->imported.alias.nents = nents;
+	reg->gpu_alloc->imported.alias.stride = stride;
+	reg->gpu_alloc->imported.alias.aliased = vzalloc(sizeof(*reg->gpu_alloc->imported.alias.aliased) * nents);
+	if (!reg->gpu_alloc->imported.alias.aliased)
+		goto no_aliased_array;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* validate and add src handles */
+	for (i = 0; i < nents; i++) {
+		if (ai[i].handle.basep.handle < BASE_MEM_FIRST_FREE_ADDRESS) {
+			if (ai[i].handle.basep.handle !=
+			    BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE)
+				goto bad_handle; /* unsupported magic handle */
+			if (!ai[i].length)
+				goto bad_handle; /* must be > 0 */
+			if (ai[i].length > stride)
+				goto bad_handle; /* can't be larger than the
+						    stride */
+			reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+		} else {
+			struct kbase_va_region *aliasing_reg;
+			struct kbase_mem_phy_alloc *alloc;
+
+			aliasing_reg = kbase_region_tracker_find_region_base_address(
+				kctx,
+				(ai[i].handle.basep.handle >> PAGE_SHIFT) << PAGE_SHIFT);
+
+			/* validate found region */
+			if (!aliasing_reg)
+				goto bad_handle; /* Not found */
+			if (aliasing_reg->flags & KBASE_REG_FREE)
+				goto bad_handle; /* Free region */
+			if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
+				goto bad_handle; /* Ephemeral region */
+			if (!(aliasing_reg->flags & KBASE_REG_GPU_CACHED))
+				goto bad_handle; /* GPU uncached memory */
+			if (!aliasing_reg->gpu_alloc)
+				goto bad_handle; /* No alloc */
+			if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+				goto bad_handle; /* Not a native alloc */
+			/* Non-coherent memory cannot alias coherent memory,
+			 * and vice versa.
+			 */
+			if (coherent != ((aliasing_reg->flags & KBASE_REG_SHARE_BOTH) != 0))
+				goto bad_handle;
+
+			/* check size against stride */
+			if (!ai[i].length)
+				goto bad_handle; /* must be > 0 */
+			if (ai[i].length > stride)
+				goto bad_handle; /* can't be larger than the
+						    stride */
+
+			alloc = aliasing_reg->gpu_alloc;
+
+			/* check against the alloc's size */
+			if (ai[i].offset > alloc->nents)
+				goto bad_handle; /* beyond end */
+			if (ai[i].offset + ai[i].length > alloc->nents)
+				goto bad_handle; /* beyond end */
+
+			reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
+			reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
+			reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
+		}
+	}
+
+#ifdef CONFIG_64BIT
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+		/* Bind to a cookie */
+		if (!kctx->cookies) {
+			dev_err(kctx->kbdev->dev, "No cookies available for allocation!");
+			goto no_cookie;
+		}
+		/* return a cookie */
+		gpu_va = __ffs(kctx->cookies);
+		kctx->cookies &= ~(1UL << gpu_va);
+		BUG_ON(kctx->pending_regions[gpu_va]);
+		kctx->pending_regions[gpu_va] = reg;
+
+		/* relocate to correct base */
+		gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		gpu_va <<= PAGE_SHIFT;
+	} else /* we control the VA */ {
+#else
+	if (1) {
+#endif
+		if (kbase_gpu_mmap(kctx, reg, 0, *num_pages, 1) != 0) {
+			dev_warn(kctx->kbdev->dev, "Failed to map memory on GPU");
+			goto no_mmap;
+		}
+		/* return real GPU VA */
+		gpu_va = reg->start_pfn << PAGE_SHIFT;
+	}
+
+	reg->flags &= ~KBASE_REG_FREE;
+	reg->flags &= ~KBASE_REG_GROWABLE;
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return gpu_va;
+
+#ifdef CONFIG_64BIT
+no_cookie:
+#endif
+no_mmap:
+bad_handle:
+	kbase_gpu_vm_unlock(kctx);
+no_aliased_array:
+invalid_flags:
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+no_alloc_obj:
+	kfree(reg);
+no_reg:
+bad_size:
+bad_nents:
+bad_stride:
+bad_flags:
+	return 0;
+}
+
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+		void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
+		u64 *flags)
+{
+	struct kbase_va_region *reg;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(gpu_va);
+	KBASE_DEBUG_ASSERT(va_pages);
+	KBASE_DEBUG_ASSERT(flags);
+
+	if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
+			kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA))
+		*flags |= BASE_MEM_SAME_VA;
+
+	if (!kbase_check_import_flags(*flags)) {
+		dev_warn(kctx->kbdev->dev,
+				"kbase_mem_import called with bad flags (%llx)",
+				(unsigned long long)*flags);
+		goto bad_flags;
+	}
+
+	if ((*flags & BASE_MEM_UNCACHED_GPU) != 0 &&
+			(*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0) {
+		/* Remove COHERENT_SYSTEM_REQUIRED flag if uncached GPU mapping is requested */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		dev_warn(kctx->kbdev->dev,
+				"kbase_mem_import call required coherent mem when unavailable");
+		goto bad_flags;
+	}
+	if ((*flags & BASE_MEM_COHERENT_SYSTEM) != 0 &&
+			!kbase_device_is_cpu_coherent(kctx->kbdev)) {
+		/* Remove COHERENT_SYSTEM flag if coherent mem is unavailable */
+		*flags &= ~BASE_MEM_COHERENT_SYSTEM;
+	}
+
+	if ((padding != 0) && (type != BASE_MEM_IMPORT_TYPE_UMM)) {
+		dev_warn(kctx->kbdev->dev,
+				"padding is only supported for UMM");
+		goto bad_flags;
+	}
+
+	switch (type) {
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	case BASE_MEM_IMPORT_TYPE_UMM: {
+		int fd;
+
+		if (get_user(fd, (int __user *)phandle))
+			reg = NULL;
+		else
+			reg = kbase_mem_from_umm(kctx, fd, va_pages, flags,
+					padding);
+	}
+	break;
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+	case BASE_MEM_IMPORT_TYPE_USER_BUFFER: {
+		struct base_mem_import_user_buffer user_buffer;
+		void __user *uptr;
+
+		if (copy_from_user(&user_buffer, phandle,
+				sizeof(user_buffer))) {
+			reg = NULL;
+		} else {
+#ifdef CONFIG_COMPAT
+			if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+				uptr = compat_ptr(user_buffer.ptr);
+			else
+#endif
+				uptr = u64_to_user_ptr(user_buffer.ptr);
+
+			reg = kbase_mem_from_user_buffer(kctx,
+					(unsigned long)uptr, user_buffer.length,
+					va_pages, flags);
+		}
+		break;
+	}
+	default: {
+		reg = NULL;
+		break;
+	}
+	}
+
+	if (!reg)
+		goto no_reg;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* mmap needed to setup VA? */
+	if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) {
+		/* Bind to a cookie */
+		if (!kctx->cookies)
+			goto no_cookie;
+		/* return a cookie */
+		*gpu_va = __ffs(kctx->cookies);
+		kctx->cookies &= ~(1UL << *gpu_va);
+		BUG_ON(kctx->pending_regions[*gpu_va]);
+		kctx->pending_regions[*gpu_va] = reg;
+
+		/* relocate to correct base */
+		*gpu_va += PFN_DOWN(BASE_MEM_COOKIE_BASE);
+		*gpu_va <<= PAGE_SHIFT;
+
+	} else if (*flags & KBASE_MEM_IMPORT_HAVE_PAGES)  {
+		/* we control the VA, mmap now to the GPU */
+		if (kbase_gpu_mmap(kctx, reg, 0, *va_pages, 1) != 0)
+			goto no_gpu_va;
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+	} else {
+		/* we control the VA, but nothing to mmap yet */
+		if (kbase_add_va_region(kctx, reg, 0, *va_pages, 1) != 0)
+			goto no_gpu_va;
+		/* return real GPU VA */
+		*gpu_va = reg->start_pfn << PAGE_SHIFT;
+	}
+
+	/* clear out private flags */
+	*flags &= ((1UL << BASE_MEM_FLAGS_NR_BITS) - 1);
+
+	kbase_gpu_vm_unlock(kctx);
+
+	return 0;
+
+no_gpu_va:
+no_cookie:
+	kbase_gpu_vm_unlock(kctx);
+	kbase_mem_phy_alloc_put(reg->cpu_alloc);
+	kbase_mem_phy_alloc_put(reg->gpu_alloc);
+	kfree(reg);
+no_reg:
+bad_flags:
+	*gpu_va = 0;
+	*va_pages = 0;
+	*flags = 0;
+	return -ENOMEM;
+}
+
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	struct tagged_addr *phy_pages;
+	u64 delta = new_pages - old_pages;
+	int ret = 0;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	/* Map the new pages into the GPU */
+	phy_pages = kbase_get_gpu_phy_pages(reg);
+	ret = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn + old_pages,
+			phy_pages + old_pages, delta, reg->flags, kctx->as_nr);
+
+	return ret;
+}
+
+void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	u64 gpu_va_start = reg->start_pfn;
+
+	if (new_pages == old_pages)
+		/* Nothing to do */
+		return;
+
+	unmap_mapping_range(kctx->filp->f_inode->i_mapping,
+			(gpu_va_start + new_pages) << PAGE_SHIFT,
+			(old_pages - new_pages) << PAGE_SHIFT, 1);
+}
+
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages)
+{
+	u64 delta = old_pages - new_pages;
+	int ret = 0;
+
+	ret = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+			reg->start_pfn + new_pages, delta, kctx->as_nr);
+
+	return ret;
+}
+
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
+{
+	u64 old_pages;
+	u64 delta;
+	int res = -EINVAL;
+	struct kbase_va_region *reg;
+	bool read_locked = false;
+
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(gpu_addr != 0);
+
+	if (gpu_addr & ~PAGE_MASK) {
+		dev_warn(kctx->kbdev->dev, "kbase:mem_commit: gpu_addr: passed parameter is invalid");
+		return -EINVAL;
+	}
+
+	mmap_write_lock(current->mm);
+	kbase_gpu_vm_lock(kctx);
+
+	/* Validate the region */
+	reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
+	if (!reg || (reg->flags & KBASE_REG_FREE))
+		goto out_unlock;
+
+	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
+	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
+
+	if (reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+		goto out_unlock;
+
+	if (0 == (reg->flags & KBASE_REG_GROWABLE))
+		goto out_unlock;
+
+	/* Would overflow the VA region */
+	if (new_pages > reg->nr_pages)
+		goto out_unlock;
+
+	/* can't be mapped more than once on the GPU */
+	if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1)
+		goto out_unlock;
+	/* can't grow regions which are ephemeral */
+	if (reg->flags & KBASE_REG_DONT_NEED)
+		goto out_unlock;
+
+	if (new_pages == reg->gpu_alloc->nents) {
+		/* no change */
+		res = 0;
+		goto out_unlock;
+	}
+
+	old_pages = kbase_reg_current_backed_size(reg);
+	if (new_pages > old_pages) {
+		delta = new_pages - old_pages;
+
+		/*
+		 * No update to the mm so downgrade the writer lock to a read
+		 * lock so other readers aren't blocked after this point.
+		 */
+		downgrade_write(&current->mm->mmap_lock);
+		read_locked = true;
+
+		/* Allocate some more pages */
+		if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, delta) != 0) {
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+		if (reg->cpu_alloc != reg->gpu_alloc) {
+			if (kbase_alloc_phy_pages_helper(
+					reg->gpu_alloc, delta) != 0) {
+				res = -ENOMEM;
+				kbase_free_phy_pages_helper(reg->cpu_alloc,
+						delta);
+				goto out_unlock;
+			}
+		}
+
+		/* No update required for CPU mappings, that's done on fault. */
+
+		/* Update GPU mapping. */
+		res = kbase_mem_grow_gpu_mapping(kctx, reg,
+				new_pages, old_pages);
+
+		/* On error free the new pages */
+		if (res) {
+			kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+			if (reg->cpu_alloc != reg->gpu_alloc)
+				kbase_free_phy_pages_helper(reg->gpu_alloc,
+						delta);
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+	} else {
+		delta = old_pages - new_pages;
+
+		/* Update all CPU mapping(s) */
+		kbase_mem_shrink_cpu_mapping(kctx, reg,
+				new_pages, old_pages);
+
+		/* Update the GPU mapping */
+		res = kbase_mem_shrink_gpu_mapping(kctx, reg,
+				new_pages, old_pages);
+		if (res) {
+			res = -ENOMEM;
+			goto out_unlock;
+		}
+
+		kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+		if (reg->cpu_alloc != reg->gpu_alloc)
+			kbase_free_phy_pages_helper(reg->gpu_alloc, delta);
+	}
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	if (read_locked)
+		mmap_read_unlock(current->mm);
+	else
+		mmap_write_unlock(current->mm);
+
+	return res;
+}
+
+static void kbase_cpu_vm_open(struct vm_area_struct *vma)
+{
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+	/* non-atomic as we're under Linux' mm lock */
+	map->count++;
+}
+
+static void kbase_cpu_vm_close(struct vm_area_struct *vma)
+{
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+
+	/* non-atomic as we're under Linux' mm lock */
+	if (--map->count)
+		return;
+
+	KBASE_DEBUG_ASSERT(map->kctx);
+	KBASE_DEBUG_ASSERT(map->alloc);
+
+	kbase_gpu_vm_lock(map->kctx);
+
+	if (map->free_on_close) {
+		KBASE_DEBUG_ASSERT((map->region->flags & KBASE_REG_ZONE_MASK) ==
+				KBASE_REG_ZONE_SAME_VA);
+		/* Avoid freeing the memory on process death, as that would
+		 * result in a GPU page fault. The memory will instead be
+		 * freed in kbase_destroy_context().
+		 */
+		if (!(current->flags & PF_EXITING))
+			kbase_mem_free_region(map->kctx, map->region);
+	}
+
+	list_del(&map->mappings_list);
+
+	kbase_gpu_vm_unlock(map->kctx);
+
+	kbase_mem_phy_alloc_put(map->alloc);
+	kfree(map);
+}
+
+KBASE_EXPORT_TEST_API(kbase_cpu_vm_close);
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0))
+static vm_fault_t kbase_cpu_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#else
+static int kbase_cpu_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#endif
+	struct kbase_cpu_mapping *map = vma->vm_private_data;
+	pgoff_t rel_pgoff;
+	size_t i;
+	int ret = VM_FAULT_SIGBUS;
+	pgoff_t addr;
+
+	KBASE_DEBUG_ASSERT(map);
+	KBASE_DEBUG_ASSERT(map->count > 0);
+	KBASE_DEBUG_ASSERT(map->kctx);
+	KBASE_DEBUG_ASSERT(map->alloc);
+
+	rel_pgoff = vmf->pgoff - map->region->start_pfn;
+
+	kbase_gpu_vm_lock(map->kctx);
+	if (rel_pgoff >= map->alloc->nents)
+		goto locked_bad_fault;
+
+	/* Fault on access to DONT_NEED regions */
+	if (map->alloc->reg && (map->alloc->reg->flags & KBASE_REG_DONT_NEED))
+		goto locked_bad_fault;
+
+	/* insert all valid pages from the fault location */
+	i = rel_pgoff;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	addr = (pgoff_t)((uintptr_t)vmf->virtual_address >> PAGE_SHIFT);
+#else
+	addr = (pgoff_t)(vmf->address >> PAGE_SHIFT);
+#endif
+	while (i < map->alloc->nents && (addr < vma->vm_end >> PAGE_SHIFT)) {
+		ret = vmf_insert_pfn(vma, addr << PAGE_SHIFT,
+		    PFN_DOWN(as_phys_addr_t(map->alloc->pages[i])));
+		if (unlikely(ret & VM_FAULT_ERROR))
+			goto locked_bad_fault;
+
+		i++; addr++;
+	}
+
+	kbase_gpu_vm_unlock(map->kctx);
+	/* we resolved it, nothing for VM to do */
+	return VM_FAULT_NOPAGE;
+
+locked_bad_fault:
+	kbase_gpu_vm_unlock(map->kctx);
+	return ret;
+}
+
+const struct vm_operations_struct kbase_vm_ops = {
+	.open  = kbase_cpu_vm_open,
+	.close = kbase_cpu_vm_close,
+	.fault = kbase_cpu_vm_fault
+};
+
+static int kbase_cpu_mmap(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		struct vm_area_struct *vma,
+		void *kaddr,
+		size_t nr_pages,
+		unsigned long aligned_offset,
+		int free_on_close)
+{
+	struct kbase_cpu_mapping *map;
+	struct tagged_addr *page_array;
+	int err = 0;
+	int i;
+	u64 start_off;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+
+	if (!map) {
+		WARN_ON(1);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * VM_DONTCOPY - don't make this mapping available in fork'ed processes
+	 * VM_DONTEXPAND - disable mremap on this region
+	 * VM_IO - disables paging
+	 * VM_DONTDUMP - Don't include in core dumps (3.7 only)
+	 * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
+	 *               This is needed to support using the dedicated and
+	 *               the OS based memory backends together.
+	 */
+	/*
+	 * This will need updating to propagate coherency flags
+	 * See MIDBASE-1057
+	 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
+#else
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+	vma->vm_ops = &kbase_vm_ops;
+	vma->vm_private_data = map;
+
+	page_array = kbase_get_cpu_phy_pages(reg);
+	start_off = vma->vm_pgoff - reg->start_pfn +
+				(aligned_offset >> PAGE_SHIFT);
+	if (reg->cpu_alloc->type == KBASE_MEM_TYPE_ALIAS && nr_pages) {
+		struct kbase_aliased *aliased =
+			reg->cpu_alloc->imported.alias.aliased;
+
+		if (!reg->cpu_alloc->imported.alias.stride ||
+				reg->nr_pages < (start_off + nr_pages)) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		while (start_off >= reg->cpu_alloc->imported.alias.stride) {
+			aliased++;
+			start_off -= reg->cpu_alloc->imported.alias.stride;
+		}
+
+		if (!aliased->alloc) {
+			/* sink page not available for dumping map */
+			err = -EINVAL;
+			goto out;
+		}
+
+		if ((start_off + nr_pages) > aliased->length) {
+			/* not fully backed by physical pages */
+			err = -EINVAL;
+			goto out;
+		}
+
+		/* ready the pages for dumping map */
+		page_array = aliased->alloc->pages + aliased->offset;
+	}
+
+	if (!(reg->flags & KBASE_REG_CPU_CACHED) &&
+	    (reg->flags & (KBASE_REG_CPU_WR|KBASE_REG_CPU_RD))) {
+		/* We can't map vmalloc'd memory uncached.
+		 * Other memory will have been returned from
+		 * kbase_mem_pool which would be
+		 * suitable for mapping uncached.
+		 */
+		BUG_ON(kaddr);
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	}
+
+	if (!kaddr) {
+		unsigned long addr = vma->vm_start + aligned_offset;
+
+		vma->vm_flags |= VM_PFNMAP;
+		for (i = 0; i < nr_pages; i++) {
+			phys_addr_t phys;
+
+			phys = as_phys_addr_t(page_array[i + start_off]);
+			err = vmf_insert_pfn(vma, addr, PFN_DOWN(phys));
+			if (unlikely(WARN_ON(err & VM_FAULT_ERROR)))
+				break;
+
+			err = 0;
+			addr += PAGE_SIZE;
+		}
+	} else {
+		WARN_ON(aligned_offset);
+		/* MIXEDMAP so we can vfree the kaddr early and not track it after map time */
+		vma->vm_flags |= VM_MIXEDMAP;
+		/* vmalloc remapping is easy... */
+		err = remap_vmalloc_range(vma, kaddr, 0);
+		WARN_ON(err);
+	}
+
+	if (err) {
+		kfree(map);
+		goto out;
+	}
+
+	map->region = reg;
+	map->free_on_close = free_on_close;
+	map->kctx = kctx;
+	map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+	map->count = 1; /* start with one ref */
+
+	if (reg->flags & KBASE_REG_CPU_CACHED)
+		map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+	list_add(&map->mappings_list, &map->alloc->mappings);
+
+ out:
+	return err;
+}
+
+static int kbase_mmu_dump_mmap(struct kbase_context *kctx,
+		struct vm_area_struct *vma,
+		struct kbase_va_region **const reg,
+		void **const kmap_addr)
+{
+	struct kbase_va_region *new_reg;
+	void *kaddr;
+	u32 nr_pages;
+	size_t size;
+	int err = 0;
+
+	dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
+	size = (vma->vm_end - vma->vm_start);
+	nr_pages = size >> PAGE_SHIFT;
+
+	kaddr = kbase_mmu_dump(kctx, nr_pages);
+
+	if (!kaddr) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	new_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 0, nr_pages,
+			KBASE_REG_ZONE_SAME_VA);
+	if (!new_reg) {
+		err = -ENOMEM;
+		WARN_ON(1);
+		goto out;
+	}
+
+	new_reg->cpu_alloc = kbase_alloc_create(kctx, 0, KBASE_MEM_TYPE_RAW);
+	if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
+		err = -ENOMEM;
+		new_reg->cpu_alloc = NULL;
+		WARN_ON(1);
+		goto out_no_alloc;
+	}
+
+	new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
+
+	new_reg->flags &= ~KBASE_REG_FREE;
+	new_reg->flags |= KBASE_REG_CPU_CACHED;
+	if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
+		err = -ENOMEM;
+		WARN_ON(1);
+		goto out_va_region;
+	}
+
+	*kmap_addr = kaddr;
+	*reg = new_reg;
+
+	dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
+	return 0;
+
+out_no_alloc:
+out_va_region:
+	kbase_free_alloced_region(new_reg);
+out:
+	return err;
+}
+
+
+void kbase_os_mem_map_lock(struct kbase_context *kctx)
+{
+	struct mm_struct *mm = current->mm;
+	(void)kctx;
+	mmap_read_lock(mm);
+}
+
+void kbase_os_mem_map_unlock(struct kbase_context *kctx)
+{
+	struct mm_struct *mm = current->mm;
+	(void)kctx;
+	mmap_read_unlock(mm);
+}
+
+static int kbasep_reg_mmap(struct kbase_context *kctx,
+			   struct vm_area_struct *vma,
+			   struct kbase_va_region **regm,
+			   size_t *nr_pages, size_t *aligned_offset)
+
+{
+	int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+	struct kbase_va_region *reg;
+	int err = 0;
+
+	*aligned_offset = 0;
+
+	dev_dbg(kctx->kbdev->dev, "in kbasep_reg_mmap\n");
+
+	/* SAME_VA stuff, fetch the right region */
+	reg = kctx->pending_regions[cookie];
+	if (!reg) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if ((reg->flags & KBASE_REG_GPU_NX) && (reg->nr_pages != *nr_pages)) {
+		/* incorrect mmap size */
+		/* leave the cookie for a potential later
+		 * mapping, or to be reclaimed later when the
+		 * context is freed */
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if ((vma->vm_flags & VM_READ && !(reg->flags & KBASE_REG_CPU_RD)) ||
+	    (vma->vm_flags & VM_WRITE && !(reg->flags & KBASE_REG_CPU_WR))) {
+		/* VM flags inconsistent with region flags */
+		err = -EPERM;
+		dev_err(kctx->kbdev->dev, "%s:%d inconsistent VM flags\n",
+							__FILE__, __LINE__);
+		goto out;
+	}
+
+	/* adjust down nr_pages to what we have physically */
+	*nr_pages = kbase_reg_current_backed_size(reg);
+
+	if (kbase_gpu_mmap(kctx, reg, vma->vm_start + *aligned_offset,
+						reg->nr_pages, 1) != 0) {
+		dev_err(kctx->kbdev->dev, "%s:%d\n", __FILE__, __LINE__);
+		/* Unable to map in GPU space. */
+		WARN_ON(1);
+		err = -ENOMEM;
+		goto out;
+	}
+	/* no need for the cookie anymore */
+	kctx->pending_regions[cookie] = NULL;
+	kctx->cookies |= (1UL << cookie);
+
+	/*
+	 * Overwrite the offset with the region start_pfn, so we effectively
+	 * map from offset 0 in the region. However subtract the aligned
+	 * offset so that when user space trims the mapping the beginning of
+	 * the trimmed VMA has the correct vm_pgoff;
+	 * the trimmed VMA has the correct vm_pgoff.
+	vma->vm_pgoff = reg->start_pfn - ((*aligned_offset)>>PAGE_SHIFT);
+out:
+	*regm = reg;
+	dev_dbg(kctx->kbdev->dev, "kbasep_reg_mmap done\n");
+
+	return err;
+}
+
+int kbase_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct kbase_context *kctx = file->private_data;
+	struct kbase_va_region *reg = NULL;
+	void *kaddr = NULL;
+	size_t nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int err = 0;
+	int free_on_close = 0;
+	struct device *dev = kctx->kbdev->dev;
+	size_t aligned_offset = 0;
+
+	dev_dbg(dev, "kbase_mmap\n");
+
+	if (!(vma->vm_flags & VM_READ))
+		vma->vm_flags &= ~VM_MAYREAD;
+	if (!(vma->vm_flags & VM_WRITE))
+		vma->vm_flags &= ~VM_MAYWRITE;
+
+	if (0 == nr_pages) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!(vma->vm_flags & VM_SHARED)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	kbase_gpu_vm_lock(kctx);
+
+	if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MAP_TRACKING_HANDLE)) {
+		/* The non-mapped tracking helper page */
+		err = kbase_tracking_page_setup(kctx, vma);
+		goto out_unlock;
+	}
+
+	/* if not the MTP, verify that the MTP has been mapped */
+	rcu_read_lock();
+	/* catches both the case where the special page isn't present and
+	 * the case where we've forked */
+	if (rcu_dereference(kctx->process_mm) != current->mm) {
+		err = -EINVAL;
+		rcu_read_unlock();
+		goto out_unlock;
+	}
+	rcu_read_unlock();
+
+	switch (vma->vm_pgoff) {
+	case PFN_DOWN(BASEP_MEM_INVALID_HANDLE):
+	case PFN_DOWN(BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE):
+		/* Illegal handle for direct map */
+		err = -EINVAL;
+		goto out_unlock;
+	case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
+		/* MMU dump */
+		err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
+		if (0 != err)
+			goto out_unlock;
+		/* free the region on munmap */
+		free_on_close = 1;
+		break;
+	case PFN_DOWN(BASE_MEM_COOKIE_BASE) ...
+	     PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) - 1: {
+		err = kbasep_reg_mmap(kctx, vma, &reg, &nr_pages,
+							&aligned_offset);
+		if (0 != err)
+			goto out_unlock;
+		/* free the region on munmap */
+		free_on_close = 1;
+		break;
+	}
+	default: {
+		reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+					(u64)vma->vm_pgoff << PAGE_SHIFT);
+
+		if (reg && !(reg->flags & KBASE_REG_FREE)) {
+			/* will this mapping overflow the size of the region? */
+			if (nr_pages > (reg->nr_pages -
+					(vma->vm_pgoff - reg->start_pfn))) {
+				err = -ENOMEM;
+				goto out_unlock;
+			}
+
+			if ((vma->vm_flags & VM_READ &&
+					!(reg->flags & KBASE_REG_CPU_RD)) ||
+					(vma->vm_flags & VM_WRITE &&
+					!(reg->flags & KBASE_REG_CPU_WR))) {
+				/* VM flags inconsistent with region flags */
+				err = -EPERM;
+				dev_err(dev, "%s:%d inconsistent VM flags\n",
+					__FILE__, __LINE__);
+				goto out_unlock;
+			}
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+			if (KBASE_MEM_TYPE_IMPORTED_UMM ==
+							reg->cpu_alloc->type) {
+				if (0 != (vma->vm_pgoff - reg->start_pfn)) {
+					err = -EINVAL;
+					dev_warn(dev, "%s:%d attempt to do a partial map in a dma_buf: non-zero offset to dma_buf mapping!\n",
+						__FILE__, __LINE__);
+					goto out_unlock;
+				}
+				err = dma_buf_mmap(
+					reg->cpu_alloc->imported.umm.dma_buf,
+					vma, vma->vm_pgoff - reg->start_pfn);
+				goto out_unlock;
+			}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+			if (reg->cpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
+				/* initial params check for aliased dumping map */
+				if (nr_pages > reg->gpu_alloc->imported.alias.stride ||
+					!reg->gpu_alloc->imported.alias.stride ||
+					!nr_pages) {
+					err = -EINVAL;
+					dev_warn(dev, "mmap aliased: invalid params!\n");
+					goto out_unlock;
+				}
+			} else if (reg->cpu_alloc->nents <
+					(vma->vm_pgoff - reg->start_pfn + nr_pages)) {
+				/* limit what we map to the amount currently backed */
+				if ((vma->vm_pgoff - reg->start_pfn) >= reg->cpu_alloc->nents)
+					nr_pages = 0;
+				else
+					nr_pages = reg->cpu_alloc->nents - (vma->vm_pgoff - reg->start_pfn);
+			}
+		} else {
+			err = -ENOMEM;
+			goto out_unlock;
+		}
+	} /* default */
+	} /* switch */
+
+	err = kbase_cpu_mmap(kctx, reg, vma, kaddr, nr_pages, aligned_offset,
+			free_on_close);
+
+	if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
+		/* MMU dump - userspace should now have a reference on
+		 * the pages, so we can now free the kernel mapping */
+		vfree(kaddr);
+	}
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+out:
+	if (err)
+		dev_err(dev, "mmap failed %d\n", err);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmap);
+
+void kbase_sync_mem_regions(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map, enum kbase_sync_type dest)
+{
+	size_t i;
+	off_t const offset = map->offset_in_page;
+	size_t const page_count = PFN_UP(offset + map->size);
+
+	/* Sync first page */
+	size_t sz = MIN(((size_t) PAGE_SIZE - offset), map->size);
+	struct tagged_addr cpu_pa = map->cpu_pages[0];
+	struct tagged_addr gpu_pa = map->gpu_pages[0];
+
+	kbase_sync_single(kctx, cpu_pa, gpu_pa, offset, sz, dest);
+
+	/* Sync middle pages (if any) */
+	for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+		cpu_pa = map->cpu_pages[i];
+		gpu_pa = map->gpu_pages[i];
+		kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, PAGE_SIZE, dest);
+	}
+
+	/* Sync last page (if any) */
+	if (page_count > 1) {
+		cpu_pa = map->cpu_pages[page_count - 1];
+		gpu_pa = map->gpu_pages[page_count - 1];
+		sz = ((offset + map->size - 1) & ~PAGE_MASK) + 1;
+		kbase_sync_single(kctx, cpu_pa, gpu_pa, 0, sz, dest);
+	}
+}
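+
+/*
+ * Worked example of the split above (illustrative only), assuming a 4KB
+ * PAGE_SIZE: for offset_in_page = 0x300 and size = 0x2000, page_count is
+ * PFN_UP(0x2300) = 3. The first page is synced for 0xd00 bytes, the single
+ * middle page for a full 0x1000 bytes, and the last page for 0x300 bytes,
+ * which adds up to the requested 0x2000 bytes.
+ */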
+
+static int kbase_vmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_va_region *reg, u64 offset_bytes, size_t size,
+		struct kbase_vmap_struct *map)
+{
+	unsigned long page_index;
+	unsigned int offset_in_page = offset_bytes & ~PAGE_MASK;
+	size_t page_count = PFN_UP(offset_in_page + size);
+	struct tagged_addr *page_array;
+	struct page **pages;
+	void *cpu_addr = NULL;
+	pgprot_t prot;
+	size_t i;
+
+	if (!size || !map || !reg->cpu_alloc || !reg->gpu_alloc)
+		return -EINVAL;
+
+	/* check if page_count calculation will wrap */
+	if (size > ((size_t)-1 / PAGE_SIZE))
+		return -EINVAL;
+
+	page_index = offset_bytes >> PAGE_SHIFT;
+
+	/* check if page_index + page_count will wrap */
+	if (-1UL - page_count < page_index)
+		return -EINVAL;
+
+	if (page_index + page_count > kbase_reg_current_backed_size(reg))
+		return -ENOMEM;
+
+	if (reg->flags & KBASE_REG_DONT_NEED)
+		return -EINVAL;
+
+	prot = PAGE_KERNEL;
+	if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
+		/* Map uncached */
+		prot = pgprot_writecombine(prot);
+	}
+
+	page_array = kbase_get_cpu_phy_pages(reg);
+	if (!page_array)
+		return -ENOMEM;
+
+	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	for (i = 0; i < page_count; i++)
+		pages[i] = as_page(page_array[page_index + i]);
+
+	/* Note: enforcing a RO prot_request onto prot is not done, since:
+	 * - CPU-arch-specific integration required
+	 * - kbase_vmap() requires no access checks to be made/enforced */
+
+	cpu_addr = vmap(pages, page_count, VM_MAP, prot);
+
+	kfree(pages);
+
+	if (!cpu_addr)
+		return -ENOMEM;
+
+	map->offset_in_page = offset_in_page;
+	map->cpu_alloc = reg->cpu_alloc;
+	map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index];
+	map->gpu_alloc = reg->gpu_alloc;
+	map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index];
+	map->addr = (void *)((uintptr_t)cpu_addr + offset_in_page);
+	map->size = size;
+	map->sync_needed = ((reg->flags & KBASE_REG_CPU_CACHED) != 0) &&
+		!kbase_mem_is_imported(map->gpu_alloc->type);
+
+	if (map->sync_needed)
+		kbase_sync_mem_regions(kctx, map, KBASE_SYNC_TO_CPU);
+
+	return 0;
+}
+
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		      unsigned long prot_request, struct kbase_vmap_struct *map)
+{
+	struct kbase_va_region *reg;
+	void *addr = NULL;
+	u64 offset_bytes;
+	struct kbase_mem_phy_alloc *cpu_alloc;
+	struct kbase_mem_phy_alloc *gpu_alloc;
+	int err;
+
+	kbase_gpu_vm_lock(kctx);
+
+	reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+			gpu_addr);
+	if (!reg || (reg->flags & KBASE_REG_FREE))
+		goto out_unlock;
+
+	/* check access permissions can be satisfied
+	 * Intended only for checking KBASE_REG_{CPU,GPU}_{RD,WR}
+	 */
+	if ((reg->flags & prot_request) != prot_request)
+		goto out_unlock;
+
+	offset_bytes = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
+	cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+	gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+	err = kbase_vmap_phy_pages(kctx, reg, offset_bytes, size, map);
+	if (err < 0)
+		goto fail_vmap_phy_pages;
+
+	addr = map->addr;
+
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return addr;
+
+fail_vmap_phy_pages:
+	kbase_gpu_vm_unlock(kctx);
+	kbase_mem_phy_alloc_put(cpu_alloc);
+	kbase_mem_phy_alloc_put(gpu_alloc);
+
+	return NULL;
+}
+
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		struct kbase_vmap_struct *map)
+{
+	/* 0 is specified for prot_request to indicate no access checks should
+	 * be made.
+	 *
+	 * As mentioned in kbase_vmap_prot() this means that a kernel-side
+	 * CPU-RO mapping is not enforced to allow this to work */
+	return kbase_vmap_prot(kctx, gpu_addr, size, 0u, map);
+}
+KBASE_EXPORT_TEST_API(kbase_vmap);
+
+static void kbase_vunmap_phy_pages(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map)
+{
+	void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
+	vunmap(addr);
+
+	if (map->sync_needed)
+		kbase_sync_mem_regions(kctx, map, KBASE_SYNC_TO_DEVICE);
+
+	map->offset_in_page = 0;
+	map->cpu_pages = NULL;
+	map->gpu_pages = NULL;
+	map->addr = NULL;
+	map->size = 0;
+	map->sync_needed = false;
+}
+
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
+{
+	kbase_vunmap_phy_pages(kctx, map);
+	map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc);
+	map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc);
+}
+KBASE_EXPORT_TEST_API(kbase_vunmap);
+
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
+{
+	struct mm_struct *mm;
+
+	rcu_read_lock();
+	mm = rcu_dereference(kctx->process_mm);
+	if (mm) {
+		atomic_add(pages, &kctx->nonmapped_pages);
+#ifdef SPLIT_RSS_COUNTING
+		add_mm_counter(mm, MM_FILEPAGES, pages);
+#else
+		spin_lock(&mm->page_table_lock);
+		add_mm_counter(mm, MM_FILEPAGES, pages);
+		spin_unlock(&mm->page_table_lock);
+#endif
+	}
+	rcu_read_unlock();
+}
+
+static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
+{
+	int pages;
+	struct mm_struct *mm;
+
+	spin_lock(&kctx->mm_update_lock);
+	mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
+	if (!mm) {
+		spin_unlock(&kctx->mm_update_lock);
+		return;
+	}
+
+	rcu_assign_pointer(kctx->process_mm, NULL);
+	spin_unlock(&kctx->mm_update_lock);
+	synchronize_rcu();
+
+	pages = atomic_xchg(&kctx->nonmapped_pages, 0);
+#ifdef SPLIT_RSS_COUNTING
+	add_mm_counter(mm, MM_FILEPAGES, -pages);
+#else
+	spin_lock(&mm->page_table_lock);
+	add_mm_counter(mm, MM_FILEPAGES, -pages);
+	spin_unlock(&mm->page_table_lock);
+#endif
+}
+
+static void kbase_special_vm_close(struct vm_area_struct *vma)
+{
+	struct kbase_context *kctx;
+
+	kctx = vma->vm_private_data;
+	kbasep_os_process_page_usage_drain(kctx);
+}
+
+static const struct vm_operations_struct kbase_vm_special_ops = {
+	.close = kbase_special_vm_close,
+};
+
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
+{
+	/* check that this is the only tracking page */
+	spin_lock(&kctx->mm_update_lock);
+	if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) {
+		spin_unlock(&kctx->mm_update_lock);
+		return -EFAULT;
+	}
+
+	rcu_assign_pointer(kctx->process_mm, current->mm);
+
+	spin_unlock(&kctx->mm_update_lock);
+
+	/* no real access */
+	vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+#else
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
+#endif
+	vma->vm_ops = &kbase_vm_special_ops;
+	vma->vm_private_data = kctx;
+
+	return 0;
+}
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
new file mode 100644
index 0000000..5cb88d1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
@@ -0,0 +1,443 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_linux.h
+ * Base kernel memory APIs, Linux implementation.
+ */
+
+#ifndef _KBASE_MEM_LINUX_H_
+#define _KBASE_MEM_LINUX_H_
+
+/** A HWC dump mapping */
+struct kbase_hwc_dma_mapping {
+	void       *cpu_va;
+	dma_addr_t  dma_pa;
+	size_t      size;
+};
+
+/**
+ * kbase_mem_alloc - Create a new allocation for GPU
+ *
+ * @kctx:         The kernel context
+ * @va_pages:     The number of pages of virtual address space to reserve
+ * @commit_pages: The number of physical pages to allocate upfront
+ * @extent:       The number of extra pages to allocate on each GPU fault which
+ *                grows the region.
+ * @flags:        bitmask of BASE_MEM_* flags to convey special requirements &
+ *                properties for the new allocation.
+ * @gpu_va:       Start address of the memory region which was allocated from GPU
+ *                virtual address space.
+ *
+ * Return: Pointer to the new allocation's region on success, NULL on failure
+ */
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
+		u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
+		u64 *gpu_va);
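+
+/*
+ * Example (illustrative sketch only; the BASE_MEM_PROT_* flag names are
+ * assumed from the base memory UAPI and the error handling is indicative):
+ *
+ *	u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
+ *		    BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
+ *	u64 gpu_va;
+ *	struct kbase_va_region *reg;
+ *
+ *	reg = kbase_mem_alloc(kctx, 16, 16, 0, &flags, &gpu_va);
+ *	if (!reg)
+ *		return -ENOMEM;
+ */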
+
+/**
+ * kbase_mem_query - Query properties of a GPU memory region
+ *
+ * @kctx:     The kernel context
+ * @gpu_addr: A GPU address contained within the memory region
+ * @query:    The type of query, from KBASE_MEM_QUERY_* flags, which could be
+ *            regarding the amount of backing physical memory allocated so far
+ *            for the region or the size of the region or the flags associated
+ *            with the region.
+ * @out:      Pointer to the location to store the result of query.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, u64 query,
+		u64 *const out);
+
+/**
+ * kbase_mem_import - Import the external memory for use by the GPU
+ *
+ * @kctx:     The kernel context
+ * @type:     Type of external memory
+ * @phandle:  Handle to the external memory interpreted as per the type.
+ * @padding:  Amount of extra VA pages to append to the imported buffer
+ * @gpu_va:   GPU address assigned to the imported external memory
+ * @va_pages: Size of the memory region reserved from the GPU address space
+ * @flags:    bitmask of BASE_MEM_* flags to convey special requirements &
+ *            properties for the new allocation representing the external
+ *            memory.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
+		void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
+		u64 *flags);
+
+/**
+ * kbase_mem_alias - Create a new allocation for GPU, aliasing one or more
+ *                   memory regions
+ *
+ * @kctx:      The kernel context
+ * @flags:     bitmask of BASE_MEM_* flags.
+ * @stride:    Bytes between start of each memory region
+ * @nents:     The number of regions to pack together into the alias
+ * @ai:        Pointer to the struct containing the memory aliasing info
+ * @num_pages: Number of pages the alias will cover
+ *
+ * Return: 0 on failure, otherwise the GPU VA of the alias
+ */
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
+		u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
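+
+/*
+ * Example (illustrative sketch only; how the base_mem_aliasing_info entries
+ * are filled in is elided, and the flag name is assumed from the base UAPI):
+ *
+ *	struct base_mem_aliasing_info ai[2] = { ...source regions... };
+ *	u64 flags = BASE_MEM_PROT_GPU_RD;
+ *	u64 num_pages;
+ *	u64 alias_va;
+ *
+ *	alias_va = kbase_mem_alias(kctx, &flags, stride, 2, ai, &num_pages);
+ *	if (!alias_va)
+ *		return -ENOMEM;
+ */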
+
+/**
+ * kbase_mem_flags_change - Change the flags for a memory region
+ *
+ * @kctx:     The kernel context
+ * @gpu_addr: A GPU address contained within the memory region to modify.
+ * @flags:    The new flags to set
+ * @mask:     Mask of the flags, from BASE_MEM_*, to modify.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr,
+		unsigned int flags, unsigned int mask);
+
+/**
+ * kbase_mem_commit - Change the physical backing size of a region
+ *
+ * @kctx: The kernel context
+ * @gpu_addr: Handle to the memory region
+ * @new_pages: Number of physical pages to back the region with
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
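+
+/*
+ * Example (illustrative sketch only; gpu_va is assumed to be the address of
+ * a growable, non-evictable native allocation). Grow the physical backing to
+ * 64 pages, then later shrink it back to 16:
+ *
+ *	int err;
+ *
+ *	err = kbase_mem_commit(kctx, gpu_va, 64);
+ *	...
+ *	err = kbase_mem_commit(kctx, gpu_va, 16);
+ */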
+
+/**
+ * kbase_mmap - Mmap method, gets invoked when mmap system call is issued on
+ *              device file /dev/malixx.
+ * @file: Pointer to the device file /dev/malixx instance.
+ * @vma:  Pointer to the struct containing the info where the GPU allocation
+ *        will be mapped in virtual address space of CPU.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mmap(struct file *file, struct vm_area_struct *vma);
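+
+/*
+ * Sketch of the userspace side (illustrative only; mali_fd is assumed to be
+ * the open device file descriptor and 4KB pages are assumed): for a
+ * BASE_MEM_SAME_VA allocation the returned gpu_va lies in the cookie range,
+ * and mmap()ing the device file at that offset is what creates the CPU
+ * mapping and fixes the region's final address:
+ *
+ *	void *cpu_va = mmap(NULL, va_pages << 12, PROT_READ | PROT_WRITE,
+ *			    MAP_SHARED, mali_fd, (off_t)gpu_va);
+ */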
+
+/**
+ * kbase_mem_evictable_init - Initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to initialize.
+ *
+ * Return: Zero on success or -errno on failure.
+ */
+int kbase_mem_evictable_init(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_evictable_deinit - De-initialize the Ephemeral memory eviction
+ * mechanism.
+ * @kctx: The kbase context to de-initialize.
+ */
+void kbase_mem_evictable_deinit(struct kbase_context *kctx);
+
+/**
+ * kbase_mem_grow_gpu_mapping - Grow the GPU mapping of an allocation
+ * @kctx:      Context the region belongs to
+ * @reg:       The GPU region
+ * @new_pages: The number of pages after the grow
+ * @old_pages: The number of pages before the grow
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Expand the GPU mapping to encompass the new physical pages which have
+ * been added to the allocation.
+ *
+ * Note: Caller must be holding the region lock.
+ */
+int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
+ * @gpu_alloc: The physical allocation to make evictable
+ *
+ * Return: 0 on success, -errno on error.
+ *
+ * Take the provided region and make all the physical pages within it
+ * reclaimable by the kernel, updating the per-process VM stats as well.
+ * Remove any CPU mappings (as these can't be removed in the shrinker callback
+ * as mmap_sem might already be taken) but leave the GPU mapping intact until
+ * the shrinker reclaims the allocation.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc);
+
+/**
+ * kbase_mem_evictable_unmake - Remove a physical allocation's eligibility for
+ * eviction.
+ * @alloc: The physical allocation to remove eviction eligibility from.
+ *
+ * Return: True if the allocation had its backing restored, false otherwise.
+ *
+ * Make the physical pages in the region no longer reclaimable and update the
+ * per-process stats. If the shrinker has already evicted the memory, it is
+ * re-allocated, provided the region is still alive.
+ *
+ * Note: Must be called with the region lock of the containing context.
+ */
+bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
+
+struct kbase_vmap_struct {
+	off_t offset_in_page;
+	struct kbase_mem_phy_alloc *cpu_alloc;
+	struct kbase_mem_phy_alloc *gpu_alloc;
+	struct tagged_addr *cpu_pages;
+	struct tagged_addr *gpu_pages;
+	void *addr;
+	size_t size;
+	bool sync_needed;
+};
+
+
+/**
+ * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
+ * requested access permissions are supported
+ * @kctx:         Context the VA range belongs to
+ * @gpu_addr:     Start address of VA range
+ * @size:         Size of VA range
+ * @prot_request: Flags indicating how the caller will then access the memory
+ * @map:          Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until kbase_vunmap() is called.
+ *
+ * The flags in @prot_request should use KBASE_REG_{CPU,GPU}_{RD,WR}, to check
+ * whether the region should allow the intended access, and return an error if
+ * disallowed. This is essential for security of imported memory, particularly
+ * a user buf from SHM mapped into the process as RO. In that case, write
+ * access must be checked if the intention is for kernel to write to the
+ * memory.
+ *
+ * The checks are also there to help catch access errors on memory where
+ * security is not a concern: imported memory that is always RW, and memory
+ * that was allocated and owned by the process attached to @kctx. In this case,
+ * it helps to identify memory that was mapped with the wrong access type.
+ *
+ * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
+ * where either the security of memory is solely dependent on those flags, or
+ * when userspace code was expecting only the GPU to access the memory (e.g. HW
+ * workarounds).
+ *
+ * All cache maintenance operations shall be ignored if the
+ * memory region has been imported.
+ *
+ */
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		      unsigned long prot_request, struct kbase_vmap_struct *map);
+
+/**
+ * kbase_vmap - Map a GPU VA range into the kernel safely
+ * @kctx:     Context the VA range belongs to
+ * @gpu_addr: Start address of VA range
+ * @size:     Size of VA range
+ * @map:      Structure to be given to kbase_vunmap() on freeing
+ *
+ * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
+ *
+ * Map a GPU VA Range into the kernel. The VA range must be contained within a
+ * GPU memory region. Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * This is safer than using kmap() on the pages directly,
+ * because the pages here are refcounted to prevent freeing (and hence reuse
+ * elsewhere in the system) until kbase_vunmap() is called.
+ *
+ * kbase_vmap_prot() should be used in preference, since kbase_vmap() makes no
+ * checks to ensure the security of e.g. imported user bufs from RO SHM.
+ *
+ * Note: All cache maintenance operations shall be ignored if the memory region
+ * has been imported.
+ */
+void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+		struct kbase_vmap_struct *map);
+
+/**
+ * kbase_vunmap - Unmap a GPU VA range from the kernel
+ * @kctx: Context the VA range belongs to
+ * @map:  Structure describing the mapping from the corresponding kbase_vmap()
+ *        call
+ *
+ * Unmaps a GPU VA range from the kernel, given its @map structure obtained
+ * from kbase_vmap(). Appropriate CPU cache-flushing operations are made as
+ * required, dependent on the CPU mapping for the memory region.
+ *
+ * The reference taken on pages during kbase_vmap() is released.
+ *
+ * Note: All cache maintenance operations shall be ignored if the memory region
+ * has been imported.
+ */
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map);
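+
+/*
+ * Example (illustrative sketch only): write a value into a GPU buffer from
+ * kernel space, requesting CPU write access so the region flags are checked:
+ *
+ *	struct kbase_vmap_struct map;
+ *	u32 *ptr;
+ *
+ *	ptr = kbase_vmap_prot(kctx, gpu_addr, sizeof(*ptr), KBASE_REG_CPU_WR,
+ *			      &map);
+ *	if (!ptr)
+ *		return -ENOMEM;
+ *	*ptr = value;
+ *	kbase_vunmap(kctx, &map);
+ */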
+
+extern const struct vm_operations_struct kbase_vm_ops;
+
+/**
+ * kbase_sync_mem_regions - Perform the cache maintenance for the kernel mode
+ *                          CPU mapping.
+ * @kctx: Context the CPU mapping belongs to.
+ * @map:  Structure describing the CPU mapping, setup previously by the
+ *        kbase_vmap() call.
+ * @dest: Indicates the type of maintenance required (i.e. flush or invalidate)
+ *
+ * Note: The caller shall ensure that CPU mapping is not revoked & remains
+ * active whilst the maintenance is in progress.
+ */
+void kbase_sync_mem_regions(struct kbase_context *kctx,
+		struct kbase_vmap_struct *map, enum kbase_sync_type dest);
+
+/**
+ * kbase_mem_shrink_cpu_mapping - Shrink the CPU mapping(s) of an allocation
+ * @kctx:      Context the region belongs to
+ * @reg:       The GPU region
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Shrink (or completely remove) all CPU mappings which reference the shrunk
+ * part of the allocation.
+ */
+void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx:      Context the region belongs to
+ * @reg:       The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, negative -errno on error
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the
+ * region itself is unmodified, as the VA still needs to be reserved; only the
+ * page tables are modified by this function.
+ */
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+		struct kbase_va_region *reg,
+		u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_phy_alloc_mapping_term - Terminate the kernel side mapping of a
+ *                                physical allocation
+ * @kctx:  The kernel base context associated with the mapping
+ * @alloc: Pointer to the allocation to terminate
+ *
+ * This function will unmap the kernel mapping, and free any structures used to
+ * track it.
+ */
+void kbase_phy_alloc_mapping_term(struct kbase_context *kctx,
+		struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_phy_alloc_mapping_get - Get a kernel-side CPU pointer to the permanent
+ *                               mapping of a physical allocation
+ * @kctx:             The kernel base context @gpu_addr will be looked up in
+ * @gpu_addr:         The gpu address to lookup for the kernel-side CPU mapping
+ * @out_kern_mapping: Pointer to storage for a struct kbase_vmap_struct pointer
+ *                    which will be used for a call to
+ *                    kbase_phy_alloc_mapping_put()
+ *
+ * Return: Pointer to a kernel-side accessible location that directly
+ *         corresponds to @gpu_addr, or NULL on failure
+ *
+ * Looks up @gpu_addr to retrieve the CPU pointer that can be used to access
+ * that location kernel-side. Only certain kinds of memory have a permanent
+ * kernel mapping, refer to the internal functions
+ * kbase_reg_needs_kernel_mapping() and kbase_phy_alloc_mapping_init() for more
+ * information.
+ *
+ * If this function succeeds, a CPU access to the returned pointer will access
+ * the actual location represented by @gpu_addr. That is, the return value does
+ * not require any offset added to it to access the location specified in
+ * @gpu_addr
+ *
+ * The client must take care to either apply any necessary sync operations when
+ * accessing the data, or ensure that the enclosing region was coherent with
+ * the GPU, or uncached in the CPU.
+ *
+ * The refcount on the physical allocations backing the region are taken, so
+ * that they do not disappear whilst the client is accessing it. Once the
+ * client has finished accessing the memory, it must be released with a call to
+ * kbase_phy_alloc_mapping_put()
+ *
+ * Whilst this is expected to execute quickly (the mapping was already set up
+ * when the physical allocation was created), the call is not IRQ-safe due to
+ * the region lookup involved.
+ *
+ * A NULL return value may indicate that:
+ * - a userside process has freed the allocation, and so @gpu_addr is no longer
+ *   valid
+ * - the region containing @gpu_addr does not support a permanent kernel mapping
+ */
+void *kbase_phy_alloc_mapping_get(struct kbase_context *kctx, u64 gpu_addr,
+		struct kbase_vmap_struct **out_kern_mapping);
+
+/**
+ * kbase_phy_alloc_mapping_put - Put a reference to the kernel-side mapping of a
+ *                               physical allocation
+ * @kctx:         The kernel base context associated with the mapping
+ * @kern_mapping: Pointer to the struct kbase_vmap_struct obtained from a call
+ *                to kbase_phy_alloc_mapping_get()
+ *
+ * Releases the reference to the allocations backing @kern_mapping that was
+ * obtained through a call to kbase_phy_alloc_mapping_get(). This must be used
+ * when the client no longer needs to access the kernel-side CPU pointer.
+ *
+ * If this was the last reference on the underlying physical allocations, they
+ * will go through the normal allocation free steps, which also includes an
+ * unmap of the permanent kernel mapping for those allocations.
+ *
+ * Due to these operations, the function is not IRQ-safe. However it is
+ * expected to execute quickly in the normal case, i.e. when the region holding
+ * the physical allocation is still present.
+ */
+void kbase_phy_alloc_mapping_put(struct kbase_context *kctx,
+		struct kbase_vmap_struct *kern_mapping);
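+
+/*
+ * Example (illustrative sketch only): access memory that has a permanent
+ * kernel mapping, taking and releasing the reference around the access:
+ *
+ *	struct kbase_vmap_struct *kern_mapping;
+ *	u32 *ptr;
+ *
+ *	ptr = kbase_phy_alloc_mapping_get(kctx, gpu_addr, &kern_mapping);
+ *	if (!ptr)
+ *		return -EINVAL;
+ *	val = *ptr;
+ *	kbase_phy_alloc_mapping_put(kctx, kern_mapping);
+ */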
+
+/**
+ * kbase_get_cache_line_alignment - Return cache line alignment
+ * @kbdev: Device pointer.
+ *
+ * Helper function to return the maximum cache line alignment considering
+ * both CPU and GPU cache sizes.
+ *
+ * Return: CPU and GPU cache line alignment, in bytes.
+ */
+u32 kbase_get_cache_line_alignment(struct kbase_device *kbdev);
+
+#endif				/* _KBASE_MEM_LINUX_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
new file mode 100644
index 0000000..7011603
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
@@ -0,0 +1,166 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2014,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_MEM_LOWLEVEL_H
+#define _KBASE_MEM_LOWLEVEL_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+#include <linux/dma-mapping.h>
+
+/**
+ * @brief Flags for kbase_phy_allocator_pages_alloc
+ */
+#define KBASE_PHY_PAGES_FLAG_DEFAULT (0)	/** Default allocation flag */
+#define KBASE_PHY_PAGES_FLAG_CLEAR   (1 << 0)	/** Clear the pages after allocation */
+#define KBASE_PHY_PAGES_FLAG_POISON  (1 << 1)	/** Fill the memory with a poison value */
+
+#define KBASE_PHY_PAGES_SUPPORTED_FLAGS (KBASE_PHY_PAGES_FLAG_DEFAULT|KBASE_PHY_PAGES_FLAG_CLEAR|KBASE_PHY_PAGES_FLAG_POISON)
+
+#define KBASE_PHY_PAGES_POISON_VALUE  0xFD /** Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */
+
+enum kbase_sync_type {
+	KBASE_SYNC_TO_CPU,
+	KBASE_SYNC_TO_DEVICE
+};
+
+struct tagged_addr { phys_addr_t tagged_addr; };
+
+#define HUGE_PAGE    (1u << 0)
+#define HUGE_HEAD    (1u << 1)
+#define FROM_PARTIAL (1u << 2)
+
+/*
+ * Note: if macro for converting physical address to page is not defined
+ * in the kernel itself, it is defined hereby. This is to avoid build errors
+ * which are reported during builds for some architectures.
+ */
+#ifndef phys_to_page
+#define phys_to_page(phys)	(pfn_to_page((phys) >> PAGE_SHIFT))
+#endif
+
+/**
+ * as_phys_addr_t - Retrieve the physical address from tagged address by
+ *                  masking the lower order 12 bits.
+ * @t: tagged address to be translated.
+ *
+ * Return: physical address corresponding to tagged address.
+ */
+static inline phys_addr_t as_phys_addr_t(struct tagged_addr t)
+{
+	return t.tagged_addr & PAGE_MASK;
+}
+
+/**
+ * as_page - Retrieve the struct page from a tagged address
+ * @t: tagged address to be translated.
+ *
+ * Return: pointer to struct page corresponding to tagged address.
+ */
+static inline struct page *as_page(struct tagged_addr t)
+{
+	return phys_to_page(as_phys_addr_t(t));
+}
+
+/**
+ * as_tagged - Convert a physical address to the tagged address type; no tag
+ *             info is present, so the lower order 12 bits will be 0
+ * @phys: physical address to be converted to tagged type
+ *
+ * This is used for 4KB physical pages allocated by the driver or for imported
+ * pages, and is needed because the physical pages tracking object stores its
+ * references using the tagged address type in lieu of the type generally used
+ * for physical addresses.
+ *
+ * Return: address of tagged address type.
+ */
+static inline struct tagged_addr as_tagged(phys_addr_t phys)
+{
+	struct tagged_addr t;
+
+	t.tagged_addr = phys & PAGE_MASK;
+	return t;
+}
+
+/**
+ * as_tagged_tag - Form the tagged address by storing the tag or metadata in the
+ *                 lower order 12 bits of the physical address
+ * @phys: physical address to be converted to tagged address
+ * @tag:  tag to be stored along with the physical address.
+ *
+ * The tag info is used while freeing up the pages
+ *
+ * Return: tagged address storing physical address & tag.
+ */
+static inline struct tagged_addr as_tagged_tag(phys_addr_t phys, int tag)
+{
+	struct tagged_addr t;
+
+	t.tagged_addr = (phys & PAGE_MASK) | (tag & ~PAGE_MASK);
+	return t;
+}
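+
+/*
+ * Example (illustrative sketch only): round-trip a page through the tagged
+ * address helpers, marking it as the head page of an unsplit large page:
+ *
+ *	struct page *p = ...;
+ *	struct tagged_addr t = as_tagged_tag(page_to_phys(p),
+ *					     HUGE_HEAD | HUGE_PAGE);
+ *
+ *	WARN_ON(as_phys_addr_t(t) != page_to_phys(p));
+ *	WARN_ON(as_page(t) != p);
+ *	WARN_ON(!is_huge_head(t));
+ */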
+
+/**
+ * is_huge - Check if the physical page is one of the 512 4KB pages of the
+ *           large page which was not split to be used partially
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if page belongs to large page, or false
+ */
+static inline bool is_huge(struct tagged_addr t)
+{
+	return t.tagged_addr & HUGE_PAGE;
+}
+
+/**
+ * is_huge_head - Check if the physical page is the first 4KB page of the
+ *                512 4KB pages within a large page which was not split
+ *                to be used partially
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if page is the first page of a large page, or false
+ */
+static inline bool is_huge_head(struct tagged_addr t)
+{
+	int mask = HUGE_HEAD | HUGE_PAGE;
+
+	return mask == (t.tagged_addr & mask);
+}
+
+/**
+ * is_partial - Check if the physical page is one of the 512 pages of the
+ *              large page which was split in 4KB pages to be used
+ *              partially for allocations >= 2 MB in size.
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if page was taken from large page used partially, or false
+ */
+static inline bool is_partial(struct tagged_addr t)
+{
+	return t.tagged_addr & FROM_PARTIAL;
+}
+
+#endif /* _KBASE_MEM_LOWLEVEL_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
new file mode 100644
index 0000000..0f91be1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
@@ -0,0 +1,842 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/shrinker.h>
+#include <linux/atomic.h>
+#include <linux/version.h>
+
+#define pool_dbg(pool, format, ...) \
+	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
+		(pool->next_pool) ? "kctx" : "kbdev",	\
+		kbase_mem_pool_size(pool),	\
+		kbase_mem_pool_max_size(pool),	\
+		##__VA_ARGS__)
+
+#define NOT_DIRTY false
+#define NOT_RECLAIMED false
+
+static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
+{
+	ssize_t max_size = kbase_mem_pool_max_size(pool);
+	ssize_t cur_size = kbase_mem_pool_size(pool);
+
+	return max(max_size - cur_size, (ssize_t)0);
+}
+
+static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool)
+{
+	return kbase_mem_pool_size(pool) >= kbase_mem_pool_max_size(pool);
+}
+
+static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
+{
+	return kbase_mem_pool_size(pool) == 0;
+}
+
+static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	lockdep_assert_held(&pool->pool_lock);
+
+	list_add(&p->lru, &pool->page_list);
+	pool->cur_size++;
+
+	pool_dbg(pool, "added page\n");
+}
+
+static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
+{
+	kbase_mem_pool_lock(pool);
+	kbase_mem_pool_add_locked(pool, p);
+	kbase_mem_pool_unlock(pool);
+}
+
+static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
+		struct list_head *page_list, size_t nr_pages)
+{
+	lockdep_assert_held(&pool->pool_lock);
+
+	list_splice(page_list, &pool->page_list);
+	pool->cur_size += nr_pages;
+
+	pool_dbg(pool, "added %zu pages\n", nr_pages);
+}
+
+static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
+		struct list_head *page_list, size_t nr_pages)
+{
+	kbase_mem_pool_lock(pool);
+	kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
+	kbase_mem_pool_unlock(pool);
+}
+
+static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	if (kbase_mem_pool_is_empty(pool))
+		return NULL;
+
+	p = list_first_entry(&pool->page_list, struct page, lru);
+	list_del_init(&p->lru);
+	pool->cur_size--;
+
+	pool_dbg(pool, "removed page\n");
+
+	return p;
+}
+
+static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	kbase_mem_pool_lock(pool);
+	p = kbase_mem_pool_remove_locked(pool);
+	kbase_mem_pool_unlock(pool);
+
+	return p;
+}
+
+static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	struct device *dev = pool->kbdev->dev;
+	dma_sync_single_for_device(dev, kbase_dma_addr(p),
+			(PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
+}
+
+static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	int i;
+
+	for (i = 0; i < (1U << pool->order); i++)
+		clear_highpage(p+i);
+
+	kbase_mem_pool_sync_page(pool, p);
+}
+
+static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
+		struct page *p)
+{
+	/* Zero page before spilling */
+	kbase_mem_pool_zero_page(next_pool, p);
+
+	kbase_mem_pool_add(next_pool, p);
+}
+
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+	gfp_t gfp;
+	struct device *dev = pool->kbdev->dev;
+	dma_addr_t dma_addr;
+	int i;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
+	LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+	/* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+	gfp = GFP_USER | __GFP_ZERO;
+#else
+	gfp = GFP_HIGHUSER | __GFP_ZERO;
+#endif
+
+	/* don't warn on higher-order allocation failures */
+	if (pool->order)
+		gfp |= __GFP_NOWARN;
+
+	p = alloc_pages(gfp, pool->order);
+	if (!p)
+		return NULL;
+
+	dma_addr = dma_map_page(dev, p, 0, (PAGE_SIZE << pool->order),
+				DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, dma_addr)) {
+		__free_pages(p, pool->order);
+		return NULL;
+	}
+
+	WARN_ON(dma_addr != page_to_phys(p));
+	for (i = 0; i < (1u << pool->order); i++)
+		kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
+
+	return p;
+}
+
+static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
+		struct page *p)
+{
+	struct device *dev = pool->kbdev->dev;
+	dma_addr_t dma_addr = kbase_dma_addr(p);
+	int i;
+
+	dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
+		       DMA_BIDIRECTIONAL);
+	for (i = 0; i < (1u << pool->order); i++)
+		kbase_clear_dma_addr(p+i);
+	__free_pages(p, pool->order);
+
+	pool_dbg(pool, "freed page to kernel\n");
+}
+
+static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
+		size_t nr_to_shrink)
+{
+	struct page *p;
+	size_t i;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
+		p = kbase_mem_pool_remove_locked(pool);
+		kbase_mem_pool_free_page(pool, p);
+	}
+
+	return i;
+}
+
+static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
+		size_t nr_to_shrink)
+{
+	size_t nr_freed;
+
+	kbase_mem_pool_lock(pool);
+	nr_freed = kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+	kbase_mem_pool_unlock(pool);
+
+	return nr_freed;
+}
+
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
+		size_t nr_to_grow)
+{
+	struct page *p;
+	size_t i;
+
+	kbase_mem_pool_lock(pool);
+
+	pool->dont_reclaim = true;
+	for (i = 0; i < nr_to_grow; i++) {
+		if (pool->dying) {
+			pool->dont_reclaim = false;
+			kbase_mem_pool_shrink_locked(pool, nr_to_grow);
+			kbase_mem_pool_unlock(pool);
+
+			return -ENOMEM;
+		}
+		kbase_mem_pool_unlock(pool);
+
+		p = kbase_mem_alloc_page(pool);
+		if (!p) {
+			kbase_mem_pool_lock(pool);
+			pool->dont_reclaim = false;
+			kbase_mem_pool_unlock(pool);
+
+			return -ENOMEM;
+		}
+
+		kbase_mem_pool_lock(pool);
+		kbase_mem_pool_add_locked(pool, p);
+	}
+	pool->dont_reclaim = false;
+	kbase_mem_pool_unlock(pool);
+
+	return 0;
+}
+
+void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
+{
+	size_t cur_size;
+	int err = 0;
+
+	cur_size = kbase_mem_pool_size(pool);
+
+	if (new_size > pool->max_size)
+		new_size = pool->max_size;
+
+	if (new_size < cur_size)
+		kbase_mem_pool_shrink(pool, cur_size - new_size);
+	else if (new_size > cur_size)
+		err = kbase_mem_pool_grow(pool, new_size - cur_size);
+
+	if (err) {
+		size_t grown_size = kbase_mem_pool_size(pool);
+
+		dev_warn(pool->kbdev->dev,
+			 "Mem pool not grown by the requested %zu pages, grew by %zu pages instead!\n",
+			 (new_size - cur_size), (grown_size - cur_size));
+	}
+}
+
+void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
+{
+	size_t cur_size;
+	size_t nr_to_shrink;
+
+	kbase_mem_pool_lock(pool);
+
+	pool->max_size = max_size;
+
+	cur_size = kbase_mem_pool_size(pool);
+	if (max_size < cur_size) {
+		nr_to_shrink = cur_size - max_size;
+		kbase_mem_pool_shrink_locked(pool, nr_to_shrink);
+	}
+
+	kbase_mem_pool_unlock(pool);
+}
+
+
+static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_mem_pool *pool;
+	size_t pool_size;
+
+	pool = container_of(s, struct kbase_mem_pool, reclaim);
+
+	kbase_mem_pool_lock(pool);
+	if (pool->dont_reclaim && !pool->dying) {
+		kbase_mem_pool_unlock(pool);
+		return 0;
+	}
+	pool_size = kbase_mem_pool_size(pool);
+	kbase_mem_pool_unlock(pool);
+
+	return pool_size;
+}
+
+static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	struct kbase_mem_pool *pool;
+	unsigned long freed;
+
+	pool = container_of(s, struct kbase_mem_pool, reclaim);
+
+	kbase_mem_pool_lock(pool);
+	if (pool->dont_reclaim && !pool->dying) {
+		kbase_mem_pool_unlock(pool);
+		return 0;
+	}
+
+	pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
+
+	freed = kbase_mem_pool_shrink_locked(pool, sc->nr_to_scan);
+
+	kbase_mem_pool_unlock(pool);
+
+	pool_dbg(pool, "reclaim freed %ld pages\n", freed);
+
+	return freed;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
+		struct shrink_control *sc)
+{
+	if (sc->nr_to_scan == 0)
+		return kbase_mem_pool_reclaim_count_objects(s, sc);
+
+	return kbase_mem_pool_reclaim_scan_objects(s, sc);
+}
+#endif
+
+int kbase_mem_pool_init(struct kbase_mem_pool *pool,
+		size_t max_size,
+		size_t order,
+		struct kbase_device *kbdev,
+		struct kbase_mem_pool *next_pool)
+{
+	pool->cur_size = 0;
+	pool->max_size = max_size;
+	pool->order = order;
+	pool->kbdev = kbdev;
+	pool->next_pool = next_pool;
+	pool->dying = false;
+
+	spin_lock_init(&pool->pool_lock);
+	INIT_LIST_HEAD(&pool->page_list);
+
+	/* Register shrinker */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	pool->reclaim.shrink = kbase_mem_pool_reclaim_shrink;
+#else
+	pool->reclaim.count_objects = kbase_mem_pool_reclaim_count_objects;
+	pool->reclaim.scan_objects = kbase_mem_pool_reclaim_scan_objects;
+#endif
+	pool->reclaim.seeks = DEFAULT_SEEKS;
+	/* Kernel versions prior to 3.1:
+	 * struct shrinker does not define batch */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+	pool->reclaim.batch = 0;
+#endif
+	register_shrinker(&pool->reclaim);
+
+	pool_dbg(pool, "initialized\n");
+
+	return 0;
+}
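+
+/*
+ * Illustrative call only (max_pages stands for whatever limit the caller
+ * chooses): a context pool of 4 kB pages (order 0) chained to the device
+ * pool, so that pages freed beyond max_size spill into the device pool:
+ *
+ *   kbase_mem_pool_init(&kctx->mem_pool, max_pages, 0, kbdev,
+ *                       &kbdev->mem_pool);
+ */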
+
+void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
+{
+	kbase_mem_pool_lock(pool);
+	pool->dying = true;
+	kbase_mem_pool_unlock(pool);
+}
+
+void kbase_mem_pool_term(struct kbase_mem_pool *pool)
+{
+	struct kbase_mem_pool *next_pool = pool->next_pool;
+	struct page *p, *tmp;
+	size_t nr_to_spill = 0;
+	LIST_HEAD(spill_list);
+	LIST_HEAD(free_list);
+	int i;
+
+	pool_dbg(pool, "terminate()\n");
+
+	unregister_shrinker(&pool->reclaim);
+
+	kbase_mem_pool_lock(pool);
+	pool->max_size = 0;
+
+	if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+		/* Spill to next pool (may overspill) */
+		nr_to_spill = kbase_mem_pool_capacity(next_pool);
+		nr_to_spill = min(kbase_mem_pool_size(pool), nr_to_spill);
+
+		/* Zero pages first without holding the next_pool lock */
+		for (i = 0; i < nr_to_spill; i++) {
+			p = kbase_mem_pool_remove_locked(pool);
+			list_add(&p->lru, &spill_list);
+		}
+	}
+
+	while (!kbase_mem_pool_is_empty(pool)) {
+		/* Free remaining pages to kernel */
+		p = kbase_mem_pool_remove_locked(pool);
+		list_add(&p->lru, &free_list);
+	}
+
+	kbase_mem_pool_unlock(pool);
+
+	if (next_pool && nr_to_spill) {
+		list_for_each_entry(p, &spill_list, lru)
+			kbase_mem_pool_zero_page(pool, p);
+
+		/* Add new page list to next_pool */
+		kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);
+
+		pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
+	}
+
+	list_for_each_entry_safe(p, tmp, &free_list, lru) {
+		list_del_init(&p->lru);
+		kbase_mem_pool_free_page(pool, p);
+	}
+
+	pool_dbg(pool, "terminated\n");
+}
+
+struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	do {
+		pool_dbg(pool, "alloc()\n");
+		p = kbase_mem_pool_remove(pool);
+
+		if (p)
+			return p;
+
+		pool = pool->next_pool;
+	} while (pool);
+
+	return NULL;
+}
+
+struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
+{
+	struct page *p;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	pool_dbg(pool, "alloc_locked()\n");
+	p = kbase_mem_pool_remove_locked(pool);
+
+	if (p)
+		return p;
+
+	return NULL;
+}
+
+void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
+		bool dirty)
+{
+	struct kbase_mem_pool *next_pool = pool->next_pool;
+
+	pool_dbg(pool, "free()\n");
+
+	if (!kbase_mem_pool_is_full(pool)) {
+		/* Add to our own pool */
+		if (dirty)
+			kbase_mem_pool_sync_page(pool, p);
+
+		kbase_mem_pool_add(pool, p);
+	} else if (next_pool && !kbase_mem_pool_is_full(next_pool)) {
+		/* Spill to next pool */
+		kbase_mem_pool_spill(next_pool, p);
+	} else {
+		/* Free page */
+		kbase_mem_pool_free_page(pool, p);
+	}
+}
+
+void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
+		bool dirty)
+{
+	pool_dbg(pool, "free_locked()\n");
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	if (!kbase_mem_pool_is_full(pool)) {
+		/* Add to our own pool */
+		if (dirty)
+			kbase_mem_pool_sync_page(pool, p);
+
+		kbase_mem_pool_add_locked(pool, p);
+	} else {
+		/* Free page */
+		kbase_mem_pool_free_page(pool, p);
+	}
+}
+
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
+		struct tagged_addr *pages, bool partial_allowed)
+{
+	struct page *p;
+	size_t nr_from_pool;
+	size_t i = 0;
+	int err = -ENOMEM;
+	size_t nr_pages_internal;
+
+	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
+
+	if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
+		return -EINVAL;
+
+	pool_dbg(pool, "alloc_pages(4k=%zu):\n", nr_4k_pages);
+	pool_dbg(pool, "alloc_pages(internal=%zu):\n", nr_pages_internal);
+
+	/* Get pages from this pool */
+	kbase_mem_pool_lock(pool);
+	nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
+	while (nr_from_pool--) {
+		int j;
+		p = kbase_mem_pool_remove_locked(pool);
+		if (pool->order) {
+			pages[i++] = as_tagged_tag(page_to_phys(p),
+						   HUGE_HEAD | HUGE_PAGE);
+			for (j = 1; j < (1u << pool->order); j++)
+				pages[i++] = as_tagged_tag(page_to_phys(p) +
+							   PAGE_SIZE * j,
+							   HUGE_PAGE);
+		} else {
+			pages[i++] = as_tagged(page_to_phys(p));
+		}
+	}
+	kbase_mem_pool_unlock(pool);
+
+	if (i != nr_4k_pages && pool->next_pool) {
+		/* Allocate via next pool */
+		err = kbase_mem_pool_alloc_pages(pool->next_pool,
+				nr_4k_pages - i, pages + i, partial_allowed);
+
+		if (err < 0)
+			goto err_rollback;
+
+		i += err;
+	} else {
+		/* Get any remaining pages from kernel */
+		while (i != nr_4k_pages) {
+			p = kbase_mem_alloc_page(pool);
+			if (!p) {
+				if (partial_allowed)
+					goto done;
+				else
+					goto err_rollback;
+			}
+
+			if (pool->order) {
+				int j;
+
+				pages[i++] = as_tagged_tag(page_to_phys(p),
+							   HUGE_PAGE |
+							   HUGE_HEAD);
+				for (j = 1; j < (1u << pool->order); j++) {
+					phys_addr_t phys;
+
+					phys = page_to_phys(p) + PAGE_SIZE * j;
+					pages[i++] = as_tagged_tag(phys,
+								   HUGE_PAGE);
+				}
+			} else {
+				pages[i++] = as_tagged(page_to_phys(p));
+			}
+		}
+	}
+
+done:
+	pool_dbg(pool, "alloc_pages(%zu) done\n", i);
+	return i;
+
+err_rollback:
+	kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
+	return err;
+}
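+
+/*
+ * Example of the layout produced above for a large-page pool (e.g. order 9
+ * for 2 MB pages): each pool page yields 512 consecutive entries in pages[],
+ * where entry 0 carries the base physical address tagged
+ * HUGE_HEAD | HUGE_PAGE and entries 1..511 carry base + j * PAGE_SIZE tagged
+ * HUGE_PAGE only.
+ */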
+
+int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_4k_pages, struct tagged_addr *pages)
+{
+	struct page *p;
+	size_t i;
+	size_t nr_pages_internal;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
+
+	if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
+		return -EINVAL;
+
+	pool_dbg(pool, "alloc_pages_locked(4k=%zu):\n", nr_4k_pages);
+	pool_dbg(pool, "alloc_pages_locked(internal=%zu):\n",
+			nr_pages_internal);
+
+	if (kbase_mem_pool_size(pool) < nr_pages_internal) {
+		pool_dbg(pool, "Failed alloc\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nr_pages_internal; i++) {
+		int j;
+
+		p = kbase_mem_pool_remove_locked(pool);
+		if (pool->order) {
+			*pages++ = as_tagged_tag(page_to_phys(p),
+						   HUGE_HEAD | HUGE_PAGE);
+			for (j = 1; j < (1u << pool->order); j++) {
+				*pages++ = as_tagged_tag(page_to_phys(p) +
+							   PAGE_SIZE * j,
+							   HUGE_PAGE);
+			}
+		} else {
+			*pages++ = as_tagged(page_to_phys(p));
+		}
+	}
+
+	return nr_4k_pages;
+}
+
+static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
+				     size_t nr_pages, struct tagged_addr *pages,
+				     bool zero, bool sync)
+{
+	struct page *p;
+	size_t nr_to_pool = 0;
+	LIST_HEAD(new_page_list);
+	size_t i;
+
+	if (!nr_pages)
+		return;
+
+	pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n",
+			nr_pages, zero, sync);
+
+	/* Zero/sync pages first without holding the pool lock */
+	for (i = 0; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
+			p = as_page(pages[i]);
+			if (zero)
+				kbase_mem_pool_zero_page(pool, p);
+			else if (sync)
+				kbase_mem_pool_sync_page(pool, p);
+
+			list_add(&p->lru, &new_page_list);
+			nr_to_pool++;
+		}
+		pages[i] = as_tagged(0);
+	}
+
+	/* Add new page list to pool */
+	kbase_mem_pool_add_list(pool, &new_page_list, nr_to_pool);
+
+	pool_dbg(pool, "add_array(%zu) added %zu pages\n",
+			nr_pages, nr_to_pool);
+}
+
+static void kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool,
+		size_t nr_pages, struct tagged_addr *pages,
+		bool zero, bool sync)
+{
+	struct page *p;
+	size_t nr_to_pool = 0;
+	LIST_HEAD(new_page_list);
+	size_t i;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	if (!nr_pages)
+		return;
+
+	pool_dbg(pool, "add_array_locked(%zu, zero=%d, sync=%d):\n",
+			nr_pages, zero, sync);
+
+	/* Zero/sync pages first */
+	for (i = 0; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
+			p = as_page(pages[i]);
+			if (zero)
+				kbase_mem_pool_zero_page(pool, p);
+			else if (sync)
+				kbase_mem_pool_sync_page(pool, p);
+
+			list_add(&p->lru, &new_page_list);
+			nr_to_pool++;
+		}
+		pages[i] = as_tagged(0);
+	}
+
+	/* Add new page list to pool */
+	kbase_mem_pool_add_list_locked(pool, &new_page_list, nr_to_pool);
+
+	pool_dbg(pool, "add_array_locked(%zu) added %zu pages\n",
+			nr_pages, nr_to_pool);
+}
+
+void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
+		struct tagged_addr *pages, bool dirty, bool reclaimed)
+{
+	struct kbase_mem_pool *next_pool = pool->next_pool;
+	struct page *p;
+	size_t nr_to_pool;
+	LIST_HEAD(to_pool_list);
+	size_t i = 0;
+
+	pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
+
+	if (!reclaimed) {
+		/* Add to this pool */
+		nr_to_pool = kbase_mem_pool_capacity(pool);
+		nr_to_pool = min(nr_pages, nr_to_pool);
+
+		kbase_mem_pool_add_array(pool, nr_to_pool, pages, false, dirty);
+
+		i += nr_to_pool;
+
+		if (i != nr_pages && next_pool) {
+			/* Spill to next pool (may overspill) */
+			nr_to_pool = kbase_mem_pool_capacity(next_pool);
+			nr_to_pool = min(nr_pages - i, nr_to_pool);
+
+			kbase_mem_pool_add_array(next_pool, nr_to_pool,
+					pages + i, true, dirty);
+			i += nr_to_pool;
+		}
+	}
+
+	/* Free any remaining pages to kernel */
+	for (; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
+			pages[i] = as_tagged(0);
+			continue;
+		}
+
+		p = as_page(pages[i]);
+
+		kbase_mem_pool_free_page(pool, p);
+		pages[i] = as_tagged(0);
+	}
+
+	pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
+}
+
+
+void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
+		size_t nr_pages, struct tagged_addr *pages, bool dirty,
+		bool reclaimed)
+{
+	struct page *p;
+	size_t nr_to_pool;
+	LIST_HEAD(to_pool_list);
+	size_t i = 0;
+
+	lockdep_assert_held(&pool->pool_lock);
+
+	pool_dbg(pool, "free_pages_locked(%zu):\n", nr_pages);
+
+	if (!reclaimed) {
+		/* Add to this pool */
+		nr_to_pool = kbase_mem_pool_capacity(pool);
+		nr_to_pool = min(nr_pages, nr_to_pool);
+
+		kbase_mem_pool_add_array_locked(pool, nr_pages, pages, false,
+				dirty);
+
+		i += nr_to_pool;
+	}
+
+	/* Free any remaining pages to kernel */
+	for (; i < nr_pages; i++) {
+		if (unlikely(!as_phys_addr_t(pages[i])))
+			continue;
+
+		if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
+			pages[i] = as_tagged(0);
+			continue;
+		}
+
+		p = as_page(pages[i]);
+
+		kbase_mem_pool_free_page(pool, p);
+		pages[i] = as_tagged(0);
+	}
+
+	pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c
new file mode 100644
index 0000000..4b4eeb3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.c
@@ -0,0 +1,93 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <mali_kbase_mem_pool_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static int kbase_mem_pool_debugfs_size_get(void *data, u64 *val)
+{
+	struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+	*val = kbase_mem_pool_size(pool);
+
+	return 0;
+}
+
+static int kbase_mem_pool_debugfs_size_set(void *data, u64 val)
+{
+	struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+	kbase_mem_pool_trim(pool, val);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_size_fops,
+		kbase_mem_pool_debugfs_size_get,
+		kbase_mem_pool_debugfs_size_set,
+		"%llu\n");
+
+static int kbase_mem_pool_debugfs_max_size_get(void *data, u64 *val)
+{
+	struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+	*val = kbase_mem_pool_max_size(pool);
+
+	return 0;
+}
+
+static int kbase_mem_pool_debugfs_max_size_set(void *data, u64 val)
+{
+	struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;
+
+	kbase_mem_pool_set_max_size(pool, val);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_max_size_fops,
+		kbase_mem_pool_debugfs_max_size_get,
+		kbase_mem_pool_debugfs_max_size_set,
+		"%llu\n");
+
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+		struct kbase_mem_pool *pool,
+		struct kbase_mem_pool *lp_pool)
+{
+	debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent,
+			pool, &kbase_mem_pool_debugfs_size_fops);
+
+	debugfs_create_file("mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+			pool, &kbase_mem_pool_debugfs_max_size_fops);
+
+	debugfs_create_file("lp_mem_pool_size", S_IRUGO | S_IWUSR, parent,
+			lp_pool, &kbase_mem_pool_debugfs_size_fops);
+
+	debugfs_create_file("lp_mem_pool_max_size", S_IRUGO | S_IWUSR, parent,
+			lp_pool, &kbase_mem_pool_debugfs_max_size_fops);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h
new file mode 100644
index 0000000..990d91c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool_debugfs.h
@@ -0,0 +1,45 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KBASE_MEM_POOL_DEBUGFS_H
+#define _KBASE_MEM_POOL_DEBUGFS_H
+
+#include <mali_kbase.h>
+
+/**
+ * kbase_mem_pool_debugfs_init - add debugfs knobs for @pool
+ * @parent:  Parent debugfs dentry
+ * @pool:    Memory pool of small pages to control
+ * @lp_pool: Memory pool of large pages to control
+ *
+ * Adds four debugfs files under @parent:
+ * - mem_pool_size: get/set the current size of @pool
+ * - mem_pool_max_size: get/set the max size of @pool
+ * - lp_mem_pool_size: get/set the current size of @lp_pool
+ * - lp_mem_pool_max_size: get/set the max size of @lp_pool
+ */
+void kbase_mem_pool_debugfs_init(struct dentry *parent,
+		struct kbase_mem_pool *pool,
+		struct kbase_mem_pool *lp_pool);
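+
+/*
+ * Illustrative call (the dentry and pools are whatever the caller owns, for
+ * example a per-context debugfs directory and that context's pools):
+ *
+ *   kbase_mem_pool_debugfs_init(kctx->kctx_dentry, &kctx->mem_pool,
+ *                               &kctx->lp_mem_pool);
+ */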
+
+#endif  /*_KBASE_MEM_POOL_DEBUGFS_H*/
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c
new file mode 100644
index 0000000..d4f8433
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c
@@ -0,0 +1,126 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/** Show callback for the @c mem_profile debugfs file.
+ *
+ * This function is called to get the contents of the @c mem_profile debugfs
+ * file. This is a report of current memory usage and distribution in userspace.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if it successfully prints data in debugfs entry file, non-zero otherwise
+ */
+static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_context *kctx = sfile->private;
+
+	mutex_lock(&kctx->mem_profile_lock);
+
+	seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);
+
+	seq_putc(sfile, '\n');
+
+	mutex_unlock(&kctx->mem_profile_lock);
+
+	return 0;
+}
+
+/*
+ *  File operations related to debugfs entry for mem_profile
+ */
+static int kbasep_mem_profile_debugfs_open(struct inode *in, struct file *file)
+{
+	return single_open(file, kbasep_mem_profile_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_mem_profile_debugfs_fops = {
+	.open = kbasep_mem_profile_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+					size_t size)
+{
+	int err = 0;
+
+	mutex_lock(&kctx->mem_profile_lock);
+
+	dev_dbg(kctx->kbdev->dev, "initialised: %d",
+		kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+	if (!kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+		if (!debugfs_create_file("mem_profile", S_IRUGO,
+					kctx->kctx_dentry, kctx,
+					&kbasep_mem_profile_debugfs_fops)) {
+			err = -EAGAIN;
+		} else {
+			kbase_ctx_flag_set(kctx,
+					   KCTX_MEM_PROFILE_INITIALIZED);
+		}
+	}
+
+	if (kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED)) {
+		kfree(kctx->mem_profile_data);
+		kctx->mem_profile_data = data;
+		kctx->mem_profile_size = size;
+	} else {
+		kfree(data);
+	}
+
+	dev_dbg(kctx->kbdev->dev, "returning: %d, initialised: %d",
+		err, kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+	mutex_unlock(&kctx->mem_profile_lock);
+
+	return err;
+}
+
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx)
+{
+	mutex_lock(&kctx->mem_profile_lock);
+
+	dev_dbg(kctx->kbdev->dev, "initialised: %d",
+				kbase_ctx_flag(kctx, KCTX_MEM_PROFILE_INITIALIZED));
+
+	kfree(kctx->mem_profile_data);
+	kctx->mem_profile_data = NULL;
+	kctx->mem_profile_size = 0;
+
+	mutex_unlock(&kctx->mem_profile_lock);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+					size_t size)
+{
+	kfree(data);
+	return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h
new file mode 100644
index 0000000..1462247
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs.h
+ * Header file for mem profiles entries in debugfs
+ *
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_H
+#define _KBASE_MEM_PROFILE_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Remove entry from Mali memory profile debugfs
+ */
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx);
+
+/**
+ * @brief Insert @p data to the debugfs file so it can be read by userspace
+ *
+ * The function takes ownership of @p data and frees it later when new data
+ * is inserted.
+ *
+ * If the debugfs entry corresponding to the @p kctx doesn't exist,
+ * an attempt will be made to create it.
+ *
+ * @param kctx The context whose debugfs file @p data should be inserted to
+ * @param data A NULL-terminated string to be inserted to the debugfs file,
+ *             without the trailing new line character
+ * @param size The length of the @p data string
+ * @return 0 if @p data inserted correctly
+ *         -EAGAIN in case of error
+ * @post @ref mem_profile_initialized will be set to @c true
+ *       the first time this function succeeds.
+ */
+int kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+					size_t size);
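+
+/*
+ * Illustrative usage (buf and len are the caller's own): ownership of the
+ * buffer passes to this function whether or not it succeeds, so the caller
+ * must not free or reuse it afterwards.
+ *
+ *   char *buf = kmalloc(len + 1, GFP_KERNEL);
+ *   ... format the NUL-terminated report into buf ...
+ *   if (kbasep_mem_profile_debugfs_insert(kctx, buf, len))
+ *       dev_warn(kctx->kbdev->dev, "mem_profile insert failed");
+ */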
+
+#endif  /*_KBASE_MEM_PROFILE_DEBUGFS_H*/
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
new file mode 100644
index 0000000..3c76071
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_mem_profile_debugfs_buf_size.h
+ * Header file for the size of the buffer to accumulate the histogram report text in
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+#define _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+
+/**
+ * The size of the buffer to accumulate the histogram report text in
+ * @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT
+ */
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE \
+	((size_t) (64 + ((80 + (56 * 64)) * 35) + 56))
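+
+/*
+ * With the constants above this evaluates to
+ * 64 + (80 + 56 * 64) * 35 + 56 = 128360 bytes (roughly 125 KiB).
+ */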
+
+#endif  /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
new file mode 100644
index 0000000..84341ca
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
@@ -0,0 +1,2658 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mmu.c
+ * Base kernel MMU management.
+ */
+
+/* #define DEBUG    1 */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+#include <mali_kbase_gator.h>
+#endif
+#include <mali_kbase_tlstream.h>
+#include <mali_kbase_instr_defs.h>
+#include <mali_kbase_debug.h>
+
+#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+
+#include <mali_kbase_defs.h>
+#include <mali_kbase_hw.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_hwaccess_jm.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_mem.h>
+
+#define KBASE_MMU_PAGE_ENTRIES 512
+
+/**
+ * kbase_mmu_flush_invalidate() - Flush and invalidate the GPU caches.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * Issue a cache flush + invalidate to the GPU caches and invalidate the TLBs.
+ *
+ * If sync is not set then transactions still in flight when the flush is
+ * issued may use the old page tables and the data they write will not be
+ * written out to memory; this function returns after the flush has been
+ * issued but before all accesses which might affect the flushed region have
+ * completed.
+ *
+ * If sync is set then accesses in the flushed region will be drained
+ * before data is flushed and invalidated through L1, L2 and into memory,
+ * after which point this function will return.
+ */
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+		u64 vpfn, size_t nr, bool sync);
+
+/**
+ * kbase_mmu_flush_invalidate_no_ctx() - Flush and invalidate the GPU caches.
+ * @kbdev: Device pointer.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ * @as_nr: GPU address space number for which flush + invalidate is required.
+ *
+ * This is used for MMU tables which do not belong to a user space context.
+ */
+static void kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev,
+		u64 vpfn, size_t nr, bool sync, int as_nr);
+
+/**
+ * kbase_mmu_sync_pgd - sync page directory to memory
+ * @kbdev:	Device pointer.
+ * @handle:	Address of DMA region.
+ * @size:       Size of the region to sync.
+ *
+ * This should be called after each page directory update.
+ */
+
+static void kbase_mmu_sync_pgd(struct kbase_device *kbdev,
+		dma_addr_t handle, size_t size)
+{
+	/* If the page table is not coherent then ensure the GPU can read
+	 * the pages from memory
+	 */
+	if (kbdev->system_coherency != COHERENCY_ACE)
+		dma_sync_single_for_device(kbdev->dev, handle, size,
+				DMA_TO_DEVICE);
+}
+
+/*
+ * Definitions:
+ * - PGD: Page Directory.
+ * - PTE: Page Table Entry. A 64bit value pointing to the next
+ *        level of translation
+ * - ATE: Address Translation Entry. A 64bit value pointing to
+ *        a 4kB physical page.
+ */
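+
+/*
+ * Each table holds 512 (2^9) entries, so the index at a given level is a
+ * 9-bit slice of the virtual page frame number, extracted below as
+ * (vpfn >> ((3 - level) * 9)) & 0x1FF: bits [8:0] at the bottom level (3),
+ * bits [17:9] at level 2, bits [26:18] at level 1, and so on.
+ */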
+
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+		struct kbase_as *as, const char *reason_str,
+		struct kbase_fault *fault);
+
+static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr *phys, size_t nr,
+					unsigned long flags);
+
+/**
+ * reg_grow_calc_extra_pages() - Calculate the number of backed pages to add to
+ *                               a region on a GPU page fault
+ *
+ * @kbdev:         KBase device
+ * @reg:           The region that will be backed with more pages
+ * @fault_rel_pfn: PFN of the fault relative to the start of the region
+ *
+ * This calculates how much to increase the backing of a region by, based on
+ * where a GPU page fault occurred and the flags in the region.
+ *
+ * This can be more than the minimum number of pages that would reach
+ * @fault_rel_pfn, for example to reduce the overall rate of page fault
+ * interrupts on a region, or to ensure that the end address is aligned.
+ *
+ * Return: the number of backed pages to increase by
+ */
+static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev,
+		struct kbase_va_region *reg, size_t fault_rel_pfn)
+{
+	size_t multiple = reg->extent;
+	size_t reg_current_size = kbase_reg_current_backed_size(reg);
+	size_t minimum_extra = fault_rel_pfn - reg_current_size + 1;
+	size_t remainder;
+
+	if (!multiple) {
+		dev_warn(kbdev->dev,
+				"VA Region 0x%llx extent was 0, allocator needs to set this properly for KBASE_REG_PF_GROW\n",
+				((unsigned long long)reg->start_pfn) << PAGE_SHIFT);
+		return minimum_extra;
+	}
+
+	/* Calculate the remainder to subtract from minimum_extra to make it
+	 * the desired (rounded down) multiple of the extent.
+	 * Depending on reg's flags, the base used for calculating multiples is
+	 * different */
+	if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
+		/* multiple is based from the top of the initial commit, which
+		 * has been allocated in such a way that (start_pfn +
+		 * initial_commit) is already aligned to multiple. Hence the
+		 * pfn for the end of committed memory will also be aligned to
+		 * multiple */
+		size_t initial_commit = reg->initial_commit;
+
+		if (fault_rel_pfn < initial_commit) {
+			/* this case is just to catch in case it's been
+			 * recommitted by userspace to be smaller than the
+			 * initial commit */
+			minimum_extra = initial_commit - reg_current_size;
+			remainder = 0;
+		} else {
+			/* same as calculating (fault_rel_pfn - initial_commit + 1) */
+			size_t pages_after_initial = minimum_extra + reg_current_size - initial_commit;
+
+			remainder = pages_after_initial % multiple;
+		}
+	} else {
+		/* multiple is based from the current backed size, even if the
+		 * current backed size/pfn for end of committed memory are not
+		 * themselves aligned to multiple */
+		remainder = minimum_extra % multiple;
+	}
+
+	if (remainder == 0)
+		return minimum_extra;
+
+	return minimum_extra + multiple - remainder;
+}
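+
+/*
+ * Worked example for the non-KBASE_REG_TILER_ALIGN_TOP case: with an extent
+ * of 64 pages and minimum_extra = 10, remainder = 10 % 64 = 10, so the
+ * function returns 10 + 64 - 10 = 64, i.e. the growth is rounded up to the
+ * next multiple of the extent.
+ */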
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+static void kbase_gpu_mmu_handle_write_faulting_as(
+				struct kbase_device *kbdev,
+				struct kbase_as *faulting_as,
+				u64 start_pfn, size_t nr, u32 op)
+{
+	mutex_lock(&kbdev->mmu_hw_mutex);
+
+	kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+			KBASE_MMU_FAULT_TYPE_PAGE);
+	kbase_mmu_hw_do_operation(kbdev, faulting_as, start_pfn,
+			nr, op, 1);
+
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+
+	kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+			KBASE_MMU_FAULT_TYPE_PAGE);
+}
+
+static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
+			struct kbase_as *faulting_as)
+{
+	struct kbasep_gwt_list_element *pos;
+	struct kbase_va_region *region;
+	struct kbase_device *kbdev;
+	struct kbase_fault *fault;
+	u64 fault_pfn, pfn_offset;
+	u32 op;
+	int ret;
+	int as_no;
+
+	as_no = faulting_as->number;
+	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+	fault = &faulting_as->pf_data;
+	fault_pfn = fault->addr >> PAGE_SHIFT;
+
+	kbase_gpu_vm_lock(kctx);
+
+	/* Find region and check if it should be writable. */
+	region = kbase_region_tracker_find_region_enclosing_address(kctx,
+			fault->addr);
+	if (!region || region->flags & KBASE_REG_FREE) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Memory is not mapped on the GPU",
+				&faulting_as->pf_data);
+		return;
+	}
+
+	if (!(region->flags & KBASE_REG_GPU_WR)) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Region does not have write permissions",
+				&faulting_as->pf_data);
+		return;
+	}
+
+	/* Capture addresses of faulting write location
+	 * for job dumping if write tracking is enabled.
+	 */
+	if (kctx->gwt_enabled) {
+		u64 page_addr = fault->addr & PAGE_MASK;
+		bool found = false;
+		/* Check if this write was already handled. */
+		list_for_each_entry(pos, &kctx->gwt_current_list, link) {
+			if (page_addr == pos->page_addr) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+			if (pos) {
+				pos->region = region;
+				pos->page_addr = page_addr;
+				pos->num_pages = 1;
+				list_add(&pos->link, &kctx->gwt_current_list);
+			} else {
+				dev_warn(kbdev->dev, "kmalloc failure");
+			}
+		}
+	}
+
+	pfn_offset = fault_pfn - region->start_pfn;
+	/* Now make this faulting page writable to GPU. */
+	ret = kbase_mmu_update_pages_no_flush(kctx, fault_pfn,
+				&kbase_get_gpu_phy_pages(region)[pfn_offset],
+				1, region->flags);
+
+	/* flush L2 and unlock the VA (resumes the MMU) */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+		op = AS_COMMAND_FLUSH;
+	else
+		op = AS_COMMAND_FLUSH_PT;
+
+	kbase_gpu_mmu_handle_write_faulting_as(kbdev, faulting_as,
+			fault_pfn, 1, op);
+
+	kbase_gpu_vm_unlock(kctx);
+}
+
+static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
+			struct kbase_as	*faulting_as)
+{
+	struct kbase_fault *fault = &faulting_as->pf_data;
+
+	switch (fault->status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+		kbase_gpu_mmu_handle_write_fault(kctx, faulting_as);
+		break;
+	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Execute Permission fault", fault);
+		break;
+	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Read Permission fault", fault);
+		break;
+	default:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Unknown Permission fault", fault);
+		break;
+	}
+}
+#endif
+
+#define MAX_POOL_LEVEL 2
+
+/**
+ * page_fault_try_alloc - Try to allocate memory from a context pool
+ * @kctx:          Context pointer
+ * @region:        Region to grow
+ * @new_pages:     Number of 4 kB pages to allocate
+ * @pages_to_grow: Pointer to variable to store number of outstanding pages on
+ *                 failure. This can be either 4 kB or 2 MB pages, depending on
+ *                 the number of pages requested.
+ * @grow_2mb_pool: Pointer to variable to store which pool needs to grow - true
+ *                 for 2 MB, false for 4 kB.
+ * @prealloc_sas:  Pointer to kbase_sub_alloc structures
+ *
+ * This function will try to allocate as many pages as possible from the context
+ * pool, then if required will try to allocate the remaining pages from the
+ * device pool.
+ *
+ * This function will not allocate any new memory beyond what is already
+ * present in the context or device pools. This is because it is intended to be
+ * called with the vm_lock held, which could cause recursive locking if the
+ * allocation caused the out-of-memory killer to run.
+ *
+ * If 2 MB pages are enabled and new_pages is >= 2 MB then pages_to_grow will be
+ * a count of 2 MB pages, otherwise it will be a count of 4 kB pages.
+ *
+ * Return: true if successful, false on failure
+ */
+static bool page_fault_try_alloc(struct kbase_context *kctx,
+		struct kbase_va_region *region, size_t new_pages,
+		int *pages_to_grow, bool *grow_2mb_pool,
+		struct kbase_sub_alloc **prealloc_sas)
+{
+	struct tagged_addr *gpu_pages[MAX_POOL_LEVEL] = {NULL};
+	struct tagged_addr *cpu_pages[MAX_POOL_LEVEL] = {NULL};
+	size_t pages_alloced[MAX_POOL_LEVEL] = {0};
+	struct kbase_mem_pool *pool, *root_pool;
+	int pool_level = 0;
+	bool alloc_failed = false;
+	size_t pages_still_required;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	if (new_pages >= (SZ_2M / SZ_4K)) {
+		root_pool = &kctx->lp_mem_pool;
+		*grow_2mb_pool = true;
+	} else {
+#endif
+		root_pool = &kctx->mem_pool;
+		*grow_2mb_pool = false;
+#ifdef CONFIG_MALI_2MB_ALLOC
+	}
+#endif
+
+	if (region->gpu_alloc != region->cpu_alloc)
+		new_pages *= 2;
+
+	pages_still_required = new_pages;
+
+	/* Determine how many pages are in the pools before trying to allocate.
+	 * Don't attempt to allocate & free if the allocation can't succeed.
+	 */
+	for (pool = root_pool; pool != NULL; pool = pool->next_pool) {
+		size_t pool_size_4k;
+
+		kbase_mem_pool_lock(pool);
+
+		pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
+		if (pool_size_4k >= pages_still_required)
+			pages_still_required = 0;
+		else
+			pages_still_required -= pool_size_4k;
+
+		kbase_mem_pool_unlock(pool);
+
+		if (!pages_still_required)
+			break;
+	}
+
+	if (pages_still_required) {
+		/* Insufficient pages in pools. Don't try to allocate - just
+		 * request a grow.
+		 */
+		*pages_to_grow = pages_still_required;
+
+		return false;
+	}
+
+	/* Since we've dropped the pool locks, the amount of memory in the pools
+	 * may change between the above check and the actual allocation.
+	 */
+	pool = root_pool;
+	for (pool_level = 0; pool_level < MAX_POOL_LEVEL; pool_level++) {
+		size_t pool_size_4k;
+		size_t pages_to_alloc_4k;
+		size_t pages_to_alloc_4k_per_alloc;
+
+		kbase_mem_pool_lock(pool);
+
+		/* Allocate as much as possible from this pool */
+		pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
+		pages_to_alloc_4k = MIN(new_pages, pool_size_4k);
+		if (region->gpu_alloc == region->cpu_alloc)
+			pages_to_alloc_4k_per_alloc = pages_to_alloc_4k;
+		else
+			pages_to_alloc_4k_per_alloc = pages_to_alloc_4k >> 1;
+
+		pages_alloced[pool_level] = pages_to_alloc_4k;
+		if (pages_to_alloc_4k) {
+			gpu_pages[pool_level] =
+					kbase_alloc_phy_pages_helper_locked(
+						region->gpu_alloc, pool,
+						pages_to_alloc_4k_per_alloc,
+						&prealloc_sas[0]);
+
+			if (!gpu_pages[pool_level]) {
+				alloc_failed = true;
+			} else if (region->gpu_alloc != region->cpu_alloc) {
+				cpu_pages[pool_level] =
+					kbase_alloc_phy_pages_helper_locked(
+						region->cpu_alloc, pool,
+						pages_to_alloc_4k_per_alloc,
+						&prealloc_sas[1]);
+
+				if (!cpu_pages[pool_level])
+					alloc_failed = true;
+			}
+		}
+
+		kbase_mem_pool_unlock(pool);
+
+		if (alloc_failed) {
+			WARN_ON(!new_pages);
+			WARN_ON(pages_to_alloc_4k >= new_pages);
+			WARN_ON(pages_to_alloc_4k_per_alloc >= new_pages);
+			break;
+		}
+
+		new_pages -= pages_to_alloc_4k;
+
+		if (!new_pages)
+			break;
+
+		pool = pool->next_pool;
+		if (!pool)
+			break;
+	}
+
+	if (new_pages) {
+		/* Allocation was unsuccessful */
+		int max_pool_level = pool_level;
+
+		pool = root_pool;
+
+		/* Free memory allocated so far */
+		for (pool_level = 0; pool_level <= max_pool_level;
+				pool_level++) {
+			kbase_mem_pool_lock(pool);
+
+			if (region->gpu_alloc != region->cpu_alloc) {
+				if (pages_alloced[pool_level] &&
+						cpu_pages[pool_level])
+					kbase_free_phy_pages_helper_locked(
+						region->cpu_alloc,
+						pool, cpu_pages[pool_level],
+						pages_alloced[pool_level]);
+			}
+
+			if (pages_alloced[pool_level] && gpu_pages[pool_level])
+				kbase_free_phy_pages_helper_locked(
+						region->gpu_alloc,
+						pool, gpu_pages[pool_level],
+						pages_alloced[pool_level]);
+
+			kbase_mem_pool_unlock(pool);
+
+			pool = pool->next_pool;
+		}
+
+		/*
+		 * If the allocation failed despite there being enough memory in
+		 * the pool, then just fail. Otherwise, try to grow the memory
+		 * pool.
+		 */
+		if (alloc_failed)
+			*pages_to_grow = 0;
+		else
+			*pages_to_grow = new_pages;
+
+		return false;
+	}
+
+	/* Allocation was successful. No pages to grow, return success. */
+	*pages_to_grow = 0;
+
+	return true;
+}
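+
+/*
+ * Note: when region->gpu_alloc and region->cpu_alloc are separate
+ * allocations, the requirement is doubled up front and each per-pool
+ * allocation above is split evenly between the two allocs via
+ * pages_to_alloc_4k_per_alloc.
+ */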
+
+void page_fault_worker(struct work_struct *data)
+{
+	u64 fault_pfn;
+	u32 fault_status;
+	size_t new_pages;
+	size_t fault_rel_pfn;
+	struct kbase_as *faulting_as;
+	int as_no;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct kbase_va_region *region;
+	struct kbase_fault *fault;
+	int err;
+	bool grown = false;
+	int pages_to_grow;
+	bool grow_2mb_pool;
+	struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
+	int i;
+
+	faulting_as = container_of(data, struct kbase_as, work_pagefault);
+	fault = &faulting_as->pf_data;
+	fault_pfn = fault->addr >> PAGE_SHIFT;
+	as_no = faulting_as->number;
+
+	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
+	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+	 */
+	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+	if (WARN_ON(!kctx)) {
+		atomic_dec(&kbdev->faults_pending);
+		return;
+	}
+
+	KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
+
+	if (unlikely(fault->protected_mode)) {
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Protected mode fault", fault);
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+
+		goto fault_done;
+	}
+
+	fault_status = fault->status;
+	switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
+		/* need to check against the region to handle this one */
+		break;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
+#ifdef CONFIG_MALI_CINSTR_GWT
+		/* If GWT was ever enabled then we need to handle
+		 * write fault pages even if the feature was disabled later.
+		 */
+		if (kctx->gwt_was_enabled) {
+			kbase_gpu_mmu_handle_permission_fault(kctx,
+							faulting_as);
+			goto fault_done;
+		}
+#endif
+
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Permission failure", fault);
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Translation table bus fault", fault);
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
+		/* nothing to do, but we don't expect this fault currently */
+		dev_warn(kbdev->dev, "Access flag unexpectedly set");
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Address size fault", fault);
+		else
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Unknown fault code", fault);
+		goto fault_done;
+
+	case AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Memory attributes fault", fault);
+		else
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Unknown fault code", fault);
+		goto fault_done;
+
+	default:
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Unknown fault code", fault);
+		goto fault_done;
+	}
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+	/* Preallocate memory for the sub-allocation structs if necessary */
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i) {
+		prealloc_sas[i] = kmalloc(sizeof(*prealloc_sas[i]), GFP_KERNEL);
+		if (!prealloc_sas[i]) {
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Failed pre-allocating memory for sub-allocations' metadata",
+					fault);
+			goto fault_done;
+		}
+	}
+#endif /* CONFIG_MALI_2MB_ALLOC */
+
+page_fault_retry:
+	/* so we have a translation fault, let's see if it is for growable
+	 * memory */
+	kbase_gpu_vm_lock(kctx);
+
+	region = kbase_region_tracker_find_region_enclosing_address(kctx,
+			fault->addr);
+	if (!region || region->flags & KBASE_REG_FREE) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Memory is not mapped on the GPU", fault);
+		goto fault_done;
+	}
+
+	if (region->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"DMA-BUF is not mapped on the GPU", fault);
+		goto fault_done;
+	}
+
+	if ((region->flags & GROWABLE_FLAGS_REQUIRED)
+			!= GROWABLE_FLAGS_REQUIRED) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Memory is not growable", fault);
+		goto fault_done;
+	}
+
+	if ((region->flags & KBASE_REG_DONT_NEED)) {
+		kbase_gpu_vm_unlock(kctx);
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Don't need memory can't be grown", fault);
+		goto fault_done;
+	}
+
+	/* find the size we need to grow it by */
+	/* we know the result fits in a size_t due to
+	 * kbase_region_tracker_find_region_enclosing_address validating the
+	 * fault_address to be within a size_t from the start_pfn */
+	fault_rel_pfn = fault_pfn - region->start_pfn;
+
+	if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
+		dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
+				fault->addr, region->start_pfn,
+				region->start_pfn +
+				kbase_reg_current_backed_size(region));
+
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		/* [1] in case another page fault occurred while we were
+		 * handling the (duplicate) page fault we need to ensure we
+		 * don't lose the other page fault as a result of us clearing
+		 * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
+		 * an UNLOCK command that will retry any stalled memory
+		 * transaction (which should cause the other page fault to be
+		 * raised again).
+		 */
+		kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0,
+				AS_COMMAND_UNLOCK, 1);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		kbase_gpu_vm_unlock(kctx);
+
+		goto fault_done;
+	}
+
+	new_pages = reg_grow_calc_extra_pages(kbdev, region, fault_rel_pfn);
+
+	/* cap to max vsize */
+	new_pages = min(new_pages, region->nr_pages - kbase_reg_current_backed_size(region));
+
+	if (0 == new_pages) {
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		/* Duplicate of a fault we've already handled, nothing to do */
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		/* See comment [1] about UNLOCK usage */
+		kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0,
+				AS_COMMAND_UNLOCK, 1);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_PAGE);
+		kbase_gpu_vm_unlock(kctx);
+		goto fault_done;
+	}
+
+	pages_to_grow = 0;
+
+	spin_lock(&kctx->mem_partials_lock);
+	grown = page_fault_try_alloc(kctx, region, new_pages, &pages_to_grow,
+			&grow_2mb_pool, prealloc_sas);
+	spin_unlock(&kctx->mem_partials_lock);
+
+	if (grown) {
+		u64 pfn_offset;
+		u32 op;
+
+		/* alloc success */
+		KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);
+
+		/* set up the new pages */
+		pfn_offset = kbase_reg_current_backed_size(region) - new_pages;
+		/*
+		 * Note:
+		 * Issuing an MMU operation will unlock the MMU and cause the
+		 * translation to be replayed. If the page insertion fails then
+		 * rather than trying to continue, the context should be killed,
+		 * so the no_flush version of insert_pages is used which allows
+		 * us to unlock the MMU as we see fit.
+		 */
+		err = kbase_mmu_insert_pages_no_flush(kbdev, &kctx->mmu,
+				region->start_pfn + pfn_offset,
+				&kbase_get_gpu_phy_pages(region)[pfn_offset],
+				new_pages, region->flags);
+		if (err) {
+			kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
+			if (region->gpu_alloc != region->cpu_alloc)
+				kbase_free_phy_pages_helper(region->cpu_alloc,
+						new_pages);
+			kbase_gpu_vm_unlock(kctx);
+			/* The locked VA region will be unlocked and the cache invalidated in here */
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Page table update failure", fault);
+			goto fault_done;
+		}
+#if defined(CONFIG_MALI_GATOR_SUPPORT)
+		kbase_trace_mali_page_fault_insert_pages(as_no, new_pages);
+#endif
+		KBASE_TLSTREAM_AUX_PAGEFAULT(kctx->id, (u64)new_pages);
+
+		/* AS transaction begin */
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		/* flush L2 and unlock the VA (resumes the MMU) */
+		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
+			op = AS_COMMAND_FLUSH;
+		else
+			op = AS_COMMAND_FLUSH_PT;
+
+		/* clear MMU interrupt - this needs to be done after updating
+		 * the page tables but before issuing a FLUSH command. The
+		 * FLUSH cmd has a side effect that it restarts stalled memory
+		 * transactions in other address spaces which may cause
+		 * another fault to occur. If we didn't clear the interrupt at
+		 * this stage a new IRQ might not be raised when the GPU finds
+		 * an MMU IRQ is already pending.
+		 */
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_PAGE);
+
+		kbase_mmu_hw_do_operation(kbdev, faulting_as,
+				fault->addr >> PAGE_SHIFT,
+				new_pages, op, 1);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		/* AS transaction end */
+
+		/* reenable this in the mask */
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_PAGE);
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+		if (kctx->gwt_enabled) {
+			/* GWT also tracks growable regions. */
+			struct kbasep_gwt_list_element *pos;
+
+			pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+			if (pos) {
+				pos->region = region;
+				pos->page_addr = (region->start_pfn +
+							pfn_offset) <<
+							 PAGE_SHIFT;
+				pos->num_pages = new_pages;
+				list_add(&pos->link,
+					&kctx->gwt_current_list);
+			} else {
+				dev_warn(kbdev->dev, "kmalloc failure");
+			}
+		}
+#endif
+		kbase_gpu_vm_unlock(kctx);
+	} else {
+		int ret = -ENOMEM;
+
+		kbase_gpu_vm_unlock(kctx);
+
+		/* If the memory pool was insufficient then grow it and retry.
+		 * Otherwise fail the allocation.
+		 */
+		if (pages_to_grow > 0) {
+#ifdef CONFIG_MALI_2MB_ALLOC
+			if (grow_2mb_pool) {
+				/* Round page requirement up to nearest 2 MB */
+				pages_to_grow = (pages_to_grow +
+					((1 << kctx->lp_mem_pool.order) - 1))
+						>> kctx->lp_mem_pool.order;
+				ret = kbase_mem_pool_grow(&kctx->lp_mem_pool,
+						pages_to_grow);
+			} else {
+#endif
+				ret = kbase_mem_pool_grow(&kctx->mem_pool,
+						pages_to_grow);
+#ifdef CONFIG_MALI_2MB_ALLOC
+			}
+#endif
+		}
+		if (ret < 0) {
+			/* failed to extend, handle as a normal PF */
+			kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+					"Page allocation failure", fault);
+		} else {
+			goto page_fault_retry;
+		}
+	}
+
+fault_done:
+	for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i)
+		kfree(prealloc_sas[i]);
+
+	/*
+	 * By this point, the fault was handled in some way,
+	 * so release the ctx refcount
+	 */
+	kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+	atomic_dec(&kbdev->faults_pending);
+}
+
+static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut)
+{
+	u64 *page;
+	int i;
+	struct page *p;
+
+	p = kbase_mem_pool_alloc(&kbdev->mem_pool);
+	if (!p)
+		return 0;
+
+	page = kmap(p);
+	if (NULL == page)
+		goto alloc_free;
+
+	/* If the MMU tables belong to a context then account the memory usage
+	 * to that context, otherwise the MMU tables are device wide and are
+	 * only accounted to the device.
+	 */
+	if (mmut->kctx) {
+		int new_page_count;
+
+		new_page_count = kbase_atomic_add_pages(1,
+				&mmut->kctx->used_pages);
+		KBASE_TLSTREAM_AUX_PAGESALLOC(
+				mmut->kctx->id,
+				(u64)new_page_count);
+		kbase_process_page_usage_inc(mmut->kctx, 1);
+	}
+
+	kbase_atomic_add_pages(1, &kbdev->memdev.used_pages);
+
+	for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
+		kbdev->mmu_mode->entry_invalidate(&page[i]);
+
+	kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
+
+	kunmap(p);
+	return page_to_phys(p);
+
+alloc_free:
+	kbase_mem_pool_free(&kbdev->mem_pool, p, false);
+
+	return 0;
+}
+
+/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the
+ * new table from the pool if needed and possible
+ */
+static int mmu_get_next_pgd(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		phys_addr_t *pgd, u64 vpfn, int level)
+{
+	u64 *page;
+	phys_addr_t target_pgd;
+	struct page *p;
+
+	KBASE_DEBUG_ASSERT(*pgd);
+
+	lockdep_assert_held(&mmut->mmu_lock);
+
+	/*
+	 * Architecture spec defines level-0 as being the top-most.
+	 * This is a bit unfortunate here, but we keep the same convention.
+	 */
+	vpfn >>= (3 - level) * 9;
+	vpfn &= 0x1FF;
+
+	p = pfn_to_page(PFN_DOWN(*pgd));
+	page = kmap(p);
+	if (NULL == page) {
+		dev_warn(kbdev->dev, "%s: kmap failure\n", __func__);
+		return -EINVAL;
+	}
+
+	target_pgd = kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
+
+	if (!target_pgd) {
+		target_pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
+		if (!target_pgd) {
+			dev_dbg(kbdev->dev, "%s: kbase_mmu_alloc_pgd failure\n",
+					__func__);
+			kunmap(p);
+			return -ENOMEM;
+		}
+
+		kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
+
+		kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
+		/* Rely on the caller to update the address space flags. */
+	}
+
+	kunmap(p);
+	*pgd = target_pgd;
+
+	return 0;
+}
+
+/*
+ * Returns the PGD for the specified level of translation
+ */
+static int mmu_get_pgd_at_level(struct kbase_device *kbdev,
+					struct kbase_mmu_table *mmut,
+					u64 vpfn,
+					unsigned int level,
+					phys_addr_t *out_pgd)
+{
+	phys_addr_t pgd;
+	int l;
+
+	lockdep_assert_held(&mmut->mmu_lock);
+	pgd = mmut->pgd;
+
+	for (l = MIDGARD_MMU_TOPLEVEL; l < level; l++) {
+		int err = mmu_get_next_pgd(kbdev, mmut, &pgd, vpfn, l);
+		/* Handle failure condition */
+		if (err) {
+			dev_dbg(kbdev->dev,
+				 "%s: mmu_get_next_pgd failure at level %d\n",
+				 __func__, l);
+			return err;
+		}
+	}
+
+	*out_pgd = pgd;
+
+	return 0;
+}
+
+static int mmu_get_bottom_pgd(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		u64 vpfn,
+		phys_addr_t *out_pgd)
+{
+	return mmu_get_pgd_at_level(kbdev, mmut, vpfn, MIDGARD_MMU_BOTTOMLEVEL,
+			out_pgd);
+}
+
+static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		u64 from_vpfn, u64 to_vpfn)
+{
+	phys_addr_t pgd;
+	u64 vpfn = from_vpfn;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+	KBASE_DEBUG_ASSERT(from_vpfn <= to_vpfn);
+
+	lockdep_assert_held(&mmut->mmu_lock);
+
+	mmu_mode = kbdev->mmu_mode;
+
+	while (vpfn < to_vpfn) {
+		unsigned int i;
+		unsigned int idx = vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - idx;
+		unsigned int pcount = 0;
+		unsigned int left = to_vpfn - vpfn;
+		unsigned int level;
+		u64 *page;
+
+		if (count > left)
+			count = left;
+
+		/* need to check if this is a 2MB page or a 4kB page */
+		pgd = mmut->pgd;
+
+		for (level = MIDGARD_MMU_TOPLEVEL;
+				level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+			idx = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+			page = kmap(phys_to_page(pgd));
+			if (mmu_mode->ate_is_valid(page[idx], level))
+				break; /* keep the mapping */
+			kunmap(phys_to_page(pgd));
+			pgd = mmu_mode->pte_to_phy_addr(page[idx]);
+		}
+
+		switch (level) {
+		case MIDGARD_MMU_LEVEL(2):
+			/* remap to single entry to update */
+			pcount = 1;
+			break;
+		case MIDGARD_MMU_BOTTOMLEVEL:
+			/* page count is the same as the logical count */
+			pcount = count;
+			break;
+		default:
+			dev_warn(kbdev->dev, "%s: No support for ATEs at level %d\n",
+			       __func__, level);
+			goto next;
+		}
+
+		/* Invalidate the entries we added */
+		for (i = 0; i < pcount; i++)
+			mmu_mode->entry_invalidate(&page[idx + i]);
+
+		kbase_mmu_sync_pgd(kbdev,
+				   kbase_dma_addr(phys_to_page(pgd)) + 8 * idx,
+				   8 * pcount);
+		kunmap(phys_to_page(pgd));
+
+next:
+		vpfn += count;
+	}
+}
+
+/*
+ * Map the single page 'phys' 'nr' of times, starting at GPU PFN 'vpfn'
+ */
+int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr phys, size_t nr,
+					unsigned long flags)
+{
+	phys_addr_t pgd;
+	u64 *pgd_page;
+	/* In case the insert_single_page only partially completes we need to be
+	 * able to recover */
+	bool recover_required = false;
+	u64 recover_vpfn = vpfn;
+	size_t recover_count = 0;
+	size_t remain = nr;
+	int err;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+	mmu_mode = kctx->kbdev->mmu_mode;
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return 0;
+
+	mutex_lock(&kctx->mmu.mmu_lock);
+
+	while (remain) {
+		unsigned int i;
+		unsigned int index = vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+		struct page *p;
+
+		if (count > remain)
+			count = remain;
+
+		/*
+		 * Repeatedly calling mmu_get_bottom_pgd() is clearly
+		 * suboptimal. We don't have to re-parse the whole tree
+		 * each time (just cache the l0-l2 sequence).
+		 * On the other hand, it's only a gain when we map more than
+		 * 256 pages at once (on average). Do we really care?
+		 */
+		do {
+			err = mmu_get_bottom_pgd(kctx->kbdev, &kctx->mmu,
+					vpfn, &pgd);
+			if (err != -ENOMEM)
+				break;
+			/* Fill the memory pool with enough pages for
+			 * the page walk to succeed
+			 */
+			mutex_unlock(&kctx->mmu.mmu_lock);
+			err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
+					MIDGARD_MMU_BOTTOMLEVEL);
+			mutex_lock(&kctx->mmu.mmu_lock);
+		} while (!err);
+		if (err) {
+			dev_warn(kctx->kbdev->dev,
+				 "%s: mmu_get_bottom_pgd failure\n", __func__);
+			if (recover_required) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kctx->kbdev,
+						&kctx->mmu,
+						recover_vpfn,
+						recover_vpfn + recover_count);
+			}
+			goto fail_unlock;
+		}
+
+		p = pfn_to_page(PFN_DOWN(pgd));
+		pgd_page = kmap(p);
+		if (!pgd_page) {
+			dev_warn(kctx->kbdev->dev, "%s: kmap failure\n",
+				 __func__);
+			if (recover_required) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kctx->kbdev,
+						&kctx->mmu,
+						recover_vpfn,
+						recover_vpfn + recover_count);
+			}
+			err = -ENOMEM;
+			goto fail_unlock;
+		}
+
+		for (i = 0; i < count; i++) {
+			unsigned int ofs = index + i;
+
+			/* Fail if the current page is a valid ATE entry */
+			KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
+
+			mmu_mode->entry_set_ate(&pgd_page[ofs],
+						phys, flags,
+						MIDGARD_MMU_BOTTOMLEVEL);
+		}
+
+		vpfn += count;
+		remain -= count;
+
+		kbase_mmu_sync_pgd(kctx->kbdev,
+				kbase_dma_addr(p) + (index * sizeof(u64)),
+				count * sizeof(u64));
+
+		kunmap(p);
+		/* We have started modifying the page table.
+		 * If further pages need inserting and fail we need to undo what
+		 * has already taken place */
+		recover_required = true;
+		recover_count += count;
+	}
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+	return 0;
+
+fail_unlock:
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+	return err;
+}
+
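+/*
+ * Free the page table page referenced by a PTE that is about to be replaced,
+ * returning it to the device memory pool and reversing the memory accounting
+ * done when it was allocated. Called from kbase_mmu_insert_pages_no_flush()
+ * when a 2MB ATE is written over a valid level 2 PTE.
+ */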
+static inline void cleanup_empty_pte(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut, u64 *pte)
+{
+	phys_addr_t tmp_pgd;
+	struct page *tmp_p;
+
+	tmp_pgd = kbdev->mmu_mode->pte_to_phy_addr(*pte);
+	tmp_p = phys_to_page(tmp_pgd);
+	kbase_mem_pool_free(&kbdev->mem_pool, tmp_p, false);
+
+	/* If the MMU tables belong to a context then we accounted the memory
+	 * usage to that context, so decrement here.
+	 */
+	if (mmut->kctx) {
+		kbase_process_page_usage_dec(mmut->kctx, 1);
+		kbase_atomic_sub_pages(1, &mmut->kctx->used_pages);
+	}
+	kbase_atomic_sub_pages(1, &kbdev->memdev.used_pages);
+}
+
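+/*
+ * Insert 'nr' pages into the page tables without issuing a flush/invalidate.
+ * When the first page of a chunk is the head of a 2MB huge page and the
+ * insertion point is 512-entry aligned (vindex == 0), a single level 2 ATE is
+ * written instead of 512 bottom-level ATEs; otherwise pages are mapped one
+ * bottom-level entry at a time.
+ */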
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
+				    struct kbase_mmu_table *mmut,
+				    const u64 start_vpfn,
+				    struct tagged_addr *phys, size_t nr,
+				    unsigned long flags)
+{
+	phys_addr_t pgd;
+	u64 *pgd_page;
+	u64 insert_vpfn = start_vpfn;
+	size_t remain = nr;
+	int err;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	/* Note that 0 is a valid start_vpfn */
+	/* 64-bit address range is the max */
+	KBASE_DEBUG_ASSERT(start_vpfn <= (U64_MAX / PAGE_SIZE));
+
+	mmu_mode = kbdev->mmu_mode;
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return 0;
+
+	mutex_lock(&mmut->mmu_lock);
+
+	while (remain) {
+		unsigned int i;
+		unsigned int vindex = insert_vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - vindex;
+		struct page *p;
+		unsigned int cur_level;
+
+		if (count > remain)
+			count = remain;
+
+		if (!vindex && is_huge_head(*phys))
+			cur_level = MIDGARD_MMU_LEVEL(2);
+		else
+			cur_level = MIDGARD_MMU_BOTTOMLEVEL;
+
+		/*
+		 * Repeatedly calling mmu_get_pgd_at_level() is clearly
+		 * suboptimal. We don't have to re-parse the whole tree
+		 * each time (just cache the l0-l2 sequence).
+		 * On the other hand, it's only a gain when we map more than
+		 * 256 pages at once (on average). Do we really care?
+		 */
+		do {
+			err = mmu_get_pgd_at_level(kbdev, mmut, insert_vpfn,
+						   cur_level, &pgd);
+			if (err != -ENOMEM)
+				break;
+			/* Fill the memory pool with enough pages for
+			 * the page walk to succeed
+			 */
+			mutex_unlock(&mmut->mmu_lock);
+			err = kbase_mem_pool_grow(&kbdev->mem_pool,
+					cur_level);
+			mutex_lock(&mmut->mmu_lock);
+		} while (!err);
+
+		if (err) {
+			dev_warn(kbdev->dev,
+				 "%s: mmu_get_pgd_at_level failure\n", __func__);
+			if (insert_vpfn != start_vpfn) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kbdev,
+						mmut, start_vpfn, insert_vpfn);
+			}
+			goto fail_unlock;
+		}
+
+		p = pfn_to_page(PFN_DOWN(pgd));
+		pgd_page = kmap(p);
+		if (!pgd_page) {
+			dev_warn(kbdev->dev, "%s: kmap failure\n",
+				 __func__);
+			if (insert_vpfn != start_vpfn) {
+				/* Invalidate the pages we have partially
+				 * completed */
+				mmu_insert_pages_failure_recovery(kbdev,
+						mmut, start_vpfn, insert_vpfn);
+			}
+			err = -ENOMEM;
+			goto fail_unlock;
+		}
+
+		if (cur_level == MIDGARD_MMU_LEVEL(2)) {
+			unsigned int level_index = (insert_vpfn >> 9) & 0x1FF;
+			u64 *target = &pgd_page[level_index];
+
+			if (mmu_mode->pte_is_valid(*target, cur_level))
+				cleanup_empty_pte(kbdev, mmut, target);
+			mmu_mode->entry_set_ate(target, *phys, flags,
+						cur_level);
+		} else {
+			for (i = 0; i < count; i++) {
+				unsigned int ofs = vindex + i;
+				u64 *target = &pgd_page[ofs];
+
+				/* Warn if the current page is a valid ATE
+				 * entry. The page table shouldn't have anything
+				 * in the place where we are trying to put a
+				 * new entry. Modification to page table entries
+				 * should be performed with
+				 * kbase_mmu_update_pages()
+				 */
+				WARN_ON((*target & 1UL) != 0);
+
+				kbdev->mmu_mode->entry_set_ate(target,
+						phys[i], flags, cur_level);
+			}
+		}
+
+		phys += count;
+		insert_vpfn += count;
+		remain -= count;
+
+		kbase_mmu_sync_pgd(kbdev,
+				kbase_dma_addr(p) + (vindex * sizeof(u64)),
+				count * sizeof(u64));
+
+		kunmap(p);
+	}
+
+	err = 0;
+
+fail_unlock:
+	mutex_unlock(&mmut->mmu_lock);
+	return err;
+}
+
+/*
+ * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn' for GPU address space
+ * number 'as_nr'.
+ */
+int kbase_mmu_insert_pages(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut, u64 vpfn,
+		struct tagged_addr *phys, size_t nr,
+		unsigned long flags, int as_nr)
+{
+	int err;
+
+	err = kbase_mmu_insert_pages_no_flush(kbdev, mmut, vpfn,
+			phys, nr, flags);
+
+	if (mmut->kctx)
+		kbase_mmu_flush_invalidate(mmut->kctx, vpfn, nr, false);
+	else
+		kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, nr, false, as_nr);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages);
+
+/**
+ * kbase_mmu_flush_invalidate_noretain() - Flush and invalidate the GPU caches
+ * without retaining the kbase context.
+ * @kctx: The KBase context.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ *
+ * As per kbase_mmu_flush_invalidate but doesn't retain the kctx or do any
+ * other locking.
+ */
+static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx,
+		u64 vpfn, size_t nr, bool sync)
+{
+	struct kbase_device *kbdev = kctx->kbdev;
+	int err;
+	u32 op;
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return;
+
+	if (sync)
+		op = AS_COMMAND_FLUSH_MEM;
+	else
+		op = AS_COMMAND_FLUSH_PT;
+
+	err = kbase_mmu_hw_do_operation(kbdev,
+				&kbdev->as[kctx->as_nr],
+				vpfn, nr, op, 0);
+	if (err) {
+		/* Flush failed to complete, assume the
+		 * GPU has hung and perform a reset to
+		 * recover */
+		dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
+
+		if (kbase_prepare_to_reset_gpu_locked(kbdev))
+			kbase_reset_gpu_locked(kbdev);
+	}
+
+#ifndef CONFIG_MALI_NO_MALI
+	/*
+	 * As this function could be called in interrupt context the sync
+	 * request can't block. Instead log the request and the next flush
+	 * request will pick it up.
+	 */
+	if ((!err) && sync &&
+			kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367))
+		atomic_set(&kctx->drain_pending, 1);
+#endif /* !CONFIG_MALI_NO_MALI */
+}
+
+/* Perform a flush/invalidate on a particular address space
+ */
+static void kbase_mmu_flush_invalidate_as(struct kbase_device *kbdev,
+		struct kbase_as *as,
+		u64 vpfn, size_t nr, bool sync, bool drain_pending)
+{
+	int err;
+	u32 op;
+
+	if (kbase_pm_context_active_handle_suspend(kbdev,
+				KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+		/* GPU is off so there's no need to perform flush/invalidate */
+		return;
+	}
+
+	/* AS transaction begin */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+
+	if (sync)
+		op = AS_COMMAND_FLUSH_MEM;
+	else
+		op = AS_COMMAND_FLUSH_PT;
+
+	err = kbase_mmu_hw_do_operation(kbdev,
+			as, vpfn, nr, op, 0);
+
+	if (err) {
+		/* Flush failed to complete, assume the GPU has hung and
+		 * perform a reset to recover
+		 */
+		dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
+
+		if (kbase_prepare_to_reset_gpu(kbdev))
+			kbase_reset_gpu(kbdev);
+	}
+
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+	/* AS transaction end */
+
+#ifndef CONFIG_MALI_NO_MALI
+	/*
+	 * The transaction lock must be dropped before here
+	 * as kbase_wait_write_flush could take it if
+	 * the GPU was powered down (static analysis doesn't
+	 * know this can't happen).
+	 */
+	drain_pending |= (!err) && sync &&
+		kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367);
+	if (drain_pending) {
+		/* Wait for GPU to flush write buffer */
+		kbase_wait_write_flush(kbdev);
+	}
+#endif /* !CONFIG_MALI_NO_MALI */
+
+	kbase_pm_context_idle(kbdev);
+}
+
+static void kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev,
+		u64 vpfn, size_t nr, bool sync, int as_nr)
+{
+	/* Skip if there is nothing to do */
+	if (nr) {
+		kbase_mmu_flush_invalidate_as(kbdev, &kbdev->as[as_nr], vpfn,
+					nr, sync, false);
+	}
+}
+
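+/* Perform a flush/invalidate for a context, retaining it in the runpool
+ * first so that its address space cannot be reassigned while the operation
+ * is in progress. If the context is not currently in the runpool there is
+ * nothing to flush.
+ */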
+static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
+		u64 vpfn, size_t nr, bool sync)
+{
+	struct kbase_device *kbdev;
+	bool ctx_is_in_runpool;
+	bool drain_pending = false;
+
+#ifndef CONFIG_MALI_NO_MALI
+	if (atomic_xchg(&kctx->drain_pending, 0))
+		drain_pending = true;
+#endif /* !CONFIG_MALI_NO_MALI */
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return;
+
+	kbdev = kctx->kbdev;
+	mutex_lock(&kbdev->js_data.queue_mutex);
+	ctx_is_in_runpool = kbasep_js_runpool_retain_ctx(kbdev, kctx);
+	mutex_unlock(&kbdev->js_data.queue_mutex);
+
+	if (ctx_is_in_runpool) {
+		KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+		kbase_mmu_flush_invalidate_as(kbdev, &kbdev->as[kctx->as_nr],
+				vpfn, nr, sync, drain_pending);
+
+		kbasep_js_runpool_release_ctx(kbdev, kctx);
+	}
+}
+
+void kbase_mmu_update(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		int as_nr)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	KBASE_DEBUG_ASSERT(as_nr != KBASEP_AS_NR_INVALID);
+
+	kbdev->mmu_mode->update(kbdev, mmut, as_nr);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_update);
+
+void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+	kbdev->mmu_mode->disable_as(kbdev, as_nr);
+}
+
+void kbase_mmu_disable(struct kbase_context *kctx)
+{
+	/* ASSERT that the context has a valid as_nr, which is only the case
+	 * when it's scheduled in.
+	 *
+	 * as_nr won't change because the caller has the hwaccess_lock */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+
+	/*
+	 * The address space is being disabled, drain all knowledge of it out
+	 * from the caches as pages and page tables might be freed after this.
+	 *
+	 * The job scheduler code will already be holding the locks and context
+	 * so just do the flush.
+	 */
+	kbase_mmu_flush_invalidate_noretain(kctx, 0, ~0, true);
+
+	kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr);
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_disable);
+
+/*
+ * We actually only discard the ATE, and not the page table
+ * pages. There is a potential DoS here, as we'll leak memory by
+ * having PTEs that are potentially unused.  Will require physical
+ * page accounting, so MMU pages are part of the process allocation.
+ *
+ * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
+ * currently scheduled into the runpool, and so potentially uses a lot of locks.
+ * These locks must be taken in the correct order with respect to others
+ * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
+ * information.
+ */
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
+	struct kbase_mmu_table *mmut, u64 vpfn, size_t nr, int as_nr)
+{
+	phys_addr_t pgd;
+	size_t requested_nr = nr;
+	struct kbase_mmu_mode const *mmu_mode;
+	int err = -EFAULT;
+
+	if (0 == nr) {
+		/* early out if nothing to do */
+		return 0;
+	}
+
+	mutex_lock(&mmut->mmu_lock);
+
+	mmu_mode = kbdev->mmu_mode;
+
+	while (nr) {
+		unsigned int i;
+		unsigned int index = vpfn & 0x1FF;
+		unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+		unsigned int pcount;
+		unsigned int level;
+		u64 *page;
+
+		if (count > nr)
+			count = nr;
+
+		/* need to check if this is a 2MB or a 4kB page */
+		pgd = mmut->pgd;
+
+		for (level = MIDGARD_MMU_TOPLEVEL;
+				level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+			phys_addr_t next_pgd;
+
+			index = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+			page = kmap(phys_to_page(pgd));
+			if (mmu_mode->ate_is_valid(page[index], level))
+				break; /* keep the mapping */
+			else if (!mmu_mode->pte_is_valid(page[index], level)) {
+				/* nothing here, advance */
+				switch (level) {
+				case MIDGARD_MMU_LEVEL(0):
+					/* a level 0 entry spans 512^3 pages */
+					count = 134217728;
+					break;
+				case MIDGARD_MMU_LEVEL(1):
+					/* a level 1 entry spans 512^2 pages */
+					count = 262144;
+					break;
+				case MIDGARD_MMU_LEVEL(2):
+					/* a level 2 entry spans 512 pages */
+					count = 512;
+					break;
+				case MIDGARD_MMU_LEVEL(3):
+					count = 1;
+					break;
+				}
+				if (count > nr)
+					count = nr;
+				goto next;
+			}
+			next_pgd = mmu_mode->pte_to_phy_addr(page[index]);
+			kunmap(phys_to_page(pgd));
+			pgd = next_pgd;
+		}
+
+		switch (level) {
+		case MIDGARD_MMU_LEVEL(0):
+		case MIDGARD_MMU_LEVEL(1):
+			dev_warn(kbdev->dev,
+				 "%s: No support for ATEs at level %d\n",
+				 __func__, level);
+			kunmap(phys_to_page(pgd));
+			goto out;
+		case MIDGARD_MMU_LEVEL(2):
+			/* can only teardown if count >= 512 */
+			if (count >= 512) {
+				pcount = 1;
+			} else {
+				dev_warn(kbdev->dev,
+					 "%s: limiting teardown: partial 2MB teardown requested, need 512 pages but only have %d\n",
+					 __func__, count);
+				pcount = 0;
+			}
+			break;
+		case MIDGARD_MMU_BOTTOMLEVEL:
+			/* page count is the same as the logical count */
+			pcount = count;
+			break;
+		default:
+			dev_err(kbdev->dev,
+				"%s: found non-mapped memory, early out\n",
+				__func__);
+			vpfn += count;
+			nr -= count;
+			continue;
+		}
+
+		/* Invalidate the entries we added */
+		for (i = 0; i < pcount; i++)
+			mmu_mode->entry_invalidate(&page[index + i]);
+
+		kbase_mmu_sync_pgd(kbdev,
+				   kbase_dma_addr(phys_to_page(pgd)) +
+				   8 * index, 8*pcount);
+
+next:
+		kunmap(phys_to_page(pgd));
+		vpfn += count;
+		nr -= count;
+	}
+	err = 0;
+out:
+	mutex_unlock(&mmut->mmu_lock);
+
+	if (mmut->kctx)
+		kbase_mmu_flush_invalidate(mmut->kctx, vpfn, requested_nr, true);
+	else
+		kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, requested_nr, true, as_nr);
+
+	return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages);
+
+/**
+ * kbase_mmu_update_pages_no_flush() - Update page table entries on the GPU
+ *
+ * This will update page table entries that already exist on the GPU based on
+ * the new flags that are passed. It is used as a response to the changes of
+ * the memory attributes
+ *
+ * The caller is responsible for validating the memory attributes
+ *
+ * @kctx:  Kbase context
+ * @vpfn:  Virtual PFN (Page Frame Number) of the first page to update
+ * @phys:  Tagged physical addresses of the physical pages to replace the
+ *         current mappings
+ * @nr:    Number of pages to update
+ * @flags: Flags
+ */
+static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+					struct tagged_addr *phys, size_t nr,
+					unsigned long flags)
+{
+	phys_addr_t pgd;
+	u64 *pgd_page;
+	struct kbase_mmu_mode const *mmu_mode;
+	int err;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+
+	/* Early out if there is nothing to do */
+	if (nr == 0)
+		return 0;
+
+	mutex_lock(&kctx->mmu.mmu_lock);
+
+	mmu_mode = kctx->kbdev->mmu_mode;
+
+	while (nr) {
+		unsigned int i;
+		unsigned int index = vpfn & 0x1FF;
+		size_t count = KBASE_MMU_PAGE_ENTRIES - index;
+		struct page *p;
+
+		if (count > nr)
+			count = nr;
+
+		do {
+			err = mmu_get_bottom_pgd(kctx->kbdev, &kctx->mmu,
+					vpfn, &pgd);
+			if (err != -ENOMEM)
+				break;
+			/* Fill the memory pool with enough pages for
+			 * the page walk to succeed
+			 */
+			mutex_unlock(&kctx->mmu.mmu_lock);
+			err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
+					MIDGARD_MMU_BOTTOMLEVEL);
+			mutex_lock(&kctx->mmu.mmu_lock);
+		} while (!err);
+		if (err) {
+			dev_warn(kctx->kbdev->dev,
+				 "mmu_get_bottom_pgd failure\n");
+			goto fail_unlock;
+		}
+
+		p = pfn_to_page(PFN_DOWN(pgd));
+		pgd_page = kmap(p);
+		if (!pgd_page) {
+			dev_warn(kctx->kbdev->dev, "kmap failure\n");
+			err = -ENOMEM;
+			goto fail_unlock;
+		}
+
+		for (i = 0; i < count; i++)
+			mmu_mode->entry_set_ate(&pgd_page[index + i], phys[i],
+						flags, MIDGARD_MMU_BOTTOMLEVEL);
+
+		phys += count;
+		vpfn += count;
+		nr -= count;
+
+		kbase_mmu_sync_pgd(kctx->kbdev,
+				kbase_dma_addr(p) + (index * sizeof(u64)),
+				count * sizeof(u64));
+
+		kunmap(pfn_to_page(PFN_DOWN(pgd)));
+	}
+
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return 0;
+
+fail_unlock:
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return err;
+}
+
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
+			   struct tagged_addr *phys, size_t nr,
+			   unsigned long flags)
+{
+	int err;
+
+	err = kbase_mmu_update_pages_no_flush(kctx, vpfn, phys, nr, flags);
+	kbase_mmu_flush_invalidate(kctx, vpfn, nr, true);
+	return err;
+}
+
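+/*
+ * Recursively free every page table page below 'pgd'. Each level's entries
+ * are first copied into the caller-provided buffer (one page per level,
+ * preallocated in kbase_mmu_init()) so that kmap_atomic() mappings do not
+ * have to be held across the recursion.
+ */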
+static void mmu_teardown_level(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut, phys_addr_t pgd,
+		int level, u64 *pgd_page_buffer)
+{
+	phys_addr_t target_pgd;
+	struct page *p;
+	u64 *pgd_page;
+	int i;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	lockdep_assert_held(&mmut->mmu_lock);
+
+	pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
+	/* kmap_atomic should NEVER fail. */
+	KBASE_DEBUG_ASSERT(NULL != pgd_page);
+	/* Copy the page to our preallocated buffer so that we can minimize
+	 * kmap_atomic usage */
+	memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
+	kunmap_atomic(pgd_page);
+	pgd_page = pgd_page_buffer;
+
+	mmu_mode = kbdev->mmu_mode;
+
+	for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+		target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
+
+		if (target_pgd) {
+			if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
+				mmu_teardown_level(kbdev, mmut,
+						   target_pgd,
+						   level + 1,
+						   pgd_page_buffer +
+						   (PAGE_SIZE / sizeof(u64)));
+			}
+		}
+	}
+
+	p = pfn_to_page(PFN_DOWN(pgd));
+	kbase_mem_pool_free(&kbdev->mem_pool, p, true);
+	kbase_atomic_sub_pages(1, &kbdev->memdev.used_pages);
+
+	/* If MMU tables belong to a context then pages will have been accounted
+	 * against it, so we must decrement the usage counts here.
+	 */
+	if (mmut->kctx) {
+		kbase_process_page_usage_dec(mmut->kctx, 1);
+		kbase_atomic_sub_pages(1, &mmut->kctx->used_pages);
+	}
+}
+
+int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+		struct kbase_context *kctx)
+{
+	mutex_init(&mmut->mmu_lock);
+	mmut->kctx = kctx;
+
+	/* Preallocate one page per MMU level (four in total) for
+	 * mmu_teardown_level() to use as scratch buffers */
+	mmut->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+
+	if (mmut->mmu_teardown_pages == NULL)
+		return -ENOMEM;
+
+	mmut->pgd = 0;
+	/* We allocate pages into the kbdev memory pool, then
+	 * kbase_mmu_alloc_pgd will allocate out of that pool. This is done to
+	 * avoid allocations from the kernel happening with the lock held.
+	 */
+	while (!mmut->pgd) {
+		int err;
+
+		err = kbase_mem_pool_grow(&kbdev->mem_pool,
+				MIDGARD_MMU_BOTTOMLEVEL);
+		if (err) {
+			kbase_mmu_term(kbdev, mmut);
+			return -ENOMEM;
+		}
+
+		mutex_lock(&mmut->mmu_lock);
+		mmut->pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
+		mutex_unlock(&mmut->mmu_lock);
+	}
+
+	return 0;
+}
+
+void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
+{
+	if (mmut->pgd) {
+		mutex_lock(&mmut->mmu_lock);
+		mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL,
+				mmut->mmu_teardown_pages);
+		mutex_unlock(&mmut->mmu_lock);
+
+		if (mmut->kctx)
+			KBASE_TLSTREAM_AUX_PAGESALLOC(mmut->kctx->id, 0);
+	}
+
+	kfree(mmut->mmu_teardown_pages);
+	mutex_destroy(&mmut->mmu_lock);
+}
+
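+/*
+ * Dump one page table level into the user-visible buffer. Each table is
+ * emitted as a u64 header (the table's physical address OR'd with its level)
+ * followed by its KBASE_MMU_PAGE_ENTRIES entries, with child tables appended
+ * recursively. Returns the number of bytes this subtree requires (entries are
+ * only copied while space remains), or 0 on failure.
+ */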
+static size_t kbasep_mmu_dump_level(struct kbase_context *kctx,
+		phys_addr_t pgd, int level,
+		char ** const buffer, size_t *size_left)
+{
+	phys_addr_t target_pgd;
+	u64 *pgd_page;
+	int i;
+	size_t size = KBASE_MMU_PAGE_ENTRIES * sizeof(u64) + sizeof(u64);
+	size_t dump_size;
+	struct kbase_mmu_mode const *mmu_mode;
+
+	KBASE_DEBUG_ASSERT(NULL != kctx);
+	lockdep_assert_held(&kctx->mmu.mmu_lock);
+
+	mmu_mode = kctx->kbdev->mmu_mode;
+
+	pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+	if (!pgd_page) {
+		dev_warn(kctx->kbdev->dev, "kbasep_mmu_dump_level: kmap failure\n");
+		return 0;
+	}
+
+	if (*size_left >= size) {
+		/* A modified physical address that contains the page table level */
+		u64 m_pgd = pgd | level;
+
+		/* Put the modified physical address in the output buffer */
+		memcpy(*buffer, &m_pgd, sizeof(m_pgd));
+		*buffer += sizeof(m_pgd);
+
+		/* Followed by the page table itself */
+		memcpy(*buffer, pgd_page, sizeof(u64) * KBASE_MMU_PAGE_ENTRIES);
+		*buffer += sizeof(u64) * KBASE_MMU_PAGE_ENTRIES;
+
+		*size_left -= size;
+	}
+
+	if (level < MIDGARD_MMU_BOTTOMLEVEL) {
+		for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
+			if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
+				target_pgd = mmu_mode->pte_to_phy_addr(
+						pgd_page[i]);
+
+				dump_size = kbasep_mmu_dump_level(kctx,
+						target_pgd, level + 1,
+						buffer, size_left);
+				if (!dump_size) {
+					kunmap(pfn_to_page(PFN_DOWN(pgd)));
+					return 0;
+				}
+				size += dump_size;
+			}
+		}
+	}
+
+	kunmap(pfn_to_page(PFN_DOWN(pgd)));
+
+	return size;
+}
+
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
+{
+	void *kaddr;
+	size_t size_left;
+
+	KBASE_DEBUG_ASSERT(kctx);
+
+	if (0 == nr_pages) {
+		/* can't dump in a 0 sized buffer, early out */
+		return NULL;
+	}
+
+	size_left = nr_pages * PAGE_SIZE;
+
+	KBASE_DEBUG_ASSERT(0 != size_left);
+	kaddr = vmalloc_user(size_left);
+
+	mutex_lock(&kctx->mmu.mmu_lock);
+
+	if (kaddr) {
+		u64 end_marker = 0xFFULL;
+		char *buffer;
+		char *mmu_dump_buffer;
+		u64 config[3];
+		size_t dump_size, size = 0;
+
+		buffer = (char *)kaddr;
+		mmu_dump_buffer = buffer;
+
+		if (kctx->api_version >= KBASE_API_VERSION(8, 4)) {
+			struct kbase_mmu_setup as_setup;
+
+			kctx->kbdev->mmu_mode->get_as_setup(&kctx->mmu,
+					&as_setup);
+			config[0] = as_setup.transtab;
+			config[1] = as_setup.memattr;
+			config[2] = as_setup.transcfg;
+			memcpy(buffer, &config, sizeof(config));
+			mmu_dump_buffer += sizeof(config);
+			size_left -= sizeof(config);
+			size += sizeof(config);
+		}
+
+		dump_size = kbasep_mmu_dump_level(kctx,
+				kctx->mmu.pgd,
+				MIDGARD_MMU_TOPLEVEL,
+				&mmu_dump_buffer,
+				&size_left);
+
+		if (!dump_size)
+			goto fail_free;
+
+		size += dump_size;
+
+		/* Add on the size for the end marker */
+		size += sizeof(u64);
+
+		if (size > (nr_pages * PAGE_SIZE)) {
+			/* The buffer isn't big enough - free the memory and return failure */
+			goto fail_free;
+		}
+
+		/* Add the end marker */
+		memcpy(mmu_dump_buffer, &end_marker, sizeof(u64));
+	}
+
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return kaddr;
+
+fail_free:
+	vfree(kaddr);
+	mutex_unlock(&kctx->mmu.mmu_lock);
+	return NULL;
+}
+KBASE_EXPORT_TEST_API(kbase_mmu_dump);
+
+void bus_fault_worker(struct work_struct *data)
+{
+	struct kbase_as *faulting_as;
+	int as_no;
+	struct kbase_context *kctx;
+	struct kbase_device *kbdev;
+	struct kbase_fault *fault;
+	bool reset_status = false;
+
+	faulting_as = container_of(data, struct kbase_as, work_busfault);
+	fault = &faulting_as->bf_data;
+
+	/* Ensure that any pending page fault worker has completed */
+	flush_work(&faulting_as->work_pagefault);
+
+	as_no = faulting_as->number;
+
+	kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
+
+	/* Grab the context that was already refcounted in kbase_mmu_interrupt().
+	 * Therefore, it cannot be scheduled out of this AS until we explicitly release it
+	 */
+	kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
+	if (WARN_ON(!kctx)) {
+		atomic_dec(&kbdev->faults_pending);
+		return;
+	}
+
+	if (unlikely(fault->protected_mode)) {
+		kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+				"Permission failure", fault);
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+				KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+		kbasep_js_runpool_release_ctx(kbdev, kctx);
+		atomic_dec(&kbdev->faults_pending);
+		return;
+	}
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+		/* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+		 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+		 * are evicted from the GPU before the switch.
+		 */
+		dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+		reset_status = kbase_prepare_to_reset_gpu(kbdev);
+	}
+	/* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */
+	if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+		unsigned long flags;
+
+		/* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+		/* AS transaction begin */
+		mutex_lock(&kbdev->mmu_hw_mutex);
+
+		/* Set the MMU into unmapped mode */
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_mmu_disable(kctx);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		mutex_unlock(&kbdev->mmu_hw_mutex);
+		/* AS transaction end */
+
+		kbase_mmu_hw_clear_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+		kbase_mmu_hw_enable_fault(kbdev, faulting_as,
+					 KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+
+		kbase_pm_context_idle(kbdev);
+	}
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+		kbase_reset_gpu(kbdev);
+
+	kbasep_js_runpool_release_ctx(kbdev, kctx);
+
+	atomic_dec(&kbdev->faults_pending);
+}
+
+const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code)
+{
+	const char *e;
+
+	switch (exception_code) {
+		/* Non-Fault Status code */
+	case 0x00:
+		e = "NOT_STARTED/IDLE/OK";
+		break;
+	case 0x01:
+		e = "DONE";
+		break;
+	case 0x02:
+		e = "INTERRUPTED";
+		break;
+	case 0x03:
+		e = "STOPPED";
+		break;
+	case 0x04:
+		e = "TERMINATED";
+		break;
+	case 0x08:
+		e = "ACTIVE";
+		break;
+		/* Job exceptions */
+	case 0x40:
+		e = "JOB_CONFIG_FAULT";
+		break;
+	case 0x41:
+		e = "JOB_POWER_FAULT";
+		break;
+	case 0x42:
+		e = "JOB_READ_FAULT";
+		break;
+	case 0x43:
+		e = "JOB_WRITE_FAULT";
+		break;
+	case 0x44:
+		e = "JOB_AFFINITY_FAULT";
+		break;
+	case 0x48:
+		e = "JOB_BUS_FAULT";
+		break;
+	case 0x50:
+		e = "INSTR_INVALID_PC";
+		break;
+	case 0x51:
+		e = "INSTR_INVALID_ENC";
+		break;
+	case 0x52:
+		e = "INSTR_TYPE_MISMATCH";
+		break;
+	case 0x53:
+		e = "INSTR_OPERAND_FAULT";
+		break;
+	case 0x54:
+		e = "INSTR_TLS_FAULT";
+		break;
+	case 0x55:
+		e = "INSTR_BARRIER_FAULT";
+		break;
+	case 0x56:
+		e = "INSTR_ALIGN_FAULT";
+		break;
+	case 0x58:
+		e = "DATA_INVALID_FAULT";
+		break;
+	case 0x59:
+		e = "TILE_RANGE_FAULT";
+		break;
+	case 0x5A:
+		e = "ADDR_RANGE_FAULT";
+		break;
+	case 0x60:
+		e = "OUT_OF_MEMORY";
+		break;
+		/* GPU exceptions */
+	case 0x80:
+		e = "DELAYED_BUS_FAULT";
+		break;
+	case 0x88:
+		e = "SHAREABILITY_FAULT";
+		break;
+		/* MMU exceptions */
+	case 0xC0:
+	case 0xC1:
+	case 0xC2:
+	case 0xC3:
+	case 0xC4:
+	case 0xC5:
+	case 0xC6:
+	case 0xC7:
+		e = "TRANSLATION_FAULT";
+		break;
+	case 0xC8:
+		e = "PERMISSION_FAULT";
+		break;
+	case 0xC9:
+	case 0xCA:
+	case 0xCB:
+	case 0xCC:
+	case 0xCD:
+	case 0xCE:
+	case 0xCF:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "PERMISSION_FAULT";
+		else
+			e = "UNKNOWN";
+		break;
+	case 0xD0:
+	case 0xD1:
+	case 0xD2:
+	case 0xD3:
+	case 0xD4:
+	case 0xD5:
+	case 0xD6:
+	case 0xD7:
+		e = "TRANSTAB_BUS_FAULT";
+		break;
+	case 0xD8:
+		e = "ACCESS_FLAG";
+		break;
+	case 0xD9:
+	case 0xDA:
+	case 0xDB:
+	case 0xDC:
+	case 0xDD:
+	case 0xDE:
+	case 0xDF:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "ACCESS_FLAG";
+		else
+			e = "UNKNOWN";
+		break;
+	case 0xE0:
+	case 0xE1:
+	case 0xE2:
+	case 0xE3:
+	case 0xE4:
+	case 0xE5:
+	case 0xE6:
+	case 0xE7:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "ADDRESS_SIZE_FAULT";
+		else
+			e = "UNKNOWN";
+		break;
+	case 0xE8:
+	case 0xE9:
+	case 0xEA:
+	case 0xEB:
+	case 0xEC:
+	case 0xED:
+	case 0xEE:
+	case 0xEF:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			e = "MEMORY_ATTRIBUTES_FAULT";
+		else
+			e = "UNKNOWN";
+		break;
+	default:
+		e = "UNKNOWN";
+		break;
+	}
+
+	return e;
+}
+
+static const char *access_type_name(struct kbase_device *kbdev,
+		u32 fault_status)
+{
+	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			return "ATOMIC";
+		else
+			return "UNKNOWN";
+	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+		return "READ";
+	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+		return "WRITE";
+	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+		return "EXECUTE";
+	default:
+		WARN_ON(1);
+		return NULL;
+	}
+}
+
+/**
+ * The caller must ensure it has retained the ctx to prevent it from being
+ * scheduled out whilst it is being worked on.
+ */
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+		struct kbase_as *as, const char *reason_str,
+		struct kbase_fault *fault)
+{
+	unsigned long flags;
+	int exception_type;
+	int access_type;
+	int source_id;
+	int as_no;
+	struct kbase_device *kbdev;
+	struct kbasep_js_device_data *js_devdata;
+
+	bool reset_status = false;
+
+	as_no = as->number;
+	kbdev = kctx->kbdev;
+	js_devdata = &kbdev->js_data;
+
+	/* ASSERT that the context won't leave the runpool */
+	KBASE_DEBUG_ASSERT(atomic_read(&kctx->refcount) > 0);
+
+	/* decode the fault status */
+	exception_type = fault->status & 0xFF;
+	access_type = (fault->status >> 8) & 0x3;
+	source_id = (fault->status >> 16);
+
+	/* terminal fault, print info about the fault */
+	dev_err(kbdev->dev,
+		"Unhandled Page fault in AS%d at VA 0x%016llX\n"
+		"Reason: %s\n"
+		"raw fault status: 0x%X\n"
+		"decoded fault status: %s\n"
+		"exception type 0x%X: %s\n"
+		"access type 0x%X: %s\n"
+		"source id 0x%X\n"
+		"pid: %d\n",
+		as_no, fault->addr,
+		reason_str,
+		fault->status,
+		(fault->status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+		exception_type, kbase_exception_name(kbdev, exception_type),
+		access_type, access_type_name(kbdev, fault->status),
+		source_id,
+		kctx->pid);
+
+	/* hardware counters dump fault handling */
+	if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
+			(kbdev->hwcnt.backend.state ==
+						KBASE_INSTR_STATE_DUMPING)) {
+		if ((fault->addr >= kbdev->hwcnt.addr) &&
+				(fault->addr < (kbdev->hwcnt.addr +
+					kbdev->hwcnt.addr_bytes)))
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+	}
+
+	/* Stop the kctx from submitting more jobs and cause it to be scheduled
+	 * out/rescheduled - this will occur on releasing the context's refcount */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbasep_js_clear_submit_allowed(js_devdata, kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Kill any running jobs from the context. Submit is disallowed, so no more jobs from this
+	 * context can appear in the job slots from this point on */
+	kbase_backend_jm_kill_jobs_from_kctx(kctx);
+	/* AS transaction begin */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+		/* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
+		 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
+		 * are evicted from the GPU before the switch.
+		 */
+		dev_err(kbdev->dev, "Unhandled page fault. For this GPU version we now soft-reset the GPU as part of page fault recovery.");
+		reset_status = kbase_prepare_to_reset_gpu(kbdev);
+	}
+	/* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_mmu_disable(kctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+	/* AS transaction end */
+	/* Clear down the fault */
+	kbase_mmu_hw_clear_fault(kbdev, as,
+			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+	kbase_mmu_hw_enable_fault(kbdev, as,
+			KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
+		kbase_reset_gpu(kbdev);
+}
+
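+/*
+ * Worker for the address space "poking" timer: issues an UNLOCK command to
+ * force a uTLB invalidate, then re-arms the 5ms timer while the poke is
+ * still retained and not being killed.
+ */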
+void kbasep_as_do_poke(struct work_struct *work)
+{
+	struct kbase_as *as;
+	struct kbase_device *kbdev;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(work);
+	as = container_of(work, struct kbase_as, poke_work);
+	kbdev = container_of(as, struct kbase_device, as[as->number]);
+	KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+	/* GPU power will already be active by virtue of the caller holding a JS
+	 * reference on the address space, and will not release it until this worker
+	 * has finished */
+
+	/* Further to the comment above, we know that while this function is running
+	 * the AS will not be released as before the atom is released this workqueue
+	 * is flushed (in kbase_as_poking_timer_release_atom)
+	 */
+
+	/* AS transaction begin */
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	/* Force a uTLB invalidate */
+	kbase_mmu_hw_do_operation(kbdev, as, 0, 0,
+				  AS_COMMAND_UNLOCK, 0);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
+	/* AS transaction end */
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (as->poke_refcount &&
+		!(as->poke_state & KBASE_AS_POKE_STATE_KILLING_POKE)) {
+		/* Only queue up the timer if we need it, and we're not trying to kill it */
+		hrtimer_start(&as->poke_timer, HR_TIMER_DELAY_MSEC(5), HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
+enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
+{
+	struct kbase_as *as;
+	int queue_work_ret;
+
+	KBASE_DEBUG_ASSERT(NULL != timer);
+	as = container_of(timer, struct kbase_as, poke_timer);
+	KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+	queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+	KBASE_DEBUG_ASSERT(queue_work_ret);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * Retain the poking timer on an atom's context (if the atom hasn't already
+ * done so), and start the timer (if it's not already started).
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that's running on the GPU.
+ *
+ * The caller must hold hwaccess_lock
+ *
+ * This can be called safely from atomic context
+ */
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	struct kbase_as *as;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (katom->poking)
+		return;
+
+	katom->poking = 1;
+
+	/* It's safe to work on the as/as_nr without an explicit reference,
+	 * because the caller holds the hwaccess_lock, and the atom itself
+	 * was also running and had already taken a reference  */
+	as = &kbdev->as[kctx->as_nr];
+
+	if (++(as->poke_refcount) == 1) {
+		/* First refcount for poke needed: check if not already in flight */
+		if (!as->poke_state) {
+			/* need to start poking */
+			as->poke_state |= KBASE_AS_POKE_STATE_IN_FLIGHT;
+			queue_work(as->poke_wq, &as->poke_work);
+		}
+	}
+}
+
+/**
+ * If an atom holds a poking timer, release it and wait for it to finish
+ *
+ * This must only be called on a context that's scheduled in, and an atom
+ * that still has a JS reference on the context
+ *
+ * This must \b not be called from atomic context, since it can sleep.
+ */
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
+{
+	struct kbase_as *as;
+	unsigned long flags;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+	KBASE_DEBUG_ASSERT(kctx);
+	KBASE_DEBUG_ASSERT(katom);
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+	if (!katom->poking)
+		return;
+
+	as = &kbdev->as[kctx->as_nr];
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	KBASE_DEBUG_ASSERT(as->poke_refcount > 0);
+	KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
+
+	if (--(as->poke_refcount) == 0) {
+		as->poke_state |= KBASE_AS_POKE_STATE_KILLING_POKE;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		hrtimer_cancel(&as->poke_timer);
+		flush_workqueue(as->poke_wq);
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+		/* Re-check whether it's still needed */
+		if (as->poke_refcount) {
+			int queue_work_ret;
+			/* Poking still needed:
+			 * - Another retain will not be starting the timer or queueing work,
+			 * because it's still marked as in-flight
+			 * - The hrtimer has finished, and has not started a new timer or
+			 * queued work because it's been marked as killing
+			 *
+			 * So whatever happens now, just queue the work again */
+			as->poke_state &= ~((kbase_as_poke_state)KBASE_AS_POKE_STATE_KILLING_POKE);
+			queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
+			KBASE_DEBUG_ASSERT(queue_work_ret);
+		} else {
+			/* It isn't - so mark it as not in flight, and not killing */
+			as->poke_state = 0u;
+
+			/* The poke associated with the atom has now finished. If this is
+			 * also the last atom on the context, then we can guarantee no more
+			 * pokes (and thus no more poking register accesses) will occur on
+			 * the context until new atoms are run */
+		}
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	katom->poking = 0;
+}
+
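+/*
+ * Process a fault reported against an address space: with no context
+ * associated the fault is treated as spurious and simply cleared and
+ * re-enabled (soft-resetting the GPU on hardware affected by issue 8245);
+ * otherwise the bus fault or page fault worker is queued so the fault can be
+ * handled outside of interrupt context.
+ */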
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+		struct kbase_context *kctx, struct kbase_as *as,
+		struct kbase_fault *fault)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!kctx) {
+		dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
+				kbase_as_has_bus_fault(as) ?
+						"Bus error" : "Page fault",
+				as->number, fault->addr);
+
+		/* Since no ctx was found, the MMU must be disabled. */
+		WARN_ON(as->current_setup.transtab);
+
+		if (kbase_as_has_bus_fault(as)) {
+			kbase_mmu_hw_clear_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+			kbase_mmu_hw_enable_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+		} else if (kbase_as_has_page_fault(as)) {
+			kbase_mmu_hw_clear_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+			kbase_mmu_hw_enable_fault(kbdev, as,
+					KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+		}
+
+		if (kbase_as_has_bus_fault(as) &&
+				kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
+			bool reset_status;
+			/*
+			 * Reset the GPU, like in bus_fault_worker, in case an
+			 * earlier error hasn't been properly cleared by this
+			 * point.
+			 */
+			dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
+			reset_status = kbase_prepare_to_reset_gpu_locked(kbdev);
+			if (reset_status)
+				kbase_reset_gpu_locked(kbdev);
+		}
+
+		return;
+	}
+
+	if (kbase_as_has_bus_fault(as)) {
+		/*
+		 * hw counters dumping in progress, signal the
+		 * other thread that it failed
+		 */
+		if ((kbdev->hwcnt.kctx == kctx) &&
+		    (kbdev->hwcnt.backend.state ==
+					KBASE_INSTR_STATE_DUMPING))
+			kbdev->hwcnt.backend.state =
+						KBASE_INSTR_STATE_FAULT;
+
+		/*
+		 * Stop the kctx from submitting more jobs and cause it
+		 * to be scheduled out/rescheduled when all references
+		 * to it are released
+		 */
+		kbasep_js_clear_submit_allowed(js_devdata, kctx);
+
+		if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU))
+			dev_warn(kbdev->dev,
+					"Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n",
+					as->number, fault->addr,
+					fault->extra_addr);
+		else
+			dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
+					as->number, fault->addr);
+
+		/*
+		 * We need to switch to UNMAPPED mode - but we do this in a
+		 * worker so that we can sleep
+		 */
+		WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
+		atomic_inc(&kbdev->faults_pending);
+	} else {
+		WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
+		atomic_inc(&kbdev->faults_pending);
+	}
+}
+
+void kbase_flush_mmu_wqs(struct kbase_device *kbdev)
+{
+	int i;
+
+	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+		struct kbase_as *as = &kbdev->as[i];
+
+		flush_workqueue(as->pf_wq);
+	}
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
new file mode 100644
index 0000000..70d5f2b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
@@ -0,0 +1,124 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file
+ * Interface file for accessing MMU hardware functionality
+ */
+
+/**
+ * @page mali_kbase_mmu_hw_page MMU hardware interface
+ *
+ * @section mali_kbase_mmu_hw_intro_sec Introduction
+ * This module provides an abstraction for accessing the functionality provided
+ * by the midgard MMU and thus allows all MMU HW access to be contained within
+ * one common place and allows for different backends (implementations) to
+ * be provided.
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_H_
+#define _MALI_KBASE_MMU_HW_H_
+
+/* Forward declarations */
+struct kbase_device;
+struct kbase_as;
+struct kbase_context;
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup mali_kbase_mmu_hw  MMU access APIs
+ * @{
+ */
+
+/** @brief MMU fault type descriptor.
+ */
+enum kbase_mmu_fault_type {
+	KBASE_MMU_FAULT_TYPE_UNKNOWN = 0,
+	KBASE_MMU_FAULT_TYPE_PAGE,
+	KBASE_MMU_FAULT_TYPE_BUS,
+	KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED,
+	KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED
+};
+
+/** @brief Configure an address space for use.
+ *
+ * Configure the MMU using the address space details setup in the
+ * @ref kbase_context structure.
+ *
+ * @param[in]  kbdev          kbase device to configure.
+ * @param[in]  as             address space to configure.
+ */
+void kbase_mmu_hw_configure(struct kbase_device *kbdev,
+		struct kbase_as *as);
+
+/** @brief Issue an operation to the MMU.
+ *
+ * Issue an operation (MMU invalidate, MMU flush, etc) on the address space that
+ * is associated with the provided @ref kbase_context over the specified range
+ *
+ * @param[in]  kbdev         kbase device to issue the MMU operation on.
+ * @param[in]  as            address space to issue the MMU operation on.
+ * @param[in]  vpfn          MMU Virtual Page Frame Number to start the
+ *                           operation on.
+ * @param[in]  nr            Number of pages to work on.
+ * @param[in]  type          Operation type (written to ASn_COMMAND).
+ * @param[in]  handling_irq  Is this operation being called during the handling
+ *                           of an interrupt?
+ *
+ * @return Zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+		u64 vpfn, u32 nr, u32 type,
+		unsigned int handling_irq);
+
+/** @brief Clear a fault that has been previously reported by the MMU.
+ *
+ * Clear a bus error or page fault that has been reported by the MMU.
+ *
+ * @param[in]  kbdev         kbase device to  clear the fault from.
+ * @param[in]  as            address space to  clear the fault from.
+ * @param[in]  type          The type of fault that needs to be cleared.
+ */
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type);
+
+/** @brief Enable fault that has been previously reported by the MMU.
+ *
+ * After a page fault or bus error has been reported by the MMU these
+ * will be disabled. After these are handled this function needs to be
+ * called to enable the page fault or bus error fault again.
+ *
+ * @param[in]  kbdev         kbase device to again enable the fault from.
+ * @param[in]  as            address space to again enable the fault from.
+ * @param[in]  type          The type of fault that needs to be enabled again.
+ */
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+		enum kbase_mmu_fault_type type);
+
+/** @} *//* end group mali_kbase_mmu_hw */
+/** @} *//* end group base_kbase_api */
+
+#endif	/* _MALI_KBASE_MMU_HW_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c
new file mode 100644
index 0000000..38ca456
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c
@@ -0,0 +1,223 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2014, 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+#include "mali_kbase_defs.h"
+
+#define ENTRY_TYPE_MASK     3ULL
+/* For valid ATEs bit 1 = ((level == 3) ? 1 : 0).
+ * Valid ATE entries at level 3 are flagged with the value 3.
+ * Valid ATE entries at level 0-2 are flagged with the value 1.
+ */
+#define ENTRY_IS_ATE_L3		3ULL
+#define ENTRY_IS_ATE_L02	1ULL
+#define ENTRY_IS_INVAL		2ULL
+#define ENTRY_IS_PTE		3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2)	/* bits 4:2 */
+#define ENTRY_ACCESS_RW (1ULL << 6)     /* bits 7:6 */
+#define ENTRY_ACCESS_RO (3ULL << 6)
+#define ENTRY_SHARE_BITS (3ULL << 8)	/* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
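+/* A bottom level ATE written by entry_set_ate() is the tagged physical
+ * address OR'd with the access/attribute flags, ENTRY_ACCESS_BIT and
+ * ENTRY_IS_ATE_L3; levels 0-2 use ENTRY_IS_ATE_L02 instead.
+ */
+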
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#if KERNEL_VERSION(3, 18, 13) <= LINUX_VERSION_CODE
+	WRITE_ONCE(*pte, phy);
+#else
+#ifdef CONFIG_64BIT
+	barrier();
+	*pte = phy;
+	barrier();
+#elif defined(CONFIG_ARM)
+	barrier();
+	asm volatile("ldrd r0, [%1]\n\t"
+		     "strd r0, %0\n\t"
+		     : "=m" (*pte)
+		     : "r" (&phy)
+		     : "r0", "r1");
+	barrier();
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
+		struct kbase_mmu_setup * const setup)
+{
+	/* Set up the required caching policies at the correct indices
+	 * in the memattr register.
+	 */
+	setup->memattr =
+		(AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
+			(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+		(AS_MEMATTR_FORCE_TO_CACHE_ALL    <<
+			(AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+		(AS_MEMATTR_WRITE_ALLOC           <<
+			(AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+		(AS_MEMATTR_AARCH64_OUTER_IMPL_DEF   <<
+			(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+		(AS_MEMATTR_AARCH64_OUTER_WA         <<
+			(AS_MEMATTR_INDEX_OUTER_WA * 8)) |
+		(AS_MEMATTR_AARCH64_NON_CACHEABLE    <<
+			(AS_MEMATTR_INDEX_NON_CACHEABLE * 8));
+
+	setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
+	setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
+}
+
+static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+		int as_nr)
+{
+	struct kbase_as *as;
+	struct kbase_mmu_setup *current_setup;
+
+	if (WARN_ON(as_nr == KBASEP_AS_NR_INVALID))
+		return;
+
+	as = &kbdev->as[as_nr];
+	current_setup = &as->current_setup;
+
+	mmu_get_as_setup(mmut, current_setup);
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+	struct kbase_as * const as = &kbdev->as[as_nr];
+	struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+	current_setup->transtab = 0ULL;
+	current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED;
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+	if (!(entry & 1))
+		return 0;
+
+	return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate, unsigned int level)
+{
+	if (level == MIDGARD_MMU_BOTTOMLEVEL)
+		return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L3);
+	else
+		return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L02);
+}
+
+static int pte_is_valid(u64 pte, unsigned int level)
+{
+	/* PTEs cannot exist at the bottom level */
+	if (level == MIDGARD_MMU_BOTTOMLEVEL)
+		return false;
+	return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+	u64 mmu_flags;
+
+	/* store mem_attr index as 4:2 (macro called ensures 3 bits already) */
+	mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
+
+	/* Set access flags - note that AArch64 stage 1 does not support
+	 * write-only access, so we use read/write instead
+	 */
+	if (flags & KBASE_REG_GPU_WR)
+		mmu_flags |= ENTRY_ACCESS_RW;
+	else if (flags & KBASE_REG_GPU_RD)
+		mmu_flags |= ENTRY_ACCESS_RO;
+
+	/* nx if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+	if (flags & KBASE_REG_SHARE_BOTH) {
+		/* inner and outer shareable */
+		mmu_flags |= SHARE_BOTH_BITS;
+	} else if (flags & KBASE_REG_SHARE_IN) {
+		/* inner shareable coherency */
+		mmu_flags |= SHARE_INNER_BITS;
+	}
+
+	return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry,
+		struct tagged_addr phy,
+		unsigned long flags,
+		unsigned int level)
+{
+	if (level == MIDGARD_MMU_BOTTOMLEVEL)
+		page_table_entry_set(entry, as_phys_addr_t(phy) |
+				get_mmu_flags(flags) |
+				ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L3);
+	else
+		page_table_entry_set(entry, as_phys_addr_t(phy) |
+				get_mmu_flags(flags) |
+				ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L02);
+}
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+	page_table_entry_set(entry, (phy & PAGE_MASK) |
+			ENTRY_ACCESS_BIT | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+	page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const aarch64_mode = {
+	.update = mmu_update,
+	.get_as_setup = mmu_get_as_setup,
+	.disable_as = mmu_disable_as,
+	.pte_to_phy_addr = pte_to_phy_addr,
+	.ate_is_valid = ate_is_valid,
+	.pte_is_valid = pte_is_valid,
+	.entry_set_ate = entry_set_ate,
+	.entry_set_pte = entry_set_pte,
+	.entry_invalidate = entry_invalidate,
+	.flags = KBASE_MMU_MODE_HAS_NON_CACHEABLE
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void)
+{
+	return &aarch64_mode;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c
new file mode 100644
index 0000000..f6bdf91
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c
@@ -0,0 +1,214 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#include "mali_kbase.h"
+#include "mali_midg_regmap.h"
+#include "mali_kbase_defs.h"
+
+#define ENTRY_TYPE_MASK     3ULL
+#define ENTRY_IS_ATE        1ULL
+#define ENTRY_IS_INVAL      2ULL
+#define ENTRY_IS_PTE        3ULL
+
+#define ENTRY_ATTR_BITS (7ULL << 2)	/* bits 4:2 */
+#define ENTRY_RD_BIT (1ULL << 6)
+#define ENTRY_WR_BIT (1ULL << 7)
+#define ENTRY_SHARE_BITS (3ULL << 8)	/* bits 9:8 */
+#define ENTRY_ACCESS_BIT (1ULL << 10)
+#define ENTRY_NX_BIT (1ULL << 54)
+
+#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | \
+		ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
+
+/* Helper Function to perform assignment of page table entries, to
+ * ensure the use of strd, which is required on LPAE systems.
+ */
+static inline void page_table_entry_set(u64 *pte, u64 phy)
+{
+#if KERNEL_VERSION(3, 18, 13) <= LINUX_VERSION_CODE
+	WRITE_ONCE(*pte, phy);
+#else
+#ifdef CONFIG_64BIT
+	barrier();
+	*pte = phy;
+	barrier();
+#elif defined(CONFIG_ARM)
+	barrier();
+	asm volatile("ldrd r0, [%1]\n\t"
+		     "strd r0, %0\n\t"
+		     : "=m" (*pte)
+		     : "r" (&phy)
+		     : "r0", "r1");
+	barrier();
+#else
+#error "64-bit atomic write must be implemented for your architecture"
+#endif
+#endif
+}
+
+static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
+		struct kbase_mmu_setup * const setup)
+{
+	/* Set up the required caching policies at the correct indices
+	 * in the memattr register. */
+	setup->memattr =
+		(AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY <<
+		(AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+		(AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL    <<
+		(AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8))    |
+		(AS_MEMATTR_LPAE_WRITE_ALLOC           <<
+		(AS_MEMATTR_INDEX_WRITE_ALLOC * 8))           |
+		(AS_MEMATTR_LPAE_OUTER_IMPL_DEF        <<
+		(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8))        |
+		(AS_MEMATTR_LPAE_OUTER_WA              <<
+		(AS_MEMATTR_INDEX_OUTER_WA * 8))              |
+		0; /* The other indices are unused for now */
+
+	setup->transtab = ((u64)mmut->pgd &
+		((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) |
+		AS_TRANSTAB_LPAE_ADRMODE_TABLE |
+		AS_TRANSTAB_LPAE_READ_INNER;
+
+	setup->transcfg = 0;
+}
+
+static void mmu_update(struct kbase_device *kbdev,
+		struct kbase_mmu_table *mmut,
+		int as_nr)
+{
+	struct kbase_as *as;
+	struct kbase_mmu_setup *current_setup;
+
+	if (WARN_ON(as_nr == KBASEP_AS_NR_INVALID))
+		return;
+
+	as = &kbdev->as[as_nr];
+	current_setup = &as->current_setup;
+
+	mmu_get_as_setup(mmut, current_setup);
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
+{
+	struct kbase_as * const as = &kbdev->as[as_nr];
+	struct kbase_mmu_setup * const current_setup = &as->current_setup;
+
+	current_setup->transtab = AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED;
+
+	/* Apply the address space setting */
+	kbase_mmu_hw_configure(kbdev, as);
+}
+
+static phys_addr_t pte_to_phy_addr(u64 entry)
+{
+	if (!(entry & 1))
+		return 0;
+
+	return entry & ~0xFFF;
+}
+
+static int ate_is_valid(u64 ate, unsigned int level)
+{
+	return ((ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE);
+}
+
+static int pte_is_valid(u64 pte, unsigned int level)
+{
+	return ((pte & ENTRY_TYPE_MASK) == ENTRY_IS_PTE);
+}
+
+/*
+ * Map KBASE_REG flags to MMU flags
+ */
+static u64 get_mmu_flags(unsigned long flags)
+{
+	u64 mmu_flags;
+	unsigned long memattr_idx;
+
+	memattr_idx = KBASE_REG_MEMATTR_VALUE(flags);
+	if (WARN(memattr_idx == AS_MEMATTR_INDEX_NON_CACHEABLE,
+			"Legacy Mode MMU cannot honor GPU non-cacheable memory, will use default instead\n"))
+		memattr_idx = AS_MEMATTR_INDEX_DEFAULT;
+	/* store mem_attr index as 4:2, noting that:
+	 * - macro called above ensures 3 bits already
+	 * - all AS_MEMATTR_INDEX_<...> macros only use 3 bits
+	 */
+	mmu_flags = memattr_idx << 2;
+
+	/* write perm if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_WR) ? ENTRY_WR_BIT : 0;
+	/* read perm if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_RD) ? ENTRY_RD_BIT : 0;
+	/* nx if requested */
+	mmu_flags |= (flags & KBASE_REG_GPU_NX) ? ENTRY_NX_BIT : 0;
+
+	if (flags & KBASE_REG_SHARE_BOTH) {
+		/* inner and outer shareable */
+		mmu_flags |= SHARE_BOTH_BITS;
+	} else if (flags & KBASE_REG_SHARE_IN) {
+		/* inner shareable coherency */
+		mmu_flags |= SHARE_INNER_BITS;
+	}
+
+	return mmu_flags;
+}
+
+static void entry_set_ate(u64 *entry,
+		struct tagged_addr phy,
+		unsigned long flags,
+		unsigned int level)
+{
+	page_table_entry_set(entry, as_phys_addr_t(phy) | get_mmu_flags(flags) |
+			     ENTRY_IS_ATE);
+}
+
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
+{
+	page_table_entry_set(entry, (phy & ~0xFFF) | ENTRY_IS_PTE);
+}
+
+static void entry_invalidate(u64 *entry)
+{
+	page_table_entry_set(entry, ENTRY_IS_INVAL);
+}
+
+static struct kbase_mmu_mode const lpae_mode = {
+	.update = mmu_update,
+	.get_as_setup = mmu_get_as_setup,
+	.disable_as = mmu_disable_as,
+	.pte_to_phy_addr = pte_to_phy_addr,
+	.ate_is_valid = ate_is_valid,
+	.pte_is_valid = pte_is_valid,
+	.entry_set_ate = entry_set_ate,
+	.entry_set_pte = entry_set_pte,
+	.entry_invalidate = entry_invalidate,
+	.flags = 0
+};
+
+struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void)
+{
+	return &lpae_mode;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c b/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c
new file mode 100644
index 0000000..fbb090e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_platform_fake.c
@@ -0,0 +1,124 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2016-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+
+/*
+ * This file is included only for type definitions and functions belonging to
+ * specific platform folders. Do not add dependencies with symbols that are
+ * defined somewhere else.
+ */
+#include <mali_kbase_config.h>
+
+#define PLATFORM_CONFIG_RESOURCE_COUNT 4
+#define PLATFORM_CONFIG_IRQ_RES_COUNT  3
+
+static struct platform_device *mali_device;
+
+#ifndef CONFIG_OF
+/**
+ * @brief Convert data in a struct kbase_io_resources to Linux-specific resources
+ *
+ * Converts the data in a struct kbase_io_resources to an array of Linux resource structures. Note that the function
+ * assumes that the size of the linux_resources array is at least PLATFORM_CONFIG_RESOURCE_COUNT.
+ * Resources are put in fixed order: I/O memory region, job IRQ, MMU IRQ, GPU IRQ.
+ *
+ * @param[in]  io_resource      Input IO resource data
+ * @param[out] linux_resources  Pointer to output array of Linux resource structures
+ */
+static void kbasep_config_parse_io_resources(const struct kbase_io_resources *io_resources, struct resource *const linux_resources)
+{
+	if (!io_resources || !linux_resources) {
+		pr_err("%s: couldn't find proper resources\n", __func__);
+		return;
+	}
+
+	memset(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource));
+
+	linux_resources[0].start = io_resources->io_memory_region.start;
+	linux_resources[0].end   = io_resources->io_memory_region.end;
+	linux_resources[0].flags = IORESOURCE_MEM;
+
+	linux_resources[1].start = io_resources->job_irq_number;
+	linux_resources[1].end   = io_resources->job_irq_number;
+	linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+	linux_resources[2].start = io_resources->mmu_irq_number;
+	linux_resources[2].end   = io_resources->mmu_irq_number;
+	linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+
+	linux_resources[3].start = io_resources->gpu_irq_number;
+	linux_resources[3].end   = io_resources->gpu_irq_number;
+	linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
+}
+#endif /* CONFIG_OF */
+
+int kbase_platform_register(void)
+{
+	struct kbase_platform_config *config;
+#ifndef CONFIG_OF
+	struct resource resources[PLATFORM_CONFIG_RESOURCE_COUNT];
+#endif
+	int err;
+
+	config = kbase_get_platform_config(); /* declared in midgard/mali_kbase_config.h but defined in platform folder */
+	if (config == NULL) {
+		pr_err("%s: couldn't get platform config\n", __func__);
+		return -ENODEV;
+	}
+
+	mali_device = platform_device_alloc("mali", 0);
+	if (mali_device == NULL)
+		return -ENOMEM;
+
+#ifndef CONFIG_OF
+	kbasep_config_parse_io_resources(config->io_resources, resources);
+	err = platform_device_add_resources(mali_device, resources, PLATFORM_CONFIG_RESOURCE_COUNT);
+	if (err) {
+		platform_device_put(mali_device);
+		mali_device = NULL;
+		return err;
+	}
+#endif /* CONFIG_OF */
+
+	err = platform_device_add(mali_device);
+	if (err) {
+		platform_device_unregister(mali_device);
+		mali_device = NULL;
+		return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kbase_platform_register);
+
+void kbase_platform_unregister(void)
+{
+	if (mali_device)
+		platform_device_unregister(mali_device);
+}
+EXPORT_SYMBOL(kbase_platform_unregister);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.c b/drivers/gpu/arm/midgard/mali_kbase_pm.c
new file mode 100644
index 0000000..5699eb8
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm.c
@@ -0,0 +1,196 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_pm.c
+ * Base kernel power management APIs
+ */
+
+#include <mali_kbase.h>
+#include <mali_midg_regmap.h>
+#include <mali_kbase_vinstr.h>
+#include <mali_kbase_hwcnt_context.h>
+
+#include <mali_kbase_pm.h>
+
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
+{
+	return kbase_hwaccess_pm_powerup(kbdev, flags);
+}
+
+void kbase_pm_halt(struct kbase_device *kbdev)
+{
+	kbase_hwaccess_pm_halt(kbdev);
+}
+
+void kbase_pm_context_active(struct kbase_device *kbdev)
+{
+	(void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
+}
+
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	int c;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+	if (kbase_pm_is_suspending(kbdev)) {
+		switch (suspend_handler) {
+		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
+			if (kbdev->pm.active_count != 0)
+				break;
+			/* FALLTHROUGH */
+		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
+			mutex_unlock(&kbdev->pm.lock);
+			mutex_unlock(&js_devdata->runpool_mutex);
+			return 1;
+
+		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
+			/* FALLTHROUGH */
+		default:
+			KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
+			break;
+		}
+	}
+	c = ++kbdev->pm.active_count;
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);
+
+	if (c == 1) {
+		/* First context active: Power on the GPU and any cores requested by
+		 * the policy */
+		kbase_hwaccess_pm_gpu_active(kbdev);
+	}
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	return 0;
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_active);
+
+void kbase_pm_context_idle(struct kbase_device *kbdev)
+{
+	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+	int c;
+
+	KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+
+	mutex_lock(&js_devdata->runpool_mutex);
+	mutex_lock(&kbdev->pm.lock);
+
+	c = --kbdev->pm.active_count;
+	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
+
+	KBASE_DEBUG_ASSERT(c >= 0);
+
+	if (c == 0) {
+		/* Last context has gone idle */
+		kbase_hwaccess_pm_gpu_idle(kbdev);
+
+		/* Wake up anyone waiting for this to become 0 (e.g. suspend). The
+		 * waiters must synchronize with us by locking the pm.lock after
+		 * waiting.
+		 */
+		wake_up(&kbdev->pm.zero_active_count_wait);
+	}
+
+	mutex_unlock(&kbdev->pm.lock);
+	mutex_unlock(&js_devdata->runpool_mutex);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_context_idle);
+
+void kbase_pm_suspend(struct kbase_device *kbdev)
+{
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	/* Suspend vinstr. This blocks until the vinstr worker and timer are
+	 * no longer running.
+	 */
+	kbase_vinstr_suspend(kbdev->vinstr_ctx);
+
+	/* Disable GPU hardware counters.
+	 * This call will block until counters are disabled.
+	 */
+	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+	mutex_lock(&kbdev->pm.lock);
+	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+	kbdev->pm.suspending = true;
+	mutex_unlock(&kbdev->pm.lock);
+
+	/* From now on, the active count will drop towards zero. Sometimes, it'll
+	 * go up briefly before going down again. However, once it reaches zero it
+	 * will stay there - guaranteeing that we've idled all pm references */
+
+	/* Suspend job scheduler and associated components, so that it releases all
+	 * the PM active count references */
+	kbasep_js_suspend(kbdev);
+
+	/* Wait for the active count to reach zero. This is not the same as
+	 * waiting for a power down, since not all policies power down when this
+	 * reaches zero. */
+	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
+
+	/* NOTE: We synchronize with anything that was just finishing a
+	 * kbase_pm_context_idle() call by locking the pm.lock below */
+
+	kbase_hwaccess_pm_suspend(kbdev);
+}
+
+void kbase_pm_resume(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	/* MUST happen before any pm_context_active calls occur */
+	kbase_hwaccess_pm_resume(kbdev);
+
+	/* Initial active call, to power on the GPU/cores if needed */
+	kbase_pm_context_active(kbdev);
+
+	/* Resume any blocked atoms (which may cause contexts to be scheduled in
+	 * and dependent atoms to run) */
+	kbase_resume_suspended_soft_jobs(kbdev);
+
+	/* Resume the Job Scheduler and associated components, and start running
+	 * atoms */
+	kbasep_js_resume(kbdev);
+
+	/* Matching idle call, to power off the GPU/cores if we didn't actually
+	 * need it and the policy doesn't want it on */
+	kbase_pm_context_idle(kbdev);
+
+	/* Re-enable GPU hardware counters */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* Resume vinstr */
+	kbase_vinstr_resume(kbdev->vinstr_ctx);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.h b/drivers/gpu/arm/midgard/mali_kbase_pm.h
new file mode 100644
index 0000000..59a0314
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm.h
@@ -0,0 +1,180 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_pm.h
+ * Power management API definitions
+ */
+
+#ifndef _KBASE_PM_H_
+#define _KBASE_PM_H_
+
+#include "mali_kbase_hwaccess_pm.h"
+
+#define PM_ENABLE_IRQS       0x01
+#define PM_HW_ISSUES_DETECT  0x02
+
+
+/** Initialize the power management framework.
+ *
+ * Must be called before any other power management function
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ *
+ * @return 0 if the power management framework was successfully initialized.
+ */
+int kbase_pm_init(struct kbase_device *kbdev);
+
+/** Power up GPU after all modules have been initialized and interrupt handlers installed.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ *
+ * @param flags     Flags to pass on to kbase_pm_init_hw
+ *
+ * @return 0 if powerup was successful.
+ */
+int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags);
+
+/**
+ * Halt the power management framework.
+ * Should ensure that no new interrupts are generated,
+ * but allow any currently running interrupt handlers to complete successfully.
+ * The GPU is forced off by the time this function returns, regardless of
+ * whether or not the active power policy asks for the GPU to be powered off.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_halt(struct kbase_device *kbdev);
+
+/** Terminate the power management framework.
+ *
+ * No power management functions may be called after this
+ * (except @ref kbase_pm_init)
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_term(struct kbase_device *kbdev);
+
+/** Increment the count of active contexts.
+ *
+ * This function should be called when a context is about to submit a job. It informs the active power policy that the
+ * GPU is going to be in use shortly and the policy is expected to start turning on the GPU.
+ *
+ * This function will block until the GPU is available.
+ *
+ * This function ASSERTS if a suspend is occurring/has occurred whilst this is
+ * in use. Use kbase_pm_context_active_handle_suspend() instead.
+ *
+ * @note a Suspend is only visible to Kernel threads; user-space threads in a
+ * syscall cannot witness a suspend, because they are frozen before the suspend
+ * begins.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_active(struct kbase_device *kbdev);
+
+
+/** Handler codes for doing kbase_pm_context_active_handle_suspend() */
+enum kbase_pm_suspend_handler {
+	/** A suspend is not expected/not possible - this is the same as
+	 * kbase_pm_context_active() */
+	KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE,
+	/** If we're suspending, fail and don't increase the active count */
+	KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE,
+	/** If we're suspending, succeed and allow the active count to increase iff
+	 * it didn't go from 0->1 (i.e., we didn't re-activate the GPU).
+	 *
+	 * This should only be used when there is a bounded time on the activation
+	 * (e.g. guarantee it's going to be idled very soon after) */
+	KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE
+};
+
+/** Suspend 'safe' variant of kbase_pm_context_active()
+ *
+ * If a suspend is in progress, this allows for various different ways of
+ * handling the suspend. Refer to @ref enum kbase_pm_suspend_handler for details.
+ *
+ * Returns a status code indicating whether we're allowed to keep the GPU
+ * active during the suspend, depending on the handler code. If the status code
+ * indicates a failure, the caller must abort whatever operation it was
+ * attempting, and potentially queue it up for after the OS has resumed.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ * @param suspend_handler The handler code for how to handle a suspend that might occur
+ * @return zero     Indicates success
+ * @return non-zero Indicates failure due to the system being suspending/suspended.
+ */
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler);
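+
+/*
+ * Illustrative call pattern (a sketch for documentation purposes, not code
+ * taken from this driver): a caller that must not re-activate the GPU while
+ * a suspend is in progress could do the following, deferring its work when
+ * the call reports failure:
+ *
+ *	if (kbase_pm_context_active_handle_suspend(kbdev,
+ *			KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE))
+ *		return;		(system is suspending - defer the work)
+ *	... access GPU registers ...
+ *	kbase_pm_context_idle(kbdev);
+ */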
+
+/** Decrement the reference count of active contexts.
+ *
+ * This function should be called when a context becomes idle. After this call the GPU may be turned off by the power
+ * policy so the calling code should ensure that it does not access the GPU's registers.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_context_idle(struct kbase_device *kbdev);
+
+/* NOTE: kbase_pm_is_active() is in mali_kbase.h, because it is an inline
+ * function
+ */
+
+/**
+ * Suspend the GPU and prevent any further register accesses to it from Kernel
+ * threads.
+ *
+ * This is called in response to an OS suspend event, and calls into the various
+ * kbase components to complete the suspend.
+ *
+ * @note the mechanisms used here rely on all user-space threads being frozen
+ * by the OS before we suspend. Otherwise, an IOCTL could occur that powers up
+ * the GPU e.g. via atom submission.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_suspend(struct kbase_device *kbdev);
+
+/**
+ * Resume the GPU, allow register accesses to it, and resume running atoms on
+ * the GPU.
+ *
+ * This is called in response to an OS resume event, and calls into the various
+ * kbase components to complete the resume.
+ *
+ * @param kbdev     The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_resume(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_vsync_callback - vsync callback
+ *
+ * @buffer_updated: 1 if a new frame was displayed, 0 otherwise
+ * @data: Pointer to the kbase device as returned by kbase_find_device()
+ *
+ * Callback function used to notify the power management code that a vsync has
+ * occurred on the display.
+ */
+void kbase_pm_vsync_callback(int buffer_updated, void *data);
+
+#endif				/* _KBASE_PM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c
new file mode 100644
index 0000000..763740e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.c
@@ -0,0 +1,135 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase.h"
+
+#include "mali_kbase_regs_history_debugfs.h"
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+#include <linux/debugfs.h>
+
+
+static int regs_history_size_get(void *data, u64 *val)
+{
+	struct kbase_io_history *const h = data;
+
+	*val = h->size;
+
+	return 0;
+}
+
+static int regs_history_size_set(void *data, u64 val)
+{
+	struct kbase_io_history *const h = data;
+
+	return kbase_io_history_resize(h, (u16)val);
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(regs_history_size_fops,
+		regs_history_size_get,
+		regs_history_size_set,
+		"%llu\n");
+
+
+/**
+ * regs_history_show - show callback for the register access history file.
+ *
+ * @sfile: The debugfs entry
+ * @data: Data associated with the entry
+ *
+ * This function is called to dump all recent accesses to the GPU registers.
+ *
+ * @return 0 if the data was successfully printed to the debugfs entry file,
+ * failure otherwise
+ */
+static int regs_history_show(struct seq_file *sfile, void *data)
+{
+	struct kbase_io_history *const h = sfile->private;
+	u16 i;
+	size_t iters;
+	unsigned long flags;
+
+	if (!h->enabled) {
+		seq_puts(sfile, "The register access history is disabled\n");
+		goto out;
+	}
+
+	spin_lock_irqsave(&h->lock, flags);
+
+	iters = (h->size > h->count) ? h->count : h->size;
+	seq_printf(sfile, "Last %zu register accesses of %zu total:\n", iters,
+			h->count);
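+	/* The history is kept in a ring buffer: of the last 'iters' accesses,
+	 * the oldest lives at index (h->count - iters) modulo h->size.
+	 */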
+	for (i = 0; i < iters; ++i) {
+		struct kbase_io_access *io =
+			&h->buf[(h->count - iters + i) % h->size];
+		char const access = (io->addr & 1) ? 'w' : 'r';
+
+		seq_printf(sfile, "%6i: %c: reg 0x%p val %08x\n", i, access,
+				(void *)(io->addr & ~0x1), io->value);
+	}
+
+	spin_unlock_irqrestore(&h->lock, flags);
+
+out:
+	return 0;
+}
+
+
+/**
+ * regs_history_open - open operation for regs_history debugfs file
+ *
+ * @in: &struct inode pointer
+ * @file: &struct file pointer
+ *
+ * @return 0 on success, or a negative error code otherwise
+ */
+static int regs_history_open(struct inode *in, struct file *file)
+{
+	return single_open(file, &regs_history_show, in->i_private);
+}
+
+
+static const struct file_operations regs_history_fops = {
+	.open = &regs_history_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev)
+{
+	debugfs_create_bool("regs_history_enabled", S_IRUGO | S_IWUSR,
+			kbdev->mali_debugfs_directory,
+			&kbdev->io_history.enabled);
+	debugfs_create_file("regs_history_size", S_IRUGO | S_IWUSR,
+			kbdev->mali_debugfs_directory,
+			&kbdev->io_history, &regs_history_size_fops);
+	debugfs_create_file("regs_history", S_IRUGO,
+			kbdev->mali_debugfs_directory, &kbdev->io_history,
+			&regs_history_fops);
+}
+
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h
new file mode 100644
index 0000000..a0078cb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_regs_history_debugfs.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Header file for register access history support via debugfs
+ *
+ * This interface is made available via /sys/kernel/debug/mali#/regs_history*.
+ *
+ * Usage:
+ * - regs_history_enabled: whether recording of register accesses is enabled.
+ *   Write 'y' to enable, 'n' to disable.
+ * - regs_history_size: size of the register history buffer, must be > 0
+ * - regs_history: return the information about last accesses to the registers.
+ */
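+
+/*
+ * Illustrative shell usage (assumes debugfs is mounted at /sys/kernel/debug
+ * and that the device's debugfs directory is "mali0"):
+ *
+ *   echo y   > /sys/kernel/debug/mali0/regs_history_enabled
+ *   echo 256 > /sys/kernel/debug/mali0/regs_history_size
+ *   cat /sys/kernel/debug/mali0/regs_history
+ */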
+
+#ifndef _KBASE_REGS_HISTORY_DEBUGFS_H
+#define _KBASE_REGS_HISTORY_DEBUGFS_H
+
+struct kbase_device;
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
+
+/**
+ * kbasep_regs_history_debugfs_init - add debugfs entries for register history
+ *
+ * @kbdev: Pointer to kbase_device containing the register history
+ */
+void kbasep_regs_history_debugfs_init(struct kbase_device *kbdev);
+
+#else /* CONFIG_DEBUG_FS */
+
+#define kbasep_regs_history_debugfs_init CSTD_NOP
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif  /*_KBASE_REGS_HISTORY_DEBUGFS_H*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_replay.c b/drivers/gpu/arm/midgard/mali_kbase_replay.c
new file mode 100644
index 0000000..92101fe
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_replay.c
@@ -0,0 +1,1156 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_replay.c
+ * Replay soft job handlers
+ */
+
+#include <linux/dma-mapping.h>
+#include <mali_kbase_config.h>
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mem_linux.h>
+
+#define JOB_NOT_STARTED 0
+#define JOB_TYPE_NULL      (1)
+#define JOB_TYPE_VERTEX    (5)
+#define JOB_TYPE_TILER     (7)
+#define JOB_TYPE_FUSED     (8)
+#define JOB_TYPE_FRAGMENT  (9)
+
+#define JOB_HEADER_32_FBD_OFFSET (31*4)
+#define JOB_HEADER_64_FBD_OFFSET (44*4)
+
+#define FBD_POINTER_MASK (~0x3f)
+
+#define SFBD_TILER_OFFSET (48*4)
+
+#define MFBD_TILER_OFFSET       (14*4)
+
+#define FBD_HIERARCHY_WEIGHTS 8
+#define FBD_HIERARCHY_MASK_MASK 0x1fff
+
+#define FBD_TYPE 1
+
+#define HIERARCHY_WEIGHTS 13
+
+#define JOB_HEADER_ID_MAX                 0xffff
+
+#define JOB_SOURCE_ID(status)		(((status) >> 16) & 0xFFFF)
+#define JOB_POLYGON_LIST		(0x03)
+
+struct fragment_job {
+	struct job_descriptor_header header;
+
+	u32 x[2];
+	union {
+		u64 _64;
+		u32 _32;
+	} fragment_fbd;
+};
+
+static void dump_job_head(struct kbase_context *kctx, char *head_str,
+		struct job_descriptor_header *job)
+{
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev, "%s\n", head_str);
+	dev_dbg(kctx->kbdev->dev,
+			"addr                  = %p\n"
+			"exception_status      = %x (Source ID: 0x%x Access: 0x%x Exception: 0x%x)\n"
+			"first_incomplete_task = %x\n"
+			"fault_pointer         = %llx\n"
+			"job_descriptor_size   = %x\n"
+			"job_type              = %x\n"
+			"job_barrier           = %x\n"
+			"_reserved_01          = %x\n"
+			"_reserved_02          = %x\n"
+			"_reserved_03          = %x\n"
+			"_reserved_04/05       = %x,%x\n"
+			"job_index             = %x\n"
+			"dependencies          = %x,%x\n",
+			job, job->exception_status,
+			JOB_SOURCE_ID(job->exception_status),
+			(job->exception_status >> 8) & 0x3,
+			job->exception_status  & 0xFF,
+			job->first_incomplete_task,
+			job->fault_pointer, job->job_descriptor_size,
+			job->job_type, job->job_barrier, job->_reserved_01,
+			job->_reserved_02, job->_reserved_03,
+			job->_reserved_04, job->_reserved_05,
+			job->job_index,
+			job->job_dependency_index_1,
+			job->job_dependency_index_2);
+
+	if (job->job_descriptor_size)
+		dev_dbg(kctx->kbdev->dev, "next               = %llx\n",
+				job->next_job._64);
+	else
+		dev_dbg(kctx->kbdev->dev, "next               = %x\n",
+				job->next_job._32);
+#endif
+}
+
+static int kbasep_replay_reset_sfbd(struct kbase_context *kctx,
+		u64 fbd_address, u64 tiler_heap_free,
+		u16 hierarchy_mask, u32 default_weight)
+{
+	struct {
+		u32 padding_1[1];
+		u32 flags;
+		u64 padding_2[2];
+		u64 heap_free_address;
+		u32 padding[8];
+		u32 weights[FBD_HIERARCHY_WEIGHTS];
+	} *fbd_tiler;
+	struct kbase_vmap_struct map;
+
+	dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+	fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET,
+			sizeof(*fbd_tiler), &map);
+	if (!fbd_tiler) {
+		dev_err(kctx->kbdev->dev, "kbasep_replay_reset_sfbd: failed to map fbd\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev,
+		"FBD tiler:\n"
+		"flags = %x\n"
+		"heap_free_address = %llx\n",
+		fbd_tiler->flags, fbd_tiler->heap_free_address);
+#endif
+	if (hierarchy_mask) {
+		u32 weights[HIERARCHY_WEIGHTS];
+		u16 old_hierarchy_mask = fbd_tiler->flags &
+						       FBD_HIERARCHY_MASK_MASK;
+		int i, j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (old_hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+				weights[i] = fbd_tiler->weights[j++];
+			} else {
+				weights[i] = default_weight;
+			}
+		}
+
+
+		dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
+				old_hierarchy_mask, hierarchy_mask);
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+			dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+					i, weights[i]);
+
+		j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+				dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n",
+						i, weights[i], j);
+
+				fbd_tiler->weights[j++] = weights[i];
+			}
+		}
+
+		for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+			fbd_tiler->weights[j] = 0;
+
+		fbd_tiler->flags = hierarchy_mask | (1 << 16);
+	}
+
+	fbd_tiler->heap_free_address = tiler_heap_free;
+
+	dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n",
+			fbd_tiler->heap_free_address, fbd_tiler->flags);
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+static int kbasep_replay_reset_mfbd(struct kbase_context *kctx,
+		u64 fbd_address, u64 tiler_heap_free,
+		u16 hierarchy_mask, u32 default_weight)
+{
+	struct kbase_vmap_struct map;
+	struct {
+		u32 padding_0;
+		u32 flags;
+		u64 padding_1[2];
+		u64 heap_free_address;
+		u64 padding_2;
+		u32 weights[FBD_HIERARCHY_WEIGHTS];
+	} *fbd_tiler;
+
+	dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
+
+	fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET,
+			sizeof(*fbd_tiler), &map);
+	if (!fbd_tiler) {
+		dev_err(kctx->kbdev->dev,
+			       "kbasep_replay_reset_mfbd: failed to map fbd\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev, "FBD tiler:\n"
+			"flags = %x\n"
+			"heap_free_address = %llx\n",
+			fbd_tiler->flags,
+			fbd_tiler->heap_free_address);
+#endif
+	if (hierarchy_mask) {
+		u32 weights[HIERARCHY_WEIGHTS];
+		u16 old_hierarchy_mask = (fbd_tiler->flags) &
+						       FBD_HIERARCHY_MASK_MASK;
+		int i, j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (old_hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+				weights[i] = fbd_tiler->weights[j++];
+			} else {
+				weights[i] = default_weight;
+			}
+		}
+
+
+		dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
+				old_hierarchy_mask, hierarchy_mask);
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++)
+			dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+					i, weights[i]);
+
+		j = 0;
+
+		for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
+			if (hierarchy_mask & (1 << i)) {
+				KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
+
+				dev_dbg(kctx->kbdev->dev,
+				" Writing hierarchy level %02d (%08x) to %d\n",
+							     i, weights[i], j);
+
+				fbd_tiler->weights[j++] = weights[i];
+			}
+		}
+
+		for (; j < FBD_HIERARCHY_WEIGHTS; j++)
+			fbd_tiler->weights[j] = 0;
+
+		fbd_tiler->flags = hierarchy_mask | (1 << 16);
+	}
+
+	fbd_tiler->heap_free_address = tiler_heap_free;
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+/**
+ * @brief Reset the status of an FBD pointed to by a tiler job
+ *
+ * This performs two functions :
+ * - Set the hierarchy mask
+ * - Reset the tiler free heap address
+ *
+ * @param[in] kctx              Context pointer
+ * @param[in] job_header        Address of job header to reset.
+ * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask    The hierarchy mask to use
+ * @param[in] default_weight    Default hierarchy weight to write when no other
+ *                              weight is given in the FBD
+ * @param[in] job_64            true if this job is using 64-bit
+ *                              descriptors
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx,
+		u64 job_header,	u64 tiler_heap_free,
+		u16 hierarchy_mask, u32 default_weight,	bool job_64)
+{
+	struct kbase_vmap_struct map;
+	u64 fbd_address;
+
+	if (job_64) {
+		u64 *job_ext;
+
+		job_ext = kbase_vmap(kctx,
+				job_header + JOB_HEADER_64_FBD_OFFSET,
+				sizeof(*job_ext), &map);
+
+		if (!job_ext) {
+			dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+			return -EINVAL;
+		}
+
+		fbd_address = *job_ext;
+
+		kbase_vunmap(kctx, &map);
+	} else {
+		u32 *job_ext;
+
+		job_ext = kbase_vmap(kctx,
+				job_header + JOB_HEADER_32_FBD_OFFSET,
+				sizeof(*job_ext), &map);
+
+		if (!job_ext) {
+			dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+			return -EINVAL;
+		}
+
+		fbd_address = *job_ext;
+
+		kbase_vunmap(kctx, &map);
+	}
+
+	if (fbd_address & FBD_TYPE) {
+		return kbasep_replay_reset_mfbd(kctx,
+						fbd_address & FBD_POINTER_MASK,
+						tiler_heap_free,
+						hierarchy_mask,
+						default_weight);
+	} else {
+		return kbasep_replay_reset_sfbd(kctx,
+						fbd_address & FBD_POINTER_MASK,
+						tiler_heap_free,
+						hierarchy_mask,
+						default_weight);
+	}
+}
+
+/**
+ * @brief Reset the status of a job
+ *
+ * This performs the following functions :
+ *
+ * - Reset the Job Status field of each job to NOT_STARTED.
+ * - Set the Job Type field of any Vertex Jobs to Null Job.
+ * - For any jobs using an FBD, set the Tiler Heap Free field to the value of
+ *   the tiler_heap_free parameter, and set the hierarchy level mask to the
+ *   hier_mask parameter.
+ * - Offset HW dependencies by the hw_job_id_offset parameter
+ * - Set the Perform Job Barrier flag if this job is the first in the chain
+ * - Read the address of the next job header
+ *
+ * @param[in] kctx              Context pointer
+ * @param[in,out] job_header    Address of job header to reset. Set to address
+ *                              of next job header on exit.
+ * @param[in] prev_jc           Previous job chain to link to, if this job is
+ *                              the last in the chain.
+ * @param[in] hw_job_id_offset  Offset for HW job IDs
+ * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask    The hierarchy mask to use
+ * @param[in] default_weight    Default hierarchy weight to write when no other
+ *                              weight is given in the FBD
+ * @param[in] first_in_chain    true if this job is the first in the chain
+ * @param[in] fragment_chain    true if this job is in the fragment chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_reset_job(struct kbase_context *kctx,
+		u64 *job_header, u64 prev_jc,
+		u64 tiler_heap_free, u16 hierarchy_mask,
+		u32 default_weight, u16 hw_job_id_offset,
+		bool first_in_chain, bool fragment_chain)
+{
+	struct fragment_job *frag_job;
+	struct job_descriptor_header *job;
+	u64 new_job_header;
+	struct kbase_vmap_struct map;
+
+	frag_job = kbase_vmap(kctx, *job_header, sizeof(*frag_job), &map);
+	if (!frag_job) {
+		dev_err(kctx->kbdev->dev,
+				 "kbasep_replay_reset_job: failed to map jc\n");
+		return -EINVAL;
+	}
+	job = &frag_job->header;
+
+	dump_job_head(kctx, "Job header:", job);
+
+	if (job->exception_status == JOB_NOT_STARTED && !fragment_chain) {
+		dev_err(kctx->kbdev->dev, "Job already not started\n");
+		goto out_unmap;
+	}
+	job->exception_status = JOB_NOT_STARTED;
+
+	if (job->job_type == JOB_TYPE_VERTEX)
+		job->job_type = JOB_TYPE_NULL;
+
+	if (job->job_type == JOB_TYPE_FUSED) {
+		dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n");
+		goto out_unmap;
+	}
+
+	if (first_in_chain)
+		job->job_barrier = 1;
+
+	if ((job->job_dependency_index_1 + hw_job_id_offset) >
+			JOB_HEADER_ID_MAX ||
+	    (job->job_dependency_index_2 + hw_job_id_offset) >
+			JOB_HEADER_ID_MAX ||
+	    (job->job_index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
+		dev_err(kctx->kbdev->dev,
+			     "Job indices/dependencies out of valid range\n");
+		goto out_unmap;
+	}
+
+	if (job->job_dependency_index_1)
+		job->job_dependency_index_1 += hw_job_id_offset;
+	if (job->job_dependency_index_2)
+		job->job_dependency_index_2 += hw_job_id_offset;
+
+	job->job_index += hw_job_id_offset;
+
+	if (job->job_descriptor_size) {
+		new_job_header = job->next_job._64;
+		if (!job->next_job._64)
+			job->next_job._64 = prev_jc;
+	} else {
+		new_job_header = job->next_job._32;
+		if (!job->next_job._32)
+			job->next_job._32 = prev_jc;
+	}
+	dump_job_head(kctx, "Updated to:", job);
+
+	if (job->job_type == JOB_TYPE_TILER) {
+		bool job_64 = job->job_descriptor_size != 0;
+
+		if (kbasep_replay_reset_tiler_job(kctx, *job_header,
+				tiler_heap_free, hierarchy_mask,
+				default_weight, job_64) != 0)
+			goto out_unmap;
+
+	} else if (job->job_type == JOB_TYPE_FRAGMENT) {
+		u64 fbd_address;
+
+		if (job->job_descriptor_size)
+			fbd_address = frag_job->fragment_fbd._64;
+		else
+			fbd_address = (u64)frag_job->fragment_fbd._32;
+
+		if (fbd_address & FBD_TYPE) {
+			if (kbasep_replay_reset_mfbd(kctx,
+					fbd_address & FBD_POINTER_MASK,
+					tiler_heap_free,
+					hierarchy_mask,
+					default_weight) != 0)
+				goto out_unmap;
+		} else {
+			if (kbasep_replay_reset_sfbd(kctx,
+					fbd_address & FBD_POINTER_MASK,
+					tiler_heap_free,
+					hierarchy_mask,
+					default_weight) != 0)
+				goto out_unmap;
+		}
+	}
+
+	kbase_vunmap(kctx, &map);
+
+	*job_header = new_job_header;
+
+	return 0;
+
+out_unmap:
+	kbase_vunmap(kctx, &map);
+	return -EINVAL;
+}
+
+/**
+ * @brief Find the highest job ID in a job chain
+ *
+ * @param[in] kctx        Context pointer
+ * @param[in] jc          Job chain start address
+ * @param[out] hw_job_id  Highest job ID in chain
+ *
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx,
+		u64 jc,	u16 *hw_job_id)
+{
+	while (jc) {
+		struct job_descriptor_header *job;
+		struct kbase_vmap_struct map;
+
+		dev_dbg(kctx->kbdev->dev,
+			"kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
+
+		job = kbase_vmap(kctx, jc, sizeof(*job), &map);
+		if (!job) {
+			dev_err(kctx->kbdev->dev, "failed to map jc\n");
+
+			return -EINVAL;
+		}
+
+		if (job->job_index > *hw_job_id)
+			*hw_job_id = job->job_index;
+
+		if (job->job_descriptor_size)
+			jc = job->next_job._64;
+		else
+			jc = job->next_job._32;
+
+		kbase_vunmap(kctx, &map);
+	}
+
+	return 0;
+}
+
+/**
+ * @brief Reset the status of a number of jobs
+ *
+ * This function walks the provided job chain, and calls
+ * kbasep_replay_reset_job for each job. It also links the job chain to the
+ * provided previous job chain.
+ *
+ * The function will fail if any of the jobs passed already have status of
+ * NOT_STARTED.
+ *
+ * @param[in] kctx              Context pointer
+ * @param[in] jc                Job chain to be processed
+ * @param[in] prev_jc           Job chain to be added to. May be NULL
+ * @param[in] tiler_heap_free   The value to reset Tiler Heap Free to
+ * @param[in] hierarchy_mask    The hierarchy mask to use
+ * @param[in] default_weight    Default hierarchy weight to write when no other
+ *                              weight is given in the FBD
+ * @param[in] hw_job_id_offset  Offset for HW job IDs
+ * @param[in] fragment_chain    true if this chain is the fragment chain
+ *
+ * @return 0 on success, error code otherwise
+ */
+static int kbasep_replay_parse_jc(struct kbase_context *kctx,
+		u64 jc,	u64 prev_jc,
+		u64 tiler_heap_free, u16 hierarchy_mask,
+		u32 default_weight, u16 hw_job_id_offset,
+		bool fragment_chain)
+{
+	bool first_in_chain = true;
+	int nr_jobs = 0;
+
+	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
+			jc, hw_job_id_offset);
+
+	while (jc) {
+		dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc);
+
+		if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
+				tiler_heap_free, hierarchy_mask,
+				default_weight, hw_job_id_offset,
+				first_in_chain, fragment_chain) != 0)
+			return -EINVAL;
+
+		first_in_chain = false;
+
+		nr_jobs++;
+		if (fragment_chain &&
+		    nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
+			dev_err(kctx->kbdev->dev,
+				"Exceeded maximum number of jobs in fragment chain\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * @brief Reset the status of a replay job, and set up dependencies
+ *
+ * This performs the actions to allow the replay job to be re-run following
+ * completion of the passed dependency.
+ *
+ * @param[in] katom     The atom to be reset
+ * @param[in] dep_atom  The dependency to be attached to the atom
+ */
+static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom,
+		struct kbase_jd_atom *dep_atom)
+{
+	katom->status = KBASE_JD_ATOM_STATE_QUEUED;
+	kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
+	list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
+}
+
+/**
+ * @brief Allocate an unused katom
+ *
+ * This will search the provided context for an unused katom, and will mark it
+ * as KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * If no atoms are available then the function will fail.
+ *
+ * @param[in] kctx      Context pointer
+ * @return An atom ID, or -1 on failure
+ */
+static int kbasep_allocate_katom(struct kbase_context *kctx)
+{
+	struct kbase_jd_context *jctx = &kctx->jctx;
+	int i;
+
+	for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
+		if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
+			jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
+			dev_dbg(kctx->kbdev->dev,
+				  "kbasep_allocate_katom: Allocated atom %d\n",
+									    i);
+			return i;
+		}
+	}
+
+	return -1;
+}
+
+/**
+ * @brief Release a katom
+ *
+ * This will mark the provided atom as available, and remove any dependencies.
+ *
+ * For use on error path.
+ *
+ * @param[in] kctx      Context pointer
+ * @param[in] atom_id   ID of atom to release
+ */
+static void kbasep_release_katom(struct kbase_context *kctx, int atom_id)
+{
+	struct kbase_jd_context *jctx = &kctx->jctx;
+
+	dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n",
+			atom_id);
+
+	while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
+		list_del(jctx->atoms[atom_id].dep_head[0].next);
+
+	while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
+		list_del(jctx->atoms[atom_id].dep_head[1].next);
+
+	jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
+}
+
+static void kbasep_replay_create_atom(struct kbase_context *kctx,
+				      struct base_jd_atom_v2 *atom,
+				      int atom_nr,
+				      base_jd_prio prio)
+{
+	atom->nr_extres = 0;
+	atom->extres_list = 0;
+	atom->device_nr = 0;
+	atom->prio = prio;
+	atom->atom_number = atom_nr;
+
+	base_jd_atom_dep_set(&atom->pre_dep[0], 0, BASE_JD_DEP_TYPE_INVALID);
+	base_jd_atom_dep_set(&atom->pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
+
+	atom->udata.blob[0] = 0;
+	atom->udata.blob[1] = 0;
+}
+
+/**
+ * @brief Create two atoms for the purpose of replaying jobs
+ *
+ * Two atoms are allocated and created. The jc pointer is not set at this
+ * stage. The second atom has a dependency on the first. The remaining fields
+ * are set up as follows :
+ *
+ * - No external resources. Any required external resources will be held by the
+ *   replay atom.
+ * - device_nr is set to 0. This is not relevant as
+ *   BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
+ * - Priority is inherited from the replay job.
+ *
+ * @param[out] t_atom      Atom to use for tiler jobs
+ * @param[out] f_atom      Atom to use for fragment jobs
+ * @param[in]  prio        Priority of new atom (inherited from replay soft
+ *                         job)
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_create_atoms(struct kbase_context *kctx,
+		struct base_jd_atom_v2 *t_atom,
+		struct base_jd_atom_v2 *f_atom,
+		base_jd_prio prio)
+{
+	int t_atom_nr, f_atom_nr;
+
+	t_atom_nr = kbasep_allocate_katom(kctx);
+	if (t_atom_nr < 0) {
+		dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+		return -EINVAL;
+	}
+
+	f_atom_nr = kbasep_allocate_katom(kctx);
+	if (f_atom_nr < 0) {
+		dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
+		kbasep_release_katom(kctx, t_atom_nr);
+		return -EINVAL;
+	}
+
+	kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
+	kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
+
+	base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr,
+			     BASE_JD_DEP_TYPE_DATA);
+
+	return 0;
+}
+
+#ifdef CONFIG_MALI_DEBUG
+static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload)
+{
+	u64 next;
+
+	dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n");
+	next = payload->tiler_jc_list;
+
+	while (next) {
+		struct kbase_vmap_struct map;
+		base_jd_replay_jc *jc_struct;
+
+		jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map);
+
+		if (!jc_struct)
+			return;
+
+		dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n",
+				jc_struct, jc_struct->jc, jc_struct->next);
+
+		next = jc_struct->next;
+
+		kbase_vunmap(kctx, &map);
+	}
+}
+#endif
+
+/**
+ * @brief Parse a base_jd_replay_payload provided by userspace
+ *
+ * This will read the payload from userspace, and parse the job chains.
+ *
+ * @param[in] kctx         Context pointer
+ * @param[in] replay_atom  Replay soft job atom
+ * @param[in] t_atom       Atom to use for tiler jobs
+ * @param[in] f_atom       Atom to use for fragment jobs
+ * @return 0 on success, error code on failure
+ */
+static int kbasep_replay_parse_payload(struct kbase_context *kctx,
+					      struct kbase_jd_atom *replay_atom,
+					      struct base_jd_atom_v2 *t_atom,
+					      struct base_jd_atom_v2 *f_atom)
+{
+	base_jd_replay_payload *payload = NULL;
+	u64 next;
+	u64 prev_jc = 0;
+	u16 hw_job_id_offset = 0;
+	int ret = -EINVAL;
+	struct kbase_vmap_struct map;
+
+	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(payload) = %zu\n",
+			replay_atom->jc, sizeof(payload));
+
+	payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map);
+	if (!payload) {
+		dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
+	dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
+				  "tiler_jc_list            = %llx\n"
+				  "fragment_jc              = %llx\n"
+				  "tiler_heap_free          = %llx\n"
+				  "fragment_hierarchy_mask  = %x\n"
+				  "tiler_hierarchy_mask     = %x\n"
+				  "hierarchy_default_weight = %x\n"
+				  "tiler_core_req           = %x\n"
+				  "fragment_core_req        = %x\n",
+							payload->tiler_jc_list,
+							  payload->fragment_jc,
+						      payload->tiler_heap_free,
+					      payload->fragment_hierarchy_mask,
+						 payload->tiler_hierarchy_mask,
+					     payload->hierarchy_default_weight,
+						       payload->tiler_core_req,
+						   payload->fragment_core_req);
+	payload_dump(kctx, payload);
+#endif
+	t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER;
+	f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER;
+
+	/* Sanity check core requirements */
+	if ((t_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_T ||
+	    (f_atom->core_req & BASE_JD_REQ_ATOM_TYPE) != BASE_JD_REQ_FS ||
+	     t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
+	     f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
+
+		int t_atom_type = t_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP;
+		int f_atom_type = f_atom->core_req & BASE_JD_REQ_ATOM_TYPE & ~BASE_JD_REQ_COHERENT_GROUP & ~BASE_JD_REQ_FS_AFBC;
+		int t_has_ex_res = t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+		int f_has_ex_res = f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES;
+
+		if (t_atom_type != BASE_JD_REQ_T) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom not a tiler job. Was: 0x%x Expected: 0x%x\n",
+			    t_atom_type, BASE_JD_REQ_T);
+		}
+		if (f_atom_type != BASE_JD_REQ_FS) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom not a fragment shader. Was 0x%x Expected: 0x%x\n",
+			    f_atom_type, BASE_JD_REQ_FS);
+		}
+		if (t_has_ex_res) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Tiler atom has external resources.\n");
+		}
+		if (f_has_ex_res) {
+			dev_err(kctx->kbdev->dev, "Invalid core requirement: Fragment shader atom has external resources.\n");
+		}
+
+		goto out;
+	}
+
+	/* Process tiler job chains */
+	next = payload->tiler_jc_list;
+	if (!next) {
+		dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n");
+		goto out;
+	}
+
+	while (next) {
+		base_jd_replay_jc *jc_struct;
+		struct kbase_vmap_struct jc_map;
+		u64 jc;
+
+		jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map);
+
+		if (!jc_struct) {
+			dev_err(kctx->kbdev->dev, "Failed to map jc struct\n");
+			goto out;
+		}
+
+		jc = jc_struct->jc;
+		next = jc_struct->next;
+		if (next)
+			jc_struct->jc = 0;
+
+		kbase_vunmap(kctx, &jc_map);
+
+		if (jc) {
+			u16 max_hw_job_id = 0;
+
+			if (kbasep_replay_find_hw_job_id(kctx, jc,
+					&max_hw_job_id) != 0)
+				goto out;
+
+			if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
+					payload->tiler_heap_free,
+					payload->tiler_hierarchy_mask,
+					payload->hierarchy_default_weight,
+					hw_job_id_offset, false) != 0) {
+				goto out;
+			}
+
+			hw_job_id_offset += max_hw_job_id;
+
+			prev_jc = jc;
+		}
+	}
+	t_atom->jc = prev_jc;
+
+	/* Process fragment job chain */
+	f_atom->jc = payload->fragment_jc;
+	if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
+			payload->tiler_heap_free,
+			payload->fragment_hierarchy_mask,
+			payload->hierarchy_default_weight, 0,
+			true) != 0) {
+		goto out;
+	}
+
+	if (!t_atom->jc || !f_atom->jc) {
+		dev_err(kctx->kbdev->dev, "Invalid payload\n");
+		goto out;
+	}
+
+	dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
+			t_atom->jc, f_atom->jc);
+	ret = 0;
+
+out:
+	kbase_vunmap(kctx, &map);
+
+	return ret;
+}
+
+static void kbase_replay_process_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom;
+	struct kbase_context *kctx;
+	struct kbase_jd_context *jctx;
+	bool need_to_try_schedule_context = false;
+
+	struct base_jd_atom_v2 t_atom, f_atom;
+	struct kbase_jd_atom *t_katom, *f_katom;
+	base_jd_prio atom_prio;
+
+	katom = container_of(data, struct kbase_jd_atom, work);
+	kctx = katom->kctx;
+	jctx = &kctx->jctx;
+
+	mutex_lock(&jctx->lock);
+
+	atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority);
+
+	if (kbasep_replay_create_atoms(
+			kctx, &t_atom, &f_atom, atom_prio) != 0) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+
+	t_katom = &jctx->atoms[t_atom.atom_number];
+	f_katom = &jctx->atoms[f_atom.atom_number];
+
+	if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) {
+		kbasep_release_katom(kctx, t_atom.atom_number);
+		kbasep_release_katom(kctx, f_atom.atom_number);
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+
+	kbasep_replay_reset_softjob(katom, f_katom);
+
+	need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
+	if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+		dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+		kbasep_release_katom(kctx, f_atom.atom_number);
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+	need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
+	if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
+		dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		goto out;
+	}
+
+	katom->event_code = BASE_JD_EVENT_DONE;
+
+out:
+	if (katom->event_code != BASE_JD_EVENT_DONE) {
+		kbase_disjoint_state_down(kctx->kbdev);
+
+		need_to_try_schedule_context |= jd_done_nolock(katom, NULL);
+	}
+
+	if (need_to_try_schedule_context)
+		kbase_js_sched_all(kctx->kbdev);
+
+	mutex_unlock(&jctx->lock);
+}
+
+/**
+ * @brief Check job replay fault
+ *
+ * This will read the job payload, checks fault type and source, then decides
+ * whether replay is required.
+ *
+ * @param[in] katom       The atom to be processed
+ * @return  true (success) if replay required or false on failure.
+ */
+static bool kbase_replay_fault_check(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct device *dev = kctx->kbdev->dev;
+	base_jd_replay_payload *payload;
+	u64 job_header;
+	u64 job_loop_detect;
+	struct job_descriptor_header *job;
+	struct kbase_vmap_struct job_map;
+	struct kbase_vmap_struct map;
+	bool err = false;
+
+	/* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
+	 * if force_replay is enabled.
+	 */
+	if (BASE_JD_EVENT_TERMINATED == katom->event_code) {
+		return false;
+	} else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) {
+		return true;
+	} else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) {
+		katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
+		return true;
+	} else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
+		/* No replay for faults of type other than
+		 * BASE_JD_EVENT_DATA_INVALID_FAULT.
+		 */
+		return false;
+	}
+
+	/* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc
+	 * to find out whether the source of exception is POLYGON_LIST. Replay
+	 * is required if the source of fault is POLYGON_LIST.
+	 */
+	payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map);
+	if (!payload) {
+		dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n");
+		return false;
+	}
+
+#ifdef CONFIG_MALI_DEBUG
+	dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload);
+	dev_dbg(dev, "\nPayload structure:\n"
+		     "fragment_jc              = 0x%llx\n"
+		     "fragment_hierarchy_mask  = 0x%x\n"
+		     "fragment_core_req        = 0x%x\n",
+		     payload->fragment_jc,
+		     payload->fragment_hierarchy_mask,
+		     payload->fragment_core_req);
+#endif
+	/* Process fragment job chain */
+	job_header      = (u64) payload->fragment_jc;
+	job_loop_detect = job_header;
+	while (job_header) {
+		job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map);
+		if (!job) {
+			dev_err(dev, "failed to map jc\n");
+			/* unmap payload */
+			kbase_vunmap(kctx, &map);
+			return false;
+		}
+
+		dump_job_head(kctx, "\njob_head structure:\n", job);
+
+		/* Replay only when the polygon list reader caused the
+		 * DATA_INVALID_FAULT */
+		if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) &&
+		   (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->exception_status))) {
+			err = true;
+			kbase_vunmap(kctx, &job_map);
+			break;
+		}
+
+		/* Move on to next fragment job in the list */
+		if (job->job_descriptor_size)
+			job_header = job->next_job._64;
+		else
+			job_header = job->next_job._32;
+
+		kbase_vunmap(kctx, &job_map);
+
+		/* Job chain loop detected */
+		if (job_header == job_loop_detect)
+			break;
+	}
+
+	/* unmap payload */
+	kbase_vunmap(kctx, &map);
+
+	return err;
+}
+
+
+/**
+ * @brief Process a replay job
+ *
+ * Called from kbase_process_soft_job.
+ *
+ * On exit, if the job has completed, katom->event_code will have been updated.
+ * If the job has not completed, and is replaying jobs, then the atom status
+ * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * @param[in] katom  The atom to be processed
+ * @return           false if the atom has completed
+ *                   true if the atom is replaying jobs
+ */
+bool kbase_replay_process(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct kbase_device *kbdev = kctx->kbdev;
+
+	/* Don't replay this atom if these issues are not present in the
+	 * hardware */
+	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11020) &&
+			!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11024)) {
+		dev_dbg(kbdev->dev, "Hardware does not need replay workaround");
+
+		/* Signal failure to userspace */
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+
+		return false;
+	}
+
+	if (katom->event_code == BASE_JD_EVENT_DONE) {
+		dev_dbg(kbdev->dev, "Previous job succeeded - not replaying\n");
+
+		if (katom->retry_count)
+			kbase_disjoint_state_down(kbdev);
+
+		return false;
+	}
+
+	if (kbase_ctx_flag(kctx, KCTX_DYING)) {
+		dev_dbg(kbdev->dev, "Not replaying; context is dying\n");
+
+		if (katom->retry_count)
+			kbase_disjoint_state_down(kbdev);
+
+		return false;
+	}
+
+	/* Check job exception type and source before replaying. */
+	if (!kbase_replay_fault_check(katom)) {
+		dev_dbg(kbdev->dev,
+			"Replay cancelled on event %x\n", katom->event_code);
+		/* katom->event_code is already set to the failure code of the
+		 * previous job.
+		 */
+		return false;
+	}
+
+	dev_warn(kbdev->dev, "Replaying jobs retry=%d\n",
+			katom->retry_count);
+
+	katom->retry_count++;
+
+	if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
+		dev_err(kbdev->dev, "Replay exceeded limit - failing jobs\n");
+
+		kbase_disjoint_state_down(kbdev);
+
+		/* katom->event_code is already set to the failure code of the
+		 * previous job.
+		 */
+		return false;
+	}
+
+	/* Only enter the disjoint state once for the whole duration of the
+	 * replay.
+	 */
+	if (katom->retry_count == 1)
+		kbase_disjoint_state_up(kbdev);
+
+	INIT_WORK(&katom->work, kbase_replay_process_worker);
+	queue_work(kctx->event_workq, &katom->work);
+
+	return true;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_smc.c b/drivers/gpu/arm/midgard/mali_kbase_smc.c
new file mode 100644
index 0000000..a5e4e1d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_smc.c
@@ -0,0 +1,80 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+#include <mali_kbase_smc.h>
+
+#include <linux/compiler.h>
+
+static noinline u64 invoke_smc_fid(u64 function_id,
+		u64 arg0, u64 arg1, u64 arg2)
+{
+	register u64 x0 asm("x0") = function_id;
+	register u64 x1 asm("x1") = arg0;
+	register u64 x2 asm("x2") = arg1;
+	register u64 x3 asm("x3") = arg2;
+
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+	asm volatile(
+			__asmeq("%0", "x0")
+			__asmeq("%1", "x1")
+			__asmeq("%2", "x2")
+			__asmeq("%3", "x3")
+			"smc    #0\n"
+			: "+r" (x0)
+			: "r" (x1), "r" (x2), "r" (x3));
+#undef __asmeq
+	return x0;
+}
+
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2)
+{
+	/* Is fast call (bit 31 set) */
+	KBASE_DEBUG_ASSERT(fid & ~SMC_FAST_CALL);
+	/* bits 16-23 must be zero for fast calls */
+	KBASE_DEBUG_ASSERT((fid & (0xFF << 16)) == 0);
+
+	return invoke_smc_fid(fid, arg0, arg1, arg2);
+}
+
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+		u64 arg0, u64 arg1, u64 arg2)
+{
+	u32 fid = 0;
+
+	/* Only the six OEN field bits are allowed to be set. */
+	KBASE_DEBUG_ASSERT((oen & ~SMC_OEN_MASK) == 0);
+
+	fid |= SMC_FAST_CALL; /* Bit 31: Fast call */
+	if (smc64)
+		fid |= SMC_64; /* Bit 30: 1=SMC64, 0=SMC32 */
+	fid |= oen; /* Bits 29:24: OEN */
+	/* Bits 23:16: Must be zero for fast calls */
+	fid |= (function_number); /* Bits 15:0: function number */
+
+	return kbase_invoke_smc_fid(fid, arg0, arg1, arg2);
+}
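+
+/*
+ * Worked example of the function ID composition above (illustrative only;
+ * the function number 0x10 is a hypothetical SiP service, not one this
+ * driver actually issues):
+ *
+ *	kbase_invoke_smc(SMC_OEN_SIP, 0x10, true, 0, 0, 0)
+ *
+ * builds fid = SMC_FAST_CALL | SMC_64 | SMC_OEN_SIP | 0x10
+ *            = 0x80000000 | 0x40000000 | 0x02000000 | 0x10 = 0xC2000010,
+ * which is then passed to invoke_smc_fid() in register x0.
+ */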
+
+#endif /* CONFIG_ARM64 */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_smc.h b/drivers/gpu/arm/midgard/mali_kbase_smc.h
new file mode 100644
index 0000000..221eb21
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_smc.h
@@ -0,0 +1,72 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_SMC_H_
+#define _KBASE_SMC_H_
+
+#ifdef CONFIG_ARM64
+
+#include <mali_kbase.h>
+
+#define SMC_FAST_CALL (1 << 31)
+#define SMC_64 (1 << 30)
+
+#define SMC_OEN_OFFSET 24
+#define SMC_OEN_MASK (0x3F << SMC_OEN_OFFSET) /* 6 bits */
+#define SMC_OEN_SIP (2 << SMC_OEN_OFFSET)
+#define SMC_OEN_STD (4 << SMC_OEN_OFFSET)
+
+
+/**
+  * kbase_invoke_smc_fid - Perform a secure monitor call
+  * @fid: The SMC function to call, see SMC Calling convention.
+  * @arg0: First argument to the SMC.
+  * @arg1: Second argument to the SMC.
+  * @arg2: Third argument to the SMC.
+  *
+  * See SMC Calling Convention for details.
+  *
+  * Return: the return value from the SMC.
+  */
+u64 kbase_invoke_smc_fid(u32 fid, u64 arg0, u64 arg1, u64 arg2);
+
+/**
+  * kbase_invoke_smc - Perform a secure monitor call
+  * @oen: Owning Entity number (SIP, STD etc).
+  * @function_number: The function number within the OEN.
+  * @smc64: use SMC64 calling convention instead of SMC32.
+  * @arg0: First argument to the SMC.
+  * @arg1: Second argument to the SMC.
+  * @arg2: Third argument to the SMC.
+  *
+  * See SMC Calling Convention for details.
+  *
+  * Return: the return value from the SMC call.
+  */
+u64 kbase_invoke_smc(u32 oen, u16 function_number, bool smc64,
+		u64 arg0, u64 arg1, u64 arg2);
+
+#endif /* CONFIG_ARM64 */
+
+#endif /* _KBASE_SMC_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
new file mode 100644
index 0000000..16f6ccb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
@@ -0,0 +1,1687 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#include <asm/cacheflush.h>
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <mali_kbase_sync.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <mali_base_kernel.h>
+#include <mali_kbase_hwaccess_time.h>
+#include <mali_kbase_mem_linux.h>
+#include <mali_kbase_tlstream.h>
+#include <linux/version.h>
+#include <linux/ktime.h>
+#include <linux/pfn.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+
+/**
+ * @file mali_kbase_softjobs.c
+ *
+ * This file implements the logic behind software-only jobs that are
+ * executed within the driver rather than being handed over to the GPU.
+ */
+
+static void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	unsigned long lflags;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	unsigned long lflags;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_del(&katom->queue);
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Record the start time of this atom so we can cancel it at
+	 * the right time.
+	 */
+	katom->start_timestamp = ktime_get();
+
+	/* Add the atom to the waiting list before the timer is
+	 * (re)started to make sure that it gets processed.
+	 */
+	kbasep_add_waiting_soft_job(katom);
+
+	/* Schedule the soft-job timeout timer if it is not already pending */
+	if (!timer_pending(&kctx->soft_job_timeout)) {
+		int timeout_ms = atomic_read(
+				&kctx->kbdev->js_data.soft_job_timeout_ms);
+		mod_timer(&kctx->soft_job_timeout,
+			  jiffies + msecs_to_jiffies(timeout_ms));
+	}
+}
+
+static int kbasep_read_soft_event_status(
+		struct kbase_context *kctx, u64 evt, unsigned char *status)
+{
+	unsigned char *mapped_evt;
+	struct kbase_vmap_struct map;
+
+	mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+	if (!mapped_evt)
+		return -EFAULT;
+
+	*status = *mapped_evt;
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+static int kbasep_write_soft_event_status(
+		struct kbase_context *kctx, u64 evt, unsigned char new_status)
+{
+	unsigned char *mapped_evt;
+	struct kbase_vmap_struct map;
+
+	if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
+	    (new_status != BASE_JD_SOFT_EVENT_RESET))
+		return -EINVAL;
+
+	mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+	if (!mapped_evt)
+		return -EFAULT;
+
+	*mapped_evt = new_status;
+
+	kbase_vunmap(kctx, &map);
+
+	return 0;
+}
+
+static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
+{
+	struct kbase_vmap_struct map;
+	void *user_result;
+	struct timespec64 ts;
+	struct base_dump_cpu_gpu_counters data;
+	u64 system_time;
+	u64 cycle_counter;
+	u64 jc = katom->jc;
+	struct kbase_context *kctx = katom->kctx;
+	int pm_active_err;
+
+	memset(&data, 0, sizeof(data));
+
+	/* Take the PM active reference as late as possible - otherwise, it could
+	 * delay suspend until we process the atom (which may be at the end of a
+	 * long chain of dependencies).
+	 */
+	pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
+	if (pm_active_err) {
+		struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
+
+		/* We're suspended - queue this on the list of suspended jobs
+		 * Use dep_item[1], because dep_item[0] was previously in use
+		 * for 'waiting_soft_jobs'.
+		 */
+		mutex_lock(&js_devdata->runpool_mutex);
+		list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
+		mutex_unlock(&js_devdata->runpool_mutex);
+
+		/* Also add this to the list of waiting soft jobs */
+		kbasep_add_waiting_soft_job(katom);
+
+		return pm_active_err;
+	}
+
+	kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
+									&ts);
+
+	kbase_pm_context_idle(kctx->kbdev);
+
+	data.sec = ts.tv_sec;
+	data.usec = ts.tv_nsec / 1000;
+	data.system_time = system_time;
+	data.cycle_counter = cycle_counter;
+
+	/* Assume this atom will be cancelled until we know otherwise */
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	/* GPU_WR access is checked on the range for returning the result to
+	 * userspace for the following reasons:
+	 * - security: this is currently how imported user buffers are checked.
+	 * - the userspace DDK is guaranteed to have mapped the region as GPU_WR.
+	 */
+	user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
+	if (!user_result)
+		return 0;
+
+	memcpy(user_result, &data, sizeof(data));
+
+	kbase_vunmap(kctx, &map);
+
+	/* Atom was fine - mark it as done */
+	katom->event_code = BASE_JD_EVENT_DONE;
+
+	return 0;
+}
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/* Called by the explicit fence mechanism when a fence wait has completed */
+void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+
+	mutex_lock(&kctx->jctx.lock);
+	kbasep_remove_waiting_soft_job(katom);
+	kbase_finish_soft_job(katom);
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(kctx->kbdev);
+	mutex_unlock(&kctx->jctx.lock);
+}
+#endif
+
+static void kbasep_soft_event_complete_job(struct work_struct *work)
+{
+	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
+			work);
+	struct kbase_context *kctx = katom->kctx;
+	int resched;
+
+	mutex_lock(&kctx->jctx.lock);
+	resched = jd_done_nolock(katom, NULL);
+	mutex_unlock(&kctx->jctx.lock);
+
+	if (resched)
+		kbase_js_sched_all(kctx->kbdev);
+}
+
+void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
+{
+	int cancel_timer = 1;
+	struct list_head *entry, *tmp;
+	unsigned long lflags;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+		struct kbase_jd_atom *katom = list_entry(
+				entry, struct kbase_jd_atom, queue);
+
+		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+		case BASE_JD_REQ_SOFT_EVENT_WAIT:
+			if (katom->jc == evt) {
+				list_del(&katom->queue);
+
+				katom->event_code = BASE_JD_EVENT_DONE;
+				INIT_WORK(&katom->work,
+					  kbasep_soft_event_complete_job);
+				queue_work(kctx->jctx.job_done_wq,
+					   &katom->work);
+			} else {
+				/* There are still other waiting jobs, we cannot
+				 * cancel the timer yet.
+				 */
+				cancel_timer = 0;
+			}
+			break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+		case BASE_JD_REQ_SOFT_FENCE_WAIT:
+			/* Keep the timer running if fence debug is enabled and
+			 * there are waiting fence jobs.
+			 */
+			cancel_timer = 0;
+			break;
+#endif
+		}
+	}
+
+	if (cancel_timer)
+		del_timer(&kctx->soft_job_timeout);
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct device *dev = kctx->kbdev->dev;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		struct kbase_jd_atom *dep;
+
+		list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
+			if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
+			    dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
+				continue;
+
+			if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
+					== BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
+				/* Found blocked trigger fence. */
+				struct kbase_sync_fence_info info;
+
+				if (!kbase_sync_fence_in_info_get(dep, &info)) {
+					dev_warn(dev,
+						 "\tVictim trigger atom %d fence [%p] %s: %s\n",
+						 kbase_jd_atom_id(kctx, dep),
+						 info.fence,
+						 info.name,
+						 kbase_sync_status_string(info.status));
+				}
+			}
+
+			kbase_fence_debug_check_atom(dep);
+		}
+	}
+}
+
+static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct device *dev = katom->kctx->kbdev->dev;
+	int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
+	unsigned long lflags;
+	struct kbase_sync_fence_info info;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+
+	if (kbase_sync_fence_in_info_get(katom, &info)) {
+		/* Fence must have signaled just after timeout. */
+		spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+		return;
+	}
+
+	dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
+		 kctx->tgid, kctx->id,
+		 kbase_jd_atom_id(kctx, katom),
+		 info.fence, timeout_ms);
+	dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
+		 info.fence, info.name,
+		 kbase_sync_status_string(info.status));
+
+	/* Search for blocked trigger atoms */
+	kbase_fence_debug_check_atom(katom);
+
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+
+	kbase_sync_fence_in_dump(katom);
+}
+
+struct kbase_fence_debug_work {
+	struct kbase_jd_atom *katom;
+	struct work_struct work;
+};
+
+static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
+{
+	struct kbase_fence_debug_work *w = container_of(work,
+			struct kbase_fence_debug_work, work);
+	struct kbase_jd_atom *katom = w->katom;
+	struct kbase_context *kctx = katom->kctx;
+
+	mutex_lock(&kctx->jctx.lock);
+	kbase_fence_debug_wait_timeout(katom);
+	mutex_unlock(&kctx->jctx.lock);
+
+	kfree(w);
+}
+
+static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
+{
+	struct kbase_fence_debug_work *work;
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Enqueue fence debug worker. Use job_done_wq to get
+	 * debug print ordered with job completion.
+	 */
+	work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
+	/* Ignore allocation failure. */
+	if (work) {
+		work->katom = katom;
+		INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
+		queue_work(kctx->jctx.job_done_wq, &work->work);
+	}
+}
+#endif /* CONFIG_MALI_FENCE_DEBUG */
+
+void kbasep_soft_job_timeout_worker(struct timer_list *timer)
+{
+	struct kbase_context *kctx = container_of(timer, struct kbase_context,
+			soft_job_timeout);
+	u32 timeout_ms = (u32)atomic_read(
+			&kctx->kbdev->js_data.soft_job_timeout_ms);
+	ktime_t cur_time = ktime_get();
+	bool restarting = false;
+	unsigned long lflags;
+	struct list_head *entry, *tmp;
+
+	spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
+	list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+		struct kbase_jd_atom *katom = list_entry(entry,
+				struct kbase_jd_atom, queue);
+		s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
+					katom->start_timestamp));
+
+		if (elapsed_time < (s64)timeout_ms) {
+			restarting = true;
+			continue;
+		}
+
+		switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+		case BASE_JD_REQ_SOFT_EVENT_WAIT:
+			/* Take it out of the list to ensure that it
+			 * will be cancelled in all cases
+			 */
+			list_del(&katom->queue);
+
+			katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+			INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
+			queue_work(kctx->jctx.job_done_wq, &katom->work);
+			break;
+#ifdef CONFIG_MALI_FENCE_DEBUG
+		case BASE_JD_REQ_SOFT_FENCE_WAIT:
+			kbase_fence_debug_timeout(katom);
+			break;
+#endif
+		}
+	}
+
+	if (restarting)
+		mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
+	spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
+}
+
+static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	unsigned char status;
+
+	/* The status of this soft-job is stored in jc */
+	if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		return 0;
+	}
+
+	if (status == BASE_JD_SOFT_EVENT_SET)
+		return 0; /* Event already set, nothing to do */
+
+	kbasep_add_waiting_with_timeout(katom);
+
+	return 1;
+}
+
+static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
+				     unsigned char new_status)
+{
+	/* Complete jobs waiting on the same event */
+	struct kbase_context *kctx = katom->kctx;
+
+	if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		return;
+	}
+
+	if (new_status == BASE_JD_SOFT_EVENT_SET)
+		kbasep_complete_triggered_soft_events(kctx, katom->jc);
+}
+
+/**
+ * kbase_soft_event_update() - Update soft event state
+ * @kctx: Pointer to context
+ * @event: Event to update
+ * @new_status: New status value of event
+ *
+ * Update the event, and wake up any atoms waiting for the event.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int kbase_soft_event_update(struct kbase_context *kctx,
+			     u64 event,
+			     unsigned char new_status)
+{
+	int err = 0;
+
+	mutex_lock(&kctx->jctx.lock);
+
+	if (kbasep_write_soft_event_status(kctx, event, new_status)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (new_status == BASE_JD_SOFT_EVENT_SET)
+		kbasep_complete_triggered_soft_events(kctx, event);
+
+out:
+	mutex_unlock(&kctx->jctx.lock);
+
+	return err;
+}
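+
+/*
+ * Minimal usage sketch (illustrative only; the event address is a
+ * placeholder, in practice it comes from a userspace-supplied atom):
+ *
+ *	u64 evt = gpu_va_of_event_byte;
+ *
+ *	if (kbase_soft_event_update(kctx, evt, BASE_JD_SOFT_EVENT_SET))
+ *		dev_warn(kctx->kbdev->dev, "soft event %llx not mapped\n", evt);
+ *
+ * Setting the event to BASE_JD_SOFT_EVENT_SET completes every
+ * BASE_JD_REQ_SOFT_EVENT_WAIT atom currently queued on that address.
+ */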
+
+static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
+{
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
+{
+	struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
+	unsigned int i;
+	unsigned int nr = katom->nr_extres;
+
+	if (!buffers)
+		return;
+
+	kbase_gpu_vm_lock(katom->kctx);
+	for (i = 0; i < nr; i++) {
+		int p;
+		struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
+
+		if (!buffers[i].pages)
+			break;
+		for (p = 0; p < buffers[i].nr_pages; p++) {
+			struct page *pg = buffers[i].pages[p];
+
+			if (pg)
+				put_page(pg);
+		}
+		if (buffers[i].is_vmalloc)
+			vfree(buffers[i].pages);
+		else
+			kfree(buffers[i].pages);
+		if (gpu_alloc) {
+			switch (gpu_alloc->type) {
+			case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+			{
+				kbase_free_user_buffer(&buffers[i]);
+				break;
+			}
+			default:
+				/* Nothing to be done. */
+				break;
+			}
+			kbase_mem_phy_alloc_put(gpu_alloc);
+		}
+	}
+	kbase_gpu_vm_unlock(katom->kctx);
+	kfree(buffers);
+
+	katom->softjob_data = NULL;
+}
+
+static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
+{
+	struct kbase_debug_copy_buffer *buffers;
+	struct base_jd_debug_copy_buffer *user_buffers = NULL;
+	unsigned int i;
+	unsigned int nr = katom->nr_extres;
+	int ret = 0;
+	void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
+
+	if (!user_structs)
+		return -EINVAL;
+
+	buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
+	if (!buffers) {
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+	katom->softjob_data = buffers;
+
+	user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
+
+	if (!user_buffers) {
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = copy_from_user(user_buffers, user_structs,
+			sizeof(*user_buffers)*nr);
+	if (ret) {
+		ret = -EFAULT;
+		goto out_cleanup;
+	}
+
+	for (i = 0; i < nr; i++) {
+		u64 addr = user_buffers[i].address;
+		u64 page_addr = addr & PAGE_MASK;
+		u64 end_page_addr = addr + user_buffers[i].size - 1;
+		u64 last_page_addr = end_page_addr & PAGE_MASK;
+		int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
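+		/*
+		 * Illustrative example of the page-count arithmetic (values
+		 * are hypothetical, assuming 4 KiB pages): addr = 0x10000804,
+		 * size = 0x3000 gives page_addr = 0x10000000, end_page_addr =
+		 * 0x10003803, last_page_addr = 0x10003000, so nr_pages = 4
+		 * CPU pages to pin.
+		 */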
+		int pinned_pages;
+		struct kbase_va_region *reg;
+		struct base_external_resource user_extres;
+
+		if (!addr)
+			continue;
+
+		if (last_page_addr < page_addr) {
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+
+		buffers[i].nr_pages = nr_pages;
+		buffers[i].offset = addr & ~PAGE_MASK;
+		if (buffers[i].offset >= PAGE_SIZE) {
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+		buffers[i].size = user_buffers[i].size;
+
+		if (nr_pages > (KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD /
+				sizeof(struct page *))) {
+			buffers[i].is_vmalloc = true;
+			buffers[i].pages = vzalloc(nr_pages *
+					sizeof(struct page *));
+		} else {
+			buffers[i].is_vmalloc = false;
+			buffers[i].pages = kcalloc(nr_pages,
+					sizeof(struct page *), GFP_KERNEL);
+		}
+
+		if (!buffers[i].pages) {
+			ret = -ENOMEM;
+			goto out_cleanup;
+		}
+
+		pinned_pages = get_user_pages_fast(page_addr,
+					nr_pages,
+					1, /* Write */
+					buffers[i].pages);
+		if (pinned_pages < 0) {
+			ret = pinned_pages;
+			goto out_cleanup;
+		}
+		if (pinned_pages != nr_pages) {
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+
+		user_extres = user_buffers[i].extres;
+		if (user_extres.ext_resource == 0ULL) {
+			ret = -EINVAL;
+			goto out_cleanup;
+		}
+
+		kbase_gpu_vm_lock(katom->kctx);
+		reg = kbase_region_tracker_find_region_enclosing_address(
+				katom->kctx, user_extres.ext_resource &
+				~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+
+		if (NULL == reg || NULL == reg->gpu_alloc ||
+				(reg->flags & KBASE_REG_FREE)) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+		buffers[i].nr_extres_pages = reg->nr_pages;
+
+		if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
+			dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of same size as the external resource to copy.\n");
+
+		switch (reg->gpu_alloc->type) {
+		case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+		{
+			struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+			unsigned long nr_pages =
+				alloc->imported.user_buf.nr_pages;
+
+			if (alloc->imported.user_buf.mm != current->mm) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+			buffers[i].extres_pages = kcalloc(nr_pages,
+					sizeof(struct page *), GFP_KERNEL);
+			if (!buffers[i].extres_pages) {
+				ret = -ENOMEM;
+				goto out_unlock;
+			}
+
+			ret = get_user_pages_fast(
+					alloc->imported.user_buf.address,
+					nr_pages, 0,
+					buffers[i].extres_pages);
+			if (ret != nr_pages)
+				goto out_unlock;
+			ret = 0;
+			break;
+		}
+		default:
+			/* Nothing to be done. */
+			break;
+		}
+		kbase_gpu_vm_unlock(katom->kctx);
+	}
+	kfree(user_buffers);
+
+	return ret;
+
+out_unlock:
+	kbase_gpu_vm_unlock(katom->kctx);
+
+out_cleanup:
+	/* Frees the memory allocated for the kbase_debug_copy_buffer array,
+	 * including its members, and clears katom->softjob_data.
+	 */
+	kbase_debug_copy_finish(katom);
+	kfree(user_buffers);
+
+	return ret;
+}
+
+void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+		void *extres_page, struct page **pages, unsigned int nr_pages,
+		unsigned int *target_page_nr, size_t offset, size_t *to_copy)
+{
+	void *target_page = kmap(pages[*target_page_nr]);
+	size_t chunk = PAGE_SIZE-offset;
+
+	lockdep_assert_held(&kctx->reg_lock);
+
+	if (!target_page) {
+		*target_page_nr += 1;
+		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+		return;
+	}
+
+	chunk = min(chunk, *to_copy);
+
+	memcpy(target_page + offset, extres_page, chunk);
+	*to_copy -= chunk;
+
+	kunmap(pages[*target_page_nr]);
+
+	*target_page_nr += 1;
+	if (*target_page_nr >= nr_pages)
+		return;
+
+	target_page = kmap(pages[*target_page_nr]);
+	if (!target_page) {
+		*target_page_nr += 1;
+		dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
+		return;
+	}
+
+	KBASE_DEBUG_ASSERT(target_page);
+
+	chunk = min(offset, *to_copy);
+	memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
+	*to_copy -= chunk;
+
+	kunmap(pages[*target_page_nr]);
+}
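+
+/*
+ * Illustrative example of the split copy above (offset value is
+ * hypothetical): with offset = 0x100 and enough data left to copy, the
+ * first memcpy() writes PAGE_SIZE - 0x100 bytes at offset 0x100 of the
+ * current destination page, and the second memcpy() writes the remaining
+ * 0x100 bytes of the source page to the start of the next destination
+ * page. The real offset always comes from buf_data->offset in
+ * kbase_mem_copy_from_extres().
+ */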
+
+int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+		struct kbase_debug_copy_buffer *buf_data)
+{
+	unsigned int i;
+	unsigned int target_page_nr = 0;
+	struct page **pages = buf_data->pages;
+	u64 offset = buf_data->offset;
+	size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
+	size_t to_copy = min(extres_size, buf_data->size);
+	struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
+	int ret = 0;
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	size_t dma_to_copy;
+#endif
+
+	KBASE_DEBUG_ASSERT(pages != NULL);
+
+	kbase_gpu_vm_lock(kctx);
+	if (!gpu_alloc) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	switch (gpu_alloc->type) {
+	case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
+	{
+		for (i = 0; i < buf_data->nr_extres_pages; i++) {
+			struct page *pg = buf_data->extres_pages[i];
+			void *extres_page = kmap(pg);
+
+			if (extres_page)
+				kbase_mem_copy_from_extres_page(kctx,
+						extres_page, pages,
+						buf_data->nr_pages,
+						&target_page_nr,
+						offset, &to_copy);
+
+			kunmap(pg);
+			if (target_page_nr >= buf_data->nr_pages)
+				break;
+		}
+		break;
+	}
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	case KBASE_MEM_TYPE_IMPORTED_UMM: {
+		struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
+		void *dma_kernel;
+
+		KBASE_DEBUG_ASSERT(dma_buf != NULL);
+		if (dma_buf->size > buf_data->nr_extres_pages * PAGE_SIZE)
+			dev_warn(kctx->kbdev->dev, "External resources buffer size mismatch");
+
+		dma_to_copy = min(dma_buf->size,
+			(size_t)(buf_data->nr_extres_pages * PAGE_SIZE));
+		ret = dma_buf_begin_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+				0, dma_to_copy,
+#endif
+				DMA_FROM_DEVICE);
+		if (ret)
+			goto out_unlock;
+
+		dma_kernel = dma_buf_vmap(dma_buf);
+		for (i = 0; i < dma_to_copy/PAGE_SIZE; i++) {
+			void *extres_page = dma_kernel + i * PAGE_SIZE;
+
+			if (extres_page)
+				kbase_mem_copy_from_extres_page(kctx,
+						extres_page, pages,
+						buf_data->nr_pages,
+						&target_page_nr,
+						offset, &to_copy);
+
+			if (target_page_nr >= buf_data->nr_pages)
+				break;
+		}
+		dma_buf_vunmap(dma_buf, dma_kernel);
+		dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
+				0, dma_to_copy,
+#endif
+				DMA_FROM_DEVICE);
+		break;
+	}
+#endif
+	default:
+		ret = -EINVAL;
+	}
+out_unlock:
+	kbase_gpu_vm_unlock(kctx);
+	return ret;
+}
+
+static int kbase_debug_copy(struct kbase_jd_atom *katom)
+{
+	struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
+	unsigned int i;
+
+	if (WARN_ON(!buffers))
+		return -EINVAL;
+
+	for (i = 0; i < katom->nr_extres; i++) {
+		int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
+
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
+#define KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT ((u32)0x7)
+
+int kbasep_jit_alloc_validate(struct kbase_context *kctx,
+					struct base_jit_alloc_info *info)
+{
+	/* If the ID is zero, then fail the job */
+	if (info->id == 0)
+		return -EINVAL;
+
+	/* Sanity check that the PA fits within the VA */
+	if (info->va_pages < info->commit_pages)
+		return -EINVAL;
+
+	/* Ensure the GPU address is correctly aligned */
+	if ((info->gpu_alloc_addr & KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT) != 0)
+		return -EINVAL;
+
+	if (kctx->jit_version == 1) {
+		/* Old JIT didn't have usage_id, max_allocations, bin_id
+		 * or padding, so force them to zero
+		 */
+		info->usage_id = 0;
+		info->max_allocations = 0;
+		info->bin_id = 0;
+		info->flags = 0;
+		memset(info->padding, 0, sizeof(info->padding));
+	} else {
+		int j;
+
+		/* Check padding is all zeroed */
+		for (j = 0; j < sizeof(info->padding); j++) {
+			if (info->padding[j] != 0) {
+				return -EINVAL;
+			}
+		}
+
+		/* No bit other than TILER_ALIGN_TOP shall be set */
+		if (info->flags & ~BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP) {
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
+{
+	__user void *data = (__user void *)(uintptr_t) katom->jc;
+	struct base_jit_alloc_info *info;
+	struct kbase_context *kctx = katom->kctx;
+	u32 count;
+	int ret;
+	u32 i;
+
+	/* For backwards compatibility */
+	if (katom->nr_extres == 0)
+		katom->nr_extres = 1;
+	count = katom->nr_extres;
+
+	/* Sanity checks */
+	if (!data || count > kctx->jit_max_allocations ||
+			count > ARRAY_SIZE(kctx->jit_alloc)) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Copy the information for safe access and future storage */
+	info = kmalloc_array(count, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	if (copy_from_user(info, data, sizeof(*info)*count) != 0) {
+		ret = -EINVAL;
+		goto free_info;
+	}
+	katom->softjob_data = info;
+
+	for (i = 0; i < count; i++, info++) {
+		ret = kbasep_jit_alloc_validate(kctx, info);
+		if (ret)
+			goto free_info;
+		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO(katom,
+			info->va_pages, info->commit_pages, info->extent,
+			info->id, info->bin_id, info->max_allocations,
+			info->flags, info->usage_id);
+	}
+
+	katom->jit_blocked = false;
+
+	lockdep_assert_held(&kctx->jctx.lock);
+	list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
+
+	/*
+	 * Note:
+	 * The provided info->gpu_alloc_addr isn't validated here as
+	 * userland can cache allocations which means that even
+	 * though the region is valid it doesn't represent the
+	 * same thing it used to.
+	 *
+	 * Complete validation of va_pages, commit_pages and extent
+	 * isn't done here as it will be done during the call to
+	 * kbase_mem_alloc.
+	 */
+	return 0;
+
+free_info:
+	kfree(katom->softjob_data);
+	katom->softjob_data = NULL;
+fail:
+	return ret;
+}
+
+static u8 *kbase_jit_free_get_ids(struct kbase_jd_atom *katom)
+{
+	if (WARN_ON((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) !=
+				BASE_JD_REQ_SOFT_JIT_FREE))
+		return NULL;
+
+	return (u8 *) katom->softjob_data;
+}
+
+static void kbase_jit_add_to_pending_alloc_list(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct list_head *target_list_head = NULL;
+	struct kbase_jd_atom *entry;
+
+	list_for_each_entry(entry, &kctx->jit_pending_alloc, queue) {
+		if (katom->age < entry->age) {
+			target_list_head = &entry->queue;
+			break;
+		}
+	}
+
+	if (target_list_head == NULL)
+		target_list_head = &kctx->jit_pending_alloc;
+
+	list_add_tail(&katom->queue, target_list_head);
+}
+
+static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	struct base_jit_alloc_info *info;
+	struct kbase_va_region *reg;
+	struct kbase_vmap_struct mapping;
+	u64 *ptr, new_addr;
+	u32 count = katom->nr_extres;
+	u32 i;
+
+	if (katom->jit_blocked) {
+		list_del(&katom->queue);
+		katom->jit_blocked = false;
+	}
+
+	info = katom->softjob_data;
+	if (WARN_ON(!info)) {
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		return 0;
+	}
+
+	for (i = 0; i < count; i++, info++) {
+		/* The JIT ID is still in use so fail the allocation */
+		if (kctx->jit_alloc[info->id]) {
+			katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+			return 0;
+		}
+	}
+
+	for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
+		if (kctx->jit_alloc[info->id]) {
+			/* The JIT ID is duplicated in this atom. Roll back
+			 * previous allocations and fail.
+			 */
+			u32 j;
+
+			info = katom->softjob_data;
+			for (j = 0; j < i; j++, info++) {
+				kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+				kctx->jit_alloc[info->id] =
+						(struct kbase_va_region *) -1;
+			}
+
+			katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+			return 0;
+		}
+
+		/* Create a JIT allocation */
+		reg = kbase_jit_allocate(kctx, info);
+		if (!reg) {
+			struct kbase_jd_atom *jit_atom;
+			bool can_block = false;
+
+			lockdep_assert_held(&kctx->jctx.lock);
+
+			jit_atom = list_first_entry(&kctx->jit_atoms_head,
+					struct kbase_jd_atom, jit_node);
+
+			list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
+				if (jit_atom == katom)
+					break;
+
+				if ((jit_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+						BASE_JD_REQ_SOFT_JIT_FREE) {
+					u8 *free_ids = kbase_jit_free_get_ids(jit_atom);
+
+					if (free_ids && *free_ids &&
+						kctx->jit_alloc[*free_ids]) {
+						/* A JIT free which is active and
+						 * submitted before this atom
+						 */
+						can_block = true;
+						break;
+					}
+				}
+			}
+
+			if (!can_block) {
+				/* Mark the failed allocation as well as the
+				 * other un-attempted allocations in the set,
+				 * so we know they are in use even if the
+				 * allocation itself failed.
+				 */
+				for (; i < count; i++, info++) {
+					kctx->jit_alloc[info->id] =
+						(struct kbase_va_region *) -1;
+				}
+
+				katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+				return 0;
+			}
+
+			/* There are pending frees for an active allocation
+			 * so we should wait to see whether they free the
+			 * memory. Add to the list of atoms for which JIT
+			 * allocation is pending.
+			 */
+			kbase_jit_add_to_pending_alloc_list(katom);
+			katom->jit_blocked = true;
+
+			/* Rollback, the whole set will be re-attempted */
+			while (i-- > 0) {
+				info--;
+				kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+				kctx->jit_alloc[info->id] = NULL;
+			}
+
+			return 1;
+		}
+
+		/* Bind it to the user provided ID. */
+		kctx->jit_alloc[info->id] = reg;
+	}
+
+	for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
+		/*
+		 * Write the address of the JIT allocation to the user provided
+		 * GPU allocation.
+		 */
+		ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
+				&mapping);
+		if (!ptr) {
+			/*
+			 * Leave the allocations "live" as the JIT free atom
+			 * will be submitted anyway.
+			 */
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			return 0;
+		}
+
+		reg = kctx->jit_alloc[info->id];
+		new_addr = reg->start_pfn << PAGE_SHIFT;
+		*ptr = new_addr;
+		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(katom,
+			info->gpu_alloc_addr,
+			new_addr, info->va_pages);
+		kbase_vunmap(kctx, &mapping);
+	}
+
+	katom->event_code = BASE_JD_EVENT_DONE;
+
+	return 0;
+}
+
+static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
+{
+	struct base_jit_alloc_info *info;
+
+	lockdep_assert_held(&katom->kctx->jctx.lock);
+
+	if (WARN_ON(!katom->softjob_data))
+		return;
+
+	/* Remove atom from jit_atoms_head list */
+	list_del(&katom->jit_node);
+
+	if (katom->jit_blocked) {
+		list_del(&katom->queue);
+		katom->jit_blocked = false;
+	}
+
+	info = katom->softjob_data;
+	/* Free the info structure */
+	kfree(info);
+}
+
+static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	__user void *data = (__user void *)(uintptr_t) katom->jc;
+	u8 *ids;
+	u32 count = MAX(katom->nr_extres, 1);
+	u32 i;
+	int ret;
+
+	/* Sanity checks */
+	if (count > ARRAY_SIZE(kctx->jit_alloc)) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Copy the information for safe access and future storage */
+	ids = kmalloc_array(count, sizeof(*ids), GFP_KERNEL);
+	if (!ids) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	lockdep_assert_held(&kctx->jctx.lock);
+	katom->softjob_data = ids;
+
+	/* For backwards compatibility */
+	if (katom->nr_extres) {
+		/* Fail the job if there is no list of ids */
+		if (!data) {
+			ret = -EINVAL;
+			goto free_info;
+		}
+
+		if (copy_from_user(ids, data, sizeof(*ids)*count) != 0) {
+			ret = -EINVAL;
+			goto free_info;
+		}
+	} else {
+		katom->nr_extres = 1;
+		*ids = (u8)katom->jc;
+	}
+	for (i = 0; i < count; i++)
+		KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO(katom, ids[i]);
+
+	list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
+
+	return 0;
+
+free_info:
+	kfree(katom->softjob_data);
+	katom->softjob_data = NULL;
+fail:
+	return ret;
+}
+
+static void kbase_jit_free_process(struct kbase_jd_atom *katom)
+{
+	struct kbase_context *kctx = katom->kctx;
+	u8 *ids = kbase_jit_free_get_ids(katom);
+	u32 count = katom->nr_extres;
+	u32 i;
+
+	if (ids == NULL) {
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		return;
+	}
+
+	for (i = 0; i < count; i++, ids++) {
+		/*
+		 * If the ID is zero or it is not in use yet then fail the job.
+		 */
+		if ((*ids == 0) || (kctx->jit_alloc[*ids] == NULL)) {
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+			return;
+		}
+	}
+}
+
+static void kbasep_jit_free_finish_worker(struct work_struct *work)
+{
+	struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
+			work);
+	struct kbase_context *kctx = katom->kctx;
+	int resched;
+
+	mutex_lock(&kctx->jctx.lock);
+	kbase_finish_soft_job(katom);
+	resched = jd_done_nolock(katom, NULL);
+	mutex_unlock(&kctx->jctx.lock);
+
+	if (resched)
+		kbase_js_sched_all(kctx->kbdev);
+}
+
+static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
+{
+	struct list_head *i, *tmp;
+	struct kbase_context *kctx = katom->kctx;
+	LIST_HEAD(jit_pending_alloc_list);
+	u8 *ids;
+	size_t j;
+
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	ids = kbase_jit_free_get_ids(katom);
+	if (WARN_ON(ids == NULL)) {
+		return;
+	}
+
+	/* Remove this atom from the kctx->jit_atoms_head list */
+	list_del(&katom->jit_node);
+
+	for (j = 0; j != katom->nr_extres; ++j) {
+		if ((ids[j] != 0) && (kctx->jit_alloc[ids[j]] != NULL)) {
+			/*
+			 * If the ID is valid but the allocation request failed,
+			 * still succeed this soft job but don't try to free
+			 * the allocation.
+			 */
+			if (kctx->jit_alloc[ids[j]] != (struct kbase_va_region *) -1)
+				kbase_jit_free(kctx, kctx->jit_alloc[ids[j]]);
+
+			kctx->jit_alloc[ids[j]] = NULL;
+		}
+	}
+	/* Free the list of ids */
+	kfree(ids);
+
+	list_splice_tail_init(&kctx->jit_pending_alloc, &jit_pending_alloc_list);
+
+	list_for_each_safe(i, tmp, &jit_pending_alloc_list) {
+		struct kbase_jd_atom *pending_atom = list_entry(i,
+				struct kbase_jd_atom, queue);
+		if (kbase_jit_allocate_process(pending_atom) == 0) {
+			/* Atom has completed */
+			INIT_WORK(&pending_atom->work,
+					kbasep_jit_free_finish_worker);
+			queue_work(kctx->jctx.job_done_wq, &pending_atom->work);
+		}
+	}
+}
+
+static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
+{
+	__user struct base_external_resource_list *user_ext_res;
+	struct base_external_resource_list *ext_res;
+	u64 count = 0;
+	size_t copy_size;
+	int ret;
+
+	user_ext_res = (__user struct base_external_resource_list *)
+			(uintptr_t) katom->jc;
+
+	/* Fail the job if there is no info structure */
+	if (!user_ext_res) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Is the number of external resources in range? */
+	if (!count || count > BASE_EXT_RES_COUNT_MAX) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Copy the information for safe access and future storage */
+	copy_size = sizeof(*ext_res);
+	copy_size += sizeof(struct base_external_resource) * (count - 1);
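+	/*
+	 * Sizing note with an illustrative count: base_external_resource_list
+	 * already embeds one base_external_resource, hence the (count - 1)
+	 * extra elements. For example, count = 4 would allocate
+	 * sizeof(*ext_res) + 3 * sizeof(struct base_external_resource) bytes.
+	 */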
+	ext_res = kzalloc(copy_size, GFP_KERNEL);
+	if (!ext_res) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
+		ret = -EINVAL;
+		goto free_info;
+	}
+
+	/*
+	 * Overwrite the count with the first value in case it was changed
+	 * after the fact.
+	 */
+	ext_res->count = count;
+
+	katom->softjob_data = ext_res;
+
+	return 0;
+
+free_info:
+	kfree(ext_res);
+fail:
+	return ret;
+}
+
+static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
+{
+	struct base_external_resource_list *ext_res;
+	int i;
+	bool failed = false;
+
+	ext_res = katom->softjob_data;
+	if (!ext_res)
+		goto failed_jc;
+
+	kbase_gpu_vm_lock(katom->kctx);
+
+	for (i = 0; i < ext_res->count; i++) {
+		u64 gpu_addr;
+
+		gpu_addr = ext_res->ext_res[i].ext_resource &
+				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+		if (map) {
+			if (!kbase_sticky_resource_acquire(katom->kctx,
+					gpu_addr))
+				goto failed_loop;
+		} else {
+			if (!kbase_sticky_resource_release(katom->kctx, NULL,
+					gpu_addr))
+				failed = true;
+		}
+	}
+
+	/*
+	 * In the unmap case we continue unmapping the remaining resources even
+	 * on failure, but we always report failure if _any_ unmap request
+	 * failed.
+	 */
+	if (failed)
+		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+	else
+		katom->event_code = BASE_JD_EVENT_DONE;
+
+	kbase_gpu_vm_unlock(katom->kctx);
+
+	return;
+
+failed_loop:
+	while (i > 0) {
+		u64 const gpu_addr = ext_res->ext_res[i - 1].ext_resource &
+				~BASE_EXT_RES_ACCESS_EXCLUSIVE;
+
+		kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
+
+		--i;
+	}
+
+	katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+	kbase_gpu_vm_unlock(katom->kctx);
+
+failed_jc:
+	return;
+}
+
+static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
+{
+	struct base_external_resource_list *ext_res;
+
+	ext_res = katom->softjob_data;
+	/* Free the info structure */
+	kfree(ext_res);
+}
+
+int kbase_process_soft_job(struct kbase_jd_atom *katom)
+{
+	int ret = 0;
+
+	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(katom);
+
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+		ret = kbase_dump_cpu_gpu_time(katom);
+		break;
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		katom->event_code = kbase_sync_fence_out_trigger(katom,
+				katom->event_code == BASE_JD_EVENT_DONE ?
+								0 : -EFAULT);
+		break;
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+	{
+		ret = kbase_sync_fence_in_wait(katom);
+
+		if (ret == 1) {
+#ifdef CONFIG_MALI_FENCE_DEBUG
+			kbasep_add_waiting_with_timeout(katom);
+#else
+			kbasep_add_waiting_soft_job(katom);
+#endif
+		}
+		break;
+	}
+#endif
+
+	case BASE_JD_REQ_SOFT_REPLAY:
+		ret = kbase_replay_process(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EVENT_WAIT:
+		ret = kbasep_soft_event_wait(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EVENT_SET:
+		kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
+		break;
+	case BASE_JD_REQ_SOFT_EVENT_RESET:
+		kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
+		break;
+	case BASE_JD_REQ_SOFT_DEBUG_COPY:
+	{
+		int res = kbase_debug_copy(katom);
+
+		if (res)
+			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+		break;
+	}
+	case BASE_JD_REQ_SOFT_JIT_ALLOC:
+		ret = kbase_jit_allocate_process(katom);
+		break;
+	case BASE_JD_REQ_SOFT_JIT_FREE:
+		kbase_jit_free_process(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+		kbase_ext_res_process(katom, true);
+		break;
+	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+		kbase_ext_res_process(katom, false);
+		break;
+	}
+
+	/* Atom is complete */
+	KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(katom);
+	return ret;
+}
+
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
+{
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		kbase_sync_fence_in_cancel_wait(katom);
+		break;
+#endif
+	case BASE_JD_REQ_SOFT_EVENT_WAIT:
+		kbasep_soft_event_cancel_job(katom);
+		break;
+	default:
+		/* This soft-job doesn't support cancellation! */
+		KBASE_DEBUG_ASSERT(0);
+	}
+}
+
+int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
+{
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+		{
+			if (!IS_ALIGNED(katom->jc, cache_line_size()))
+				return -EINVAL;
+		}
+		break;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		{
+			struct base_fence fence;
+			int fd;
+
+			if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
+				return -EINVAL;
+
+			fd = kbase_sync_fence_out_create(katom,
+							 fence.basep.stream_fd);
+			if (fd < 0)
+				return -EINVAL;
+
+			fence.basep.fd = fd;
+			if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
+				kbase_sync_fence_out_remove(katom);
+				kbase_sync_fence_close_fd(fd);
+				fence.basep.fd = -EINVAL;
+				return -EINVAL;
+			}
+		}
+		break;
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		{
+			struct base_fence fence;
+			int ret;
+
+			if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
+				return -EINVAL;
+
+			/* Get a reference to the fence object */
+			ret = kbase_sync_fence_in_from_fd(katom,
+							  fence.basep.fd);
+			if (ret < 0)
+				return ret;
+
+#ifdef CONFIG_MALI_DMA_FENCE
+			/*
+			 * Set KCTX_NO_IMPLICIT_SYNC in the context the first
+			 * time a soft fence wait job is observed. This
+			 * prevents the implicit dma-buf fences from
+			 * conflicting with the Android native sync fences.
+			 */
+			if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC))
+				kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC);
+#endif /* CONFIG_MALI_DMA_FENCE */
+		}
+		break;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+	case BASE_JD_REQ_SOFT_JIT_ALLOC:
+		return kbase_jit_allocate_prepare(katom);
+	case BASE_JD_REQ_SOFT_REPLAY:
+		break;
+	case BASE_JD_REQ_SOFT_JIT_FREE:
+		return kbase_jit_free_prepare(katom);
+	case BASE_JD_REQ_SOFT_EVENT_WAIT:
+	case BASE_JD_REQ_SOFT_EVENT_SET:
+	case BASE_JD_REQ_SOFT_EVENT_RESET:
+		if (katom->jc == 0)
+			return -EINVAL;
+		break;
+	case BASE_JD_REQ_SOFT_DEBUG_COPY:
+		return kbase_debug_copy_prepare(katom);
+	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+		return kbase_ext_res_prepare(katom);
+	default:
+		/* Unsupported soft-job */
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void kbase_finish_soft_job(struct kbase_jd_atom *katom)
+{
+	switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
+	case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
+		/* Nothing to do */
+		break;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+	case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
+		/* If fence has not yet been signaled, do it now */
+		kbase_sync_fence_out_trigger(katom, katom->event_code ==
+				BASE_JD_EVENT_DONE ? 0 : -EFAULT);
+		break;
+	case BASE_JD_REQ_SOFT_FENCE_WAIT:
+		/* Release katom's reference to fence object */
+		kbase_sync_fence_in_remove(katom);
+		break;
+#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+	case BASE_JD_REQ_SOFT_DEBUG_COPY:
+		kbase_debug_copy_finish(katom);
+		break;
+	case BASE_JD_REQ_SOFT_JIT_ALLOC:
+		kbase_jit_allocate_finish(katom);
+		break;
+	case BASE_JD_REQ_SOFT_EXT_RES_MAP:
+	case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
+		kbase_ext_res_finish(katom);
+		break;
+	case BASE_JD_REQ_SOFT_JIT_FREE:
+		kbase_jit_free_finish(katom);
+		break;
+	}
+}
+
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
+{
+	LIST_HEAD(local_suspended_soft_jobs);
+	struct kbase_jd_atom *tmp_iter;
+	struct kbase_jd_atom *katom_iter;
+	struct kbasep_js_device_data *js_devdata;
+	bool resched = false;
+
+	KBASE_DEBUG_ASSERT(kbdev);
+
+	js_devdata = &kbdev->js_data;
+
+	/* Move out the entire list */
+	mutex_lock(&js_devdata->runpool_mutex);
+	list_splice_init(&js_devdata->suspended_soft_jobs_list,
+			&local_suspended_soft_jobs);
+	mutex_unlock(&js_devdata->runpool_mutex);
+
+	/*
+	 * Each atom must be detached from the list and run separately -
+	 * it could be re-added to the old list, but this is unlikely
+	 */
+	list_for_each_entry_safe(katom_iter, tmp_iter,
+			&local_suspended_soft_jobs, dep_item[1]) {
+		struct kbase_context *kctx = katom_iter->kctx;
+
+		mutex_lock(&kctx->jctx.lock);
+
+		/* Remove from the global list */
+		list_del(&katom_iter->dep_item[1]);
+		/* Remove from the context's list of waiting soft jobs */
+		kbasep_remove_waiting_soft_job(katom_iter);
+
+		if (kbase_process_soft_job(katom_iter) == 0) {
+			kbase_finish_soft_job(katom_iter);
+			resched |= jd_done_nolock(katom_iter, NULL);
+		} else {
+			KBASE_DEBUG_ASSERT((katom_iter->core_req &
+					BASE_JD_REQ_SOFT_JOB_TYPE)
+					!= BASE_JD_REQ_SOFT_REPLAY);
+		}
+
+		mutex_unlock(&kctx->jctx.lock);
+	}
+
+	if (resched)
+		kbase_js_sched_all(kbdev);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_strings.c b/drivers/gpu/arm/midgard/mali_kbase_strings.c
new file mode 100644
index 0000000..22caa4a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_strings.c
@@ -0,0 +1,28 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include "mali_kbase_strings.h"
+
+#define KBASE_DRV_NAME "mali"
+#define KBASE_TIMELINE_NAME KBASE_DRV_NAME ".timeline"
+
+const char kbase_drv_name[] = KBASE_DRV_NAME;
+const char kbase_timeline_name[] = KBASE_TIMELINE_NAME;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_strings.h b/drivers/gpu/arm/midgard/mali_kbase_strings.h
new file mode 100644
index 0000000..d2f1825
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_strings.h
@@ -0,0 +1,24 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+extern const char kbase_drv_name[];
+extern const char kbase_timeline_name[];
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync.h b/drivers/gpu/arm/midgard/mali_kbase_sync.h
new file mode 100644
index 0000000..70557dd
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync.h
@@ -0,0 +1,212 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * @file mali_kbase_sync.h
+ *
+ * This file contains our internal "API" for explicit fences.
+ * It hides the implementation details of the actual explicit fence mechanism
+ * used (Android fences or sync file with DMA fences).
+ */
+
+#ifndef MALI_KBASE_SYNC_H
+#define MALI_KBASE_SYNC_H
+
+#include <linux/syscalls.h>
+#ifdef CONFIG_SYNC
+#include <sync.h>
+#endif
+#ifdef CONFIG_SYNC_FILE
+#include "mali_kbase_fence_defs.h"
+#include <linux/sync_file.h>
+#endif
+
+#include "mali_kbase.h"
+
+/**
+ * struct kbase_sync_fence_info - Information about a fence
+ * @fence: Pointer to fence (type is void*, as underlying struct can differ)
+ * @name: The name given to this fence when it was created
+ * @status: < 0 means error, 0 means active, 1 means signaled
+ *
+ * Use kbase_sync_fence_in_info_get() or kbase_sync_fence_out_info_get()
+ * to get the information.
+ */
+struct kbase_sync_fence_info {
+	void *fence;
+	char name[32];
+	int status;
+};
+
+/**
+ * kbase_sync_fence_stream_create() - Create a stream object
+ * @name: Name of stream (only used to ease debugging/visualization)
+ * @out_fd: A file descriptor representing the created stream object
+ *
+ * May map down to a timeline in some implementations.
+ * Exposed as a file descriptor.
+ * Lifetime controlled via the file descriptor:
+ * - dup to add a ref
+ * - close to remove a ref
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd);
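+
+/*
+ * Illustrative usage sketch (not a definitive sequence; the stream name is
+ * arbitrary, "katom" is assumed to be a valid struct kbase_jd_atom pointer,
+ * and error handling is omitted):
+ *
+ *	int stream_fd;
+ *	int fence_fd;
+ *
+ *	if (kbase_sync_fence_stream_create("mali_timeline", &stream_fd) == 0)
+ *		fence_fd = kbase_sync_fence_out_create(katom, stream_fd);
+ */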
+
+/**
+ * kbase_sync_fence_out_create() - Create an explicit output fence for the specified atom
+ * @katom: Atom to assign the new explicit fence to
+ * @stream_fd: File descriptor for stream object to create fence on
+ *
+ * return: Valid file descriptor to fence or < 0 on error
+ */
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd);
+
+/**
+ * kbase_sync_fence_in_from_fd() - Assign an existing fence to the specified atom
+ * @katom: Atom to assign the existing explicit fence to
+ * @fd: File descriptor to an existing fence
+ *
+ * Assigns an explicit input fence to atom.
+ * This can later be waited for by calling @kbase_sync_fence_in_wait
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd);
+
+/**
+ * kbase_sync_fence_validate() - Validate a fd to be a valid fence
+ * @fd: File descriptor to check
+ *
+ * This function is only usable to catch unintentional user errors early;
+ * it does not stop malicious code changing the fd after this function returns.
+ *
+ * return: 0 if fd is for a valid fence, < 0 if invalid
+ */
+int kbase_sync_fence_validate(int fd);
+
+/**
+ * kbase_sync_fence_out_trigger() - Signal explicit output fence attached to katom
+ * @katom: Atom with an explicit fence to signal
+ * @result: < 0 means signal with error, >= 0 indicates success
+ *
+ * Signal the output fence attached to katom and remove the fence from the atom.
+ *
+ * return: The "next" event code for atom, typically JOB_CANCELLED or EVENT_DONE
+ */
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result);
+
+/**
+ * kbase_sync_fence_in_wait() - Wait for explicit input fence to be signaled
+ * @katom: Atom with explicit fence to wait for
+ *
+ * If the fence is already signaled, then 0 is returned, and the caller must
+ * continue processing of the katom.
+ *
+ * If the fence isn't already signaled, then this kbase_sync framework will
+ * take responsibility to continue the processing once the fence is signaled.
+ *
+ * return: 0 if already signaled, otherwise 1
+ */
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_in_cancel_wait() - Cancel explicit input fence waits
+ * @katom: Atom to cancel wait for
+ *
+ * This function is fully responsible for continuing processing of this atom
+ * (remove_waiting_soft_job + finish_soft_job + jd_done + js_sched_all)
+ */
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_in_remove() - Remove the input fence from the katom
+ * @katom: Atom to remove explicit input fence for
+ *
+ * This will also release the corresponding reference.
+ */
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_out_remove() - Remove the output fence from the katom
+ * @katom: Atom to remove explicit output fence for
+ *
+ * This will also release the corresponding reference.
+ */
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom);
+
+/**
+ * kbase_sync_fence_close_fd() - Close a file descriptor representing a fence
+ * @fd: File descriptor to close
+ */
+static inline void kbase_sync_fence_close_fd(int fd)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
+	ksys_close(fd);
+#else
+	sys_close(fd);
+#endif
+}
+
+/**
+ * kbase_sync_fence_in_info_get() - Retrieves information about input fence
+ * @katom: Atom to get fence information from
+ * @info: Struct to be filled with fence information
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info);
+
+/**
+ * kbase_sync_fence_out_info_get() - Retrieves information about output fence
+ * @katom: Atom to get fence information from
+ * @info: Struct to be filled with fence information
+ *
+ * return: 0 on success, < 0 on error
+ */
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+				  struct kbase_sync_fence_info *info);
+
+/**
+ * kbase_sync_status_string() - Get string matching @status
+ * @status: Value of fence status.
+ *
+ * return: Pointer to string describing @status.
+ */
+const char *kbase_sync_status_string(int status);
+
+/*
+ * Internal worker used to continue processing of atom.
+ */
+void kbase_sync_fence_wait_worker(struct work_struct *data);
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+/**
+ * kbase_sync_fence_in_dump() - Trigger a debug dump of the atom's input fence state
+ * @katom: Atom to trigger fence debug dump for
+ */
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom);
+#endif
+
+#endif /* MALI_KBASE_SYNC_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_android.c b/drivers/gpu/arm/midgard/mali_kbase_sync_android.c
new file mode 100644
index 0000000..75940fb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_android.c
@@ -0,0 +1,542 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Code for supporting explicit Android fences (CONFIG_SYNC)
+ * Known to be good for kernels 4.5 and earlier.
+ * Replaced with CONFIG_SYNC_FILE for 4.9 and later kernels
+ * (see mali_kbase_sync_file.c)
+ */
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include "sync.h"
+#include <mali_kbase.h>
+#include <mali_kbase_sync.h>
+
+struct mali_sync_timeline {
+	struct sync_timeline timeline;
+	atomic_t counter;
+	atomic_t signaled;
+};
+
+struct mali_sync_pt {
+	struct sync_pt pt;
+	int order;
+	int result;
+};
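+
+/* Relationship between the counters (informal note): a mali_sync_pt whose
+ * order is N counts as signaled once the parent timeline's "signaled"
+ * counter has advanced to N or beyond; see timeline_has_signaled() and
+ * kbase_sync_signal_pt() below.
+ */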
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+/* For backwards compatibility with kernels before 3.17. After 3.17
+ * sync_pt_parent is included in the kernel. */
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+	return pt->parent;
+}
+#endif
+
+static struct mali_sync_timeline *to_mali_sync_timeline(
+						struct sync_timeline *timeline)
+{
+	return container_of(timeline, struct mali_sync_timeline, timeline);
+}
+
+static struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+	return container_of(pt, struct mali_sync_pt, pt);
+}
+
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_pt *new_mpt;
+	struct sync_pt *new_pt = sync_pt_create(sync_pt_parent(pt),
+						sizeof(struct mali_sync_pt));
+
+	if (!new_pt)
+		return NULL;
+
+	new_mpt = to_mali_sync_pt(new_pt);
+	new_mpt->order = mpt->order;
+	new_mpt->result = mpt->result;
+
+	return new_pt;
+}
+
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(
+							sync_pt_parent(pt));
+	int result = mpt->result;
+
+	int diff = atomic_read(&mtl->signaled) - mpt->order;
+
+	if (diff >= 0)
+		return (result < 0) ? result : 1;
+
+	return 0;
+}
+
+static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
+{
+	struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
+	struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);
+
+	int diff = ma->order - mb->order;
+
+	if (diff == 0)
+		return 0;
+
+	return (diff < 0) ? -1 : 1;
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str,
+			       int size)
+{
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline);
+
+	snprintf(str, size, "%d", atomic_read(&mtl->signaled));
+}
+
+static void pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+
+	snprintf(str, size, "%d(%d)", mpt->order, mpt->result);
+}
+
+static struct sync_timeline_ops mali_timeline_ops = {
+	.driver_name = "Mali",
+	.dup = timeline_dup,
+	.has_signaled = timeline_has_signaled,
+	.compare = timeline_compare,
+	.timeline_value_str = timeline_value_str,
+	.pt_value_str       = pt_value_str,
+};
+
+/* Allocates a timeline for Mali
+ *
+ * One timeline should be allocated per API context.
+ */
+static struct sync_timeline *mali_sync_timeline_alloc(const char *name)
+{
+	struct sync_timeline *tl;
+	struct mali_sync_timeline *mtl;
+
+	tl = sync_timeline_create(&mali_timeline_ops,
+				  sizeof(struct mali_sync_timeline), name);
+	if (!tl)
+		return NULL;
+
+	/* Set the counter in our private struct */
+	mtl = to_mali_sync_timeline(tl);
+	atomic_set(&mtl->counter, 0);
+	atomic_set(&mtl->signaled, 0);
+
+	return tl;
+}
+
+static int kbase_stream_close(struct inode *inode, struct file *file)
+{
+	struct sync_timeline *tl;
+
+	tl = (struct sync_timeline *)file->private_data;
+	sync_timeline_destroy(tl);
+	return 0;
+}
+
+static const struct file_operations stream_fops = {
+	.owner = THIS_MODULE,
+	.release = kbase_stream_close,
+};
+
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd)
+{
+	struct sync_timeline *tl;
+
+	if (!out_fd)
+		return -EINVAL;
+
+	tl = mali_sync_timeline_alloc(name);
+	if (!tl)
+		return -EINVAL;
+
+	*out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY|O_CLOEXEC);
+
+	if (*out_fd < 0) {
+		sync_timeline_destroy(tl);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Allocates a sync point within the timeline.
+ *
+ * The timeline must be the one allocated by kbase_sync_timeline_alloc
+ *
+ * Sync points must be triggered in *exactly* the same order as they are
+ * allocated.
+ */
+static struct sync_pt *kbase_sync_pt_alloc(struct sync_timeline *parent)
+{
+	struct sync_pt *pt = sync_pt_create(parent,
+					    sizeof(struct mali_sync_pt));
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent);
+	struct mali_sync_pt *mpt;
+
+	if (!pt)
+		return NULL;
+
+	mpt = to_mali_sync_pt(pt);
+	mpt->order = atomic_inc_return(&mtl->counter);
+	mpt->result = 0;
+
+	return pt;
+}
+
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int tl_fd)
+{
+	struct sync_timeline *tl;
+	struct sync_pt *pt;
+	struct sync_fence *fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+	struct files_struct *files;
+	struct fdtable *fdt;
+#endif
+	int fd;
+	struct file *tl_file;
+
+	tl_file = fget(tl_fd);
+	if (tl_file == NULL)
+		return -EBADF;
+
+	if (tl_file->f_op != &stream_fops) {
+		fd = -EBADF;
+		goto out;
+	}
+
+	tl = tl_file->private_data;
+
+	pt = kbase_sync_pt_alloc(tl);
+	if (!pt) {
+		fd = -EFAULT;
+		goto out;
+	}
+
+	fence = sync_fence_create("mali_fence", pt);
+	if (!fence) {
+		sync_pt_free(pt);
+		fd = -EFAULT;
+		goto out;
+	}
+
+	/* from here the fence owns the sync_pt */
+
+	/* create a fd representing the fence */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
+	if (fd < 0) {
+		sync_fence_put(fence);
+		goto out;
+	}
+#else
+	fd = get_unused_fd();
+	if (fd < 0) {
+		sync_fence_put(fence);
+		goto out;
+	}
+
+	files = current->files;
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+	__set_close_on_exec(fd, fdt);
+#else
+	FD_SET(fd, fdt->close_on_exec);
+#endif
+	spin_unlock(&files->file_lock);
+#endif  /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */
+
+	/* bind fence to the new fd */
+	sync_fence_install(fence, fd);
+
+	katom->fence = sync_fence_fdget(fd);
+	if (katom->fence == NULL) {
+		/* The only way the fence can be NULL is if userspace closed it
+		 * for us, so we don't need to clear it up */
+		fd = -EINVAL;
+		goto out;
+	}
+
+out:
+	fput(tl_file);
+
+	return fd;
+}
+
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd)
+{
+	katom->fence = sync_fence_fdget(fd);
+	return katom->fence ? 0 : -ENOENT;
+}
+
+int kbase_sync_fence_validate(int fd)
+{
+	struct sync_fence *fence;
+
+	fence = sync_fence_fdget(fd);
+	if (!fence)
+		return -EINVAL;
+
+	sync_fence_put(fence);
+	return 0;
+}
+
+/* Returns true if the specified timeline is allocated by Mali */
+static int kbase_sync_timeline_is_ours(struct sync_timeline *timeline)
+{
+	return timeline->ops == &mali_timeline_ops;
+}
+
+/* Signals a particular sync point
+ *
+ * Sync points must be triggered in *exactly* the same order as they are
+ * allocated.
+ *
+ * If they are signaled in the wrong order then a message will be printed in
+ * debug builds and attempts to signal out-of-order sync_pts will be ignored.
+ *
+ * result can be negative to indicate error, any other value is interpreted as
+ * success.
+ */
+static void kbase_sync_signal_pt(struct sync_pt *pt, int result)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(
+							sync_pt_parent(pt));
+	int signaled;
+	int diff;
+
+	mpt->result = result;
+
+	do {
+		signaled = atomic_read(&mtl->signaled);
+
+		diff = signaled - mpt->order;
+
+		if (diff > 0) {
+			/* The timeline is already at or ahead of this point.
+			 * This should not happen unless userspace has been
+			 * signaling fences out of order, so warn but don't
+			 * violate the sync_pt API.
+			 * The warning is only in debug builds to prevent
+			 * a malicious user being able to spam dmesg.
+			 */
+#ifdef CONFIG_MALI_DEBUG
+			pr_err("Fences were triggered in a different order to allocation!");
+#endif				/* CONFIG_MALI_DEBUG */
+			return;
+		}
+	} while (atomic_cmpxchg(&mtl->signaled,
+				signaled, mpt->order) != signaled);
+}
+
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result)
+{
+	struct sync_pt *pt;
+	struct sync_timeline *timeline;
+
+	if (!katom->fence)
+		return BASE_JD_EVENT_JOB_CANCELLED;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	if (!list_is_singular(&katom->fence->pt_list_head)) {
+#else
+	if (katom->fence->num_fences != 1) {
+#endif
+		/* Not exactly one item in the list - so it didn't (directly)
+		 * come from us */
+		return BASE_JD_EVENT_JOB_CANCELLED;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	pt = list_first_entry(&katom->fence->pt_list_head,
+			      struct sync_pt, pt_list);
+#else
+	pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
+#endif
+	timeline = sync_pt_parent(pt);
+
+	if (!kbase_sync_timeline_is_ours(timeline)) {
+		/* Fence has a sync_pt which isn't ours! */
+		return BASE_JD_EVENT_JOB_CANCELLED;
+	}
+
+	kbase_sync_signal_pt(pt, result);
+
+	sync_timeline_signal(timeline);
+
+	kbase_sync_fence_out_remove(katom);
+
+	return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
+}
+
+static inline int kbase_fence_get_status(struct sync_fence *fence)
+{
+	if (!fence)
+		return -ENOENT;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	return fence->status;
+#else
+	return atomic_read(&fence->status);
+#endif
+}
+
+static void kbase_fence_wait_callback(struct sync_fence *fence,
+				      struct sync_fence_waiter *waiter)
+{
+	struct kbase_jd_atom *katom = container_of(waiter,
+					struct kbase_jd_atom, sync_waiter);
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Propagate the fence status to the atom.
+	 * If negative then cancel this atom and its dependencies.
+	 */
+	if (kbase_fence_get_status(fence) < 0)
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	/* To prevent a potential deadlock we schedule the work onto the
+	 * job_done_wq workqueue
+	 *
+	 * The issue is that we may signal the timeline while holding
+	 * kctx->jctx.lock and the callbacks are run synchronously from
+	 * sync_timeline_signal. So we simply defer the work.
+	 */
+
+	INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+	queue_work(kctx->jctx.job_done_wq, &katom->work);
+}
+
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
+{
+	int ret;
+
+	sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
+
+	ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);
+
+	if (ret == 1) {
+		/* Already signaled */
+		return 0;
+	}
+
+	if (ret < 0) {
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+		/* We should cause the dependent jobs in the bag to be failed;
+		 * to do this we schedule the work queue to complete this job */
+		INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+		queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+	}
+
+	return 1;
+}
+
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom)
+{
+	if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
+		/* The wait wasn't cancelled - leave the cleanup for
+		 * kbase_fence_wait_callback */
+		return;
+	}
+
+	/* Wait was cancelled - zap the atoms */
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	kbasep_remove_waiting_soft_job(katom);
+	kbase_finish_soft_job(katom);
+
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->fence) {
+		sync_fence_put(katom->fence);
+		katom->fence = NULL;
+	}
+}
+
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom)
+{
+	if (katom->fence) {
+		sync_fence_put(katom->fence);
+		katom->fence = NULL;
+	}
+}
+
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info)
+{
+	if (!katom->fence)
+		return -ENOENT;
+
+	info->fence = katom->fence;
+	info->status = kbase_fence_get_status(katom->fence);
+	strlcpy(info->name, katom->fence->name, sizeof(info->name));
+
+	return 0;
+}
+
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info)
+{
+	if (!katom->fence)
+		return -ENOENT;
+
+	info->fence = katom->fence;
+	info->status = kbase_fence_get_status(katom->fence);
+	strlcpy(info->name, katom->fence->name, sizeof(info->name));
+
+	return 0;
+}
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom)
+{
+	/* Dump out the full state of all the Android sync fences.
+	 * The function sync_dump() isn't exported to modules, so force
+	 * sync_fence_wait() to time out to trigger sync_dump().
+	 */
+	if (katom->fence)
+		sync_fence_wait(katom->fence, 1);
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_common.c b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c
new file mode 100644
index 0000000..5239dae
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c
@@ -0,0 +1,49 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * @file mali_kbase_sync_common.c
+ *
+ * Common code for our explicit fence functionality
+ */
+
+#include <linux/workqueue.h>
+#include "mali_kbase.h"
+#include "mali_kbase_sync.h"
+
+void kbase_sync_fence_wait_worker(struct work_struct *data)
+{
+	struct kbase_jd_atom *katom;
+
+	katom = container_of(data, struct kbase_jd_atom, work);
+	kbase_soft_event_wait_callback(katom);
+}
+
+const char *kbase_sync_status_string(int status)
+{
+	/* Convention matches struct kbase_sync_fence_info::status:
+	 * < 0 is error, 0 is active, > 0 is signaled.
+	 */
+	if (status < 0)
+		return "error";
+	else if (status > 0)
+		return "signaled";
+	else
+		return "active";
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_file.c b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c
new file mode 100644
index 0000000..bb94aee
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c
@@ -0,0 +1,361 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Code for supporting explicit Linux fences (CONFIG_SYNC_FILE)
+ * Introduced in kernel 4.9.
+ * Android explicit fences (CONFIG_SYNC) can be used for older kernels
+ * (see mali_kbase_sync_android.c)
+ */
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/sync_file.h>
+#include <linux/slab.h>
+#include "mali_kbase_fence_defs.h"
+#include "mali_kbase_sync.h"
+#include "mali_kbase_fence.h"
+#include "mali_kbase.h"
+
+static const struct file_operations stream_fops = {
+	.owner = THIS_MODULE
+};
+
+int kbase_sync_fence_stream_create(const char *name, int *const out_fd)
+{
+	if (!out_fd)
+		return -EINVAL;
+
+	*out_fd = anon_inode_getfd(name, &stream_fops, NULL,
+				   O_RDONLY | O_CLOEXEC);
+	if (*out_fd < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+	struct sync_file *sync_file;
+	int fd;
+
+	fence = kbase_fence_out_new(katom);
+	if (!fence)
+		return -ENOMEM;
+
+#if (KERNEL_VERSION(4, 9, 67) >= LINUX_VERSION_CODE)
+	/* Take an extra reference to the fence on behalf of the sync_file.
+	 * This is only needed on older kernels where sync_file_create()
+	 * does not take its own reference. This was changed in v4.9.68,
+	 * where sync_file_create() now takes its own reference.
+	 */
+	dma_fence_get(fence);
+#endif
+
+	/* create a sync_file fd representing the fence */
+	sync_file = sync_file_create(fence);
+	if (!sync_file) {
+		dma_fence_put(fence);
+		kbase_fence_out_remove(katom);
+		return -ENOMEM;
+	}
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0) {
+		fput(sync_file->file);
+		kbase_fence_out_remove(katom);
+		return fd;
+	}
+
+	fd_install(fd, sync_file->file);
+
+	return fd;
+}
+
+int kbase_sync_fence_in_from_fd(struct kbase_jd_atom *katom, int fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence = sync_file_get_fence(fd);
+#else
+	struct dma_fence *fence = sync_file_get_fence(fd);
+#endif
+
+	if (!fence)
+		return -ENOENT;
+
+	kbase_fence_fence_in_set(katom, fence);
+
+	return 0;
+}
+
+int kbase_sync_fence_validate(int fd)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence = sync_file_get_fence(fd);
+#else
+	struct dma_fence *fence = sync_file_get_fence(fd);
+#endif
+
+	if (!fence)
+		return -EINVAL;
+
+	dma_fence_put(fence);
+
+	return 0; /* valid */
+}
+
+enum base_jd_event_code
+kbase_sync_fence_out_trigger(struct kbase_jd_atom *katom, int result)
+{
+	int res;
+
+	if (!kbase_fence_out_is_ours(katom)) {
+		/* Not our fence */
+		return BASE_JD_EVENT_JOB_CANCELLED;
+	}
+
+	res = kbase_fence_out_signal(katom, result);
+	if (unlikely(res < 0)) {
+		dev_warn(katom->kctx->kbdev->dev,
+				"fence_signal() failed with %d\n", res);
+	}
+
+	kbase_sync_fence_out_remove(katom);
+
+	return (result != 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static void kbase_fence_wait_callback(struct fence *fence,
+				      struct fence_cb *cb)
+#else
+static void kbase_fence_wait_callback(struct dma_fence *fence,
+				      struct dma_fence_cb *cb)
+#endif
+{
+	struct kbase_fence_cb *kcb = container_of(cb,
+				struct kbase_fence_cb,
+				fence_cb);
+	struct kbase_jd_atom *katom = kcb->katom;
+	struct kbase_context *kctx = katom->kctx;
+
+	/* Cancel atom if fence is erroneous */
+#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE || \
+	 (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE && \
+	  KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE))
+	if (dma_fence_is_signaled(kcb->fence) && kcb->fence->error)
+#else
+	if (dma_fence_is_signaled(kcb->fence) && kcb->fence->status < 0)
+#endif
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	if (kbase_fence_dep_count_dec_and_test(katom)) {
+		/* We take responsibility of handling this */
+		kbase_fence_dep_count_set(katom, -1);
+
+		/* To prevent a potential deadlock we schedule the work onto the
+		 * job_done_wq workqueue
+		 *
+		 * The issue is that we may signal the timeline while holding
+		 * kctx->jctx.lock and the callbacks are run synchronously from
+		 * sync_timeline_signal. So we simply defer the work.
+		 */
+		INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+		queue_work(kctx->jctx.job_done_wq, &katom->work);
+	}
+}
+
+int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
+{
+	int err;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	fence = kbase_fence_in_get(katom);
+	if (!fence)
+		return 0; /* no input fence to wait for, good to go! */
+
+	kbase_fence_dep_count_set(katom, 1);
+
+	err = kbase_fence_add_callback(katom, fence, kbase_fence_wait_callback);
+
+	kbase_fence_put(fence);
+
+	if (likely(!err)) {
+		/* Test if the callbacks are already triggered */
+		if (kbase_fence_dep_count_dec_and_test(katom)) {
+			kbase_fence_free_callbacks(katom);
+			kbase_fence_dep_count_set(katom, -1);
+			return 0; /* Already signaled, good to go right now */
+		}
+
+		/* Callback installed, so we just need to wait for it... */
+	} else {
+		/* Failure */
+		kbase_fence_free_callbacks(katom);
+		kbase_fence_dep_count_set(katom, -1);
+
+		katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+		/* We should cause the dependent jobs in the bag to be failed;
+		 * to do this we schedule the work queue to complete this job */
+
+		INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
+		queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+	}
+
+	return 1; /* completion to be done later by callback/worker */
+}
+
+void kbase_sync_fence_in_cancel_wait(struct kbase_jd_atom *katom)
+{
+	if (!kbase_fence_free_callbacks(katom)) {
+		/* The wait wasn't cancelled -
+		 * leave the cleanup for kbase_fence_wait_callback */
+		return;
+	}
+
+	/* Take responsibility of completion */
+	kbase_fence_dep_count_set(katom, -1);
+
+	/* Wait was cancelled - zap the atoms */
+	katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
+
+	kbasep_remove_waiting_soft_job(katom);
+	kbase_finish_soft_job(katom);
+
+	if (jd_done_nolock(katom, NULL))
+		kbase_js_sched_all(katom->kctx->kbdev);
+}
+
+void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom)
+{
+	kbase_fence_out_remove(katom);
+}
+
+void kbase_sync_fence_in_remove(struct kbase_jd_atom *katom)
+{
+	kbase_fence_free_callbacks(katom);
+	kbase_fence_in_remove(katom);
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+static void kbase_sync_fence_info_get(struct fence *fence,
+				      struct kbase_sync_fence_info *info)
+#else
+static void kbase_sync_fence_info_get(struct dma_fence *fence,
+				      struct kbase_sync_fence_info *info)
+#endif
+{
+	info->fence = fence;
+
+	/* translate into CONFIG_SYNC status:
+	 * < 0 : error
+	 * 0 : active
+	 * 1 : signaled
+	 */
+	if (dma_fence_is_signaled(fence)) {
+#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE || \
+	 (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE && \
+	  KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE))
+		int status = fence->error;
+#else
+		int status = fence->status;
+#endif
+		if (status < 0)
+			info->status = status; /* signaled with error */
+		else
+			info->status = 1; /* signaled with success */
+	} else  {
+		info->status = 0; /* still active (unsignaled) */
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+	scnprintf(info->name, sizeof(info->name), "%u#%u",
+		  fence->context, fence->seqno);
+#else
+	scnprintf(info->name, sizeof(info->name), "%llu#%u",
+		  fence->context, fence->seqno);
+#endif
+}
+
+int kbase_sync_fence_in_info_get(struct kbase_jd_atom *katom,
+				 struct kbase_sync_fence_info *info)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	fence = kbase_fence_in_get(katom);
+	if (!fence)
+		return -ENOENT;
+
+	kbase_sync_fence_info_get(fence, info);
+
+	kbase_fence_put(fence);
+
+	return 0;
+}
+
+int kbase_sync_fence_out_info_get(struct kbase_jd_atom *katom,
+				  struct kbase_sync_fence_info *info)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
+	struct fence *fence;
+#else
+	struct dma_fence *fence;
+#endif
+
+	fence = kbase_fence_out_get(katom);
+	if (!fence)
+		return -ENOENT;
+
+	kbase_sync_fence_info_get(fence, info);
+
+	kbase_fence_put(fence);
+
+	return 0;
+}
+
+
+#ifdef CONFIG_MALI_FENCE_DEBUG
+void kbase_sync_fence_in_dump(struct kbase_jd_atom *katom)
+{
+	/* Not implemented */
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.c b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c
new file mode 100644
index 0000000..56e4ebe
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c
@@ -0,0 +1,2674 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_jm.h>
+#include <mali_kbase_tlstream.h>
+
+/*****************************************************************************/
+
+/* The version of swtrace protocol used in timeline stream. */
+#define SWTRACE_VERSION    3
+
+/* The maximum expected length of string in tracepoint descriptor. */
+#define STRLEN_MAX         64 /* bytes */
+
+/* The number of nanoseconds in a second. */
+#define NSECS_IN_SEC       1000000000ull /* ns */
+
+/* The period of autoflush checker execution in milliseconds. */
+#define AUTOFLUSH_INTERVAL 1000 /* ms */
+
+/* The maximum size of a single packet used by timeline. */
+#define PACKET_SIZE        4096 /* bytes */
+
+/* The number of packets used by one timeline stream. */
+#define PACKET_COUNT       16
+
+/* The number of bytes reserved for packet header.
+ * This value must be defined according to MIPE documentation. */
+#define PACKET_HEADER_SIZE 8 /* bytes */
+
+/* The number of bytes reserved for packet sequence number.
+ * This value must be defined according to MIPE documentation. */
+#define PACKET_NUMBER_SIZE 4 /* bytes */
+
+/* Packet header - first word.
+ * These values must be defined according to MIPE documentation. */
+#define PACKET_STREAMID_POS  0
+#define PACKET_STREAMID_LEN  8
+#define PACKET_RSVD1_POS     (PACKET_STREAMID_POS + PACKET_STREAMID_LEN)
+#define PACKET_RSVD1_LEN     8
+#define PACKET_TYPE_POS      (PACKET_RSVD1_POS + PACKET_RSVD1_LEN)
+#define PACKET_TYPE_LEN      3
+#define PACKET_CLASS_POS     (PACKET_TYPE_POS + PACKET_TYPE_LEN)
+#define PACKET_CLASS_LEN     7
+#define PACKET_FAMILY_POS    (PACKET_CLASS_POS + PACKET_CLASS_LEN)
+#define PACKET_FAMILY_LEN    6
+
+/* Packet header - second word
+ * These values must be defined according to MIPE documentation. */
+#define PACKET_LENGTH_POS    0
+#define PACKET_LENGTH_LEN    24
+#define PACKET_SEQBIT_POS    (PACKET_LENGTH_POS + PACKET_LENGTH_LEN)
+#define PACKET_SEQBIT_LEN    1
+#define PACKET_RSVD2_POS     (PACKET_SEQBIT_POS + PACKET_SEQBIT_LEN)
+#define PACKET_RSVD2_LEN     7
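+
+/* Resulting header layout, derived from the definitions above (bit ranges,
+ * LSB first):
+ *
+ *   word0: [ 7: 0] stream id  [15: 8] rsvd1  [18:16] type
+ *          [25:19] class      [31:26] family
+ *   word1: [23: 0] length     [   24] seqbit [31:25] rsvd2
+ */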
+
+/* Types of streams generated by timeline.
+ * Order is significant! Header streams must precede respective body streams. */
+enum tl_stream_type {
+	TL_STREAM_TYPE_OBJ_HEADER,
+	TL_STREAM_TYPE_OBJ_SUMMARY,
+	TL_STREAM_TYPE_OBJ,
+	TL_STREAM_TYPE_AUX_HEADER,
+	TL_STREAM_TYPE_AUX,
+
+	TL_STREAM_TYPE_COUNT
+};
+
+/* Timeline packet family ids.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_family {
+	TL_PACKET_FAMILY_CTRL = 0, /* control packets */
+	TL_PACKET_FAMILY_TL   = 1, /* timeline packets */
+
+	TL_PACKET_FAMILY_COUNT
+};
+
+/* Packet classes used in timeline streams.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_class {
+	TL_PACKET_CLASS_OBJ = 0, /* timeline objects packet */
+	TL_PACKET_CLASS_AUX = 1, /* auxiliary events packet */
+};
+
+/* Packet types used in timeline streams.
+ * Values are significant! Check MIPE documentation. */
+enum tl_packet_type {
+	TL_PACKET_TYPE_HEADER  = 0, /* stream's header/directory */
+	TL_PACKET_TYPE_BODY    = 1, /* stream's body */
+	TL_PACKET_TYPE_SUMMARY = 2, /* stream's summary */
+};
+
+/* Message ids of trace events that are recorded in the timeline stream. */
+enum tl_msg_id_obj {
+	/* Timeline object events. */
+	KBASE_TL_NEW_CTX,
+	KBASE_TL_NEW_GPU,
+	KBASE_TL_NEW_LPU,
+	KBASE_TL_NEW_ATOM,
+	KBASE_TL_NEW_AS,
+	KBASE_TL_DEL_CTX,
+	KBASE_TL_DEL_ATOM,
+	KBASE_TL_LIFELINK_LPU_GPU,
+	KBASE_TL_LIFELINK_AS_GPU,
+	KBASE_TL_RET_CTX_LPU,
+	KBASE_TL_RET_ATOM_CTX,
+	KBASE_TL_RET_ATOM_LPU,
+	KBASE_TL_NRET_CTX_LPU,
+	KBASE_TL_NRET_ATOM_CTX,
+	KBASE_TL_NRET_ATOM_LPU,
+	KBASE_TL_RET_AS_CTX,
+	KBASE_TL_NRET_AS_CTX,
+	KBASE_TL_RET_ATOM_AS,
+	KBASE_TL_NRET_ATOM_AS,
+	KBASE_TL_ATTRIB_ATOM_CONFIG,
+	KBASE_TL_ATTRIB_ATOM_PRIORITY,
+	KBASE_TL_ATTRIB_ATOM_STATE,
+	KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
+	KBASE_TL_ATTRIB_ATOM_JIT,
+	KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
+	KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
+	KBASE_TL_ATTRIB_AS_CONFIG,
+	KBASE_TL_EVENT_LPU_SOFTSTOP,
+	KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+	KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+	KBASE_TL_EVENT_ATOM_SOFTJOB_START,
+	KBASE_TL_EVENT_ATOM_SOFTJOB_END,
+
+	/* Job dump specific events. */
+	KBASE_JD_GPU_SOFT_RESET
+};
+
+/* Message ids of trace events that are recorded in the auxiliary stream. */
+enum tl_msg_id_aux {
+	KBASE_AUX_PM_STATE,
+	KBASE_AUX_PAGEFAULT,
+	KBASE_AUX_PAGESALLOC,
+	KBASE_AUX_DEVFREQ_TARGET,
+	KBASE_AUX_PROTECTED_ENTER_START,
+	KBASE_AUX_PROTECTED_ENTER_END,
+	KBASE_AUX_PROTECTED_LEAVE_START,
+	KBASE_AUX_PROTECTED_LEAVE_END,
+	KBASE_AUX_JIT_STATS,
+};
+
+/*****************************************************************************/
+
+/**
+ * struct tl_stream - timeline stream structure
+ * @lock: message order lock
+ * @buffer: array of buffers
+ * @wbi: write buffer index
+ * @rbi: read buffer index
+ * @numbered: if non-zero stream's packets are sequentially numbered
+ * @autoflush_counter: counter tracking stream's autoflush state
+ *
+ * This structure holds information needed to construct proper packets in the
+ * timeline stream. Each message in a stream must bear a timestamp that is
+ * greater than the timestamp of the previous message in the same stream. For
+ * this reason the lock is held throughout the process of message creation.
+ * Each stream contains a set of buffers and each buffer holds one MIPE packet.
+ * If there is not enough free space to store an incoming message, the oldest
+ * buffer is discarded. Each packet in a timeline body stream has a sequence
+ * number embedded (this value must increment monotonically and is used by the
+ * packet receiver to detect buffer overflows).
+ * The autoflush counter is set to a negative number when there is no data
+ * pending flush and is set to zero on every update of the buffer. The
+ * autoflush timer increments the counter by one on every expiry. If there is
+ * no activity on the buffer during two consecutive timer expiries, the stream
+ * buffer is flushed.
+ */
+struct tl_stream {
+	spinlock_t lock;
+
+	struct {
+		atomic_t size;              /* number of bytes in buffer */
+		char     data[PACKET_SIZE]; /* buffer's data */
+	} buffer[PACKET_COUNT];
+
+	atomic_t wbi;
+	atomic_t rbi;
+
+	int      numbered;
+	atomic_t autoflush_counter;
+};
+
+/**
+ * struct tp_desc - tracepoint message descriptor structure
+ * @id:        tracepoint ID identifying message in stream
+ * @id_str:    human readable version of tracepoint ID
+ * @name:      tracepoint description
+ * @arg_types: tracepoint's arguments types declaration
+ * @arg_names: comma separated list of tracepoint's arguments names
+ */
+struct tp_desc {
+	u32        id;
+	const char *id_str;
+	const char *name;
+	const char *arg_types;
+	const char *arg_names;
+};
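+
+/* Informal reading of @arg_types, based on the descriptors below: each letter
+ * after the leading '@' describes one argument, e.g. 'p' a pointer, 'I' a
+ * 32-bit value, 'L' a 64-bit value and 's' a nul-terminated string, so "@pII"
+ * declares a pointer followed by two 32-bit arguments.
+ */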
+
+/*****************************************************************************/
+
+/* Configuration of timeline streams generated by the kernel.
+ * The kernel emits only streams containing either timeline object events or
+ * auxiliary events. All streams have a stream id value of 1 (as opposed to
+ * user space streams, which have a value of 0). */
+static const struct {
+	enum tl_packet_family pkt_family;
+	enum tl_packet_class  pkt_class;
+	enum tl_packet_type   pkt_type;
+	unsigned int          stream_id;
+} tl_stream_cfg[TL_STREAM_TYPE_COUNT] = {
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_HEADER,  1},
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_SUMMARY, 1},
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_OBJ, TL_PACKET_TYPE_BODY,    1},
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_HEADER,  1},
+	{TL_PACKET_FAMILY_TL, TL_PACKET_CLASS_AUX, TL_PACKET_TYPE_BODY,    1}
+};
+
+/* The timeline streams generated by kernel. */
+static struct tl_stream *tl_stream[TL_STREAM_TYPE_COUNT];
+
+/* Autoflush timer. */
+static struct timer_list autoflush_timer;
+
+/* If non-zero autoflush timer is active. */
+static atomic_t autoflush_timer_active;
+
+/* Reader lock. Only one reader is allowed to have access to the timeline
+ * streams at any given time. */
+static DEFINE_MUTEX(tl_reader_lock);
+
+/* Timeline stream event queue. */
+static DECLARE_WAIT_QUEUE_HEAD(tl_event_queue);
+
+/* The timeline stream file operations functions. */
+static ssize_t kbasep_tlstream_read(
+		struct file *filp,
+		char __user *buffer,
+		size_t      size,
+		loff_t      *f_pos);
+static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait);
+static int kbasep_tlstream_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+static const struct file_operations kbasep_tlstream_fops = {
+	.release = kbasep_tlstream_release,
+	.read    = kbasep_tlstream_read,
+	.poll    = kbasep_tlstream_poll,
+};
+
+/* Descriptors of timeline messages transmitted in object events stream. */
+static const struct tp_desc tp_desc_obj[] = {
+	{
+		KBASE_TL_NEW_CTX,
+		__stringify(KBASE_TL_NEW_CTX),
+		"object ctx is created",
+		"@pII",
+		"ctx,ctx_nr,tgid"
+	},
+	{
+		KBASE_TL_NEW_GPU,
+		__stringify(KBASE_TL_NEW_GPU),
+		"object gpu is created",
+		"@pII",
+		"gpu,gpu_id,core_count"
+	},
+	{
+		KBASE_TL_NEW_LPU,
+		__stringify(KBASE_TL_NEW_LPU),
+		"object lpu is created",
+		"@pII",
+		"lpu,lpu_nr,lpu_fn"
+	},
+	{
+		KBASE_TL_NEW_ATOM,
+		__stringify(KBASE_TL_NEW_ATOM),
+		"object atom is created",
+		"@pI",
+		"atom,atom_nr"
+	},
+	{
+		KBASE_TL_NEW_AS,
+		__stringify(KBASE_TL_NEW_AS),
+		"address space object is created",
+		"@pI",
+		"address_space,as_nr"
+	},
+	{
+		KBASE_TL_DEL_CTX,
+		__stringify(KBASE_TL_DEL_CTX),
+		"context is destroyed",
+		"@p",
+		"ctx"
+	},
+	{
+		KBASE_TL_DEL_ATOM,
+		__stringify(KBASE_TL_DEL_ATOM),
+		"atom is destroyed",
+		"@p",
+		"atom"
+	},
+	{
+		KBASE_TL_LIFELINK_LPU_GPU,
+		__stringify(KBASE_TL_LIFELINK_LPU_GPU),
+		"lpu is deleted with gpu",
+		"@pp",
+		"lpu,gpu"
+	},
+	{
+		KBASE_TL_LIFELINK_AS_GPU,
+		__stringify(KBASE_TL_LIFELINK_AS_GPU),
+		"address space is deleted with gpu",
+		"@pp",
+		"address_space,gpu"
+	},
+	{
+		KBASE_TL_RET_CTX_LPU,
+		__stringify(KBASE_TL_RET_CTX_LPU),
+		"context is retained by lpu",
+		"@pp",
+		"ctx,lpu"
+	},
+	{
+		KBASE_TL_RET_ATOM_CTX,
+		__stringify(KBASE_TL_RET_ATOM_CTX),
+		"atom is retained by context",
+		"@pp",
+		"atom,ctx"
+	},
+	{
+		KBASE_TL_RET_ATOM_LPU,
+		__stringify(KBASE_TL_RET_ATOM_LPU),
+		"atom is retained by lpu",
+		"@pps",
+		"atom,lpu,attrib_match_list"
+	},
+	{
+		KBASE_TL_NRET_CTX_LPU,
+		__stringify(KBASE_TL_NRET_CTX_LPU),
+		"context is released by lpu",
+		"@pp",
+		"ctx,lpu"
+	},
+	{
+		KBASE_TL_NRET_ATOM_CTX,
+		__stringify(KBASE_TL_NRET_ATOM_CTX),
+		"atom is released by context",
+		"@pp",
+		"atom,ctx"
+	},
+	{
+		KBASE_TL_NRET_ATOM_LPU,
+		__stringify(KBASE_TL_NRET_ATOM_LPU),
+		"atom is released by lpu",
+		"@pp",
+		"atom,lpu"
+	},
+	{
+		KBASE_TL_RET_AS_CTX,
+		__stringify(KBASE_TL_RET_AS_CTX),
+		"address space is retained by context",
+		"@pp",
+		"address_space,ctx"
+	},
+	{
+		KBASE_TL_NRET_AS_CTX,
+		__stringify(KBASE_TL_NRET_AS_CTX),
+		"address space is released by context",
+		"@pp",
+		"address_space,ctx"
+	},
+	{
+		KBASE_TL_RET_ATOM_AS,
+		__stringify(KBASE_TL_RET_ATOM_AS),
+		"atom is retained by address space",
+		"@pp",
+		"atom,address_space"
+	},
+	{
+		KBASE_TL_NRET_ATOM_AS,
+		__stringify(KBASE_TL_NRET_ATOM_AS),
+		"atom is released by address space",
+		"@pp",
+		"atom,address_space"
+	},
+	{
+		KBASE_TL_ATTRIB_ATOM_CONFIG,
+		__stringify(KBASE_TL_ATTRIB_ATOM_CONFIG),
+		"atom job slot attributes",
+		"@pLLI",
+		"atom,descriptor,affinity,config"
+	},
+	{
+		KBASE_TL_ATTRIB_ATOM_PRIORITY,
+		__stringify(KBASE_TL_ATTRIB_ATOM_PRIORITY),
+		"atom priority",
+		"@pI",
+		"atom,prio"
+	},
+	{
+		KBASE_TL_ATTRIB_ATOM_STATE,
+		__stringify(KBASE_TL_ATTRIB_ATOM_STATE),
+		"atom state",
+		"@pI",
+		"atom,state"
+	},
+	{
+		KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
+		__stringify(KBASE_TL_ATTRIB_ATOM_PRIORITIZED),
+		"atom caused priority change",
+		"@p",
+		"atom"
+	},
+	{
+		KBASE_TL_ATTRIB_ATOM_JIT,
+		__stringify(KBASE_TL_ATTRIB_ATOM_JIT),
+		"jit done for atom",
+		"@pLLL",
+		"atom,edit_addr,new_addr,va_pages"
+	},
+	{
+		KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
+		__stringify(KBASE_TL_ATTRIB_ATOM_JITALLOCINFO),
+		"Information about JIT allocations",
+		"@pLLLIIIII",
+		"atom,va_pgs,com_pgs,extent,j_id,bin_id,max_allocs,flags,usg_id"
+	},
+	{
+		KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
+		__stringify(KBASE_TL_ATTRIB_ATOM_JITFREEINFO),
+		"Information about JIT frees",
+		"@pI",
+		"atom,j_id"
+	},
+	{
+		KBASE_TL_ATTRIB_AS_CONFIG,
+		__stringify(KBASE_TL_ATTRIB_AS_CONFIG),
+		"address space attributes",
+		"@pLLL",
+		"address_space,transtab,memattr,transcfg"
+	},
+	{
+		KBASE_TL_EVENT_LPU_SOFTSTOP,
+		__stringify(KBASE_TL_EVENT_LPU_SOFTSTOP),
+		"softstop event on given lpu",
+		"@p",
+		"lpu"
+	},
+	{
+		KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
+		__stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX),
+		"atom softstopped",
+		"@p",
+		"atom"
+	},
+	{
+		KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+		__stringify(KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE),
+		"atom softstop issued",
+		"@p",
+		"atom"
+	},
+	{
+		KBASE_TL_EVENT_ATOM_SOFTJOB_START,
+		__stringify(KBASE_TL_EVENT_ATOM_SOFTJOB_START),
+		"atom soft job has started",
+		"@p",
+		"atom"
+	},
+	{
+		KBASE_TL_EVENT_ATOM_SOFTJOB_END,
+		__stringify(KBASE_TL_EVENT_ATOM_SOFTJOB_END),
+		"atom soft job has completed",
+		"@p",
+		"atom"
+	},
+	{
+		KBASE_JD_GPU_SOFT_RESET,
+		__stringify(KBASE_JD_GPU_SOFT_RESET),
+		"gpu soft reset",
+		"@p",
+		"gpu"
+	},
+};
+
+/* Descriptors of timeline messages transmitted in auxiliary events stream. */
+static const struct tp_desc tp_desc_aux[] = {
+	{
+		KBASE_AUX_PM_STATE,
+		__stringify(KBASE_AUX_PM_STATE),
+		"PM state",
+		"@IL",
+		"core_type,core_state_bitset"
+	},
+	{
+		KBASE_AUX_PAGEFAULT,
+		__stringify(KBASE_AUX_PAGEFAULT),
+		"Page fault",
+		"@IL",
+		"ctx_nr,page_cnt_change"
+	},
+	{
+		KBASE_AUX_PAGESALLOC,
+		__stringify(KBASE_AUX_PAGESALLOC),
+		"Total alloc pages change",
+		"@IL",
+		"ctx_nr,page_cnt"
+	},
+	{
+		KBASE_AUX_DEVFREQ_TARGET,
+		__stringify(KBASE_AUX_DEVFREQ_TARGET),
+		"New device frequency target",
+		"@L",
+		"target_freq"
+	},
+	{
+		KBASE_AUX_PROTECTED_ENTER_START,
+		__stringify(KBASE_AUX_PROTECTED_ENTER_START),
+		"enter protected mode start",
+		"@p",
+		"gpu"
+	},
+	{
+		KBASE_AUX_PROTECTED_ENTER_END,
+		__stringify(KBASE_AUX_PROTECTED_ENTER_END),
+		"enter protected mode end",
+		"@p",
+		"gpu"
+	},
+	{
+		KBASE_AUX_PROTECTED_LEAVE_START,
+		__stringify(KBASE_AUX_PROTECTED_LEAVE_START),
+		"leave protected mode start",
+		"@p",
+		"gpu"
+	},
+	{
+		KBASE_AUX_PROTECTED_LEAVE_END,
+		__stringify(KBASE_AUX_PROTECTED_LEAVE_END),
+		"leave protected mode end",
+		"@p",
+		"gpu"
+	},
+	{
+		KBASE_AUX_JIT_STATS,
+		__stringify(KBASE_AUX_JIT_STATS),
+		"per-bin JIT statistics",
+		"@IIIIII",
+		"ctx_nr,bid,max_allocs,allocs,va_pages,ph_pages"
+	}
+};
+
+#if MALI_UNIT_TEST
+/* Number of bytes read by user. */
+static atomic_t tlstream_bytes_collected = {0};
+
+/* Number of bytes generated by tracepoint messages. */
+static atomic_t tlstream_bytes_generated = {0};
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+/* Indicator of whether the timeline stream file descriptor is used. */
+atomic_t kbase_tlstream_enabled = {0};
+
+/*****************************************************************************/
+
+/**
+ * kbasep_tlstream_get_timestamp - return timestamp
+ *
+ * Function returns a timestamp value based on the raw monotonic timer. The
+ * value will wrap around zero in case of overflow.
+ * Return: timestamp value
+ */
+static u64 kbasep_tlstream_get_timestamp(void)
+{
+	struct timespec64 ts;
+	u64             timestamp;
+
+	ktime_get_raw_ts64(&ts);
+	timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
+	return timestamp;
+}
+
+/**
+ * kbasep_tlstream_write_bytes - write data to message buffer
+ * @buffer: buffer where data will be written
+ * @pos:    position in the buffer where to place data
+ * @bytes:  pointer to buffer holding data
+ * @len:    length of data to be written
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_bytes(
+		char       *buffer,
+		size_t     pos,
+		const void *bytes,
+		size_t     len)
+{
+	KBASE_DEBUG_ASSERT(buffer);
+	KBASE_DEBUG_ASSERT(bytes);
+
+	memcpy(&buffer[pos], bytes, len);
+
+	return pos + len;
+}
+
+/**
+ * kbasep_tlstream_write_string - write string to message buffer
+ * @buffer:         buffer where data will be written
+ * @pos:            position in the buffer where to place data
+ * @string:         pointer to buffer holding the source string
+ * @max_write_size: number of bytes that can be stored in buffer
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_string(
+		char       *buffer,
+		size_t     pos,
+		const char *string,
+		size_t     max_write_size)
+{
+	u32 string_len;
+
+	KBASE_DEBUG_ASSERT(buffer);
+	KBASE_DEBUG_ASSERT(string);
+	/* Timeline string consists of at least string length and nul
+	 * terminator. */
+	KBASE_DEBUG_ASSERT(max_write_size >= sizeof(string_len) + sizeof(char));
+	max_write_size -= sizeof(string_len);
+
+	string_len = strlcpy(
+			&buffer[pos + sizeof(string_len)],
+			string,
+			max_write_size);
+	string_len += sizeof(char);
+
+	/* Make sure that the source string fits into the buffer. */
+	KBASE_DEBUG_ASSERT(string_len <= max_write_size);
+
+	/* Update string length. */
+	memcpy(&buffer[pos], &string_len, sizeof(string_len));
+
+	return pos + sizeof(string_len) + string_len;
+}
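+
+/* Encoding example (illustrative, little-endian host): writing the string
+ * "ctx" stores the bytes 04 00 00 00 'c' 't' 'x' '\0' at @pos, i.e. a u32
+ * length that includes the nul terminator, followed by the characters.
+ */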
+
+/**
+ * kbasep_tlstream_write_timestamp - write timestamp to message buffer
+ * @buffer: buffer where data will be written
+ * @pos:    position in the buffer where to place data
+ *
+ * Return: updated position in the buffer
+ */
+static size_t kbasep_tlstream_write_timestamp(void *buffer, size_t pos)
+{
+	u64 timestamp = kbasep_tlstream_get_timestamp();
+
+	return kbasep_tlstream_write_bytes(
+			buffer, pos,
+			&timestamp, sizeof(timestamp));
+}
+
+/**
+ * kbasep_tlstream_put_bits - put bits in a word
+ * @word:   pointer to the words being modified
+ * @value:  value that shall be written to given position
+ * @bitpos: position where value shall be written (in bits)
+ * @bitlen: length of value (in bits)
+ */
+static void kbasep_tlstream_put_bits(
+		u32          *word,
+		u32          value,
+		unsigned int bitpos,
+		unsigned int bitlen)
+{
+	const u32 mask = ((1 << bitlen) - 1) << bitpos;
+
+	KBASE_DEBUG_ASSERT(word);
+	KBASE_DEBUG_ASSERT((0 != bitlen) && (32 >= bitlen));
+	KBASE_DEBUG_ASSERT((bitpos + bitlen) <= 32);
+
+	*word &= ~mask;
+	*word |= ((value << bitpos) & mask);
+}
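+
+/* For example (illustrative), marking a packet as sequentially numbered
+ * amounts to kbasep_tlstream_put_bits(&word1, 1, PACKET_SEQBIT_POS,
+ * PACKET_SEQBIT_LEN), which sets bit 24 of the second header word; this is
+ * exactly what kbasep_tlstream_packet_header_setup() below does for numbered
+ * streams.
+ */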
+
+/**
+ * kbasep_tlstream_packet_header_setup - setup the packet header
+ * @buffer:     pointer to the buffer
+ * @pkt_family: packet's family
+ * @pkt_type:   packet's type
+ * @pkt_class:  packet's class
+ * @stream_id:  stream id
+ * @numbered:   non-zero if this stream is numbered
+ *
+ * Function sets up the immutable part of the packet header in the given buffer.
+ */
+static void kbasep_tlstream_packet_header_setup(
+		char                  *buffer,
+		enum tl_packet_family pkt_family,
+		enum tl_packet_class  pkt_class,
+		enum tl_packet_type   pkt_type,
+		unsigned int          stream_id,
+		int                   numbered)
+{
+	u32 word0 = 0;
+	u32 word1 = 0;
+
+	KBASE_DEBUG_ASSERT(buffer);
+	KBASE_DEBUG_ASSERT(pkt_family == TL_PACKET_FAMILY_TL);
+	KBASE_DEBUG_ASSERT(
+			(pkt_type == TL_PACKET_TYPE_HEADER)  ||
+			(pkt_type == TL_PACKET_TYPE_SUMMARY) ||
+			(pkt_type == TL_PACKET_TYPE_BODY));
+	KBASE_DEBUG_ASSERT(
+			(pkt_class == TL_PACKET_CLASS_OBJ) ||
+			(pkt_class == TL_PACKET_CLASS_AUX));
+
+	kbasep_tlstream_put_bits(
+			&word0, pkt_family,
+			PACKET_FAMILY_POS, PACKET_FAMILY_LEN);
+	kbasep_tlstream_put_bits(
+			&word0, pkt_class,
+			PACKET_CLASS_POS, PACKET_CLASS_LEN);
+	kbasep_tlstream_put_bits(
+			&word0, pkt_type,
+			PACKET_TYPE_POS, PACKET_TYPE_LEN);
+	kbasep_tlstream_put_bits(
+			&word0, stream_id,
+			PACKET_STREAMID_POS, PACKET_STREAMID_LEN);
+
+	if (numbered)
+		kbasep_tlstream_put_bits(
+				&word1, 1,
+				PACKET_SEQBIT_POS, PACKET_SEQBIT_LEN);
+
+	memcpy(&buffer[0],             &word0, sizeof(word0));
+	memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_tlstream_packet_header_update - update the packet header
+ * @buffer:    pointer to the buffer
+ * @data_size: amount of data carried in this packet
+ *
+ * Function updates the mutable part of the packet header in the given buffer.
+ * Note that the value of data_size must not include the size of the header.
+ */
+static void kbasep_tlstream_packet_header_update(
+		char   *buffer,
+		size_t data_size)
+{
+	u32 word0;
+	u32 word1;
+
+	KBASE_DEBUG_ASSERT(buffer);
+	CSTD_UNUSED(word0);
+
+	memcpy(&word1, &buffer[sizeof(word0)], sizeof(word1));
+
+	kbasep_tlstream_put_bits(
+			&word1, data_size,
+			PACKET_LENGTH_POS, PACKET_LENGTH_LEN);
+
+	memcpy(&buffer[sizeof(word0)], &word1, sizeof(word1));
+}
+
+/**
+ * kbasep_tlstream_packet_number_update - update the packet number
+ * @buffer:  pointer to the buffer
+ * @counter: value of packet counter for this packet's stream
+ *
+ * Function updates packet number embedded within the packet placed in the
+ * given buffer.
+ */
+static void kbasep_tlstream_packet_number_update(char *buffer, u32 counter)
+{
+	KBASE_DEBUG_ASSERT(buffer);
+
+	memcpy(&buffer[PACKET_HEADER_SIZE], &counter, sizeof(counter));
+}
+
+/**
+ * kbasep_timeline_stream_reset - reset stream
+ * @stream:  pointer to the stream structure
+ *
+ * Function discards all pending messages and resets packet counters.
+ */
+static void kbasep_timeline_stream_reset(struct tl_stream *stream)
+{
+	unsigned int i;
+
+	for (i = 0; i < PACKET_COUNT; i++) {
+		if (stream->numbered)
+			atomic_set(
+					&stream->buffer[i].size,
+					PACKET_HEADER_SIZE +
+					PACKET_NUMBER_SIZE);
+		else
+			atomic_set(&stream->buffer[i].size, PACKET_HEADER_SIZE);
+	}
+
+	atomic_set(&stream->wbi, 0);
+	atomic_set(&stream->rbi, 0);
+}
+
+/**
+ * kbasep_timeline_stream_init - initialize timeline stream
+ * @stream:      pointer to the stream structure
+ * @stream_type: stream type
+ */
+static void kbasep_timeline_stream_init(
+		struct tl_stream    *stream,
+		enum tl_stream_type stream_type)
+{
+	unsigned int i;
+
+	KBASE_DEBUG_ASSERT(stream);
+	KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+	spin_lock_init(&stream->lock);
+
+	/* All packets carrying tracepoints shall be numbered. */
+	if (TL_PACKET_TYPE_BODY == tl_stream_cfg[stream_type].pkt_type)
+		stream->numbered = 1;
+	else
+		stream->numbered = 0;
+
+	for (i = 0; i < PACKET_COUNT; i++)
+		kbasep_tlstream_packet_header_setup(
+				stream->buffer[i].data,
+				tl_stream_cfg[stream_type].pkt_family,
+				tl_stream_cfg[stream_type].pkt_class,
+				tl_stream_cfg[stream_type].pkt_type,
+				tl_stream_cfg[stream_type].stream_id,
+				stream->numbered);
+
+	kbasep_timeline_stream_reset(stream);
+}
+
+/**
+ * kbasep_timeline_stream_term - terminate timeline stream
+ * @stream: pointer to the stream structure
+ */
+static void kbasep_timeline_stream_term(struct tl_stream *stream)
+{
+	KBASE_DEBUG_ASSERT(stream);
+}
+
+/**
+ * kbasep_tlstream_msgbuf_submit - submit packet to the user space
+ * @stream:     pointer to the stream structure
+ * @wb_idx_raw: write buffer index
+ * @wb_size:    length of data stored in current buffer
+ *
+ * Function updates the currently written buffer with the packet header. Then
+ * the write index is incremented and the buffer is handed off to user space.
+ * The length of data in the newly selected buffer is returned.
+ *
+ * Return: length of data in new buffer
+ *
+ * Warning:  User must update the stream structure with returned value.
+ */
+static size_t kbasep_tlstream_msgbuf_submit(
+		struct tl_stream *stream,
+		unsigned int      wb_idx_raw,
+		unsigned int      wb_size)
+{
+	unsigned int wb_idx = wb_idx_raw % PACKET_COUNT;
+
+	/* Set stream as flushed. */
+	atomic_set(&stream->autoflush_counter, -1);
+
+	kbasep_tlstream_packet_header_update(
+			stream->buffer[wb_idx].data,
+			wb_size - PACKET_HEADER_SIZE);
+
+	if (stream->numbered)
+		kbasep_tlstream_packet_number_update(
+				stream->buffer[wb_idx].data,
+				wb_idx_raw);
+
+	/* Increasing the write buffer index will expose this packet to the
+	 * reader. As stream->lock is not taken on the reader side we must make
+	 * sure memory is updated correctly before this happens. */
+	smp_wmb();
+	atomic_inc(&stream->wbi);
+
+	/* Inform user that packets are ready for reading. */
+	wake_up_interruptible(&tl_event_queue);
+
+	wb_size = PACKET_HEADER_SIZE;
+	if (stream->numbered)
+		wb_size += PACKET_NUMBER_SIZE;
+
+	return wb_size;
+}
+
+/**
+ * kbasep_tlstream_msgbuf_acquire - lock the selected stream and reserve a buffer
+ * @stream_type: type of the stream that shall be locked
+ * @msg_size:    message size
+ * @flags:       pointer to store flags passed back on stream release
+ *
+ * Function will lock the stream and reserve the number of bytes requested
+ * in msg_size for the user.
+ *
+ * Return: pointer to the buffer where message can be stored
+ *
+ * Warning: Stream must be released with kbasep_tlstream_msgbuf_release().
+ *          Only atomic operations are allowed while stream is locked
+ *          (i.e. do not use any operation that may sleep).
+ */
+static char *kbasep_tlstream_msgbuf_acquire(
+		enum tl_stream_type stream_type,
+		size_t              msg_size,
+		unsigned long       *flags) __acquires(&stream->lock)
+{
+	struct tl_stream *stream;
+	unsigned int     wb_idx_raw;
+	unsigned int     wb_idx;
+	size_t           wb_size;
+
+	KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+	KBASE_DEBUG_ASSERT(
+			PACKET_SIZE - PACKET_HEADER_SIZE - PACKET_NUMBER_SIZE >=
+			msg_size);
+
+	stream = tl_stream[stream_type];
+
+	spin_lock_irqsave(&stream->lock, *flags);
+
+	wb_idx_raw = atomic_read(&stream->wbi);
+	wb_idx     = wb_idx_raw % PACKET_COUNT;
+	wb_size    = atomic_read(&stream->buffer[wb_idx].size);
+
+	/* Select next buffer if data will not fit into current one. */
+	if (PACKET_SIZE < wb_size + msg_size) {
+		wb_size = kbasep_tlstream_msgbuf_submit(
+				stream, wb_idx_raw, wb_size);
+		wb_idx  = (wb_idx_raw + 1) % PACKET_COUNT;
+	}
+
+	/* Reserve space in selected buffer. */
+	atomic_set(&stream->buffer[wb_idx].size, wb_size + msg_size);
+
+#if MALI_UNIT_TEST
+	atomic_add(msg_size, &tlstream_bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+	return &stream->buffer[wb_idx].data[wb_size];
+}
+
+/**
+ * kbasep_tlstream_msgbuf_release - unlock the selected stream
+ * @stream_type:  type of the stream that shall be unlocked
+ * @flags:        value obtained during stream acquire
+ *
+ * Function releases stream that has been previously locked with a call to
+ * kbasep_tlstream_msgbuf_acquire().
+ */
+static void kbasep_tlstream_msgbuf_release(
+		enum tl_stream_type stream_type,
+		unsigned long       flags) __releases(&stream->lock)
+{
+	struct tl_stream *stream;
+
+	KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+
+	stream = tl_stream[stream_type];
+
+	/* Mark stream as containing unflushed data. */
+	atomic_set(&stream->autoflush_counter, 0);
+
+	spin_unlock_irqrestore(&stream->lock, flags);
+}
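+
+/*
+ * Hedged usage sketch: how a tracepoint writer is expected to pair the two
+ * helpers above. example_trace_u32() is hypothetical and never called; the
+ * real writers further down in this file follow exactly this pattern.
+ */
+static void __maybe_unused example_trace_u32(u32 value)
+{
+	/* Hypothetical message id; real writers use the KBASE_* identifiers. */
+	const u32     msg_id = 0;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(value);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	/* Lock the stream and reserve msg_size bytes; no sleeping from now on. */
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX, msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &value, sizeof(value));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	/* Unlock the stream and mark it as containing unflushed data. */
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}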
+
+/*****************************************************************************/
+
+/**
+ * kbasep_tlstream_flush_stream - flush stream
+ * @stype:  type of stream to be flushed
+ *
+ * Flush pending data in timeline stream.
+ */
+static void kbasep_tlstream_flush_stream(enum tl_stream_type stype)
+{
+	struct tl_stream *stream = tl_stream[stype];
+	unsigned long    flags;
+	unsigned int     wb_idx_raw;
+	unsigned int     wb_idx;
+	size_t           wb_size;
+	size_t           min_size = PACKET_HEADER_SIZE;
+
+	if (stream->numbered)
+		min_size += PACKET_NUMBER_SIZE;
+
+	spin_lock_irqsave(&stream->lock, flags);
+
+	wb_idx_raw = atomic_read(&stream->wbi);
+	wb_idx     = wb_idx_raw % PACKET_COUNT;
+	wb_size    = atomic_read(&stream->buffer[wb_idx].size);
+
+	if (wb_size > min_size) {
+		wb_size = kbasep_tlstream_msgbuf_submit(
+				stream, wb_idx_raw, wb_size);
+		wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+		atomic_set(&stream->buffer[wb_idx].size, wb_size);
+	}
+	spin_unlock_irqrestore(&stream->lock, flags);
+}
+
+/**
+ * kbasep_tlstream_autoflush_timer_callback - autoflush timer callback
+ * @timer: unused
+ *
+ * Timer is executed periodically to check if any of the streams contains a
+ * buffer ready to be submitted to user space.
+ */
+static void kbasep_tlstream_autoflush_timer_callback(struct timer_list *timer)
+{
+	enum tl_stream_type stype;
+	int                 rcode;
+
+	CSTD_UNUSED(timer);
+
+	for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) {
+		struct tl_stream *stream = tl_stream[stype];
+		unsigned long    flags;
+		unsigned int     wb_idx_raw;
+		unsigned int     wb_idx;
+		size_t           wb_size;
+		size_t           min_size = PACKET_HEADER_SIZE;
+
+		int af_cnt = atomic_read(&stream->autoflush_counter);
+
+		/* Check if the stream contains unflushed data. */
+		if (0 > af_cnt)
+			continue;
+
+		/* Check if stream should be flushed now. */
+		if (af_cnt != atomic_cmpxchg(
+					&stream->autoflush_counter,
+					af_cnt,
+					af_cnt + 1))
+			continue;
+		if (!af_cnt)
+			continue;
+
+		/* Autoflush this stream. */
+		if (stream->numbered)
+			min_size += PACKET_NUMBER_SIZE;
+
+		spin_lock_irqsave(&stream->lock, flags);
+
+		wb_idx_raw = atomic_read(&stream->wbi);
+		wb_idx     = wb_idx_raw % PACKET_COUNT;
+		wb_size    = atomic_read(&stream->buffer[wb_idx].size);
+
+		if (wb_size > min_size) {
+			wb_size = kbasep_tlstream_msgbuf_submit(
+					stream, wb_idx_raw, wb_size);
+			wb_idx = (wb_idx_raw + 1) % PACKET_COUNT;
+			atomic_set(&stream->buffer[wb_idx].size,
+					wb_size);
+		}
+		spin_unlock_irqrestore(&stream->lock, flags);
+	}
+
+	if (atomic_read(&autoflush_timer_active))
+		rcode = mod_timer(
+				&autoflush_timer,
+				jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+	CSTD_UNUSED(rcode);
+}
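+
+/*
+ * Illustrative note on the autoflush_counter protocol implemented above
+ * (derived from this file rather than external documentation):
+ *   -1  stream fully flushed (set by kbasep_tlstream_msgbuf_submit());
+ *    0  stream written to since the last flush (set on msgbuf_release());
+ *   >0  stream has stayed dirty for at least one full timer period.
+ * The callback advances 0 -> 1 with cmpxchg and skips that tick, so a freshly
+ * written packet gets at least one AUTOFLUSH_INTERVAL of grace before it is
+ * pushed out to the reader.
+ */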
+
+/**
+ * kbasep_tlstream_packet_pending - check timeline streams for pending packets
+ * @stype:      pointer to variable where stream type will be placed
+ * @rb_idx_raw: pointer to variable where read buffer index will be placed
+ *
+ * Function checks all streams for pending packets. It stops as soon as a
+ * packet ready to be submitted to user space is detected. The variables
+ * passed by pointer to this function are updated to identify that stream
+ * and buffer.
+ *
+ * Return: non-zero if any of the timeline streams has at least one packet ready
+ */
+static int kbasep_tlstream_packet_pending(
+		enum tl_stream_type *stype,
+		unsigned int        *rb_idx_raw)
+{
+	int pending = 0;
+
+	KBASE_DEBUG_ASSERT(stype);
+	KBASE_DEBUG_ASSERT(rb_idx_raw);
+
+	for (
+			*stype = 0;
+			(*stype < TL_STREAM_TYPE_COUNT) && !pending;
+			(*stype)++) {
+		if (NULL != tl_stream[*stype]) {
+			*rb_idx_raw = atomic_read(&tl_stream[*stype]->rbi);
+			/* The read buffer index may be updated by the writer in
+			 * case of overflow. Read and write buffer indexes must
+			 * be loaded in the correct order. */
+			smp_rmb();
+			if (atomic_read(&tl_stream[*stype]->wbi) != *rb_idx_raw)
+				pending = 1;
+		}
+	}
+	(*stype)--;
+
+	return pending;
+}
+
+/**
+ * kbasep_tlstream_read - copy data from streams to buffer provided by user
+ * @filp:   pointer to file structure (unused)
+ * @buffer: pointer to the buffer provided by user
+ * @size:   maximum amount of data that can be stored in the buffer
+ * @f_pos:  pointer to file offset (unused)
+ *
+ * Return: number of bytes stored in the buffer
+ */
+static ssize_t kbasep_tlstream_read(
+		struct file *filp,
+		char __user *buffer,
+		size_t      size,
+		loff_t      *f_pos)
+{
+	ssize_t copy_len = 0;
+
+	KBASE_DEBUG_ASSERT(filp);
+	KBASE_DEBUG_ASSERT(f_pos);
+
+	if (!buffer)
+		return -EINVAL;
+
+	if ((0 > *f_pos) || (PACKET_SIZE > size))
+		return -EINVAL;
+
+	mutex_lock(&tl_reader_lock);
+
+	while (copy_len < size) {
+		enum tl_stream_type stype;
+		unsigned int        rb_idx_raw = 0;
+		unsigned int        wb_idx_raw;
+		unsigned int        rb_idx;
+		size_t              rb_size;
+
+		/* If we don't have any data yet, wait for a packet to be
+		 * submitted. If we have already read some packets and no
+		 * packet is pending, return to the user. */
+		if (0 < copy_len) {
+			if (!kbasep_tlstream_packet_pending(
+						&stype,
+						&rb_idx_raw))
+				break;
+		} else {
+			if (wait_event_interruptible(
+						tl_event_queue,
+						kbasep_tlstream_packet_pending(
+							&stype,
+							&rb_idx_raw))) {
+				copy_len = -ERESTARTSYS;
+				break;
+			}
+		}
+
+		/* Check if this packet fits into the user buffer.
+		 * If so, copy its content. */
+		rb_idx = rb_idx_raw % PACKET_COUNT;
+		rb_size = atomic_read(&tl_stream[stype]->buffer[rb_idx].size);
+		if (rb_size > size - copy_len)
+			break;
+		if (copy_to_user(
+					&buffer[copy_len],
+					tl_stream[stype]->buffer[rb_idx].data,
+					rb_size)) {
+			copy_len = -EFAULT;
+			break;
+		}
+
+		/* If the distance between read buffer index and write
+		 * buffer index became more than PACKET_COUNT, then overflow
+		 * happened and we need to ignore the last portion of bytes
+		 * that we have just sent to user.
+		 */
+		smp_rmb();
+		wb_idx_raw = atomic_read(&tl_stream[stype]->wbi);
+
+		if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
+			copy_len += rb_size;
+			atomic_inc(&tl_stream[stype]->rbi);
+#if MALI_UNIT_TEST
+			atomic_add(rb_size, &tlstream_bytes_collected);
+#endif /* MALI_UNIT_TEST */
+
+		} else {
+			const unsigned int new_rb_idx_raw =
+				wb_idx_raw - PACKET_COUNT + 1;
+			/* Adjust read buffer index to the next valid buffer */
+			atomic_set(&tl_stream[stype]->rbi, new_rb_idx_raw);
+		}
+	}
+
+	mutex_unlock(&tl_reader_lock);
+
+	return copy_len;
+}
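+
+/*
+ * Hedged userspace sketch (illustrative only, never compiled into the
+ * driver): how a reader of the descriptor returned by kbase_tlstream_acquire()
+ * might consume packets. read() requires a buffer of at least PACKET_SIZE
+ * bytes (smaller buffers fail with -EINVAL) and poll() reports POLLIN once a
+ * complete packet is pending. TLSTREAM_READ_BUF_SIZE is a placeholder assumed
+ * to be at least as large as the kernel's PACKET_SIZE.
+ */
+#if 0
+#include <poll.h>
+#include <unistd.h>
+
+#define TLSTREAM_READ_BUF_SIZE (64 * 1024)
+
+static void drain_tlstream(int fd)
+{
+	static char buf[TLSTREAM_READ_BUF_SIZE];
+	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
+		ssize_t n = read(fd, buf, sizeof(buf));
+
+		if (n <= 0)
+			break;
+		/* Parse the whole packets contained in buf[0..n) here. */
+	}
+}
+#endif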
+
+/**
+ * kbasep_tlstream_poll - poll timeline stream for packets
+ * @filp: pointer to file structure
+ * @wait: pointer to poll table
+ *
+ * Return: POLLIN if data can be read without blocking, otherwise zero
+ */
+static unsigned int kbasep_tlstream_poll(struct file *filp, poll_table *wait)
+{
+	enum tl_stream_type stream_type;
+	unsigned int        rb_idx;
+
+	KBASE_DEBUG_ASSERT(filp);
+	KBASE_DEBUG_ASSERT(wait);
+
+	poll_wait(filp, &tl_event_queue, wait);
+	if (kbasep_tlstream_packet_pending(&stream_type, &rb_idx))
+		return POLLIN;
+	return 0;
+}
+
+/**
+ * kbasep_tlstream_release - release timeline stream descriptor
+ * @inode: pointer to inode structure
+ * @filp:  pointer to file structure
+ *
+ * Return: always returns zero
+ */
+static int kbasep_tlstream_release(struct inode *inode, struct file *filp)
+{
+	KBASE_DEBUG_ASSERT(inode);
+	KBASE_DEBUG_ASSERT(filp);
+	CSTD_UNUSED(inode);
+	CSTD_UNUSED(filp);
+
+	/* Stop autoflush timer before releasing access to streams. */
+	atomic_set(&autoflush_timer_active, 0);
+	del_timer_sync(&autoflush_timer);
+
+	atomic_set(&kbase_tlstream_enabled, 0);
+	return 0;
+}
+
+/**
+ * kbasep_tlstream_timeline_header - prepare timeline header stream packet
+ * @stream_type: type of the stream that will carry header data
+ * @tp_desc:     pointer to array with tracepoint descriptors
+ * @tp_count:    number of descriptors in the given array
+ *
+ * Function fills in information about the tracepoints stored in the body
+ * stream associated with this header stream.
+ */
+static void kbasep_tlstream_timeline_header(
+		enum tl_stream_type  stream_type,
+		const struct tp_desc *tp_desc,
+		u32                  tp_count)
+{
+	const u8      tv = SWTRACE_VERSION; /* protocol version */
+	const u8      ps = sizeof(void *); /* pointer size */
+	size_t        msg_size = sizeof(tv) + sizeof(ps) + sizeof(tp_count);
+	char          *buffer;
+	size_t        pos = 0;
+	unsigned long flags;
+	unsigned int  i;
+
+	KBASE_DEBUG_ASSERT(TL_STREAM_TYPE_COUNT > stream_type);
+	KBASE_DEBUG_ASSERT(tp_desc);
+
+	/* Calculate the size of the timeline message. */
+	for (i = 0; i < tp_count; i++) {
+		msg_size += sizeof(tp_desc[i].id);
+		msg_size +=
+			strnlen(tp_desc[i].id_str,    STRLEN_MAX) +
+			sizeof(char) + sizeof(u32);
+		msg_size +=
+			strnlen(tp_desc[i].name,      STRLEN_MAX) +
+			sizeof(char) + sizeof(u32);
+		msg_size +=
+			strnlen(tp_desc[i].arg_types, STRLEN_MAX) +
+			sizeof(char) + sizeof(u32);
+		msg_size +=
+			strnlen(tp_desc[i].arg_names, STRLEN_MAX) +
+			sizeof(char) + sizeof(u32);
+	}
+
+	KBASE_DEBUG_ASSERT(PACKET_SIZE - PACKET_HEADER_SIZE >= msg_size);
+
+	buffer = kbasep_tlstream_msgbuf_acquire(stream_type, msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &tv, sizeof(tv));
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &ps, sizeof(ps));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &tp_count, sizeof(tp_count));
+
+	for (i = 0; i < tp_count; i++) {
+		pos = kbasep_tlstream_write_bytes(
+				buffer, pos,
+				&tp_desc[i].id, sizeof(tp_desc[i].id));
+		pos = kbasep_tlstream_write_string(
+				buffer, pos,
+				tp_desc[i].id_str, msg_size - pos);
+		pos = kbasep_tlstream_write_string(
+				buffer, pos,
+				tp_desc[i].name, msg_size - pos);
+		pos = kbasep_tlstream_write_string(
+				buffer, pos,
+				tp_desc[i].arg_types, msg_size - pos);
+		pos = kbasep_tlstream_write_string(
+				buffer, pos,
+				tp_desc[i].arg_names, msg_size - pos);
+	}
+
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(stream_type, flags);
+
+	/* We don't expect any more data to be read in this stream.
+	 * As header stream must be read before its associated body stream,
+	 * make this packet visible to the user straightaway. */
+	kbasep_tlstream_flush_stream(stream_type);
+}
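+
+/*
+ * Illustrative note: the header packet body produced above is laid out as a
+ * u8 protocol version (SWTRACE_VERSION), a u8 pointer size and a u32
+ * descriptor count, followed by one record per tracepoint: its u32 id and the
+ * id_str, name, arg_types and arg_names strings. The sizeof(char) +
+ * sizeof(u32) terms in the size calculation suggest each string is written
+ * with a length word and a NUL terminator, but the exact encoding is defined
+ * by kbasep_tlstream_write_string() elsewhere in this file.
+ */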
+
+/*****************************************************************************/
+
+int kbase_tlstream_init(void)
+{
+	enum tl_stream_type i;
+
+	/* Prepare stream structures. */
+	for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) {
+		tl_stream[i] = kmalloc(sizeof(**tl_stream), GFP_KERNEL);
+		if (!tl_stream[i])
+			break;
+		kbasep_timeline_stream_init(tl_stream[i], i);
+	}
+	if (TL_STREAM_TYPE_COUNT > i) {
+		for (; i > 0; i--) {
+			kbasep_timeline_stream_term(tl_stream[i - 1]);
+			kfree(tl_stream[i - 1]);
+		}
+		return -ENOMEM;
+	}
+
+	/* Initialize autoflush timer. */
+	atomic_set(&autoflush_timer_active, 0);
+	kbase_timer_setup(&autoflush_timer,
+			  kbasep_tlstream_autoflush_timer_callback);
+
+	return 0;
+}
+
+void kbase_tlstream_term(void)
+{
+	enum tl_stream_type i;
+
+	for (i = 0; i < TL_STREAM_TYPE_COUNT; i++) {
+		kbasep_timeline_stream_term(tl_stream[i]);
+		kfree(tl_stream[i]);
+	}
+}
+
+static void kbase_create_timeline_objects(struct kbase_context *kctx)
+{
+	struct kbase_device             *kbdev = kctx->kbdev;
+	unsigned int                    lpu_id;
+	unsigned int                    as_nr;
+	struct kbasep_kctx_list_element *element;
+
+	/* Create LPU objects. */
+	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+		u32 *lpu =
+			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+		KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(lpu, lpu_id, *lpu);
+	}
+
+	/* Create Address Space objects. */
+	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+		KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(&kbdev->as[as_nr], as_nr);
+
+	/* Create GPU object and make it retain all LPUs and address spaces. */
+	KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(
+			kbdev,
+			kbdev->gpu_props.props.raw_props.gpu_id,
+			kbdev->gpu_props.num_cores);
+
+	for (lpu_id = 0; lpu_id < kbdev->gpu_props.num_job_slots; lpu_id++) {
+		void *lpu =
+			&kbdev->gpu_props.props.raw_props.js_features[lpu_id];
+		KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(lpu, kbdev);
+	}
+	for (as_nr = 0; as_nr < kbdev->nr_hw_address_spaces; as_nr++)
+		KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU(
+				&kbdev->as[as_nr],
+				kbdev);
+
+	/* Create object for each known context. */
+	mutex_lock(&kbdev->kctx_list_lock);
+	list_for_each_entry(element, &kbdev->kctx_list, link) {
+		KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX(
+				element->kctx,
+				element->kctx->id,
+				(u32)(element->kctx->tgid));
+	}
+	/* Before releasing the lock, reset the body stream buffers.
+	 * This prevents the context creation messages from being directed to
+	 * both the summary and body streams.
+	 */
+	kbase_tlstream_reset_body_streams();
+	mutex_unlock(&kbdev->kctx_list_lock);
+	/* Static objects are placed into the summary packet, which needs to be
+	 * transmitted first. Flush all streams to make it available to
+	 * user space.
+	 */
+	kbase_tlstream_flush_streams();
+}
+
+int kbase_tlstream_acquire(struct kbase_context *kctx, u32 flags)
+{
+	int ret;
+	u32 tlstream_enabled = TLSTREAM_ENABLED | flags;
+
+	if (0 == atomic_cmpxchg(&kbase_tlstream_enabled, 0, tlstream_enabled)) {
+		int rcode;
+
+		ret = anon_inode_getfd(
+				"[mali_tlstream]",
+				&kbasep_tlstream_fops,
+				kctx,
+				O_RDONLY | O_CLOEXEC);
+		if (ret < 0) {
+			atomic_set(&kbase_tlstream_enabled, 0);
+			return ret;
+		}
+
+		/* Reset and initialize header streams. */
+		kbasep_timeline_stream_reset(
+				tl_stream[TL_STREAM_TYPE_OBJ_HEADER]);
+		kbasep_timeline_stream_reset(
+				tl_stream[TL_STREAM_TYPE_OBJ_SUMMARY]);
+		kbasep_timeline_stream_reset(
+				tl_stream[TL_STREAM_TYPE_AUX_HEADER]);
+		kbasep_tlstream_timeline_header(
+				TL_STREAM_TYPE_OBJ_HEADER,
+				tp_desc_obj,
+				ARRAY_SIZE(tp_desc_obj));
+		kbasep_tlstream_timeline_header(
+				TL_STREAM_TYPE_AUX_HEADER,
+				tp_desc_aux,
+				ARRAY_SIZE(tp_desc_aux));
+
+		/* Start autoflush timer. */
+		atomic_set(&autoflush_timer_active, 1);
+		rcode = mod_timer(
+				&autoflush_timer,
+				jiffies + msecs_to_jiffies(AUTOFLUSH_INTERVAL));
+		CSTD_UNUSED(rcode);
+
+		/* If job dumping is enabled, readjust the software event's
+		 * timeout as the default value of 3 seconds is often
+		 * insufficient. */
+		if (flags & BASE_TLSTREAM_JOB_DUMPING_ENABLED) {
+			dev_info(kctx->kbdev->dev,
+					"Job dumping is enabled, readjusting the software event's timeout\n");
+			atomic_set(&kctx->kbdev->js_data.soft_job_timeout_ms,
+					1800000);
+		}
+
+		/* Summary stream was cleared during acquire.
+		 * Create static timeline objects that will be
+		 * read by client.
+		 */
+		kbase_create_timeline_objects(kctx);
+
+	} else {
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+void kbase_tlstream_flush_streams(void)
+{
+	enum tl_stream_type stype;
+
+	for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++)
+		kbasep_tlstream_flush_stream(stype);
+}
+
+void kbase_tlstream_reset_body_streams(void)
+{
+	kbasep_timeline_stream_reset(
+			tl_stream[TL_STREAM_TYPE_OBJ]);
+	kbasep_timeline_stream_reset(
+			tl_stream[TL_STREAM_TYPE_AUX]);
+}
+
+#if MALI_UNIT_TEST
+void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated)
+{
+	KBASE_DEBUG_ASSERT(bytes_collected);
+	KBASE_DEBUG_ASSERT(bytes_generated);
+	*bytes_collected = atomic_read(&tlstream_bytes_collected);
+	*bytes_generated = atomic_read(&tlstream_bytes_generated);
+}
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid)
+{
+	const u32     msg_id = KBASE_TL_NEW_CTX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
+		sizeof(tgid);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ_SUMMARY,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &context, sizeof(context));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &nr, sizeof(nr));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &tgid, sizeof(tgid));
+
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count)
+{
+	const u32     msg_id = KBASE_TL_NEW_GPU;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(gpu) + sizeof(id) +
+		sizeof(core_count);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ_SUMMARY,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &id, sizeof(id));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &core_count, sizeof(core_count));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn)
+{
+	const u32     msg_id = KBASE_TL_NEW_LPU;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(nr) +
+		sizeof(fn);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ_SUMMARY,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &lpu, sizeof(lpu));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &nr, sizeof(nr));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &fn, sizeof(fn));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu)
+{
+	const u32     msg_id = KBASE_TL_LIFELINK_LPU_GPU;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(lpu) + sizeof(gpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ_SUMMARY,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &lpu, sizeof(lpu));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr)
+{
+	const u32     msg_id = KBASE_TL_NEW_AS;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(nr);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ_SUMMARY,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &as, sizeof(as));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &nr, sizeof(nr));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu)
+{
+	const u32     msg_id = KBASE_TL_LIFELINK_AS_GPU;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(gpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ_SUMMARY,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &as, sizeof(as));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ_SUMMARY, flags);
+}
+
+/*****************************************************************************/
+
+void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid)
+{
+	const u32     msg_id = KBASE_TL_NEW_CTX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(nr) +
+		sizeof(tgid);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &context, sizeof(context));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &nr, sizeof(nr));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &tgid, sizeof(tgid));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_new_atom(void *atom, u32 nr)
+{
+	const u32     msg_id = KBASE_TL_NEW_ATOM;
+	const size_t  msg_size = sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
+			sizeof(nr);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &nr, sizeof(nr));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_del_ctx(void *context)
+{
+	const u32     msg_id = KBASE_TL_DEL_CTX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(context);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &context, sizeof(context));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_del_atom(void *atom)
+{
+	const u32     msg_id = KBASE_TL_DEL_ATOM;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu)
+{
+	const u32     msg_id = KBASE_TL_RET_CTX_LPU;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &context, sizeof(context));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &lpu, sizeof(lpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context)
+{
+	const u32     msg_id = KBASE_TL_RET_ATOM_CTX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &context, sizeof(context));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_lpu(
+		void *atom, void *lpu, const char *attrib_match_list)
+{
+	const u32     msg_id = KBASE_TL_RET_ATOM_LPU;
+	const size_t  msg_s0 = sizeof(u32) + sizeof(char) +
+			strnlen(attrib_match_list, STRLEN_MAX);
+	const size_t  msg_size =
+			sizeof(msg_id) + sizeof(u64) +
+			sizeof(atom) + sizeof(lpu) + msg_s0;
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &lpu, sizeof(lpu));
+	pos = kbasep_tlstream_write_string(
+			buffer, pos, attrib_match_list, msg_s0);
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu)
+{
+	const u32     msg_id = KBASE_TL_NRET_CTX_LPU;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(context) + sizeof(lpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &context, sizeof(context));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &lpu, sizeof(lpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context)
+{
+	const u32     msg_id = KBASE_TL_NRET_ATOM_CTX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(context);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &context, sizeof(context));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu)
+{
+	const u32     msg_id = KBASE_TL_NRET_ATOM_LPU;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(lpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &lpu, sizeof(lpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx)
+{
+	const u32     msg_id = KBASE_TL_RET_AS_CTX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &as, sizeof(as));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &ctx, sizeof(ctx));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx)
+{
+	const u32     msg_id = KBASE_TL_NRET_AS_CTX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(as) + sizeof(ctx);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &as, sizeof(as));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &ctx, sizeof(ctx));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as)
+{
+	const u32     msg_id = KBASE_TL_RET_ATOM_AS;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &as, sizeof(as));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as)
+{
+	const u32     msg_id = KBASE_TL_NRET_ATOM_AS;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(as);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &as, sizeof(as));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_config(
+		void *atom, u64 jd, u64 affinity, u32 config)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
+		sizeof(jd) + sizeof(affinity) + sizeof(config);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &jd, sizeof(jd));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &affinity, sizeof(affinity));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &config, sizeof(config));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(prio);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &prio, sizeof(prio));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(state);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &state, sizeof(state));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_prioritized(void *atom)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jit(
+		void *atom, u64 edit_addr, u64 new_addr, u64 va_pages)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom)
+		+ sizeof(edit_addr) + sizeof(new_addr) + sizeof(va_pages);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &edit_addr, sizeof(edit_addr));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &new_addr, sizeof(new_addr));
+	pos = kbasep_tlstream_write_bytes(
+		buffer, pos, &va_pages, sizeof(va_pages));
+
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+		void *atom, u64 va_pages, u64 commit_pages, u64 extent,
+		u32 jit_id, u32 bin_id, u32 max_allocations, u32 jit_flags,
+		u32 usage_id)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_ATOM_JITALLOCINFO;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) +
+		sizeof(va_pages) + sizeof(commit_pages) +
+		sizeof(extent) + sizeof(jit_id) +
+		sizeof(bin_id) + sizeof(max_allocations) +
+		sizeof(jit_flags) + sizeof(usage_id);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id,
+			sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &va_pages, sizeof(va_pages));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &commit_pages, sizeof(commit_pages));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &extent, sizeof(extent));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &jit_id, sizeof(jit_id));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &bin_id, sizeof(bin_id));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &max_allocations,
+			sizeof(max_allocations));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &jit_flags, sizeof(jit_flags));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &usage_id, sizeof(usage_id));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(void *atom, u32 jit_id)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_ATOM_JITFREEINFO;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom) + sizeof(jit_id);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id,
+			sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &jit_id, sizeof(jit_id));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+
+void __kbase_tlstream_tl_attrib_as_config(
+		void *as, u64 transtab, u64 memattr, u64 transcfg)
+{
+	const u32     msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(as) +
+		sizeof(transtab) + sizeof(memattr) + sizeof(transcfg);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &as, sizeof(as));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &transtab, sizeof(transtab));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &memattr, sizeof(memattr));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &transcfg, sizeof(transcfg));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_lpu_softstop(void *lpu)
+{
+	const u32     msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(lpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &lpu, sizeof(lpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom)
+{
+	const u32     msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom)
+{
+	const u32     msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_start(void *atom)
+{
+	const u32     msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_end(void *atom)
+{
+	const u32     msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &atom, sizeof(atom));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_jd_gpu_soft_reset(void *gpu)
+{
+	const u32     msg_id = KBASE_JD_GPU_SOFT_RESET;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_OBJ,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+/*****************************************************************************/
+
+void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state)
+{
+	const u32     msg_id = KBASE_AUX_PM_STATE;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(core_type) +
+		sizeof(state);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &core_type, sizeof(core_type));
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &state, sizeof(state));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change)
+{
+	const u32     msg_id = KBASE_AUX_PAGEFAULT;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
+		sizeof(page_count_change);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX, msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos,
+			&page_count_change, sizeof(page_count_change));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count)
+{
+	const u32     msg_id = KBASE_AUX_PAGESALLOC;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(ctx_nr) +
+		sizeof(page_count);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX, msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &page_count, sizeof(page_count));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_devfreq_target(u64 target_freq)
+{
+	const u32       msg_id = KBASE_AUX_DEVFREQ_TARGET;
+	const size_t    msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(target_freq);
+	unsigned long   flags;
+	char            *buffer;
+	size_t          pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX, msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &target_freq, sizeof(target_freq));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_start(void *gpu)
+{
+	const u32     msg_id = KBASE_AUX_PROTECTED_ENTER_START;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+void __kbase_tlstream_aux_protected_enter_end(void *gpu)
+{
+	const u32     msg_id = KBASE_AUX_PROTECTED_ENTER_END;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_start(void *gpu)
+{
+	const u32     msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+void __kbase_tlstream_aux_protected_leave_end(void *gpu)
+{
+	const u32     msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
+	const size_t  msg_size =
+		sizeof(msg_id) + sizeof(u64) + sizeof(gpu);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+			buffer, pos, &gpu, sizeof(gpu));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
+
+void __kbase_tlstream_aux_jit_stats(u32 ctx_nr, u32 bid,
+		u32 max_allocs, u32 allocs,
+		u32 va_pages, u32 ph_pages)
+{
+	const u32     msg_id = KBASE_AUX_JIT_STATS;
+	const size_t  msg_size = sizeof(msg_id) + sizeof(u64) +
+		sizeof(ctx_nr) + sizeof(bid) +
+		sizeof(max_allocs) + sizeof(allocs) +
+		sizeof(va_pages) + sizeof(ph_pages);
+	unsigned long flags;
+	char          *buffer;
+	size_t        pos = 0;
+
+	buffer = kbasep_tlstream_msgbuf_acquire(
+			TL_STREAM_TYPE_AUX,
+			msg_size, &flags);
+	KBASE_DEBUG_ASSERT(buffer);
+
+	pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_tlstream_write_timestamp(buffer, pos);
+	pos = kbasep_tlstream_write_bytes(
+		buffer, pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_tlstream_write_bytes(
+		buffer, pos, &bid, sizeof(bid));
+	pos = kbasep_tlstream_write_bytes(
+		buffer, pos, &max_allocs, sizeof(max_allocs));
+	pos = kbasep_tlstream_write_bytes(
+		buffer, pos, &allocs, sizeof(allocs));
+	pos = kbasep_tlstream_write_bytes(
+		buffer, pos, &va_pages, sizeof(va_pages));
+	pos = kbasep_tlstream_write_bytes(
+		buffer, pos, &ph_pages, sizeof(ph_pages));
+	KBASE_DEBUG_ASSERT(msg_size == pos);
+
+	kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_AUX, flags);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.h b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h
new file mode 100644
index 0000000..e2a3ea4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h
@@ -0,0 +1,680 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_KBASE_TLSTREAM_H)
+#define _KBASE_TLSTREAM_H
+
+#include <mali_kbase.h>
+
+/*****************************************************************************/
+
+/**
+ * kbase_tlstream_init - initialize timeline infrastructure in kernel
+ *
+ * Return: zero on success, negative number on error
+ */
+int kbase_tlstream_init(void);
+
+/**
+ * kbase_tlstream_term - terminate timeline infrastructure in kernel
+ *
+ * The timeline must have been previously initialized with kbase_tlstream_init().
+ */
+void kbase_tlstream_term(void);
+
+/**
+ * kbase_tlstream_acquire - acquire timeline stream file descriptor
+ * @kctx:  kernel common context
+ * @flags: timeline stream flags
+ *
+ * The descriptor is meant to be used by the userspace timeline component to
+ * gain access to the kernel timeline stream. The stream is later broadcast by
+ * user space to the timeline client.
+ * Only one entity can own the descriptor at any given time. The descriptor
+ * should be closed when unused. If the descriptor cannot be obtained (i.e. it
+ * is already in use), a negative value is returned.
+ *
+ * Return: file descriptor on success, negative number on error
+ */
+int kbase_tlstream_acquire(struct kbase_context *kctx, u32 flags);
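+
+/*
+ * Minimal calling sketch (illustrative only; the error handling shown is an
+ * assumption about the caller, not something mandated by this interface):
+ *
+ *	int fd = kbase_tlstream_acquire(kctx, flags);
+ *
+ *	if (fd < 0)
+ *		return fd;	(stream already owned, or another error)
+ *
+ * On success the descriptor is handed to user space, which closes it once
+ * tracing is finished.
+ */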
+
+/**
+ * kbase_tlstream_flush_streams - flush timeline streams.
+ *
+ * Function will flush pending data in all timeline streams.
+ */
+void kbase_tlstream_flush_streams(void);
+
+/**
+ * kbase_tlstream_reset_body_streams - reset timeline body streams.
+ *
+ * Function will discard pending data in all timeline body streams.
+ */
+void kbase_tlstream_reset_body_streams(void);
+
+#if MALI_UNIT_TEST
+/**
+ * kbase_tlstream_test - start timeline stream data generator
+ * @tpw_count: number of trace point writers in each context
+ * @msg_delay: time delay in milliseconds between trace points written by one
+ *             writer
+ * @msg_count: number of trace points written by one writer
+ * @aux_msg:   if non-zero aux messages will be included
+ *
+ * This test starts the requested number of asynchronous writers in both IRQ
+ * and thread context. Each writer generates the required number of test
+ * tracepoints (tracepoints with embedded information about the writer that
+ * should be verified by the user space reader). Tracepoints are emitted in
+ * all timeline body streams. If aux_msg is non-zero, each writer also
+ * generates non-testable tracepoints (tracepoints without information about
+ * the writer). These tracepoints are used to check the correctness of the
+ * remaining timeline message generating functions. Each writer waits the
+ * requested time between generating consecutive sets of messages. This call
+ * blocks until all writers finish.
+ */
+void kbase_tlstream_test(
+		unsigned int tpw_count,
+		unsigned int msg_delay,
+		unsigned int msg_count,
+		int          aux_msg);
+
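+/*
+ * Illustrative call only (the parameter values below are arbitrary examples,
+ * not recommendations): 8 writers per context, 10 ms between tracepoints from
+ * the same writer, 100 test tracepoints per writer, aux messages included:
+ *
+ *	kbase_tlstream_test(8, 10, 100, 1);
+ */
+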
+/**
+ * kbase_tlstream_stats - read timeline stream statistics
+ * @bytes_collected: will hold number of bytes read by the user
+ * @bytes_generated: will hold number of bytes generated by trace points
+ */
+void kbase_tlstream_stats(u32 *bytes_collected, u32 *bytes_generated);
+#endif /* MALI_UNIT_TEST */
+
+/*****************************************************************************/
+
+#define TL_ATOM_STATE_IDLE 0
+#define TL_ATOM_STATE_READY 1
+#define TL_ATOM_STATE_DONE 2
+#define TL_ATOM_STATE_POSTED 3
+
+void __kbase_tlstream_tl_summary_new_ctx(void *context, u32 nr, u32 tgid);
+void __kbase_tlstream_tl_summary_new_gpu(void *gpu, u32 id, u32 core_count);
+void __kbase_tlstream_tl_summary_new_lpu(void *lpu, u32 nr, u32 fn);
+void __kbase_tlstream_tl_summary_lifelink_lpu_gpu(void *lpu, void *gpu);
+void __kbase_tlstream_tl_summary_new_as(void *as, u32 nr);
+void __kbase_tlstream_tl_summary_lifelink_as_gpu(void *as, void *gpu);
+void __kbase_tlstream_tl_new_ctx(void *context, u32 nr, u32 tgid);
+void __kbase_tlstream_tl_new_atom(void *atom, u32 nr);
+void __kbase_tlstream_tl_del_ctx(void *context);
+void __kbase_tlstream_tl_del_atom(void *atom);
+void __kbase_tlstream_tl_ret_ctx_lpu(void *context, void *lpu);
+void __kbase_tlstream_tl_ret_atom_ctx(void *atom, void *context);
+void __kbase_tlstream_tl_ret_atom_lpu(
+		void *atom, void *lpu, const char *attrib_match_list);
+void __kbase_tlstream_tl_nret_ctx_lpu(void *context, void *lpu);
+void __kbase_tlstream_tl_nret_atom_ctx(void *atom, void *context);
+void __kbase_tlstream_tl_nret_atom_lpu(void *atom, void *lpu);
+void __kbase_tlstream_tl_ret_as_ctx(void *as, void *ctx);
+void __kbase_tlstream_tl_nret_as_ctx(void *as, void *ctx);
+void __kbase_tlstream_tl_ret_atom_as(void *atom, void *as);
+void __kbase_tlstream_tl_nret_atom_as(void *atom, void *as);
+void __kbase_tlstream_tl_dep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_ndep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_rdep_atom_atom(void *atom1, void *atom2);
+void __kbase_tlstream_tl_attrib_atom_config(
+		void *atom, u64 jd, u64 affinity, u32 config);
+void __kbase_tlstream_tl_attrib_atom_priority(void *atom, u32 prio);
+void __kbase_tlstream_tl_attrib_atom_state(void *atom, u32 state);
+void __kbase_tlstream_tl_attrib_atom_prioritized(void *atom);
+void __kbase_tlstream_tl_attrib_atom_jit(
+		void *atom, u64 edit_addr, u64 new_addr, u64 va_pages);
+void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
+		void *atom, u64 va_pages, u64 commit_pages, u64 extent,
+		u32 jit_id, u32 bin_id, u32 max_allocations, u32 flags,
+		u32 usage_id);
+void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(void *atom, u32 jit_id);
+void __kbase_tlstream_tl_attrib_as_config(
+		void *as, u64 transtab, u64 memattr, u64 transcfg);
+void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom);
+void __kbase_tlstream_tl_event_lpu_softstop(void *lpu);
+void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_start(void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_end(void *atom);
+void __kbase_tlstream_jd_gpu_soft_reset(void *gpu);
+void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state);
+void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change);
+void __kbase_tlstream_aux_pagesalloc(u32 ctx_nr, u64 page_count);
+void __kbase_tlstream_aux_devfreq_target(u64 target_freq);
+void __kbase_tlstream_aux_protected_enter_start(void *gpu);
+void __kbase_tlstream_aux_protected_enter_end(void *gpu);
+void __kbase_tlstream_aux_protected_leave_start(void *gpu);
+void __kbase_tlstream_aux_protected_leave_end(void *gpu);
+void __kbase_tlstream_aux_jit_stats(u32 ctx_nr, u32 bin_id,
+		u32 max_allocations, u32 allocations,
+		u32 va_pages_nr, u32 ph_pages_nr);
+
+#define TLSTREAM_ENABLED (1 << 31)
+
+extern atomic_t kbase_tlstream_enabled;
+
+#define __TRACE_IF_ENABLED(trace_name, ...)                         \
+	do {                                                        \
+		int enabled = atomic_read(&kbase_tlstream_enabled); \
+		if (enabled & TLSTREAM_ENABLED)                     \
+			__kbase_tlstream_##trace_name(__VA_ARGS__); \
+	} while (0)
+
+#define __TRACE_IF_ENABLED_LATENCY(trace_name, ...)                     \
+	do {                                                            \
+		int enabled = atomic_read(&kbase_tlstream_enabled);     \
+		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
+			__kbase_tlstream_##trace_name(__VA_ARGS__);     \
+	} while (0)
+
+#define __TRACE_IF_ENABLED_JD(trace_name, ...)                      \
+	do {                                                        \
+		int enabled = atomic_read(&kbase_tlstream_enabled); \
+		if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED)    \
+			__kbase_tlstream_##trace_name(__VA_ARGS__); \
+	} while (0)
+
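+/*
+ * The wrapper macros below all follow the same pattern: the public macro
+ * expands to one of the helpers above, so the __kbase_tlstream_* call is only
+ * made when the corresponding bit is set in kbase_tlstream_enabled. A
+ * hypothetical call site (variable names are assumptions) would simply do:
+ *
+ *	KBASE_TLSTREAM_TL_NEW_ATOM(katom, atom_number);
+ */
+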
+/*****************************************************************************/
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX - create context object in timeline
+ *                                     summary
+ * @context: name of the context object
+ * @nr:      context number
+ * @tgid:    thread Group Id
+ *
+ * Function emits a timeline message informing about context creation. The
+ * context is created with a context number (its attribute), which can be used
+ * to link the kbase context with the userspace context.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_CTX(context, nr, tgid) \
+	__TRACE_IF_ENABLED(tl_summary_new_ctx, context, nr, tgid)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU - create GPU object in timeline summary
+ * @gpu:        name of the GPU object
+ * @id:         id value of this GPU
+ * @core_count: number of cores this GPU hosts
+ *
+ * Function emits a timeline message informing about GPU creation. GPU is
+ * created with two attributes: id and core count.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_GPU(gpu, id, core_count) \
+	__TRACE_IF_ENABLED(tl_summary_new_gpu, gpu, id, core_count)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU - create LPU object in timeline summary
+ * @lpu: name of the Logical Processing Unit object
+ * @nr:  sequential number assigned to this LPU
+ * @fn:  property describing this LPU's functional abilities
+ *
+ * Function emits a timeline message informing about LPU creation. The LPU is
+ * created with two attributes: a number linking this LPU with the GPU's job
+ * slot, and a function bearing information about this LPU's abilities.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_LPU(lpu, nr, fn) \
+	__TRACE_IF_ENABLED(tl_summary_new_lpu, lpu, nr, fn)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU - lifelink LPU object to GPU
+ * @lpu: name of the Logical Processing Unit object
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message informing that LPU object shall be deleted
+ * along with GPU object.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_LPU_GPU(lpu, gpu) \
+	__TRACE_IF_ENABLED(tl_summary_lifelink_lpu_gpu, lpu, gpu)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_NEW_AS - create address space object in timeline summary
+ * @as: name of the address space object
+ * @nr: sequential number assigned to this address space
+ *
+ * Function emits a timeline message informing about address space creation.
+ * Address space is created with one attribute: number identifying this
+ * address space.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_NEW_AS(as, nr) \
+	__TRACE_IF_ENABLED(tl_summary_new_as, as, nr)
+
+/**
+ * KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU - lifelink address space object to GPU
+ * @as:  name of the address space object
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message informing that address space object
+ * shall be deleted along with GPU object.
+ * This message is directed to timeline summary stream.
+ */
+#define KBASE_TLSTREAM_TL_SUMMARY_LIFELINK_AS_GPU(as, gpu) \
+	__TRACE_IF_ENABLED(tl_summary_lifelink_as_gpu, as, gpu)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_CTX - create context object in timeline
+ * @context: name of the context object
+ * @nr:      context number
+ * @tgid:    thread Group Id
+ *
+ * Function emits a timeline message informing about context creation. The
+ * context is created with a context number (its attribute), which can be used
+ * to link the kbase context with the userspace context.
+ */
+#define KBASE_TLSTREAM_TL_NEW_CTX(context, nr, tgid) \
+	__TRACE_IF_ENABLED(tl_new_ctx, context, nr, tgid)
+
+/**
+ * KBASE_TLSTREAM_TL_NEW_ATOM - create atom object in timeline
+ * @atom: name of the atom object
+ * @nr:   sequential number assigned to this atom
+ *
+ * Function emits a timeline message informing about atom creation. The atom
+ * is created with an atom number (its attribute) that links it with the
+ * actual work bucket id understood by hardware.
+ */
+#define KBASE_TLSTREAM_TL_NEW_ATOM(atom, nr) \
+	__TRACE_IF_ENABLED(tl_new_atom, atom, nr)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_CTX - destroy context object in timeline
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that context object ceased to
+ * exist.
+ */
+#define KBASE_TLSTREAM_TL_DEL_CTX(context) \
+	__TRACE_IF_ENABLED(tl_del_ctx, context)
+
+/**
+ * KBASE_TLSTREAM_TL_DEL_ATOM - destroy atom object in timeline
+ * @atom: name of the atom object
+ *
+ * Function emits a timeline message informing that atom object ceased to
+ * exist.
+ */
+#define KBASE_TLSTREAM_TL_DEL_ATOM(atom) \
+	__TRACE_IF_ENABLED(tl_del_atom, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_CTX_LPU - retain context by LPU
+ * @context: name of the context object
+ * @lpu:     name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that context is being held
+ * by LPU and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_CTX_LPU(context, lpu) \
+	__TRACE_IF_ENABLED(tl_ret_ctx_lpu, context, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_CTX - retain atom by context
+ * @atom:    name of the atom object
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by context and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_CTX(atom, context) \
+	__TRACE_IF_ENABLED(tl_ret_atom_ctx, atom, context)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_LPU - retain atom by LPU
+ * @atom:              name of the atom object
+ * @lpu:               name of the Logical Processing Unit object
+ * @attrib_match_list: list containing match operator attributes
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by LPU and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_LPU(atom, lpu, attrib_match_list) \
+	__TRACE_IF_ENABLED(tl_ret_atom_lpu, atom, lpu, attrib_match_list)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_CTX_LPU - release context by LPU
+ * @context: name of the context object
+ * @lpu:     name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that context is being released
+ * by LPU object.
+ */
+#define KBASE_TLSTREAM_TL_NRET_CTX_LPU(context, lpu) \
+	__TRACE_IF_ENABLED(tl_nret_ctx_lpu, context, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_CTX - release atom by context
+ * @atom:    name of the atom object
+ * @context: name of the context object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by context.
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX(atom, context) \
+	__TRACE_IF_ENABLED(tl_nret_atom_ctx, atom, context)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_LPU - release atom by LPU
+ * @atom: name of the atom object
+ * @lpu:  name of the Logical Processing Unit object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by LPU.
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU(atom, lpu) \
+	__TRACE_IF_ENABLED(tl_nret_atom_lpu, atom, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_AS_CTX - retain address space by context
+ * @as:  name of the address space object
+ * @ctx: name of the context object
+ *
+ * Function emits a timeline message informing that address space object
+ * is being held by the context object.
+ */
+#define KBASE_TLSTREAM_TL_RET_AS_CTX(as, ctx) \
+	__TRACE_IF_ENABLED(tl_ret_as_ctx, as, ctx)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_AS_CTX - release address space by context
+ * @as:  name of the address space object
+ * @ctx: name of the context object
+ *
+ * Function emits a timeline message informing that address space object
+ * is being released by the context.
+ */
+#define KBASE_TLSTREAM_TL_NRET_AS_CTX(as, ctx) \
+	__TRACE_IF_ENABLED(tl_nret_as_ctx, as, ctx)
+
+/**
+ * KBASE_TLSTREAM_TL_RET_ATOM_AS - retain atom by address space
+ * @atom: name of the atom object
+ * @as:   name of the address space object
+ *
+ * Function emits a timeline message informing that atom object is being held
+ * by address space and must not be deleted unless it is released.
+ */
+#define KBASE_TLSTREAM_TL_RET_ATOM_AS(atom, as) \
+	__TRACE_IF_ENABLED(tl_ret_atom_as, atom, as)
+
+/**
+ * KBASE_TLSTREAM_TL_NRET_ATOM_AS - release atom by address space
+ * @atom: name of the atom object
+ * @as:   name of the address space object
+ *
+ * Function emits a timeline message informing that atom object is being
+ * released by address space.
+ */
+#define KBASE_TLSTREAM_TL_NRET_ATOM_AS(atom, as) \
+	__TRACE_IF_ENABLED(tl_nret_atom_as, atom, as)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG - atom job slot attributes
+ * @atom:     name of the atom object
+ * @jd:       job descriptor address
+ * @affinity: job affinity
+ * @config:   job config
+ *
+ * Function emits a timeline message containing atom attributes.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(atom, jd, affinity, config) \
+	__TRACE_IF_ENABLED(tl_attrib_atom_config, atom, jd, affinity, config)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY - atom priority
+ * @atom: name of the atom object
+ * @prio: atom priority
+ *
+ * Function emits a timeline message containing atom priority.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY(atom, prio) \
+	__TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_priority, atom, prio)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE - atom state
+ * @atom:  name of the atom object
+ * @state: atom state
+ *
+ * Function emits a timeline message containing atom state.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, state) \
+	__TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_state, atom, state)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED - atom was prioritized
+ * @atom:  name of the atom object
+ *
+ * Function emits a timeline message signalling priority change
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED(atom) \
+	__TRACE_IF_ENABLED_LATENCY(tl_attrib_atom_prioritized, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT - jit happened on atom
+ * @atom:       atom identifier
+ * @edit_addr:  address edited by jit
+ * @new_addr:   address placed into the edited location
+ * @va_pages:   maximum number of pages this jit can allocate
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(atom, edit_addr, new_addr, va_pages) \
+	__TRACE_IF_ENABLED_JD(tl_attrib_atom_jit, atom, edit_addr, \
+		new_addr, va_pages)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO - information about the JIT
+ *                                              allocation atom
+ *
+ * @atom:            Atom identifier.
+ * @va_pages:        The minimum number of virtual pages required.
+ * @commit_pages:    The minimum number of physical pages which
+ *                   should back the allocation.
+ * @extent:          Granularity of physical pages to grow the
+ *                   allocation by during a fault.
+ * @jit_id:          Unique ID provided by the caller, this is used
+ *                   to pair allocation and free requests.
+ * @bin_id:          The JIT allocation bin, used in conjunction with
+ *                   @max_allocations to limit the number of each
+ *                   type of JIT allocation.
+ * @max_allocations: The maximum number of allocations allowed within
+ *                   the bin specified by @bin_id. Should be the same
+ *                   for all JIT allocations within the same bin.
+ * @jit_flags:       Flags specifying the special requirements for
+ *                   the JIT allocation.
+ * @usage_id:        A hint about which allocation should be reused.
+ *                   The kernel should attempt to use a previous
+ *                   allocation with the same usage_id
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO(atom, va_pages,\
+		commit_pages, extent, jit_id, bin_id,\
+		max_allocations, jit_flags, usage_id) \
+	__TRACE_IF_ENABLED(tl_attrib_atom_jitallocinfo, atom, va_pages,\
+		commit_pages, extent, jit_id, bin_id,\
+		max_allocations, jit_flags, usage_id)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO - information about the JIT
+ *                                             free atom
+ *
+ * @atom:   Atom identifier.
+ * @jit_id: Unique ID provided by the caller, this is used
+ *          to pair allocation and free requests.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO(atom, jit_id) \
+	__TRACE_IF_ENABLED(tl_attrib_atom_jitfreeinfo, atom, jit_id)
+
+/**
+ * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG - address space attributes
+ * @as:       assigned address space
+ * @transtab: configuration of the TRANSTAB register
+ * @memattr:  configuration of the MEMATTR register
+ * @transcfg: configuration of the TRANSCFG register (or zero if not present)
+ *
+ * Function emits a timeline message containing address space attributes.
+ */
+#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as, transtab, memattr, transcfg) \
+	__TRACE_IF_ENABLED(tl_attrib_as_config, as, transtab, memattr, transcfg)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX
+ * @atom:       atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(atom) \
+	__TRACE_IF_ENABLED(tl_event_atom_softstop_ex, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP
+ * @lpu:        name of the LPU object
+ */
+#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(lpu) \
+	__TRACE_IF_ENABLED(tl_event_lpu_softstop, lpu)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE
+ * @atom:       atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(atom) \
+	__TRACE_IF_ENABLED(tl_event_atom_softstop_issue, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START
+ * @atom:       atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(atom) \
+	__TRACE_IF_ENABLED(tl_event_atom_softjob_start, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END
+ * @atom:       atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(atom) \
+	__TRACE_IF_ENABLED(tl_event_atom_softjob_end, atom)
+
+/**
+ * KBASE_TLSTREAM_JD_GPU_SOFT_RESET - The GPU is being soft reset
+ * @gpu:        name of the GPU object
+ *
+ * This imperative tracepoint is specific to job dumping.
+ * Function emits a timeline message indicating GPU soft reset.
+ */
+#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET(gpu) \
+	__TRACE_IF_ENABLED(jd_gpu_soft_reset, gpu)
+
+
+/**
+ * KBASE_TLSTREAM_AUX_PM_STATE - timeline message: power management state
+ * @core_type: core type (shader, tiler, l2 cache, l3 cache)
+ * @state:     64-bit bitmask reporting power state of the cores (1-ON, 0-OFF)
+ */
+#define KBASE_TLSTREAM_AUX_PM_STATE(core_type, state) \
+	__TRACE_IF_ENABLED(aux_pm_state, core_type, state)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGEFAULT - timeline message: MMU page fault event
+ *                                resulting in new pages being mapped
+ * @ctx_nr:            kernel context number
+ * @page_count_change: number of pages to be added
+ */
+#define KBASE_TLSTREAM_AUX_PAGEFAULT(ctx_nr, page_count_change) \
+	__TRACE_IF_ENABLED(aux_pagefault, ctx_nr, page_count_change)
+
+/**
+ * KBASE_TLSTREAM_AUX_PAGESALLOC - timeline message: total number of allocated
+ *                                 pages is changed
+ * @ctx_nr:     kernel context number
+ * @page_count: number of pages used by the context
+ */
+#define KBASE_TLSTREAM_AUX_PAGESALLOC(ctx_nr, page_count) \
+	__TRACE_IF_ENABLED(aux_pagesalloc, ctx_nr, page_count)
+
+/**
+ * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET - timeline message: new target DVFS
+ *                                     frequency
+ * @target_freq: new target frequency
+ */
+#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET(target_freq) \
+	__TRACE_IF_ENABLED(aux_devfreq_target, target_freq)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START - The GPU has started transitioning
+ *                                            to protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU is starting to
+ * transition to protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(gpu) \
+	__TRACE_IF_ENABLED_LATENCY(aux_protected_enter_start, gpu)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END - The GPU has finished transitioning
+ *                                          to protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU has finished
+ * transitioning to protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(gpu) \
+	__TRACE_IF_ENABLED_LATENCY(aux_protected_enter_end, gpu)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START - The GPU has started transitioning
+ *                                            to non-protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU is starting to
+ * transition to non-protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(gpu) \
+	__TRACE_IF_ENABLED_LATENCY(aux_protected_leave_start, gpu)
+
+/**
+ * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END - The GPU has finished transitioning
+ *                                          to non-protected mode
+ * @gpu: name of the GPU object
+ *
+ * Function emits a timeline message indicating the GPU has finished
+ * transitioning to non-protected mode.
+ */
+#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(gpu) \
+	__TRACE_IF_ENABLED_LATENCY(aux_protected_leave_end, gpu)
+
+/**
+ * KBASE_TLSTREAM_AUX_JIT_STATS - JIT allocations per bin statistics
+ *
+ * @ctx_nr:      kernel context number
+ * @bid:         JIT bin id
+ * @max_allocs:  maximum allocations allowed in this bin.
+ *               UINT_MAX is a special value denoting that the
+ *               parameter has not changed since the previous message.
+ * @allocs:      number of active allocations in this bin
+ * @va_pages:    number of virtual pages allocated in this bin
+ * @ph_pages:    number of physical pages allocated in this bin
+ *
+ * Function emits a timeline message indicating that the JIT statistics
+ * for a given bin have changed.
+ */
+#define KBASE_TLSTREAM_AUX_JIT_STATS(ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages) \
+	__TRACE_IF_ENABLED(aux_jit_stats, ctx_nr, bid, \
+			max_allocs, allocs, \
+			va_pages, ph_pages)
+#endif /* _KBASE_TLSTREAM_H */
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
new file mode 100644
index 0000000..77fb818
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
@@ -0,0 +1,261 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE         *****
+ * *****            DO NOT INCLUDE DIRECTLY                  *****
+ * *****            THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
+
+/*
+ * The purpose of this header file is just to contain a list of trace code identifiers
+ *
+ * Each identifier is wrapped in a macro, so that its string form and enum form can be created
+ *
+ * Each macro is separated with a comma, to allow insertion into an array initializer or enum definition block.
+ *
+ * This allows automatic creation of an enum and a corresponding array of strings
+ *
+ * Before #including, the includer MUST #define KBASE_TRACE_CODE_MAKE_CODE.
+ * After #including, the includer MUST #undef KBASE_TRACE_CODE_MAKE_CODE.
+ *
+ * e.g.:
+ * #define KBASE_TRACE_CODE( X ) KBASE_TRACE_CODE_ ## X
+ * typedef enum
+ * {
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) KBASE_TRACE_CODE( X )
+ * #include "mali_kbase_trace_defs.h"
+ * #undef  KBASE_TRACE_CODE_MAKE_CODE
+ * } kbase_trace_code;
+ *
+ * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY PURPOSE OTHER THAN THE ABOVE
+ *
+ *
+ * The use of the macro here is:
+ * - KBASE_TRACE_CODE_MAKE_CODE( X )
+ *
+ * Which produces:
+ * - For an enum, KBASE_TRACE_CODE_X
+ * - For a string, "X"
+ *
+ *
+ * For example:
+ * - KBASE_TRACE_CODE_MAKE_CODE( JM_JOB_COMPLETE ) expands to:
+ *  - KBASE_TRACE_CODE_JM_JOB_COMPLETE for the enum
+ *  - "JM_JOB_COMPLETE" for the string
+ * - To use it to trace an event, do:
+ *  - KBASE_TRACE_ADD( kbdev, JM_JOB_COMPLETE, subcode, kctx, uatom, val );
+ */
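+
+/*
+ * For completeness, a sketch of the matching string array (illustrative only;
+ * the array name is an assumption, nothing here defines it):
+ *
+ * static const char *const kbase_trace_code_string[] = {
+ * #define KBASE_TRACE_CODE_MAKE_CODE( X ) # X
+ * #include "mali_kbase_trace_defs.h"
+ * #undef  KBASE_TRACE_CODE_MAKE_CODE
+ * };
+ */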
+
+#if 0 /* Dummy section to avoid breaking formatting */
+int dummy_array[] = {
+#endif
+
+/*
+ * Core events
+ */
+	/* no info_val, no gpu_addr, no atom */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),
+	/* no info_val, no gpu_addr, no atom */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM),
+	/* info_val == GPU_IRQ_STATUS register */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ),
+	/* info_val == bits cleared */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR),
+	/* info_val == GPU_IRQ_STATUS register */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR),
+	/* GPU addr==dump address */
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE),
+	KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES),
+/*
+ * Job Slot management events
+ */
+	/* info_val==irq rawstat at start */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ),
+	/* info_val==jobs processed */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END),
+/* In the following:
+ *
+ * - ctx is set if a corresponding job is found (NULL otherwise, e.g. some soft-stop cases)
+ * - uatom==kernel-side mapped uatom address (for correlation with user-side)
+ */
+	/* info_val==exit code; gpu_addr==chain gpuaddr */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE),
+	/* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT),
+	/* gpu_addr is as follows:
+	 * - If JS_STATUS active after soft-stop, val==gpu addr written to
+	 *   JS_HEAD on submit
+	 * - otherwise gpu_addr==0 */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1),
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP),
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0),
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1),
+	/* gpu_addr==JS_TAIL read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD),
+/* gpu_addr is as follows:
+ * - If JS_STATUS active before soft-stop, val==JS_HEAD
+ * - otherwise gpu_addr==0
+ */
+	/* gpu_addr==JS_HEAD read */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE),
+	/* info_val == is_scheduled */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED),
+	/* info_val == is_scheduled */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE),
+	/* info_val == nr jobs submitted */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP),
+	/* gpu_addr==JS_HEAD_NEXT last written */
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER),
+	KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER),
+/*
+ * Job dispatch events
+ */
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB),
+	/* gpu_addr==0, info_val==0, uatom==0 */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER),
+/*
+ * Scheduler Core events
+ */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB),
+	/* gpu_addr==last value written/would be written to JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB),
+	/* gpu_addr==value to write into JS_HEAD */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED),
+	/* info_val == lower 32 bits of rechecked affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED),
+	/* info_val == lower 32 bits of rechecked affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED),
+	/* info_val == lower 32 bits of affinity */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE),
+	/* info_val == the ctx attribute now on ctx */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX),
+	/* info_val == the ctx attribute now on runpool */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL),
+	/* info_val == the ctx attribute now off ctx */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX),
+	/* info_val == the ctx attribute now off runpool */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL),
+/*
+ * Scheduler Policy events
+ */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX),
+	/* info_val == whether it was evicted */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_ADD_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ),
+	/* gpu_addr==JS_HEAD to write if the job were run */
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START),
+	KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END),
+/*
+ * Power Management Events
+ */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERING_UP),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_JOB_SUBMIT_AFTER_POWERED_UP),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWRON_L2),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_PWROFF_L2),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_POWERED_L2),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_DESIRED_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_CHANGE_AVAILABLE_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CORES_AVAILABLE_TILER),
+	/* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_ACTIVE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF),
+	/* info_val == policy number, or -1 for "Already changing" */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY),
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY),
+	/* info_val == policy number */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT),
+	/* info_val == policy number */
+	KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM),
+/* Unused code just to make it easier to not have a comma at the end.
+ * All other codes MUST come before this */
+	KBASE_TRACE_CODE_MAKE_CODE(DUMMY)
+
+#if 0 /* Dummy section to avoid breaking formatting */
+};
+#endif
+
+/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_utility.h b/drivers/gpu/arm/midgard/mali_kbase_utility.h
new file mode 100644
index 0000000..8d4f0443
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_utility.h
@@ -0,0 +1,55 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013, 2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#ifndef _KBASE_UTILITY_H
+#define _KBASE_UTILITY_H
+
+#ifndef _KBASE_H_
+#error "Don't include this file directly, use mali_kbase.h instead"
+#endif
+
+static inline void kbase_timer_setup(struct timer_list *timer,
+				     void (*callback)(struct timer_list *timer))
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+	setup_timer(timer, (void (*)(unsigned long)) callback,
+			(unsigned long) timer);
+#else
+	timer_setup(timer, callback, 0);
+#endif
+}
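+
+/*
+ * Usage sketch (illustrative; the object and callback names are assumptions).
+ * The wrapper lets the same call site build both before and after the
+ * timer_list API change in Linux 4.14:
+ *
+ *	static void my_timer_cb(struct timer_list *timer)
+ *	{
+ *		struct my_obj *obj = container_of(timer, struct my_obj, timer);
+ *		...
+ *	}
+ *
+ *	kbase_timer_setup(&obj->timer, my_timer_cb);
+ *	mod_timer(&obj->timer, jiffies + msecs_to_jiffies(10));
+ */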
+
+#ifndef WRITE_ONCE
+	#ifdef ASSIGN_ONCE
+		#define WRITE_ONCE(x, val) ASSIGN_ONCE(val, x)
+	#else
+		#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
+	#endif
+#endif
+
+#ifndef READ_ONCE
+	#define READ_ONCE(x) ACCESS_ONCE(x)
+#endif
+
+#endif				/* _KBASE_UTILITY_H */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.c b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
new file mode 100644
index 0000000..3e020e1
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
@@ -0,0 +1,989 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include "mali_kbase_vinstr.h"
+#include "mali_kbase_hwcnt_virtualizer.h"
+#include "mali_kbase_hwcnt_types.h"
+#include "mali_kbase_hwcnt_reader.h"
+#include "mali_kbase_hwcnt_gpu.h"
+#include "mali_kbase_ioctl.h"
+#include "mali_malisw.h"
+#include "mali_kbase_debug.h"
+
+#include <linux/anon_inodes.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+/* Hwcnt reader API version */
+#define HWCNT_READER_API 1
+
+/* The minimum allowed interval between dumps (equivalent to 10 kHz) */
+#define DUMP_INTERVAL_MIN_NS (100 * NSEC_PER_USEC)
+
+/* The maximum allowed buffers per client */
+#define MAX_BUFFER_COUNT 32
+
+/**
+ * struct kbase_vinstr_context - IOCTL interface for userspace hardware
+ *                               counters.
+ * @hvirt:         Hardware counter virtualizer used by vinstr.
+ * @metadata:      Hardware counter metadata provided by virtualizer.
+ * @lock:          Lock protecting all vinstr state.
+ * @suspend_count: Suspend reference count. If non-zero, timer and worker are
+ *                 prevented from being re-scheduled.
+ * @client_count:  Number of vinstr clients.
+ * @clients:       List of vinstr clients.
+ * @dump_timer:    Timer that enqueues dump_work to a workqueue.
+ * @dump_work:     Worker for performing periodic counter dumps.
+ */
+struct kbase_vinstr_context {
+	struct kbase_hwcnt_virtualizer *hvirt;
+	const struct kbase_hwcnt_metadata *metadata;
+	struct mutex lock;
+	size_t suspend_count;
+	size_t client_count;
+	struct list_head clients;
+	struct hrtimer dump_timer;
+	struct work_struct dump_work;
+};
+
+/**
+ * struct kbase_vinstr_client - A vinstr client attached to a vinstr context.
+ * @vctx:              Vinstr context client is attached to.
+ * @hvcli:             Hardware counter virtualizer client.
+ * @node:              Node used to attach this client to list in vinstr
+ *                     context.
+ * @dump_interval_ns:  Interval between periodic dumps. If 0, not a periodic
+ *                     client.
+ * @next_dump_time_ns: Time in ns when this client's next periodic dump must
+ *                     occur. If 0, not a periodic client.
+ * @enable_map:        Counters enable map.
+ * @dump_bufs:         Array of dump buffers allocated by this client.
+ * @dump_bufs_meta:    Metadata of dump buffers.
+ * @meta_idx:          Index of metadata being accessed by userspace.
+ * @read_idx:          Index of buffer read by userspace.
+ * @write_idx:         Index of buffer being written by dump worker.
+ * @waitq:             Client's notification queue.
+ */
+struct kbase_vinstr_client {
+	struct kbase_vinstr_context *vctx;
+	struct kbase_hwcnt_virtualizer_client *hvcli;
+	struct list_head node;
+	u64 next_dump_time_ns;
+	u32 dump_interval_ns;
+	struct kbase_hwcnt_enable_map enable_map;
+	struct kbase_hwcnt_dump_buffer_array dump_bufs;
+	struct kbase_hwcnt_reader_metadata *dump_bufs_meta;
+	atomic_t meta_idx;
+	atomic_t read_idx;
+	atomic_t write_idx;
+	wait_queue_head_t waitq;
+};
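+
+/*
+ * Note on the buffer indices above (a summary of the scheme used by the dump
+ * worker and the reader ioctls below; the example values are illustrative):
+ * the three counters increase monotonically and satisfy
+ * read_idx <= meta_idx <= write_idx. The dump path advances write_idx after
+ * filling a buffer, GET_BUFFER advances meta_idx after copying the metadata
+ * to user space, and PUT_BUFFER advances read_idx once the buffer is
+ * returned. With buf_cnt == 4, write_idx == 6 and read_idx == 2 means the
+ * ring is full, so the next dump attempt returns -EBUSY.
+ */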
+
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(
+	struct file *filp,
+	poll_table *wait);
+
+static long kbasep_vinstr_hwcnt_reader_ioctl(
+	struct file *filp,
+	unsigned int cmd,
+	unsigned long arg);
+
+static int kbasep_vinstr_hwcnt_reader_mmap(
+	struct file *filp,
+	struct vm_area_struct *vma);
+
+static int kbasep_vinstr_hwcnt_reader_release(
+	struct inode *inode,
+	struct file *filp);
+
+/* Vinstr client file operations */
+static const struct file_operations vinstr_client_fops = {
+	.poll           = kbasep_vinstr_hwcnt_reader_poll,
+	.unlocked_ioctl = kbasep_vinstr_hwcnt_reader_ioctl,
+	.compat_ioctl   = kbasep_vinstr_hwcnt_reader_ioctl,
+	.mmap           = kbasep_vinstr_hwcnt_reader_mmap,
+	.release        = kbasep_vinstr_hwcnt_reader_release,
+};
+
+/**
+ * kbasep_vinstr_timestamp_ns() - Get the current time in nanoseconds.
+ *
+ * Return: Current time in nanoseconds.
+ */
+static u64 kbasep_vinstr_timestamp_ns(void)
+{
+	struct timespec64 ts;
+
+	ktime_get_raw_ts64(&ts);
+	return (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+}
+
+/**
+ * kbasep_vinstr_next_dump_time_ns() - Calculate the next periodic dump time.
+ * @cur_ts_ns: Current time in nanoseconds.
+ * @interval:  Interval between dumps in nanoseconds.
+ *
+ * Return: 0 if interval is 0 (i.e. a non-periodic client), or the next dump
+ *         time that occurs after cur_ts_ns.
+ */
+static u64 kbasep_vinstr_next_dump_time_ns(u64 cur_ts_ns, u32 interval)
+{
+	/* Non-periodic client */
+	if (interval == 0)
+		return 0;
+
+	/*
+	 * Return the next interval after the current time relative to t=0.
+	 * This means multiple clients with the same period will synchronise,
+	 * regardless of when they were started, allowing the worker to be
+	 * scheduled less frequently.
+	 */
+	do_div(cur_ts_ns, interval);
+	return (cur_ts_ns + 1) * interval;
+}
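+
+/*
+ * Worked example for the alignment above (numbers are illustrative only):
+ * with interval == 100 ms, a client evaluated at cur_ts_ns == 1.23 s gets
+ * (12 + 1) * 100 ms == 1.3 s, and a client evaluated at 1.29 s also gets
+ * 1.3 s, so both are served by a single run of the dump worker.
+ */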
+
+/**
+ * kbasep_vinstr_client_dump() - Perform a dump for a client.
+ * @vcli:     Non-NULL pointer to a vinstr client.
+ * @event_id: Event type that triggered the dump.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_client_dump(
+	struct kbase_vinstr_client *vcli,
+	enum base_hwcnt_reader_event event_id)
+{
+	int errcode;
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+	unsigned int write_idx;
+	unsigned int read_idx;
+	struct kbase_hwcnt_dump_buffer *dump_buf;
+	struct kbase_hwcnt_reader_metadata *meta;
+
+	WARN_ON(!vcli);
+	lockdep_assert_held(&vcli->vctx->lock);
+
+	write_idx = atomic_read(&vcli->write_idx);
+	read_idx = atomic_read(&vcli->read_idx);
+
+	/* Check if there is a place to copy HWC block into. */
+	if (write_idx - read_idx == vcli->dump_bufs.buf_cnt)
+		return -EBUSY;
+	write_idx %= vcli->dump_bufs.buf_cnt;
+
+	dump_buf = &vcli->dump_bufs.bufs[write_idx];
+	meta = &vcli->dump_bufs_meta[write_idx];
+
+	errcode = kbase_hwcnt_virtualizer_client_dump(
+		vcli->hvcli, &ts_start_ns, &ts_end_ns, dump_buf);
+	if (errcode)
+		return errcode;
+
+	/* Patch the dump buf headers, to hide the counters that other hwcnt
+	 * clients are using.
+	 */
+	kbase_hwcnt_gpu_patch_dump_headers(dump_buf, &vcli->enable_map);
+
+	/* Zero all non-enabled counters (current values are undefined) */
+	kbase_hwcnt_dump_buffer_zero_non_enabled(dump_buf, &vcli->enable_map);
+
+	meta->timestamp = ts_end_ns;
+	meta->event_id = event_id;
+	meta->buffer_idx = write_idx;
+
+	/* Notify client. Make sure all changes to memory are visible. */
+	wmb();
+	atomic_inc(&vcli->write_idx);
+	wake_up_interruptible(&vcli->waitq);
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_client_clear() - Reset all the client's counters to zero.
+ * @vcli: Non-NULL pointer to a vinstr client.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_client_clear(struct kbase_vinstr_client *vcli)
+{
+	u64 ts_start_ns;
+	u64 ts_end_ns;
+
+	WARN_ON(!vcli);
+	lockdep_assert_held(&vcli->vctx->lock);
+
+	/* A virtualizer dump with a NULL buffer will just clear the virtualizer
+	 * client's buffer.
+	 */
+	return kbase_hwcnt_virtualizer_client_dump(
+		vcli->hvcli, &ts_start_ns, &ts_end_ns, NULL);
+}
+
+/**
+ * kbasep_vinstr_reschedule_worker() - Update next dump times for all periodic
+ *                                     vinstr clients, then reschedule the dump
+ *                                     worker appropriately.
+ * @vctx: Non-NULL pointer to the vinstr context.
+ *
+ * If there are no periodic clients, then the dump worker will not be
+ * rescheduled. Else, the dump worker will be rescheduled for the next periodic
+ * client dump.
+ */
+static void kbasep_vinstr_reschedule_worker(struct kbase_vinstr_context *vctx)
+{
+	u64 cur_ts_ns;
+	u64 earliest_next_ns = U64_MAX;
+	struct kbase_vinstr_client *pos;
+
+	WARN_ON(!vctx);
+	lockdep_assert_held(&vctx->lock);
+
+	cur_ts_ns = kbasep_vinstr_timestamp_ns();
+
+	/*
+	 * Update each client's next dump time, and find the earliest next
+	 * dump time if any of the clients have a non-zero interval.
+	 */
+	list_for_each_entry(pos, &vctx->clients, node) {
+		const u64 cli_next_ns =
+			kbasep_vinstr_next_dump_time_ns(
+				cur_ts_ns, pos->dump_interval_ns);
+
+		/* Non-zero next dump time implies a periodic client */
+		if ((cli_next_ns != 0) && (cli_next_ns < earliest_next_ns))
+			earliest_next_ns = cli_next_ns;
+
+		pos->next_dump_time_ns = cli_next_ns;
+	}
+
+	/* Cancel the timer if it is already pending */
+	hrtimer_cancel(&vctx->dump_timer);
+
+	/* Start the timer if there are periodic clients and vinstr is not
+	 * suspended.
+	 */
+	if ((earliest_next_ns != U64_MAX) &&
+	    (vctx->suspend_count == 0) &&
+	    !WARN_ON(earliest_next_ns < cur_ts_ns))
+		hrtimer_start(
+			&vctx->dump_timer,
+			ns_to_ktime(earliest_next_ns - cur_ts_ns),
+			HRTIMER_MODE_REL);
+}
+
+/**
+ * kbasep_vinstr_dump_worker()- Dump worker, that dumps all periodic clients
+ *                              that need to be dumped, then reschedules itself.
+ * @work: Work structure.
+ */
+static void kbasep_vinstr_dump_worker(struct work_struct *work)
+{
+	struct kbase_vinstr_context *vctx =
+		container_of(work, struct kbase_vinstr_context, dump_work);
+	struct kbase_vinstr_client *pos;
+	u64 cur_time_ns;
+
+	mutex_lock(&vctx->lock);
+
+	cur_time_ns = kbasep_vinstr_timestamp_ns();
+
+	/* Dump all periodic clients whose next dump time is before the current
+	 * time.
+	 */
+	list_for_each_entry(pos, &vctx->clients, node) {
+		if ((pos->next_dump_time_ns != 0) &&
+			(pos->next_dump_time_ns < cur_time_ns))
+			kbasep_vinstr_client_dump(
+				pos, BASE_HWCNT_READER_EVENT_PERIODIC);
+	}
+
+	/* Update the next dump times of all periodic clients, then reschedule
+	 * this worker at the earliest next dump time.
+	 */
+	kbasep_vinstr_reschedule_worker(vctx);
+
+	mutex_unlock(&vctx->lock);
+}
+
+/**
+ * kbasep_vinstr_dump_timer() - Dump timer that schedules the dump worker for
+ *                              execution as soon as possible.
+ * @timer: Timer structure.
+ */
+static enum hrtimer_restart kbasep_vinstr_dump_timer(struct hrtimer *timer)
+{
+	struct kbase_vinstr_context *vctx =
+		container_of(timer, struct kbase_vinstr_context, dump_timer);
+
+	/* We don't need to check vctx->suspend_count here, as the suspend
+	 * function will ensure that any worker enqueued here is immediately
+	 * cancelled, and the worker itself won't reschedule this timer if
+	 * suspend_count != 0.
+	 */
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+	queue_work(system_wq, &vctx->dump_work);
+#else
+	queue_work(system_highpri_wq, &vctx->dump_work);
+#endif
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * kbasep_vinstr_client_destroy() - Destroy a vinstr client.
+ * @vcli: vinstr client. Must not be attached to a vinstr context.
+ */
+static void kbasep_vinstr_client_destroy(struct kbase_vinstr_client *vcli)
+{
+	if (!vcli)
+		return;
+
+	kbase_hwcnt_virtualizer_client_destroy(vcli->hvcli);
+	kfree(vcli->dump_bufs_meta);
+	kbase_hwcnt_dump_buffer_array_free(&vcli->dump_bufs);
+	kbase_hwcnt_enable_map_free(&vcli->enable_map);
+	kfree(vcli);
+}
+
+/**
+ * kbasep_vinstr_client_create() - Create a vinstr client. Does not attach to
+ *                                 the vinstr context.
+ * @vctx:     Non-NULL pointer to vinstr context.
+ * @setup:    Non-NULL pointer to hardware counter ioctl setup structure.
+ *            setup->buffer_count must not be 0.
+ * @out_vcli: Non-NULL pointer to where created client will be stored on
+ *            success.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_client_create(
+	struct kbase_vinstr_context *vctx,
+	struct kbase_ioctl_hwcnt_reader_setup *setup,
+	struct kbase_vinstr_client **out_vcli)
+{
+	int errcode;
+	struct kbase_vinstr_client *vcli;
+	struct kbase_hwcnt_physical_enable_map phys_em;
+
+	WARN_ON(!vctx);
+	WARN_ON(!setup);
+	WARN_ON(setup->buffer_count == 0);
+
+	vcli = kzalloc(sizeof(*vcli), GFP_KERNEL);
+	if (!vcli)
+		return -ENOMEM;
+
+	vcli->vctx = vctx;
+
+	errcode = kbase_hwcnt_enable_map_alloc(
+		vctx->metadata, &vcli->enable_map);
+	if (errcode)
+		goto error;
+
+	phys_em.jm_bm = setup->jm_bm;
+	phys_em.shader_bm = setup->shader_bm;
+	phys_em.tiler_bm = setup->tiler_bm;
+	phys_em.mmu_l2_bm = setup->mmu_l2_bm;
+	kbase_hwcnt_gpu_enable_map_from_physical(&vcli->enable_map, &phys_em);
+
+	errcode = kbase_hwcnt_dump_buffer_array_alloc(
+		vctx->metadata, setup->buffer_count, &vcli->dump_bufs);
+	if (errcode)
+		goto error;
+
+	errcode = -ENOMEM;
+	vcli->dump_bufs_meta = kmalloc_array(
+		setup->buffer_count, sizeof(*vcli->dump_bufs_meta), GFP_KERNEL);
+	if (!vcli->dump_bufs_meta)
+		goto error;
+
+	errcode = kbase_hwcnt_virtualizer_client_create(
+		vctx->hvirt, &vcli->enable_map, &vcli->hvcli);
+	if (errcode)
+		goto error;
+
+	init_waitqueue_head(&vcli->waitq);
+
+	*out_vcli = vcli;
+	return 0;
+error:
+	kbasep_vinstr_client_destroy(vcli);
+	return errcode;
+}
+
+int kbase_vinstr_init(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_vinstr_context **out_vctx)
+{
+	struct kbase_vinstr_context *vctx;
+	const struct kbase_hwcnt_metadata *metadata;
+
+	if (!hvirt || !out_vctx)
+		return -EINVAL;
+
+	metadata = kbase_hwcnt_virtualizer_metadata(hvirt);
+	if (!metadata)
+		return -EINVAL;
+
+	vctx = kzalloc(sizeof(*vctx), GFP_KERNEL);
+	if (!vctx)
+		return -ENOMEM;
+
+	vctx->hvirt = hvirt;
+	vctx->metadata = metadata;
+
+	mutex_init(&vctx->lock);
+	INIT_LIST_HEAD(&vctx->clients);
+	hrtimer_init(&vctx->dump_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	vctx->dump_timer.function = kbasep_vinstr_dump_timer;
+	INIT_WORK(&vctx->dump_work, kbasep_vinstr_dump_worker);
+
+	*out_vctx = vctx;
+	return 0;
+}
+
+void kbase_vinstr_term(struct kbase_vinstr_context *vctx)
+{
+	if (!vctx)
+		return;
+
+	cancel_work_sync(&vctx->dump_work);
+
+	/* Non-zero client count implies client leak */
+	if (WARN_ON(vctx->client_count != 0)) {
+		struct kbase_vinstr_client *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &vctx->clients, node) {
+			list_del(&pos->node);
+			vctx->client_count--;
+			kbasep_vinstr_client_destroy(pos);
+		}
+	}
+
+	WARN_ON(vctx->client_count != 0);
+	kfree(vctx);
+}
+
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vctx)
+{
+	if (WARN_ON(!vctx))
+		return;
+
+	mutex_lock(&vctx->lock);
+
+	if (!WARN_ON(vctx->suspend_count == SIZE_MAX))
+		vctx->suspend_count++;
+
+	mutex_unlock(&vctx->lock);
+
+	/* Always sync cancel the timer and then the worker, regardless of the
+	 * new suspend count.
+	 *
+	 * This ensures concurrent calls to kbase_vinstr_suspend() always block
+	 * until vinstr is fully suspended.
+	 *
+	 * The timer is cancelled before the worker, as the timer
+	 * unconditionally re-enqueues the worker, but the worker checks the
+	 * suspend_count that we just incremented before rescheduling the timer.
+	 *
+	 * Therefore if we cancel the worker first, the timer might re-enqueue
+	 * the worker before we cancel the timer, but the opposite is not
+	 * possible.
+	 */
+	hrtimer_cancel(&vctx->dump_timer);
+	cancel_work_sync(&vctx->dump_work);
+}
+
+void kbase_vinstr_resume(struct kbase_vinstr_context *vctx)
+{
+	if (WARN_ON(!vctx))
+		return;
+
+	mutex_lock(&vctx->lock);
+
+	if (!WARN_ON(vctx->suspend_count == 0)) {
+		vctx->suspend_count--;
+
+		/* Last resume, so re-enqueue the worker if we have any periodic
+		 * clients.
+		 */
+		if (vctx->suspend_count == 0) {
+			struct kbase_vinstr_client *pos;
+			bool has_periodic_clients = false;
+
+			list_for_each_entry(pos, &vctx->clients, node) {
+				if (pos->dump_interval_ns != 0) {
+					has_periodic_clients = true;
+					break;
+				}
+			}
+
+			if (has_periodic_clients)
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+				queue_work(system_wq, &vctx->dump_work);
+#else
+				queue_work(system_highpri_wq, &vctx->dump_work);
+#endif
+		}
+	}
+
+	mutex_unlock(&vctx->lock);
+}
+
+int kbase_vinstr_hwcnt_reader_setup(
+	struct kbase_vinstr_context *vctx,
+	struct kbase_ioctl_hwcnt_reader_setup *setup)
+{
+	int errcode;
+	int fd;
+	struct kbase_vinstr_client *vcli = NULL;
+
+	if (!vctx || !setup ||
+	    (setup->buffer_count == 0) ||
+	    (setup->buffer_count > MAX_BUFFER_COUNT))
+		return -EINVAL;
+
+	errcode = kbasep_vinstr_client_create(vctx, setup, &vcli);
+	if (errcode)
+		goto error;
+
+	errcode = anon_inode_getfd(
+		"[mali_vinstr_desc]",
+		&vinstr_client_fops,
+		vcli,
+		O_RDONLY | O_CLOEXEC);
+	if (errcode < 0)
+		goto error;
+
+	fd = errcode;
+
+	/* Add the new client. No need to reschedule worker, as not periodic */
+	mutex_lock(&vctx->lock);
+
+	vctx->client_count++;
+	list_add(&vcli->node, &vctx->clients);
+
+	mutex_unlock(&vctx->lock);
+
+	return fd;
+error:
+	kbasep_vinstr_client_destroy(vcli);
+	return errcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_buffer_ready() - Check if client has ready
+ *                                             buffers.
+ * @cli: Non-NULL pointer to vinstr client.
+ *
+ * Return: Non-zero if client has at least one dumping buffer filled that was
+ *         not notified to user yet.
+ */
+static int kbasep_vinstr_hwcnt_reader_buffer_ready(
+	struct kbase_vinstr_client *cli)
+{
+	WARN_ON(!cli);
+	return atomic_read(&cli->write_idx) != atomic_read(&cli->meta_idx);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_dump() - Dump ioctl command.
+ * @cli: Non-NULL pointer to vinstr client.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_dump(
+	struct kbase_vinstr_client *cli)
+{
+	int errcode;
+
+	mutex_lock(&cli->vctx->lock);
+
+	errcode = kbasep_vinstr_client_dump(
+		cli, BASE_HWCNT_READER_EVENT_MANUAL);
+
+	mutex_unlock(&cli->vctx->lock);
+	return errcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_clear() - Clear ioctl command.
+ * @cli: Non-NULL pointer to vinstr client.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_clear(
+	struct kbase_vinstr_client *cli)
+{
+	int errcode;
+
+	mutex_lock(&cli->vctx->lock);
+
+	errcode = kbasep_vinstr_client_clear(cli);
+
+	mutex_unlock(&cli->vctx->lock);
+	return errcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_buffer() - Get buffer ioctl command.
+ * @cli:    Non-NULL pointer to vinstr client.
+ * @buffer: Non-NULL pointer to userspace buffer.
+ * @size:   Size of buffer.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+	struct kbase_vinstr_client *cli,
+	void __user *buffer,
+	size_t size)
+{
+	unsigned int meta_idx = atomic_read(&cli->meta_idx);
+	unsigned int idx = meta_idx % cli->dump_bufs.buf_cnt;
+
+	struct kbase_hwcnt_reader_metadata *meta = &cli->dump_bufs_meta[idx];
+
+	/* Metadata sanity check. */
+	WARN_ON(idx != meta->buffer_idx);
+
+	if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+		return -EINVAL;
+
+	/* Check if there is any buffer available. */
+	if (atomic_read(&cli->write_idx) == meta_idx)
+		return -EAGAIN;
+
+	/* Check if previously taken buffer was put back. */
+	if (atomic_read(&cli->read_idx) != meta_idx)
+		return -EBUSY;
+
+	/* Copy next available buffer's metadata to user. */
+	if (copy_to_user(buffer, meta, size))
+		return -EFAULT;
+
+	atomic_inc(&cli->meta_idx);
+
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_put_buffer() - Put buffer ioctl command.
+ * @cli:    Non-NULL pointer to vinstr client.
+ * @buffer: Non-NULL pointer to userspace buffer.
+ * @size:   Size of buffer.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+	struct kbase_vinstr_client *cli,
+	void __user *buffer,
+	size_t size)
+{
+	unsigned int read_idx = atomic_read(&cli->read_idx);
+	unsigned int idx = read_idx % cli->dump_bufs.buf_cnt;
+
+	struct kbase_hwcnt_reader_metadata meta;
+
+	if (sizeof(struct kbase_hwcnt_reader_metadata) != size)
+		return -EINVAL;
+
+	/* Check if any buffer was taken. */
+	if (atomic_read(&cli->meta_idx) == read_idx)
+		return -EPERM;
+
+	/* Check if correct buffer is put back. */
+	if (copy_from_user(&meta, buffer, size))
+		return -EFAULT;
+	if (idx != meta.buffer_idx)
+		return -EINVAL;
+
+	atomic_inc(&cli->read_idx);
+
+	return 0;
+}
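+
+/*
+ * Illustrative note (not part of the original patch): conceptually (ignoring
+ * unsigned wrap-around) the three ring indices advance monotonically with
+ * write_idx >= meta_idx >= read_idx. GET_BUFFER hands out the buffer at
+ * meta_idx only when a dump has been written (write_idx != meta_idx) and the
+ * previously taken buffer has been returned (read_idx == meta_idx), so
+ * userspace holds at most one buffer at a time.
+ */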
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_set_interval() - Set interval ioctl command.
+ * @cli:      Non-NULL pointer to vinstr client.
+ * @interval: Periodic dumping interval (disable periodic dumping if 0).
+ *
+ * Return: 0 always.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+	struct kbase_vinstr_client *cli,
+	u32 interval)
+{
+	mutex_lock(&cli->vctx->lock);
+
+	if ((interval != 0) && (interval < DUMP_INTERVAL_MIN_NS))
+		interval = DUMP_INTERVAL_MIN_NS;
+	/* Update the interval, and put in a dummy next dump time */
+	cli->dump_interval_ns = interval;
+	cli->next_dump_time_ns = 0;
+
+	/*
+	 * If it's a periodic client, kick off the worker early to do a proper
+	 * timer reschedule. Return value is ignored, as we don't care if the
+	 * worker is already queued.
+	 */
+	if ((interval != 0) && (cli->vctx->suspend_count == 0))
+#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+		queue_work(system_wq, &cli->vctx->dump_work);
+#else
+		queue_work(system_highpri_wq, &cli->vctx->dump_work);
+#endif
+
+	mutex_unlock(&cli->vctx->lock);
+
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_enable_event() - Enable event ioctl command.
+ * @cli:      Non-NULL pointer to vinstr client.
+ * @event_id: ID of event to enable.
+ *
+ * Return: 0 always.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+		struct kbase_vinstr_client *cli,
+		enum base_hwcnt_reader_event event_id)
+{
+	/* No-op, as events aren't supported */
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_disable_event() - Disable event ioctl
+ *                                                    command.
+ * @cli:      Non-NULL pointer to vinstr client.
+ * @event_id: ID of event to disable.
+ *
+ * Return: 0 always.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+	struct kbase_vinstr_client *cli,
+	enum base_hwcnt_reader_event event_id)
+{
+	/* No-op, as events aren't supported */
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl_get_hwver() - Get HW version ioctl command.
+ * @cli:   Non-NULL pointer to vinstr client.
+ * @hwver: Non-NULL pointer to user buffer where HW version will be stored.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+	struct kbase_vinstr_client *cli,
+	u32 __user *hwver)
+{
+	u32 ver = 0;
+	const enum kbase_hwcnt_gpu_group_type type =
+		kbase_hwcnt_metadata_group_type(cli->vctx->metadata, 0);
+
+	switch (type) {
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V4:
+		ver = 4;
+		break;
+	case KBASE_HWCNT_GPU_GROUP_TYPE_V5:
+		ver = 5;
+		break;
+	default:
+		WARN_ON(true);
+	}
+
+	if (ver != 0) {
+		return put_user(ver, hwver);
+	} else {
+		return -EINVAL;
+	}
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_ioctl() - hwcnt reader's ioctl.
+ * @filp:   Non-NULL pointer to file structure.
+ * @cmd:    User command.
+ * @arg:    Command's argument.
+ *
+ * Return: 0 on success, else error code.
+ */
+static long kbasep_vinstr_hwcnt_reader_ioctl(
+	struct file *filp,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	long rcode;
+	struct kbase_vinstr_client *cli;
+
+	if (!filp || (_IOC_TYPE(cmd) != KBASE_HWCNT_READER))
+		return -EINVAL;
+
+	cli = filp->private_data;
+	if (!cli)
+		return -EINVAL;
+
+	switch (cmd) {
+	case KBASE_HWCNT_READER_GET_API_VERSION:
+		rcode = put_user(HWCNT_READER_API, (u32 __user *)arg);
+		break;
+	case KBASE_HWCNT_READER_GET_HWVER:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_hwver(
+			cli, (u32 __user *)arg);
+		break;
+	case KBASE_HWCNT_READER_GET_BUFFER_SIZE:
+		rcode = put_user(
+			(u32)cli->vctx->metadata->dump_buf_bytes,
+			(u32 __user *)arg);
+		break;
+	case KBASE_HWCNT_READER_DUMP:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_dump(cli);
+		break;
+	case KBASE_HWCNT_READER_CLEAR:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_clear(cli);
+		break;
+	case KBASE_HWCNT_READER_GET_BUFFER:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_get_buffer(
+			cli, (void __user *)arg, _IOC_SIZE(cmd));
+		break;
+	case KBASE_HWCNT_READER_PUT_BUFFER:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_put_buffer(
+			cli, (void __user *)arg, _IOC_SIZE(cmd));
+		break;
+	case KBASE_HWCNT_READER_SET_INTERVAL:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
+			cli, (u32)arg);
+		break;
+	case KBASE_HWCNT_READER_ENABLE_EVENT:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_enable_event(
+			cli, (enum base_hwcnt_reader_event)arg);
+		break;
+	case KBASE_HWCNT_READER_DISABLE_EVENT:
+		rcode = kbasep_vinstr_hwcnt_reader_ioctl_disable_event(
+			cli, (enum base_hwcnt_reader_event)arg);
+		break;
+	default:
+		WARN_ON(true);
+		rcode = -EINVAL;
+		break;
+	}
+
+	return rcode;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_poll() - hwcnt reader's poll.
+ * @filp: Non-NULL pointer to file structure.
+ * @wait: Non-NULL pointer to poll table.
+ *
+ * Return: POLLIN if data can be read without blocking, 0 if data cannot be
+ *         read without blocking, else error code.
+ */
+static unsigned int kbasep_vinstr_hwcnt_reader_poll(
+	struct file *filp,
+	poll_table *wait)
+{
+	struct kbase_vinstr_client *cli;
+
+	if (!filp || !wait)
+		return -EINVAL;
+
+	cli = filp->private_data;
+	if (!cli)
+		return -EINVAL;
+
+	poll_wait(filp, &cli->waitq, wait);
+	if (kbasep_vinstr_hwcnt_reader_buffer_ready(cli))
+		return POLLIN;
+	return 0;
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_mmap() - hwcnt reader's mmap.
+ * @filp: Non-NULL pointer to file structure.
+ * @vma:  Non-NULL pointer to vma structure.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int kbasep_vinstr_hwcnt_reader_mmap(
+	struct file *filp,
+	struct vm_area_struct *vma)
+{
+	struct kbase_vinstr_client *cli;
+	unsigned long vm_size, size, addr, pfn, offset;
+
+	if (!filp || !vma)
+		return -EINVAL;
+
+	cli = filp->private_data;
+	if (!cli)
+		return -EINVAL;
+
+	vm_size = vma->vm_end - vma->vm_start;
+	size = cli->dump_bufs.buf_cnt * cli->vctx->metadata->dump_buf_bytes;
+
+	if (vma->vm_pgoff > (size >> PAGE_SHIFT))
+		return -EINVAL;
+
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+	if (vm_size > size - offset)
+		return -EINVAL;
+
+	addr = __pa(cli->dump_bufs.page_addr + offset);
+	pfn = addr >> PAGE_SHIFT;
+
+	return remap_pfn_range(
+		vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
+}
+
+/**
+ * kbasep_vinstr_hwcnt_reader_release() - hwcnt reader's release.
+ * @inode: Non-NULL pointer to inode structure.
+ * @filp:  Non-NULL pointer to file structure.
+ *
+ * Return: 0 always.
+ */
+static int kbasep_vinstr_hwcnt_reader_release(struct inode *inode,
+	struct file *filp)
+{
+	struct kbase_vinstr_client *vcli = filp->private_data;
+
+	mutex_lock(&vcli->vctx->lock);
+
+	vcli->vctx->client_count--;
+	list_del(&vcli->node);
+
+	mutex_unlock(&vcli->vctx->lock);
+
+	kbasep_vinstr_client_destroy(vcli);
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.h b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h
new file mode 100644
index 0000000..81d315f
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * Vinstr, used to provide an ioctl for userspace access to periodic hardware
+ * counters.
+ */
+
+#ifndef _KBASE_VINSTR_H_
+#define _KBASE_VINSTR_H_
+
+struct kbase_vinstr_context;
+struct kbase_hwcnt_virtualizer;
+struct kbase_ioctl_hwcnt_reader_setup;
+
+/**
+ * kbase_vinstr_init() - Initialise a vinstr context.
+ * @hvirt:    Non-NULL pointer to the hardware counter virtualizer.
+ * @out_vctx: Non-NULL pointer to where the pointer to the created vinstr
+ *            context will be stored on success.
+ *
+ * On creation, the suspend count of the context will be 0.
+ *
+ * Return: 0 on success, else error code.
+ */
+int kbase_vinstr_init(
+	struct kbase_hwcnt_virtualizer *hvirt,
+	struct kbase_vinstr_context **out_vctx);
+
+/**
+ * kbase_vinstr_term() - Terminate a vinstr context.
+ * @vctx: Pointer to the vinstr context to be terminated.
+ */
+void kbase_vinstr_term(struct kbase_vinstr_context *vctx);
+
+/**
+ * kbase_vinstr_suspend() - Increment the suspend count of the context.
+ * @vctx: Non-NULL pointer to the vinstr context to be suspended.
+ *
+ * After this function call returns, it is guaranteed that all timers and
+ * workers in vinstr will be cancelled, and will not be re-triggered until
+ * after the context has been resumed. In effect, this means no new counter
+ * dumps will occur for any existing or subsequently added periodic clients.
+ */
+void kbase_vinstr_suspend(struct kbase_vinstr_context *vctx);
+
+/**
+ * kbase_vinstr_resume() - Decrement the suspend count of the context.
+ * @vctx: Non-NULL pointer to the vinstr context to be resumed.
+ *
+ * If a call to this function decrements the suspend count from 1 to 0, then
+ * normal operation of vinstr will be resumed (i.e. counter dumps will once
+ * again be automatically triggered for all periodic clients).
+ *
+ * It is only valid to call this function once for each prior call to
+ * kbase_vinstr_suspend() that has returned.
+ */
+void kbase_vinstr_resume(struct kbase_vinstr_context *vctx);
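+
+/*
+ * Illustrative usage sketch (not part of the original patch): callers are
+ * expected to pair every suspend with exactly one later resume, for example
+ * around an operation that must not race with counter dumps:
+ *
+ *	kbase_vinstr_suspend(vctx);	// blocks until dumping has stopped
+ *	// ... perform the dump-sensitive operation ...
+ *	kbase_vinstr_resume(vctx);	// periodic dumps restart once count hits 0
+ */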
+
+/**
+ * kbase_vinstr_hwcnt_reader_setup() - Set up a new hardware counter reader
+ *                                     client.
+ * @vinstr_ctx: Non-NULL pointer to the vinstr context.
+ * @setup:      Non-NULL pointer to the hwcnt reader configuration.
+ *
+ * Return: file descriptor on success, else a (negative) error code.
+ */
+int kbase_vinstr_hwcnt_reader_setup(
+	struct kbase_vinstr_context *vinstr_ctx,
+	struct kbase_ioctl_hwcnt_reader_setup *setup);
+
+#endif /* _KBASE_VINSTR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
new file mode 100644
index 0000000..6c6a8c6a5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
@@ -0,0 +1,204 @@
+/*
+ *
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+#if !defined(_TRACE_MALI_KBASE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_KBASE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mali_slot_template,
+	TP_PROTO(int jobslot, unsigned int info_val),
+	TP_ARGS(jobslot, info_val),
+	TP_STRUCT__entry(
+		__field(unsigned int, jobslot)
+		__field(unsigned int, info_val)
+	),
+	TP_fast_assign(
+		__entry->jobslot = jobslot;
+		__entry->info_val = info_val;
+	),
+	TP_printk("jobslot=%u info=%u", __entry->jobslot, __entry->info_val)
+);
+
+#define DEFINE_MALI_SLOT_EVENT(name) \
+DEFINE_EVENT(mali_slot_template, mali_##name, \
+	TP_PROTO(int jobslot, unsigned int info_val), \
+	TP_ARGS(jobslot, info_val))
+DEFINE_MALI_SLOT_EVENT(JM_SUBMIT);
+DEFINE_MALI_SLOT_EVENT(JM_JOB_DONE);
+DEFINE_MALI_SLOT_EVENT(JM_UPDATE_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_CHECK_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_SOFT_OR_HARD_STOP);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_EVICT);
+DEFINE_MALI_SLOT_EVENT(JM_BEGIN_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JM_END_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_SUBMIT_TO_BLOCKED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_CURRENT);
+DEFINE_MALI_SLOT_EVENT(JD_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_CORES_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_INUSE_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_AFFINITY_WOULD_VIOLATE);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_RETRY_NEEDED);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB_IRQ);
+#undef DEFINE_MALI_SLOT_EVENT
+
+DECLARE_EVENT_CLASS(mali_refcount_template,
+	TP_PROTO(int refcount, unsigned int info_val),
+	TP_ARGS(refcount, info_val),
+	TP_STRUCT__entry(
+		__field(unsigned int, refcount)
+		__field(unsigned int, info_val)
+	),
+	TP_fast_assign(
+		__entry->refcount = refcount;
+		__entry->info_val = info_val;
+	),
+	TP_printk("refcount=%u info=%u", __entry->refcount, __entry->info_val)
+);
+
+#define DEFINE_MALI_REFCOUNT_EVENT(name) \
+DEFINE_EVENT(mali_refcount_template, mali_##name, \
+	TP_PROTO(int refcount, unsigned int info_val), \
+	TP_ARGS(refcount, info_val))
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX_NOLOCK);
+DEFINE_MALI_REFCOUNT_EVENT(JS_ADD_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_REMOVE_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RELEASE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_TRY_SCHEDULE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_INIT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TERM_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_ENQUEUE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_DEQUEUE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TRY_EVICT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_ADD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_REMOVE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_FOREACH_CTX_JOBS);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_ACTIVE);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_IDLE);
+#undef DEFINE_MALI_REFCOUNT_EVENT
+
+DECLARE_EVENT_CLASS(mali_add_template,
+	TP_PROTO(int gpu_addr, unsigned int info_val),
+	TP_ARGS(gpu_addr, info_val),
+	TP_STRUCT__entry(
+		__field(unsigned int, gpu_addr)
+		__field(unsigned int, info_val)
+	),
+	TP_fast_assign(
+		__entry->gpu_addr = gpu_addr;
+		__entry->info_val = info_val;
+	),
+	TP_printk("gpu_addr=%u info=%u", __entry->gpu_addr, __entry->info_val)
+);
+
+#define DEFINE_MALI_ADD_EVENT(name) \
+DEFINE_EVENT(mali_add_template, mali_##name, \
+	TP_PROTO(int gpu_addr, unsigned int info_val), \
+	TP_ARGS(gpu_addr, info_val))
+DEFINE_MALI_ADD_EVENT(CORE_CTX_DESTROY);
+DEFINE_MALI_ADD_EVENT(CORE_CTX_HWINSTR_TERM);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_DONE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_SOFT_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_HARD_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_SAMPLE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_CLEAN_INV_CACHES);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER_END);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL);
+DEFINE_MALI_ADD_EVENT(JD_ZAP_CONTEXT);
+DEFINE_MALI_ADD_EVENT(JM_IRQ);
+DEFINE_MALI_ADD_EVENT(JM_IRQ_END);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS_DONE);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_NON_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_DONE);
+DEFINE_MALI_ADD_EVENT(JM_SUBMIT_AFTER_RESET);
+DEFINE_MALI_ADD_EVENT(JM_JOB_COMPLETE);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_CTX);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_END);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_START);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_ENQUEUE_JOB);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_DESIRED);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERING_UP);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERED_UP);
+DEFINE_MALI_ADD_EVENT(PM_PWRON);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_L2);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_L2);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_TILER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_TILER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_GPU_ON);
+DEFINE_MALI_ADD_EVENT(PM_GPU_OFF);
+DEFINE_MALI_ADD_EVENT(PM_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_INIT);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_TERM);
+DEFINE_MALI_ADD_EVENT(PM_CA_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_WAKE_WAITERS);
+#undef DEFINE_MALI_ADD_EVENT
+
+#endif /* _TRACE_MALI_KBASE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mali_linux_kbase_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/arm/midgard/mali_linux_trace.h b/drivers/gpu/arm/midgard/mali_linux_trace.h
new file mode 100644
index 0000000..0741dfc
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_linux_trace.h
@@ -0,0 +1,194 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+#include <linux/tracepoint.h>
+
+#define MALI_JOB_SLOTS_EVENT_CHANGED
+
+/**
+ * mali_job_slots_event - called from mali_kbase_core_linux.c
+ * @event_id: ORed together bitfields representing a type of event, made with the GATOR_MAKE_EVENT() macro.
+ */
+TRACE_EVENT(mali_job_slots_event,
+	TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid,
+			unsigned char job_id),
+	TP_ARGS(event_id, tgid, pid, job_id),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned int, tgid)
+		__field(unsigned int, pid)
+		__field(unsigned char, job_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->tgid = tgid;
+		__entry->pid = pid;
+		__entry->job_id = job_id;
+	),
+	TP_printk("event=%u tgid=%u pid=%u job_id=%u",
+		__entry->event_id, __entry->tgid, __entry->pid, __entry->job_id)
+);
+
+/**
+ * mali_pm_status - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask reporting the power status of the cores (1 = on, 0 = off)
+ */
+TRACE_EVENT(mali_pm_status,
+	TP_PROTO(unsigned int event_id, unsigned long long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned long long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_on - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask of the cores to power up
+ */
+TRACE_EVENT(mali_pm_power_on,
+	TP_PROTO(unsigned int event_id, unsigned long long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned long long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_pm_power_off - Called by mali_kbase_pm_driver.c
+ * @event_id: core type (shader, tiler, l2 cache)
+ * @value: 64-bit bitmask of the cores to power down
+ */
+TRACE_EVENT(mali_pm_power_off,
+	TP_PROTO(unsigned int event_id, unsigned long long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned long long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %u = %llu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_page_fault_insert_pages - Called by page_fault_worker()
+ * It reports an MMU page fault that resulted in new pages being mapped.
+ * @event_id: MMU address space number.
+ * @value: number of newly allocated pages
+ */
+TRACE_EVENT(mali_page_fault_insert_pages,
+	TP_PROTO(int event_id, unsigned long value),
+	TP_ARGS(event_id, value),
+	TP_STRUCT__entry(
+		__field(int, event_id)
+		__field(unsigned long, value)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->value = value;
+	),
+	TP_printk("event %d = %lu", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_mmu_as_in_use - Called by assign_and_activate_kctx_addr_space()
+ * It reports that a given MMU address space is now in use.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_in_use,
+	TP_PROTO(int event_id),
+	TP_ARGS(event_id),
+	TP_STRUCT__entry(
+		__field(int, event_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+	),
+	TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_mmu_as_released - Called by kbasep_js_runpool_release_ctx_internal()
+ * It reports that a given MMU address space has been released.
+ * @event_id: MMU address space number.
+ */
+TRACE_EVENT(mali_mmu_as_released,
+	TP_PROTO(int event_id),
+	TP_ARGS(event_id),
+	TP_STRUCT__entry(
+		__field(int, event_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+	),
+	TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_total_alloc_pages_change - Called by kbase_atomic_add_pages()
+ *                                 and by kbase_atomic_sub_pages()
+ * It reports that the total number of allocated pages has changed.
+ * @event_id: number of pages to be added or subtracted (according to the sign).
+ */
+TRACE_EVENT(mali_total_alloc_pages_change,
+	TP_PROTO(long long int event_id),
+	TP_ARGS(event_id),
+	TP_STRUCT__entry(
+		__field(long long int, event_id)
+	),
+	TP_fast_assign(
+		__entry->event_id = event_id;
+	),
+	TP_printk("event=%lld", __entry->event_id)
+);
+
+#endif				/*  _TRACE_MALI_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/arm/midgard/mali_malisw.h b/drivers/gpu/arm/midgard/mali_malisw.h
new file mode 100644
index 0000000..3a4db10
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_malisw.h
@@ -0,0 +1,109 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Kernel-wide include for common macros and types.
+ */
+
+#ifndef _MALISW_H_
+#define _MALISW_H_
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#define U8_MAX          ((u8)~0U)
+#define S8_MAX          ((s8)(U8_MAX>>1))
+#define S8_MIN          ((s8)(-S8_MAX - 1))
+#define U16_MAX         ((u16)~0U)
+#define S16_MAX         ((s16)(U16_MAX>>1))
+#define S16_MIN         ((s16)(-S16_MAX - 1))
+#define U32_MAX         ((u32)~0U)
+#define S32_MAX         ((s32)(U32_MAX>>1))
+#define S32_MIN         ((s32)(-S32_MAX - 1))
+#define U64_MAX         ((u64)~0ULL)
+#define S64_MAX         ((s64)(U64_MAX>>1))
+#define S64_MIN         ((s64)(-S64_MAX - 1))
+#endif /* LINUX_VERSION_CODE */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+#define SIZE_MAX        (~(size_t)0)
+#endif /* LINUX_VERSION_CODE */
+
+/**
+ * MIN - Return the lesser of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * Refer to the MAX macro for more details.
+ */
+#define MIN(x, y)	((x) < (y) ? (x) : (y))
+
+/**
+ * MAX -  Return the greater of two values.
+ *
+ * As a macro it may evaluate its arguments more than once.
+ * If called on the same two arguments as MIN it is guaranteed to return
+ * the one that MIN didn't return. This is significant for types where not
+ * all values are comparable, e.g. NaNs in floating-point types. But if you want
+ * to retrieve the min and max of two values, consider using a conditional swap
+ * instead.
+ */
+#define MAX(x, y)	((x) < (y) ? (y) : (x))
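+
+/*
+ * Illustrative note (not part of the original patch): because the arguments
+ * may be evaluated twice, side effects are duplicated. For example,
+ * MIN(i++, limit) expands to ((i++) < (limit) ? (i++) : (limit)), so i is
+ * incremented twice whenever its original value is below limit. Prefer
+ * passing plain values, or use a conditional swap when both the minimum and
+ * the maximum of the same pair are needed.
+ */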
+
+/**
+ * @hideinitializer
+ * Function-like macro for suppressing unused variable warnings. Where possible
+ * such variables should be removed; this macro is present for cases where we
+ * must support API backwards compatibility.
+ */
+#define CSTD_UNUSED(x)	((void)(x))
+
+/**
+ * @hideinitializer
+ * Function-like macro for use where "no behavior" is desired. This is useful
+ * when compile-time macros turn a function-like macro into a no-op, but
+ * where having no statement is otherwise invalid.
+ */
+#define CSTD_NOP(...)	((void)#__VA_ARGS__)
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a single level macro.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR1( MY_MACRO )
+ * > "MY_MACRO"
+ * @endcode
+ */
+#define CSTD_STR1(x)	#x
+
+/**
+ * @hideinitializer
+ * Function-like macro for stringizing a macro's value. This should not be used
+ * if the macro is defined in a way which may have no value; the alternative
+ * @c CSTD_STR2N macro should be used instead.
+ * @code
+ * #define MY_MACRO 32
+ * CSTD_STR2( MY_MACRO )
+ * > "32"
+ * @endcode
+ */
+#define CSTD_STR2(x)	CSTD_STR1(x)
+
+#endif /* _MALISW_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_midg_coherency.h b/drivers/gpu/arm/midgard/mali_midg_coherency.h
new file mode 100644
index 0000000..29d5df3
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_midg_coherency.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * (C) COPYRIGHT 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _MIDG_COHERENCY_H_
+#define _MIDG_COHERENCY_H_
+
+#define COHERENCY_ACE_LITE 0
+#define COHERENCY_ACE      1
+#define COHERENCY_NONE     31
+#define COHERENCY_FEATURE_BIT(x) (1 << (x))
+
+#endif /* _MIDG_COHERENCY_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_midg_regmap.h b/drivers/gpu/arm/midgard/mali_midg_regmap.h
new file mode 100644
index 0000000..0f03e8d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_midg_regmap.h
@@ -0,0 +1,643 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _MIDGARD_REGMAP_H_
+#define _MIDGARD_REGMAP_H_
+
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
+
+/*
+ * Begin Register Offsets
+ */
+
+#define GPU_CONTROL_BASE        0x0000
+#define GPU_CONTROL_REG(r)      (GPU_CONTROL_BASE + (r))
+#define GPU_ID                  0x000	/* (RO) GPU and revision identifier */
+#define L2_FEATURES             0x004	/* (RO) Level 2 cache features */
+#define CORE_FEATURES           0x008	/* (RO) Shader Core Features */
+#define TILER_FEATURES          0x00C	/* (RO) Tiler Features */
+#define MEM_FEATURES            0x010	/* (RO) Memory system features */
+#define MMU_FEATURES            0x014	/* (RO) MMU features */
+#define AS_PRESENT              0x018	/* (RO) Address space slots present */
+#define JS_PRESENT              0x01C	/* (RO) Job slots present */
+#define GPU_IRQ_RAWSTAT         0x020	/* (RW) */
+#define GPU_IRQ_CLEAR           0x024	/* (WO) */
+#define GPU_IRQ_MASK            0x028	/* (RW) */
+#define GPU_IRQ_STATUS          0x02C	/* (RO) */
+
+
+/* IRQ flags */
+#define GPU_FAULT               (1 << 0)	/* A GPU Fault has occurred */
+#define MULTIPLE_GPU_FAULTS     (1 << 7)	/* More than one GPU Fault occurred. */
+#define RESET_COMPLETED         (1 << 8)	/* Set when a reset has completed. Intended for use with SOFT_RESET
+						   commands, which may take time. */
+#define POWER_CHANGED_SINGLE    (1 << 9)	/* Set when a single core has finished powering up or down. */
+#define POWER_CHANGED_ALL       (1 << 10)	/* Set when all cores have finished powering up or down
+						   and the power manager is idle. */
+
+#define PRFCNT_SAMPLE_COMPLETED (1 << 16)	/* Set when a performance count sample has completed. */
+#define CLEAN_CACHES_COMPLETED  (1 << 17)	/* Set when a cache clean operation has completed. */
+
+#define GPU_IRQ_REG_ALL (GPU_FAULT | MULTIPLE_GPU_FAULTS | RESET_COMPLETED \
+			| POWER_CHANGED_ALL | PRFCNT_SAMPLE_COMPLETED)
+
+#define GPU_COMMAND             0x030	/* (WO) */
+#define GPU_STATUS              0x034	/* (RO) */
+#define LATEST_FLUSH            0x038	/* (RO) */
+
+#define GROUPS_L2_COHERENT      (1 << 0)	/* Cores groups are l2 coherent */
+#define GPU_DBGEN               (1 << 8)	/* DBGEN wire status */
+
+#define GPU_FAULTSTATUS         0x03C	/* (RO) GPU exception type and fault status */
+#define GPU_FAULTADDRESS_LO     0x040	/* (RO) GPU exception fault address, low word */
+#define GPU_FAULTADDRESS_HI     0x044	/* (RO) GPU exception fault address, high word */
+
+#define PWR_KEY                 0x050	/* (WO) Power manager key register */
+#define PWR_OVERRIDE0           0x054	/* (RW) Power manager override settings */
+#define PWR_OVERRIDE1           0x058	/* (RW) Power manager override settings */
+
+#define PRFCNT_BASE_LO          0x060	/* (RW) Performance counter memory region base address, low word */
+#define PRFCNT_BASE_HI          0x064	/* (RW) Performance counter memory region base address, high word */
+#define PRFCNT_CONFIG           0x068	/* (RW) Performance counter configuration */
+#define PRFCNT_JM_EN            0x06C	/* (RW) Performance counter enable flags for Job Manager */
+#define PRFCNT_SHADER_EN        0x070	/* (RW) Performance counter enable flags for shader cores */
+#define PRFCNT_TILER_EN         0x074	/* (RW) Performance counter enable flags for tiler */
+#define PRFCNT_MMU_L2_EN        0x07C	/* (RW) Performance counter enable flags for MMU/L2 cache */
+
+#define CYCLE_COUNT_LO          0x090	/* (RO) Cycle counter, low word */
+#define CYCLE_COUNT_HI          0x094	/* (RO) Cycle counter, high word */
+#define TIMESTAMP_LO            0x098	/* (RO) Global time stamp counter, low word */
+#define TIMESTAMP_HI            0x09C	/* (RO) Global time stamp counter, high word */
+
+#define THREAD_MAX_THREADS		0x0A0	/* (RO) Maximum number of threads per core */
+#define THREAD_MAX_WORKGROUP_SIZE 0x0A4	/* (RO) Maximum workgroup size */
+#define THREAD_MAX_BARRIER_SIZE 0x0A8	/* (RO) Maximum threads waiting at a barrier */
+#define THREAD_FEATURES         0x0AC	/* (RO) Thread features */
+#define THREAD_TLS_ALLOC        0x310   /* (RO) Number of threads per core that
+					 * TLS must be allocated for
+					 */
+
+#define TEXTURE_FEATURES_0      0x0B0	/* (RO) Support flags for indexed texture formats 0..31 */
+#define TEXTURE_FEATURES_1      0x0B4	/* (RO) Support flags for indexed texture formats 32..63 */
+#define TEXTURE_FEATURES_2      0x0B8	/* (RO) Support flags for indexed texture formats 64..95 */
+#define TEXTURE_FEATURES_3      0x0BC	/* (RO) Support flags for texture order */
+
+#define TEXTURE_FEATURES_REG(n) GPU_CONTROL_REG(TEXTURE_FEATURES_0 + ((n) << 2))
+
+#define JS0_FEATURES            0x0C0	/* (RO) Features of job slot 0 */
+#define JS1_FEATURES            0x0C4	/* (RO) Features of job slot 1 */
+#define JS2_FEATURES            0x0C8	/* (RO) Features of job slot 2 */
+#define JS3_FEATURES            0x0CC	/* (RO) Features of job slot 3 */
+#define JS4_FEATURES            0x0D0	/* (RO) Features of job slot 4 */
+#define JS5_FEATURES            0x0D4	/* (RO) Features of job slot 5 */
+#define JS6_FEATURES            0x0D8	/* (RO) Features of job slot 6 */
+#define JS7_FEATURES            0x0DC	/* (RO) Features of job slot 7 */
+#define JS8_FEATURES            0x0E0	/* (RO) Features of job slot 8 */
+#define JS9_FEATURES            0x0E4	/* (RO) Features of job slot 9 */
+#define JS10_FEATURES           0x0E8	/* (RO) Features of job slot 10 */
+#define JS11_FEATURES           0x0EC	/* (RO) Features of job slot 11 */
+#define JS12_FEATURES           0x0F0	/* (RO) Features of job slot 12 */
+#define JS13_FEATURES           0x0F4	/* (RO) Features of job slot 13 */
+#define JS14_FEATURES           0x0F8	/* (RO) Features of job slot 14 */
+#define JS15_FEATURES           0x0FC	/* (RO) Features of job slot 15 */
+
+#define JS_FEATURES_REG(n)      GPU_CONTROL_REG(JS0_FEATURES + ((n) << 2))
+
+#define SHADER_PRESENT_LO       0x100	/* (RO) Shader core present bitmap, low word */
+#define SHADER_PRESENT_HI       0x104	/* (RO) Shader core present bitmap, high word */
+
+#define TILER_PRESENT_LO        0x110	/* (RO) Tiler core present bitmap, low word */
+#define TILER_PRESENT_HI        0x114	/* (RO) Tiler core present bitmap, high word */
+
+#define L2_PRESENT_LO           0x120	/* (RO) Level 2 cache present bitmap, low word */
+#define L2_PRESENT_HI           0x124	/* (RO) Level 2 cache present bitmap, high word */
+
+#define STACK_PRESENT_LO        0xE00   /* (RO) Core stack present bitmap, low word */
+#define STACK_PRESENT_HI        0xE04   /* (RO) Core stack present bitmap, high word */
+
+
+#define SHADER_READY_LO         0x140	/* (RO) Shader core ready bitmap, low word */
+#define SHADER_READY_HI         0x144	/* (RO) Shader core ready bitmap, high word */
+
+#define TILER_READY_LO          0x150	/* (RO) Tiler core ready bitmap, low word */
+#define TILER_READY_HI          0x154	/* (RO) Tiler core ready bitmap, high word */
+
+#define L2_READY_LO             0x160	/* (RO) Level 2 cache ready bitmap, low word */
+#define L2_READY_HI             0x164	/* (RO) Level 2 cache ready bitmap, high word */
+
+#define STACK_READY_LO          0xE10   /* (RO) Core stack ready bitmap, low word */
+#define STACK_READY_HI          0xE14   /* (RO) Core stack ready bitmap, high word */
+
+
+#define SHADER_PWRON_LO         0x180	/* (WO) Shader core power on bitmap, low word */
+#define SHADER_PWRON_HI         0x184	/* (WO) Shader core power on bitmap, high word */
+
+#define TILER_PWRON_LO          0x190	/* (WO) Tiler core power on bitmap, low word */
+#define TILER_PWRON_HI          0x194	/* (WO) Tiler core power on bitmap, high word */
+
+#define L2_PWRON_LO             0x1A0	/* (WO) Level 2 cache power on bitmap, low word */
+#define L2_PWRON_HI             0x1A4	/* (WO) Level 2 cache power on bitmap, high word */
+
+#define STACK_PWRON_LO          0xE20   /* (RO) Core stack power on bitmap, low word */
+#define STACK_PWRON_HI          0xE24   /* (RO) Core stack power on bitmap, high word */
+
+
+#define SHADER_PWROFF_LO        0x1C0	/* (WO) Shader core power off bitmap, low word */
+#define SHADER_PWROFF_HI        0x1C4	/* (WO) Shader core power off bitmap, high word */
+
+#define TILER_PWROFF_LO         0x1D0	/* (WO) Tiler core power off bitmap, low word */
+#define TILER_PWROFF_HI         0x1D4	/* (WO) Tiler core power off bitmap, high word */
+
+#define L2_PWROFF_LO            0x1E0	/* (WO) Level 2 cache power off bitmap, low word */
+#define L2_PWROFF_HI            0x1E4	/* (WO) Level 2 cache power off bitmap, high word */
+
+#define STACK_PWROFF_LO         0xE30   /* (RO) Core stack power off bitmap, low word */
+#define STACK_PWROFF_HI         0xE34   /* (RO) Core stack power off bitmap, high word */
+
+
+#define SHADER_PWRTRANS_LO      0x200	/* (RO) Shader core power transition bitmap, low word */
+#define SHADER_PWRTRANS_HI      0x204	/* (RO) Shader core power transition bitmap, high word */
+
+#define TILER_PWRTRANS_LO       0x210	/* (RO) Tiler core power transition bitmap, low word */
+#define TILER_PWRTRANS_HI       0x214	/* (RO) Tiler core power transition bitmap, high word */
+
+#define L2_PWRTRANS_LO          0x220	/* (RO) Level 2 cache power transition bitmap, low word */
+#define L2_PWRTRANS_HI          0x224	/* (RO) Level 2 cache power transition bitmap, high word */
+
+#define STACK_PWRTRANS_LO       0xE40   /* (RO) Core stack power transition bitmap, low word */
+#define STACK_PWRTRANS_HI       0xE44   /* (RO) Core stack power transition bitmap, high word */
+
+
+#define SHADER_PWRACTIVE_LO     0x240	/* (RO) Shader core active bitmap, low word */
+#define SHADER_PWRACTIVE_HI     0x244	/* (RO) Shader core active bitmap, high word */
+
+#define TILER_PWRACTIVE_LO      0x250	/* (RO) Tiler core active bitmap, low word */
+#define TILER_PWRACTIVE_HI      0x254	/* (RO) Tiler core active bitmap, high word */
+
+#define L2_PWRACTIVE_LO         0x260	/* (RO) Level 2 cache active bitmap, low word */
+#define L2_PWRACTIVE_HI         0x264	/* (RO) Level 2 cache active bitmap, high word */
+
+#define COHERENCY_FEATURES      0x300	/* (RO) Coherency features present */
+#define COHERENCY_ENABLE        0x304	/* (RW) Coherency enable */
+
+#define JM_CONFIG               0xF00   /* (RW) Job Manager configuration register (Implementation specific register) */
+#define SHADER_CONFIG           0xF04	/* (RW) Shader core configuration settings (Implementation specific register) */
+#define TILER_CONFIG            0xF08   /* (RW) Tiler core configuration settings (Implementation specific register) */
+#define L2_MMU_CONFIG           0xF0C	/* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */
+
+#define JOB_CONTROL_BASE        0x1000
+
+#define JOB_CONTROL_REG(r)      (JOB_CONTROL_BASE + (r))
+
+#define JOB_IRQ_RAWSTAT         0x000	/* Raw interrupt status register */
+#define JOB_IRQ_CLEAR           0x004	/* Interrupt clear register */
+#define JOB_IRQ_MASK            0x008	/* Interrupt mask register */
+#define JOB_IRQ_STATUS          0x00C	/* Interrupt status register */
+#define JOB_IRQ_JS_STATE        0x010	/* status==active and _next == busy snapshot from last JOB_IRQ_CLEAR */
+#define JOB_IRQ_THROTTLE        0x014	/* cycles to delay delivering an interrupt externally. The JOB_IRQ_STATUS is NOT affected by this, just the delivery of the interrupt.  */
+
+/* JOB IRQ flags */
+#define JOB_IRQ_GLOBAL_IF       (1 << 31)   /* Global interface interrupt received */
+
+#define JOB_SLOT0               0x800	/* Configuration registers for job slot 0 */
+#define JOB_SLOT1               0x880	/* Configuration registers for job slot 1 */
+#define JOB_SLOT2               0x900	/* Configuration registers for job slot 2 */
+#define JOB_SLOT3               0x980	/* Configuration registers for job slot 3 */
+#define JOB_SLOT4               0xA00	/* Configuration registers for job slot 4 */
+#define JOB_SLOT5               0xA80	/* Configuration registers for job slot 5 */
+#define JOB_SLOT6               0xB00	/* Configuration registers for job slot 6 */
+#define JOB_SLOT7               0xB80	/* Configuration registers for job slot 7 */
+#define JOB_SLOT8               0xC00	/* Configuration registers for job slot 8 */
+#define JOB_SLOT9               0xC80	/* Configuration registers for job slot 9 */
+#define JOB_SLOT10              0xD00	/* Configuration registers for job slot 10 */
+#define JOB_SLOT11              0xD80	/* Configuration registers for job slot 11 */
+#define JOB_SLOT12              0xE00	/* Configuration registers for job slot 12 */
+#define JOB_SLOT13              0xE80	/* Configuration registers for job slot 13 */
+#define JOB_SLOT14              0xF00	/* Configuration registers for job slot 14 */
+#define JOB_SLOT15              0xF80	/* Configuration registers for job slot 15 */
+
+#define JOB_SLOT_REG(n, r)      (JOB_CONTROL_REG(JOB_SLOT0 + ((n) << 7)) + (r))
+
+#define JS_HEAD_LO             0x00	/* (RO) Job queue head pointer for job slot n, low word */
+#define JS_HEAD_HI             0x04	/* (RO) Job queue head pointer for job slot n, high word */
+#define JS_TAIL_LO             0x08	/* (RO) Job queue tail pointer for job slot n, low word */
+#define JS_TAIL_HI             0x0C	/* (RO) Job queue tail pointer for job slot n, high word */
+#define JS_AFFINITY_LO         0x10	/* (RO) Core affinity mask for job slot n, low word */
+#define JS_AFFINITY_HI         0x14	/* (RO) Core affinity mask for job slot n, high word */
+#define JS_CONFIG              0x18	/* (RO) Configuration settings for job slot n */
+#define JS_XAFFINITY           0x1C	/* (RO) Extended affinity mask for job
+					   slot n */
+
+#define JS_COMMAND             0x20	/* (WO) Command register for job slot n */
+#define JS_STATUS              0x24	/* (RO) Status register for job slot n */
+
+#define JS_HEAD_NEXT_LO        0x40	/* (RW) Next job queue head pointer for job slot n, low word */
+#define JS_HEAD_NEXT_HI        0x44	/* (RW) Next job queue head pointer for job slot n, high word */
+
+#define JS_AFFINITY_NEXT_LO    0x50	/* (RW) Next core affinity mask for job slot n, low word */
+#define JS_AFFINITY_NEXT_HI    0x54	/* (RW) Next core affinity mask for job slot n, high word */
+#define JS_CONFIG_NEXT         0x58	/* (RW) Next configuration settings for job slot n */
+#define JS_XAFFINITY_NEXT      0x5C	/* (RW) Next extended affinity mask for
+					   job slot n */
+
+#define JS_COMMAND_NEXT        0x60	/* (RW) Next command register for job slot n */
+
+#define JS_FLUSH_ID_NEXT       0x70	/* (RW) Next job slot n cache flush ID */
+
+#define MEMORY_MANAGEMENT_BASE  0x2000
+#define MMU_REG(r)              (MEMORY_MANAGEMENT_BASE + (r))
+
+#define MMU_IRQ_RAWSTAT         0x000	/* (RW) Raw interrupt status register */
+#define MMU_IRQ_CLEAR           0x004	/* (WO) Interrupt clear register */
+#define MMU_IRQ_MASK            0x008	/* (RW) Interrupt mask register */
+#define MMU_IRQ_STATUS          0x00C	/* (RO) Interrupt status register */
+
+#define MMU_AS0                 0x400	/* Configuration registers for address space 0 */
+#define MMU_AS1                 0x440	/* Configuration registers for address space 1 */
+#define MMU_AS2                 0x480	/* Configuration registers for address space 2 */
+#define MMU_AS3                 0x4C0	/* Configuration registers for address space 3 */
+#define MMU_AS4                 0x500	/* Configuration registers for address space 4 */
+#define MMU_AS5                 0x540	/* Configuration registers for address space 5 */
+#define MMU_AS6                 0x580	/* Configuration registers for address space 6 */
+#define MMU_AS7                 0x5C0	/* Configuration registers for address space 7 */
+#define MMU_AS8                 0x600	/* Configuration registers for address space 8 */
+#define MMU_AS9                 0x640	/* Configuration registers for address space 9 */
+#define MMU_AS10                0x680	/* Configuration registers for address space 10 */
+#define MMU_AS11                0x6C0	/* Configuration registers for address space 11 */
+#define MMU_AS12                0x700	/* Configuration registers for address space 12 */
+#define MMU_AS13                0x740	/* Configuration registers for address space 13 */
+#define MMU_AS14                0x780	/* Configuration registers for address space 14 */
+#define MMU_AS15                0x7C0	/* Configuration registers for address space 15 */
+
+#define MMU_AS_REG(n, r)        (MMU_REG(MMU_AS0 + ((n) << 6)) + (r))
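+
+/*
+ * Illustrative note (not part of the original patch): each address space has
+ * a 0x40-byte register window, so e.g. MMU_AS_REG(2, AS_COMMAND) resolves to
+ * 0x2000 + 0x400 + (2 << 6) + 0x18 = 0x2498.
+ */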
+
+#define AS_TRANSTAB_LO         0x00	/* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI         0x04	/* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO          0x08	/* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI          0x0C	/* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO         0x10	/* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI         0x14	/* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND             0x18	/* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS         0x1C	/* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO     0x20	/* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI     0x24	/* (RO) Fault Address for address space n, high word */
+#define AS_STATUS              0x28	/* (RO) Status flags for address space n */
+
+
+/* (RW) Translation table configuration for address space n, low word */
+#define AS_TRANSCFG_LO         0x30
+/* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_HI         0x34
+/* (RO) Secondary fault address for address space n, low word */
+#define AS_FAULTEXTRA_LO       0x38
+/* (RO) Secondary fault address for address space n, high word */
+#define AS_FAULTEXTRA_HI       0x3C
+
+/* End Register Offsets */
+
+/*
+ * MMU_IRQ_RAWSTAT register values. The same values also apply to the
+ * MMU_IRQ_CLEAR, MMU_IRQ_MASK and MMU_IRQ_STATUS registers.
+ */
+
+#define MMU_PAGE_FAULT_FLAGS   16
+
+/* Macros returning a bitmask to retrieve page fault or bus error flags from
+ * MMU registers */
+#define MMU_PAGE_FAULT(n)      (1UL << (n))
+#define MMU_BUS_ERROR(n)       (1UL << ((n) + MMU_PAGE_FAULT_FLAGS))
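+
+/*
+ * Illustrative note (not part of the original patch): address space n reports
+ * a page fault in bit n and a bus error in bit (16 + n), so an interrupt
+ * handler would typically test (rawstat & MMU_PAGE_FAULT(as_nr)) and
+ * (rawstat & MMU_BUS_ERROR(as_nr)) respectively.
+ */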
+
+/*
+ * Begin LPAE MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK   0xfffff000
+#define AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED  (0u << 0)
+#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY  (1u << 1)
+#define AS_TRANSTAB_LPAE_ADRMODE_TABLE     (3u << 0)
+#define AS_TRANSTAB_LPAE_READ_INNER        (1u << 2)
+#define AS_TRANSTAB_LPAE_SHARE_OUTER       (1u << 4)
+
+#define AS_TRANSTAB_LPAE_ADRMODE_MASK      0x00000003
+
+/*
+ * Begin AARCH64 MMU TRANSTAB register values
+ */
+#define MMU_HW_OUTA_BITS 40
+#define AS_TRANSTAB_BASE_MASK ((1ULL << MMU_HW_OUTA_BITS) - (1ULL << 4))
+
+/*
+ * Begin MMU STATUS register values
+ */
+#define AS_STATUS_AS_ACTIVE 0x01
+
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MASK                    (0x7<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT       (0x0<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT        (0x1<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT      (0x2<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG             (0x3<<3)
+
+#define AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT      (0x4<<3)
+#define AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT (0x5<<3)
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK                  (0x3<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC                (0x0<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX                    (0x1<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ                  (0x2<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE                 (0x3<<8)
+
+/*
+ * Begin MMU TRANSCFG register values
+ */
+
+#define AS_TRANSCFG_ADRMODE_LEGACY      0
+#define AS_TRANSCFG_ADRMODE_UNMAPPED    1
+#define AS_TRANSCFG_ADRMODE_IDENTITY    2
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K  6
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K 8
+
+#define AS_TRANSCFG_ADRMODE_MASK        0xF
+
+
+/*
+ * Begin TRANSCFG register values
+ */
+#define AS_TRANSCFG_PTW_MEMATTR_MASK (3ull << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_NON_CACHEABLE (1ull << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK (2ull << 24)
+
+#define AS_TRANSCFG_PTW_SH_MASK ((3ull << 28))
+#define AS_TRANSCFG_PTW_SH_OS (2ull << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3ull << 28)
+#define AS_TRANSCFG_R_ALLOCATE (1ull << 30)
+/*
+ * Begin Command Values
+ */
+
+/* JS_COMMAND register commands */
+#define JS_COMMAND_NOP         0x00	/* NOP Operation. Writing this value is ignored */
+#define JS_COMMAND_START       0x01	/* Start processing a job chain. Writing this value is ignored */
+#define JS_COMMAND_SOFT_STOP   0x02	/* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP   0x03	/* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04	/* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05	/* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06	/* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07	/* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_COMMAND_MASK        0x07    /* Mask of bits currently in use by the HW */
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP         0x00	/* NOP Operation */
+#define AS_COMMAND_UPDATE      0x01	/* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK        0x02	/* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK      0x03	/* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH       0x04	/* Flush all L2 caches then issue a flush region command to all MMUs
+					   (deprecated - only for use with T60x) */
+#define AS_COMMAND_FLUSH_PT    0x04	/* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM   0x05	/* Wait for memory accesses to complete, flush all the L1s cache then
+					   flush all L2 caches then issue a flush region command to all MMUs */
+
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_NO_ACTION        (0u << 0)
+#define JS_CONFIG_START_FLUSH_CLEAN            (1u << 8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU                    (1u << 10)
+#define JS_CONFIG_JOB_CHAIN_FLAG               (1u << 11)
+#define JS_CONFIG_END_FLUSH_NO_ACTION          JS_CONFIG_START_FLUSH_NO_ACTION
+#define JS_CONFIG_END_FLUSH_CLEAN              (1u << 12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE   (3u << 12)
+#define JS_CONFIG_ENABLE_FLUSH_REDUCTION       (1u << 14)
+#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK     (1u << 15)
+#define JS_CONFIG_THREAD_PRI(n)                ((n) << 16)
+
+/* JS_XAFFINITY register values */
+#define JS_XAFFINITY_XAFFINITY_ENABLE (1u << 0)
+#define JS_XAFFINITY_TILER_ENABLE     (1u << 8)
+#define JS_XAFFINITY_CACHE_ENABLE     (1u << 16)
+
+/* JS_STATUS register values */
+
+/* NOTE: Please keep these values in sync with enum base_jd_event_code in mali_base_kernel.h.
+ * The values are kept separate to avoid a dependency between userspace and kernel code.
+ */
+
+/* Group of values representing the job status instead of a particular fault */
+#define JS_STATUS_NO_EXCEPTION_BASE   0x00
+#define JS_STATUS_INTERRUPTED         (JS_STATUS_NO_EXCEPTION_BASE + 0x02)	/* 0x02 means INTERRUPTED */
+#define JS_STATUS_STOPPED             (JS_STATUS_NO_EXCEPTION_BASE + 0x03)	/* 0x03 means STOPPED */
+#define JS_STATUS_TERMINATED          (JS_STATUS_NO_EXCEPTION_BASE + 0x04)	/* 0x04 means TERMINATED */
+
+/* General fault values */
+#define JS_STATUS_FAULT_BASE          0x40
+#define JS_STATUS_CONFIG_FAULT        (JS_STATUS_FAULT_BASE)	/* 0x40 means CONFIG FAULT */
+#define JS_STATUS_POWER_FAULT         (JS_STATUS_FAULT_BASE + 0x01)	/* 0x41 means POWER FAULT */
+#define JS_STATUS_READ_FAULT          (JS_STATUS_FAULT_BASE + 0x02)	/* 0x42 means READ FAULT */
+#define JS_STATUS_WRITE_FAULT         (JS_STATUS_FAULT_BASE + 0x03)	/* 0x43 means WRITE FAULT */
+#define JS_STATUS_AFFINITY_FAULT      (JS_STATUS_FAULT_BASE + 0x04)	/* 0x44 means AFFINITY FAULT */
+#define JS_STATUS_BUS_FAULT           (JS_STATUS_FAULT_BASE + 0x08)	/* 0x48 means BUS FAULT */
+
+/* Instruction or data faults */
+#define JS_STATUS_INSTRUCTION_FAULT_BASE  0x50
+#define JS_STATUS_INSTR_INVALID_PC        (JS_STATUS_INSTRUCTION_FAULT_BASE)	/* 0x50 means INSTR INVALID PC */
+#define JS_STATUS_INSTR_INVALID_ENC       (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x01)	/* 0x51 means INSTR INVALID ENC */
+#define JS_STATUS_INSTR_TYPE_MISMATCH     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x02)	/* 0x52 means INSTR TYPE MISMATCH */
+#define JS_STATUS_INSTR_OPERAND_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x03)	/* 0x53 means INSTR OPERAND FAULT */
+#define JS_STATUS_INSTR_TLS_FAULT         (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x04)	/* 0x54 means INSTR TLS FAULT */
+#define JS_STATUS_INSTR_BARRIER_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x05)	/* 0x55 means INSTR BARRIER FAULT */
+#define JS_STATUS_INSTR_ALIGN_FAULT       (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x06)	/* 0x56 means INSTR ALIGN FAULT */
+/* NOTE: No fault with 0x57 code defined in spec. */
+#define JS_STATUS_DATA_INVALID_FAULT      (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x08)	/* 0x58 means DATA INVALID FAULT */
+#define JS_STATUS_TILE_RANGE_FAULT        (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x09)	/* 0x59 means TILE RANGE FAULT */
+#define JS_STATUS_ADDRESS_RANGE_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x0A)	/* 0x5A means ADDRESS RANGE FAULT */
+
+/* Other faults */
+#define JS_STATUS_MEMORY_FAULT_BASE   0x60
+#define JS_STATUS_OUT_OF_MEMORY       (JS_STATUS_MEMORY_FAULT_BASE)	/* 0x60 means OUT OF MEMORY */
+#define JS_STATUS_UNKNOWN             0x7F	/* 0x7F means UNKNOWN */
+
+/* GPU_COMMAND values */
+#define GPU_COMMAND_NOP                0x00	/* No operation, nothing happens */
+#define GPU_COMMAND_SOFT_RESET         0x01	/* Stop all external bus interfaces, and then reset the entire GPU. */
+#define GPU_COMMAND_HARD_RESET         0x02	/* Immediately reset the entire GPU. */
+#define GPU_COMMAND_PRFCNT_CLEAR       0x03	/* Clear all performance counters, setting them all to zero. */
+#define GPU_COMMAND_PRFCNT_SAMPLE      0x04	/* Sample all performance counters, writing them out to memory */
+#define GPU_COMMAND_CYCLE_COUNT_START  0x05	/* Starts the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CYCLE_COUNT_STOP   0x06	/* Stops the cycle counter, and system timestamp propagation */
+#define GPU_COMMAND_CLEAN_CACHES       0x07	/* Clean all caches */
+#define GPU_COMMAND_CLEAN_INV_CACHES   0x08	/* Clean and invalidate all caches */
+#define GPU_COMMAND_SET_PROTECTED_MODE 0x09	/* Places the GPU in protected mode */
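+
+/*
+ * Illustrative example (a sketch): a command is issued by writing one of the
+ * values above to the GPU_COMMAND register, e.g. to request a soft reset:
+ *
+ *   kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_SOFT_RESET);
+ *
+ * Completion is then reported through the GPU interrupt status registers.
+ */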
+
+/* End Command Values */
+
+/* GPU_STATUS values */
+#define GPU_STATUS_PRFCNT_ACTIVE           (1 << 2)	/* Set if the performance counters are active. */
+#define GPU_STATUS_PROTECTED_MODE_ACTIVE   (1 << 7)	/* Set if protected mode is active */
+
+/* PRFCNT_CONFIG register values */
+#define PRFCNT_CONFIG_MODE_SHIFT      0 /* Counter mode position. */
+#define PRFCNT_CONFIG_AS_SHIFT        4 /* Address space bitmap position. */
+#define PRFCNT_CONFIG_SETSELECT_SHIFT 8 /* Set select position. */
+
+#define PRFCNT_CONFIG_MODE_OFF    0	/* The performance counters are disabled. */
+#define PRFCNT_CONFIG_MODE_MANUAL 1	/* The performance counters are enabled, but are only written out when a PRFCNT_SAMPLE command is issued using the GPU_COMMAND register. */
+#define PRFCNT_CONFIG_MODE_TILE   2	/* The performance counters are enabled, and are written out each time a tile finishes rendering. */
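+
+/*
+ * Illustrative example (a sketch; as_nr is a placeholder for the address
+ * space used for the dump): a manual-sampling setup can be composed from the
+ * fields above as
+ *
+ *   u32 prfcnt_config = (as_nr << PRFCNT_CONFIG_AS_SHIFT) |
+ *                       (PRFCNT_CONFIG_MODE_MANUAL << PRFCNT_CONFIG_MODE_SHIFT);
+ *
+ * and the counters are then dumped by issuing GPU_COMMAND_PRFCNT_SAMPLE.
+ */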
+
+/* AS<n>_MEMATTR values from MMU_MEMATTR_STAGE1: */
+/* Use GPU implementation-defined caching policy. */
+#define AS_MEMATTR_IMPL_DEF_CACHE_POLICY 0x88ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_FORCE_TO_CACHE_ALL    0x8Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_WRITE_ALLOC           0x8Dull
+
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_AARCH64_OUTER_WA       0x8Dull
+/* Set to inner non-cacheable, outer-non-cacheable
+ * Setting defined by the alloc bits is ignored, but set to a valid encoding:
+ * - no-alloc on read
+ * - no alloc on write
+ */
+#define AS_MEMATTR_AARCH64_NON_CACHEABLE  0x4Cull
+
+/* Use GPU implementation-defined  caching policy. */
+#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull
+/* The attribute set to force all resources to be cached. */
+#define AS_MEMATTR_LPAE_FORCE_TO_CACHE_ALL    0x4Full
+/* Inner write-alloc cache setup, no outer caching */
+#define AS_MEMATTR_LPAE_WRITE_ALLOC           0x4Dull
+/* Set to implementation defined, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_IMPL_DEF        0x88ull
+/* Set to write back memory, outer caching */
+#define AS_MEMATTR_LPAE_OUTER_WA              0x8Dull
+/* There is no LPAE support for non-cacheable, since the memory type is always
+ * write-back.
+ * Marking this setting as reserved for LPAE
+ */
+#define AS_MEMATTR_LPAE_NON_CACHEABLE_RESERVED
+
+/* Symbols for default MEMATTR to use
+ * Default is - HW implementation defined caching */
+#define AS_MEMATTR_INDEX_DEFAULT               0
+#define AS_MEMATTR_INDEX_DEFAULT_ACE           3
+
+/* HW implementation defined caching */
+#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
+/* Force cache on */
+#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL    1
+/* Write-alloc */
+#define AS_MEMATTR_INDEX_WRITE_ALLOC           2
+/* Outer coherent, inner implementation defined policy */
+#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF        3
+/* Outer coherent, write alloc inner */
+#define AS_MEMATTR_INDEX_OUTER_WA              4
+/* Normal memory, inner non-cacheable, outer non-cacheable (ARMv8 mode only) */
+#define AS_MEMATTR_INDEX_NON_CACHEABLE         5
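+
+/*
+ * Illustrative example (an assumption about how the indices are used: each
+ * AS_MEMATTR_INDEX_* is taken to select an 8-bit field of the 64-bit
+ * AS<n>_MEMATTR register):
+ *
+ *   u64 memattr =
+ *       (AS_MEMATTR_IMPL_DEF_CACHE_POLICY << (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ *       (AS_MEMATTR_FORCE_TO_CACHE_ALL    << (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8))    |
+ *       (AS_MEMATTR_WRITE_ALLOC           << (AS_MEMATTR_INDEX_WRITE_ALLOC * 8));
+ */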
+
+/* JS<n>_FEATURES register */
+
+#define JS_FEATURE_NULL_JOB              (1u << 1)
+#define JS_FEATURE_SET_VALUE_JOB         (1u << 2)
+#define JS_FEATURE_CACHE_FLUSH_JOB       (1u << 3)
+#define JS_FEATURE_COMPUTE_JOB           (1u << 4)
+#define JS_FEATURE_VERTEX_JOB            (1u << 5)
+#define JS_FEATURE_GEOMETRY_JOB          (1u << 6)
+#define JS_FEATURE_TILER_JOB             (1u << 7)
+#define JS_FEATURE_FUSED_JOB             (1u << 8)
+#define JS_FEATURE_FRAGMENT_JOB          (1u << 9)
+
+/* End JS<n>_FEATURES register */
+
+/* L2_MMU_CONFIG register */
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT       (23)
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY             (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT        (24)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS              (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT       (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER      (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF         (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT       (26)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES             (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT      (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER     (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF        (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT      (12)
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS            (0x7 << L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT     (15)
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES           (0x7 << L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+/* End L2_MMU_CONFIG register */
+
+/* THREAD_* registers */
+
+/* THREAD_FEATURES IMPLEMENTATION_TECHNOLOGY values */
+#define IMPLEMENTATION_UNSPECIFIED  0
+#define IMPLEMENTATION_SILICON      1
+#define IMPLEMENTATION_FPGA         2
+#define IMPLEMENTATION_MODEL        3
+
+/* Default values when registers are not supported by the implemented hardware */
+#define THREAD_MT_DEFAULT     256
+#define THREAD_MWS_DEFAULT    256
+#define THREAD_MBS_DEFAULT    256
+#define THREAD_MR_DEFAULT     1024
+#define THREAD_MTQ_DEFAULT    4
+#define THREAD_MTGS_DEFAULT   10
+
+/* End THREAD_* registers */
+
+/* SHADER_CONFIG register */
+
+#define SC_ALT_COUNTERS             (1ul << 3)
+#define SC_OVERRIDE_FWD_PIXEL_KILL  (1ul << 4)
+#define SC_SDC_DISABLE_OQ_DISCARD   (1ul << 6)
+#define SC_LS_ALLOW_ATTR_TYPES      (1ul << 16)
+#define SC_LS_PAUSEBUFFER_DISABLE   (1ul << 16)
+#define SC_TLS_HASH_ENABLE          (1ul << 17)
+#define SC_LS_ATTR_CHECK_DISABLE    (1ul << 18)
+#define SC_ENABLE_TEXGRD_FLAGS      (1ul << 25)
+/* End SHADER_CONFIG register */
+
+/* TILER_CONFIG register */
+
+#define TC_CLOCK_GATE_OVERRIDE      (1ul << 0)
+
+/* End TILER_CONFIG register */
+
+/* JM_CONFIG register */
+
+#define JM_TIMESTAMP_OVERRIDE  (1ul << 0)
+#define JM_CLOCK_GATE_OVERRIDE (1ul << 1)
+#define JM_JOB_THROTTLE_ENABLE (1ul << 2)
+#define JM_JOB_THROTTLE_LIMIT_SHIFT (3)
+#define JM_MAX_JOB_THROTTLE_LIMIT (0x3F)
+#define JM_FORCE_COHERENCY_FEATURES_SHIFT (2)
+#define JM_IDVS_GROUP_SIZE_SHIFT (16)
+#define JM_MAX_IDVS_GROUP_SIZE (0x3F)
+/* End JM_CONFIG register */
+
+
+#endif /* _MIDGARD_REGMAP_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_uk.h b/drivers/gpu/arm/midgard/mali_uk.h
new file mode 100644
index 0000000..701f390
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_uk.h
@@ -0,0 +1,84 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2015, 2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_uk.h
+ * Types and definitions that are common across OSs for both the user
+ * and kernel side of the User-Kernel interface.
+ */
+
+#ifndef _UK_H_
+#define _UK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif				/* __cplusplus */
+
+/**
+ * @addtogroup base_api
+ * @{
+ */
+
+/**
+ * @defgroup uk_api User-Kernel Interface API
+ *
+ * The User-Kernel Interface abstracts the communication mechanism between the user and kernel-side code of device
+ * drivers developed as part of the Midgard DDK. Currently that includes the Base driver.
+ *
+ * It exposes an OS independent API to user-side code (UKU) which routes function calls to an OS-independent
+ * kernel-side API (UKK) via an OS-specific communication mechanism.
+ *
+ * This API is internal to the Midgard DDK and is not exposed to any applications.
+ *
+ * @{
+ */
+
+/**
+ * These are identifiers for kernel-side drivers implementing a UK interface, aka UKK clients. The
+ * UK module maps each identifier to an OS-specific device name, e.g. "gpu_base" -> "GPU0:". Pass
+ * the identifier to the uku_open() function to select a UKK client.
+ *
+ * When a new UKK client driver is created, a new identifier needs to be added to the uk_client_id
+ * enumeration, and the uku_open() implementations for the various OS ports need to be updated to
+ * provide a mapping of the identifier to the OS-specific device name.
+ *
+ */
+enum uk_client_id {
+	/**
+	 * Value used to identify the Base driver UK client.
+	 */
+	UK_CLIENT_MALI_T600_BASE,
+
+	/** The number of uk clients supported. This must be the last member of the enum */
+	UK_CLIENT_COUNT
+};
+
+/** @} end group uk_api */
+
+/** @} *//* end group base_api */
+
+#ifdef __cplusplus
+}
+#endif				/* __cplusplus */
+#endif				/* _UK_H_ */
diff --git a/drivers/gpu/arm/midgard/platform/Kconfig b/drivers/gpu/arm/midgard/platform/Kconfig
new file mode 100644
index 0000000..ef9fb96
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/Kconfig
@@ -0,0 +1,30 @@
+#
+# (C) COPYRIGHT 2012-2013, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+
+# Add your platform specific Kconfig file here
+#
+# "drivers/gpu/arm/midgard/platform/xxx/Kconfig"
+#
+# Where xxx is the platform name set in MALI_PLATFORM_NAME
+#
+
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/Kbuild b/drivers/gpu/arm/midgard/platform/devicetree/Kbuild
new file mode 100644
index 0000000..ce637fb
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_devicetree.o \
+	$(MALI_PLATFORM_DIR)/mali_kbase_runtime_pm.o
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c
new file mode 100644
index 0000000..ccefddf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c
@@ -0,0 +1,41 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase_config.h>
+
+static struct kbase_platform_config dummy_platform_config;
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &dummy_platform_config;
+}
+
+#ifndef CONFIG_OF
+int kbase_platform_register(void)
+{
+	return 0;
+}
+
+void kbase_platform_unregister(void)
+{
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h
new file mode 100644
index 0000000..5990313
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+
+/**
+ * Autosuspend delay
+ *
+ * The delay time (in milliseconds) to be used for autosuspend
+ */
+#define AUTO_SUSPEND_DELAY (100)
diff --git a/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c
new file mode 100644
index 0000000..c5f3ad7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c
@@ -0,0 +1,128 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <linux/pm_runtime.h>
+#include "mali_kbase_config_platform.h"
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	int ret = 1; /* Assume GPU has been powered off */
+	int error;
+
+	dev_dbg(kbdev->dev, "pm_callback_power_on %p\n",
+			(void *)kbdev->dev->pm_domain);
+
+	error = pm_runtime_get_sync(kbdev->dev);
+	if (error == 1) {
+		/*
+		 * Let core know that the chip has not been
+		 * powered off, so we can save on re-initialization.
+		 */
+		ret = 0;
+	}
+
+	dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error);
+
+	return ret;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_power_off\n");
+
+	pm_runtime_mark_last_busy(kbdev->dev);
+	pm_runtime_put_autosuspend(kbdev->dev);
+}
+
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	dev_dbg(kbdev->dev, "kbase_device_runtime_init\n");
+
+	pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(kbdev->dev);
+
+	pm_runtime_set_active(kbdev->dev);
+	pm_runtime_enable(kbdev->dev);
+
+	if (!pm_runtime_enabled(kbdev->dev)) {
+		dev_warn(kbdev->dev, "pm_runtime not enabled\n");
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static void kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n");
+	pm_runtime_disable(kbdev->dev);
+}
+#endif
+
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_runtime_on\n");
+
+	return 0;
+}
+
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_runtime_off\n");
+}
+
+static void pm_callback_resume(struct kbase_device *kbdev)
+{
+	int ret = pm_callback_runtime_on(kbdev);
+
+	WARN_ON(ret);
+}
+
+static void pm_callback_suspend(struct kbase_device *kbdev)
+{
+	pm_callback_runtime_off(kbdev);
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback = pm_callback_suspend,
+	.power_resume_callback = pm_callback_resume,
+#ifdef KBASE_PM_RUNTIME
+	.power_runtime_init_callback = kbase_device_runtime_init,
+	.power_runtime_term_callback = kbase_device_runtime_disable,
+	.power_runtime_on_callback = pm_callback_runtime_on,
+	.power_runtime_off_callback = pm_callback_runtime_off,
+#else				/* KBASE_PM_RUNTIME */
+	.power_runtime_init_callback = NULL,
+	.power_runtime_term_callback = NULL,
+	.power_runtime_on_callback = NULL,
+	.power_runtime_off_callback = NULL,
+#endif				/* KBASE_PM_RUNTIME */
+};
+
+
diff --git a/drivers/gpu/arm/midgard/platform/meson/Kbuild b/drivers/gpu/arm/midgard/platform/meson/Kbuild
new file mode 100644
index 0000000..645f8e0
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/meson/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_meson.o \
+	$(MALI_PLATFORM_DIR)/mali_kbase_runtime_pm.o
diff --git a/drivers/gpu/arm/midgard/platform/meson/mali_kbase_config_meson.c b/drivers/gpu/arm/midgard/platform/meson/mali_kbase_config_meson.c
new file mode 100644
index 0000000..ccefddf
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/meson/mali_kbase_config_meson.c
@@ -0,0 +1,41 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase_config.h>
+
+static struct kbase_platform_config dummy_platform_config;
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &dummy_platform_config;
+}
+
+#ifndef CONFIG_OF
+int kbase_platform_register(void)
+{
+	return 0;
+}
+
+void kbase_platform_unregister(void)
+{
+}
+#endif
diff --git a/drivers/gpu/arm/midgard/platform/meson/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/meson/mali_kbase_config_platform.h
new file mode 100644
index 0000000..5990313
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/meson/mali_kbase_config_platform.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
+
+/**
+ * Autosuspend delay
+ *
+ * The delay time (in milliseconds) to be used for autosuspend
+ */
+#define AUTO_SUSPEND_DELAY (100)
diff --git a/drivers/gpu/arm/midgard/platform/meson/mali_kbase_runtime_pm.c b/drivers/gpu/arm/midgard/platform/meson/mali_kbase_runtime_pm.c
new file mode 100644
index 0000000..ef48144
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/meson/mali_kbase_runtime_pm.c
@@ -0,0 +1,195 @@
+/*
+ *
+ * (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include "mali_kbase_config_platform.h"
+#include <backend/gpu/mali_kbase_device_internal.h>
+#include <asm/delay.h>
+
+static int first = 1;
+
+/* Manually Power On Cores and L2 Cache */
+static int gpu_power_on(struct kbase_device *kbdev, uint32_t mask)
+{
+	/* Clear all interrupts */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+
+	/* Power On all Tiler Cores */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_PWRON_LO), 0xffffffff);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_PWRON_HI), 0xffffffff);
+
+	/* Power On all L2 cache */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_PWRON_LO), 0xffffffff);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_PWRON_HI), 0xffffffff);
+
+	/* Power On all Shader Cores */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_PWRON_LO), 0x3);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_PWRON_HI), 0x0);
+
+	/* Wait for Power On to complete */
+	udelay(10);
+	while (!kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)))
+		udelay(10);
+
+	/* Clear all interrupts */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+
+	return 0;
+}
+
+static int pm_soft_reset(struct kbase_device *kbdev)
+{
+	struct reset_control *rstc;
+
+	rstc = of_reset_control_array_get(kbdev->dev->of_node, false, false, true);
+	if (IS_ERR(rstc))
+		return PTR_ERR(rstc);
+
+	reset_control_assert(rstc);
+	udelay(10);
+	reset_control_deassert(rstc);
+	udelay(10);
+
+	reset_control_put(rstc);
+
+	/* Override Power Management Settings */
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_KEY),
+			0x2968A819);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1),
+			0xfff | (0x20 << 16));
+
+	return 0;
+}
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	int ret = 1; /* Assume GPU has been powered off */
+	int error;
+
+	dev_dbg(kbdev->dev, "pm_callback_power_on %p\n",
+			(void *)kbdev->dev->pm_domain);
+
+	if (first) {
+		pm_soft_reset(kbdev);
+
+		gpu_power_on(kbdev, 7);
+
+		first = 0;
+	}
+
+	error = pm_runtime_get_sync(kbdev->dev);
+	if (error == 1) {
+		/*
+		 * Let core know that the chip has not been
+		 * powered off, so we can save on re-initialization.
+		 */
+		ret = 0;
+	}
+
+	dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error);
+
+	return ret;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_power_off\n");
+
+	pm_runtime_mark_last_busy(kbdev->dev);
+	pm_runtime_put_autosuspend(kbdev->dev);
+}
+
+#ifdef KBASE_PM_RUNTIME
+static int kbase_device_runtime_init(struct kbase_device *kbdev)
+{
+	int ret = 0;
+
+	dev_dbg(kbdev->dev, "kbase_device_runtime_init\n");
+
+	pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(kbdev->dev);
+
+	pm_runtime_set_active(kbdev->dev);
+	pm_runtime_enable(kbdev->dev);
+
+	if (!pm_runtime_enabled(kbdev->dev)) {
+		dev_warn(kbdev->dev, "pm_runtime not enabled\n");
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static void kbase_device_runtime_disable(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "kbase_device_runtime_disable\n");
+	pm_runtime_disable(kbdev->dev);
+}
+#endif
+
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_runtime_on\n");
+
+	return 0;
+}
+
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
+{
+	dev_dbg(kbdev->dev, "pm_callback_runtime_off\n");
+}
+
+static void pm_callback_resume(struct kbase_device *kbdev)
+{
+	int ret = pm_callback_runtime_on(kbdev);
+
+	WARN_ON(ret);
+}
+
+static void pm_callback_suspend(struct kbase_device *kbdev)
+{
+	pm_callback_runtime_off(kbdev);
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback = pm_callback_suspend,
+	.power_resume_callback = pm_callback_resume,
+	.soft_reset_callback = pm_soft_reset,
+#ifdef KBASE_PM_RUNTIME
+	.power_runtime_init_callback = kbase_device_runtime_init,
+	.power_runtime_term_callback = kbase_device_runtime_disable,
+	.power_runtime_on_callback = pm_callback_runtime_on,
+	.power_runtime_off_callback = pm_callback_runtime_off,
+#else				/* KBASE_PM_RUNTIME */
+	.power_runtime_init_callback = NULL,
+	.power_runtime_term_callback = NULL,
+	.power_runtime_on_callback = NULL,
+	.power_runtime_off_callback = NULL,
+#endif				/* KBASE_PM_RUNTIME */
+};
+
+
diff --git a/drivers/gpu/arm/midgard/platform/vexpress/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress/Kbuild
new file mode 100644
index 0000000..6780e4c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2012-2013, 2016-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \
+	mali_kbase_platform_fake.o
diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h
new file mode 100644
index 0000000..fac3cd5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c
new file mode 100644
index 0000000..d165ce2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+#include "mali_kbase_config_platform.h"
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+	.job_irq_number = 68,
+	.mmu_irq_number = 69,
+	.gpu_irq_number = 70,
+	.io_memory_region = {
+			     .start = 0xFC010000,
+			     .end = 0xFC010000 + (4096 * 4) - 1
+	}
+};
+#endif /* CONFIG_OF */
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	/* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+	return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback  = NULL,
+	.power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+	.io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &versatile_platform_config;
+}
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild
new file mode 100644
index 0000000..51b408e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild
@@ -0,0 +1,24 @@
+#
+# (C) COPYRIGHT 2013-2014, 2016-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \
+	mali_kbase_platform_fake.o
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
new file mode 100644
index 0000000..fac3cd5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
new file mode 100644
index 0000000..efca0a5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
@@ -0,0 +1,65 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+	.job_irq_number = 68,
+	.mmu_irq_number = 69,
+	.gpu_irq_number = 70,
+	.io_memory_region = {
+			     .start = 0x2f010000,
+			     .end = 0x2f010000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	/* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+	return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback  = NULL,
+	.power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+	.io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &versatile_platform_config;
+}
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild
new file mode 100644
index 0000000..e07709c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/Kbuild
@@ -0,0 +1,25 @@
+#
+# (C) COPYRIGHT 2012-2013, 2016-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+mali_kbase-y += \
+	$(MALI_PLATFORM_DIR)/mali_kbase_config_vexpress.o \
+	$(MALI_PLATFORM_DIR)/mali_kbase_cpu_vexpress.o \
+	mali_kbase_platform_fake.o
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
new file mode 100644
index 0000000..fac3cd5
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/**
+ * Power management configuration
+ *
+ * Attached value: pointer to @ref kbase_pm_callback_conf
+ * Default value: See @ref kbase_pm_callback_conf
+ */
+#define POWER_MANAGEMENT_CALLBACKS (&pm_callbacks)
+
+/**
+ * Platform specific configuration functions
+ *
+ * Attached value: pointer to @ref kbase_platform_funcs_conf
+ * Default value: See @ref kbase_platform_funcs_conf
+ */
+#define PLATFORM_FUNCS (NULL)
+
+extern struct kbase_pm_callback_conf pm_callbacks;
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
new file mode 100644
index 0000000..b6714b9
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
@@ -0,0 +1,67 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+	.job_irq_number = 75,
+	.mmu_irq_number = 76,
+	.gpu_irq_number = 77,
+	.io_memory_region = {
+			     .start = 0x2F000000,
+			     .end = 0x2F000000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+	/* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+	return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+}
+
+struct kbase_pm_callback_conf pm_callbacks = {
+	.power_on_callback = pm_callback_power_on,
+	.power_off_callback = pm_callback_power_off,
+	.power_suspend_callback  = NULL,
+	.power_resume_callback = NULL
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+#ifndef CONFIG_OF
+	.io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+	return &versatile_platform_config;
+}
diff --git a/drivers/gpu/arm/midgard/platform_dummy/mali_ukk_os.h b/drivers/gpu/arm/midgard/platform_dummy/mali_ukk_os.h
new file mode 100644
index 0000000..ef1ec70
--- /dev/null
+++ b/drivers/gpu/arm/midgard/platform_dummy/mali_ukk_os.h
@@ -0,0 +1,58 @@
+/*
+ *
+ * (C) COPYRIGHT 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+
+
+/**
+ * @file mali_ukk_os.h
+ * Types and definitions that are common for Linux OSs for the kernel side of the
+ * User-Kernel interface.
+ */
+
+#ifndef _UKK_OS_H_ /* Linux version */
+#define _UKK_OS_H_
+
+#include <linux/fs.h>
+
+/**
+ * @addtogroup uk_api User-Kernel Interface API
+ * @{
+ */
+
+/**
+ * @addtogroup uk_api_kernel UKK (Kernel side)
+ * @{
+ */
+
+/**
+ * Internal OS specific data structure associated with each UKK session. Part
+ * of a ukk_session object.
+ */
+typedef struct ukkp_session {
+	int dummy;     /**< No internal OS specific data at this time */
+} ukkp_session;
+
+/** @} end group uk_api_kernel */
+
+/** @} end group uk_api */
+
+#endif /* _UKK_OS_H_ */
diff --git a/drivers/gpu/arm/midgard/protected_mode_switcher.h b/drivers/gpu/arm/midgard/protected_mode_switcher.h
new file mode 100644
index 0000000..8778d81
--- /dev/null
+++ b/drivers/gpu/arm/midgard/protected_mode_switcher.h
@@ -0,0 +1,69 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _PROTECTED_MODE_SWITCH_H_
+#define _PROTECTED_MODE_SWITCH_H_
+
+struct protected_mode_device;
+
+/**
+ * struct protected_mode_ops - Callbacks for protected mode switch operations
+ *
+ * @protected_mode_enable:  Callback to enable protected mode for device
+ * @protected_mode_disable: Callback to disable protected mode for device
+ */
+struct protected_mode_ops {
+	/**
+	 * protected_mode_enable() - Enable protected mode on device
+	 * @protected_dev:	The protected mode device
+	 *
+	 * Return: 0 on success, non-zero on error
+	 */
+	int (*protected_mode_enable)(
+			struct protected_mode_device *protected_dev);
+
+	/**
+	 * protected_mode_disable() - Disable protected mode on device, and
+	 *                            reset device
+	 * @protected_dev:	The protected mode device
+	 *
+	 * Return: 0 on success, non-zero on error
+	 */
+	int (*protected_mode_disable)(
+			struct protected_mode_device *protected_dev);
+};
+
+/**
+ * struct protected_mode_device - Device structure for protected mode devices
+ *
+ * @ops:  Callbacks associated with this device
+ * @data: Pointer to device private data
+ *
+ * This structure should be registered with the platform device using
+ * platform_set_drvdata().
+ */
+struct protected_mode_device {
+	struct protected_mode_ops ops;
+	void *data;
+};
+
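+/*
+ * Illustrative example (a sketch only; my_pma_probe, my_pma_enable and
+ * my_pma_disable are placeholder names, not part of this interface): a
+ * protected-mode switcher driver would typically allocate and register the
+ * structure from its platform driver probe callback:
+ *
+ *   static int my_pma_probe(struct platform_device *pdev)
+ *   {
+ *           struct protected_mode_device *pma_dev;
+ *
+ *           pma_dev = devm_kzalloc(&pdev->dev, sizeof(*pma_dev), GFP_KERNEL);
+ *           if (!pma_dev)
+ *                   return -ENOMEM;
+ *
+ *           pma_dev->ops.protected_mode_enable = my_pma_enable;
+ *           pma_dev->ops.protected_mode_disable = my_pma_disable;
+ *           pma_dev->data = pdev;
+ *
+ *           platform_set_drvdata(pdev, pma_dev);
+ *           return 0;
+ *   }
+ */
+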
+#endif /* _PROTECTED_MODE_SWITCH_H_ */
diff --git a/drivers/gpu/arm/midgard/sconscript b/drivers/gpu/arm/midgard/sconscript
new file mode 100644
index 0000000..f9d9c1b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/sconscript
@@ -0,0 +1,66 @@
+#
+# (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+import sys
+Import('env')
+
+SConscript( 'tests/sconscript' )
+
+mock_test = 0
+
+# Source files required for kbase.
+kbase_src = [
+	Glob('*.c'),
+	Glob('backend/*/*.c'),
+	Glob('internal/*/*.c'),
+	Glob('ipa/*.c'),
+	Glob('platform/%s/*.c' % env['platform_config']),
+	Glob('thirdparty/*.c'),
+]
+
+if env['platform_config']=='juno_soc':
+	kbase_src += [Glob('platform/devicetree/*.c')]
+else:
+	kbase_src += [Glob('platform/%s/*.c' % env['platform_config'])]
+
+if Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock') and env['unit'] == '1':
+	kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock/*.c')]
+	mock_test = 1
+
+make_args = env.kernel_get_config_defines(ret_list = True) + [
+	'PLATFORM=%s' % env['platform'],
+	'MALI_KERNEL_TEST_API=%s' % env['debug'],
+	'MALI_UNIT_TEST=%s' % env['unit'],
+	'MALI_RELEASE_NAME=%s' % env['mali_release_name'],
+	'MALI_CUSTOMER_RELEASE=%s' % env['release'],
+	'MALI_USE_CSF=%s' % env['csf'],
+	'MALI_COVERAGE=%s' % env['coverage'],
+]
+
+kbase = env.BuildKernelModule('$STATIC_LIB_PATH/mali_kbase.ko', kbase_src,
+                              make_args = make_args)
+
+if 'smc_protected_mode_switcher' in env:
+	env.Depends('$STATIC_LIB_PATH/mali_kbase.ko', '$STATIC_LIB_PATH/smc_protected_mode_switcher.ko')
+
+env.KernelObjTarget('kbase', kbase)
+
+env.AppendUnique(BASE=['cutils_linked_list'])
diff --git a/drivers/gpu/arm/midgard/tests/Kbuild b/drivers/gpu/arm/midgard/tests/Kbuild
new file mode 100644
index 0000000..df16a77
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/Kbuild
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+obj-$(CONFIG_MALI_KUTF) += kutf/
+obj-$(CONFIG_MALI_IRQ_LATENCY) += mali_kutf_irq_test/
diff --git a/drivers/gpu/arm/midgard/tests/Kconfig b/drivers/gpu/arm/midgard/tests/Kconfig
new file mode 100644
index 0000000..fa91aea
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/Kconfig
@@ -0,0 +1,23 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+source "drivers/gpu/arm/midgard/tests/kutf/Kconfig"
+source "drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig"
diff --git a/drivers/gpu/arm/midgard/tests/Mconfig b/drivers/gpu/arm/midgard/tests/Mconfig
new file mode 100644
index 0000000..af4e383
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/Mconfig
@@ -0,0 +1,32 @@
+#
+# (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA  02110-1301, USA.
+#
+
+config UNIT_TEST_KERNEL_MODULES
+	bool
+	default y if UNIT_TEST_CODE && BUILD_KERNEL_MODULES
+	default n
+
+config BUILD_IPA_TESTS
+	bool
+	default y if UNIT_TEST_KERNEL_MODULES && MALI_DEVFREQ
+	default n
+
+config BUILD_IPA_UNIT_TESTS
+	bool
+	default y if NO_MALI && BUILD_IPA_TESTS
+	default n
+
+config BUILD_CSF_TESTS
+	bool
+	default y if UNIT_TEST_KERNEL_MODULES && GPU_HAS_CSF
+	default n
diff --git a/drivers/gpu/arm/midgard/tests/build.bp b/drivers/gpu/arm/midgard/tests/build.bp
new file mode 100644
index 0000000..a0823c7
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/build.bp
@@ -0,0 +1,22 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_defaults {
+    name: "kernel_test_module_defaults",
+    defaults: ["mali_kbase_shared_config_defaults"],
+    include_dirs: [
+        "kernel/drivers/gpu/arm",
+        "kernel/drivers/gpu/arm/midgard",
+        "kernel/drivers/gpu/arm/midgard/backend/gpu",
+        "kernel/drivers/gpu/arm/midgard/tests/include",
+    ],
+}
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h
new file mode 100644
index 0000000..15e168c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_HELPERS_H_
+#define _KERNEL_UTF_HELPERS_H_
+
+/* kutf_helpers.h
+ * Test helper functions for the kernel UTF test infrastructure.
+ *
+ * These functions provide methods for enqueuing/dequeuing lines of text sent
+ * by user space. They are used to implement the transfer of "userdata" from
+ * user space to kernel.
+ */
+
+#include <kutf/kutf_suite.h>
+
+/**
+ * kutf_helper_input_dequeue() - Dequeue a line sent by user space
+ * @context:    KUTF context
+ * @str_size:   Pointer to an integer to receive the size of the string
+ *
+ * If no line is available then this function will wait (interruptibly) until
+ * a line is available.
+ *
+ * Return: The line dequeued, ERR_PTR(-EINTR) if interrupted or NULL on end
+ * of data.
+ */
+char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size);
+
+/**
+ * kutf_helper_input_enqueue() - Enqueue a line sent by user space
+ * @context:   KUTF context
+ * @str:       The user space address of the line
+ * @size:      The length in bytes of the string
+ *
+ * This function will use copy_from_user to copy the string out of user space.
+ * The string need not be NULL-terminated (@size should not include the NULL
+ * termination).
+ *
+ * As a special case @str==NULL and @size==0 is valid to mark the end of input,
+ * but callers should use kutf_helper_input_enqueue_end_of_data() instead.
+ *
+ * Return: 0 on success, -EFAULT if the line cannot be copied from user space,
+ * -ENOMEM if out of memory.
+ */
+int kutf_helper_input_enqueue(struct kutf_context *context,
+		const char __user *str, size_t size);
+
+/**
+ * kutf_helper_input_enqueue_end_of_data() - Signal no more data is to be sent
+ * @context:    KUTF context
+ *
+ * After this function has been called, kutf_helper_input_dequeue() will always
+ * return NULL.
+ */
+void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context);
+
+#endif	/* _KERNEL_UTF_HELPERS_H_ */
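
As a rough illustration of how a test body might consume queued userdata through this interface, here is a hedged sketch (hypothetical code, not part of this patch; it assumes the result-reporting calls from kutf_suite.h and IS_ERR() from linux/err.h are available):

	/* Illustrative only: drain userdata lines until end-of-data. */
	static void example_userdata_test(struct kutf_context *context)
	{
		size_t len;
		char *line;

		while ((line = kutf_helper_input_dequeue(context, &len)) != NULL) {
			if (IS_ERR(line)) {
				/* Interrupted while waiting for a line from user space */
				kutf_test_fail(context, "interrupted waiting for userdata");
				return;
			}
			/* Echo each received line back as an info result */
			kutf_test_info(context, line);
		}
		/* A NULL return marks the end of data, queued by
		 * kutf_helper_input_enqueue_end_of_data() */
	}
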
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h
new file mode 100644
index 0000000..3b1300e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_helpers_user.h
@@ -0,0 +1,179 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_HELPERS_USER_H_
+#define _KERNEL_UTF_HELPERS_USER_H_
+
+/* kutf_helpers_user.h
+ * Test helper functions for the kernel UTF test infrastructure, whose
+ * implementation mirrors that of similar functions for kutf-userside.
+ */
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_helpers.h>
+
+
+#define KUTF_HELPER_MAX_VAL_NAME_LEN 255
+
+enum kutf_helper_valtype {
+	KUTF_HELPER_VALTYPE_INVALID,
+	KUTF_HELPER_VALTYPE_U64,
+	KUTF_HELPER_VALTYPE_STR,
+
+	KUTF_HELPER_VALTYPE_COUNT /* Must be last */
+};
+
+struct kutf_helper_named_val {
+	enum kutf_helper_valtype type;
+	char *val_name;
+	union {
+		u64 val_u64;
+		char *val_str;
+	} u;
+};
+
+/* Extra error values for certain helpers, used when we also want to
+ * distinguish between Linux's own error values.
+ *
+ * These can only be used with functions returning an int type that are
+ * documented as returning one of these potential values; they cannot be used
+ * with functions returning a pointer type, since we can't decode them with
+ * PTR_ERR.
+ *
+ * No negative values are used - Linux error codes should be used instead, and
+ * indicate a problem in accessing the data file itself (these are generally
+ * unrecoverable)
+ *
+ * Positive values indicate correct access but invalid parsing (which can be
+ * recovered from, assuming subsequent data is correct) */
+enum kutf_helper_err {
+	/* No error - must be zero */
+	KUTF_HELPER_ERR_NONE = 0,
+	/* Named value parsing encountered an invalid name */
+	KUTF_HELPER_ERR_INVALID_NAME,
+	/* Named value parsing of string or u64 type encountered extra
+	 * characters after the value (after the last digit for a u64 type or
+	 * after the string end delimiter for string type) */
+	KUTF_HELPER_ERR_CHARS_AFTER_VAL,
+	/* Named value parsing of string type couldn't find the string end
+	 * delimiter.
+	 *
+	 * This cannot be encountered when the NAME="value" message exceeds the
+	 * textbuf's maximum line length, because such messages are not checked
+	 * for an end string delimiter */
+	KUTF_HELPER_ERR_NO_END_DELIMITER,
+	/* Named value didn't parse as any of the known types */
+	KUTF_HELPER_ERR_INVALID_VALUE,
+};
+
+
+/* Send named NAME=value pair, u64 value
+ *
+ * NAME must match [A-Z0-9_]\+ and can be up to MAX_VAL_NAME_LEN characters long
+ *
+ * Any failure will be logged on the suite's current test fixture
+ *
+ * Returns 0 on success, non-zero on failure
+ */
+int kutf_helper_send_named_u64(struct kutf_context *context,
+		const char *val_name, u64 val);
+
+/* Get the maximum length of a string that can be represented as a particular
+ * NAME="value" pair without string-value truncation in the kernel's buffer
+ *
+ * Given val_name and the kernel buffer's size, this can be used to determine
+ * the maximum length of a string that can be sent as val_name="value" pair
+ * without having the string value truncated. Any string longer than this will
+ * be truncated at some point during communication to this size.
+ *
+ * It is assumed that val_name is a valid name for
+ * kutf_helper_send_named_str(), and no checking will be made to
+ * ensure this.
+ *
+ * Returns the maximum string length that can be represented, or a negative
+ * value if the NAME="value" encoding itself wouldn't fit in kern_buf_sz
+ */
+int kutf_helper_max_str_len_for_kern(const char *val_name, int kern_buf_sz);
+
+/* Send named NAME="str" pair
+ *
+ * No escaping is allowed in str. Any of the following characters will
+ * terminate the string: '"' '\\' '\n'
+ *
+ * NAME must match [A-Z0-9_]\+ and can be up to MAX_VAL_NAME_LEN characters long
+ *
+ * Any failure will be logged on the suite's current test fixture
+ *
+ * Returns 0 on success, non-zero on failure */
+int kutf_helper_send_named_str(struct kutf_context *context,
+		const char *val_name, const char *val_str);
+
+/* Receive named NAME=value pair
+ *
+ * This can receive u64 and string values - check named_val->type
+ *
+ * If you are not planning on dynamic handling of the named value's name and
+ * type, then kutf_helper_receive_check_val() is more useful as a
+ * convenience function.
+ *
+ * String members of named_val will come from memory allocated on the fixture's mempool
+ *
+ * Returns 0 on success. Negative value on failure to receive from the 'run'
+ * file, positive value indicates an enum kutf_helper_err value for correct
+ * reception of data but invalid parsing */
+int kutf_helper_receive_named_val(
+		struct kutf_context *context,
+		struct kutf_helper_named_val *named_val);
+
+/* Receive and validate NAME=value pair
+ *
+ * As with kutf_helper_receive_named_val, but validate that the
+ * name and type are as expected, as a convenience for a common pattern found
+ * in tests.
+ *
+ * NOTE: this only returns an error value if there was actually a problem
+ * receiving data.
+ *
+ * NOTE: If the underlying data was received correctly, but:
+ * - isn't of the expected name
+ * - isn't the expected type
+ * - isn't correctly parsed for the type
+ * then the following happens:
+ * - failure result is recorded
+ * - named_val->type will be KUTF_HELPER_VALTYPE_INVALID
+ * - named_val->u will contain some default value that should be relatively
+ *   harmless for the test, including being writable in the case of string
+ *   values
+ * - return value will be 0 to indicate success
+ *
+ * The rationale behind this is that we'd prefer to continue the rest of the
+ * test with failures propagated, rather than hitting a timeout */
+int kutf_helper_receive_check_val(
+		struct kutf_helper_named_val *named_val,
+		struct kutf_context *context,
+		const char *expect_val_name,
+		enum kutf_helper_valtype expect_val_type);
+
+/* Output a named value to kmsg */
+void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val);
+
+
+#endif	/* _KERNEL_UTF_HELPERS_USER_H_ */
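
For orientation, a minimal sketch of the named-value exchange these declarations enable (hypothetical test code; the names "GPU_ID" and "REPLY" and the value are invented for illustration):

	static void example_named_val_test(struct kutf_context *context)
	{
		struct kutf_helper_named_val val;

		/* Send GPU_ID=0x... to user space; failures are logged on the fixture */
		if (kutf_helper_send_named_u64(context, "GPU_ID", 0x60a0ULL))
			return;

		/* Expect a u64 named REPLY back; on a parse problem a failure is
		 * recorded, val gets a harmless default and 0 is still returned */
		if (kutf_helper_receive_check_val(&val, context, "REPLY",
				KUTF_HELPER_VALTYPE_U64))
			return;

		kutf_helper_output_named_val(&val);
	}
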
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h
new file mode 100644
index 0000000..988559d
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_mem.h
@@ -0,0 +1,73 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_MEM_H_
+#define _KERNEL_UTF_MEM_H_
+
+/* kutf_mem.h
+ * Functions for management of memory pools in the kernel.
+ *
+ * This module implements a memory pool allocator, allowing a test
+ * implementation to allocate linked allocations which can then be freed by a
+ * single free which releases all of the resources held by the entire pool.
+ *
+ * Note that it is not possible to free single resources within the pool once
+ * allocated.
+ */
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+/**
+ * struct kutf_mempool - the memory pool context management structure
+ * @head:	list head to which the allocations in this context are added
+ * @lock:	mutex for concurrent allocation from multiple threads
+ *
+ */
+struct kutf_mempool {
+	struct list_head head;
+	struct mutex lock;
+};
+
+/**
+ * kutf_mempool_init() - Initialize a memory pool.
+ * @pool:	Memory pool structure to initialize, provided by the user
+ *
+ * Return:	zero on success
+ */
+int kutf_mempool_init(struct kutf_mempool *pool);
+
+/**
+ * kutf_mempool_alloc() - Allocate memory from a pool
+ * @pool:	Memory pool to allocate from
+ * @size:	Size of memory wanted in number of bytes
+ *
+ * Return:	Pointer to memory on success, NULL on failure.
+ */
+void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size);
+
+/**
+ * kutf_mempool_destroy() - Destroy a memory pool, freeing all memory within it.
+ * @pool:	The memory pool to free
+ */
+void kutf_mempool_destroy(struct kutf_mempool *pool);
+#endif	/* _KERNEL_UTF_MEM_H_ */
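
A small usage sketch of the pool allocator follows (illustrative only; the size and error code are arbitrary):

	static int example_pool_usage(void)
	{
		struct kutf_mempool pool;
		char *buf;

		if (kutf_mempool_init(&pool))
			return -ENOMEM;

		buf = kutf_mempool_alloc(&pool, 64);
		if (!buf) {
			kutf_mempool_destroy(&pool);
			return -ENOMEM;
		}

		/* ... use buf; individual allocations cannot be freed ... */

		/* One call releases every allocation made from the pool */
		kutf_mempool_destroy(&pool);
		return 0;
	}
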
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h
new file mode 100644
index 0000000..49ebeb4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_resultset.h
@@ -0,0 +1,181 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_RESULTSET_H_
+#define _KERNEL_UTF_RESULTSET_H_
+
+/* kutf_resultset.h
+ * Functions and structures for handling test results and result sets.
+ *
+ * This section of the kernel UTF contains structures and functions used for the
+ * management of Results and Result Sets.
+ */
+
+/**
+ * enum kutf_result_status - Status values for a single test result.
+ * @KUTF_RESULT_BENCHMARK:	Result is a meta-result containing benchmark
+ *                              results.
+ * @KUTF_RESULT_SKIP:		The test was skipped.
+ * @KUTF_RESULT_UNKNOWN:	The test has an unknown result.
+ * @KUTF_RESULT_PASS:		The test result passed.
+ * @KUTF_RESULT_DEBUG:		The test result passed, but raised a debug
+ *                              message.
+ * @KUTF_RESULT_INFO:		The test result passed, but raised
+ *                              an informative message.
+ * @KUTF_RESULT_WARN:		The test result passed, but raised a warning
+ *                              message.
+ * @KUTF_RESULT_FAIL:		The test result failed with a non-fatal error.
+ * @KUTF_RESULT_FATAL:		The test result failed with a fatal error.
+ * @KUTF_RESULT_ABORT:		The test result failed due to a non-UTF
+ *                              assertion failure.
+ * @KUTF_RESULT_USERDATA:	User data is ready to be read,
+ *                              this is not seen outside the kernel
+ * @KUTF_RESULT_USERDATA_WAIT:	Waiting for user data to be sent,
+ *                              this is not seen outside the kernel
+ * @KUTF_RESULT_TEST_FINISHED:	The test has finished, no more results will
+ *                              be produced. This is not seen outside kutf
+ */
+enum kutf_result_status {
+	KUTF_RESULT_BENCHMARK = -3,
+	KUTF_RESULT_SKIP    = -2,
+	KUTF_RESULT_UNKNOWN = -1,
+
+	KUTF_RESULT_PASS    = 0,
+	KUTF_RESULT_DEBUG   = 1,
+	KUTF_RESULT_INFO    = 2,
+	KUTF_RESULT_WARN    = 3,
+	KUTF_RESULT_FAIL    = 4,
+	KUTF_RESULT_FATAL   = 5,
+	KUTF_RESULT_ABORT   = 6,
+
+	KUTF_RESULT_USERDATA      = 7,
+	KUTF_RESULT_USERDATA_WAIT = 8,
+	KUTF_RESULT_TEST_FINISHED = 9
+};
+
+/* The maximum size of a kutf_result_status result when
+ * converted to a string
+ */
+#define KUTF_ERROR_MAX_NAME_SIZE 21
+
+#ifdef __KERNEL__
+
+#include <kutf/kutf_mem.h>
+#include <linux/wait.h>
+
+struct kutf_context;
+
+/**
+ * struct kutf_result - Represents a single test result.
+ * @node:	Next result in the list of results.
+ * @status:	The status summary (pass / warn / fail / etc).
+ * @message:	A more verbose status message.
+ */
+struct kutf_result {
+	struct list_head            node;
+	enum kutf_result_status     status;
+	const char                  *message;
+};
+
+/**
+ * KUTF_RESULT_SET_WAITING_FOR_INPUT - Test is waiting for user data
+ *
+ * This flag is set within a struct kutf_result_set whenever the test is blocked
+ * waiting for user data. Attempts to dequeue results when this flag is set
+ * will cause a dummy %KUTF_RESULT_USERDATA_WAIT result to be produced. This
+ * is used to output a warning message and an end-of-file condition.
+ */
+#define KUTF_RESULT_SET_WAITING_FOR_INPUT 1
+
+/**
+ * struct kutf_result_set - Represents a set of results.
+ * @results:	List head of a struct kutf_result list for storing the results
+ * @waitq:	Wait queue signalled whenever new results are added.
+ * @flags:	Flags; see %KUTF_RESULT_SET_WAITING_FOR_INPUT
+ */
+struct kutf_result_set {
+	struct list_head          results;
+	wait_queue_head_t         waitq;
+	int                       flags;
+};
+
+/**
+ * kutf_create_result_set() - Create a new result set
+ *                            to which results can be added.
+ *
+ * Return: The created result set.
+ */
+struct kutf_result_set *kutf_create_result_set(void);
+
+/**
+ * kutf_add_result() - Add a result to the end of an existing result set.
+ *
+ * @context:	The kutf context
+ * @status:	The result status to add.
+ * @message:	The result message to add.
+ *
+ * Return: 0 if the result is successfully added. -ENOMEM if allocation fails.
+ */
+int kutf_add_result(struct kutf_context *context,
+		enum kutf_result_status status, const char *message);
+
+/**
+ * kutf_remove_result() - Remove a result from the head of a result set.
+ * @set:	The result set.
+ *
+ * This function will block until there is a result to read. The wait is
+ * interruptible, so this function will return with an ERR_PTR if interrupted.
+ *
+ * Return: result or ERR_PTR if interrupted
+ */
+struct kutf_result *kutf_remove_result(
+		struct kutf_result_set *set);
+
+/**
+ * kutf_destroy_result_set() - Free a previously created result set.
+ *
+ * @results:	The result set whose resources to free.
+ */
+void kutf_destroy_result_set(struct kutf_result_set *results);
+
+/**
+ * kutf_set_waiting_for_input() - The test is waiting for userdata
+ *
+ * @set: The result set to update
+ *
+ * Causes the result set to always have results and return a fake
+ * %KUTF_RESULT_USERDATA_WAIT result.
+ */
+void kutf_set_waiting_for_input(struct kutf_result_set *set);
+
+/**
+ * kutf_clear_waiting_for_input() - The test is no longer waiting for userdata
+ *
+ * @set: The result set to update
+ *
+ * Cancels the effect of kutf_set_waiting_for_input()
+ */
+void kutf_clear_waiting_for_input(struct kutf_result_set *set);
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _KERNEL_UTF_RESULTSET_H_ */
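
For context, a rough sketch of the consumer side of this API, approximating (not reproducing) what the framework's debugfs reader does with a result set:

	static void example_drain_results(struct kutf_result_set *set)
	{
		struct kutf_result *res;

		for (;;) {
			res = kutf_remove_result(set);	/* blocks until a result is queued */
			if (IS_ERR(res))
				break;			/* interrupted while waiting */
			if (res->status == KUTF_RESULT_TEST_FINISHED)
				break;			/* no more results will be produced */
			pr_info("kutf result %d: %s\n", res->status,
				res->message ? res->message : "");
		}
	}
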
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h
new file mode 100644
index 0000000..8d75f50
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_suite.h
@@ -0,0 +1,569 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_SUITE_H_
+#define _KERNEL_UTF_SUITE_H_
+
+/* kutf_suite.h
+ * Functions for management of test suites.
+ *
+ * This collection of data structures, macros, and functions are used to
+ * create Test Suites, Tests within those Test Suites, and Fixture variants
+ * of each test.
+ */
+
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+
+#include <kutf/kutf_mem.h>
+#include <kutf/kutf_resultset.h>
+
+/* Arbitrary maximum size to prevent user space allocating too much kernel
+ * memory
+ */
+#define KUTF_MAX_LINE_LENGTH (1024u)
+
+/**
+ * Pseudo-flag indicating an absence of any specified test class. Note that
+ * tests should not be annotated with this constant as it is simply a zero
+ * value; tests without a more specific class must be marked with the flag
+ * KUTF_F_TEST_GENERIC.
+ */
+#define KUTF_F_TEST_NONE                ((unsigned int)(0))
+
+/**
+ * Class indicating this test is a smoke test.
+ * A given set of smoke tests should be quick to run, enabling rapid turn-around
+ * of "regress-on-commit" test runs.
+ */
+#define KUTF_F_TEST_SMOKETEST           ((unsigned int)(1 << 1))
+
+/**
+ * Class indicating this test is a performance test.
+ * These tests typically produce a performance metric, such as "time to run" or
+ * "frames per second",
+ */
+#define KUTF_F_TEST_PERFORMANCE         ((unsigned int)(1 << 2))
+
+/**
+ * Class indicating that this test is a deprecated test.
+ * These tests have typically been replaced by an alternative test which is
+ * more efficient, or has better coverage.
+ */
+#define KUTF_F_TEST_DEPRECATED          ((unsigned int)(1 << 3))
+
+/**
+ * Class indicating that this test is a known failure.
+ * These tests have typically been run and failed, but marking them as a known
+ * failure means it is easier to triage results.
+ *
+ * It is typically more convenient to triage known failures using the
+ * results database and web UI, as this means there is no need to modify the
+ * test code.
+ */
+#define KUTF_F_TEST_EXPECTED_FAILURE    ((unsigned int)(1 << 4))
+
+/**
+ * Class indicating that this test is a generic test, which is not a member of
+ * a more specific test class. Tests which are not created with a specific set
+ * of filter flags by the user are assigned this test class by default.
+ */
+#define KUTF_F_TEST_GENERIC             ((unsigned int)(1 << 5))
+
+/**
+ * Class indicating this test is a resource allocation failure test.
+ * A resource allocation failure test will test that an error code is
+ * correctly propagated when an allocation fails.
+ */
+#define KUTF_F_TEST_RESFAIL             ((unsigned int)(1 << 6))
+
+/**
+ * Additional flag indicating that this test is an expected failure when
+ * run in resource failure mode. These tests are never run when running
+ * in low resource mode.
+ */
+#define KUTF_F_TEST_EXPECTED_FAILURE_RF ((unsigned int)(1 << 7))
+
+/**
+ * Flag reserved for user-defined filter zero.
+ */
+#define KUTF_F_TEST_USER_0 ((unsigned int)(1 << 24))
+
+/**
+ * Flag reserved for user-defined filter one.
+ */
+#define KUTF_F_TEST_USER_1 ((unsigned int)(1 << 25))
+
+/**
+ * Flag reserved for user-defined filter two.
+ */
+#define KUTF_F_TEST_USER_2 ((unsigned int)(1 << 26))
+
+/**
+ * Flag reserved for user-defined filter three.
+ */
+#define KUTF_F_TEST_USER_3 ((unsigned int)(1 << 27))
+
+/**
+ * Flag reserved for user-defined filter four.
+ */
+#define KUTF_F_TEST_USER_4 ((unsigned int)(1 << 28))
+
+/**
+ * Flag reserved for user-defined filter five.
+ */
+#define KUTF_F_TEST_USER_5 ((unsigned int)(1 << 29))
+
+/**
+ * Flag reserved for user-defined filter six.
+ */
+#define KUTF_F_TEST_USER_6 ((unsigned int)(1 << 30))
+
+/**
+ * Flag reserved for user-defined filter seven.
+ */
+#define KUTF_F_TEST_USER_7 ((unsigned int)(1 << 31))
+
+/**
+ * Pseudo-flag indicating that all test classes should be executed.
+ */
+#define KUTF_F_TEST_ALL                 ((unsigned int)(0xFFFFFFFFU))
+
+/**
+ * union kutf_callback_data - Union used to store test callback data
+ * @ptr_value:		pointer to the location where test callback data
+ *                      are stored
+ * @u32_value:		a number which represents test callback data
+ */
+union kutf_callback_data {
+	void *ptr_value;
+	u32  u32_value;
+};
+
+/**
+ * struct kutf_userdata_line - A line of user data to be returned to the user
+ * @node:   struct list_head to link this into a list
+ * @str:    The line of user data to return to user space
+ * @size:   The number of bytes within @str
+ */
+struct kutf_userdata_line {
+	struct list_head node;
+	char *str;
+	size_t size;
+};
+
+/**
+ * KUTF_USERDATA_WARNING_OUTPUT - Flag specifying that a warning has been output
+ *
+ * If user space reads the "run" file while the test is waiting for user data,
+ * then the framework will output a warning message and set this flag within
+ * struct kutf_userdata. A subsequent read will then simply return an end of
+ * file condition rather than outputting the warning again. The upshot of this
+ * is that simply running 'cat' on a test which requires user data will produce
+ * the warning followed by 'cat' exiting due to EOF - which is much more user
+ * friendly than blocking indefinitely waiting for user data.
+ */
+#define KUTF_USERDATA_WARNING_OUTPUT  1
+
+/**
+ * struct kutf_userdata - Structure holding user data
+ * @flags:       See %KUTF_USERDATA_WARNING_OUTPUT
+ * @input_head:  List of struct kutf_userdata_line containing user data
+ *               to be read by the kernel space test.
+ * @input_waitq: Wait queue signalled when there is new user data to be
+ *               read by the kernel space test.
+ */
+struct kutf_userdata {
+	unsigned long flags;
+	struct list_head input_head;
+	wait_queue_head_t input_waitq;
+};
+
+/**
+ * struct kutf_context - Structure representing a kernel test context
+ * @kref:		Refcount for number of users of this context
+ * @suite:		Convenience pointer to the suite this context
+ *                      is running
+ * @test_fix:		The fixture that is being run in this context
+ * @fixture_pool:	The memory pool used for the duration of
+ *                      the fixture/test context.
+ * @fixture:		The user provided fixture structure.
+ * @fixture_index:	The index (id) of the current fixture.
+ * @fixture_name:	The name of the current fixture (or NULL if unnamed).
+ * @test_data:		Any user private data associated with this test
+ * @result_set:		All the results logged by this test context
+ * @status:		The status of the currently running fixture.
+ * @expected_status:	The expected status on exit of the currently
+ *                      running fixture.
+ * @work:		Work item to enqueue onto the work queue to run the test
+ * @userdata:		Structure containing the user data for the test to read
+ */
+struct kutf_context {
+	struct kref                     kref;
+	struct kutf_suite               *suite;
+	struct kutf_test_fixture        *test_fix;
+	struct kutf_mempool             fixture_pool;
+	void                            *fixture;
+	unsigned int                    fixture_index;
+	const char                      *fixture_name;
+	union kutf_callback_data        test_data;
+	struct kutf_result_set          *result_set;
+	enum kutf_result_status         status;
+	enum kutf_result_status         expected_status;
+
+	struct work_struct              work;
+	struct kutf_userdata            userdata;
+};
+
+/**
+ * struct kutf_suite - Structure representing a kernel test suite
+ * @app:			The application this suite belongs to.
+ * @name:			The name of this suite.
+ * @suite_data:			Any user private data associated with this
+ *                              suite.
+ * @create_fixture:		Function used to create a new fixture instance
+ * @remove_fixture:		Function used to destroy a previously created fixture instance
+ * @fixture_variants:		The number of variants (must be at least 1).
+ * @suite_default_flags:	Suite global filter flags which are set on
+ *                              all tests.
+ * @node:			List node for suite_list
+ * @dir:			The debugfs directory for this suite
+ * @test_list:			List head to store all the tests which are
+ *                              part of this suite
+ */
+struct kutf_suite {
+	struct kutf_application        *app;
+	const char                     *name;
+	union kutf_callback_data       suite_data;
+	void *(*create_fixture)(struct kutf_context *context);
+	void  (*remove_fixture)(struct kutf_context *context);
+	unsigned int                   fixture_variants;
+	unsigned int                   suite_default_flags;
+	struct list_head               node;
+	struct dentry                  *dir;
+	struct list_head               test_list;
+};
+
+/* ============================================================================
+	Application functions
+============================================================================ */
+
+/**
+ * kutf_create_application() - Create an in kernel test application.
+ * @name:	The name of the test application.
+ *
+ * Return: pointer to the kutf_application on success or NULL
+ * on failure
+ */
+struct kutf_application *kutf_create_application(const char *name);
+
+/**
+ * kutf_destroy_application() - Destroy an in kernel test application.
+ *
+ * @app:	The test application to destroy.
+ */
+void kutf_destroy_application(struct kutf_application *app);
+
+/* ============================================================================
+	Suite functions
+============================================================================ */
+
+/**
+ * kutf_create_suite() - Create a kernel test suite.
+ * @app:		The test application to create the suite in.
+ * @name:		The name of the suite.
+ * @fixture_count:	The number of fixtures to run over the test
+ *                      functions in this suite
+ * @create_fixture:	Callback used to create a fixture. The returned value
+ *                      is stored in the fixture pointer in the context for
+ *                      use in the test functions.
+ * @remove_fixture:	Callback used to remove a previously created fixture.
+ *
+ * Suite names must be unique. Should two suites with the same name be
+ * registered with the same application then this function will fail; if they
+ * are registered with different applications then the function will not detect
+ * this and the call will succeed.
+ *
+ * Return: pointer to the created kutf_suite on success or NULL
+ * on failure
+ */
+struct kutf_suite *kutf_create_suite(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context));
+
+/**
+ * kutf_create_suite_with_filters() - Create a kernel test suite with user
+ *                                    defined default filters.
+ * @app:		The test application to create the suite in.
+ * @name:		The name of the suite.
+ * @fixture_count:	The number of fixtures to run over the test
+ *                      functions in this suite
+ * @create_fixture:	Callback used to create a fixture. The returned value
+ *			is stored in the fixture pointer in the context for
+ *			use in the test functions.
+ * @remove_fixture:	Callback used to remove a previously created fixture.
+ * @filters:		Filters to apply to a test if it doesn't provide its own
+ *
+ * Suite names must be unique. Should two suites with the same name be
+ * registered with the same application then this function will fail; if they
+ * are registered with different applications then the function will not detect
+ * this and the call will succeed.
+ *
+ * Return: pointer to the created kutf_suite on success or NULL on failure
+ */
+struct kutf_suite *kutf_create_suite_with_filters(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters);
+
+/**
+ * kutf_create_suite_with_filters_and_data() - Create a kernel test suite with
+ *                                             user defined default filters.
+ * @app:		The test application to create the suite in.
+ * @name:		The name of the suite.
+ * @fixture_count:	The number of fixtures to run over the test
+ *			functions in this suite
+ * @create_fixture:	Callback used to create a fixture. The returned value
+ *			is stored in the fixture pointer in the context for
+ *			use in the test functions.
+ * @remove_fixture:	Callback used to remove a previously created fixture.
+ * @filters:		Filters to apply to a test if it doesn't provide its own
+ * @suite_data:		Suite specific callback data, provided during the
+ *			running of the test in the kutf_context
+ *
+ * Return: pointer to the created kutf_suite on success or NULL
+ * on failure
+ */
+struct kutf_suite *kutf_create_suite_with_filters_and_data(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data suite_data);
+
+/**
+ * kutf_add_test() - Add a test to a kernel test suite.
+ * @suite:	The suite to add the test to.
+ * @id:		The ID of the test.
+ * @name:	The name of the test.
+ * @execute:	Callback to the test function to run.
+ *
+ * Note: As no filters are provided the test will use the suite filters instead
+ */
+void kutf_add_test(struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context));
+
+/**
+ * kutf_add_test_with_filters() - Add a test to a kernel test suite with filters
+ * @suite:	The suite to add the test to.
+ * @id:		The ID of the test.
+ * @name:	The name of the test.
+ * @execute:	Callback to the test function to run.
+ * @filters:	A set of filtering flags, assigning test categories.
+ */
+void kutf_add_test_with_filters(struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters);
+
+/**
+ * kutf_add_test_with_filters_and_data() - Add a test to a kernel test suite
+ *					   with filters.
+ * @suite:	The suite to add the test to.
+ * @id:		The ID of the test.
+ * @name:	The name of the test.
+ * @execute:	Callback to the test function to run.
+ * @filters:	A set of filtering flags, assigning test categories.
+ * @test_data:	Test specific callback data, provided during the
+ *		running of the test in the kutf_context
+ */
+void kutf_add_test_with_filters_and_data(
+		struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data test_data);
+
+
+/* ============================================================================
+	Test functions
+============================================================================ */
+/**
+ * kutf_test_log_result_external() - Log a result which has been created
+ *                                   externally, in a standard form
+ *                                   recognized by the log parser.
+ * @context:	The test context the test is running in
+ * @message:	The message for this result
+ * @new_status:	The result status of this log message
+ */
+void kutf_test_log_result_external(
+	struct kutf_context *context,
+	const char *message,
+	enum kutf_result_status new_status);
+
+/**
+ * kutf_test_expect_abort() - Tell the kernel that you expect the current
+ *                            fixture to produce an abort.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_abort(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_fatal() - Tell the kernel that you expect the current
+ *                            fixture to produce a fatal error.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_fatal(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_fail() - Tell the kernel that you expect the current
+ *                           fixture to fail.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_fail(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_warn() - Tell the kernel that you expect the current
+ *                           fixture to produce a warning.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_warn(struct kutf_context *context);
+
+/**
+ * kutf_test_expect_pass() - Tell the kernel that you expect the current
+ *                           fixture to pass.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_expect_pass(struct kutf_context *context);
+
+/**
+ * kutf_test_skip() - Tell the kernel that the test should be skipped.
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_skip(struct kutf_context *context);
+
+/**
+ * kutf_test_skip_msg() - Tell the kernel that this test has been skipped,
+ *                        supplying a reason string.
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the reason for the skip.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_skip_msg(struct kutf_context *context, const char *message);
+
+/**
+ * kutf_test_pass() - Tell the kernel that this test has passed.
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the reason for the pass.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_pass(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_debug() - Send a debug message
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the debug information.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_debug(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_info() - Send an information message
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the information message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_info(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_warn() - Send a warning message
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the warning message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_warn(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_fail() - Tell the kernel that a test has failed
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the failure message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_fail(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_fatal() - Tell the kernel that a test has triggered a fatal error
+ * @context:	The test context this test is running in.
+ * @message:	A message string containing the fatal error message.
+ *
+ * Note: The message must not be freed during the lifetime of the test run.
+ * This means it should either be a pre-baked string, or if a dynamic string
+ * is required it must be created with kutf_dsprintf which will store
+ * the resultant string in a buffer whose lifetime is the same as the test run.
+ */
+void kutf_test_fatal(struct kutf_context *context, char const *message);
+
+/**
+ * kutf_test_abort() - Tell the kernel that a test triggered an abort in the test
+ *
+ * @context:	The test context this test is running in.
+ */
+void kutf_test_abort(struct kutf_context *context);
+
+#endif	/* _KERNEL_UTF_SUITE_H_ */
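
To tie the suite API together, a hedged sketch of a module that registers one application, one suite and one test (all names are hypothetical; error handling is abbreviated and linux/module.h is assumed):

	static struct kutf_application *example_app;

	static void *example_create_fixture(struct kutf_context *context)
	{
		/* Allocate trivial per-fixture state from the fixture pool */
		return kutf_mempool_alloc(&context->fixture_pool, 1);
	}

	static void example_remove_fixture(struct kutf_context *context)
	{
		/* Pool memory is reclaimed when the fixture pool is destroyed */
	}

	static void example_test_func(struct kutf_context *context)
	{
		kutf_test_pass(context, "example test passed");
	}

	static int __init example_kutf_init(void)
	{
		struct kutf_suite *suite;

		example_app = kutf_create_application("example_app");
		if (!example_app)
			return -ENOMEM;

		/* One fixture variant, default filters */
		suite = kutf_create_suite(example_app, "example_suite", 1,
				example_create_fixture, example_remove_fixture);
		if (!suite) {
			kutf_destroy_application(example_app);
			return -ENOMEM;
		}

		kutf_add_test(suite, 0x0, "example_test", example_test_func);
		return 0;
	}

	static void __exit example_kutf_exit(void)
	{
		kutf_destroy_application(example_app);
	}

	module_init(example_kutf_init);
	module_exit(example_kutf_exit);
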
diff --git a/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h
new file mode 100644
index 0000000..25b8285
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/include/kutf/kutf_utils.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _KERNEL_UTF_UTILS_H_
+#define _KERNEL_UTF_UTILS_H_
+
+/* kutf_utils.h
+ * Utilities for the kernel UTF test infrastructure.
+ *
+ * This collection of library functions are provided for use by kernel UTF
+ * and users of kernel UTF which don't directly fit within the other
+ * code modules.
+ */
+
+#include <kutf/kutf_mem.h>
+
+/**
+ * Maximum size of the message strings within kernel UTF; messages longer than
+ * this will be truncated.
+ */
+#define KUTF_MAX_DSPRINTF_LEN	1024
+
+/**
+ * kutf_dsprintf() - dynamic sprintf
+ * @pool:	memory pool to allocate from
+ * @fmt:	The format string describing the string to document.
+ * @...:		The parameters to feed into the format string.
+ *
+ * This function implements sprintf which dynamically allocates memory to store
+ * the string. The library will free the memory containing the string when the
+ * result set is cleared or destroyed.
+ *
+ * Note: The returned string may be truncated to fit an internal temporary
+ * buffer, which is KUTF_MAX_DSPRINTF_LEN bytes in length.
+ *
+ * Return: Returns pointer to allocated string, or NULL on error.
+ */
+const char *kutf_dsprintf(struct kutf_mempool *pool,
+		const char *fmt, ...);
+
+#endif	/* _KERNEL_UTF_UTILS_H_ */
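
A brief sketch of kutf_dsprintf in use, building a message whose lifetime matches the test run (a hypothetical helper; the format string and values are invented):

	static void example_check(struct kutf_context *context, int got, int want)
	{
		if (got != want) {
			const char *msg = kutf_dsprintf(&context->fixture_pool,
					"expected %d, got %d", want, got);

			/* Fall back to a static string if the allocation failed */
			kutf_test_fail(context, msg ? msg : "expected/got mismatch");
		}
	}
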
diff --git a/drivers/gpu/arm/midgard/tests/kutf/Kbuild b/drivers/gpu/arm/midgard/tests/kutf/Kbuild
new file mode 100644
index 0000000..2531d41
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/Kbuild
@@ -0,0 +1,26 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+ccflags-y += -I$(src)/../include
+
+obj-$(CONFIG_MALI_KUTF) += kutf.o
+
+kutf-y := kutf_mem.o kutf_resultset.o kutf_suite.o kutf_utils.o kutf_helpers.o kutf_helpers_user.o
diff --git a/drivers/gpu/arm/midgard/tests/kutf/Kconfig b/drivers/gpu/arm/midgard/tests/kutf/Kconfig
new file mode 100644
index 0000000..0cdb474
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/Kconfig
@@ -0,0 +1,28 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+
+config MALI_KUTF
+ tristate "Mali Kernel Unit Test Framework"
+ default m
+ help
+   Enables MALI testing framework. To compile it as a module,
+   choose M here - this will generate a single module called kutf.
diff --git a/drivers/gpu/arm/midgard/tests/kutf/Makefile b/drivers/gpu/arm/midgard/tests/kutf/Makefile
new file mode 100644
index 0000000..d848e87
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/Makefile
@@ -0,0 +1,35 @@
+#
+# (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+all:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) $(SCONS_CONFIGS) EXTRA_CFLAGS=-I$(CURDIR)/../include modules
+
+clean:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
diff --git a/drivers/gpu/arm/midgard/tests/kutf/build.bp b/drivers/gpu/arm/midgard/tests/kutf/build.bp
new file mode 100644
index 0000000..960c8faa
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/build.bp
@@ -0,0 +1,31 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_kernel_module {
+    name: "kutf",
+    defaults: ["kernel_defaults"],
+    srcs: [
+        "Kbuild",
+        "kutf_helpers.c",
+        "kutf_helpers_user.c",
+        "kutf_mem.c",
+        "kutf_resultset.c",
+        "kutf_suite.c",
+        "kutf_utils.c",
+    ],
+    kbuild_options: ["CONFIG_MALI_KUTF=m"],
+    include_dirs: ["kernel/drivers/gpu/arm/midgard/tests/include"],
+    enabled: false,
+    base_build_kutf: {
+        enabled: true,
+    },
+}
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c
new file mode 100644
index 0000000..cab5add6
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers.c
@@ -0,0 +1,129 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF test helpers */
+#include <kutf/kutf_helpers.h>
+
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/preempt.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+
+static DEFINE_SPINLOCK(kutf_input_lock);
+
+static bool pending_input(struct kutf_context *context)
+{
+	bool input_pending;
+
+	spin_lock(&kutf_input_lock);
+
+	input_pending = !list_empty(&context->userdata.input_head);
+
+	spin_unlock(&kutf_input_lock);
+
+	return input_pending;
+}
+
+char *kutf_helper_input_dequeue(struct kutf_context *context, size_t *str_size)
+{
+	struct kutf_userdata_line *line;
+
+	spin_lock(&kutf_input_lock);
+
+	while (list_empty(&context->userdata.input_head)) {
+		int err;
+
+		kutf_set_waiting_for_input(context->result_set);
+
+		spin_unlock(&kutf_input_lock);
+
+		err = wait_event_interruptible(context->userdata.input_waitq,
+				pending_input(context));
+
+		if (err)
+			return ERR_PTR(-EINTR);
+
+		spin_lock(&kutf_input_lock);
+	}
+
+	line = list_first_entry(&context->userdata.input_head,
+			struct kutf_userdata_line, node);
+	if (line->str) {
+		/*
+		 * Unless it is the end-of-input marker,
+		 * remove it from the list
+		 */
+		list_del(&line->node);
+	}
+
+	spin_unlock(&kutf_input_lock);
+
+	if (str_size)
+		*str_size = line->size;
+	return line->str;
+}
+
+int kutf_helper_input_enqueue(struct kutf_context *context,
+		const char __user *str, size_t size)
+{
+	struct kutf_userdata_line *line;
+
+	line = kutf_mempool_alloc(&context->fixture_pool,
+			sizeof(*line) + size + 1);
+	if (!line)
+		return -ENOMEM;
+	if (str) {
+		unsigned long bytes_not_copied;
+
+		line->size = size;
+		line->str = (void *)(line + 1);
+		bytes_not_copied = copy_from_user(line->str, str, size);
+		if (bytes_not_copied != 0)
+			return -EFAULT;
+		/* Zero terminate the string */
+		line->str[size] = '\0';
+	} else {
+		/* This is used to mark the end of input */
+		WARN_ON(size);
+		line->size = 0;
+		line->str = NULL;
+	}
+
+	spin_lock(&kutf_input_lock);
+
+	list_add_tail(&line->node, &context->userdata.input_head);
+
+	kutf_clear_waiting_for_input(context->result_set);
+
+	spin_unlock(&kutf_input_lock);
+
+	wake_up(&context->userdata.input_waitq);
+
+	return 0;
+}
+
+void kutf_helper_input_enqueue_end_of_data(struct kutf_context *context)
+{
+	kutf_helper_input_enqueue(context, NULL, 0);
+}
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c
new file mode 100644
index 0000000..108fa82
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_helpers_user.c
@@ -0,0 +1,468 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF test helpers that mirror those for kutf-userside */
+#include <kutf/kutf_helpers_user.h>
+#include <kutf/kutf_helpers.h>
+#include <kutf/kutf_utils.h>
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+const char *valtype_names[] = {
+	"INVALID",
+	"U64",
+	"STR",
+};
+
+static const char *get_val_type_name(enum kutf_helper_valtype valtype)
+{
+	/* enums can be signed or unsigned (implementation dependent), so
+	 * enforce it to prevent:
+	 * a) "<0 comparison on unsigned type" warning - if we did both upper
+	 *    and lower bound check
+	 * b) incorrect range checking if it was a signed type - if we did
+	 *    upper bound check only */
+	unsigned int type_idx = (unsigned int)valtype;
+
+	if (type_idx >= (unsigned int)KUTF_HELPER_VALTYPE_COUNT)
+		type_idx = (unsigned int)KUTF_HELPER_VALTYPE_INVALID;
+
+	return valtype_names[type_idx];
+}
+
+/* Check up to str_len chars of val_str to see if it's a valid value name:
+ *
+ * - Has between 1 and KUTF_HELPER_MAX_VAL_NAME_LEN characters before the \0 terminator
+ * - And, each char is in the character set [A-Z0-9_] */
+static int validate_val_name(const char *val_str, int str_len)
+{
+	int i = 0;
+
+	for (i = 0; str_len && i <= KUTF_HELPER_MAX_VAL_NAME_LEN && val_str[i] != '\0'; ++i, --str_len) {
+		char val_chr = val_str[i];
+
+		if (val_chr >= 'A' && val_chr <= 'Z')
+			continue;
+		if (val_chr >= '0' && val_chr <= '9')
+			continue;
+		if (val_chr == '_')
+			continue;
+
+		/* Character not in the set [A-Z0-9_] - report error */
+		return 1;
+	}
+
+	/* Names of 0 length are not valid */
+	if (i == 0)
+		return 1;
+	/* Length greater than KUTF_HELPER_MAX_VAL_NAME_LEN not allowed */
+	if (i > KUTF_HELPER_MAX_VAL_NAME_LEN || (i == KUTF_HELPER_MAX_VAL_NAME_LEN && val_str[i] != '\0'))
+		return 1;
+
+	return 0;
+}
+
+/* Find the length of the valid part of the string when it will be in quotes
+ * e.g. "str"
+ *
+ * That is, before any '\\', '\n' or '"' characters. This is so we don't have
+ * to escape the string */
+static int find_quoted_string_valid_len(const char *str)
+{
+	char *ptr;
+	const char *check_chars = "\\\n\"";
+
+	ptr = strpbrk(str, check_chars);
+	if (ptr)
+		return (int)(ptr-str);
+
+	return (int)strlen(str);
+}
+
+static int kutf_helper_userdata_enqueue(struct kutf_context *context,
+		const char *str)
+{
+	char *str_copy;
+	size_t len;
+	int err;
+
+	len = strlen(str)+1;
+
+	str_copy = kutf_mempool_alloc(&context->fixture_pool, len);
+	if (!str_copy)
+		return -ENOMEM;
+
+	strcpy(str_copy, str);
+
+	err = kutf_add_result(context, KUTF_RESULT_USERDATA, str_copy);
+
+	return err;
+}
+
+#define MAX_U64_HEX_LEN 16
+/* (Name size) + ("=0x" size) + (64-bit hex value size) + (terminator) */
+#define NAMED_U64_VAL_BUF_SZ (KUTF_HELPER_MAX_VAL_NAME_LEN + 3 + MAX_U64_HEX_LEN + 1)
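+/* For example (the name is hypothetical), sending the value 42 under the
+ * name "COUNTER" produces the line "COUNTER=0x2a". */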
+
+int kutf_helper_send_named_u64(struct kutf_context *context,
+		const char *val_name, u64 val)
+{
+	int ret = 1;
+	char msgbuf[NAMED_U64_VAL_BUF_SZ];
+	const char *errmsg = NULL;
+
+	if (validate_val_name(val_name, KUTF_HELPER_MAX_VAL_NAME_LEN + 1)) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send u64 value named '%s': Invalid value name", val_name);
+		goto out_err;
+	}
+
+	ret = snprintf(msgbuf, NAMED_U64_VAL_BUF_SZ, "%s=0x%llx", val_name, val);
+	if (ret >= NAMED_U64_VAL_BUF_SZ || ret < 0) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send u64 value named '%s': snprintf() problem buffer size==%d ret=%d",
+				val_name, NAMED_U64_VAL_BUF_SZ, ret);
+		goto out_err;
+	}
+
+	ret = kutf_helper_userdata_enqueue(context, msgbuf);
+	if (ret) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send u64 value named '%s': send returned %d",
+				val_name, ret);
+		goto out_err;
+	}
+
+	return ret;
+out_err:
+	kutf_test_fail(context, errmsg);
+	return ret;
+}
+EXPORT_SYMBOL(kutf_helper_send_named_u64);
+
+#define NAMED_VALUE_SEP "="
+#define NAMED_STR_START_DELIM NAMED_VALUE_SEP "\""
+#define NAMED_STR_END_DELIM "\""
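+/* With these delimiters a named string value is sent as, e.g. (name and
+ * payload are hypothetical), MSG="all good"; the read side of the 'run'
+ * file appends a newline terminator. */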
+
+int kutf_helper_max_str_len_for_kern(const char *val_name,
+		int kern_buf_sz)
+{
+	const int val_name_len = strlen(val_name);
+	const int start_delim_len = strlen(NAMED_STR_START_DELIM);
+	const int end_delim_len = strlen(NAMED_STR_END_DELIM);
+	int max_msg_len = kern_buf_sz;
+	int max_str_len;
+
+	max_str_len = max_msg_len - val_name_len - start_delim_len -
+		end_delim_len;
+
+	return max_str_len;
+}
+EXPORT_SYMBOL(kutf_helper_max_str_len_for_kern);
+
+int kutf_helper_send_named_str(struct kutf_context *context,
+		const char *val_name,
+		const char *val_str)
+{
+	int val_str_len;
+	int str_buf_sz;
+	char *str_buf = NULL;
+	int ret = 1;
+	char *copy_ptr;
+	int val_name_len;
+	int start_delim_len = strlen(NAMED_STR_START_DELIM);
+	int end_delim_len = strlen(NAMED_STR_END_DELIM);
+	const char *errmsg = NULL;
+
+	if (validate_val_name(val_name, KUTF_HELPER_MAX_VAL_NAME_LEN + 1)) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send str value named '%s': Invalid value name", val_name);
+		goto out_err;
+	}
+	val_name_len = strlen(val_name);
+
+	val_str_len = find_quoted_string_valid_len(val_str);
+
+	/* (name length) + ("=\"" length) + (val_str len) + ("\"" length) + terminator */
+	str_buf_sz = val_name_len + start_delim_len + val_str_len + end_delim_len + 1;
+
+	/* Using kmalloc() here instead of mempool since we know we need to free
+	 * before we return */
+	str_buf = kmalloc(str_buf_sz, GFP_KERNEL);
+	if (!str_buf) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send str value named '%s': kmalloc failed, str_buf_sz=%d",
+				val_name, str_buf_sz);
+		goto out_err;
+	}
+	copy_ptr = str_buf;
+
+	/* Manually copy each string component instead of using snprintf()
+	 * because val_str may need to be truncated early, and this keeps the
+	 * error handling simpler */
+
+	/* name */
+	memcpy(copy_ptr, val_name, val_name_len);
+	copy_ptr += val_name_len;
+
+	/* str start delimiter */
+	memcpy(copy_ptr, NAMED_STR_START_DELIM, start_delim_len);
+	copy_ptr += start_delim_len;
+
+	/* str value */
+	memcpy(copy_ptr, val_str, val_str_len);
+	copy_ptr += val_str_len;
+
+	/* str end delimiter */
+	memcpy(copy_ptr, NAMED_STR_END_DELIM, end_delim_len);
+	copy_ptr += end_delim_len;
+
+	/* Terminator */
+	*copy_ptr = '\0';
+
+	ret = kutf_helper_userdata_enqueue(context, str_buf);
+
+	if (ret) {
+		errmsg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to send str value named '%s': send returned %d",
+				val_name, ret);
+		goto out_err;
+	}
+
+	kfree(str_buf);
+	return ret;
+
+out_err:
+	kutf_test_fail(context, errmsg);
+	kfree(str_buf);
+	return ret;
+}
+EXPORT_SYMBOL(kutf_helper_send_named_str);
+
+int kutf_helper_receive_named_val(
+		struct kutf_context *context,
+		struct kutf_helper_named_val *named_val)
+{
+	size_t recv_sz;
+	char *recv_str;
+	char *search_ptr;
+	char *name_str = NULL;
+	int name_len;
+	int strval_len;
+	enum kutf_helper_valtype type = KUTF_HELPER_VALTYPE_INVALID;
+	char *strval = NULL;
+	u64 u64val = 0;
+	int err = KUTF_HELPER_ERR_INVALID_VALUE;
+
+	recv_str = kutf_helper_input_dequeue(context, &recv_sz);
+	if (!recv_str)
+		return -EBUSY;
+	else if (IS_ERR(recv_str))
+		return PTR_ERR(recv_str);
+
+	/* Find the '=', grab the name and validate it */
+	search_ptr = strnchr(recv_str, recv_sz, NAMED_VALUE_SEP[0]);
+	if (search_ptr) {
+		name_len = search_ptr - recv_str;
+		if (!validate_val_name(recv_str, name_len)) {
+			/* no need to reallocate - just modify string in place */
+			name_str = recv_str;
+			name_str[name_len] = '\0';
+
+			/* Move until after the '=' */
+			recv_str += (name_len + 1);
+			recv_sz -= (name_len + 1);
+		}
+	}
+	if (!name_str) {
+		pr_err("Invalid name part for received string '%s'\n",
+				recv_str);
+		return KUTF_HELPER_ERR_INVALID_NAME;
+	}
+
+	/* detect value type */
+	if (*recv_str == NAMED_STR_START_DELIM[1]) {
+		/* string delimiter start */
+		++recv_str;
+		--recv_sz;
+
+		/* Find end of string */
+		search_ptr = strnchr(recv_str, recv_sz, NAMED_STR_END_DELIM[0]);
+		if (search_ptr) {
+			strval_len = search_ptr - recv_str;
+			/* Validate the string to ensure it contains no quotes */
+			if (strval_len == find_quoted_string_valid_len(recv_str)) {
+				/* no need to reallocate - just modify string in place */
+				strval = recv_str;
+				strval[strval_len] = '\0';
+
+				/* Move until after the end delimiter */
+				recv_str += (strval_len + 1);
+				recv_sz -= (strval_len + 1);
+				type = KUTF_HELPER_VALTYPE_STR;
+			} else {
+				pr_err("String value contains invalid characters in rest of received string '%s'\n", recv_str);
+				err = KUTF_HELPER_ERR_CHARS_AFTER_VAL;
+			}
+		} else {
+			pr_err("End of string delimiter not found in rest of received string '%s'\n", recv_str);
+			err = KUTF_HELPER_ERR_NO_END_DELIMITER;
+		}
+	} else {
+		/* possibly a number value - strtoull will parse it */
+		err = kstrtoull(recv_str, 0, &u64val);
+		/* unlike in user space we can't get an end pointer, but
+		 * kstrtoull() reports -EINVAL if characters follow the number */
+		if (!err) {
+			int len_remain = strnlen(recv_str, recv_sz);
+
+			type = KUTF_HELPER_VALTYPE_U64;
+			recv_str += len_remain;
+			recv_sz -= len_remain;
+		} else {
+			/* special case: not a number, report as such */
+			pr_err("Rest of received string was not a numeric value or quoted string value: '%s'\n", recv_str);
+		}
+	}
+
+	if (type == KUTF_HELPER_VALTYPE_INVALID)
+		return err;
+
+	/* Any remaining characters - error */
+	if (strnlen(recv_str, recv_sz) != 0) {
+		pr_err("Characters remain after value of type %s: '%s'\n",
+				get_val_type_name(type), recv_str);
+		return KUTF_HELPER_ERR_CHARS_AFTER_VAL;
+	}
+
+	/* Success - write into the output structure */
+	switch (type) {
+	case KUTF_HELPER_VALTYPE_U64:
+		named_val->u.val_u64 = u64val;
+		break;
+	case KUTF_HELPER_VALTYPE_STR:
+		named_val->u.val_str = strval;
+		break;
+	default:
+		pr_err("Unreachable, fix kutf_helper_receive_named_val\n");
+		/* Coding error, report as though 'run' file failed */
+		return -EINVAL;
+	}
+
+	named_val->val_name = name_str;
+	named_val->type = type;
+
+	return KUTF_HELPER_ERR_NONE;
+}
+EXPORT_SYMBOL(kutf_helper_receive_named_val);
+
+#define DUMMY_MSG "<placeholder due to test fail>"
+int kutf_helper_receive_check_val(
+		struct kutf_helper_named_val *named_val,
+		struct kutf_context *context,
+		const char *expect_val_name,
+		enum kutf_helper_valtype expect_val_type)
+{
+	int err;
+
+	err = kutf_helper_receive_named_val(context, named_val);
+	if (err < 0) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Failed to receive value named '%s'",
+				expect_val_name);
+		kutf_test_fail(context, msg);
+		return err;
+	} else if (err > 0) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Named-value parse error when expecting value named '%s'",
+				expect_val_name);
+		kutf_test_fail(context, msg);
+		goto out_fail_and_fixup;
+	}
+
+	if (strcmp(named_val->val_name, expect_val_name) != 0) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Expecting to receive value named '%s' but got '%s'",
+				expect_val_name, named_val->val_name);
+		kutf_test_fail(context, msg);
+		goto out_fail_and_fixup;
+	}
+
+	if (named_val->type != expect_val_type) {
+		const char *msg = kutf_dsprintf(&context->fixture_pool,
+				"Expecting value named '%s' to be of type %s but got %s",
+				expect_val_name, get_val_type_name(expect_val_type),
+				get_val_type_name(named_val->type));
+		kutf_test_fail(context, msg);
+		goto out_fail_and_fixup;
+	}
+
+	return err;
+
+out_fail_and_fixup:
+	/* Produce a valid but incorrect value */
+	switch (expect_val_type) {
+	case KUTF_HELPER_VALTYPE_U64:
+		named_val->u.val_u64 = 0ull;
+		break;
+	case KUTF_HELPER_VALTYPE_STR:
+		{
+			char *str = kutf_mempool_alloc(&context->fixture_pool, sizeof(DUMMY_MSG));
+
+			if (!str)
+				return -1;
+
+			strcpy(str, DUMMY_MSG);
+			named_val->u.val_str = str;
+			break;
+		}
+	default:
+		break;
+	}
+
+	/* Indicate that this is invalid */
+	named_val->type = KUTF_HELPER_VALTYPE_INVALID;
+
+	/* But at least allow the caller to continue in the test with failures */
+	return 0;
+}
+EXPORT_SYMBOL(kutf_helper_receive_check_val);
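+
+/* A minimal usage sketch (value name is hypothetical): receive a u64 from
+ * userspace inside a test function and only use it if it parsed cleanly:
+ *
+ *	struct kutf_helper_named_val val;
+ *
+ *	if (!kutf_helper_receive_check_val(&val, context, "GPU_ID",
+ *			KUTF_HELPER_VALTYPE_U64) &&
+ *			val.type == KUTF_HELPER_VALTYPE_U64)
+ *		kutf_test_pass(context, "received GPU_ID");
+ */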
+
+void kutf_helper_output_named_val(struct kutf_helper_named_val *named_val)
+{
+	switch (named_val->type) {
+	case KUTF_HELPER_VALTYPE_U64:
+		pr_warn("%s=0x%llx\n", named_val->val_name, named_val->u.val_u64);
+		break;
+	case KUTF_HELPER_VALTYPE_STR:
+		pr_warn("%s=\"%s\"\n", named_val->val_name, named_val->u.val_str);
+		break;
+	case KUTF_HELPER_VALTYPE_INVALID:
+		pr_warn("%s is invalid\n", named_val->val_name);
+		break;
+	default:
+		pr_warn("%s has unknown type %d\n", named_val->val_name, named_val->type);
+		break;
+	}
+}
+EXPORT_SYMBOL(kutf_helper_output_named_val);
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c
new file mode 100644
index 0000000..fd98bea
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_mem.c
@@ -0,0 +1,108 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF memory management functions */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <kutf/kutf_mem.h>
+
+
+/**
+ * struct kutf_alloc_entry - Structure representing an allocation.
+ * @node:	List node for use with kutf_mempool.
+ * @data:	Data area of the allocation
+ */
+struct kutf_alloc_entry {
+	struct list_head node;
+	u8 data[0];
+};
+
+int kutf_mempool_init(struct kutf_mempool *pool)
+{
+	if (!pool) {
+		pr_err("NULL pointer passed to %s\n", __func__);
+		return -1;
+	}
+
+	INIT_LIST_HEAD(&pool->head);
+	mutex_init(&pool->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(kutf_mempool_init);
+
+void kutf_mempool_destroy(struct kutf_mempool *pool)
+{
+	struct list_head *remove;
+	struct list_head *tmp;
+
+	if (!pool) {
+		pr_err("NULL pointer passed to %s\n", __func__);
+		return;
+	}
+
+	mutex_lock(&pool->lock);
+	list_for_each_safe(remove, tmp, &pool->head) {
+		struct kutf_alloc_entry *remove_alloc;
+
+		remove_alloc = list_entry(remove, struct kutf_alloc_entry, node);
+		list_del(&remove_alloc->node);
+		kfree(remove_alloc);
+	}
+	mutex_unlock(&pool->lock);
+}
+EXPORT_SYMBOL(kutf_mempool_destroy);
+
+void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size)
+{
+	struct kutf_alloc_entry *ret;
+
+	if (!pool) {
+		pr_err("NULL pointer passed to %s\n", __func__);
+		goto fail_pool;
+	}
+
+	mutex_lock(&pool->lock);
+
+	ret = kmalloc(sizeof(*ret) + size, GFP_KERNEL);
+	if (!ret) {
+		pr_err("Failed to allocate memory\n");
+		goto fail_alloc;
+	}
+
+	INIT_LIST_HEAD(&ret->node);
+	list_add(&ret->node, &pool->head);
+
+	mutex_unlock(&pool->lock);
+
+	return &ret->data[0];
+
+fail_alloc:
+	mutex_unlock(&pool->lock);
+fail_pool:
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_mempool_alloc);
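+
+/* Usage sketch (the size is illustrative): allocations are not freed
+ * individually; kutf_mempool_destroy() releases everything at once:
+ *
+ *	struct kutf_mempool pool;
+ *	void *buf;
+ *
+ *	kutf_mempool_init(&pool);
+ *	buf = kutf_mempool_alloc(&pool, 64);
+ *	...
+ *	kutf_mempool_destroy(&pool);
+ */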
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c
new file mode 100644
index 0000000..94ecfa4
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_resultset.c
@@ -0,0 +1,164 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF result management functions */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_resultset.h>
+
+/* Lock to protect all result structures */
+static DEFINE_SPINLOCK(kutf_result_lock);
+
+struct kutf_result_set *kutf_create_result_set(void)
+{
+	struct kutf_result_set *set;
+
+	set = kmalloc(sizeof(*set), GFP_KERNEL);
+	if (!set) {
+		pr_err("Failed to allocate resultset");
+		goto fail_alloc;
+	}
+
+	INIT_LIST_HEAD(&set->results);
+	init_waitqueue_head(&set->waitq);
+	set->flags = 0;
+
+	return set;
+
+fail_alloc:
+	return NULL;
+}
+
+int kutf_add_result(struct kutf_context *context,
+		enum kutf_result_status status,
+		const char *message)
+{
+	struct kutf_mempool *mempool = &context->fixture_pool;
+	struct kutf_result_set *set = context->result_set;
+	/* Create the new result */
+	struct kutf_result *new_result;
+
+	BUG_ON(set == NULL);
+
+	new_result = kutf_mempool_alloc(mempool, sizeof(*new_result));
+	if (!new_result) {
+		pr_err("Result allocation failed\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&new_result->node);
+	new_result->status = status;
+	new_result->message = message;
+
+	spin_lock(&kutf_result_lock);
+
+	list_add_tail(&new_result->node, &set->results);
+
+	spin_unlock(&kutf_result_lock);
+
+	wake_up(&set->waitq);
+
+	return 0;
+}
+
+void kutf_destroy_result_set(struct kutf_result_set *set)
+{
+	if (!list_empty(&set->results))
+		pr_err("kutf_destroy_result_set: Unread results from test\n");
+
+	kfree(set);
+}
+
+static bool kutf_has_result(struct kutf_result_set *set)
+{
+	bool has_result;
+
+	spin_lock(&kutf_result_lock);
+	if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT)
+		/* Pretend there are results if waiting for input */
+		has_result = true;
+	else
+		has_result = !list_empty(&set->results);
+	spin_unlock(&kutf_result_lock);
+
+	return has_result;
+}
+
+struct kutf_result *kutf_remove_result(struct kutf_result_set *set)
+{
+	struct kutf_result *result = NULL;
+	int ret;
+
+	do {
+		ret = wait_event_interruptible(set->waitq,
+				kutf_has_result(set));
+
+		if (ret)
+			return ERR_PTR(ret);
+
+		spin_lock(&kutf_result_lock);
+
+		if (!list_empty(&set->results)) {
+			result = list_first_entry(&set->results,
+					struct kutf_result,
+					node);
+			list_del(&result->node);
+		} else if (set->flags & KUTF_RESULT_SET_WAITING_FOR_INPUT) {
+			/* Return a fake result */
+			static struct kutf_result waiting = {
+				.status = KUTF_RESULT_USERDATA_WAIT
+			};
+			result = &waiting;
+		}
+		/* If result == NULL then there was a race with the event
+		 * being removed between the check in kutf_has_result and
+		 * the lock being obtained. In this case we retry
+		 */
+
+		spin_unlock(&kutf_result_lock);
+	} while (result == NULL);
+
+	return result;
+}
+
+void kutf_set_waiting_for_input(struct kutf_result_set *set)
+{
+	spin_lock(&kutf_result_lock);
+	set->flags |= KUTF_RESULT_SET_WAITING_FOR_INPUT;
+	spin_unlock(&kutf_result_lock);
+
+	wake_up(&set->waitq);
+}
+
+void kutf_clear_waiting_for_input(struct kutf_result_set *set)
+{
+	spin_lock(&kutf_result_lock);
+	set->flags &= ~KUTF_RESULT_SET_WAITING_FOR_INPUT;
+	spin_unlock(&kutf_result_lock);
+}
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c
new file mode 100644
index 0000000..f3a8e9b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_suite.c
@@ -0,0 +1,1203 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF suite, test and fixture management including user to kernel
+ * interaction */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/atomic.h>
+#include <linux/sched.h>
+
+#include <generated/autoconf.h>
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_resultset.h>
+#include <kutf/kutf_utils.h>
+#include <kutf/kutf_helpers.h>
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * struct kutf_application - Structure which represents kutf application
+ * @name:	The name of this test application.
+ * @dir:	The debugfs directory for this test
+ * @suite_list:	List head to store all the suites which are part of this
+ *              application
+ */
+struct kutf_application {
+	const char         *name;
+	struct dentry      *dir;
+	struct list_head   suite_list;
+};
+
+/**
+ * struct kutf_test_function - Structure which represents kutf test function
+ * @suite:		Back reference to the suite this test function
+ *                      belongs to
+ * @filters:		Filters that apply to this test function
+ * @test_id:		Test ID
+ * @execute:		Function to run for this test
+ * @test_data:		Static data for this test
+ * @node:		List node for test_list
+ * @variant_list:	List head to store all the variants which can run on
+ *                      this function
+ * @dir:		debugfs directory for this test function
+ */
+struct kutf_test_function {
+	struct kutf_suite  *suite;
+	unsigned int       filters;
+	unsigned int       test_id;
+	void (*execute)(struct kutf_context *context);
+	union kutf_callback_data test_data;
+	struct list_head   node;
+	struct list_head   variant_list;
+	struct dentry      *dir;
+};
+
+/**
+ * struct kutf_test_fixture - Structure which holds information on the kutf
+ *                            test fixture
+ * @test_func:		Test function this fixture belongs to
+ * @fixture_index:	Index of this fixture
+ * @node:		List node for variant_list
+ * @dir:		debugfs directory for this test fixture
+ */
+struct kutf_test_fixture {
+	struct kutf_test_function *test_func;
+	unsigned int              fixture_index;
+	struct list_head          node;
+	struct dentry             *dir;
+};
+
+static struct dentry *base_dir;
+static struct workqueue_struct *kutf_workq;
+
+/**
+ * struct kutf_convert_table - Structure which keeps test results
+ * @result_name:	Status of the test result
+ * @result:		Status value for a single test
+ */
+struct kutf_convert_table {
+	char                    result_name[50];
+	enum kutf_result_status result;
+};
+
+struct kutf_convert_table kutf_convert[] = {
+#define ADD_UTF_RESULT(_name) \
+{ \
+	#_name, \
+	_name, \
+},
+ADD_UTF_RESULT(KUTF_RESULT_BENCHMARK)
+ADD_UTF_RESULT(KUTF_RESULT_SKIP)
+ADD_UTF_RESULT(KUTF_RESULT_UNKNOWN)
+ADD_UTF_RESULT(KUTF_RESULT_PASS)
+ADD_UTF_RESULT(KUTF_RESULT_DEBUG)
+ADD_UTF_RESULT(KUTF_RESULT_INFO)
+ADD_UTF_RESULT(KUTF_RESULT_WARN)
+ADD_UTF_RESULT(KUTF_RESULT_FAIL)
+ADD_UTF_RESULT(KUTF_RESULT_FATAL)
+ADD_UTF_RESULT(KUTF_RESULT_ABORT)
+};
+
+#define UTF_CONVERT_SIZE (ARRAY_SIZE(kutf_convert))
+
+/**
+ * kutf_create_context() - Create a test context in which a specific fixture
+ *                         of an application will be run and its results
+ *                         reported back to the user
+ * @test_fix:	Test fixture to be run.
+ *
+ * The context's refcount will be initialized to 1.
+ *
+ * Return: Returns the created test context on success or NULL on failure
+ */
+static struct kutf_context *kutf_create_context(
+		struct kutf_test_fixture *test_fix);
+
+/**
+ * kutf_destroy_context() - Destroy a previously created test context, only
+ *                          once its refcount has become zero
+ * @kref:	pointer to kref member within the context
+ *
+ * This should only be used via a kref_put() call on the context's kref member
+ */
+static void kutf_destroy_context(struct kref *kref);
+
+/**
+ * kutf_context_get() - increment refcount on a context
+ * @context:	the kutf context
+ *
+ * This must be used when the lifetime of the context might exceed that of the
+ * thread creating @context
+ */
+static void kutf_context_get(struct kutf_context *context);
+
+/**
+ * kutf_context_put() - decrement refcount on a context, destroying it when it
+ *                      reached zero
+ * @context:	the kutf context
+ *
+ * This must be used only after a corresponding kutf_context_get() call on
+ * @context, and the caller no longer needs access to @context.
+ */
+static void kutf_context_put(struct kutf_context *context);
+
+/**
+ * kutf_set_result() - Set the test result against the specified test context
+ * @context:	Test context
+ * @status:	Result status
+ */
+static void kutf_set_result(struct kutf_context *context,
+		enum kutf_result_status status);
+
+/**
+ * kutf_set_expected_result() - Set the expected test result for the specified
+ *                              test context
+ * @context:		Test context
+ * @expected_status:	Expected result status
+ */
+static void kutf_set_expected_result(struct kutf_context *context,
+		enum kutf_result_status expected_status);
+
+/**
+ * kutf_result_to_string() - Converts a KUTF result into a string
+ * @result_str:      Output result string
+ * @result:          Result status to convert
+ *
+ * Return: 1 if test result was successfully converted to string, 0 otherwise
+ */
+static int kutf_result_to_string(char **result_str,
+		enum kutf_result_status result)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < UTF_CONVERT_SIZE; i++) {
+		if (result == kutf_convert[i].result) {
+			*result_str = kutf_convert[i].result_name;
+			ret = 1;
+		}
+	}
+	return ret;
+}
+
+/**
+ * kutf_debugfs_const_string_read() - Simple debugfs read callback which
+ *                                    returns a constant string
+ * @file:	Opened file to read from
+ * @buf:	User buffer to write the data into
+ * @len:	Amount of data to read
+ * @ppos:	Offset into file to read from
+ *
+ * Return: On success, the number of bytes read and offset @ppos advanced by
+ *         this number; on error, negative value
+ */
+static ssize_t kutf_debugfs_const_string_read(struct file *file,
+		char __user *buf, size_t len, loff_t *ppos)
+{
+	char *str = file->private_data;
+
+	return simple_read_from_buffer(buf, len, ppos, str, strlen(str));
+}
+
+static const struct file_operations kutf_debugfs_const_string_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = kutf_debugfs_const_string_read,
+	.llseek  = default_llseek,
+};
+
+/**
+ * kutf_add_explicit_result() - Check if an explicit result needs to be added
+ * @context:	KUTF test context
+ */
+static void kutf_add_explicit_result(struct kutf_context *context)
+{
+	switch (context->expected_status) {
+	case KUTF_RESULT_UNKNOWN:
+		break;
+
+	case KUTF_RESULT_WARN:
+		if (context->status == KUTF_RESULT_WARN)
+			kutf_test_pass(context,
+					"Pass (expected warn occurred)");
+		else if (context->status != KUTF_RESULT_SKIP)
+			kutf_test_fail(context,
+					"Fail (expected warn missing)");
+		break;
+
+	case KUTF_RESULT_FAIL:
+		if (context->status == KUTF_RESULT_FAIL)
+			kutf_test_pass(context,
+					"Pass (expected fail occurred)");
+		else if (context->status != KUTF_RESULT_SKIP) {
+			/* Force the expected status so the fail gets logged */
+			context->expected_status = KUTF_RESULT_PASS;
+			kutf_test_fail(context,
+					"Fail (expected fail missing)");
+		}
+		break;
+
+	case KUTF_RESULT_FATAL:
+		if (context->status == KUTF_RESULT_FATAL)
+			kutf_test_pass(context,
+					"Pass (expected fatal occurred)");
+		else if (context->status != KUTF_RESULT_SKIP)
+			kutf_test_fail(context,
+					"Fail (expected fatal missing)");
+		break;
+
+	case KUTF_RESULT_ABORT:
+		if (context->status == KUTF_RESULT_ABORT)
+			kutf_test_pass(context,
+					"Pass (expected abort occurred)");
+		else if (context->status != KUTF_RESULT_SKIP)
+			kutf_test_fail(context,
+					"Fail (expected abort missing)");
+		break;
+	default:
+		break;
+	}
+}
+
+static void kutf_run_test(struct work_struct *data)
+{
+	struct kutf_context *test_context = container_of(data,
+			struct kutf_context, work);
+	struct kutf_suite *suite = test_context->suite;
+	struct kutf_test_function *test_func;
+
+	test_func = test_context->test_fix->test_func;
+
+	/*
+	 * Call the create fixture function if required before the
+	 * fixture is run
+	 */
+	if (suite->create_fixture)
+		test_context->fixture = suite->create_fixture(test_context);
+
+	/* Only run the test if the fixture was created (if required) */
+	if ((suite->create_fixture && test_context->fixture) ||
+			(!suite->create_fixture)) {
+		/* Run this fixture */
+		test_func->execute(test_context);
+
+		if (suite->remove_fixture)
+			suite->remove_fixture(test_context);
+
+		kutf_add_explicit_result(test_context);
+	}
+
+	kutf_add_result(test_context, KUTF_RESULT_TEST_FINISHED, NULL);
+
+	kutf_context_put(test_context);
+}
+
+/**
+ * kutf_debugfs_run_open() - Debugfs open callback for the "run" entry.
+ * @inode:	inode of the opened file
+ * @file:	Opened file to read from
+ *
+ * This function creates a KUTF context and queues it onto a workqueue to be
+ * run asynchronously. The resulting file descriptor can be used to communicate
+ * userdata to the test and to read back the results of the test execution.
+ *
+ * Return: 0 on success
+ */
+static int kutf_debugfs_run_open(struct inode *inode, struct file *file)
+{
+	struct kutf_test_fixture *test_fix = inode->i_private;
+	struct kutf_context *test_context;
+	int err = 0;
+
+	test_context = kutf_create_context(test_fix);
+	if (!test_context) {
+		err = -ENOMEM;
+		goto finish;
+	}
+
+	file->private_data = test_context;
+
+	/* This reference is released by kutf_run_test() */
+	kutf_context_get(test_context);
+
+	queue_work(kutf_workq, &test_context->work);
+
+finish:
+	return err;
+}
+
+#define USERDATA_WARNING_MESSAGE "WARNING: This test requires userdata\n"
+
+/**
+ * kutf_debugfs_run_read() - Debugfs read callback for the "run" entry.
+ * @file:	Opened file to read from
+ * @buf:	User buffer to write the data into
+ * @len:	Amount of data to read
+ * @ppos:	Offset into file to read from
+ *
+ * This function emits the results of the test, blocking until they are
+ * available.
+ *
+ * If the test involves user data then this will also return user data records
+ * to user space. If the test is waiting for user data then this function will
+ * output a message (to make the likes of 'cat' display it), followed by
+ * returning 0 to mark the end of file.
+ *
+ * Results will be emitted one at a time, once all the results have been read
+ * 0 will be returned to indicate there is no more data.
+ *
+ * Return: Number of bytes read.
+ */
+static ssize_t kutf_debugfs_run_read(struct file *file, char __user *buf,
+		size_t len, loff_t *ppos)
+{
+	struct kutf_context *test_context = file->private_data;
+	struct kutf_result *res;
+	unsigned long bytes_not_copied;
+	ssize_t bytes_copied = 0;
+	char *kutf_str_ptr = NULL;
+	size_t kutf_str_len = 0;
+	size_t message_len = 0;
+	char separator = ':';
+	char terminator = '\n';
+
+	res = kutf_remove_result(test_context->result_set);
+
+	if (IS_ERR(res))
+		return PTR_ERR(res);
+
+	/*
+	 * Handle 'fake' results - these results are converted to another
+	 * form before being returned from the kernel
+	 */
+	switch (res->status) {
+	case KUTF_RESULT_TEST_FINISHED:
+		return 0;
+	case KUTF_RESULT_USERDATA_WAIT:
+		if (test_context->userdata.flags &
+				KUTF_USERDATA_WARNING_OUTPUT) {
+			/*
+			 * Warning message already output,
+			 * signal end-of-file
+			 */
+			return 0;
+		}
+
+		message_len = sizeof(USERDATA_WARNING_MESSAGE)-1;
+		if (message_len > len)
+			message_len = len;
+
+		bytes_not_copied = copy_to_user(buf,
+				USERDATA_WARNING_MESSAGE,
+				message_len);
+		if (bytes_not_copied != 0)
+			return -EFAULT;
+		test_context->userdata.flags |= KUTF_USERDATA_WARNING_OUTPUT;
+		return message_len;
+	case KUTF_RESULT_USERDATA:
+		message_len = strlen(res->message);
+		if (message_len > len-1) {
+			message_len = len-1;
+			pr_warn("User data truncated, read not long enough\n");
+		}
+		bytes_not_copied = copy_to_user(buf, res->message,
+				message_len);
+		if (bytes_not_copied != 0) {
+			pr_warn("Failed to copy data to user space buffer\n");
+			return -EFAULT;
+		}
+		/* Finally the terminator */
+		bytes_not_copied = copy_to_user(&buf[message_len],
+				&terminator, 1);
+		if (bytes_not_copied != 0) {
+			pr_warn("Failed to copy data to user space buffer\n");
+			return -EFAULT;
+		}
+		return message_len+1;
+	default:
+		/* Not a special case - this is a normal test result, handled below */
+		break;
+	}
+
+	/* Note: This code assumes a result is read completely */
+	kutf_result_to_string(&kutf_str_ptr, res->status);
+	if (kutf_str_ptr)
+		kutf_str_len = strlen(kutf_str_ptr);
+
+	if (res->message)
+		message_len = strlen(res->message);
+
+	if ((kutf_str_len + 1 + message_len + 1) > len) {
+		pr_err("Not enough space in user buffer for a single result");
+		return 0;
+	}
+
+	/* First copy the result string */
+	if (kutf_str_ptr) {
+		bytes_not_copied = copy_to_user(&buf[0], kutf_str_ptr,
+						kutf_str_len);
+		bytes_copied += kutf_str_len - bytes_not_copied;
+		if (bytes_not_copied)
+			goto exit;
+	}
+
+	/* Then the separator */
+	bytes_not_copied = copy_to_user(&buf[bytes_copied],
+					&separator, 1);
+	bytes_copied += 1 - bytes_not_copied;
+	if (bytes_not_copied)
+		goto exit;
+
+	/* Next copy the message string */
+	if (res->message) {
+		bytes_not_copied = copy_to_user(&buf[bytes_copied],
+						res->message, message_len);
+		bytes_copied += message_len - bytes_not_copied;
+		if (bytes_not_copied)
+			goto exit;
+	}
+
+	/* Finally the terminator */
+	bytes_not_copied = copy_to_user(&buf[bytes_copied],
+					&terminator, 1);
+	bytes_copied += 1 - bytes_not_copied;
+
+exit:
+	return bytes_copied;
+}
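+
+/* For illustration only: with the conversion table above, a test that logged
+ * one info message and then passed explicitly would be read back from the
+ * 'run' file as:
+ *
+ *	KUTF_RESULT_INFO:setup complete
+ *	KUTF_RESULT_PASS:(explicit pass)
+ */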
+
+/**
+ * kutf_debugfs_run_write() - Debugfs write callback for the "run" entry.
+ * @file:	Opened file to write to
+ * @buf:	User buffer to read the data from
+ * @len:	Amount of data to write
+ * @ppos:	Offset into file to write to
+ *
+ * This function allows user and kernel to exchange extra data necessary for
+ * the test fixture.
+ *
+ * The data is added to the first struct kutf_context running the fixture
+ *
+ * Return: Number of bytes written
+ */
+static ssize_t kutf_debugfs_run_write(struct file *file,
+		const char __user *buf, size_t len, loff_t *ppos)
+{
+	int ret = 0;
+	struct kutf_context *test_context = file->private_data;
+
+	if (len > KUTF_MAX_LINE_LENGTH)
+		return -EINVAL;
+
+	ret = kutf_helper_input_enqueue(test_context, buf, len);
+	if (ret < 0)
+		return ret;
+
+	return len;
+}
+
+/**
+ * kutf_debugfs_run_release() - Debugfs release callback for the "run" entry.
+ * @inode:	File entry representation
+ * @file:	A specific opening of the file
+ *
+ * Release any resources that were created during the opening of the file
+ *
+ * Note that resources may not be released immediately, that might only happen
+ * later when other users of the kutf_context release their refcount.
+ *
+ * Return: 0 on success
+ */
+static int kutf_debugfs_run_release(struct inode *inode, struct file *file)
+{
+	struct kutf_context *test_context = file->private_data;
+
+	kutf_helper_input_enqueue_end_of_data(test_context);
+
+	kutf_context_put(test_context);
+	return 0;
+}
+
+static const struct file_operations kutf_debugfs_run_ops = {
+	.owner = THIS_MODULE,
+	.open = kutf_debugfs_run_open,
+	.read = kutf_debugfs_run_read,
+	.write = kutf_debugfs_run_write,
+	.release = kutf_debugfs_run_release,
+	.llseek  = default_llseek,
+};
+
+/**
+ * create_fixture_variant() - Creates a fixture variant for the specified
+ *                            test function and index and the debugfs entries
+ *                            that represent it.
+ * @test_func:		Test function
+ * @fixture_index:	Fixture index
+ *
+ * Return: 0 on success, negative value corresponding to error code in failure
+ */
+static int create_fixture_variant(struct kutf_test_function *test_func,
+		unsigned int fixture_index)
+{
+	struct kutf_test_fixture *test_fix;
+	char name[11];	/* Enough to print UINT32_MAX plus the NUL terminator */
+	struct dentry *tmp;
+	int err;
+
+	test_fix = kmalloc(sizeof(*test_fix), GFP_KERNEL);
+	if (!test_fix) {
+		pr_err("Failed to allocate memory when adding fixture\n");
+		err = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	test_fix->test_func = test_func;
+	test_fix->fixture_index = fixture_index;
+
+	snprintf(name, sizeof(name), "%d", fixture_index);
+	test_fix->dir = debugfs_create_dir(name, test_func->dir);
+	if (!test_fix->dir) {
+		pr_err("Failed to create debugfs directory when adding fixture\n");
+		/* Might not be the right error, we don't get it passed back to us */
+		err = -EEXIST;
+		goto fail_dir;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, test_fix->dir, "fixture\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when adding fixture\n");
+		/* Might not be the right error, we don't get it passed back to us */
+		err = -EEXIST;
+		goto fail_file;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+	tmp = debugfs_create_file_unsafe(
+#else
+	tmp = debugfs_create_file(
+#endif
+			"run", 0600, test_fix->dir,
+			test_fix,
+			&kutf_debugfs_run_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"run\" when adding fixture\n");
+		/* Might not be the right error, we don't get it passed back to us */
+		err = -EEXIST;
+		goto fail_file;
+	}
+
+	list_add(&test_fix->node, &test_func->variant_list);
+	return 0;
+
+fail_file:
+	debugfs_remove_recursive(test_fix->dir);
+fail_dir:
+	kfree(test_fix);
+fail_alloc:
+	return err;
+}
+
+/**
+ * kutf_remove_test_variant() - Destroy a previously created fixture variant.
+ * @test_fix:	Test fixture
+ */
+static void kutf_remove_test_variant(struct kutf_test_fixture *test_fix)
+{
+	debugfs_remove_recursive(test_fix->dir);
+	kfree(test_fix);
+}
+
+void kutf_add_test_with_filters_and_data(
+		struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data test_data)
+{
+	struct kutf_test_function *test_func;
+	struct dentry *tmp;
+	unsigned int i;
+
+	test_func = kmalloc(sizeof(*test_func), GFP_KERNEL);
+	if (!test_func) {
+		pr_err("Failed to allocate memory when adding test %s\n", name);
+		goto fail_alloc;
+	}
+
+	INIT_LIST_HEAD(&test_func->variant_list);
+
+	test_func->dir = debugfs_create_dir(name, suite->dir);
+	if (!test_func->dir) {
+		pr_err("Failed to create debugfs directory when adding test %s\n", name);
+		goto fail_dir;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, test_func->dir, "test\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when adding test %s\n", name);
+		goto fail_file;
+	}
+
+	test_func->filters = filters;
+	tmp = debugfs_create_x32("filters", S_IROTH, test_func->dir,
+				 &test_func->filters);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"filters\" when adding test %s\n", name);
+		goto fail_file;
+	}
+
+	test_func->test_id = id;
+	tmp = debugfs_create_u32("test_id", S_IROTH, test_func->dir,
+				 &test_func->test_id);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"test_id\" when adding test %s\n", name);
+		goto fail_file;
+	}
+
+	for (i = 0; i < suite->fixture_variants; i++) {
+		if (create_fixture_variant(test_func, i)) {
+			pr_err("Failed to create fixture %d when adding test %s\n", i, name);
+			goto fail_file;
+		}
+	}
+
+	test_func->suite = suite;
+	test_func->execute = execute;
+	test_func->test_data = test_data;
+
+	list_add(&test_func->node, &suite->test_list);
+	return;
+
+fail_file:
+	debugfs_remove_recursive(test_func->dir);
+fail_dir:
+	kfree(test_func);
+fail_alloc:
+	return;
+}
+EXPORT_SYMBOL(kutf_add_test_with_filters_and_data);
+
+void kutf_add_test_with_filters(
+		struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context),
+		unsigned int filters)
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+
+	kutf_add_test_with_filters_and_data(suite,
+					    id,
+					    name,
+					    execute,
+					    filters,
+					    data);
+}
+EXPORT_SYMBOL(kutf_add_test_with_filters);
+
+void kutf_add_test(struct kutf_suite *suite,
+		unsigned int id,
+		const char *name,
+		void (*execute)(struct kutf_context *context))
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+
+	kutf_add_test_with_filters_and_data(suite,
+					    id,
+					    name,
+					    execute,
+					    suite->suite_default_flags,
+					    data);
+}
+EXPORT_SYMBOL(kutf_add_test);
+
+/**
+ * kutf_remove_test() - Remove a previously added test function.
+ * @test_func: Test function
+ */
+static void kutf_remove_test(struct kutf_test_function *test_func)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	list_for_each_safe(pos, tmp, &test_func->variant_list) {
+		struct kutf_test_fixture *test_fix;
+
+		test_fix = list_entry(pos, struct kutf_test_fixture, node);
+		kutf_remove_test_variant(test_fix);
+	}
+
+	list_del(&test_func->node);
+	debugfs_remove_recursive(test_func->dir);
+	kfree(test_func);
+}
+
+struct kutf_suite *kutf_create_suite_with_filters_and_data(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters,
+		union kutf_callback_data suite_data)
+{
+	struct kutf_suite *suite;
+	struct dentry *tmp;
+
+	suite = kmalloc(sizeof(*suite), GFP_KERNEL);
+	if (!suite) {
+		pr_err("Failed to allocate memory when creating suite %s\n", name);
+		goto fail_kmalloc;
+	}
+
+	suite->dir = debugfs_create_dir(name, app->dir);
+	if (!suite->dir) {
+		pr_err("Failed to create debugfs directory when creating suite %s\n", name);
+		goto fail_debugfs;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, suite->dir, "suite\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when creating suite %s\n", name);
+		goto fail_file;
+	}
+
+	INIT_LIST_HEAD(&suite->test_list);
+	suite->app = app;
+	suite->name = name;
+	suite->fixture_variants = fixture_count;
+	suite->create_fixture = create_fixture;
+	suite->remove_fixture = remove_fixture;
+	suite->suite_default_flags = filters;
+	suite->suite_data = suite_data;
+
+	list_add(&suite->node, &app->suite_list);
+
+	return suite;
+
+fail_file:
+	debugfs_remove_recursive(suite->dir);
+fail_debugfs:
+	kfree(suite);
+fail_kmalloc:
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_create_suite_with_filters_and_data);
+
+struct kutf_suite *kutf_create_suite_with_filters(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context),
+		unsigned int filters)
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+	return kutf_create_suite_with_filters_and_data(app,
+						       name,
+						       fixture_count,
+						       create_fixture,
+						       remove_fixture,
+						       filters,
+						       data);
+}
+EXPORT_SYMBOL(kutf_create_suite_with_filters);
+
+struct kutf_suite *kutf_create_suite(
+		struct kutf_application *app,
+		const char *name,
+		unsigned int fixture_count,
+		void *(*create_fixture)(struct kutf_context *context),
+		void (*remove_fixture)(struct kutf_context *context))
+{
+	union kutf_callback_data data;
+
+	data.ptr_value = NULL;
+	return kutf_create_suite_with_filters_and_data(app,
+						       name,
+						       fixture_count,
+						       create_fixture,
+						       remove_fixture,
+						       KUTF_F_TEST_GENERIC,
+						       data);
+}
+EXPORT_SYMBOL(kutf_create_suite);
+
+/**
+ * kutf_destroy_suite() - Destroy a previously added test suite.
+ * @suite:	Test suite
+ */
+static void kutf_destroy_suite(struct kutf_suite *suite)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	list_for_each_safe(pos, tmp, &suite->test_list) {
+		struct kutf_test_function *test_func;
+
+		test_func = list_entry(pos, struct kutf_test_function, node);
+		kutf_remove_test(test_func);
+	}
+
+	list_del(&suite->node);
+	debugfs_remove_recursive(suite->dir);
+	kfree(suite);
+}
+
+struct kutf_application *kutf_create_application(const char *name)
+{
+	struct kutf_application *app;
+	struct dentry *tmp;
+
+	app = kmalloc(sizeof(*app), GFP_KERNEL);
+	if (!app) {
+		pr_err("Failed to allocate memory when creating application %s\n", name);
+		goto fail_kmalloc;
+	}
+
+	app->dir = debugfs_create_dir(name, base_dir);
+	if (!app->dir) {
+		pr_err("Failed to create debugfs directory when creating application %s\n", name);
+		goto fail_debugfs;
+	}
+
+	tmp = debugfs_create_file("type", S_IROTH, app->dir, "application\n",
+				  &kutf_debugfs_const_string_ops);
+	if (!tmp) {
+		pr_err("Failed to create debugfs file \"type\" when creating application %s\n", name);
+		goto fail_file;
+	}
+
+	INIT_LIST_HEAD(&app->suite_list);
+	app->name = name;
+
+	return app;
+
+fail_file:
+	debugfs_remove_recursive(app->dir);
+fail_debugfs:
+	kfree(app);
+fail_kmalloc:
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_create_application);
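+
+/* A minimal registration sketch (all names are hypothetical), typically run
+ * from a test module's init function:
+ *
+ *	app = kutf_create_application("example_app");
+ *	suite = kutf_create_suite(app, "example_suite", 1, NULL, NULL);
+ *	kutf_add_test(suite, 0x0, "example_test", example_test_func);
+ *
+ * The resulting entries appear under debugfs at
+ * kutf_tests/example_app/example_suite/example_test/.
+ */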
+
+void kutf_destroy_application(struct kutf_application *app)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	list_for_each_safe(pos, tmp, &app->suite_list) {
+		struct kutf_suite *suite;
+
+		suite = list_entry(pos, struct kutf_suite, node);
+		kutf_destroy_suite(suite);
+	}
+
+	debugfs_remove_recursive(app->dir);
+	kfree(app);
+}
+EXPORT_SYMBOL(kutf_destroy_application);
+
+static struct kutf_context *kutf_create_context(
+		struct kutf_test_fixture *test_fix)
+{
+	struct kutf_context *new_context;
+
+	new_context = kmalloc(sizeof(*new_context), GFP_KERNEL);
+	if (!new_context) {
+		pr_err("Failed to allocate test context");
+		goto fail_alloc;
+	}
+
+	new_context->result_set = kutf_create_result_set();
+	if (!new_context->result_set) {
+		pr_err("Failed to create result set");
+		goto fail_result_set;
+	}
+
+	new_context->test_fix = test_fix;
+	/* Save the pointer to the suite as the callbacks will require it */
+	new_context->suite = test_fix->test_func->suite;
+	new_context->status = KUTF_RESULT_UNKNOWN;
+	new_context->expected_status = KUTF_RESULT_UNKNOWN;
+
+	kutf_mempool_init(&new_context->fixture_pool);
+	new_context->fixture = NULL;
+	new_context->fixture_index = test_fix->fixture_index;
+	new_context->fixture_name = NULL;
+	new_context->test_data = test_fix->test_func->test_data;
+
+	new_context->userdata.flags = 0;
+	INIT_LIST_HEAD(&new_context->userdata.input_head);
+	init_waitqueue_head(&new_context->userdata.input_waitq);
+
+	INIT_WORK(&new_context->work, kutf_run_test);
+
+	kref_init(&new_context->kref);
+
+	return new_context;
+
+fail_result_set:
+	kfree(new_context);
+fail_alloc:
+	return NULL;
+}
+
+static void kutf_destroy_context(struct kref *kref)
+{
+	struct kutf_context *context;
+
+	context = container_of(kref, struct kutf_context, kref);
+	kutf_destroy_result_set(context->result_set);
+	kutf_mempool_destroy(&context->fixture_pool);
+	kfree(context);
+}
+
+static void kutf_context_get(struct kutf_context *context)
+{
+	kref_get(&context->kref);
+}
+
+static void kutf_context_put(struct kutf_context *context)
+{
+	kref_put(&context->kref, kutf_destroy_context);
+}
+
+static void kutf_set_result(struct kutf_context *context,
+		enum kutf_result_status status)
+{
+	context->status = status;
+}
+
+static void kutf_set_expected_result(struct kutf_context *context,
+		enum kutf_result_status expected_status)
+{
+	context->expected_status = expected_status;
+}
+
+/**
+ * kutf_test_log_result() - Log a result for the specified test context
+ * @context:	Test context
+ * @message:	Result string
+ * @new_status:	Result status
+ */
+static void kutf_test_log_result(
+	struct kutf_context *context,
+	const char *message,
+	enum kutf_result_status new_status)
+{
+	if (context->status < new_status)
+		context->status = new_status;
+
+	if (context->expected_status != new_status)
+		kutf_add_result(context, new_status, message);
+}
+
+void kutf_test_log_result_external(
+	struct kutf_context *context,
+	const char *message,
+	enum kutf_result_status new_status)
+{
+	kutf_test_log_result(context, message, new_status);
+}
+EXPORT_SYMBOL(kutf_test_log_result_external);
+
+void kutf_test_expect_abort(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_ABORT);
+}
+EXPORT_SYMBOL(kutf_test_expect_abort);
+
+void kutf_test_expect_fatal(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_FATAL);
+}
+EXPORT_SYMBOL(kutf_test_expect_fatal);
+
+void kutf_test_expect_fail(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_FAIL);
+}
+EXPORT_SYMBOL(kutf_test_expect_fail);
+
+void kutf_test_expect_warn(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_WARN);
+}
+EXPORT_SYMBOL(kutf_test_expect_warn);
+
+void kutf_test_expect_pass(struct kutf_context *context)
+{
+	kutf_set_expected_result(context, KUTF_RESULT_PASS);
+}
+EXPORT_SYMBOL(kutf_test_expect_pass);
+
+void kutf_test_skip(struct kutf_context *context)
+{
+	kutf_set_result(context, KUTF_RESULT_SKIP);
+	kutf_set_expected_result(context, KUTF_RESULT_UNKNOWN);
+
+	kutf_test_log_result(context, "Test skipped", KUTF_RESULT_SKIP);
+}
+EXPORT_SYMBOL(kutf_test_skip);
+
+void kutf_test_skip_msg(struct kutf_context *context, const char *message)
+{
+	kutf_set_result(context, KUTF_RESULT_SKIP);
+	kutf_set_expected_result(context, KUTF_RESULT_UNKNOWN);
+
+	kutf_test_log_result(context, kutf_dsprintf(&context->fixture_pool,
+			     "Test skipped: %s", message), KUTF_RESULT_SKIP);
+	kutf_test_log_result(context, "!!!Test skipped!!!", KUTF_RESULT_SKIP);
+}
+EXPORT_SYMBOL(kutf_test_skip_msg);
+
+void kutf_test_debug(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_DEBUG);
+}
+EXPORT_SYMBOL(kutf_test_debug);
+
+void kutf_test_pass(struct kutf_context *context, char const *message)
+{
+	static const char explicit_message[] = "(explicit pass)";
+
+	if (!message)
+		message = explicit_message;
+
+	kutf_test_log_result(context, message, KUTF_RESULT_PASS);
+}
+EXPORT_SYMBOL(kutf_test_pass);
+
+void kutf_test_info(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_INFO);
+}
+EXPORT_SYMBOL(kutf_test_info);
+
+void kutf_test_warn(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_WARN);
+}
+EXPORT_SYMBOL(kutf_test_warn);
+
+void kutf_test_fail(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_FAIL);
+}
+EXPORT_SYMBOL(kutf_test_fail);
+
+void kutf_test_fatal(struct kutf_context *context, char const *message)
+{
+	kutf_test_log_result(context, message, KUTF_RESULT_FATAL);
+}
+EXPORT_SYMBOL(kutf_test_fatal);
+
+void kutf_test_abort(struct kutf_context *context)
+{
+	kutf_test_log_result(context, "", KUTF_RESULT_ABORT);
+}
+EXPORT_SYMBOL(kutf_test_abort);
+
+/**
+ * init_kutf_core() - Module entry point.
+ *
+ * Create the base entry point in debugfs.
+ */
+static int __init init_kutf_core(void)
+{
+	kutf_workq = alloc_workqueue("kutf workq", WQ_UNBOUND, 1);
+	if (!kutf_workq)
+		return -ENOMEM;
+
+	base_dir = debugfs_create_dir("kutf_tests", NULL);
+	if (!base_dir) {
+		destroy_workqueue(kutf_workq);
+		kutf_workq = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * exit_kutf_core() - Module exit point.
+ *
+ * Remove the base entry point in debugfs.
+ */
+static void __exit exit_kutf_core(void)
+{
+	debugfs_remove_recursive(base_dir);
+
+	if (kutf_workq)
+		destroy_workqueue(kutf_workq);
+}
+
+#else	/* defined(CONFIG_DEBUG_FS) */
+
+/**
+ * init_kutf_core() - Module entry point.
+ *
+ * Stub for when built against a kernel without debugfs support
+ */
+static int __init init_kutf_core(void)
+{
+	pr_debug("KUTF requires a kernel with debugfs support");
+
+	return -ENODEV;
+}
+
+/**
+ * exit_kutf_core() - Module exit point.
+ *
+ * Stub for when built against a kernel without debugfs support
+ */
+static void __exit exit_kutf_core(void)
+{
+}
+#endif	/* defined(CONFIG_DEBUG_FS) */
+
+MODULE_LICENSE("GPL");
+
+module_init(init_kutf_core);
+module_exit(exit_kutf_core);
diff --git a/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c b/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c
new file mode 100644
index 0000000..7f5ac517
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/kutf_utils.c
@@ -0,0 +1,76 @@
+/*
+ *
+ * (C) COPYRIGHT 2014, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/* Kernel UTF utility functions */
+
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include <kutf/kutf_utils.h>
+#include <kutf/kutf_mem.h>
+
+static char tmp_buffer[KUTF_MAX_DSPRINTF_LEN];
+
+DEFINE_MUTEX(buffer_lock);
+
+const char *kutf_dsprintf(struct kutf_mempool *pool,
+		const char *fmt, ...)
+{
+	va_list args;
+	int len;
+	int size;
+	void *buffer;
+
+	mutex_lock(&buffer_lock);
+	va_start(args, fmt);
+	len = vsnprintf(tmp_buffer, sizeof(tmp_buffer), fmt, args);
+	va_end(args);
+
+	if (len < 0) {
+		pr_err("kutf_dsprintf: Bad format string %s\n", fmt);
+		goto fail_format;
+	}
+
+	if (len >= sizeof(tmp_buffer)) {
+		pr_warn("kutf_dsprintf: Truncated dsprintf message %s\n", fmt);
+		size = sizeof(tmp_buffer);
+	} else {
+		size = len + 1;
+	}
+
+	buffer = kutf_mempool_alloc(pool, size);
+	if (!buffer)
+		goto fail_alloc;
+
+	memcpy(buffer, tmp_buffer, size);
+	mutex_unlock(&buffer_lock);
+
+	return buffer;
+
+fail_alloc:
+fail_format:
+	mutex_unlock(&buffer_lock);
+	return NULL;
+}
+EXPORT_SYMBOL(kutf_dsprintf);
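+
+/* Usage sketch (message text is illustrative): the returned string is
+ * allocated from the given pool, so it stays valid for the pool's lifetime:
+ *
+ *	msg = kutf_dsprintf(&context->fixture_pool, "saw %d interrupts", n);
+ *	kutf_test_info(context, msg);
+ */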
diff --git a/drivers/gpu/arm/midgard/tests/kutf/sconscript b/drivers/gpu/arm/midgard/tests/kutf/sconscript
new file mode 100644
index 0000000..4590d1a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/sconscript
@@ -0,0 +1,27 @@
+#
+# (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+Import('kutf_env')
+
+make_args = kutf_env.kernel_get_config_defines(ret_list = True, extra_cflags = ['-DCONFIG_MALI_KUTF'], extra_configs = ['CONFIG_MALI_KUTF=m'])
+
+mod = kutf_env.BuildKernelModule('$STATIC_LIB_PATH/kutf.ko', Glob('*.c'), make_args = make_args)
+kutf_env.KernelObjTarget('kutf', mod)
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild
new file mode 100644
index 0000000..ca8c512
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kbuild
@@ -0,0 +1,26 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+ccflags-y += -I$(src)/../include -I$(src)/../../../ -I$(src)/../../ -I$(src)/../../backend/gpu -I$(srctree)/drivers/staging/android
+
+obj-$(CONFIG_MALI_IRQ_LATENCY) += mali_kutf_irq_test.o
+
+mali_kutf_irq_test-y := mali_kutf_irq_test_main.o
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig
new file mode 100644
index 0000000..4a3863a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig
@@ -0,0 +1,29 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+config MALI_IRQ_LATENCY
+	tristate "Mali GPU IRQ latency measurement"
+	depends on MALI_MIDGARD && MALI_DEBUG && MALI_KUTF
+	default m
+	help
+	  This option will build a test module mali_kutf_irq_test that
+	  can determine the latency of the Mali GPU IRQ on your system.
+	  Choosing M here will generate a single module called mali_kutf_irq_test.
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
new file mode 100644
index 0000000..9218a40
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
@@ -0,0 +1,49 @@
+#
+# (C) COPYRIGHT 2015, 2017-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+TEST_CCFLAGS := \
+	-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+	-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+	-DMALI_USE_CSF=$(MALI_USE_CSF) \
+	$(SCONS_CFLAGS) \
+	-I$(CURDIR)/../include \
+	-I$(CURDIR)/../../../../../../include \
+	-I$(CURDIR)/../../../ \
+	-I$(CURDIR)/../../ \
+	-I$(CURDIR)/../../backend/gpu \
+	-I$(CURDIR)/ \
+	-I$(srctree)/drivers/staging/android \
+	-I$(srctree)/include/linux
+
+all:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) $(SCONS_CONFIGS) EXTRA_CFLAGS="$(TEST_CCFLAGS)" KBUILD_EXTRA_SYMBOLS="$(CURDIR)/../kutf/Module.symvers $(CURDIR)/../../Module.symvers" modules
+
+clean:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
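+
+# Example invocation (illustrative paths only):
+#   make KDIR=/path/to/kernel-build ARCH=arm64
+# KDIR must point at a configured kernel tree; ARCH falls back to the host
+# architecture (see the uname default above) when it is not given.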
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp
new file mode 100644
index 0000000..66f4eb3c
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp
@@ -0,0 +1,29 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ *      (C) COPYRIGHT 2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_kernel_module {
+    name: "mali_kutf_irq_test",
+    defaults: ["kernel_test_module_defaults"],
+    srcs: [
+        "Kbuild",
+        "mali_kutf_irq_test_main.c",
+    ],
+    extra_symbols: [
+        "mali_kbase",
+        "kutf",
+    ],
+    enabled: false,
+    base_build_kutf: {
+        enabled: true,
+        kbuild_options: ["CONFIG_MALI_IRQ_LATENCY=m"],
+    },
+}
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
new file mode 100644
index 0000000..5fb777a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
@@ -0,0 +1,273 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include "mali_kbase.h"
+#include <midgard/backend/gpu/mali_kbase_device_internal.h>
+
+#include <kutf/kutf_suite.h>
+#include <kutf/kutf_utils.h>
+
+/*
+ * This file contains the code which is used for measuring interrupt latency
+ * of the Mali GPU IRQ. In particular, function mali_kutf_irq_latency() is
+ * used with this purpose and it is called within KUTF framework - a kernel
+ * unit test framework. The measured latency provided by this test should
+ * be representative for the latency of the Mali JOB/MMU IRQs as well.
+ */
+
+/* KUTF test application pointer for this test */
+struct kutf_application *irq_app;
+
+/**
+ * struct kutf_irq_fixture_data - test fixture used by the test functions.
+ * @kbdev:	kbase device for the GPU.
+ *
+ */
+struct kutf_irq_fixture_data {
+	struct kbase_device *kbdev;
+};
+
+#define SEC_TO_NANO(s)	      ((s)*1000000000LL)
+
+/* ID for the GPU IRQ */
+#define GPU_IRQ_HANDLER 2
+
+#define NR_TEST_IRQS 1000000
+
+/* IRQ for the test to trigger. Currently MULTIPLE_GPU_FAULTS as we would not
+ * expect to see this in normal use (e.g., when Android is running). */
+#define TEST_IRQ MULTIPLE_GPU_FAULTS
+
+#define IRQ_TIMEOUT HZ
+
+/* Kernel APIs for installing a custom GPU IRQ handler and chaining to the default one */
+extern int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
+		irq_handler_t custom_handler,
+		int irq_type);
+extern irqreturn_t kbase_gpu_irq_handler(int irq, void *data);
+
+static DECLARE_WAIT_QUEUE_HEAD(wait);
+static bool triggered;
+static u64 irq_time;
+
+static void *kbase_untag(void *ptr)
+{
+	return (void *)(((uintptr_t) ptr) & ~3);
+}
+
+/**
+ * kbase_gpu_irq_custom_handler - Custom IRQ throttle handler
+ * @irq:  IRQ number
+ * @data: Data associated with this IRQ
+ *
+ * Return: state of the IRQ
+ */
+static irqreturn_t kbase_gpu_irq_custom_handler(int irq, void *data)
+{
+	struct kbase_device *kbdev = kbase_untag(data);
+	u32 val;
+
+	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
+	if (val & TEST_IRQ) {
+		struct timespec64 tval;
+
+		ktime_get_raw_ts64(&tval);
+		irq_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
+
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
+
+		triggered = true;
+		wake_up(&wait);
+
+		return IRQ_HANDLED;
+	}
+
+	/* Trigger main irq handler */
+	return kbase_gpu_irq_handler(irq, data);
+}
+
+/**
+ * mali_kutf_irq_default_create_fixture() - Creates the fixture data required
+ *                                          for all the tests in the irq suite.
+ * @context:             KUTF context.
+ *
+ * Return: Fixture data created on success or NULL on failure
+ */
+static void *mali_kutf_irq_default_create_fixture(
+		struct kutf_context *context)
+{
+	struct kutf_irq_fixture_data *data;
+
+	data = kutf_mempool_alloc(&context->fixture_pool,
+			sizeof(struct kutf_irq_fixture_data));
+
+	if (!data)
+		goto fail;
+
+	/* Acquire the kbase device */
+	data->kbdev = kbase_find_device(-1);
+	if (data->kbdev == NULL) {
+		kutf_test_fail(context, "Failed to find kbase device");
+		goto fail;
+	}
+
+	return data;
+
+fail:
+	return NULL;
+}
+
+/**
+ * mali_kutf_irq_default_remove_fixture() - Destroy fixture data previously
+ *                          created by mali_kutf_irq_default_create_fixture.
+ *
+ * @context:             KUTF context.
+ */
+static void mali_kutf_irq_default_remove_fixture(
+		struct kutf_context *context)
+{
+	struct kutf_irq_fixture_data *data = context->fixture;
+	struct kbase_device *kbdev = data->kbdev;
+
+	kbase_release_device(kbdev);
+}
+
+/**
+ * mali_kutf_irq_latency() - measure GPU IRQ latency
+ * @context:		kutf context within which to perform the test
+ *
+ * The test triggers IRQs manually, and measures the
+ * time between triggering the IRQ and the IRQ handler being executed.
+ *
+ * This is not a traditional test, in that the pass/fail status has little
+ * meaning (other than indicating that the IRQ handler executed at all). Instead
+ * the results are in the latencies provided with the test result. There is no
+ * meaningful pass/fail result that can be obtained here, instead the latencies
+ * are provided for manual analysis only.
+ */
+static void mali_kutf_irq_latency(struct kutf_context *context)
+{
+	struct kutf_irq_fixture_data *data = context->fixture;
+	struct kbase_device *kbdev = data->kbdev;
+	u64 min_time = U64_MAX, max_time = 0, average_time = 0;
+	int i;
+	bool test_failed = false;
+
+	/* Force GPU to be powered */
+	kbase_pm_context_active(kbdev);
+
+	kbase_set_custom_irq_handler(kbdev, kbase_gpu_irq_custom_handler,
+			GPU_IRQ_HANDLER);
+
+	for (i = 0; i < NR_TEST_IRQS; i++) {
+		struct timespec64 tval;
+		u64 start_time;
+		int ret;
+
+		triggered = false;
+		ktime_get_raw_ts64(&tval);
+		start_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
+
+		/* Trigger fake IRQ */
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
+				TEST_IRQ);
+
+		ret = wait_event_timeout(wait, triggered != false, IRQ_TIMEOUT);
+
+		if (ret == 0) {
+			kutf_test_fail(context, "Timed out waiting for IRQ\n");
+			test_failed = true;
+			break;
+		}
+
+		if ((irq_time - start_time) < min_time)
+			min_time = irq_time - start_time;
+		if ((irq_time - start_time) > max_time)
+			max_time = irq_time - start_time;
+		average_time += irq_time - start_time;
+
+		udelay(10);
+	}
+
+	/* Go back to default handler */
+	kbase_set_custom_irq_handler(kbdev, NULL, GPU_IRQ_HANDLER);
+
+	kbase_pm_context_idle(kbdev);
+
+	if (!test_failed) {
+		const char *results;
+
+		do_div(average_time, NR_TEST_IRQS);
+		results = kutf_dsprintf(&context->fixture_pool,
+				"Min latency = %lldns, Max latency = %lldns, Average latency = %lldns\n",
+				min_time, max_time, average_time);
+		kutf_test_pass(context, results);
+	}
+}
+
+/**
+ * Module entry point for this test.
+ */
+int mali_kutf_irq_test_main_init(void)
+{
+	struct kutf_suite *suite;
+
+	irq_app = kutf_create_application("irq");
+
+	if (NULL == irq_app) {
+		pr_warn("Creation of test application failed!\n");
+		return -ENOMEM;
+	}
+
+	suite = kutf_create_suite(irq_app, "irq_default",
+			1, mali_kutf_irq_default_create_fixture,
+			mali_kutf_irq_default_remove_fixture);
+
+	if (NULL == suite) {
+		pr_warn("Creation of test suite failed!\n");
+		kutf_destroy_application(irq_app);
+		return -ENOMEM;
+	}
+
+	kutf_add_test(suite, 0x0, "irq_latency",
+			mali_kutf_irq_latency);
+	return 0;
+}
+
+/**
+ * Module exit point for this test.
+ */
+void mali_kutf_irq_test_main_exit(void)
+{
+	kutf_destroy_application(irq_app);
+}
+
+module_init(mali_kutf_irq_test_main_init);
+module_exit(mali_kutf_irq_test_main_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION("1.0");
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript
new file mode 100644
index 0000000..cefac0b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript
@@ -0,0 +1,36 @@
+#
+# (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+import os
+Import('env')
+
+src = [Glob('#kernel/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/*.c'), Glob('#kernel/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile')]
+
+if env.GetOption('clean') :
+	env.Execute(Action("make clean", '[CLEAN] mali_kutf_irq_test'))
+	cmd = env.Command('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', src, [])
+	env.KernelObjTarget('mali_kutf_irq_test', cmd)
+else:
+	makeAction=Action("cd ${SOURCE.dir} && make MALI_UNIT_TEST=${unit} MALI_CUSTOMER_RELEASE=${release} MALI_USE_CSF=${csf} %s && ( ( [ -f mali_kutf_irq_test.ko ] && cp mali_kutf_irq_test.ko $STATIC_LIB_PATH/ ) || touch $STATIC_LIB_PATH/mali_kutf_irq_test.ko)" % env.kernel_get_config_defines(extra_cflags = ['-DCONFIG_MALI_IRQ_LATENCY'], extra_configs = ['CONFIG_MALI_IRQ_LATENCY=m']), '$MAKECOMSTR')
+	cmd = env.Command('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', src, [makeAction])
+	env.Depends('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', '$STATIC_LIB_PATH/kutf.ko')
+	env.Depends('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', '$STATIC_LIB_PATH/mali_kbase.ko')
+	env.KernelObjTarget('mali_kutf_irq_test', cmd)
diff --git a/drivers/gpu/arm/midgard/tests/sconscript b/drivers/gpu/arm/midgard/tests/sconscript
new file mode 100644
index 0000000..ca64e83
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/sconscript
@@ -0,0 +1,43 @@
+#
+# (C) COPYRIGHT 2010-2011, 2013, 2017-2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+Import ('env')
+
+kutf_env = env.Clone()
+kutf_env.Append(CPPPATH = '#kernel/drivers/gpu/arm/midgard/tests/include')
+Export('kutf_env')
+
+if Glob('internal/sconscript'):
+	SConscript('internal/sconscript')
+
+if kutf_env['debug'] == '1':
+	SConscript('kutf/sconscript')
+	SConscript('mali_kutf_irq_test/sconscript')
+
+	if Glob('kutf_test/sconscript'):
+		SConscript('kutf_test/sconscript')
+
+	if Glob('kutf_test_runner/sconscript'):
+		SConscript('kutf_test_runner/sconscript')
+
+if env['unit'] == '1':
+	SConscript('mali_kutf_ipa_unit_test/sconscript')
+	SConscript('mali_kutf_vinstr_test/sconscript')
diff --git a/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
new file mode 100644
index 0000000..3aab51a
--- /dev/null
+++ b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
@@ -0,0 +1,363 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ *//*
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ */
+
+#include "linux/mman.h"
+#include "../mali_kbase.h"
+
+/* mali_kbase_mmap.c
+ *
+ * This file contains Linux specific implementation of
+ * kbase_get_unmapped_area() interface.
+ */
+
+
+/**
+ * align_and_check() - Align the specified pointer to the provided alignment and
+ *                     check that it is still in range.
+ * @gap_end:        Highest possible start address for allocation (end of gap in
+ *                  address space)
+ * @gap_start:      Start address of current memory area / gap in address space
+ * @info:           vm_unmapped_area_info structure passed to caller, containing
+ *                  alignment, length and limits for the allocation
+ * @is_shader_code: True if the allocation is for shader code (which has
+ *                  additional alignment requirements)
+ * @is_same_4gb_page: True if the allocation needs to reside completely within
+ *                    a 4GB chunk
+ *
+ * Return: true if gap_end is now aligned correctly and is still in range,
+ *         false otherwise
+ */
+static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
+		struct vm_unmapped_area_info *info, bool is_shader_code,
+		bool is_same_4gb_page)
+{
+	/* Compute highest gap address at the desired alignment */
+	(*gap_end) -= info->length;
+	(*gap_end) -= (*gap_end - info->align_offset) & info->align_mask;
+
+	if (is_shader_code) {
+		/* Check for 4GB boundary */
+		if (0 == (*gap_end & BASE_MEM_MASK_4GB))
+			(*gap_end) -= (info->align_offset ? info->align_offset :
+					info->length);
+		if (0 == ((*gap_end + info->length) & BASE_MEM_MASK_4GB))
+			(*gap_end) -= (info->align_offset ? info->align_offset :
+					info->length);
+
+		if (!(*gap_end & BASE_MEM_MASK_4GB) || !((*gap_end +
+				info->length) & BASE_MEM_MASK_4GB))
+			return false;
+	} else if (is_same_4gb_page) {
+		unsigned long start = *gap_end;
+		unsigned long end = *gap_end + info->length;
+		unsigned long mask = ~((unsigned long)U32_MAX);
+
+		/* Check if 4GB boundary is straddled */
+		if ((start & mask) != ((end - 1) & mask)) {
+			unsigned long offset = end - (end & mask);
+			/* This is to ensure that alignment doesn't get
+			 * disturbed in an attempt to prevent straddling at
+			 * 4GB boundary. The GPU VA is aligned to 2MB when the
+			 * allocation size is > 2MB and there is enough CPU &
+			 * GPU virtual space.
+			 */
+			unsigned long rounded_offset =
+					ALIGN(offset, info->align_mask + 1);
+
+			start -= rounded_offset;
+			end -= rounded_offset;
+
+			*gap_end = start;
+
+			/* The preceding 4GB boundary shall not get straddled,
+			 * even after accounting for the alignment, as the
+			 * size of allocation is limited to 4GB and the initial
+			 * start location was already aligned.
+			 */
+			WARN_ON((start & mask) != ((end - 1) & mask));
+		}
+	}
+
+
+	if ((*gap_end < info->low_limit) || (*gap_end < gap_start))
+		return false;
+
+
+	return true;
+}
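+
+/*
+ * Worked example (illustrative numbers only): with info->length = SZ_4M,
+ * info->align_offset = SZ_2M and info->align_mask = SZ_2M - 1 (the 2MB
+ * alignment case set up by kbase_get_unmapped_area() below), a gap_end of
+ * 0x7f00500000 first drops to 0x7f00100000 after the length is subtracted,
+ * and the masked subtraction then rounds it down to the preceding 2MB
+ * boundary, 0x7f00000000; align_and_check() then returns true as long as
+ * this value still lies above info->low_limit and gap_start.
+ */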
+
+/**
+ * kbase_unmapped_area_topdown() - allocates new areas top-down from
+ *                                 below the stack limit.
+ * @info:              Information about the memory area to allocate.
+ * @is_shader_code:    Boolean which denotes whether the allocated area is
+ *                      intended for use by a shader core, in which case
+ *                      special alignment requirements apply.
+ * @is_same_4gb_page: Boolean which indicates whether the allocated area needs
+ *                    to reside completely within a 4GB chunk.
+ *
+ * The unmapped_area_topdown() function in the Linux kernel is not exported
+ * using EXPORT_SYMBOL_GPL macro. To allow us to call this function from a
+ * module and also make use of the fact that some of the requirements for
+ * the unmapped area are known in advance, we implemented an extended version
+ * of this function and prefixed it with 'kbase_'.
+ *
+ * The difference in the call parameter list comes from the fact that
+ * kbase_unmapped_area_topdown() is called with additional parameters which
+ * are provided to indicate whether the allocation is for a shader core memory,
+ * which has additional alignment requirements, and whether the allocation can
+ * straddle a 4GB boundary.
+ *
+ * The modification of the original Linux function lies in how the computation
+ * of the highest gap address at the desired alignment is performed once the
+ * gap with desirable properties is found. For this purpose a special function
+ * is introduced (@ref align_and_check()) which besides computing the gap end
+ * at the desired alignment also performs additional alignment checks for the
+ * case when the memory is executable shader core memory, for which it is
+ * ensured that the gap does not end on a 4GB boundary, and for the case when
+ * memory needs to be confined within a 4GB chunk.
+ *
+ * Return: address of the found gap end (high limit) if area is found;
+ *         -ENOMEM if search is unsuccessful
+*/
+
+static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
+		*info, bool is_shader_code, bool is_same_4gb_page)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+	/* Adjust search length to account for worst case alignment overhead */
+	length = info->length + info->align_mask;
+	if (length < info->length)
+		return -ENOMEM;
+
+	/*
+	 * Adjust search limits by the desired length.
+	 * See implementation comment at top of unmapped_area().
+	 */
+	gap_end = info->high_limit;
+	if (gap_end < length)
+		return -ENOMEM;
+	high_limit = gap_end - length;
+
+	if (info->low_limit > high_limit)
+		return -ENOMEM;
+	low_limit = info->low_limit + length;
+
+	/* Check highest gap, which does not precede any rbtree node */
+	gap_start = mm->highest_vm_end;
+	if (gap_start <= high_limit) {
+		if (align_and_check(&gap_end, gap_start, info,
+				is_shader_code, is_same_4gb_page))
+			return gap_end;
+	}
+
+	/* Check if rbtree root looks promising */
+	if (RB_EMPTY_ROOT(&mm->mm_rb))
+		return -ENOMEM;
+	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+	if (vma->rb_subtree_gap < length)
+		return -ENOMEM;
+
+	while (true) {
+		/* Visit right subtree if it looks promising */
+		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+			struct vm_area_struct *right =
+				rb_entry(vma->vm_rb.rb_right,
+					 struct vm_area_struct, vm_rb);
+			if (right->rb_subtree_gap >= length) {
+				vma = right;
+				continue;
+			}
+		}
+
+check_current:
+		/* Check if current node has a suitable gap */
+		gap_end = vma->vm_start;
+		if (gap_end < low_limit)
+			return -ENOMEM;
+		if (gap_start <= high_limit && gap_end - gap_start >= length) {
+			/* We found a suitable gap. Clip it with the original
+			 * high_limit. */
+			if (gap_end > info->high_limit)
+				gap_end = info->high_limit;
+
+			if (align_and_check(&gap_end, gap_start, info,
+					is_shader_code, is_same_4gb_page))
+				return gap_end;
+		}
+
+		/* Visit left subtree if it looks promising */
+		if (vma->vm_rb.rb_left) {
+			struct vm_area_struct *left =
+				rb_entry(vma->vm_rb.rb_left,
+					 struct vm_area_struct, vm_rb);
+			if (left->rb_subtree_gap >= length) {
+				vma = left;
+				continue;
+			}
+		}
+
+		/* Go back up the rbtree to find next candidate node */
+		while (true) {
+			struct rb_node *prev = &vma->vm_rb;
+
+			if (!rb_parent(prev))
+				return -ENOMEM;
+			vma = rb_entry(rb_parent(prev),
+				       struct vm_area_struct, vm_rb);
+			if (prev == vma->vm_rb.rb_right) {
+				gap_start = vma->vm_prev ?
+					vma->vm_prev->vm_end : 0;
+				goto check_current;
+			}
+		}
+	}
+
+	return -ENOMEM;
+}
+
+
+/* This function is based on Linux kernel's arch_get_unmapped_area, but
+ * simplified slightly. Modifications come from the fact that some values
+ * about the memory area are known in advance.
+ */
+unsigned long kbase_get_unmapped_area(struct file *filp,
+		const unsigned long addr, const unsigned long len,
+		const unsigned long pgoff, const unsigned long flags)
+{
+	struct kbase_context *kctx = filp->private_data;
+	struct mm_struct *mm = current->mm;
+	struct vm_unmapped_area_info info;
+	unsigned long align_offset = 0;
+	unsigned long align_mask = 0;
+	unsigned long high_limit = mm->mmap_base;
+	unsigned long low_limit = PAGE_SIZE;
+	int cpu_va_bits = BITS_PER_LONG;
+	int gpu_pc_bits =
+	      kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+	bool is_shader_code = false;
+	bool is_same_4gb_page = false;
+	unsigned long ret;
+
+	/* err on fixed address */
+	if ((flags & MAP_FIXED) || addr)
+		return -EINVAL;
+
+#ifdef CONFIG_64BIT
+	/* too big? */
+	if (len > TASK_SIZE - SZ_2M)
+		return -ENOMEM;
+
+	if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
+
+		high_limit = min_t(unsigned long, mm->mmap_base,
+				(kctx->same_va_end << PAGE_SHIFT));
+
+		/* If there's enough (> 33 bits) of GPU VA space, align
+		 * to 2MB boundaries.
+		 */
+		if (kctx->kbdev->gpu_props.mmu.va_bits > 33) {
+			if (len >= SZ_2M) {
+				align_offset = SZ_2M;
+				align_mask = SZ_2M - 1;
+			}
+		}
+
+		low_limit = SZ_2M;
+	} else {
+		cpu_va_bits = 32;
+	}
+#endif /* CONFIG_64BIT */
+	if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
+		(PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
+			int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+			struct kbase_va_region *reg =
+					kctx->pending_regions[cookie];
+
+			if (!reg)
+				return -EINVAL;
+
+			if (!(reg->flags & KBASE_REG_GPU_NX)) {
+				if (cpu_va_bits > gpu_pc_bits) {
+					align_offset = 1ULL << gpu_pc_bits;
+					align_mask = align_offset - 1;
+					is_shader_code = true;
+				}
+			} else if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
+				unsigned long extent_bytes =
+				     (unsigned long)(reg->extent << PAGE_SHIFT);
+				/* kbase_check_alloc_sizes() already satisfies
+				 * these checks, but they're here to avoid
+				 * maintenance hazards due to the assumptions
+				 * involved */
+				WARN_ON(reg->extent > (ULONG_MAX >> PAGE_SHIFT));
+				WARN_ON(reg->initial_commit > (ULONG_MAX >> PAGE_SHIFT));
+				WARN_ON(!is_power_of_2(extent_bytes));
+				align_mask = extent_bytes - 1;
+				align_offset =
+				      extent_bytes - (reg->initial_commit << PAGE_SHIFT);
+			} else if (reg->flags & KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
+				is_same_4gb_page = true;
+			}
+#ifndef CONFIG_64BIT
+	} else {
+		return current->mm->get_unmapped_area(filp, addr, len, pgoff,
+						      flags);
+#endif
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = low_limit;
+	info.high_limit = high_limit;
+	info.align_offset = align_offset;
+	info.align_mask = align_mask;
+
+	ret = kbase_unmapped_area_topdown(&info, is_shader_code,
+			is_same_4gb_page);
+
+	if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
+			high_limit < (kctx->same_va_end << PAGE_SHIFT)) {
+		/* Retry above mmap_base */
+		info.low_limit = mm->mmap_base;
+		info.high_limit = min_t(u64, TASK_SIZE,
+					(kctx->same_va_end << PAGE_SHIFT));
+
+		ret = kbase_unmapped_area_topdown(&info, is_shader_code,
+				is_same_4gb_page);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/arm/sconscript b/drivers/gpu/arm/sconscript
new file mode 100644
index 0000000..dd02acd
--- /dev/null
+++ b/drivers/gpu/arm/sconscript
@@ -0,0 +1,26 @@
+#
+# (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+#
+
+import glob
+
+
+SConscript('midgard/sconscript')
+
diff --git a/drivers/media/cec/platform/meson/ao-cec-g12a.c b/drivers/media/cec/platform/meson/ao-cec-g12a.c
index 8915330..85850b9 100644
--- a/drivers/media/cec/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/cec/platform/meson/ao-cec-g12a.c
@@ -25,6 +25,7 @@
 #include <media/cec.h>
 #include <media/cec-notifier.h>
 #include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
 
 /* CEC Registers */
 
@@ -168,6 +169,18 @@
 
 #define CECB_WAKEUPCTRL		0x31
 
+#define CECB_FUNC_CFG_REG		0xA0
+#define CECB_FUNC_CFG_MASK		GENMASK(6, 0)
+#define CECB_FUNC_CFG_CEC_ON		0x01
+#define CECB_FUNC_CFG_OTP_ON		0x02
+#define CECB_FUNC_CFG_AUTO_STANDBY	0x04
+#define CECB_FUNC_CFG_AUTO_POWER_ON	0x08
+#define CECB_FUNC_CFG_ALL		0x2f
+#define CECB_FUNC_CFG_NONE		0x0
+
+#define CECB_LOG_ADDR_REG	0xA4
+#define CECB_LOG_ADDR_MASK	GENMASK(22, 16)
+
 struct meson_ao_cec_g12a_data {
 	/* Setup the internal CECB_CTRL2 register */
 	bool				ctrl2_setup;
@@ -177,6 +190,7 @@
 	struct platform_device		*pdev;
 	struct regmap			*regmap;
 	struct regmap			*regmap_cec;
+	struct regmap			*regmap_ao_sysctrl;
 	spinlock_t			cec_reg_lock;
 	struct cec_notifier		*notify;
 	struct cec_adapter		*adap;
@@ -518,6 +532,13 @@
 					 BIT(logical_addr - 8));
 	}
 
+	if (ao_cec->regmap_ao_sysctrl)
+		ret |= regmap_update_bits(ao_cec->regmap_ao_sysctrl,
+					 CECB_LOG_ADDR_REG,
+					 CECB_LOG_ADDR_MASK,
+					 FIELD_PREP(CECB_LOG_ADDR_MASK,
+						    logical_addr));
+
 	/* Always set Broadcast/Unregistered 15 address */
 	ret |= regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_HIGH,
 				  BIT(CEC_LOG_ADDR_UNREGISTERED - 8),
@@ -618,6 +639,13 @@
 		regmap_write(ao_cec->regmap_cec, CECB_CTRL2,
 			     FIELD_PREP(CECB_CTRL2_RISE_DEL_MAX, 2));
 
+	if (ao_cec->regmap_ao_sysctrl) {
+		regmap_update_bits(ao_cec->regmap_ao_sysctrl,
+				   CECB_FUNC_CFG_REG,
+				   CECB_FUNC_CFG_MASK,
+				   CECB_FUNC_CFG_ALL);
+	}
+
 	meson_ao_cec_g12a_irq_setup(ao_cec, true);
 
 	return 0;
@@ -685,6 +713,11 @@
 		goto out_probe_adapter;
 	}
 
+	ao_cec->regmap_ao_sysctrl = syscon_regmap_lookup_by_phandle
+		(pdev->dev.of_node, "amlogic,ao-sysctrl");
+	if (IS_ERR(ao_cec->regmap_ao_sysctrl))
+		dev_warn(&pdev->dev, "ao-sysctrl syscon regmap lookup failed.\n");
+
 	irq = platform_get_irq(pdev, 0);
 	ret = devm_request_threaded_irq(&pdev->dev, irq,
 					meson_ao_cec_g12a_irq,
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b988002..a8395ea 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1480,6 +1480,8 @@
 		case V4L2_PIX_FMT_S5C_UYVY_JPG:	descr = "S5C73MX interleaved UYVY/JPEG"; break;
 		case V4L2_PIX_FMT_MT21C:	descr = "Mediatek Compressed Format"; break;
 		case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
+		case V4L2_PIX_FMT_YUV420_8BIT:	descr = "Compressed YUV 4:2:0 8-bit"; break;
+		case V4L2_PIX_FMT_YUV420_10BIT:	descr = "Compressed YUV 4:2:0 10-bit"; break;
 		default:
 			trace_android_vh_fill_ext_fmtdesc(fmt, &descr);
 			if (descr)
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 170a64e..53c9368 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -34,6 +34,7 @@
 source "drivers/net/wireless/admtek/Kconfig"
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/atmel/Kconfig"
+source "drivers/net/wireless/bcmdhd/Kconfig"
 source "drivers/net/wireless/broadcom/Kconfig"
 source "drivers/net/wireless/cisco/Kconfig"
 source "drivers/net/wireless/intel/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 80b3244..7ed5a16 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_WLAN_VENDOR_ADMTEK) += admtek/
 obj-$(CONFIG_WLAN_VENDOR_ATH) += ath/
 obj-$(CONFIG_WLAN_VENDOR_ATMEL) += atmel/
+obj-$(CONFIG_BCMDHD) += bcmdhd/
 obj-$(CONFIG_WLAN_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_WLAN_VENDOR_CISCO) += cisco/
 obj-$(CONFIG_WLAN_VENDOR_INTEL) += intel/
diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig
new file mode 100644
index 0000000..fffcee2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Kconfig
@@ -0,0 +1,60 @@
+config BCMDHD
+	tristate "Broadcom FullMAC wireless cards support"
+	help
+	  This module adds support for wireless adapters based on
+	  the Broadcom FullMAC chipset.
+
+config BCMDHD_FW_PATH
+	depends on BCMDHD
+	string "Firmware path"
+	default "/system/etc/firmware/fw_bcmdhd.bin"
+	help
+	  Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+	depends on BCMDHD
+	string "NVRAM path"
+	default "/system/etc/firmware/nvram.txt"
+	help
+	  Path to the calibration file.
+
+config BCMDHD_WEXT
+	bool "Enable WEXT support"
+	depends on BCMDHD && CFG80211 = n
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	help
+	  Enables WEXT support
+
+choice
+	prompt "Enable Chip Interface"
+	depends on BCMDHD
+	help
+	  Enable Chip Interface.
+config BCMDHD_SDIO
+	bool "SDIO bus interface support"
+	depends on BCMDHD && MMC
+config BCMDHD_PCIE
+	bool "PCIe bus interface support"
+	depends on BCMDHD && PCI
+config BCMDHD_USB
+	bool "USB bus interface support"
+	depends on BCMDHD && USB
+endchoice
+
+choice
+	depends on BCMDHD && BCMDHD_SDIO
+	prompt "Interrupt type"
+	help
+	  Interrupt type.
+config BCMDHD_OOB
+	depends on BCMDHD && BCMDHD_SDIO
+	bool "Out-of-Band Interrupt"
+	help
+	  Interrupt from WL_HOST_WAKE.
+config BCMDHD_SDIO_IRQ
+	depends on BCMDHD && BCMDHD_SDIO
+	bool "In-Band Interrupt"
+	help
+	  Interrupt from SDIO DAT[1].
+endchoice
diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile
new file mode 100644
index 0000000..58bb833
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Makefile
@@ -0,0 +1,165 @@
+# bcmdhd
+# 1. WL_IFACE_COMB_NUM_CHANNELS must be added if Android version is 4.4 with Kernel version 3.0~3.4,
+#    otherwise please remove it.
+
+#CONFIG_BCMDHD := m
+#CONFIG_BCMDHD_SDIO := y
+#CONFIG_BCMDHD_PCIE := y
+#CONFIG_BCMDHD_USB := y
+
+#CONFIG_BCMDHD_OOB := y
+#CONFIG_BCMDHD_CUSB := y
+CONFIG_BCMDHD_PROPTXSTATUS := y
+#CONFIG_DHD_USE_STATIC_BUF := y
+#CONFIG_VTS_SUPPORT := y
+
+CONFIG_MACH_PLATFORM := y
+#CONFIG_BCMDHD_DTS := y
+
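+# Example (illustrative): for a standalone SDIO build with out-of-band
+# interrupts, the commented knobs above would typically be set to:
+#   CONFIG_BCMDHD := m
+#   CONFIG_BCMDHD_SDIO := y
+#   CONFIG_BCMDHD_OOB := y
+# When built in-tree, the same symbols come from the Kconfig added in this
+# patch instead.
+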
+DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER                 \
+	-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE            \
+	-DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DGET_OTP_MAC_ENABLE   \
+	-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY             \
+	-DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DPNO_SUPPORT -DDHDTCPACK_SUPPRESS  \
+	-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT                           \
+	-DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP                          \
+	-DWL_EXT_IAPSTA                                                       \
+	-DENABLE_INSMOD_NO_FW_LOAD                                            \
+	-Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include
+
+DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o   \
+	dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o           \
+	dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o                \
+	bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o           \
+	dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o                           \
+	hnd_pktq.o hnd_pktpool.o dhd_config.o wl_android_ext.o
+
+#BCMDHD_SDIO
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR \
+	-DSDTEST -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256       \
+	-DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD
+ifeq ($(CONFIG_BCMDHD_OOB),y)
+	DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB
+ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
+	DHDCFLAGS += -DDISABLE_WOWLAN
+endif
+else
+	DHDCFLAGS += -DSDIO_ISR_THREAD
+endif
+
+DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o  \
+	dhd_sdio.o dhd_cdc.o dhd_wlfc.o
+endif
+
+#BCMDHD_PCIE
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1    \
+	-DDONGLE_ENABLE_ISOLATION
+ifneq ($(CONFIG_PCI_MSI),)
+	DHDCFLAGS += -DDHD_USE_MSI
+endif
+
+DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o       \
+	dhd_msgbuf.o
+endif
+
+#BCMDHD_USB
+ifneq ($(CONFIG_BCMDHD_USB),)
+DHDCFLAGS += -DUSBOS_TX_THREAD -DBCMDBUS -DBCMTRXV2 -DDBUS_USB_LOOPBACK   \
+	-DBDC
+DHDCFLAGS += -DBCM_REQUEST_FW -DEXTERNAL_FW_PATH
+#DHDCFLAGS :=$(filter-out -DENABLE_INSMOD_NO_FW_LOAD,$(DHDCFLAGS))
+ifneq ($(CONFIG_BCMDHD_CUSB),)
+	DHDCFLAGS += -DBCMUSBDEV_COMPOSITE
+	DHDCFLAGS :=$(filter-out -DENABLE_INSMOD_NO_FW_LOAD,$(DHDCFLAGS))
+endif
+
+DHDOFILES += dbus.o dbus_usb.o dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o
+endif
+
+#PROPTXSTATUS
+ifeq ($(CONFIG_BCMDHD_PROPTXSTATUS),y)
+ifneq ($(CONFIG_BCMDHD_USB),)
+	DHDCFLAGS += -DPROP_TXSTATUS
+endif
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+	DHDCFLAGS += -DPROP_TXSTATUS
+endif
+ifneq ($(CONFIG_CFG80211),)
+	DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+endif
+endif
+
+#VTS_SUPPORT
+ifeq ($(CONFIG_VTS_SUPPORT),y)
+ifneq ($(CONFIG_CFG80211),)
+DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DCUSTOM_FORCE_NODFS_FLAG      \
+	-DLINKSTAT_SUPPORT -DDEBUGABILITY -DDBG_PKT_MON -DPKT_FILTER_SUPPORT  \
+	-DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHD_WAKE_STATUS   \
+	-DCUSTOM_COUNTRY_CODE -DDHD_FW_COREDUMP -DEXPLICIT_DISCIF_CLEANUP
+
+DHDOFILES += bcmxtlv.o dhd_rtt.o bcm_app_utils.o
+endif
+endif
+
+# MESH support for kernel 3.10 later
+ifeq ($(CONFIG_WL_MESH),y)
+	DHDCFLAGS += -DWLMESH
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+	DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000
+endif
+	DHDCFLAGS += -DDHD_UPDATE_INTF_MAC
+	DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
+	DHDCFLAGS :=$(filter-out -DSET_RANDOM_MAC_SOFTAP,$(DHDCFLAGS))
+endif
+
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
+
+ifeq ($(CONFIG_MACH_PLATFORM),y)
+	DHDOFILES += dhd_gpio.o
+ifeq ($(CONFIG_BCMDHD_DTS),y)
+	DHDCFLAGS += -DCONFIG_DTS
+else
+	DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT
+endif
+#	DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
+endif
+
+ifeq ($(CONFIG_BCMDHD_AG),y)
+	DHDCFLAGS += -DBAND_AG
+endif
+
+ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
+	obj-m += dhd_static_buf.o
+	DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF
+	DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP -DCONFIG_DHD_USE_STATIC_BUF
+endif
+
+ifneq ($(CONFIG_WIRELESS_EXT),)
+	DHDOFILES += wl_iw.o wl_escan.o
+	DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW -DWL_ESCAN
+endif
+ifneq ($(CONFIG_CFG80211),)
+	DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
+	DHDOFILES += dhd_cfg80211.o
+	DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
+	DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+	DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65
+	DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15
+	DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
+	DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7
+	DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
+	DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
+	DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT
+	DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8
+	DHDCFLAGS += -DWL_VIRTUAL_APSTA
+endif
+EXTRA_CFLAGS = $(DHDCFLAGS)
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
+
+ccflags-y += -I$(srctree)/drivers/net/wireless/bcmdhd -I$(srctree)/drivers/net/wireless/bcmdhd/include
diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c
new file mode 100644
index 0000000..f88b123
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/aiutils.c
@@ -0,0 +1,1810 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: aiutils.c 625027 2016-03-15 08:20:18Z $
+ */
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+#include <bcmdevs.h>
+
+#define BCM5357_DMP() (0)
+#define BCM53573_DMP() (0)
+#define BCM4707_DMP() (0)
+#define PMU_DMP() (0)
+#define GCI_DMP() (0)
+#define remap_coreid(sih, coreid)	(coreid)
+#define remap_corerev(sih, corerev)	(corerev)
+
+/* EROM parsing */
+
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+	uint32 ent;
+	uint inv = 0, nom = 0;
+	uint32 size = 0;
+
+	while (TRUE) {
+		ent = R_REG(si_osh(sih), *eromptr);
+		(*eromptr)++;
+
+		if (mask == 0)
+			break;
+
+		if ((ent & ER_VALID) == 0) {
+			inv++;
+			continue;
+		}
+
+		if (ent == (ER_END | ER_VALID))
+			break;
+
+		if ((ent & mask) == match)
+			break;
+
+		/* escape condition based on EROM size, in case it contains invalid values */
+		size += sizeof(*eromptr);
+		if (size >= ER_SZ_MAX) {
+			SI_ERROR(("Failed to find end of EROM marker\n"));
+			break;
+		}
+
+		nom++;
+	}
+
+	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+	if (inv + nom) {
+		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
+	}
+	return ent;
+}
+
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+	uint32 *sizel, uint32 *sizeh)
+{
+	uint32 asd, sz, szd;
+
+	BCM_REFERENCE(ad);
+
+	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+	if (((asd & ER_TAG1) != ER_ADD) ||
+	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+	    ((asd & AD_ST_MASK) != st)) {
+		/* This is not what we want, "push" it back */
+		(*eromptr)--;
+		return 0;
+	}
+	*addrl = asd & AD_ADDR_MASK;
+	if (asd & AD_AG32)
+		*addrh = get_erom_ent(sih, eromptr, 0, 0);
+	else
+		*addrh = 0;
+	*sizeh = 0;
+	sz = asd & AD_SZ_MASK;
+	if (sz == AD_SZ_SZD) {
+		szd = get_erom_ent(sih, eromptr, 0, 0);
+		*sizel = szd & SD_SZ_MASK;
+		if (szd & SD_SG32)
+			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
+	} else
+		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+	return asd;
+}
+
+
+/* parse the enumeration rom to identify all cores */
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = (chipcregs_t *)regs;
+	uint32 erombase, *eromptr, *eromlim;
+	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+
+	BCM_REFERENCE(devid);
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+		break;
+
+	case PCI_BUS:
+		/* Set wrappers address */
+		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+		/* Now point the window at the erom */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		eromptr = regs;
+		break;
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		eromptr = (uint32 *)(uintptr)erombase;
+		break;
+#endif	/* BCMSDIO */
+
+	case PCMCIA_BUS:
+	default:
+		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+		ASSERT(0);
+		return;
+	}
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+	sii->axi_num_wrappers = 0;
+
+	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+	         OSL_OBFUSCATE_BUF(regs), erombase,
+		OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
+	while (eromptr < eromlim) {
+		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+		uint i, j, idx;
+		bool br;
+
+		br = FALSE;
+
+		/* Grok a component */
+		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+		if (cia == (ER_END | ER_VALID)) {
+			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+			return;
+		}
+
+		cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+		if ((cib & ER_TAG) != ER_CI) {
+			SI_ERROR(("CIA not followed by CIB\n"));
+			goto error;
+		}
+
+		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+		         "nsw = %d, nmp = %d & nsp = %d\n",
+		         mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
+#else
+		BCM_REFERENCE(crev);
+#endif
+
+		if (CHIPID(sih->chip) == BCM4347_CHIP_ID) {
+			/* 4347 has more entries for ARM core
+			 * This should apply to all chips but crashes on router
+			 * This is a temporary fix that needs further analysis
+			 */
+			if (nsp == 0)
+				continue;
+		} else {
+			/* Include Default slave wrapper for timeout monitoring */
+			if ((nsp == 0) ||
+#if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
+				((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
+#endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
+				FALSE) {
+				continue;
+			}
+		}
+
+		if ((nmw + nsw == 0)) {
+			/* A component which is not a core */
+			if (cid == OOB_ROUTER_CORE_ID) {
+				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+					&addrl, &addrh, &sizel, &sizeh);
+				if (asd != 0) {
+					sii->oob_router = addrl;
+				}
+			}
+			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
+				cid != PMU_CORE_ID && cid != GCI_CORE_ID)
+				continue;
+		}
+
+		idx = sii->numcores;
+
+		cores_info->cia[idx] = cia;
+		cores_info->cib[idx] = cib;
+		cores_info->coreid[idx] = remap_coreid(sih, cid);
+
+		for (i = 0; i < nmp; i++) {
+			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+			if ((mpd & ER_TAG) != ER_MP) {
+				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+				goto error;
+			}
+			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
+			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+		}
+
+		/* First Slave Address Descriptor should be port 0:
+		 * the main register space for the core
+		 */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+		if (asd == 0) {
+			do {
+			/* Try again to see if it is a bridge */
+			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd != 0)
+				br = TRUE;
+			else {
+					if (br == TRUE) {
+						break;
+					}
+					else if ((addrh != 0) || (sizeh != 0) ||
+						(sizel != SI_CORE_SIZE)) {
+						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
+							"0x%x\n", addrh, sizeh, sizel));
+						SI_ERROR(("First Slave ASD for"
+							"core 0x%04x malformed "
+							"(0x%08x)\n", cid, asd));
+						goto error;
+					}
+				}
+			} while (1);
+		}
+		cores_info->coresba[idx] = addrl;
+		cores_info->coresba_size[idx] = sizel;
+		/* Get any more ASDs in port 0 */
+		j = 1;
+		do {
+			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+				cores_info->coresba2[idx] = addrl;
+				cores_info->coresba2_size[idx] = sizel;
+			}
+			j++;
+		} while (asd != 0);
+
+		/* Go through the ASDs for other slave ports */
+		for (i = 1; i < nsp; i++) {
+			j = 0;
+			do {
+				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				              &sizel, &sizeh);
+
+				if (asd == 0)
+					break;
+				j++;
+			} while (1);
+			if (j == 0) {
+				SI_ERROR((" SP %d has no address descriptors\n", i));
+				goto error;
+			}
+		}
+
+		/* Now get master wrappers */
+		for (i = 0; i < nmw; i++) {
+			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for MW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if (i == 0)
+				cores_info->wrapba[idx] = addrl;
+			else if (i == 1)
+				cores_info->wrapba2[idx] = addrl;
+
+
+			ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
+			axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+			axi_wrapper[sii->axi_num_wrappers].cid = cid;
+			axi_wrapper[sii->axi_num_wrappers].rev = crev;
+			axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
+			axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+			sii->axi_num_wrappers++;
+			SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
+					sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
+		}
+
+		/* And finally slave wrappers */
+		for (i = 0; i < nsw; i++) {
+			uint fwp = (nsp == 1) ? 0 : 1;
+			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+
+			/* cache APB bridge wrapper address for set/clear timeout */
+			if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
+				ASSERT(sii->num_br < SI_MAXBR);
+				sii->br_wrapba[sii->num_br++] = addrl;
+			}
+
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for SW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if ((nmw == 0) && (i == 0))
+				cores_info->wrapba[idx] = addrl;
+			else if ((nmw == 0) && (i == 1))
+				cores_info->wrapba2[idx] = addrl;
+
+			/* Include all slave wrappers to the list to
+			 * enable and monitor watchdog timeouts
+			 */
+
+			ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
+			axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+			axi_wrapper[sii->axi_num_wrappers].cid = cid;
+			axi_wrapper[sii->axi_num_wrappers].rev = crev;
+			axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
+			axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+			sii->axi_num_wrappers++;
+
+			SI_VMSG(("SLAVE WRAPPER: %d,  mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
+				sii->axi_num_wrappers,  mfg, cid, crev, addrl, sizel));
+		}
+
+
+#ifndef BCM_BACKPLANE_TIMEOUT
+		/* Don't record bridges */
+		if (br)
+			continue;
+#endif
+
+		/* Done with core */
+		sii->numcores++;
+	}
+
+	SI_ERROR(("Reached end of erom without finding END\n"));
+
+error:
+	sii->numcores = 0;
+	return;
+}
+
+#define AI_SETCOREIDX_MAPSIZE(coreid) \
+	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static volatile void *
+_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 addr, wrap, wrap2;
+	volatile void *regs;
+
+	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+		return (NULL);
+
+	addr = cores_info->coresba[coreidx];
+	wrap = cores_info->wrapba[coreidx];
+	wrap2 = cores_info->wrapba2[coreidx];
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+	/* No need to disable interrupts while entering/exiting APB bridge core */
+	if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
+		(cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
+#endif /* BCM_BACKPLANE_TIMEOUT */
+	{
+		/*
+		 * If the user has provided an interrupt mask enabled function,
+		 * then assert interrupts are disabled before switching the core.
+		 */
+		ASSERT((sii->intrsenabled_fn == NULL) ||
+			!(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+	}
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(addr,
+				AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		sii->curmap = regs = cores_info->regs[coreidx];
+		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
+			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
+		}
+		if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
+			cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
+		}
+		if (use_wrap2)
+			sii->curwrap = cores_info->wrappers2[coreidx];
+		else
+			sii->curwrap = cores_info->wrappers[coreidx];
+		break;
+
+	case PCI_BUS:
+#ifdef BCM_BACKPLANE_TIMEOUT
+		/* No need to set the BAR0 if core is APB Bridge.
+		 * This is to save 2 PCI writes while checking for errlog
+		 */
+		if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
+#endif /* BCM_BACKPLANE_TIMEOUT */
+		{
+			/* point bar0 window */
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+		}
+
+		regs = sii->curmap;
+		/* point bar0 2nd 4KB window to the primary wrapper */
+		if (use_wrap2)
+			wrap = wrap2;
+		if (PCIE_GEN2(sii))
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
+		else
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+		break;
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		sii->curmap = regs = (void *)((uintptr)addr);
+		if (use_wrap2)
+			sii->curwrap = (void *)((uintptr)wrap2);
+		else
+			sii->curwrap = (void *)((uintptr)wrap);
+		break;
+#endif	/* BCMSDIO */
+
+	case PCMCIA_BUS:
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	sii->curmap = regs;
+	sii->curidx = coreidx;
+
+	return regs;
+}
+
+volatile void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+	return _ai_setcoreidx(sih, coreidx, 0);
+}
+
+volatile void *
+ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
+{
+	return _ai_setcoreidx(sih, coreidx, 1);
+}
+
+void
+ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = NULL;
+	uint32 erombase, *eromptr, *eromlim;
+	uint i, j, cidx;
+	uint32 cia, cib, nmp, nsp;
+	uint32 asd, addrl, addrh, sizel, sizeh;
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == CC_CORE_ID) {
+			cc = (chipcregs_t *)cores_info->regs[i];
+			break;
+		}
+	}
+	if (cc == NULL)
+		goto error;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	cidx = sii->curidx;
+	cia = cores_info->cia[cidx];
+	cib = cores_info->cib[cidx];
+
+	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+	/* scan for cores */
+	while (eromptr < eromlim) {
+		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+			break;
+		}
+	}
+
+	/* skip master ports */
+	for (i = 0; i < nmp; i++)
+		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+	/* Skip ASDs in port 0 */
+	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+	if (asd == 0) {
+		/* Try again to see if it is a bridge */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+		              &sizel, &sizeh);
+	}
+
+	j = 1;
+	do {
+		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+		              &sizel, &sizeh);
+		j++;
+	} while (asd != 0);
+
+	/* Go through the ASDs for other slave ports */
+	for (i = 1; i < nsp; i++) {
+		j = 0;
+		do {
+			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				&sizel, &sizeh);
+			if (asd == 0)
+				break;
+
+			if (!asidx--) {
+				*addr = addrl;
+				*size = sizel;
+				return;
+			}
+			j++;
+		} while (1);
+
+		if (j == 0) {
+			SI_ERROR((" SP %d has no address descriptors\n", i));
+			break;
+		}
+	}
+
+error:
+	*size = 0;
+	return;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(si_t *sih)
+{
+
+	BCM_REFERENCE(sih);
+
+	return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba_size[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2_size[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+uint
+ai_flag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM53573_DMP()) {
+		SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
+		return sii->curidx;
+	}
+#ifdef REROUTE_OOBINT
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+			__FUNCTION__));
+		return PMU_OOB_BIT;
+	}
+#else
+	if (PMU_DMP()) {
+		uint idx, flag;
+		idx = sii->curidx;
+		ai_setcoreidx(sih, SI_CC_IDX);
+		flag = ai_flag_alt(sih);
+		ai_setcoreidx(sih, idx);
+		return flag;
+	}
+#endif /* REROUTE_OOBINT */
+
+	ai = sii->curwrap;
+	ASSERT(ai != NULL);
+
+	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+uint
+ai_flag_alt(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+#ifdef REROUTE_OOBINT
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+			__FUNCTION__));
+		return PMU_OOB_BIT;
+	}
+#endif /* REROUTE_OOBINT */
+
+	ai = sii->curwrap;
+
+	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+	BCM_REFERENCE(sih);
+	BCM_REFERENCE(siflag);
+
+}
+
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 *map = (uint32 *) sii->curwrap;
+
+	if (mask || val) {
+		uint32 w = R_REG(sii->osh, map+(offset/4));
+		w &= ~mask;
+		w |= val;
+		W_REG(sii->osh, map+(offset/4), w);
+	}
+
+	return (R_REG(sii->osh, map+(offset/4)));
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cia;
+
+	cia = cores_info->cia[sii->curidx];
+	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cib;
+
+
+	cib = cores_info->cib[sii->curidx];
+	return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	ai = sii->curwrap;
+
+	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
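+/*
+ * Usage sketch (illustrative, not from the original callers): a mask/val pair of
+ * 0/0 performs a plain read, while a non-zero pair does a read-modify-write
+ * followed by a readback of the new value:
+ *
+ *	cur = ai_corereg(sih, coreidx, regoff, 0, 0);
+ *	updated = ai_corereg(sih, coreidx, regoff, mask, val);
+ */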
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	volatile uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (volatile uint32 *)((volatile char *)sii->curmap +
+			               PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* PCI registers are either in the last 2KB of an 8KB window
+			 * or, for PCIe and PCI rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
+		               regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_REG(sii->osh, r) & ~mask) | val;
+		W_REG(sii->osh, r, w);
+	}
+
+	/* readback */
+	w = R_REG(sii->osh, r);
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			ai_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
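+/*
+ * Usage sketch (illustrative): obtain a direct register pointer and use it with
+ * R_REG/W_REG when no core switch is needed; osh below stands for the caller's
+ * osl handle (an assumed name):
+ *
+ *	volatile uint32 *p = ai_corereg_addr(sih, coreidx, regoff);
+ *	if (p != NULL)
+ *		W_REG(osh, p, val);
+ */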
+volatile uint32 *
+ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	volatile uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (volatile uint32 *)((volatile char *)sii->curmap +
+			               PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* PCI registers are either in the last 2KB of an 8KB window
+			 * or, for PCIe and PCI rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		ASSERT(sii->curidx == coreidx);
+		r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
+	}
+
+	return (r);
+}
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	volatile uint32 dummy;
+	uint32 status;
+	aidmp_t *ai;
+
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* if core is already in reset, just return */
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
+		return;
+	}
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+	/* if pending backplane ops still, try waiting longer */
+	if (status != 0) {
+		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
+		/* during driver load we may need more time */
+		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+		/* if still pending ops, continue on and try disable anyway */
+		/* this is in big hammer path, so don't call wl_reinit in this case... */
+	}
+
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	dummy = R_REG(sii->osh, &ai->resetctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	W_REG(sii->osh, &ai->ioctrl, bits);
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
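+/*
+ * Resulting ioctrl sequence, as implemented below:
+ *	while in reset:   ioctrl = bits | resetbits | SICF_FGC | SICF_CLOCK_EN
+ *	after the reset:  ioctrl = bits | SICF_CLOCK_EN
+ */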
+static void
+_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	volatile uint32 dummy;
+	uint loop_counter = 10;
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* if core is already out of reset, just return */
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	/* put core into reset state */
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	OSL_DELAY(10);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
+		/* ensure there are no pending backplane operations */
+		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+		/* take core out of reset */
+		W_REG(sii->osh, &ai->resetctrl, 0);
+
+		/* ensure there are no pending backplane operations */
+		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+	}
+
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx = sii->curidx;
+
+	if (cores_info->wrapba2[idx] != 0) {
+		ai_setcoreidx_2ndwrap(sih, idx);
+		_ai_core_reset(sih, bits, resetbits);
+		ai_setcoreidx(sih, idx);
+	}
+
+	_ai_core_reset(sih, bits, resetbits);
+}
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return;
+	}
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return 0;
+	}
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+
+	return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+		W_REG(sii->osh, &ai->iostatus, w);
+	}
+
+	return R_REG(sii->osh, &ai->iostatus);
+}
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting aidmp registers */
+void
+ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+	si_info_t *sii = SI_INFO(sih);
+	osl_t *osh;
+	aidmp_t *ai;
+	uint i;
+	uint32 prev_value = 0;
+	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+	uint32 cfg_reg = 0;
+	uint bar0_win_offset = 0;
+
+	osh = sii->osh;
+
+
+	/* Save and restore wrapper access window */
+	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		if (PCIE_GEN2(sii)) {
+			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+			bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+		} else {
+			cfg_reg = PCI_BAR0_WIN2;
+			bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
+		}
+
+		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+
+		if (prev_value == ID32_INVALID) {
+			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
+			return;
+		}
+	}
+
+	bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
+		sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
+
+	for (i = 0; i < sii->axi_num_wrappers; i++) {
+
+		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+			/* Set BAR0 window to bridge wrapper base address */
+			OSL_PCI_WRITE_CONFIG(osh,
+				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+			ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
+		} else {
+			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+		}
+
+		bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
+			axi_wrapper[i].rev,
+			axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
+			axi_wrapper[i].wrapper_addr);
+
+		/* BCM5357_DMP() */
+		if (((CHIPID(sih->chip) == BCM5357_CHIP_ID) ||
+			(CHIPID(sih->chip) == BCM4749_CHIP_ID)) &&
+			(sih->chippkg == BCM5357_PKG_ID) &&
+			(axi_wrapper[i].cid == USB20H_CORE_ID)) {
+			bcm_bprintf(b, "Skipping usb20h in 5357\n");
+			continue;
+		}
+
+		/* BCM4707_DMP() */
+		if (BCM4707_CHIP(CHIPID(sih->chip)) &&
+			(axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
+			bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
+			continue;
+		}
+
+		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
+			    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
+			    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
+			    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
+			    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
+			    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
+			    "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
+			    R_REG(osh, &ai->ioctrlset),
+			    R_REG(osh, &ai->ioctrlclear),
+			    R_REG(osh, &ai->ioctrl),
+			    R_REG(osh, &ai->iostatus),
+			    R_REG(osh, &ai->ioctrlwidth),
+			    R_REG(osh, &ai->iostatuswidth),
+			    R_REG(osh, &ai->resetctrl),
+			    R_REG(osh, &ai->resetstatus),
+			    R_REG(osh, &ai->resetreadid),
+			    R_REG(osh, &ai->resetwriteid),
+			    R_REG(osh, &ai->errlogctrl),
+			    R_REG(osh, &ai->errlogdone),
+			    R_REG(osh, &ai->errlogstatus),
+			    R_REG(osh, &ai->errlogaddrlo),
+			    R_REG(osh, &ai->errlogaddrhi),
+			    R_REG(osh, &ai->errlogid),
+			    R_REG(osh, &ai->errloguser),
+			    R_REG(osh, &ai->errlogflags),
+			    R_REG(osh, &ai->intstatus),
+			    R_REG(osh, &ai->config),
+			    R_REG(osh, &ai->itcr));
+	}
+
+	/* Restore the initial wrapper space */
+	if (prev_value && cfg_reg) {
+		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+	}
+
+}
+#endif /* BCMDBG_PHYDUMP */
+
+
+void
+ai_enable_backplane_timeouts(si_t *sih)
+{
+#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 i;
+	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+	uint32 prev_value = 0;
+	osl_t *osh = sii->osh;
+	uint32 cfg_reg = 0;
+	uint32 offset = 0;
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+	if ((sii->axi_num_wrappers == 0) ||
+#ifdef BCM_BACKPLANE_TIMEOUT
+		(!PCIE(sii)) ||
+#endif /* BCM_BACKPLANE_TIMEOUT */
+		FALSE) {
+		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
+			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
+			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+		return;
+	}
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+	/* Save and restore the wrapper access window */
+	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		if (PCIE_GEN1(sii)) {
+			cfg_reg = PCI_BAR0_WIN2;
+			offset = PCI_BAR0_WIN2_OFFSET;
+		} else if (PCIE_GEN2(sii)) {
+			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+		}
+		else {
+			osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
+		}
+
+		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+		if (prev_value == ID32_INVALID) {
+			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
+			return;
+		}
+	}
+
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+	for (i = 0; i < sii->axi_num_wrappers; ++i) {
+
+		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
+			SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
+				axi_wrapper[i].mfg,
+				axi_wrapper[i].cid,
+				axi_wrapper[i].wrapper_addr));
+			continue;
+		}
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+			/* Set BAR0_CORE2_WIN2 to wrapper base address */
+			OSL_PCI_WRITE_CONFIG(osh,
+				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+			/* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
+			ai = (aidmp_t *) ((uint8*)sii->curmap + offset);
+		}
+		else
+#endif /* BCM_BACKPLANE_TIMEOUT */
+		{
+			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+		}
+
+		W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) |
+		      ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK));
+
+		SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
+			axi_wrapper[i].mfg,
+			axi_wrapper[i].cid,
+			axi_wrapper[i].wrapper_addr,
+			R_REG(sii->osh, &ai->errlogctrl)));
+	}
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+	/* Restore the initial wrapper space */
+	if (prev_value) {
+		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+	}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
+}
+
+#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
+
+/* slave error is ignored, so account for those cases */
+static uint32 si_ignore_errlog_cnt = 0;
+
+static bool
+ai_ignore_errlog(si_info_t *sii, uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
+{
+	uint32 axi_id;
+
+	/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
+	switch (CHIPID(sii->pub.chip)) {
+		case BCM4350_CHIP_ID:
+			axi_id = BCM4350_BT_AXI_ID;
+			break;
+		case BCM4345_CHIP_ID:
+			axi_id = BCM4345_BT_AXI_ID;
+			break;
+		default:
+			return FALSE;
+	}
+
+	/* AXI ID check */
+	if ((err_axi_id & AI_ERRLOGID_AXI_ID_MASK) != axi_id)
+		return FALSE;
+
+	/* slave errors */
+	if ((errsts & AIELS_TIMEOUT_MASK) != AIELS_SLAVE_ERR)
+		return FALSE;
+
+	/* chipc reg 0x190 */
+	if ((hi_addr != BT_CC_SPROM_BADREG_HI) || (lo_addr != BT_CC_SPROM_BADREG_LO))
+		return FALSE;
+
+	return TRUE;
+}
+#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+
+/* Function to return the APB bridge details corresponding to the core */
+bool
+ai_get_apb_bridge(si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreuinit)
+{
+	uint i;
+	uint32 core_base, core_end;
+	si_info_t *sii = SI_INFO(sih);
+	static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
+	uint32 tmp_coreunit = 0;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+		return FALSE;
+
+	/* Most of the time the APB bridge query will be for the d11 core.
+	 * Cache the last lookup and return it on a match rather than iterating the table.
+	 */
+	if (coreidx_cached == coreidx) {
+		*apb_id = apb_id_cached;
+		*apb_coreuinit = apb_coreunit_cached;
+		return TRUE;
+	}
+
+	core_base = cores_info->coresba[coreidx];
+	core_end = core_base + cores_info->coresba_size[coreidx];
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == APB_BRIDGE_ID) {
+			uint32 apb_base;
+			uint32 apb_end;
+
+			apb_base = cores_info->coresba[i];
+			apb_end = apb_base + cores_info->coresba_size[i];
+
+			if ((core_base >= apb_base) &&
+				(core_end <= apb_end)) {
+				/* Current core is attached to this APB bridge */
+				*apb_id = apb_id_cached = APB_BRIDGE_ID;
+				*apb_coreuinit = apb_coreunit_cached = tmp_coreunit;
+				coreidx_cached = coreidx;
+				return TRUE;
+			}
+			/* Increment the coreunit */
+			tmp_coreunit++;
+		}
+	}
+
+	return FALSE;
+}
+
+uint32
+ai_clear_backplane_to_fast(si_t *sih, void * addr)
+{
+	si_info_t *sii = SI_INFO(sih);
+	void * curmap = sii->curmap;
+	bool core_reg = FALSE;
+
+	/* Use fast path only for core register access */
+	if ((addr >= curmap) && (addr < (curmap + SI_CORE_SIZE))) {
+		/* address being accessed is within current core reg map */
+		core_reg = TRUE;
+	}
+
+	if (core_reg) {
+		uint32 apb_id, apb_coreuinit;
+
+		if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
+			&apb_id, &apb_coreuinit) == TRUE) {
+			/* Found the APB bridge corresponding to current core,
+			 * Check for bus errors in APB wrapper
+			 */
+			return ai_clear_backplane_to_per_core(sih,
+				apb_id, apb_coreuinit, NULL);
+		}
+	}
+
+	/* Default is to poll for errors on all slave wrappers */
+	return si_clear_backplane_to(sih);
+}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
+/*
+ * API to clear the backplane timeout per core.
+ * The caller may pass an optional wrapper address. If present, it is used as
+ * the wrapper base address; in that case the caller must also provide the
+ * coreid.
+ * If both coreid and wrapper are zero, the error status of the current bridge
+ * is verified.
+ */
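+/*
+ * Illustrative invocations (assumed typical usage):
+ *	ai_clear_backplane_to_per_core(sih, coreid, coreunit, NULL); - look the wrapper up by core
+ *	ai_clear_backplane_to_per_core(sih, cid, 0, (void *)ai);     - use a caller-supplied wrapper
+ *	ai_clear_backplane_to_per_core(sih, 0, 0, NULL);             - check the current core's wrapper
+ */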
+uint32
+ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap)
+{
+	int ret = AXI_WRAP_STS_NONE;
+	aidmp_t *ai = NULL;
+	uint32 errlog_status = 0;
+	si_info_t *sii = SI_INFO(sih);
+	uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
+	uint32 current_coreidx = si_coreidx(sih);
+	uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
+
+#if defined(BCM_BACKPLANE_TIMEOUT)
+	si_axi_error_t * axi_error = &sih->err_info->axi_error[sih->err_info->count];
+#endif /* BCM_BACKPLANE_TIMEOUT */
+	bool restore_core = FALSE;
+
+	if ((sii->axi_num_wrappers == 0) ||
+#ifdef BCM_BACKPLANE_TIMEOUT
+		(!PCIE(sii)) ||
+#endif /* BCM_BACKPLANE_TIMEOUT */
+		FALSE) {
+		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
+			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
+			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+		return AXI_WRAP_STS_NONE;
+	}
+
+	if (wrap != NULL) {
+		ai = (aidmp_t *)wrap;
+	} else if (coreid && (target_coreidx != current_coreidx)) {
+
+		if (ai_setcoreidx(sih, target_coreidx) == NULL) {
+			/* Unable to set the core */
+			SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
+				coreid, coreunit, target_coreidx));
+			errlog_lo = target_coreidx;
+			ret = AXI_WRAP_STS_SET_CORE_FAIL;
+			goto end;
+		}
+
+		restore_core = TRUE;
+		ai = (aidmp_t *)si_wrapperregs(sih);
+	} else {
+		/* Read error status of current wrapper */
+		ai = (aidmp_t *)si_wrapperregs(sih);
+
+		/* Update coreid to the current core ID */
+		coreid = si_coreid(sih);
+	}
+
+	/* read error log status */
+	errlog_status = R_REG(sii->osh, &ai->errlogstatus);
+
+	if (errlog_status == ID32_INVALID) {
+		/* Do not try to peek further */
+		SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
+			__FUNCTION__, errlog_status, coreid));
+		ret = AXI_WRAP_STS_WRAP_RD_ERR;
+		errlog_lo = (uint32)&ai->errlogstatus;
+		goto end;
+	}
+
+	if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
+		uint32 tmp;
+		uint32 count = 0;
+		/* set ErrDone to clear the condition */
+		W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+		/* SPINWAIT on errlogstatus timeout status bits */
+		while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) {
+
+			if (tmp == ID32_INVALID) {
+				SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
+					__FUNCTION__, errlog_status, tmp));
+				ret = AXI_WRAP_STS_WRAP_RD_ERR;
+				errlog_lo = (uint32)&ai->errlogstatus;
+				goto end;
+			}
+			/*
+			 * Clear again, to avoid getting stuck in the loop, if a new error
+			 * is logged after we cleared the first timeout
+			 */
+			W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+			count++;
+			OSL_DELAY(10);
+			if ((10 * count) > AI_REG_READ_TIMEOUT) {
+				errlog_status = tmp;
+				break;
+			}
+		}
+
+		errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
+		errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
+		errlog_id = R_REG(sii->osh, &ai->errlogid);
+		errlog_flags = R_REG(sii->osh, &ai->errlogflags);
+
+		/* we are already in the error path, so OK to check for the  slave error */
+		if (ai_ignore_errlog(sii, errlog_lo, errlog_hi, errlog_id,
+			errlog_status)) {
+			si_ignore_errlog_cnt++;
+			goto end;
+		}
+
+		/* only reset APB Bridge on timeout (not slave error, or dec error) */
+		switch (errlog_status & AIELS_TIMEOUT_MASK) {
+			case AIELS_SLAVE_ERR:
+				SI_PRINT(("AXI slave error"));
+				ret = AXI_WRAP_STS_SLAVE_ERR;
+				break;
+
+			case AIELS_TIMEOUT:
+				/* reset APB Bridge */
+				OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+				/* sync write */
+				(void)R_REG(sii->osh, &ai->resetctrl);
+				/* clear Reset bit */
+				AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
+				/* sync write */
+				(void)R_REG(sii->osh, &ai->resetctrl);
+				SI_PRINT(("AXI timeout"));
+				ret = AXI_WRAP_STS_TIMEOUT;
+				break;
+
+			case AIELS_DECODE:
+				SI_PRINT(("AXI decode error"));
+				ret = AXI_WRAP_STS_DECODE_ERR;
+				break;
+			default:
+				ASSERT(0);	/* should be impossible */
+		}
+
+		SI_PRINT(("\tCoreID: %x\n", coreid));
+		SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
+			", status 0x%08x\n",
+			errlog_lo, errlog_hi, errlog_id, errlog_flags,
+			errlog_status));
+	}
+
+end:
+
+#if defined(BCM_BACKPLANE_TIMEOUT)
+	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
+		axi_error->error = ret;
+		axi_error->coreid = coreid;
+		axi_error->errlog_lo = errlog_lo;
+		axi_error->errlog_hi = errlog_hi;
+		axi_error->errlog_id = errlog_id;
+		axi_error->errlog_flags = errlog_flags;
+		axi_error->errlog_status = errlog_status;
+		sih->err_info->count++;
+
+		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+			SI_PRINT(("AXI Error log overflow\n"));
+		}
+	}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+	if (restore_core) {
+		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
+			/* Unable to set the core */
+			return ID32_INVALID;
+		}
+	}
+
+	return ret;
+}
+#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
+
+/*
+ * This API polls all slave wrappers for errors and returns bit map of
+ * all reported errors.
+ * return - bit map of
+ *	AXI_WRAP_STS_NONE
+ *	AXI_WRAP_STS_TIMEOUT
+ *	AXI_WRAP_STS_SLAVE_ERR
+ *	AXI_WRAP_STS_DECODE_ERR
+ *	AXI_WRAP_STS_PCI_RD_ERR
+ *	AXI_WRAP_STS_WRAP_RD_ERR
+ *	AXI_WRAP_STS_SET_CORE_FAIL
+ * On timeout detection, the corresponding bridge is reset to
+ * unblock the bus.
+ * The error reported by each wrapper can be retrieved using the
+ * si_get_axi_errlog_info() API.
+ */
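+/*
+ * Illustrative check of the returned bit map (assumed usage, not from this file):
+ *
+ *	uint32 sts = ai_clear_backplane_to(sih);
+ *	if (sts & AXI_WRAP_STS_TIMEOUT)
+ *		... a slave wrapper timed out and its bridge was reset ...
+ */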
+uint32
+ai_clear_backplane_to(si_t *sih)
+{
+	uint32 ret = 0;
+#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
+
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 i;
+	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+	uint32 prev_value = 0;
+	osl_t *osh = sii->osh;
+	uint32 cfg_reg = 0;
+	uint32 offset = 0;
+
+	if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
+#else
+	if (sii->axi_num_wrappers == 0)
+#endif
+	{
+		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
+			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
+			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
+		return AXI_WRAP_STS_NONE;
+	}
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+	/* Save and restore wrapper access window */
+	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		if (PCIE_GEN1(sii)) {
+			cfg_reg = PCI_BAR0_WIN2;
+			offset = PCI_BAR0_WIN2_OFFSET;
+		} else if (PCIE_GEN2(sii)) {
+			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
+			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
+		}
+		else {
+			osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
+		}
+
+		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
+
+		if (prev_value == ID32_INVALID) {
+			si_axi_error_t * axi_error =
+				&sih->err_info->axi_error[sih->err_info->count];
+			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
+
+			axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
+			axi_error->errlog_lo = cfg_reg;
+			sih->err_info->count++;
+
+			if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+				sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+				SI_PRINT(("AXI Error log overflow\n"));
+			}
+
+			return ret;
+		}
+	}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+	for (i = 0; i < sii->axi_num_wrappers; ++i) {
+		uint32 tmp;
+
+		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
+			continue;
+		}
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+
+		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+			/* Set BAR0_CORE2_WIN2 to bridge wrapper base address */
+			OSL_PCI_WRITE_CONFIG(osh,
+				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
+
+			/* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
+			ai = (aidmp_t *) ((uint8*)sii->curmap + offset);
+		}
+		else
+#endif /* BCM_BACKPLANE_TIMEOUT */
+		{
+			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
+		}
+
+		tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0, (void*)ai);
+
+		ret |= tmp;
+	}
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+	/* Restore the initial wrapper space */
+	if (prev_value) {
+		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
+	}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
+
+	return ret;
+}
+
+uint
+ai_num_slaveports(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cib;
+
+	cib = cores_info->cib[coreidx];
+	return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcm_app_utils.c b/drivers/net/wireless/bcmdhd/bcm_app_utils.c
new file mode 100644
index 0000000..a5a7a5b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcm_app_utils.c
@@ -0,0 +1,1015 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_app_utils.c 623866 2016-03-09 11:58:34Z $
+ */
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else /* BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+#include <bcmutils.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+
+#ifndef BCMDRIVER
+/*	Take an array of measurements representing a single channel over time and return
+	a summary. Currently implemented as a simple average but could easily evolve
+	into more complex algorithms.
+*/
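+/*
+ * Worked example of the percent mode below: with totals.duration = 1000 and
+ * totals.congest_obss = 250 (in the same time units), the summary reports
+ * congest_obss = 250 * 100 / 1000 = 25, i.e. 25% of the measured time.
+ */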
+cca_congest_channel_req_t *
+cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent)
+{
+	int sec;
+	cca_congest_t totals;
+
+	totals.duration  = 0;
+	totals.congest_ibss  = 0;
+	totals.congest_obss  = 0;
+	totals.interference  = 0;
+	avg->num_secs = 0;
+
+	for (sec = 0; sec < input->num_secs; sec++) {
+		if (input->secs[sec].duration) {
+			totals.duration += input->secs[sec].duration;
+			totals.congest_ibss += input->secs[sec].congest_ibss;
+			totals.congest_obss += input->secs[sec].congest_obss;
+			totals.interference += input->secs[sec].interference;
+			avg->num_secs++;
+		}
+	}
+	avg->chanspec = input->chanspec;
+
+	if (!avg->num_secs || !totals.duration)
+		return (avg);
+
+	if (percent) {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration;
+		avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration;
+		avg->secs[0].interference = totals.interference * 100/totals.duration;
+	} else {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs;
+		avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs;
+		avg->secs[0].interference = totals.interference / avg->num_secs;
+	}
+
+	return (avg);
+}
+
+static void
+cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos)
+{
+	int i;
+	for (*left = 0, i = 0; i < num_bits; i++) {
+		if (isset(bitmap, i)) {
+			(*left)++;
+			*bit_pos = i;
+		}
+	}
+}
+
+static uint8
+spec_to_chan(chanspec_t chspec)
+{
+	uint8 center_ch, edge, primary, sb;
+
+	center_ch = CHSPEC_CHANNEL(chspec);
+
+	if (CHSPEC_BW_LE20(chspec)) {
+		return center_ch;
+	} else {
+		/* the lower edge of the wide channel is half the bw from
+		 * the center channel.
+		 */
+		if (CHSPEC_IS40(chspec)) {
+			edge = center_ch - CH_20MHZ_APART;
+		} else {
+			/* must be 80MHz (until we support more) */
+			ASSERT(CHSPEC_IS80(chspec));
+			edge = center_ch - CH_40MHZ_APART;
+		}
+
+		/* find the channel number of the lowest 20MHz primary channel */
+		primary = edge + CH_10MHZ_APART;
+
+		/* select the actual subband */
+		sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT;
+		primary = primary + sb * CH_20MHZ_APART;
+
+		return primary;
+	}
+}
+
+/*
+	Take an array of measurements representing summaries of different channels.
+	Return a recommended channel.
+	Interference is evil, get rid of that first.
+	Then hunt for the lowest other-BSS traffic.
+	Don't forget that channels with low duration times may not have accurate readings.
+	For the moment, do not overwrite the input array.
+*/
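+/*
+ * Illustrative call (hypothetical names): pick the least congested 2.4 GHz
+ * channel from an array of per-channel summaries produced by
+ * cca_per_chan_summary(), where summaries is a cca_congest_channel_req_t *[]:
+ *
+ *	chanspec_t best;
+ *	if (cca_analyze(summaries, num_chans, CCA_FLAG_2G_ONLY, &best) == 0)
+ *		... best holds the recommended chanspec ...
+ */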
+int
+cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer)
+{
+	uint8 *bitmap = NULL;	/* 38 max channels need 5 bytes = 40 bits */
+	int i, left, winner, ret_val = 0;
+	uint32 min_obss = 1 << 30;
+	uint bitmap_sz;
+
+	bitmap_sz = CEIL(num_chans, NBBY);
+	bitmap = (uint8 *)malloc(bitmap_sz);
+	if (bitmap == NULL) {
+		printf("unable to allocate memory\n");
+		return BCME_NOMEM;
+	}
+
+	memset(bitmap, 0, bitmap_sz);
+	/* Initially, all channels are up for consideration */
+	for (i = 0; i < num_chans; i++) {
+		if (input[i]->chanspec)
+			setbit(bitmap, i);
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_TOO_FEW;
+		goto f_exit;
+	}
+
+	/* Filter for 2.4 GHz Band */
+	if (flags & CCA_FLAG_2G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS2G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for 5 GHz Band */
+	if (flags & CCA_FLAG_5G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS5G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for Duration */
+	if (!(flags & CCA_FLAG_IGNORE_DURATION)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].duration < CCA_THRESH_MILLI)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_DURATION;
+		goto f_exit;
+	}
+
+	/* Filter for channels 1, 6, 11 on the 2.4 GHz band */
+	if (flags & CCA_FLAGS_PREFER_1_6_11) {
+		for (i = 0; i < num_chans; i++) {
+			/* evaluate each channel rather than a stale index from the previous scan */
+			int tmp_channel = spec_to_chan(input[i]->chanspec);
+			int is2g = CHSPEC_IS2G(input[i]->chanspec);
+
+			if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_PREF_CHAN;
+		goto f_exit;
+	}
+
+	/* Toss high interference */
+	if (!(flags & CCA_FLAG_IGNORE_INTERFER)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE)
+				clrbit(bitmap, i);
+		}
+		cca_info(bitmap, num_chans, &left, &i);
+		if (!left) {
+			ret_val = CCA_ERRNO_INTERFER;
+			goto f_exit;
+		}
+	}
+
+	/* Now find lowest obss */
+	winner = 0;
+	for (i = 0; i < num_chans; i++) {
+		if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) {
+			winner = i;
+			min_obss = input[i]->secs[0].congest_obss;
+		}
+	}
+	*answer = input[winner]->chanspec;
+	f_exit:
+	free(bitmap);	/* free the allocated memory for bitmap */
+	return ret_val;
+}
+#endif /* !BCMDRIVER */
+
+/* Index of cntmember, in units of sizeof(uint32), from the first cnt variable, txframe. */
+#define IDX_IN_WL_CNT_VER_6_T(cntmember)		\
+	((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32))
+
+#define IDX_IN_WL_CNT_VER_11_T(cntmember)		\
+	((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe))	\
+	/ sizeof(uint32))
+
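+/*
+ * Worked example (assuming txbyte is the uint32 immediately following txframe in
+ * these structs): IDX_IN_WL_CNT_VER_11_T(txbyte) evaluates to
+ * (OFFSETOF(txbyte) - OFFSETOF(txframe)) / sizeof(uint32) = 4 / 4 = 1, i.e. the
+ * second 32-bit counter slot.
+ */
+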
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_6_T	\
+	((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T			\
+	(NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_11_T	\
+	((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude 64 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T		\
+	(NUM_OF_CNT_IN_WL_CNT_VER_11_T - WL_CNT_MCST_VAR_NUM)
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */
+static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = {
+	IDX_IN_WL_CNT_VER_6_T(txframe),
+	IDX_IN_WL_CNT_VER_6_T(txbyte),
+	IDX_IN_WL_CNT_VER_6_T(txretrans),
+	IDX_IN_WL_CNT_VER_6_T(txerror),
+	IDX_IN_WL_CNT_VER_6_T(txctl),
+	IDX_IN_WL_CNT_VER_6_T(txprshort),
+	IDX_IN_WL_CNT_VER_6_T(txserr),
+	IDX_IN_WL_CNT_VER_6_T(txnobuf),
+	IDX_IN_WL_CNT_VER_6_T(txnoassoc),
+	IDX_IN_WL_CNT_VER_6_T(txrunt),
+	IDX_IN_WL_CNT_VER_6_T(txchit),
+	IDX_IN_WL_CNT_VER_6_T(txcmiss),
+	IDX_IN_WL_CNT_VER_6_T(txuflo),
+	IDX_IN_WL_CNT_VER_6_T(txphyerr),
+	IDX_IN_WL_CNT_VER_6_T(txphycrs),
+	IDX_IN_WL_CNT_VER_6_T(rxframe),
+	IDX_IN_WL_CNT_VER_6_T(rxbyte),
+	IDX_IN_WL_CNT_VER_6_T(rxerror),
+	IDX_IN_WL_CNT_VER_6_T(rxctl),
+	IDX_IN_WL_CNT_VER_6_T(rxnobuf),
+	IDX_IN_WL_CNT_VER_6_T(rxnondata),
+	IDX_IN_WL_CNT_VER_6_T(rxbadds),
+	IDX_IN_WL_CNT_VER_6_T(rxbadcm),
+	IDX_IN_WL_CNT_VER_6_T(rxfragerr),
+	IDX_IN_WL_CNT_VER_6_T(rxrunt),
+	IDX_IN_WL_CNT_VER_6_T(rxgiant),
+	IDX_IN_WL_CNT_VER_6_T(rxnoscb),
+	IDX_IN_WL_CNT_VER_6_T(rxbadproto),
+	IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac),
+	IDX_IN_WL_CNT_VER_6_T(rxbadda),
+	IDX_IN_WL_CNT_VER_6_T(rxfilter),
+	IDX_IN_WL_CNT_VER_6_T(rxoflo),
+	IDX_IN_WL_CNT_VER_6_T(rxuflo),
+	IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1,
+	IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2,
+	IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3,
+	IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4,
+	IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5,
+	IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off),
+	IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off),
+	IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off),
+	IDX_IN_WL_CNT_VER_6_T(dmade),
+	IDX_IN_WL_CNT_VER_6_T(dmada),
+	IDX_IN_WL_CNT_VER_6_T(dmape),
+	IDX_IN_WL_CNT_VER_6_T(reset),
+	IDX_IN_WL_CNT_VER_6_T(tbtt),
+	IDX_IN_WL_CNT_VER_6_T(txdmawar),
+	IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail),
+	IDX_IN_WL_CNT_VER_6_T(txfrag),
+	IDX_IN_WL_CNT_VER_6_T(txmulti),
+	IDX_IN_WL_CNT_VER_6_T(txfail),
+	IDX_IN_WL_CNT_VER_6_T(txretry),
+	IDX_IN_WL_CNT_VER_6_T(txretrie),
+	IDX_IN_WL_CNT_VER_6_T(rxdup),
+	IDX_IN_WL_CNT_VER_6_T(txrts),
+	IDX_IN_WL_CNT_VER_6_T(txnocts),
+	IDX_IN_WL_CNT_VER_6_T(txnoack),
+	IDX_IN_WL_CNT_VER_6_T(rxfrag),
+	IDX_IN_WL_CNT_VER_6_T(rxmulti),
+	IDX_IN_WL_CNT_VER_6_T(rxcrc),
+	IDX_IN_WL_CNT_VER_6_T(txfrmsnt),
+	IDX_IN_WL_CNT_VER_6_T(rxundec),
+	IDX_IN_WL_CNT_VER_6_T(tkipmicfaill),
+	IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr),
+	IDX_IN_WL_CNT_VER_6_T(tkipreplay),
+	IDX_IN_WL_CNT_VER_6_T(ccmpfmterr),
+	IDX_IN_WL_CNT_VER_6_T(ccmpreplay),
+	IDX_IN_WL_CNT_VER_6_T(ccmpundec),
+	IDX_IN_WL_CNT_VER_6_T(fourwayfail),
+	IDX_IN_WL_CNT_VER_6_T(wepundec),
+	IDX_IN_WL_CNT_VER_6_T(wepicverr),
+	IDX_IN_WL_CNT_VER_6_T(decsuccess),
+	IDX_IN_WL_CNT_VER_6_T(tkipicverr),
+	IDX_IN_WL_CNT_VER_6_T(wepexcluded),
+	IDX_IN_WL_CNT_VER_6_T(txchanrej),
+	IDX_IN_WL_CNT_VER_6_T(psmwds),
+	IDX_IN_WL_CNT_VER_6_T(phywatchdog),
+	IDX_IN_WL_CNT_VER_6_T(prq_entries_handled),
+	IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries),
+	IDX_IN_WL_CNT_VER_6_T(prq_bad_entries),
+	IDX_IN_WL_CNT_VER_6_T(atim_suppress_count),
+	IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready),
+	IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done),
+	IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc),
+	IDX_IN_WL_CNT_VER_6_T(rx1mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx2mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx5mbps5),
+	IDX_IN_WL_CNT_VER_6_T(rx6mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx9mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx11mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx12mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx18mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx24mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx36mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx48mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx54mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx108mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx162mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx216mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx270mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx324mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx378mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx432mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx486mbps),
+	IDX_IN_WL_CNT_VER_6_T(rx540mbps),
+	IDX_IN_WL_CNT_VER_6_T(rfdisable),
+	IDX_IN_WL_CNT_VER_6_T(txexptime),
+	IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi),
+	IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi),
+	IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc),
+	IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc),
+	IDX_IN_WL_CNT_VER_6_T(rxundec_mcst),
+	IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst),
+	IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst),
+	IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst),
+	IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst),
+	IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst),
+	IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst),
+	IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst),
+	IDX_IN_WL_CNT_VER_6_T(wepundec_mcst),
+	IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst),
+	IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst),
+	IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst),
+	IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst)
+};
+
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */
+static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = {
+	IDX_IN_WL_CNT_VER_11_T(txframe),
+	IDX_IN_WL_CNT_VER_11_T(txbyte),
+	IDX_IN_WL_CNT_VER_11_T(txretrans),
+	IDX_IN_WL_CNT_VER_11_T(txerror),
+	IDX_IN_WL_CNT_VER_11_T(txctl),
+	IDX_IN_WL_CNT_VER_11_T(txprshort),
+	IDX_IN_WL_CNT_VER_11_T(txserr),
+	IDX_IN_WL_CNT_VER_11_T(txnobuf),
+	IDX_IN_WL_CNT_VER_11_T(txnoassoc),
+	IDX_IN_WL_CNT_VER_11_T(txrunt),
+	IDX_IN_WL_CNT_VER_11_T(txchit),
+	IDX_IN_WL_CNT_VER_11_T(txcmiss),
+	IDX_IN_WL_CNT_VER_11_T(txuflo),
+	IDX_IN_WL_CNT_VER_11_T(txphyerr),
+	IDX_IN_WL_CNT_VER_11_T(txphycrs),
+	IDX_IN_WL_CNT_VER_11_T(rxframe),
+	IDX_IN_WL_CNT_VER_11_T(rxbyte),
+	IDX_IN_WL_CNT_VER_11_T(rxerror),
+	IDX_IN_WL_CNT_VER_11_T(rxctl),
+	IDX_IN_WL_CNT_VER_11_T(rxnobuf),
+	IDX_IN_WL_CNT_VER_11_T(rxnondata),
+	IDX_IN_WL_CNT_VER_11_T(rxbadds),
+	IDX_IN_WL_CNT_VER_11_T(rxbadcm),
+	IDX_IN_WL_CNT_VER_11_T(rxfragerr),
+	IDX_IN_WL_CNT_VER_11_T(rxrunt),
+	IDX_IN_WL_CNT_VER_11_T(rxgiant),
+	IDX_IN_WL_CNT_VER_11_T(rxnoscb),
+	IDX_IN_WL_CNT_VER_11_T(rxbadproto),
+	IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac),
+	IDX_IN_WL_CNT_VER_11_T(rxbadda),
+	IDX_IN_WL_CNT_VER_11_T(rxfilter),
+	IDX_IN_WL_CNT_VER_11_T(rxoflo),
+	IDX_IN_WL_CNT_VER_11_T(rxuflo),
+	IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1,
+	IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2,
+	IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3,
+	IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4,
+	IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5,
+	IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off),
+	IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off),
+	IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off),
+	IDX_IN_WL_CNT_VER_11_T(dmade),
+	IDX_IN_WL_CNT_VER_11_T(dmada),
+	IDX_IN_WL_CNT_VER_11_T(dmape),
+	IDX_IN_WL_CNT_VER_11_T(reset),
+	IDX_IN_WL_CNT_VER_11_T(tbtt),
+	IDX_IN_WL_CNT_VER_11_T(txdmawar),
+	IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail),
+	IDX_IN_WL_CNT_VER_11_T(txfrag),
+	IDX_IN_WL_CNT_VER_11_T(txmulti),
+	IDX_IN_WL_CNT_VER_11_T(txfail),
+	IDX_IN_WL_CNT_VER_11_T(txretry),
+	IDX_IN_WL_CNT_VER_11_T(txretrie),
+	IDX_IN_WL_CNT_VER_11_T(rxdup),
+	IDX_IN_WL_CNT_VER_11_T(txrts),
+	IDX_IN_WL_CNT_VER_11_T(txnocts),
+	IDX_IN_WL_CNT_VER_11_T(txnoack),
+	IDX_IN_WL_CNT_VER_11_T(rxfrag),
+	IDX_IN_WL_CNT_VER_11_T(rxmulti),
+	IDX_IN_WL_CNT_VER_11_T(rxcrc),
+	IDX_IN_WL_CNT_VER_11_T(txfrmsnt),
+	IDX_IN_WL_CNT_VER_11_T(rxundec),
+	IDX_IN_WL_CNT_VER_11_T(tkipmicfaill),
+	IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr),
+	IDX_IN_WL_CNT_VER_11_T(tkipreplay),
+	IDX_IN_WL_CNT_VER_11_T(ccmpfmterr),
+	IDX_IN_WL_CNT_VER_11_T(ccmpreplay),
+	IDX_IN_WL_CNT_VER_11_T(ccmpundec),
+	IDX_IN_WL_CNT_VER_11_T(fourwayfail),
+	IDX_IN_WL_CNT_VER_11_T(wepundec),
+	IDX_IN_WL_CNT_VER_11_T(wepicverr),
+	IDX_IN_WL_CNT_VER_11_T(decsuccess),
+	IDX_IN_WL_CNT_VER_11_T(tkipicverr),
+	IDX_IN_WL_CNT_VER_11_T(wepexcluded),
+	IDX_IN_WL_CNT_VER_11_T(txchanrej),
+	IDX_IN_WL_CNT_VER_11_T(psmwds),
+	IDX_IN_WL_CNT_VER_11_T(phywatchdog),
+	IDX_IN_WL_CNT_VER_11_T(prq_entries_handled),
+	IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries),
+	IDX_IN_WL_CNT_VER_11_T(prq_bad_entries),
+	IDX_IN_WL_CNT_VER_11_T(atim_suppress_count),
+	IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready),
+	IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done),
+	IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc),
+	IDX_IN_WL_CNT_VER_11_T(rx1mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx2mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx5mbps5),
+	IDX_IN_WL_CNT_VER_11_T(rx6mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx9mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx11mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx12mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx18mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx24mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx36mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx48mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx54mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx108mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx162mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx216mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx270mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx324mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx378mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx432mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx486mbps),
+	IDX_IN_WL_CNT_VER_11_T(rx540mbps),
+	IDX_IN_WL_CNT_VER_11_T(rfdisable),
+	IDX_IN_WL_CNT_VER_11_T(txexptime),
+	IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi),
+	IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi),
+	IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc),
+	IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc),
+	IDX_IN_WL_CNT_VER_11_T(rxundec_mcst),
+	IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst),
+	IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst),
+	IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst),
+	IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst),
+	IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst),
+	IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst),
+	IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst),
+	IDX_IN_WL_CNT_VER_11_T(wepundec_mcst),
+	IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst),
+	IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst),
+	IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst),
+	IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst),
+	IDX_IN_WL_CNT_VER_11_T(dma_hang),
+	IDX_IN_WL_CNT_VER_11_T(reinit),
+	IDX_IN_WL_CNT_VER_11_T(pstatxucast),
+	IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc),
+	IDX_IN_WL_CNT_VER_11_T(pstarxucast),
+	IDX_IN_WL_CNT_VER_11_T(pstarxbcmc),
+	IDX_IN_WL_CNT_VER_11_T(pstatxbcmc),
+	IDX_IN_WL_CNT_VER_11_T(cso_passthrough),
+	IDX_IN_WL_CNT_VER_11_T(cso_normal),
+	IDX_IN_WL_CNT_VER_11_T(chained),
+	IDX_IN_WL_CNT_VER_11_T(chainedsz1),
+	IDX_IN_WL_CNT_VER_11_T(unchained),
+	IDX_IN_WL_CNT_VER_11_T(maxchainsz),
+	IDX_IN_WL_CNT_VER_11_T(currchainsz),
+	IDX_IN_WL_CNT_VER_11_T(pciereset),
+	IDX_IN_WL_CNT_VER_11_T(cfgrestore),
+	IDX_IN_WL_CNT_VER_11_T(reinitreason),
+	IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1,
+	IDX_IN_WL_CNT_VER_11_T(reinitreason) + 2,
+	IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3,
+	IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4,
+	IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5,
+	IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6,
+	IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7,
+	IDX_IN_WL_CNT_VER_11_T(rxrtry),
+	IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu),
+	IDX_IN_WL_CNT_VER_11_T(txbar),
+	IDX_IN_WL_CNT_VER_11_T(rxbar),
+	IDX_IN_WL_CNT_VER_11_T(txpspoll),
+	IDX_IN_WL_CNT_VER_11_T(rxpspoll),
+	IDX_IN_WL_CNT_VER_11_T(txnull),
+	IDX_IN_WL_CNT_VER_11_T(rxnull),
+	IDX_IN_WL_CNT_VER_11_T(txqosnull),
+	IDX_IN_WL_CNT_VER_11_T(rxqosnull),
+	IDX_IN_WL_CNT_VER_11_T(txassocreq),
+	IDX_IN_WL_CNT_VER_11_T(rxassocreq),
+	IDX_IN_WL_CNT_VER_11_T(txreassocreq),
+	IDX_IN_WL_CNT_VER_11_T(rxreassocreq),
+	IDX_IN_WL_CNT_VER_11_T(txdisassoc),
+	IDX_IN_WL_CNT_VER_11_T(rxdisassoc),
+	IDX_IN_WL_CNT_VER_11_T(txassocrsp),
+	IDX_IN_WL_CNT_VER_11_T(rxassocrsp),
+	IDX_IN_WL_CNT_VER_11_T(txreassocrsp),
+	IDX_IN_WL_CNT_VER_11_T(rxreassocrsp),
+	IDX_IN_WL_CNT_VER_11_T(txauth),
+	IDX_IN_WL_CNT_VER_11_T(rxauth),
+	IDX_IN_WL_CNT_VER_11_T(txdeauth),
+	IDX_IN_WL_CNT_VER_11_T(rxdeauth),
+	IDX_IN_WL_CNT_VER_11_T(txprobereq),
+	IDX_IN_WL_CNT_VER_11_T(rxprobereq),
+	IDX_IN_WL_CNT_VER_11_T(txprobersp),
+	IDX_IN_WL_CNT_VER_11_T(rxprobersp),
+	IDX_IN_WL_CNT_VER_11_T(txaction),
+	IDX_IN_WL_CNT_VER_11_T(rxaction),
+	IDX_IN_WL_CNT_VER_11_T(ampdu_wds),
+	IDX_IN_WL_CNT_VER_11_T(txlost),
+	IDX_IN_WL_CNT_VER_11_T(txdatamcast),
+	IDX_IN_WL_CNT_VER_11_T(txdatabcast)
+};
+
+/* Index conversion table from wl_cnt_ver_11_t to
+ * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t
+ */
+static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = {
+	IDX_IN_WL_CNT_VER_11_T(txallfrm),
+	IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+	IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+	IDX_IN_WL_CNT_VER_11_T(txackfrm),
+	IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+	IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+	IDX_IN_WL_CNT_VER_11_T(txfunfl),
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+	IDX_IN_WL_CNT_VER_11_T(txfbw),
+	IDX_IN_WL_CNT_VER_11_T(txmpdu),
+	IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+	IDX_IN_WL_CNT_VER_11_T(txphyerror),
+	IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+	IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+	IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+	IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+	IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+	IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+	IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+	IDX_IN_WL_CNT_VER_11_T(rxstrt),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+	IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+	IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+	IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+	IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+	IDX_IN_WL_CNT_VER_11_T(rxackucast),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+	IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+	IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+	IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+	IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+	IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+	IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+	IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+	IDX_IN_WL_CNT_VER_11_T(rxnodelim),
+	IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+	IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+	IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+	IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+	IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+	IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+	IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+	IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+	IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+	IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+	IDX_IN_WL_CNT_VER_11_T(rxnack),
+	IDX_IN_WL_CNT_VER_11_T(frmscons),
+	IDX_IN_WL_CNT_VER_11_T(txnack),
+	IDX_IN_WL_CNT_VER_11_T(rxback),
+	IDX_IN_WL_CNT_VER_11_T(txback),
+	IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+	IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+	IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+	IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
+
+/* For mcst offsets that were not used. (2 Pads) */
+#define INVALID_MCST_IDX ((uint8)(-1))
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */
+static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+	IDX_IN_WL_CNT_VER_11_T(txallfrm),
+	IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+	IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+	IDX_IN_WL_CNT_VER_11_T(txackfrm),
+	IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+	IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+	IDX_IN_WL_CNT_VER_11_T(txfunfl),
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+	IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+	IDX_IN_WL_CNT_VER_11_T(txfbw),
+	INVALID_MCST_IDX,
+	IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+	IDX_IN_WL_CNT_VER_11_T(txphyerror),
+	IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+	IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+	IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+	IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+	IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+	IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+	IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+	IDX_IN_WL_CNT_VER_11_T(rxstrt),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+	IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+	IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+	IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+	IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+	IDX_IN_WL_CNT_VER_11_T(rxackucast),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+	IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+	IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+	IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+	IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+	IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+	IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+	IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+	IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+	IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+	INVALID_MCST_IDX,
+	IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+	IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+	IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+	IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+	IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+	IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+	IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+	IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+	IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+	IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+	IDX_IN_WL_CNT_VER_11_T(rxnack),
+	IDX_IN_WL_CNT_VER_11_T(frmscons),
+	IDX_IN_WL_CNT_VER_11_T(txnack),
+	IDX_IN_WL_CNT_VER_11_T(rxback),
+	IDX_IN_WL_CNT_VER_11_T(txback),
+	IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+	IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+	IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+	IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
+
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */
+static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+	IDX_IN_WL_CNT_VER_6_T(txallfrm),
+	IDX_IN_WL_CNT_VER_6_T(txrtsfrm),
+	IDX_IN_WL_CNT_VER_6_T(txctsfrm),
+	IDX_IN_WL_CNT_VER_6_T(txackfrm),
+	IDX_IN_WL_CNT_VER_6_T(txdnlfrm),
+	IDX_IN_WL_CNT_VER_6_T(txbcnfrm),
+	IDX_IN_WL_CNT_VER_6_T(txfunfl),
+	IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1,
+	IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2,
+	IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3,
+	IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4,
+	IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5,
+	IDX_IN_WL_CNT_VER_6_T(txfbw),
+	INVALID_MCST_IDX,
+	IDX_IN_WL_CNT_VER_6_T(txtplunfl),
+	IDX_IN_WL_CNT_VER_6_T(txphyerror),
+	IDX_IN_WL_CNT_VER_6_T(pktengrxducast),
+	IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast),
+	IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong),
+	IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt),
+	IDX_IN_WL_CNT_VER_6_T(rxinvmachdr),
+	IDX_IN_WL_CNT_VER_6_T(rxbadfcs),
+	IDX_IN_WL_CNT_VER_6_T(rxbadplcp),
+	IDX_IN_WL_CNT_VER_6_T(rxcrsglitch),
+	IDX_IN_WL_CNT_VER_6_T(rxstrt),
+	IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss),
+	IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss),
+	IDX_IN_WL_CNT_VER_6_T(rxcfrmucast),
+	IDX_IN_WL_CNT_VER_6_T(rxrtsucast),
+	IDX_IN_WL_CNT_VER_6_T(rxctsucast),
+	IDX_IN_WL_CNT_VER_6_T(rxackucast),
+	IDX_IN_WL_CNT_VER_6_T(rxdfrmocast),
+	IDX_IN_WL_CNT_VER_6_T(rxmfrmocast),
+	IDX_IN_WL_CNT_VER_6_T(rxcfrmocast),
+	IDX_IN_WL_CNT_VER_6_T(rxrtsocast),
+	IDX_IN_WL_CNT_VER_6_T(rxctsocast),
+	IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast),
+	IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast),
+	IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast),
+	IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss),
+	IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss),
+	IDX_IN_WL_CNT_VER_6_T(rxbeaconobss),
+	IDX_IN_WL_CNT_VER_6_T(rxrsptmout),
+	IDX_IN_WL_CNT_VER_6_T(bcntxcancl),
+	INVALID_MCST_IDX,
+	IDX_IN_WL_CNT_VER_6_T(rxf0ovfl),
+	IDX_IN_WL_CNT_VER_6_T(rxf1ovfl),
+	IDX_IN_WL_CNT_VER_6_T(rxf2ovfl),
+	IDX_IN_WL_CNT_VER_6_T(txsfovfl),
+	IDX_IN_WL_CNT_VER_6_T(pmqovfl),
+	IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm),
+	IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl),
+	IDX_IN_WL_CNT_VER_6_T(txcgprsfail),
+	IDX_IN_WL_CNT_VER_6_T(txcgprssuc),
+	IDX_IN_WL_CNT_VER_6_T(prs_timeout),
+	IDX_IN_WL_CNT_VER_6_T(rxnack),
+	IDX_IN_WL_CNT_VER_6_T(frmscons),
+	IDX_IN_WL_CNT_VER_6_T(txnack),
+	IDX_IN_WL_CNT_VER_6_T(rxback),
+	IDX_IN_WL_CNT_VER_6_T(txback),
+	IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch),
+	IDX_IN_WL_CNT_VER_6_T(rxdrop20s),
+	IDX_IN_WL_CNT_VER_6_T(rxtoolate),
+	IDX_IN_WL_CNT_VER_6_T(bphy_badplcp)
+};
+
+/* copy wlc layer counters from old type cntbuf to wl_cnt_wlc_t type. */
+static int
+wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx)
+{
+	uint i;
+	if (dst == NULL || src == NULL) {
+		return BCME_ERROR;
+	}
+
+	/* Init wlccnt with invalid values. Values left unchanged will not be printed out */
+	for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) {
+		dst[i] = INVALID_CNT_VAL;
+	}
+
+	if (cntver == WL_CNT_VERSION_6) {
+		for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) {
+			if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) {
+				/* src buffer does not have counters from here */
+				break;
+			}
+			dst[i] = src[wlcntver6t_to_wlcntwlct[i]];
+		}
+	} else {
+		for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) {
+			if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) {
+				/* src buffer does not have counters from here */
+				break;
+			}
+			dst[i] = src[wlcntver11t_to_wlcntwlct[i]];
+		}
+	}
+	return BCME_OK;
+}
+
+/* copy macstat counters from old type cntbuf to wl_cnt_v_le10_mcst_t type. */
+static int
+wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src)
+{
+	uint i;
+
+	if (dst == NULL || src == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (cntver == WL_CNT_VERSION_6) {
+		for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+			if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) {
+				/* This mcst counter does not exist in wl_cnt_ver_6_t */
+				dst[i] = INVALID_CNT_VAL;
+			} else {
+				dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]];
+			}
+		}
+	} else {
+		for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+			if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) {
+				/* This mcst counter does not exist in wl_cnt_ver_11_t */
+				dst[i] = INVALID_CNT_VAL;
+			} else {
+				dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]];
+			}
+		}
+	}
+	return BCME_OK;
+}
+
+static int
+wl_copy_macstat_ver11(uint32 *dst, uint32 *src)
+{
+	uint i;
+
+	if (dst == NULL || src == NULL) {
+		return BCME_ERROR;
+	}
+
+	for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+		dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]];
+	}
+	return BCME_OK;
+}
+
+/**
+ * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format.
+ * Parameters:
+ *	cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW.
+ *		Newly translated xtlv format is written to this pointer.
+ *	buflen: length of the "cntbuf" without any padding.
+ *	corerev: chip core revision of the driver/FW.
+ */
+int
+wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev)
+{
+	wl_cnt_wlc_t *wlccnt = NULL;
+	uint32 *macstat = NULL;
+	xtlv_desc_t xtlv_desc[3];
+	uint16 mcst_xtlv_id;
+	int res = BCME_OK;
+	wl_cnt_info_t *cntinfo = cntbuf;
+	void *xtlvbuf_p = cntinfo->data;
+	uint16 ver = cntinfo->version;
+	uint16 xtlvbuflen = (uint16)buflen;
+	uint16 src_max_idx;
+#ifdef BCMDRIVER
+	osl_t *osh = ctx;
+#else
+	BCM_REFERENCE(ctx);
+#endif
+
+	if (ver >= WL_CNT_VERSION_XTLV) {
+		/* Already in xtlv format. */
+		goto exit;
+	}
+
+#ifdef BCMDRIVER
+	wlccnt = MALLOC(osh, sizeof(*wlccnt));
+	macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ);
+#else
+	wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt));
+	macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ);
+#endif
+	if (!wlccnt || !macstat) {
+		printf("%s: malloc fail!\n", __FUNCTION__);
+		res = BCME_NOMEM;
+		goto exit;
+	}
+
+	/* Check if the max idx in the struct exceeds the boundary of uint8 */
+	if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) ||
+		NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) {
+		printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+			" to be of uint16 instead of uint8\n");
+		res = BCME_ERROR;
+		goto exit;
+	}
+
+	/* Exclude the version and length fields of wl_cnt_ver_6_t / wl_cnt_ver_11_t */
+	src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32);
+	if (src_max_idx > (uint8)(-1)) {
+		printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+			" to be of uint16 instead of uint8\n"
+			"Try updating wl utility to the latest.\n");
+		src_max_idx = (uint8)(-1);
+	}
+
+	/* Copy wlc layer counters to wl_cnt_wlc_t */
+	res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx);
+	if (res != BCME_OK) {
+		printf("wl_copy_wlccnt fail!\n");
+		goto exit;
+	}
+
+	/* Copy macstat counters to the corresponding mcst structure */
+	if (ver == WL_CNT_VERSION_11) {
+		res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data);
+		if (res != BCME_OK) {
+			printf("wl_copy_macstat_ver11 fail!\n");
+			goto exit;
+		}
+		if (corerev >= 40) {
+			mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1;
+		} else {
+			mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1;
+		}
+	} else {
+		res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data);
+		if (res != BCME_OK) {
+			printf("wl_copy_macstat_upto_ver10 fail!\n");
+			goto exit;
+		}
+		mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE;
+	}
+
+	xtlv_desc[0].type = WL_CNT_XTLV_WLC;
+	xtlv_desc[0].len = sizeof(*wlccnt);
+	xtlv_desc[0].ptr = wlccnt;
+
+	xtlv_desc[1].type = mcst_xtlv_id;
+	xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ;
+	xtlv_desc[1].ptr = macstat;
+
+	xtlv_desc[2].type = 0;
+	xtlv_desc[2].len = 0;
+	xtlv_desc[2].ptr = NULL;
+
+	memset(cntbuf, 0, buflen);
+
+	res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen,
+		xtlv_desc, BCM_XTLV_OPTION_ALIGN32);
+	cntinfo->datalen = (buflen - xtlvbuflen);
+exit:
+#ifdef BCMDRIVER
+	if (wlccnt) {
+		MFREE(osh, wlccnt, sizeof(*wlccnt));
+	}
+	if (macstat) {
+		MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ);
+	}
+#else
+	if (wlccnt) {
+		free(wlccnt);
+	}
+	if (macstat) {
+		free(macstat);
+	}
+#endif
+	return res;
+}
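+
+/* A minimal usage sketch (illustrative only; the iov_resp, iov_resp_len and
+ * corerev names below are hypothetical). A caller that has read a legacy
+ * "counters" IOVar result could convert it in place before parsing it as xtlv;
+ * the first argument is the osl handle when built with BCMDRIVER and is
+ * otherwise ignored:
+ *
+ *	if (wl_cntbuf_to_xtlv_format(osh, iov_resp, iov_resp_len, corerev) == BCME_OK) {
+ *		// iov_resp now holds a wl_cnt_info_t whose data is xtlv formatted
+ *	}
+ */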
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 0000000..34aef8a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,382 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmevent.c 707287 2017-06-27 06:44:29Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <bcmeth.h>
+#include <bcmevent.h>
+#include <802.11.h>
+
+/* Table of event name strings for UIs and debugging dumps */
+typedef struct {
+	uint event;
+	const char *name;
+} bcmevent_name_str_t;
+
+/* Use the actual name for event tracing */
+#define BCMEVENT_NAME(_event) {(_event), #_event}
+
+static const bcmevent_name_str_t bcmevent_names[] = {
+	BCMEVENT_NAME(WLC_E_SET_SSID),
+	BCMEVENT_NAME(WLC_E_JOIN),
+	BCMEVENT_NAME(WLC_E_START),
+	BCMEVENT_NAME(WLC_E_AUTH),
+	BCMEVENT_NAME(WLC_E_AUTH_IND),
+	BCMEVENT_NAME(WLC_E_DEAUTH),
+	BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+	BCMEVENT_NAME(WLC_E_ASSOC),
+	BCMEVENT_NAME(WLC_E_ASSOC_IND),
+	BCMEVENT_NAME(WLC_E_REASSOC),
+	BCMEVENT_NAME(WLC_E_REASSOC_IND),
+	BCMEVENT_NAME(WLC_E_DISASSOC),
+	BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+	BCMEVENT_NAME(WLC_E_QUIET_START),
+	BCMEVENT_NAME(WLC_E_QUIET_END),
+	BCMEVENT_NAME(WLC_E_BEACON_RX),
+	BCMEVENT_NAME(WLC_E_LINK),
+	BCMEVENT_NAME(WLC_E_MIC_ERROR),
+	BCMEVENT_NAME(WLC_E_NDIS_LINK),
+	BCMEVENT_NAME(WLC_E_ROAM),
+	BCMEVENT_NAME(WLC_E_TXFAIL),
+	BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+	BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+	BCMEVENT_NAME(WLC_E_PRUNE),
+	BCMEVENT_NAME(WLC_E_AUTOAUTH),
+	BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_ADDTS_IND),
+	BCMEVENT_NAME(WLC_E_DELTS_IND),
+	BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+	BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+	BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+	BCMEVENT_NAME(WLC_E_ROAM_PREP),
+	BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+	BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
+	BCMEVENT_NAME(WLC_E_JOIN_START),
+	BCMEVENT_NAME(WLC_E_ROAM_START),
+	BCMEVENT_NAME(WLC_E_ASSOC_START),
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+	BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+	BCMEVENT_NAME(WLC_E_RADIO),
+	BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
+	BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+	BCMEVENT_NAME(WLC_E_PSK_SUP),
+	BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+	BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+	BCMEVENT_NAME(WLC_E_ICV_ERROR),
+	BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_TRACE),
+	BCMEVENT_NAME(WLC_E_IF),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
+#endif
+	BCMEVENT_NAME(WLC_E_RSSI),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
+#ifdef BCMWAPI_WAI
+	BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
+	BCMEVENT_NAME(WLC_E_WAI_MSG),
+#endif /* BCMWAPI_WAI */
+	BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+	BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
+#endif
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
+#endif
+	BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+	BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+#ifdef WLMEDIA_HTSF
+	BCMEVENT_NAME(WLC_E_HTSFSYNC),
+#endif
+	BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+	BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+	BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+#ifdef SOFTAP
+	BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
+#endif
+	BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+	BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+	BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
+#ifdef WLTDLS
+	BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
+#endif /* WLTDLS */
+	BCMEVENT_NAME(WLC_E_NATIVE),
+#ifdef WLPKTDLYSTAT
+	BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
+#endif /* WLPKTDLYSTAT */
+	BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+	BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+	BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+	BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+	BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
+#ifdef WLWNM
+	BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
+#endif /* WLWNM */
+#if defined(WL_PROXDETECT)
+	BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+	BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+	BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+	BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND),
+	BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+#ifdef GSCAN_SUPPORT
+	BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT),
+	BCMEVENT_NAME(WLC_E_PFN_SSID_EXT),
+#endif /* GSCAN_SUPPORT */
+#ifdef WLBSSLOAD_REPORT
+	BCMEVENT_NAME(WLC_E_BSS_LOAD),
+#endif
+#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
+	BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
+#endif
+	BCMEVENT_NAME(WLC_E_AUTHORIZED),
+	BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX),
+	BCMEVENT_NAME(WLC_E_CSA_START_IND),
+	BCMEVENT_NAME(WLC_E_CSA_DONE_IND),
+	BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND),
+	BCMEVENT_NAME(WLC_E_RMC_EVENT),
+	BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND),
+	BCMEVENT_NAME(WLC_E_ALLOW_CREDIT_BORROW),
+	BCMEVENT_NAME(WLC_E_MSCH),
+	BCMEVENT_NAME(WLC_E_ULP),
+	BCMEVENT_NAME(WLC_E_PSK_AUTH),
+	BCMEVENT_NAME(WLC_E_SDB_TRANSITION),
+};
+
+
+const char *bcmevent_get_name(uint event_type)
+{
+	/* Note: this was first coded as a static const, but some
+	 * ROMs already have something called event_name, so it was
+	 * changed so that we don't need a variable for the
+	 * 'unknown' string.
+	 */
+	const char *event_name = NULL;
+
+	uint idx;
+	for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+		if (bcmevent_names[idx].event == event_type) {
+			event_name = bcmevent_names[idx].name;
+			break;
+		}
+	}
+
+	/* if we find an event name in the array, return it.
+	 * otherwise return unknown string.
+	 */
+	return ((event_name) ? event_name : "Unknown Event");
+}
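+
+/* Usage note (illustrative only): this is intended for debug dumps, e.g.
+ * printf("event %u (%s)\n", event_type, bcmevent_get_name(event_type));
+ * unknown event types map to the "Unknown Event" string rather than NULL.
+ */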
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+	/* Event struct members passed from dongle to host are stored in network
+	* byte order. Convert all members to host-order.
+	*/
+	evt->event_type = ntoh32(evt->event_type);
+	evt->flags = ntoh16(evt->flags);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+	evt->version = ntoh16(evt->version);
+}
+
+void
+wl_event_to_network_order(wl_event_msg_t * evt)
+{
+	/* Event struct members are exchanged with the dongle in network
+	* byte order. Convert all members back to network order.
+	*/
+	evt->event_type = hton32(evt->event_type);
+	evt->flags = hton16(evt->flags);
+	evt->status = hton32(evt->status);
+	evt->reason = hton32(evt->reason);
+	evt->auth_type = hton32(evt->auth_type);
+	evt->datalen = hton32(evt->datalen);
+	evt->version = hton16(evt->version);
+}
+
+/*
+ * Validate the event and, if it is valid, copy the event header to out_event.
+ * To only validate without copying, pass NULL for out_event.
+ *
+ * Return values are
+ *	BCME_OK - It is a BRCM event or BRCM dongle event
+ *	BCME_NOTFOUND - Not BRCM, not an event, may be okay
+ *	BCME_BADLEN - Bad length, should not process, just drop
+ */
+int
+is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
+	bcm_event_msg_u_t *out_event)
+{
+	uint16 evlen = 0;	/* length in bcmeth_hdr */
+	uint16 subtype;
+	uint16 usr_subtype;
+	bcm_event_t *bcm_event;
+	uint8 *pktend;
+	uint8 *evend;
+	int err = BCME_OK;
+	uint32 data_len = 0; /* data length in bcm_event */
+
+	pktend = (uint8 *)pktdata + pktlen;
+	bcm_event = (bcm_event_t *)pktdata;
+
+	/* only care about 16-bit subtype / length versions */
+	if ((uint8 *)&bcm_event->bcm_hdr < pktend) {
+		uint8 short_subtype = *(uint8 *)&bcm_event->bcm_hdr;
+		if (!(short_subtype & 0x80)) {
+			err = BCME_NOTFOUND;
+			goto done;
+		}
+	}
+
+	/* must have both ether_header and bcmeth_hdr */
+	if (pktlen < OFFSETOF(bcm_event_t, event)) {
+		err = BCME_BADLEN;
+		goto done;
+	}
+
+	/* check length in bcmeth_hdr */
+
+	/* Temporary: the header length is not always set properly. Once the
+	 * !BCMDONGLEHOST path below is present in all branches that use trunk
+	 * DHD, the code under BCMDONGLEHOST can be removed.
+	 */
+	evlen = (uint16)(pktend - (uint8 *)&bcm_event->bcm_hdr.version);
+	evend = (uint8 *)&bcm_event->bcm_hdr.version + evlen;
+	if (evend != pktend) {
+		err = BCME_BADLEN;
+		goto done;
+	}
+
+	/* match on subtype, oui and usr subtype for BRCM events */
+	subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.subtype);
+	if (subtype != BCMILCP_SUBTYPE_VENDOR_LONG) {
+		err = BCME_NOTFOUND;
+		goto done;
+	}
+
+	if (bcmp(BRCM_OUI, &bcm_event->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+		err = BCME_NOTFOUND;
+		goto done;
+	}
+
+	/* if it is a bcm_event or bcm_dngl_event_t, validate it */
+	usr_subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.usr_subtype);
+	switch (usr_subtype) {
+	case BCMILCP_BCM_SUBTYPE_EVENT:
+		/* check that header length and pkt length are sufficient */
+		if ((pktlen < sizeof(bcm_event_t)) ||
+			(evend < ((uint8 *)bcm_event + sizeof(bcm_event_t)))) {
+			err = BCME_BADLEN;
+			goto done;
+		}
+
+		/* ensure data length in event is not beyond the packet. */
+		data_len = ntoh32_ua((void *)&bcm_event->event.datalen);
+		if ((sizeof(bcm_event_t) + data_len +
+			BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
+			err = BCME_BADLEN;
+			goto done;
+		}
+
+		if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
+			err = BCME_NOTFOUND;
+			goto done;
+		}
+
+		if (out_event) {
+			/* ensure BRCM event pkt aligned */
+			memcpy(&out_event->event, &bcm_event->event, sizeof(wl_event_msg_t));
+		}
+
+		break;
+
+	case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
+#if defined(DNGL_EVENT_SUPPORT)
+		if ((pktlen < sizeof(bcm_dngl_event_t)) ||
+			(evend < ((uint8 *)bcm_event + sizeof(bcm_dngl_event_t)))) {
+			err = BCME_BADLEN;
+			goto done;
+		}
+
+		/* ensure data length in event is not beyond the packet. */
+		data_len = ntoh16_ua((void *)&((bcm_dngl_event_t *)pktdata)->dngl_event.datalen);
+		if ((sizeof(bcm_dngl_event_t) + data_len +
+			BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) {
+			err = BCME_BADLEN;
+			goto done;
+		}
+
+		if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) {
+			err = BCME_NOTFOUND;
+			goto done;
+		}
+
+		if (out_event) {
+			/* ensure BRCM dngl event pkt aligned */
+			memcpy(&out_event->dngl_event, &((bcm_dngl_event_t *)pktdata)->dngl_event,
+				sizeof(bcm_dngl_event_msg_t));
+		}
+
+		break;
+#else
+		err = BCME_UNSUPPORTED;
+		break;
+#endif /* DNGL_EVENT_SUPPORT */
+
+	default:
+		err = BCME_NOTFOUND;
+		goto done;
+	}
+
+	BCM_REFERENCE(data_len);
+done:
+	return err;
+}
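+
+/* A hedged receive-path sketch (pktdata/pktlen and the local variable names are
+ * hypothetical) showing how the helpers above are typically combined: validate
+ * the frame, take a copy of the event header, then convert it to host order.
+ *
+ *	bcm_event_msg_u_t evu;
+ *	wl_event_msg_t event;
+ *
+ *	if (is_wlc_event_frame(pktdata, pktlen, 0, &evu) == BCME_OK) {
+ *		memcpy(&event, &evu.event, sizeof(event));
+ *		wl_event_to_host_order(&event);
+ *		// event.event_type, event.status, event.datalen are now host order
+ *	}
+ */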
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 0000000..a4144ef
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,847 @@
+/*
+ *  BCMSDH interface glue
+ *  implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdh.c 671319 2016-11-21 14:27:29Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h>	/* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h>	/* common SDIO/controller interface */
+#include <sbsdio.h>	/* SDIO device core hardware definitions. */
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+
+#if defined(BT_OVER_SDIO)
+#include <dhd_bt_interface.h>
+#endif /* defined (BT_OVER_SDIO) */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/* local copy of the bcmsdh handle */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined(BT_OVER_SDIO)
+struct sdio_func *func_f3 = NULL;
+static f3intr_handler processf3intr = NULL;
+static dhd_hang_notification process_dhd_hang_notification = NULL;
+static dhd_hang_state_t g_dhd_hang_state = NO_HANG_STATE;
+#endif /* defined (BT_OVER_SDIO) */
+
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+#if defined(BT_OVER_SDIO)
+void bcmsdh_btsdio_process_hang_state(dhd_hang_state_t new_state)
+{
+	bool state_change = false;
+
+	BCMSDH_ERROR(("%s: DHD hang state changed - [%d] -> [%d]\n",
+		__FUNCTION__, g_dhd_hang_state, new_state));
+
+	if (g_dhd_hang_state == new_state)
+		return;
+
+	switch (g_dhd_hang_state) {
+		case NO_HANG_STATE:
+			if (HANG_START_STATE == new_state)
+				state_change = true;
+		break;
+
+		case HANG_START_STATE:
+			if (HANG_RECOVERY_STATE == new_state ||
+				NO_HANG_STATE == new_state)
+				state_change = true;
+		break;
+
+		case HANG_RECOVERY_STATE:
+			if (NO_HANG_STATE == new_state)
+				state_change = true;
+		break;
+
+		default:
+			BCMSDH_ERROR(("%s: Unhandled Hang state\n", __FUNCTION__));
+		break;
+	}
+
+	if (!state_change) {
+		BCMSDH_ERROR(("%s: Hang state cannot be changed\n", __FUNCTION__));
+		return;
+	}
+
+	g_dhd_hang_state = new_state;
+}
+
+void bcmsdh_btsdio_process_f3_intr(void)
+{
+	if (processf3intr && (g_dhd_hang_state == NO_HANG_STATE))
+		processf3intr(func_f3);
+}
+
+void bcmsdh_btsdio_process_dhd_hang_notification(bool wifi_recovery_completed)
+{
+	bcmsdh_btsdio_process_hang_state(HANG_START_STATE);
+
+	if (process_dhd_hang_notification)
+		process_dhd_hang_notification(func_f3, wifi_recovery_completed);
+
+	/* WiFi was off, so HANG_RECOVERY_STATE is not needed */
+	if (wifi_recovery_completed)
+		bcmsdh_btsdio_process_hang_state(NO_HANG_STATE);
+	else {
+		bcmsdh_btsdio_process_hang_state(HANG_RECOVERY_STATE);
+	}
+}
+
+void bcmsdh_btsdio_interface_init(struct sdio_func *func,
+	f3intr_handler f3intr_fun, dhd_hang_notification hang_notification)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)l_bcmsdh;
+	BCMSDH_INFO(("%s: func %p \n", __FUNCTION__, func));
+	func_f3 = func;
+	processf3intr = f3intr_fun;
+	sdioh_sdmmc_card_enable_func_f3(bcmsdh->sdioh, func);
+	process_dhd_hang_notification = hang_notification;
+
+} EXPORT_SYMBOL(bcmsdh_btsdio_interface_init);
+#endif /* defined (BT_OVER_SDIO) */
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param sdioh Handle to the lower-layer SDIO host (sdioh) driver.
+ * @param regsva Output: set to the SI enumeration base address.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context.
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+	bcmsdh->sdioh = sdioh;
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+	*regsva = SI_ENUM_BASE;
+
+	bcmsdh_force_sbwad_calc(bcmsdh, FALSE);
+
+	/* Report the BAR, to fix if needed */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+
+	/* save the handle locally */
+	l_bcmsdh = bcmsdh;
+
+	return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+
+	return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+                void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	if (SDIOH_API_SUCCESS(status))
+		return FALSE;
+	else
+		return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	/* not supported yet */
+	return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param fnc_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Error return.
+ * @return value read from SDIO configuration space.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+} EXPORT_SYMBOL(bcmsdh_cfg_read);
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+} EXPORT_SYMBOL(bcmsdh_cfg_write);
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+	             addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer. */
+		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		bcopy(cis, tmp_buf, length);
+		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+			ptr += snprintf((char*)ptr, (cis + length - ptr - 4),
+				"%.2x ", *tmp_ptr & 0xff);
+			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += snprintf((char *)ptr, (cis + length - ptr - 4), "\n");
+		}
+		MFREE(bcmsdh->osh, tmp_buf, length);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint32 offset)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cisd);
+
+	status = sdioh_cisaddr_read(bcmsdh->sdioh, func, cisd, offset);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+	int err = 0;
+	uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bar0 != bcmsdh->sbwad || force_set) {
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+			(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+				(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+				(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+		if (!err)
+			bcmsdh->sbwad = bar0;
+		else
+			/* invalidate cached window var */
+			bcmsdh->sbwad = 0;
+
+	}
+
+	return err;
+}
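+
+/* Note (descriptive only, no new behaviour): a full 32-bit backplane address is
+ * split into a window base, programmed into the FUNC1 SBADDRLOW/MID/HIGH
+ * registers above, and an in-window offset (address & SBSDIO_SB_OFT_ADDR_MASK)
+ * used in the actual access; bcmsdh_reg_read()/bcmsdh_reg_write() below follow
+ * this pattern.
+ */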
+
+uint32
+bcmsdh_reg_read(void *sdh, uintptr addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, (unsigned int)addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) {
+		bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc deadlock
+		return 0xFFFFFFFF;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+		SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+			case sizeof(uint8):
+				return (word & 0xff);
+			case sizeof(uint16):
+				return (word & 0xffff);
+			case sizeof(uint32):
+				return word;
+			default:
+				bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%x size %d\n",
+		__FUNCTION__, (unsigned int)addr, size));
+	return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+	             __FUNCTION__, (unsigned int)addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc))) {
+		bcmsdh->regfail = TRUE; // terence 20130621:
+		return err;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+	                            addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+	              __FUNCTION__, data, (unsigned int)addr, size));
+	return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+	return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	            __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+	                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+	                              addr, 4, nbytes, buf, NULL);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_waitlockfree(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+	return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+	return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (bcmsdh->sbwad);
+}
+
+/* If force is TRUE, bcmsdhsdio_set_sbaddr_window() always recalculates sbwad
+ * instead of using the cached value.
+ */
+void
+bcmsdh_force_sbwad_calc(void *sdh, bool force)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+	bcmsdh->force_sbwad_calc = force;
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+	return;
+}
+
+
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_sleep(sd, enab);
+#else
+	return BCME_UNSUPPORTED;
+#endif
+}
+
+int
+bcmsdh_gpio_init(void *sdh)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpio_init(sd);
+}
+
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioin(sd, gpio);
+}
+
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioouten(sd, gpio);
+}
+
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioout(sd, gpio, enab);
+}
+
+uint
+bcmsdh_set_mode(void *sdh, uint mode) 
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return (sdioh_set_mode(bcmsdh->sdioh, mode));
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
new file mode 100644
index 0000000..41462a3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -0,0 +1,512 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdh_linux.c 672609 2016-11-29 07:00:46Z $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
+#include <dhd_linux.h>
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL};
+
+typedef enum {
+	DHD_INTR_INVALID = 0,
+	DHD_INTR_INBAND,
+	DHD_INTR_HWOOB,
+	DHD_INTR_SWOOB
+} DHD_HOST_INTR_TYPE;
+
+/* The BCMSDH module comprises a generic part (bcmsdh.c) and an OS-specific layer (e.g.
+ * bcmsdh_linux.c). Put all OS-specific variables (e.g. IRQ number and flags) here rather
+ * than in the common structure bcmsdh_info; bcmsdh_info only keeps a handle (os_cxt) to this
+ * structure.
+ */
+typedef struct bcmsdh_os_info {
+	DHD_HOST_INTR_TYPE	intr_type;
+	int			oob_irq_num;	/* valid when hardware or software oob in use */
+	unsigned long		oob_irq_flags;	/* valid when hardware or software oob in use */
+	bool			oob_irq_registered;
+	bool			oob_irq_enabled;
+	bool			oob_irq_wake_enabled;
+	spinlock_t		oob_irq_spinlock;
+	bcmsdh_cb_fn_t		oob_irq_handler;
+	void			*oob_irq_handler_context;
+	void			*context;	/* context returned from upper layer */
+	void			*sdioh;		/* handle to lower layer (sdioh) */
+	void			*dev;		/* handle to the underlying device */
+	bool			dev_wake_enabled;
+} bcmsdh_os_info_t;
+
+/* debugging macros */
+#define SDLX_MSG(x) printf x
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
+
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num)
+{
+	ulong regs;
+	bcmsdh_info_t *bcmsdh;
+	uint32 vendevid;
+	bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+	bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+	if (bcmsdh == NULL) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+	if (bcmsdh_osinfo == NULL) {
+		SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+		goto err;
+	}
+	bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	bcmsdh->os_cxt = bcmsdh_osinfo;
+	bcmsdh_osinfo->sdioh = sdioh;
+	bcmsdh_osinfo->dev = dev;
+	osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dev && device_init_wakeup(dev, true) == 0)
+		bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer-specific OOB IRQ parameters: IRQ number and IRQ flags */
+	bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+		&bcmsdh_osinfo->oob_irq_flags);
+	if (bcmsdh_osinfo->oob_irq_num < 0) {
+		SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(bcmsdh);
+	/* try to attach to the target device */
+	bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+		slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+	if (bcmsdh_osinfo->context == NULL) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return bcmsdh;
+
+	/* error handling */
+err:
+	if (bcmsdh != NULL)
+		bcmsdh_detach(osh, bcmsdh);
+	if (bcmsdh_osinfo != NULL)
+		MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	return NULL;
+}
+
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (bcmsdh_osinfo->dev)
+		device_init_wakeup(bcmsdh_osinfo->dev, false);
+	bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+	drvinfo.remove(bcmsdh_osinfo->context);
+	MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+	bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+	return 0;
+}
+
+#ifdef DHD_WAKE_STATUS
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh)
+{
+	return bcmsdh->total_wake_count;
+}
+
+int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+
+	ret = bcmsdh->pkt_wake;
+	bcmsdh->total_wake_count += flag;
+	bcmsdh->pkt_wake = flag;
+
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+	return ret;
+}
+#endif /* DHD_WAKE_STATUS */
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context))
+		return -EBUSY;
+	return 0;
+}
+
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.resume)
+		return drvinfo.resume(bcmsdh_osinfo->context);
+	return 0;
+}
+
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+	return sdio_func_reg_notify(semaphore);
+}
+
+void bcmsdh_unreg_sdio_notify(void)
+{
+	sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error = 0;
+
+	drvinfo = *driver;
+	SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
+	error = bcmsdh_register_client_driver();
+	if (error)
+		SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error));
+
+	return error;
+}
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+		if (bcmsdh_pci_driver.node.next == NULL)
+			return;
+#endif
+
+	bcmsdh_unregister_client_driver();
+}
+
+void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_stay_awake(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_relax(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	return bcmsdh_osinfo->dev_wake_enabled;
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
+{
+	unsigned long flags;
+	bcmsdh_os_info_t *bcmsdh_osinfo;
+
+	if (!bcmsdh)
+		return;
+
+	bcmsdh_osinfo = bcmsdh->os_cxt;
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+	if (bcmsdh_osinfo->oob_irq_enabled != enable) {
+		if (enable)
+			enable_irq(bcmsdh_osinfo->oob_irq_num);
+		else
+			disable_irq_nosync(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = enable;
+	}
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	bcmsdh_oob_intr_set(bcmsdh, FALSE);
+	bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
+
+	return IRQ_HANDLED;
+}
+
+int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__));
+		return -EBUSY;
+	}
+#ifdef HW_OOB
+	printf("%s: HW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+		(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags);
+#else
+	printf("%s: SW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+		(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags);
+#endif
+	bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
+	bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+	bcmsdh_osinfo->oob_irq_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_registered = TRUE;
+#if defined(CONFIG_ARCH_ODIN)
+	err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
+	err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
+	if (err) {
+		SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+		bcmsdh_osinfo->oob_irq_enabled = FALSE;
+		bcmsdh_osinfo->oob_irq_registered = FALSE;
+		return err;
+	}
+
+#if defined(DISABLE_WOWLAN)
+	SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__));
+	bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#else
+	err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+	if (err)
+		SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
+	else
+		bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+#endif
+
+	return 0;
+}
+
+void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (!bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
+		return;
+	}
+	if (bcmsdh_osinfo->oob_irq_wake_enabled) {
+		err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+		if (!err)
+			bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+	}
+	if (bcmsdh_osinfo->oob_irq_enabled) {
+		disable_irq(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = FALSE;
+	}
+	free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
+	bcmsdh_osinfo->oob_irq_registered = FALSE;
+}
+#endif
+
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, uint, 0);
+
+extern uint sd_f1_blocksize;
+module_param(sd_f1_blocksize, uint, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+extern uint sd_tuning_period;
+module_param(sd_tuning_period, uint, 0);
+extern int sd_delay_value;
+module_param(sd_delay_value, int, 0);
+
+/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */
+extern char dhd_sdiod_uhsi_ds_override[2];
+module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
+
+#endif
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+#if defined(BT_OVER_SDIO)
+EXPORT_SYMBOL(bcmsdh_btsdio_interface_init);
+#endif /* defined (BT_OVER_SDIO) */
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
new file mode 100644
index 0000000..650e8b8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -0,0 +1,1759 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary,Open:>>
+ *
+ * $Id: bcmsdh_sdmmc.c 710913 2017-07-14 10:17:51Z $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <sdioh.h>	/* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0))
+#include <drivers/mmc/core/host.h>
+void
+mmc_host_clk_hold(struct mmc_host *host)
+{
+	BCM_REFERENCE(host);
+	return;
+}
+
+void
+mmc_host_clk_release(struct mmc_host *host)
+{
+	BCM_REFERENCE(host);
+	return;
+}
+#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
+#include <drivers/mmc/core/host.h>
+#else
+#include <linux/mmc/host.h>
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) */
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
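+/* Stub: the mainline MMC core does not provide sdio_reset_comm(), so the reset
+ * is treated here as a no-op (assumed intent; returning 0 reports success).
+ */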
+int sdio_reset_comm(struct mmc_card *card)
+{
+	return 0;
+}
+#ifdef GLOBAL_SDMMC_INSTANCE
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
+
+#define DEFAULT_SDIO_F2_BLKSIZE		512
+#ifndef CUSTOM_SDIO_F2_BLKSIZE
+#define CUSTOM_SDIO_F2_BLKSIZE		DEFAULT_SDIO_F2_BLKSIZE
+#endif
+
+#define DEFAULT_SDIO_F1_BLKSIZE		64
+#ifndef CUSTOM_SDIO_F1_BLKSIZE
+#define CUSTOM_SDIO_F1_BLKSIZE		DEFAULT_SDIO_F1_BLKSIZE
+#endif
+
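+/* CMD53 (IO_RW_EXTENDED) carries a 9-bit block count, so one transfer is capped at 511 blocks */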
+#define MAX_IO_RW_EXTENDED_BLK		511
+
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE;
+
+#if defined(BT_OVER_SDIO)
+uint sd_f3_blocksize = 64;
+#endif /* defined (BT_OVER_SDIO) */
+
+uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+
+#ifndef CUSTOM_RXCHAIN
+#define CUSTOM_RXCHAIN 0
+#endif
+
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
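+/* Buffers must be 4-byte aligned (and a 4-byte multiple in length) to be handed
+ * to the host directly; sdioh_request_buffer() bounces through a copy otherwise.
+ */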
+#define DMA_ALIGN_MASK	0x03
+#define MMC_SDIO_ABORT_RETRY_LIMIT 5
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+void  sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
+uint  sdmmc_get_clock_rate(sdioh_info_t *sd);
+void  sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
+#if defined(BT_OVER_SDIO)
+extern
+void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
+{
+	sd->func[3] = func;
+	sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3]));
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Enable Function 1 */
+	sdio_claim_host(sd->func[1]);
+	err_ret = sdio_enable_func(sd->func[1]);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret));
+	}
+
+	return FALSE;
+}
+
+/*
+ *	Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, struct sdio_func *func)
+{
+	sdioh_info_t *sd = NULL;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (func == NULL) {
+		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	sd->fake_func0.num = 0;
+	sd->fake_func0.card = func->card;
+	sd->func[0] = &sd->fake_func0;
+#ifdef GLOBAL_SDMMC_INSTANCE
+	if (func->num == 2)
+		sd->func[1] = gInstance->func[1];
+#else
+	sd->func[1] = func->card->sdio_func[0];
+#endif
+	sd->func[2] = func->card->sdio_func[1];
+#ifdef GLOBAL_SDMMC_INSTANCE
+	sd->func[func->num] = func;
+#endif
+
+#if defined(BT_OVER_SDIO)
+	sd->func[3] = NULL;
+#endif /* defined (BT_OVER_SDIO) */
+
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+	sd->use_rxchain = CUSTOM_RXCHAIN;
+	if (sd->func[1] == NULL || sd->func[2] == NULL) {
+		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
+		goto fail;
+	}
+	sdio_set_drvdata(sd->func[1], sd);
+
+	sdio_claim_host(sd->func[1]);
+	sd->client_block_size[1] = sd_f1_blocksize;
+	err_ret = sdio_set_block_size(sd->func[1], sd_f1_blocksize);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
+		goto fail;
+	}
+
+	sdio_claim_host(sd->func[2]);
+	sd->client_block_size[2] = sd_f2_blocksize;
+	printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
+	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+	sdio_release_host(sd->func[2]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
+			sd_f2_blocksize, err_ret));
+		goto fail;
+	}
+
+	sd->sd_clk_rate = sdmmc_get_clock_rate(sd);
+	printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate);
+	sdioh_sdmmc_card_enablefuncs(sd);
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+
+fail:
+	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	return NULL;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		if (sd->func[2]) {
+			sdio_claim_host(sd->func[2]);
+			sdio_disable_func(sd->func[2]);
+			sdio_release_host(sd->func[2]);
+		}
+
+		/* Disable Function 1 */
+		if (sd->func[1]) {
+			sdio_claim_host(sd->func[1]);
+			sdio_disable_func(sd->func[1]);
+			sdio_release_host(sd->func[1]);
+		}
+
+		sd->func[1] = NULL;
+		sd->func[2] = NULL;
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	/* Enable F1 and F2 interrupts, clear master enable */
+	reg &= ~INTR_CTL_MASTER_EN;
+	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined(BT_OVER_SDIO)
+	reg |= (INTR_CTL_FUNC3_EN);
+#endif /* defined (BT_OVER_SDIO) */
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+#if defined(BT_OVER_SDIO)
+	reg &= ~INTR_CTL_FUNC3_EN;
+#endif
+	/* Disable master interrupt with the last function interrupt */
+	if (!(reg & 0xFE))
+		reg = 0;
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (sd->func[2]) {
+		sdio_claim_host(sd->func[2]);
+		sdio_claim_irq(sd->func[2], IRQHandlerF2);
+		sdio_release_host(sd->func[2]);
+	}
+
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		sdio_claim_irq(sd->func[1], IRQHandler);
+		sdio_release_host(sd->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (sd->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(sd->func[1]);
+		sdio_release_irq(sd->func[1]);
+		sdio_release_host(sd->func[1]);
+	}
+
+	if (sd->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(sd->func[2]);
+		sdio_release_irq(sd->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(sd->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	if (dhd_download_fw_on_driverload)
+		sdioh_disable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL,	0, 0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0, 0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0, 0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0, 0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0, 0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32,	0 },
+	{"sd_divisor",	IOV_DIVISOR,	0, 0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0, 0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0, 0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0, 0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0, 0,	IOVT_UINT32,	0 },
+	{"sd_rxchain",  IOV_RXCHAIN,    0, 0, 	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                           void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+	BCM_REFERENCE(bool_val);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+		if (si->func[func] == NULL) {
+			sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+			bcmerror = BCME_NORESOURCE;
+			break;
+		}
+		sdio_claim_host(si->func[func]);
+		bcmerror = sdio_set_block_size(si->func[func], blksize);
+		if (bcmerror)
+			sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
+				__FUNCTION__, func, blksize, bcmerror));
+		sdio_release_host(si->func[func]);
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = (int32)si->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		/* set the clock to divisor, if value is non-zero & power of 2 */
+		if (int_val && !(int_val & (int_val - 1))) {
+			sd_divisor = int_val;
+			sdmmc_set_clock_divisor(si, sd_divisor);
+		} else {
+			DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n",
+				__FUNCTION__));
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
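+	/* SDIOD_CCCR_BRCM_SEPINT is the Broadcom-specific CCCR register controlling
+	 * the out-of-band interrupt pin; judging by the bit names, MASK enables the
+	 * interrupt, OE drives the pad and ACT_HI selects active-high polarity.
+	 */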
+	if (enable)
+#ifdef HW_OOB_LOW_LEVEL
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+#else
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+#endif
+	else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset)
+{
+	uint32 foo;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	if (sdioh_sdmmc_card_regread (sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) {
+		sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	*cisd = (uint8)(foo & 0xff);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+	struct timespec64 now, before;
+
+	if (sd_msglevel & SDH_COST_VAL)
+		ktime_get_boottime_ts64(&before);
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if(rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers.  Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+#if defined(BT_OVER_SDIO)
+				do {
+				if (sd->func[3]) {
+					sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte));
+
+					if (*byte & SDIO_FUNC_ENABLE_3) {
+						sdio_claim_host(sd->func[3]);
+
+						/* Set Function 3 Block Size */
+						err_ret = sdio_set_block_size(sd->func[3],
+						sd_f3_blocksize);
+						if (err_ret) {
+							sd_err(("F3 blocksize set err%d\n",
+								err_ret));
+						}
+
+						/* Enable Function 3 */
+						sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n",
+						sd->func[3]));
+						err_ret = sdio_enable_func(sd->func[3]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n",
+								err_ret));
+						}
+
+						sdio_release_host(sd->func[3]);
+
+						break;
+					} else if (*byte & SDIO_FUNC_DISABLE_3) {
+						sdio_claim_host(sd->func[3]);
+
+						/* Disable Function 3 */
+						sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n",
+						sd->func[3]));
+						err_ret = sdio_disable_func(sd->func[3]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n",
+								err_ret));
+						}
+						sdio_release_host(sd->func[3]);
+						sd->func[3] = NULL;
+
+						break;
+					}
+				}
+#endif /* defined (BT_OVER_SDIO) */
+				if (sd->func[2]) {
+					sdio_claim_host(sd->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
+								err_ret));
+						}
+					}
+					sdio_release_host(sd->func[2]);
+				}
+#if defined(BT_OVER_SDIO)
+			} while (0);
+#endif /* defined (BT_OVER_SDIO) */
+		}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * this sdio_f0_writeb() can be replaced with
+						 * another API depending on MMC driver changes.
+						 * For now, this is a temporary workaround.
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				if (sd->func[func]) {
+					sdio_claim_host(sd->func[func]);
+					sdio_f0_writeb(sd->func[func],
+						*byte, regaddr, &err_ret);
+					sdio_release_host(sd->func[func]);
+				}
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			if (sd->func[func]) {
+				sdio_claim_host(sd->func[func]);
+				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(sd->func[func]);
+			}
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		if (sd->func[func]) {
+			sdio_claim_host(sd->func[func]);
+			if (func == 0) {
+				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+			} else {
+				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+			}
+			sdio_release_host(sd->func[func]);
+		}
+	}
+
+	if (err_ret) {
+		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
+		} else {
+			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+		}
+	}
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		ktime_get_boottime_ts64(&now);
+		sd_cost(("%s: rw=%d len=1 cost=%llds %ldus\n", __FUNCTION__,
+			rw, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+uint
+sdioh_set_mode(sdioh_info_t *sd, uint mode)
+{
+	if (mode == SDPCM_TXGLOM_CPY)
+		sd->txglom_mode = mode;
+	else if (mode == SDPCM_TXGLOM_MDESC)
+		sd->txglom_mode = mode;
+
+	return (sd->txglom_mode);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                                   uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+	int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc deadlock
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+	struct timespec64 now, before;
+
+	if (sd_msglevel & SDH_COST_VAL)
+		ktime_get_boottime_ts64(&before);
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(sd->func[func]);
+
+	if(rw) { /* CMD53 Write */
+		if (nbytes == 4) {
+			sdio_writel(sd->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD53 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(sd->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+		/* Any error on CMD53 transaction should abort that function using function 0. */
+		while (sdio_abort_retry--) {
+			if (sd->func[0]) {
+				sdio_claim_host(sd->func[0]);
+				/*
+				 * this sdio_f0_writeb() can be replaced with another API
+				 * depending on MMC driver changes.
+				 * For now, this is a temporary workaround.
+				 */
+				sdio_writeb(sd->func[0],
+					func, SDIOD_CCCR_IOABORT, &err_ret2);
+				sdio_release_host(sd->func[0]);
+			}
+			if (!err_ret2)
+				break;
+		}
+		if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+		{
+			sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
+				rw ? "Write" : "Read", func, addr, *word, err_ret));
+		}
+	}
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		ktime_get_boottime_ts64(&now);
+		sd_cost(("%s: rw=%d, len=%d cost=%llds %ldus\n", __FUNCTION__,
+			rw, nbytes, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
+	}
+
+	return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+#ifdef BCMSDIOH_TXGLOM
+static SDIOH_API_RC
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	void *pnext;
+	uint ttl_len, pkt_offset;
+	uint blk_num;
+	uint blk_size;
+	uint max_blk_count;
+	uint max_req_size;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+	uint32 sg_count;
+	struct sdio_func *sdio_func = sd->func[func];
+	struct mmc_host *host = sdio_func->card->host;
+	uint8 *localbuf = NULL;
+	uint local_plen = 0;
+	uint pkt_len = 0;
+	struct timespec64 now, before;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	if (sd_msglevel & SDH_COST_VAL)
+		ktime_get_boottime_ts64(&before);
+
+	blk_size = sd->client_block_size[func];
+	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
+
+	pkt_offset = 0;
+	pnext = pkt;
+
+	ttl_len = 0;
+	sg_count = 0;
+	if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+	while (pnext != NULL) {
+		ttl_len = 0;
+		sg_count = 0;
+		memset(&mmc_req, 0, sizeof(struct mmc_request));
+		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+		memset(&mmc_dat, 0, sizeof(struct mmc_data));
+		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
+
+		/* Set up scatter-gather DMA descriptors. this loop is to find out the max
+		 * data we can transfer with one command 53. blocks per command is limited by
+		 * host max_req_size and 9-bit max block number. when the total length of this
+		 * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
+		 * commands (each transfer is still block aligned)
+		 */
+		while (pnext != NULL && ttl_len < max_req_size) {
+			int pkt_len;
+			int sg_data_size;
+			uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
+
+			ASSERT(pdata != NULL);
+			pkt_len = PKTLEN(sd->osh, pnext);
+			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+			/* sg_count is unlikely larger than the array size, and this is
+			 * NOT something we can handle here, but in case it happens, PLEASE put
+			 * a restriction on max tx/glom count (based on host->max_segs).
+			 */
+			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+				sd_err(("%s: sg list entries exceed limit %d\n", __FUNCTION__, sg_count));
+				return (SDIOH_API_RC_FAIL);
+			}
+			pdata += pkt_offset;
+
+			sg_data_size = pkt_len - pkt_offset;
+			if (sg_data_size > max_req_size - ttl_len)
+				sg_data_size = max_req_size - ttl_len;
+			/* some platforms put a restriction on the data size of each scatter-gather
+			 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
+			 * max_seg_size
+			 */
+			if (sg_data_size > host->max_seg_size)
+				sg_data_size = host->max_seg_size;
+			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+			ttl_len += sg_data_size;
+			pkt_offset += sg_data_size;
+			if (pkt_offset == pkt_len) {
+				pnext = PKTNEXT(sd->osh, pnext);
+				pkt_offset = 0;
+			}
+		}
+
+		if (ttl_len % blk_size != 0) {
+			sd_err(("%s, data length %d not aligned to block size %d\n",
+				__FUNCTION__,  ttl_len, blk_size));
+			return SDIOH_API_RC_FAIL;
+		}
+		blk_num = ttl_len / blk_size;
+		mmc_dat.sg = sd->sg_list;
+		mmc_dat.sg_len = sg_count;
+		mmc_dat.blksz = blk_size;
+		mmc_dat.blocks = blk_num;
+		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
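+		/* Build the SD_IO_RW_EXTENDED (CMD53) argument: bit 31 = R/W,
+		 * bits 30:28 = function, bit 27 = block mode, bit 26 = incrementing
+		 * address (0 for FIFO), bits 25:9 = register address, bits 8:0 = block count.
+		 */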
+		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+		mmc_cmd.arg = write ? 1<<31 : 0;
+		mmc_cmd.arg |= (func & 0x7) << 28;
+		mmc_cmd.arg |= 1<<27;
+		mmc_cmd.arg |= fifo ? 0 : 1<<26;
+		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+		mmc_cmd.arg |= blk_num & 0x1FF;
+		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+		mmc_req.cmd = &mmc_cmd;
+		mmc_req.data = &mmc_dat;
+		if (!fifo)
+			addr += ttl_len;
+
+		sdio_claim_host(sdio_func);
+		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+		mmc_wait_for_req(host, &mmc_req);
+		sdio_release_host(sdio_func);
+
+		err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+		if (0 != err_ret) {
+			sd_err(("%s:CMD53 %s failed with code %d\n",
+				__FUNCTION__, write ? "write" : "read", err_ret));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+	} else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) {
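+		/* Copy mode: coalesce the whole packet chain into one contiguous
+		 * bounce buffer and issue a single CMD53 for the chain.
+		 */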
+		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+			ttl_len += PKTLEN(sd->osh, pnext);
+		}
+		/* Claim host controller */
+		sdio_claim_host(sd->func[func]);
+		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+			uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
+			pkt_len = PKTLEN(sd->osh, pnext);
+
+			if (!localbuf) {
+				localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
+				if (localbuf == NULL) {
+					sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
+						__FUNCTION__, (write) ? "TX" : "RX"));
+					goto txglomfail;
+				}
+			}
+
+			bcopy(buf, (localbuf + local_plen), pkt_len);
+			local_plen += pkt_len;
+			if (PKTNEXT(sd->osh, pnext))
+				continue;
+
+			buf = localbuf;
+			pkt_len = local_plen;
+txglomfail:
+			/* Align Patch */
+			if (!write || pkt_len < 32)
+				pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+			else if (pkt_len % blk_size)
+				pkt_len += blk_size - (pkt_len % blk_size);
+
+			if ((write) && (!fifo))
+				err_ret = sdio_memcpy_toio(
+						sd->func[func],
+						addr, buf, pkt_len);
+			else if (write)
+				err_ret = sdio_memcpy_toio(
+						sd->func[func],
+						addr, buf, pkt_len);
+			else if (fifo)
+				err_ret = sdio_readsb(
+						sd->func[func],
+						buf, addr, pkt_len);
+			else
+				err_ret = sdio_memcpy_fromio(
+						sd->func[func],
+						buf, addr, pkt_len);
+
+			if (err_ret)
+				sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+				       __FUNCTION__,
+				       (write) ? "TX" : "RX",
+				       pnext, sg_count, addr, pkt_len, err_ret));
+			else
+				sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+					__FUNCTION__,
+					(write) ? "TX" : "RX",
+					pnext, sg_count, addr, pkt_len));
+
+			if (!fifo)
+				addr += pkt_len;
+			sg_count ++;
+		}
+		sdio_release_host(sd->func[func]);
+	} else {
+		sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (localbuf)
+		MFREE(sd->osh, localbuf, ttl_len);
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		ktime_get_boottime_ts64(&now);
+		sd_cost(("%s: rw=%d, ttl_len=%d, cost=%llds %ldus\n", __FUNCTION__,
+			write, ttl_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* BCMSDIOH_TXGLOM */
+
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, uint8 *buf, uint len)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	struct timespec64 now, before;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(buf);
+
+	if (sd_msglevel & SDH_COST_VAL)
+		ktime_get_boottime_ts64(&before);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to block boundary. If you want to align each packet to
+	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 *
+	 */
+	sdio_claim_host(sd->func[func]);
+
+	if ((write) && (!fifo))
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (write)
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (fifo)
+		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+	else
+		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret)
+		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+		       (write) ? "TX" : "RX", buf, addr, len, err_ret));
+	else
+		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len));
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		ktime_get_boottime_ts64(&now);
+		sd_cost(("%s: rw=%d, len=%d cost=%llds %ldus\n", __FUNCTION__,
+			write, len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
+ * then all the packets in the chain must be properly aligned.  If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC status;
+	void *tmppkt;
+	struct timespec64 now, before;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	if (sd_msglevel & SDH_COST_VAL)
+		ktime_get_boottime_ts64(&before);
+
+	if (pkt) {
+#ifdef BCMSDIOH_TXGLOM
+		/* packet chain, only used for tx/rx glom, all packets length
+		 * are aligned, total length is a block multiple
+		 */
+		if (PKTNEXT(sd->osh, pkt))
+			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
+#endif /* BCMSDIOH_TXGLOM */
+		/* non-glom mode, ignore the buffer parameter and use the packet pointer
+		 * (this shouldn't happen)
+		 */
+		buffer = PKTDATA(sd->osh, pkt);
+		buf_len = PKTLEN(sd->osh, pkt);
+	}
+
+	ASSERT(buffer);
+
+	/* buffer and length are aligned, use it directly so we can avoid memory copy */
+	if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
+		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
+
+	sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+		__FUNCTION__, write, buffer, buf_len));
+
+	/* otherwise, a memory copy is needed as the input buffer is not aligned */
+	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+	if (tmppkt == NULL) {
+		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (write)
+		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+	if (!write)
+		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+	if (sd_msglevel & SDH_COST_VAL) {
+		ktime_get_boottime_ts64(&now);
+		sd_cost(("%s: len=%d cost=%llds %ldus\n", __FUNCTION__,
+			buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
+	}
+
+	return status;
+}
+
+/* this function performs "abort" for both host & device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
+			return BCME_SDIO_ERROR;
+		}
+
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd = sdio_get_drvdata(func);
+
+	ASSERT(sd != NULL);
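+	/* The MMC core calls SDIO interrupt handlers with the host claimed;
+	 * drop the claim so the client handler can claim the host itself,
+	 * then re-claim before returning to the core.
+	 */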
+	sdio_release_host(sd->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(sd->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	int ret;
+
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+
+	/* This has to be done in stages: the interrupt can't be enabled until
+		the firmware download is complete, otherwise polled SDIO accesses
+		would get in the way.
+	*/
+	if (sd->func[0]) {
+			if (stage == 0) {
+		/* Since power to the chip was cut, the device has to be
+			re-enumerated. Set the block size and enable function 1
+			in preparation for downloading the firmware.
+		*/
+		/* sdio_reset_comm() has been fixed in the latest kernel/msm.git for
+		   Linux 2.6.27. Implementations prior to that are buggy and need
+		   Broadcom's patch.
+		*/
+		if ((ret = sdio_reset_comm(sd->func[0]->card))) {
+			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		else {
+			sd->num_funcs = 2;
+			sd->sd_blockmode = TRUE;
+			sd->use_client_ints = TRUE;
+			sd->client_block_size[0] = 64;
+
+			if (sd->func[1]) {
+				/* Claim host controller */
+				sdio_claim_host(sd->func[1]);
+
+				sd->client_block_size[1] = 64;
+				ret = sdio_set_block_size(sd->func[1], 64);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+						"blocksize(%d)\n", ret));
+				}
+
+				/* Release host controller F1 */
+				sdio_release_host(sd->func[1]);
+			}
+
+			if (sd->func[2]) {
+				/* Claim host controller F2 */
+				sdio_claim_host(sd->func[2]);
+
+				sd->client_block_size[2] = sd_f2_blocksize;
+				printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
+				ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+						"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+				}
+
+				/* Release host controller F2 */
+				sdio_release_host(sd->func[2]);
+			}
+
+			sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(sd->func[0]);
+			if (sd->func[2])
+				sdio_claim_irq(sd->func[2], IRQHandlerF2);
+			if (sd->func[1])
+				sdio_claim_irq(sd->func[1], IRQHandler);
+			sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr(sd);
+#endif
+			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	/* The MSM7201A Android SDIO stack has an interrupt bug: internally it
+		falls back to polling, which causes problems when the device is
+		turned off. Unregister the interrupt with the SDIO stack to stop
+		the polling.
+	*/
+	if (sd->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(sd->func[0]);
+		if (sd->func[1])
+			sdio_release_irq(sd->func[1]);
+		if (sd->func[2])
+			sdio_release_irq(sd->func[2]);
+		sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr(sd);
+#endif
+		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+	return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
+
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+uint
+sdmmc_get_clock_rate(sdioh_info_t *sd)
+{
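+	/* mmc_host_clk_rate()/mmc_host_clk_hold()/mmc_host_clk_release() are only
+	 * available on a limited range of kernels (they appear to have been removed
+	 * along with MMC aggressive clock gating), so report 0 elsewhere.
+	 */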
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+	return 0;
+#else
+	struct sdio_func *sdio_func = sd->func[0];
+	struct mmc_host *host = sdio_func->card->host;
+	return mmc_host_clk_rate(host);
+#endif
+}
+
+
+void
+sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+	return;
+#else
+	struct sdio_func *sdio_func = sd->func[0];
+	struct mmc_host *host = sdio_func->card->host;
+	struct mmc_ios *ios = &host->ios;
+
+	mmc_host_clk_hold(host);
+	DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+	if (hz < host->f_min) {
+		DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__));
+		hz = host->f_min;
+	}
+
+	if (hz > host->f_max) {
+		DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__));
+		hz = host->f_max;
+	}
+	ios->clock = hz;
+	host->ops->set_ios(host, ios);
+	DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock));
+	mmc_host_clk_release(host);
+#endif
+}
+
+void
+sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div)
+{
+	uint hz;
+	uint old_div = sdmmc_get_clock_rate(sd);
+	if (old_div == sd_div) {
+		return;
+	}
+
+	hz = sd->sd_clk_rate / sd_div;
+	sdmmc_set_clock_rate(sd, hz);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
new file mode 100644
index 0000000..dd5d3ba
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,405 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary,Open:>>
+ *
+ * $Id: bcmsdh_sdmmc_linux.c 644124 2016-06-17 07:59:34Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <dhd_linux.h>
+#include <bcmsdh_sdmmc.h>
+#include <dhd_dbg.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM		0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT	0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB	0x0492	/* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325	0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319	0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4330)
+#define SDIO_DEVICE_ID_BROADCOM_4330	0x4330
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4334)
+#define SDIO_DEVICE_ID_BROADCOM_4334    0x4334
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4324)
+#define SDIO_DEVICE_ID_BROADCOM_4324    0x4324
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43239)
+#define SDIO_DEVICE_ID_BROADCOM_43239    43239
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
+
+extern void wl_cfg80211_set_parent_dev(void *dev);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+PBCMSDH_SDMMC_INSTANCE gInstance;
+#endif
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern volatile bool dhd_mmc_suspend;
+
+static int sdioh_probe(struct sdio_func *func)
+{
+	int host_idx = func->card->host->index;
+	uint32 rca = func->card->rca;
+	wifi_adapter_info_t *adapter;
+	osl_t *osh = NULL;
+	sdioh_info_t *sdioh = NULL;
+
+	sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca));
+	adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca);
+	if (adapter != NULL) {
+		sd_err(("found adapter info '%s'\n", adapter->name));
+#ifdef BUS_POWER_RESTORE
+		adapter->sdio_func = func;
+#endif
+	} else
+		sd_err(("can't find adapter info for this chip\n"));
+
+#ifdef WL_CFG80211
+	wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+
+	 /* allocate SDIO Host Controller state info */
+	 osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
+	 if (osh == NULL) {
+		 sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+	 osl_static_mem_init(osh, adapter);
+	 sdioh = sdioh_attach(osh, func);
+	 if (sdioh == NULL) {
+		 sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+	 sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca);
+	 if (sdioh->bcmsdh == NULL) {
+		 sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+
+	sdio_set_drvdata(func, sdioh);
+	return 0;
+
+fail:
+	if (sdioh != NULL)
+		sdioh_detach(osh, sdioh);
+	if (osh != NULL)
+		osl_detach(osh);
+	return -ENOMEM;
+}
+
+static void sdioh_remove(struct sdio_func *func)
+{
+	sdioh_info_t *sdioh;
+	osl_t *osh;
+
+	sdioh = sdio_get_drvdata(func);
+	if (sdioh == NULL) {
+		sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+		return;
+	}
+	sd_err(("%s: Enter\n", __FUNCTION__));
+
+	osh = sdioh->osh;
+	bcmsdh_remove(sdioh->bcmsdh);
+	sdioh_detach(osh, sdioh);
+	osl_detach(osh);
+}
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+
+	if (func == NULL)
+		return -EINVAL;
+
+	sd_err(("%s: Enter num=%d\n", __FUNCTION__, func->num));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+	gInstance->func[func->num] = func;
+#endif
+
+	/* 4318 doesn't have function 2 */
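+	/* sdioh_probe() is invoked only for function 2, or for function 1 on
+	 * single-function (device id 0x4) parts, so the host attach runs once per card.
+	 */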
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		ret = sdioh_probe(func);
+
+	return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	if (func == NULL) {
+		sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
+		return;
+	}
+
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		sdioh_remove(func);
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) },
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_NONE)		},
+	{ 0, 0, 0, 0 /* end: all zeroes */
+	},
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+	int err;
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+	mmc_pm_flag_t sdio_flags;
+
+	printf("%s Enter func->num=%d\n", __FUNCTION__, func->num);
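+	/* Only function 2 takes part in the suspend handshake; other functions return immediately. */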
+	if (func->num != 2)
+		return 0;
+
+	dhd_mmc_suspend = TRUE;
+	sdioh = sdio_get_drvdata(func);
+	err = bcmsdh_suspend(sdioh->bcmsdh);
+	if (err) {
+		printf("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err);
+		dhd_mmc_suspend = FALSE;
+		return err;
+	}
+
+	sdio_flags = sdio_get_host_pm_caps(func);
+	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+		sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+		dhd_mmc_suspend = FALSE;
+		return  -EINVAL;
+	}
+
+	/* keep power while host suspended */
+	err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+	if (err) {
+		sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+		dhd_mmc_suspend = FALSE;
+		return err;
+	}
+	smp_mb();
+
+	printf("%s Exit\n", __FUNCTION__);
+	return 0;
+}
+
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+
+	printf("%s Enter func->num=%d\n", __FUNCTION__, func->num);
+	if (func->num != 2)
+		return 0;
+
+	dhd_mmc_suspend = FALSE;
+	sdioh = sdio_get_drvdata(func);
+	bcmsdh_resume(sdioh->bcmsdh);
+
+	smp_mb();
+	printf("%s Exit\n", __FUNCTION__);
+	return 0;
+}
+
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+	.suspend	= bcmsdh_sdmmc_suspend,
+	.resume		= bcmsdh_sdmmc_resume,
+};
+#endif  /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+static struct semaphore *notify_semaphore = NULL;
+
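+/* Placeholder SDIO driver: its probe only signals notify_semaphore so that a caller
+ * waiting after sdio_func_reg_notify() learns that SDIO function 2 has been enumerated.
+ */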
+static int dummy_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	if (func && (func->num != 2)) {
+		return 0;
+	}
+
+	if (notify_semaphore)
+		up(notify_semaphore);
+	return 0;
+}
+
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+static struct sdio_driver dummy_sdmmc_driver = {
+	.probe		= dummy_probe,
+	.remove		= dummy_remove,
+	.name		= "dummy_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+	};
+
+int sdio_func_reg_notify(void* semaphore)
+{
+	notify_semaphore = semaphore;
+	return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+void sdio_func_unreg_notify(void)
+{
+	OSL_SLEEP(15);
+	sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+	.probe		= bcmsdh_sdmmc_probe,
+	.remove		= bcmsdh_sdmmc_remove,
+	.name		= "bcmsdh_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+	.drv = {
+	.pm	= &bcmsdh_sdmmc_pm_ops,
+	},
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+	};
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+};
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	if (!sd)
+		return BCME_BADARG;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+	int error = 0;
+	error = sdio_function_init();
+	return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int bcmsdh_register_client_driver(void)
+{
+	return sdio_register_driver(&bcmsdh_sdmmc_driver);
+}
+
+/*
+ * module cleanup
+*/
+void bcmsdh_unregister_client_driver(void)
+{
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c
new file mode 100644
index 0000000..b7091e5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c
@@ -0,0 +1,252 @@
+/*
+ * Broadcom SPI Host Controller Driver - Linux Per-port
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdspi_linux.c 514727 2014-11-12 03:02:48Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#include <bcmsdbus.h>		/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>		/* to get msglevel bit values */
+
+#include <pcicfg.h>
+#include <sdio.h>		/* SDIO Device and Protocol Specs */
+#include <linux/sched.h>	/* request_irq(), free_irq() */
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+
+extern uint sd_crc;
+module_param(sd_crc, uint, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+	wait_queue_head_t intr_wait_queue;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE()	(!in_atomic())
+#else
+#define BLOCKABLE()	(!in_interrupt())
+#endif
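+/* BLOCKABLE() tells whether the current context may sleep; spi_waitbits() uses it to
+ * choose between waiting on the interrupt wait queue and spinning for completion.
+ */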
+
+/* Interrupt handler */
+static irqreturn_t
+sdspi_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+	sdioh_info_t *sd;
+	struct sdos_info *sdos;
+	bool ours;
+
+	sd = (sdioh_info_t *)dev_id;
+	sd->local_intrcount++;
+
+	if (!sd->card_init_done) {
+		sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+		return IRQ_RETVAL(FALSE);
+	} else {
+		ours = spi_check_client_intr(sd, NULL);
+
+		/* For local interrupts, wake the waiting process */
+		if (ours && sd->got_hcint) {
+			sdos = (struct sdos_info *)sd->sdos_info;
+			wake_up_interruptible(&sdos->intr_wait_queue);
+		}
+
+		return IRQ_RETVAL(ours);
+	}
+}
+
+
+/* Register with Linux for interrupts */
+int
+spi_register_irq(sdioh_info_t *sd, uint irq)
+{
+	sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+	if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
+		sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+		return ERROR;
+	}
+	return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+spi_free_irq(uint irq, sdioh_info_t *sd)
+{
+	free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+uint32 *
+spi_reg_map(osl_t *osh, uintptr addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+spi_reg_unmap(osl_t *osh, uintptr addr, int size)
+{
+	REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+spi_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+	sd->sdos_info = (void*)sdos;
+	if (sdos == NULL)
+		return BCME_NOMEM;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+	init_waitqueue_head(&sdos->intr_wait_queue);
+	return BCME_OK;
+}
+
+void
+spi_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	if (!(sd->host_init_done && sd->card_init_done)) {
+		sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+	if (enable && !sd->lockcount)
+		spi_devintr_on(sd);
+	else
+		spi_devintr_off(sd);
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+spi_lock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (sd->lockcount) {
+		sd_err(("%s: Already locked!\n", __FUNCTION__));
+		ASSERT(sd->lockcount == 0);
+	}
+	spi_devintr_off(sd);
+	sd->lockcount++;
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Enable client interrupt */
+void
+spi_unlock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+	ASSERT(sd->lockcount > 0);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+		spi_devintr_on(sd);
+	}
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+void spi_waitbits(sdioh_info_t *sd, bool yield)
+{
+#ifndef BCMSDYIELD
+	ASSERT(!yield);
+#endif
+	sd_trace(("%s: yield %d canblock %d\n",
+	          __FUNCTION__, yield, BLOCKABLE()));
+
+	/* Clear the "interrupt happened" flag and last intrstatus */
+	sd->got_hcint = FALSE;
+
+#ifdef BCMSDYIELD
+	if (yield && BLOCKABLE()) {
+		struct sdos_info *sdos;
+		sdos = (struct sdos_info *)sd->sdos_info;
+		/* Wait for the indication, the interrupt will be masked when the ISR fires. */
+		wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+	} else
+#endif /* BCMSDYIELD */
+	{
+		spi_spinbits(sd);
+	}
+
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmspibrcm.c b/drivers/net/wireless/bcmdhd/bcmspibrcm.c
new file mode 100644
index 0000000..1bbff16
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmspibrcm.c
@@ -0,0 +1,1764 @@
+/*
+ * Broadcom BCMSDH to gSPI Protocol Conversion Layer
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmspibrcm.c 611787 2016-01-12 06:07:27Z $
+ */
+
+#define HSMODE
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#include <sbsdio.h>	/* SDIO device core hardware definitions. */
+#include <spid.h>
+
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+
+#include <pcicfg.h>
+
+
+#include <bcmspibrcm.h>
+#include <bcmspi.h>
+
+/* these are for the older cores... for newer cores we have control for each of them */
+#define F0_RESPONSE_DELAY	16
+#define F1_RESPONSE_DELAY	16
+#define F2_RESPONSE_DELAY	F0_RESPONSE_DELAY
+
+
+#define GSPI_F0_RESP_DELAY		0
+#define GSPI_F1_RESP_DELAY		F1_RESPONSE_DELAY
+#define GSPI_F2_RESP_DELAY		0
+#define GSPI_F3_RESP_DELAY		0
+
+#define CMDLEN		4
+
+/* Globals */
+#if defined(DHD_DEBUG)
+uint sd_msglevel = SDH_ERROR_VAL;
+#else
+uint sd_msglevel = 0;
+#endif 
+
+uint sd_hiok = FALSE;		/* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI;		/* Use SPI mode by default */
+uint sd_f2_blocksize = 64;		/* Default blocksize */
+
+
+uint sd_divisor = 2;
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_crc = 0;		/* Default to SPI CRC Check turned OFF */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+
+uint8	spi_outbuf[SPI_MAX_PKT_LEN];
+uint8	spi_inbuf[SPI_MAX_PKT_LEN];
+
+/* A 128-byte buffer is enough to clear the data-not-available bit and program the F0
+ * response-delay bits, assuming the F0 response delay never exceeds 100 bytes at 48MHz.
+ */
+#define BUF2_PKT_LEN	128
+uint8	spi_outbuf2[BUF2_PKT_LEN];
+uint8	spi_inbuf2[BUF2_PKT_LEN];
+
+#define SPISWAP_WD4(x) bcmswap32(x);
+#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \
+						(bcmswap16((x & 0xffff0000) >> 16) << 16);
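+/* Byte-swap helpers for building gSPI frames: SPISWAP_WD4 swaps a whole 32-bit word, while
+ * SPISWAP_WD2 swaps each 16-bit half independently, matching the device's 32-bit or 16-bit
+ * word-length mode.
+ */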
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                           uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+                              int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                          uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+                                 uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ *  Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (spi_osinit(sd) != 0) {
+		sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->bar0 = bar0;
+	sd->irq = irq;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->use_client_ints = TRUE;
+	sd->sd_use_dma = FALSE;	/* DMA Not supported */
+
+	/* Spi device default is 16bit mode, change to 4 when device is changed to 32bit
+	 * mode
+	 */
+	sd->wordlen = 2;
+
+
+	if (!spi_hw_attach(sd)) {
+		sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (bcmspi_driver_init(sd) != SUCCESS) {
+		sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (spi_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+		spi_free_irq(sd->irq, sd);
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return 0;
+}
+#endif
+
+/* Provide dstatus bits of spi-transaction for dhd layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+	return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+	sd->chip = chip;
+	sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+	uint8 reg = 0;
+	int status;
+
+	if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		reg |= DWORD_PKT_LEN_EN;
+		sd->dwordmode = TRUE;
+		sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+	} else {
+		reg &= ~DWORD_PKT_LEN_EN;
+		sd->dwordmode = FALSE;
+		sd->client_block_size[SPI_FUNC_2] = 2048;
+	}
+
+	if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+}
+
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_SPIERRSTATS,
+	IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL, 	0,	IOVT_UINT32,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t)	},
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed",	IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+	{"spi_respdelay",	IOV_RESP_DELAY_ALL,	0,	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+/*
+	sdioh_regs_t *regs;
+*/
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		if (!spi_start_clock(si, (uint16)sd_divisor)) {
+			sd_err(("%s: set clock failed\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+
+		if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+			sd_err(("%s: Failed changing highspeed mode to %d.\n",
+			        __FUNCTION__, sd_hiok));
+			bcmerror = BCME_ERROR;
+			return ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)si->local_intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+
+	case IOV_GVAL(IOV_SPIERRSTATS):
+	{
+		bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SPIERRSTATS):
+	{
+		bzero(&si->spierrstats, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_GVAL(IOV_RESP_DELAY_ALL):
+		int_val = (int32)si->resp_delay_all;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RESP_DELAY_ALL):
+		si->resp_delay_all = (bool)int_val;
+		int_val = STATUS_ENABLE|INTR_WITH_STATUS;
+		if (si->resp_delay_all)
+			int_val |= RESP_DELAY_ALL;
+		else {
+			if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1,
+			     F1_RESPONSE_DELAY) != SUCCESS) {
+				sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				break;
+			}
+		}
+
+		if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val)
+		     != SUCCESS) {
+			sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+
+	if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) {
+		uint8 dummy_data;
+		status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data);
+		if (status) {
+			sd_err(("sdioh_cfg_read() failed.\n"));
+			return status;
+		}
+	}
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 cis_byte;
+	uint16 *cis = (uint16 *)cisd;
+	uint bar0 = SI_ENUM_BASE;
+	int status;
+	uint8 data;
+
+	sd_trace(("%s: Func %d\n", __FUNCTION__, func));
+
+	spi_lock(sd);
+
+	/* Set sb window address to 0x18000000 */
+	data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK;
+	status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data);
+	if (status == SUCCESS) {
+		data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+	if (status == SUCCESS) {
+		data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+
+	offset =  CC_SROM_OTP; /* OTP offset in chipcommon. */
+	for (count = 0; count < length/2; count++) {
+		if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			spi_unlock(sd);
+			return (BCME_ERROR);
+		}
+
+		*cis = (uint16)cis_byte;
+		cis++;
+		offset += 2;
+	}
+
+	spi_unlock(sd);
+
+	return (BCME_OK);
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	spi_lock(sd);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
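+	/* The 32-bit gSPI command word packs the function number, access mode, register
+	 * address, read/write flag and transfer length into a single header.
+	 */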
+
+	if (rw == SDIOH_READ) {
+		sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n",
+		          __FUNCTION__, cmd_arg, func, regaddr));
+	} else {
+		sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+		          __FUNCTION__, cmd_arg, func, regaddr, data));
+	}
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) {
+		spi_unlock(sd);
+		return status;
+	}
+
+	if (rw == SDIOH_READ) {
+		*byte = (uint8)data;
+		sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte));
+	}
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus=0x%x\n", dstatus));
+
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int status;
+
+	spi_lock(sd);
+
+	if (rw == SDIOH_READ)
+		status = bcmspi_card_regread(sd, func, addr, nbytes, word);
+	else
+		status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+	spi_unlock(sd);
+	return (status == SUCCESS ?  SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	int len;
+	int buflen = (int)buflen_u;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+	spi_lock(sd);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks. */
+	while (buflen > 0) {
+		len = MIN(sd->client_block_size[func], buflen);
+		if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			sd_err(("%s: bcmspi_card_buf %s failed\n",
+				__FUNCTION__, rw == SDIOH_READ ? "Read" : "Write"));
+			spi_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		buffer += len;
+		buflen -= len;
+		if (!fifo)
+			addr += len;
+	}
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* This function allows a write to the gSPI bus while another rd/wr function is deep down the
+ * call stack.  Its main aim is to keep such SPI writes simple rather than recursive, e.g. when
+ * the response delay has to be programmed on the fly after the SPI function has been detected,
+ * this call programs it directly.
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+	uint32 cmd_arg;
+	uint32 datalen = 1;
+	uint32 hostlen;
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg);
+		if (datalen & 0x1)
+			datalen++;
+	} else {
+		sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer  */
+	if (datalen != 0) {
+			if (sd->wordlen == 4) { /* 32bit spid */
+				*(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte);
+			} else if (sd->wordlen == 2) { /* 16bit spid */
+				*(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte);
+			}
+	}
+
+	/* +4 for cmd, +4 for dstatus */
+	hostlen = datalen + 8;
+	hostlen += (4 - (hostlen & 0x3));
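+	/* This pads hostlen to the next 4-byte boundary (and adds a full extra word when it
+	 * is already aligned).
+	 */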
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+	if (sd->resp_delay_all == FALSE)
+		return (BCME_OK);
+
+	if (sd->prev_fun == func)
+		return (BCME_OK);
+
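+	/* When F0_RESPONSE_DELAY equals F1_RESPONSE_DELAY there is nothing to reprogram,
+	 * so the rewrite below is skipped.
+	 */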
+	if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+		return (BCME_OK);
+
+	bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+	/* Remember function for which to avoid reprogramming resp-delay in next iteration */
+	sd->prev_fun = func;
+
+	return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN	0x0
+
+/* A resync pattern is a 32bit MOSI line with all zeros. It's a special command in gSPI.
+ * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is
+ * synchronised and all queued requests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+	uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	*(uint32 *)spi_outbuf2 = cmd_arg;
+
+	/* for Write, put the data into the output buffer  */
+	*(uint32 *)&spi_outbuf2[CMDLEN] = data;
+
+	/* +4 for cmd, +4 for dstatus */
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+uint32 dstatus_count = 0;
+
+static int
+bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg)
+{
+	uint32 dstatus = sd->card_dstatus;
+	struct spierrstats_t *spierrstats = &sd->spierrstats;
+	int err = SUCCESS;
+
+	sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus));
+
+	/* Store dstatus of last few gSPI transactions */
+	spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus;
+	spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg;
+	dstatus_count++;
+
+	if (sd->card_init_done == FALSE)
+		return err;
+
+	if (dstatus & STATUS_DATA_NOT_AVAILABLE) {
+		spierrstats->dna++;
+		sd_trace(("Read data not available on F1 addr = 0x%x\n",
+		        GFIELD(cmd_arg, SPI_REG_ADDR)));
+		/* Clear dna bit */
+		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE);
+	}
+
+	if (dstatus & STATUS_UNDERFLOW) {
+		spierrstats->rdunderflow++;
+		sd_err(("FIFO underflow happened due to current F2 read command.\n"));
+	}
+
+	if (dstatus & STATUS_OVERFLOW) {
+		spierrstats->wroverflow++;
+		sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n"));
+		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW);
+		bcmspi_resync_f1(sd);
+		sd_err(("Recovering from F1 FIFO overflow.\n"));
+	}
+
+	if (dstatus & STATUS_F2_INTR) {
+		spierrstats->f2interrupt++;
+		sd_trace(("Interrupt from F2.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_F3_INTR) {
+		spierrstats->f3interrupt++;
+		sd_err(("Interrupt from F3.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_HOST_CMD_DATA_ERR) {
+		spierrstats->hostcmddataerr++;
+		sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n"));
+	}
+
+	if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+		spierrstats->f2pktavailable++;
+		sd_trace(("Packet is available/ready in F2 TX FIFO\n"));
+		sd_trace(("Packet length = %d\n", sd->dwordmode ?
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+	}
+
+	if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+		spierrstats->f3pktavailable++;
+		sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+		sd_err(("Packet length = %d\n",
+		        (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+	}
+
+	return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+	return 0;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+	/* Default power on mode */
+	sd->sd_mode = SDIOH_MODE_SPI;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = 1;
+
+	return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+	uint32 regdata[2];
+	int status;
+
+	/* Find F1/F2/F3 max packet size */
+	if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+	                                 8, regdata)) != SUCCESS) {
+		return status;
+	}
+
+	sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+	        regdata[0], regdata[1]));
+
+	sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+	ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+	sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+	ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+	sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+	ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+	return 0;
+}
+
+static int
+bcmspi_client_init(sdioh_info_t *sd)
+{
+	uint32	status_en_reg = 0;
+	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+#ifdef HSMODE
+	if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#else
+	/* Start at ~400KHz clock rate for initialization */
+	if (!spi_start_clock(sd, 128)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	if (!bcmspi_host_device_init_adapt(sd)) {
+		sd_err(("bcmspi_host_device_init_adapt failed\n"));
+		return ERROR;
+	}
+
+	if (!bcmspi_test_card(sd)) {
+		sd_err(("bcmspi_test_card failed\n"));
+		return ERROR;
+	}
+
+	sd->num_funcs = SPI_MAX_IOFUNCS;
+
+	get_client_blocksize(sd);
+
+	/* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
+	bcmspi_resync_f1(sd);
+
+	sd->dwordmode = FALSE;
+
+	bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);
+
+	sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__));
+	status_en_reg |= INTR_WITH_STATUS;
+
+	if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
+	    status_en_reg & 0xff) != SUCCESS) {
+		sd_err(("%s: Unable to set response delay for all functions.\n", __FUNCTION__));
+		return ERROR;
+	}
+
+#ifndef HSMODE
+	/* After configuring for High-Speed mode, set the desired clock rate. */
+	if (!spi_start_clock(sd, 4)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	/* check to see if the response delay needs to be programmed properly */
+	{
+		uint32 f1_respdelay = 0;
+		bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay);
+		if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) {
+			/* older sdiodevice core with no separate resp delay for each function */
+			sd_err(("older corerev < 4 so use the same resp delay for all funcs\n"));
+			sd->resp_delay_new = FALSE;
+		}
+		else {
+			/* newer sdiodevice core with a separate resp delay for each function */
+			int ret_val;
+			sd->resp_delay_new = TRUE;
+			sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n"));
+			sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n",
+				GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY,
+				GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY));
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1,
+				GSPI_F0_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1,
+				GSPI_F1_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1,
+				GSPI_F2_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1,
+				GSPI_F3_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F3\n", __FUNCTION__));
+				return ERROR;
+			}
+		}
+	}
+
+
+	sd->card_init_done = TRUE;
+
+	/* get the device rev to program the prop respdelays */
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+	                                 4, &regdata)) != SUCCESS)
+		return status;
+
+	sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));
+
+
+	if (hsmode == TRUE) {
+		sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			sd_trace(("Device is already in High-Speed mode.\n"));
+			return status;
+		} else {
+			regdata |= HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS) {
+				return status;
+			}
+		}
+	} else {
+		sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			regdata &= ~HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS)
+				return status;
+		}
+		 else {
+			sd_trace(("Device is already in Low-Speed mode.\n"));
+			return status;
+		}
+	}
+	spi_controller_highspeed_mode(sd, hsmode);
+
+	return TRUE;
+}
+
+#define bcmspi_find_curr_mode(sd) { \
+	sd->wordlen = 2; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd->wordlen = 4; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd_trace(("Silicon testability issue: regdata = 0x%x." \
+		" Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \
+	OSL_DELAY(100000); \
+}
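+/* In the checks above, 0xad is the low byte of the read-only test pattern (cf. the 0xfeedbead
+ * handling further down); 0x5a, 0x5b and 0x5d appear to be that byte with the pattern shifted
+ * by one bit, so a match means the current word-length guess fits the device.
+ */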
+
+#define INIT_ADAPT_LOOP		100
+
+/* Adapt clock-phase-speed-bitwidth between host and device */
+static bool
+bcmspi_host_device_init_adapt(sdioh_info_t *sd)
+{
+	uint32 wrregdata, regdata = 0;
+	int status;
+	int i;
+
+	/* Due to a silicon testability issue, the first command from the Host
+	 * to the device will get corrupted (the first bit will be lost). So the
+	 * Host should poll the device with a safe read request, i.e. the Host
+	 * should try to read F0 addr 0x14 using the fixed-address mode
+	 * (this prevents an unintended write command from being detected by the device).
+	 */
+	for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+	/* If device was not power-cycled it will stay in 32bit mode with
+	 * response-delay-all bit set.  Alternate the iteration so that
+	 * read either with or without response-delay for F0 to succeed.
+	 */
+		bcmspi_find_curr_mode(sd);
+		sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = TRUE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = FALSE;
+	}
+
+	/* Bail out, device not detected */
+	if (i == INIT_ADAPT_LOOP)
+		return FALSE;
+
+	/* Softreset the spid logic */
+	if ((sd->dwordmode) || (sd->wordlen == 4)) {
+		bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
+		bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
+		sd_trace(("reset reg read = 0x%x\n", regdata));
+		sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
+		       sd->wordlen, sd->resp_delay_all));
+		/* Restore default state after softreset */
+		sd->wordlen = 2;
+		sd->dwordmode = FALSE;
+	}
+
+	if (sd->wordlen == 4) {
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) !=
+		     SUCCESS)
+				return FALSE;
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n",
+			          regdata));
+			sd_trace(("Spid power was left on.\n"));
+		} else {
+			sd_err(("Spid power was left on but signature read failed."
+			        " Value read = 0x%x\n", regdata));
+			return FALSE;
+		}
+	} else {
+		sd->wordlen = 2;
+
+#define CTRL_REG_DEFAULT	0x00010430 /* according to the host m/c */
+
+		wrregdata = (CTRL_REG_DEFAULT);
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+		sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata));
+
+#ifndef HSMODE
+		wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY);
+		wrregdata &= ~HIGH_SPEED_MODE;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+#endif /* HSMODE */
+
+		for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+			if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) {
+				sd_trace(("0xfeedbead was leftshifted by 1-bit.\n"));
+				if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4,
+				     &regdata)) != SUCCESS)
+					return FALSE;
+			}
+			OSL_DELAY(1000);
+		}
+
+#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH)
+		/* Change to host controller intr-polarity of active-high */
+		wrregdata |= INTR_POLARITY;
+#else
+		/* Change to host controller intr-polarity of active-low */
+		wrregdata &= ~INTR_POLARITY;
+#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */
+
+		sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n",
+		        wrregdata));
+		/* Change to 32bit mode */
+		wrregdata |= WORD_LENGTH_32;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+
+		/* Change command/data packaging in 32bit LE mode */
+		sd->wordlen = 4;
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Read spid passed. Value read = 0x%x\n", regdata));
+			sd_trace(("Spid had power-on cycle OR spi was soft-reset\n"));
+		} else {
+			sd_err(("Stale spid reg values read as it was kept powered. Value read = "
+			  "0x%x\n", regdata));
+			return FALSE;
+		}
+	}
+
+
+	return TRUE;
+}
+
+static bool
+bcmspi_test_card(sdioh_info_t *sd)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+		return FALSE;
+
+	if (regdata == (TEST_RO_DATA_32BIT_LE))
+		sd_trace(("32bit LE regdata = 0x%x\n", regdata));
+	else {
+		sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata));
+		return FALSE;
+	}
+
+
+#define RW_PATTERN1	0xA0A1A2A3
+#define RW_PATTERN2	0x4B5B6B7B
+
+	regdata = RW_PATTERN1;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN1) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN1, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	regdata = RW_PATTERN2;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN2) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN2, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	return TRUE;
+}
+
+static int
+bcmspi_driver_init(sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((bcmspi_host_init(sd)) != SUCCESS) {
+		return ERROR;
+	}
+
+	if (bcmspi_client_init(sd) != SUCCESS) {
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
+/* Read device reg */
+static int
+bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);	/* Fixed access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize);
+
+	sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+		return status;
+
+	sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data));
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	sd_trace(("dstatus =0x%x\n", dstatus));
+	return SUCCESS;
+}
+
+/* write a device register */
+static int
+bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, regsize, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus=0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+/* write a device register - 1 byte */
+static int
+bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+void
+bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer)
+{
+	*dstatus_buffer = sd->card_dstatus;
+}
+
+/* 'data' is of type uint32 whereas other buffers are of type uint8 */
+static int
+bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                uint32 *data, uint32 datalen)
+{
+	uint32	i, j;
+	uint8	resp_delay = 0;
+	int	err = SUCCESS;
+	uint32	hostlen;
+	uint32 spilen = 0;
+	uint32 dstatus_idx = 0;
+	uint16 templen, buslen, len, *ptr = NULL;
+
+	sd_trace(("spi cmd = 0x%x\n", cmd_arg));
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg);
+		if (datalen & 0x1)
+			datalen++;
+		if (datalen < 4)
+			datalen = ROUNDUP(datalen, 4);
+	} else {
+		sd_err(("Host is %d bit spid, could not create SPI command.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer */
+	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) {
+		/* The len field of the hw-header is always sent as a mod-16 size, by both host and dongle */
+		if (datalen != 0) {
+			for (i = 0; i < datalen/4; i++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					*(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+						SPISWAP_WD4(data[i]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					*(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+						SPISWAP_WD2(data[i]);
+				}
+			}
+		}
+	}
+
+	/* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */
+	if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) {
+		int func = GFIELD(cmd_arg, SPI_FUNCTION);
+		switch (func) {
+			case 0:
+				if (sd->resp_delay_new)
+					resp_delay = GSPI_F0_RESP_DELAY;
+				else
+					resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0;
+				break;
+			case 1:
+				if (sd->resp_delay_new)
+					resp_delay = GSPI_F1_RESP_DELAY;
+				else
+					resp_delay = F1_RESPONSE_DELAY;
+				break;
+			case 2:
+				if (sd->resp_delay_new)
+					resp_delay = GSPI_F2_RESP_DELAY;
+				else
+					resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0;
+				break;
+			default:
+				ASSERT(0);
+				break;
+		}
+		/* Program response delay */
+		if (sd->resp_delay_new == FALSE)
+			bcmspi_prog_resp_delay(sd, func, resp_delay);
+	}
+
+	/* +4 for cmd and +4 for dstatus */
+	hostlen = datalen + 8 + resp_delay;
+	hostlen += dstatus_idx;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);
+
+	/* for Read, get the data into the input buffer */
+	if (datalen != 0) {
+		if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */
+			for (j = 0; j < datalen/4; j++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay]);
+				}
+			}
+		}
+	}
+
+	dstatus_idx += (datalen + CMDLEN + resp_delay);
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]);
+	} else {
+		sd_err(("Host is %d bit machine, could not read SPI dstatus.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+	if (sd->card_dstatus == 0xffffffff) {
+		sd_err(("does not look like a gSPI device, or the device is not powered.\n"));
+	}
+
+	err = bcmspi_update_stats(sd, cmd_arg);
+
+	return err;
+
+}
+
+static int
+bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                uint32 addr, int nbytes, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	bool write = rw == SDIOH_READ ? 0 : 1;
+	uint retries = 0;
+
+	bool enable;
+	uint32	spilen;
+
+	cmd_arg = 0;
+
+	ASSERT(nbytes);
+	ASSERT(nbytes <= sd->client_block_size[func]);
+
+	if (write) sd->t_cnt++; else sd->r_cnt++;
+
+	if (func == 2) {
+		/* Frame len check limited by gSPI. */
+		if ((nbytes > 2000) && write) {
+			sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes));
+		}
+		/* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */
+		/* If F2 fifo on device is not ready to receive data, don't do F2 transfer */
+		if (write) {
+			uint32 dstatus;
+			/* check F2 ready with cached one */
+			bcmspi_cmd_getdstatus(sd, &dstatus);
+			if ((dstatus & STATUS_F2_RX_READY) == 0) {
+				retries = WAIT_F2RXFIFORDY;
+				enable = 0;
+				while (retries-- && !enable) {
+					OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000);
+					bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4,
+					                   &dstatus);
+					if (dstatus & STATUS_F2_RX_READY)
+						enable = TRUE;
+				}
+				if (!enable) {
+					struct spierrstats_t *spierrstats = &sd->spierrstats;
+					spierrstats->f2rxnotready++;
+					sd_err(("F2 FIFO is not ready to receive data.\n"));
+					return ERROR;
+				}
+				sd_trace(("No of retries on F2 ready %d\n",
+					(WAIT_F2RXFIFORDY - retries)));
+			}
+		}
+	}
+
+	/* F2 transfers happen on 0 addr */
+	addr = (func == 2) ? 0 : addr;
+
+	/* In pio mode buffer is read using fixed address fifo in func 1 */
+	if ((func == 1) && (fifo))
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);
+	else
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);
+
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write);
+	spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes);
+	if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+		/* convert len to mod4 size */
+		spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0);
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+	} else
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen);
+
+	if ((func == 2) && (fifo == 1)) {
+		sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+		          __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+		          addr, nbytes, sd->r_cnt, sd->t_cnt));
+	}
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+	         __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+	         addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) {
+		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
+			(write ? "write" : "read")));
+		return status;
+	}
+
+	/* gSPI expects that hw-header-len is equal to spi-command-len */
+	if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
+	}
+
+	if ((nbytes > 2000) && !write) {
+		sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
+	}
+
+	return SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+	si->card_init_done = FALSE;
+	return bcmspi_client_init(si);
+}
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c
new file mode 100644
index 0000000..05ee100
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -0,0 +1,4106 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmutils.c 699163 2017-05-12 05:18:23Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <ethernet.h>
+#include <vlan.h>
+#include <bcmip.h>
+#include <802.1d.h>
+#include <802.11.h>
+#include <bcmip.h>
+#include <bcmipv6.h>
+#include <bcmtcp.h>
+
+/* Look-up table to calculate head room present in a number */
+static const uint8 msb_table[] = {
+	0, 1, 2, 2, 3, 3, 3, 3,
+	4, 4, 4, 4, 4, 4, 4, 4,
+	5, 5, 5, 5, 5, 5, 5, 5,
+	5, 5, 5, 5, 5, 5, 5, 5,
+	6, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 6,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 7, 7,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 8, 8, 8, 8,
+};
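+
+/*
+ * Note on the table above (a hedged reading, not from the original source):
+ * msb_table[v] appears to give the 1-based position of the most significant
+ * set bit of the byte v (0 for v == 0), i.e. the bit width needed for v.
+ *
+ *	uint bits = msb_table[0x13];	// 0x13 = 19 < (1 << 5), so bits == 5
+ */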
+
+void *_bcmutils_dummy_fn = NULL;
+
+
+
+
+#ifdef BCMDRIVER
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	if (len < 0)
+		len = 4096;	/* "infinite" */
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(PKTDATA(osh, p) + offset, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
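+
+/*
+ * Usage sketch for pktcopy() (illustrative; 'osh' and 'pkt' stand for a valid
+ * osl handle and packet chain that a caller would already have):
+ *
+ *	uchar hdr[64];
+ *	uint copied = pktcopy(osh, pkt, 0, sizeof(hdr), hdr);
+ *	// 'copied' may be less than sizeof(hdr) if the chain is shorter
+ */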
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(buf, PKTDATA(osh, p) + offset, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+	uint total;
+	int len;
+
+	total = 0;
+	for (; p; p = PKTNEXT(osh, p)) {
+		len = PKTLEN(osh, p);
+		total += len;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				total += PKTFRAGTOTLEN(osh, p);
+			}
+		}
+#endif
+	}
+
+	return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+	for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+		;
+
+	return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+	uint cnt;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				cnt += PKTFRAGTOTNUM(osh, p);
+			}
+		}
+#endif
+	}
+
+	return cnt;
+}
+
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt_war(osl_t *osh, void *p)
+{
+	uint cnt;
+	uint8 *pktdata;
+	uint len, remain, align64;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+		len = PKTLEN(osh, p);
+		if (len > 128) {
+			pktdata = (uint8 *)PKTDATA(osh, p);	/* starting address of data */
+			/* Check for page boundary straddle (2048B) */
+			if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff))
+				cnt++;
+
+			align64 = (uint)((uintptr)pktdata & 0x3f);	/* aligned to 64B */
+			align64 = (64 - align64) & 0x3f;
+			len -= align64;		/* bytes from aligned 64B to end */
+			/* if aligned to 128B, check for MOD 128 between 1 to 4B */
+			remain = len % 128;
+			if (remain > 0 && remain <= 4)
+				cnt++;		/* add extra seg */
+		}
+	}
+
+	return cnt;
+}
+
+uint8 * BCMFASTPATH
+pktdataoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint pkt_off = 0, len = 0;
+	uint8 *pdata = (uint8 *) PKTDATA(osh, p);
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		pdata = (uint8 *) PKTDATA(osh, p);
+		pkt_off = offset - len;
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return (uint8*) (pdata+pkt_off);
+}
+
+
+/* given an offset in pdata, find the pkt seg hdr */
+void *
+pktoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint len = 0;
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return p;
+}
+
+#endif /* BCMDRIVER */
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+const unsigned char bcm_ctype[] = {
+
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
+	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+	_BCM_C,	/* 8-15 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,		/* 32-39 */
+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
+	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
+	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
+	_BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+	_BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
+	_BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+	_BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 128-143 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 144-159 */
+	_BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 160-175 */
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 176-191 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,	/* 192-207 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L,	/* 208-223 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,	/* 224-239 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+	ulong result, last_result = 0, value;
+	bool minus;
+
+	minus = FALSE;
+
+	while (bcm_isspace(*cp))
+		cp++;
+
+	if (cp[0] == '+')
+		cp++;
+	else if (cp[0] == '-') {
+		minus = TRUE;
+		cp++;
+	}
+
+	if (base == 0) {
+		if (cp[0] == '0') {
+			if ((cp[1] == 'x') || (cp[1] == 'X')) {
+				base = 16;
+				cp = &cp[2];
+			} else {
+				base = 8;
+				cp = &cp[1];
+			}
+		} else
+			base = 10;
+	} else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+		cp = &cp[2];
+	}
+
+	result = 0;
+
+	while (bcm_isxdigit(*cp) &&
+	       (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus) {
+			if (endp) {
+				/* Go to the end of current number */
+				while (bcm_isxdigit(*cp)) {
+					cp++;
+				}
+				*endp = DISCARD_QUAL(cp, char);
+			}
+			return (ulong)-1;
+		}
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = DISCARD_QUAL(cp, char);
+
+	return (result);
+}
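+
+/*
+ * Usage sketch for bcm_strtoul() (illustrative only):
+ *
+ *	char *end;
+ *	ulong v = bcm_strtoul("0x1f", &end, 0);		// base 0 auto-detects hex: v == 31
+ *	ulong d = bcm_strtoul("42", NULL, 10);		// plain decimal: d == 42
+ */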
+
+int
+bcm_atoi(const char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+	int len, nlen;
+	int i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return DISCARD_QUAL(haystack, char);
+
+	nlen = (int)strlen(needle);
+	len = (int)strlen(haystack) - nlen + 1;
+
+	for (i = 0; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return DISCARD_QUAL(&haystack[i], char);
+	return (NULL);
+}
+
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
+{
+	for (; s_len >= substr_len; s++, s_len--)
+		if (strncmp(s, substr, substr_len) == 0)
+			return DISCARD_QUAL(s, char);
+
+	return NULL;
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+*  but allows bcmstrtok() to be used by different strings or callers at the same
+*  time. Each call modifies '*string' by substituting a NULL character for the
+*  first delimiter that is encountered, and updates 'string' to point to the char
+*  after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+	 */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
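+
+/*
+ * Usage sketch for bcmstrtok() (illustrative; the buffer contents are made up):
+ *
+ *	char buf[] = "key=val,flag";
+ *	char *cursor = buf, *tok, delim;
+ *	while ((tok = bcmstrtok(&cursor, "=,", &delim)) != NULL)
+ *		printf("token '%s'\n", tok);	// yields "key", "val", "flag"
+ *	// 'delim' holds the delimiter that ended each token ('\0' for the last one)
+ */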
+
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare two strings case-insensitively, up to a max of 'cnt'
+*             characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+	int i = 0;
+	char *ep;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+		p = ep;
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
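+
+/*
+ * Usage sketch for bcm_ether_atoe() (illustrative only; the address is made up):
+ *
+ *	struct ether_addr ea;
+ *	if (bcm_ether_atoe("00:90:4c:aa:bb:cc", &ea)) {
+ *		// ea.octet[] now holds the six parsed bytes
+ *	}
+ */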
+
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
+
+	int i = 0;
+	char *c;
+	for (;;) {
+		ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+		if (*c++ != '.' || i == IPV4_ADDR_LEN)
+			break;
+		p = c;
+	}
+	return (i == IPV4_ADDR_LEN);
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+	ulong copyct = 1;
+	ushort i;
+
+	if (abuflen == 0)
+		return 0;
+
+	/* wbuflen is in bytes */
+	wbuflen /= sizeof(ushort);
+
+	for (i = 0; i < wbuflen; ++i) {
+		if (--abuflen == 0)
+			break;
+		*abuf++ = (char) *wbuf++;
+		++copyct;
+	}
+	*abuf = '\0';
+
+	return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+	static const char hex[] =
+	  {
+		  '0', '1', '2', '3', '4', '5', '6', '7',
+		  '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+	  };
+	const uint8 *octet = ea->octet;
+	char *p = buf;
+	int i;
+
+	for (i = 0; i < 6; i++, octet++) {
+		*p++ = hex[(*octet >> 4) & 0xf];
+		*p++ = hex[*octet & 0xf];
+		*p++ = ':';
+	}
+
+	*(p-1) = '\0';
+
+	return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+	snprintf(buf, 16, "%d.%d.%d.%d",
+	         ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+	return (buf);
+}
+
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+	/* Implementing RFC 5952 Sections 4 + 5 */
+	/* Not thoroughly tested */
+	uint16 tmp[8];
+	uint16 *a = &tmp[0];
+	char *p = buf;
+	int i, i_max = -1, cnt = 0, cnt_max = 1;
+	uint8 *a4 = NULL;
+	memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if (a[i]) {
+			if (cnt > cnt_max) {
+				cnt_max = cnt;
+				i_max = i - cnt;
+			}
+			cnt = 0;
+		} else
+			cnt++;
+	}
+	if (cnt > cnt_max) {
+		cnt_max = cnt;
+		i_max = i - cnt;
+	}
+	if (i_max == 0 &&
+		/* IPv4-translated: ::ffff:0:a.b.c.d */
+		((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+		/* IPv4-mapped: ::ffff:a.b.c.d */
+		(cnt_max == 5 && a[5] == 0xffff)))
+		a4 = (uint8*) (a + 6);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if ((uint8*) (a + i) == a4) {
+			snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+			break;
+		} else if (i == i_max) {
+			*p++ = ':';
+			i += cnt_max - 1;
+			p[0] = ':';
+			p[1] = '\0';
+		} else {
+			if (i)
+				*p++ = ':';
+			p += snprintf(p, 8, "%x", ntoh16(a[i]));
+		}
+	}
+
+	return buf;
+}
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+	uint i;
+
+	for (i = 0; i < ms; i++) {
+		OSL_DELAY(1000);
+	}
+}
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+	void *p;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	for (p = p0; p; p = PKTNEXT(osh, p))
+		prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif	
+
+/* Takes an Ethernet frame and sets the out-of-band PKTPRIO.
+ * Also updates the in-place vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata;
+	int priority = 0;
+	int rc = 0;
+
+	pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+	eh = (struct ether_header *) pktdata;
+
+	if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+		uint16 vlan_tag;
+		int vlan_prio, dscp_prio = 0;
+
+		evh = (struct ethervlan_header *)eh;
+
+		vlan_tag = ntoh16(evh->vlan_tag);
+		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+		if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+			(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+			uint8 tos_tc = IP_TOS46(ip_body);
+			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		}
+
+		/* DSCP priority gets precedence over 802.1P (vlan tag) */
+		if (dscp_prio != 0) {
+			priority = dscp_prio;
+			rc |= PKTPRIO_VDSCP;
+		} else {
+			priority = vlan_prio;
+			rc |= PKTPRIO_VLAN;
+		}
+		/*
+		 * If the DSCP priority is not the same as the VLAN priority,
+		 * then overwrite the priority field in the vlan tag, with the
+		 * DSCP priority value. This is required for Linux APs because
+		 * the VLAN driver on Linux, overwrites the skb->priority field
+		 * with the priority value in the vlan tag
+		 */
+		if (update_vtag && (priority != vlan_prio)) {
+			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+			evh->vlan_tag = hton16(vlan_tag);
+			rc |= PKTPRIO_UPD;
+		}
+#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
+	} else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+		priority = PRIO_8021D_NC;
+		rc = PKTPRIO_DSCP;
+#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
+	} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+		(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+		uint8 *ip_body = pktdata + sizeof(struct ether_header);
+		uint8 tos_tc = IP_TOS46(ip_body);
+		uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+		switch (dscp) {
+		case DSCP_EF:
+			priority = PRIO_8021D_VO;
+			break;
+		case DSCP_AF31:
+		case DSCP_AF32:
+		case DSCP_AF33:
+			priority = PRIO_8021D_CL;
+			break;
+		case DSCP_AF21:
+		case DSCP_AF22:
+		case DSCP_AF23:
+		case DSCP_AF11:
+		case DSCP_AF12:
+		case DSCP_AF13:
+			priority = PRIO_8021D_EE;
+			break;
+		default:
+			priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+			break;
+		}
+
+		rc |= PKTPRIO_DSCP;
+	}
+
+	ASSERT(priority >= 0 && priority <= MAXPRIO);
+	PKTSETPRIO(pkt, priority);
+	return (rc | priority);
+}
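+
+/*
+ * Usage sketch for pktsetprio() (illustrative; it assumes the PKTPRIO_* flag
+ * bits sit above the 3-bit priority, which is how the return value is composed
+ * above):
+ *
+ *	uint rc = pktsetprio(pkt, TRUE);
+ *	uint prio = rc & MAXPRIO;			// 802.1d priority actually applied
+ *	bool from_dscp = (rc & PKTPRIO_DSCP) != 0;	// TRUE if taken from the IP DSCP field
+ */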
+
+/* lookup user priority for specified DSCP */
+static uint8
+dscp2up(uint8 *up_table, uint8 dscp)
+{
+	uint8 user_priority = 255;
+
+	/* lookup up from table if parameters valid */
+	if (up_table != NULL && dscp < UP_TABLE_MAX) {
+		user_priority = up_table[dscp];
+	}
+
+	/* 255 is an unused value, so derive the UP from the DSCP precedence bits */
+	if (user_priority == 255) {
+		user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT);
+	}
+
+	return user_priority;
+}
+
+/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */
+uint BCMFASTPATH
+pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag)
+{
+	if (up_table) {
+		uint8 *pktdata;
+		uint pktlen;
+		uint8 dscp;
+		uint user_priority = 0;
+		uint rc = 0;
+
+		pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+		pktlen = PKTLEN(OSH_NULL, pkt);
+
+		if (pktgetdscp(pktdata, pktlen, &dscp)) {
+			rc = PKTPRIO_DSCP;
+			user_priority = dscp2up(up_table, dscp);
+			PKTSETPRIO(pkt, user_priority);
+		}
+
+		return (rc | user_priority);
+	} else {
+		return pktsetprio(pkt, update_vtag);
+	}
+}
+
+/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
+ */
+bool BCMFASTPATH
+pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *ip_body;
+	bool rc = FALSE;
+
+	/* minimum length is ether header and IP header */
+	if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)
+		return FALSE;
+
+	eh = (struct ether_header *) pktdata;
+
+	if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
+		ip_body = pktdata + sizeof(struct ether_header);
+		*dscp = IP_DSCP46(ip_body);
+		rc = TRUE;
+	}
+	else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
+		evh = (struct ethervlan_header *)eh;
+
+		/* minimum length is ethervlan header and IP header */
+		if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
+			evh->ether_type == HTON16(ETHER_TYPE_IP)) {
+			ip_body = pktdata + sizeof(struct ethervlan_header);
+			*dscp = IP_DSCP46(ip_body);
+			rc = TRUE;
+		}
+	}
+
+	return rc;
+}
+
+/* Add to adjust the 802.1x priority */
+void
+pktset8021xprio(void *pkt, int prio)
+{
+	struct ether_header *eh;
+	uint8 *pktdata;
+	if(prio == PKTPRIO(pkt))
+		return;
+	pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+	eh = (struct ether_header *) pktdata;
+	if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+		ASSERT(prio >= 0 && prio <= MAXPRIO);
+		PKTSETPRIO(pkt, prio);
+	}
+}
+
+/* usr_prio range from low to high with usr_prio value */
+static bool
+up_table_set(uint8 *up_table, uint8 usr_prio, uint8 low, uint8 high)
+{
+	int i;
+
+	if (usr_prio > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) {
+		return FALSE;
+	}
+
+	for (i = low; i <= high; i++) {
+		up_table[i] = usr_prio;
+	}
+
+	return TRUE;
+}
+
+/* set user priority table */
+int BCMFASTPATH
+wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie)
+{
+	uint8 len;
+
+	if (up_table == NULL || qos_map_ie == NULL) {
+		return BCME_ERROR;
+	}
+
+	/* clear table to check table was set or not */
+	memset(up_table, 0xff, UP_TABLE_MAX);
+
+	/* length of QoS Map IE must be 16+n*2, n is number of exceptions */
+	if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID &&
+			(len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH &&
+			(len % 2) == 0) {
+		uint8 *except_ptr = (uint8 *)qos_map_ie->data;
+		uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
+		uint8 *range_ptr = except_ptr + except_len;
+		int i;
+
+		/* fill in ranges */
+		for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
+			uint8 low = range_ptr[i];
+			uint8 high = range_ptr[i + 1];
+			if (low == 255 && high == 255) {
+				continue;
+			}
+
+			if (!up_table_set(up_table, i / 2, low, high)) {
+				/* clear the table on failure */
+				memset(up_table, 0xff, UP_TABLE_MAX);
+				return BCME_ERROR;
+			}
+		}
+
+		/* update exceptions */
+		for (i = 0; i < except_len; i += 2) {
+			uint8 dscp = except_ptr[i];
+			uint8 usr_prio = except_ptr[i+1];
+
+			/* exceptions with invalid dscp/usr_prio are ignored */
+			up_table_set(up_table, usr_prio, dscp, dscp);
+		}
+	}
+
+	return BCME_OK;
+}
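+
+/*
+ * Usage sketch for the QoS Map handling above (illustrative; 'qos_map_ie' is
+ * assumed to point at a received DOT11_MNG_QOS_MAP_ID element):
+ *
+ *	uint8 up_table[UP_TABLE_MAX];
+ *	if (wl_set_up_table(up_table, qos_map_ie) == BCME_OK) {
+ *		// per packet, prefer the negotiated table over the static DSCP mapping
+ *		pktsetprio_qms(pkt, up_table, TRUE);
+ *	}
+ */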
+
+/* The 0.5KB string table is not removed by the compiler even though it is unused */
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings  */
+const char *
+bcmerrorstr(int bcmerror)
+{
+	/* check if someone added a bcmerror code but forgot to add errorstring */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+	if (bcmerror > 0 || bcmerror < BCME_LAST) {
+		snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+		return bcm_undeferrstr;
+	}
+
+	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+	return bcmerrorstrtable[-bcmerror];
+}
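+
+/*
+ * Usage sketch for bcmerrorstr() (illustrative only):
+ *
+ *	int err = BCME_BUFTOOSHORT;
+ *	printf("iovar failed: %s (%d)\n", bcmerrorstr(err), err);
+ */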
+
+
+/* iovar table lookup */
+/* could mandate sorted tables and do a binary search */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+	const bcm_iovar_t *vi;
+	const char *lookup_name;
+
+	/* skip any ':' delimited option prefixes */
+	lookup_name = strrchr(name, ':');
+	if (lookup_name != NULL)
+		lookup_name++;
+	else
+		lookup_name = name;
+
+	ASSERT(table != NULL);
+
+	for (vi = table; vi->name; vi++) {
+		if (!strcmp(vi->name, lookup_name))
+			return vi;
+	}
+	/* ran to end of table */
+
+	return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+	BCM_REFERENCE(arg);
+
+	/* length check on io buf */
+	switch (vi->type) {
+	case IOVT_BOOL:
+	case IOVT_INT8:
+	case IOVT_INT16:
+	case IOVT_INT32:
+	case IOVT_UINT8:
+	case IOVT_UINT16:
+	case IOVT_UINT32:
+		/* all integers are int32 sized args at the ioctl interface */
+		if (len < (int)sizeof(int)) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_BUFFER:
+		/* buffer must meet minimum length requirement */
+		if (len < vi->minlen) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_VOID:
+		if (!set) {
+			/* Cannot return nil... */
+			bcmerror = BCME_UNSUPPORTED;
+		} else if (len) {
+			/* Set is an action w/o parameters */
+			bcmerror = BCME_BUFTOOLONG;
+		}
+		break;
+
+	default:
+		/* unknown type for length check in iovar info */
+		ASSERT(0);
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#endif	/* BCMDRIVER */
+
+#ifdef BCM_OBJECT_TRACE
+
+#define BCM_OBJECT_MERGE_SAME_OBJ	0
+
+/* Some places may add/remove objects to/from the trace list on Linux: */
+/* add:    osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */
+/* remove: osl_pktfree dev_kfree_skb netif_rx */
+
+#define BCM_OBJDBG_COUNT          (1024 * 100)
+static spinlock_t dbgobj_lock;
+#define	BCM_OBJDBG_LOCK_INIT()    spin_lock_init(&dbgobj_lock)
+#define	BCM_OBJDBG_LOCK_DESTROY()
+#define	BCM_OBJDBG_LOCK           spin_lock_irqsave
+#define	BCM_OBJDBG_UNLOCK         spin_unlock_irqrestore
+
+#define BCM_OBJDBG_ADDTOHEAD      0
+#define BCM_OBJDBG_ADDTOTAIL      1
+
+#define BCM_OBJDBG_CALLER_LEN     32
+struct bcm_dbgobj {
+	struct bcm_dbgobj *prior;
+	struct bcm_dbgobj *next;
+	uint32 flag;
+	void   *obj;
+	uint32 obj_sn;
+	uint32 obj_state;
+	uint32 line;
+	char   caller[BCM_OBJDBG_CALLER_LEN];
+};
+
+static struct bcm_dbgobj *dbgobj_freehead = NULL;
+static struct bcm_dbgobj *dbgobj_freetail = NULL;
+static struct bcm_dbgobj *dbgobj_objhead = NULL;
+static struct bcm_dbgobj *dbgobj_objtail = NULL;
+
+static uint32 dbgobj_sn = 0;
+static int dbgobj_count = 0;
+static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT];
+
+void
+bcm_object_trace_init(void)
+{
+	int i = 0;
+	BCM_OBJDBG_LOCK_INIT();
+	memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT);
+	dbgobj_freehead = &bcm_dbg_objs[0];
+	dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1];
+
+	for (i = 0; i < BCM_OBJDBG_COUNT; ++i) {
+		bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ?
+			dbgobj_freehead : &bcm_dbg_objs[i + 1];
+		bcm_dbg_objs[i].prior = (i == 0) ?
+			dbgobj_freetail : &bcm_dbg_objs[i - 1];
+	}
+}
+
+void
+bcm_object_trace_deinit(void)
+{
+	if (dbgobj_objhead || dbgobj_objtail) {
+		printf("%s: not all objects are released\n", __FUNCTION__);
+		ASSERT(0);
+	}
+	BCM_OBJDBG_LOCK_DESTROY();
+}
+
+static void
+bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+	struct bcm_dbgobj *dbgobj)
+{
+	if ((dbgobj == *head) && (dbgobj == *tail)) {
+		*head = NULL;
+		*tail = NULL;
+	} else if (dbgobj == *head) {
+		*head = (*head)->next;
+	} else if (dbgobj == *tail) {
+		*tail = (*tail)->prior;
+	}
+	dbgobj->next->prior = dbgobj->prior;
+	dbgobj->prior->next = dbgobj->next;
+}
+
+static void
+bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+	struct bcm_dbgobj *dbgobj, int addtotail)
+{
+	if (!(*head) && !(*tail)) {
+		*head = dbgobj;
+		*tail = dbgobj;
+		dbgobj->next = dbgobj;
+		dbgobj->prior = dbgobj;
+	} else if ((*head) && (*tail)) {
+		(*tail)->next = dbgobj;
+		(*head)->prior = dbgobj;
+		dbgobj->next = *head;
+		dbgobj->prior = *tail;
+		if (addtotail == BCM_OBJDBG_ADDTOTAIL)
+			*tail = dbgobj;
+		else
+			*head = dbgobj;
+	} else {
+		ASSERT(0); /* can't be this case */
+	}
+}
+
+static INLINE void
+bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+	struct bcm_dbgobj *dbgobj, int movetotail)
+{
+	if ((*head) && (*tail)) {
+		if (movetotail == BCM_OBJDBG_ADDTOTAIL) {
+			if (dbgobj != (*tail)) {
+				bcm_object_rm_list(head, tail, dbgobj);
+				bcm_object_add_list(head, tail, dbgobj, movetotail);
+			}
+		} else {
+			if (dbgobj != (*head)) {
+				bcm_object_rm_list(head, tail, dbgobj);
+				bcm_object_add_list(head, tail, dbgobj, movetotail);
+			}
+		}
+	} else {
+		ASSERT(0); /* can't be this case */
+	}
+}
+
+void
+bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	if (opt == BCM_OBJDBG_ADD_PKT ||
+		opt == BCM_OBJDBG_ADD) {
+		dbgobj = dbgobj_objtail;
+		while (dbgobj) {
+			if (dbgobj->obj == obj) {
+				printf("%s: obj %p allocated from %s(%d),"
+					" allocated again from %s(%d)\n",
+					__FUNCTION__, dbgobj->obj,
+					dbgobj->caller, dbgobj->line,
+					caller, line);
+				ASSERT(0);
+				goto EXIT;
+			}
+			dbgobj = dbgobj->prior;
+			if (dbgobj == dbgobj_objtail)
+				break;
+		}
+
+#if BCM_OBJECT_MERGE_SAME_OBJ
+		dbgobj = dbgobj_freetail;
+		while (dbgobj) {
+			if (dbgobj->obj == obj) {
+				goto FREED_ENTRY_FOUND;
+			}
+			dbgobj = dbgobj->prior;
+			if (dbgobj == dbgobj_freetail)
+				break;
+		}
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+
+		dbgobj = dbgobj_freehead;
+#if BCM_OBJECT_MERGE_SAME_OBJ
+FREED_ENTRY_FOUND:
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+		if (!dbgobj) {
+			printf("%s: already got %d objects ?????????????????????\n",
+				__FUNCTION__, BCM_OBJDBG_COUNT);
+			ASSERT(0);
+			goto EXIT;
+		}
+
+		bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj);
+		dbgobj->obj = obj;
+		strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
+		dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
+		dbgobj->line = line;
+		dbgobj->flag = 0;
+		if (opt == BCM_OBJDBG_ADD_PKT) {
+			dbgobj->obj_sn = dbgobj_sn++;
+			dbgobj->obj_state = 0;
+			/* first 4 bytes is pkt sn */
+			if (((unsigned long)PKTTAG(obj)) & 0x3)
+				printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj));
+			*(uint32*)PKTTAG(obj) = dbgobj->obj_sn;
+		}
+		bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj,
+			BCM_OBJDBG_ADDTOTAIL);
+
+		dbgobj_count++;
+
+	} else if (opt == BCM_OBJDBG_REMOVE) {
+		dbgobj = dbgobj_objtail;
+		while (dbgobj) {
+			if (dbgobj->obj == obj) {
+				if (dbgobj->flag) {
+					printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n",
+						__FUNCTION__, obj, dbgobj->flag, caller, line);
+				}
+				bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj);
+				memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN);
+				strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
+				dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
+				dbgobj->line = line;
+				bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj,
+					BCM_OBJDBG_ADDTOTAIL);
+				dbgobj_count--;
+				goto EXIT;
+			}
+			dbgobj = dbgobj->prior;
+			if (dbgobj == dbgobj_objtail)
+				break;
+		}
+
+		dbgobj = dbgobj_freetail;
+		while (dbgobj && dbgobj->obj) {
+			if (dbgobj->obj == obj) {
+				printf("%s: obj %p already freed from %s(%d),"
+					" trying to free again from %s(%d)\n",
+					__FUNCTION__, obj,
+					dbgobj->caller, dbgobj->line,
+					caller, line);
+				//ASSERT(0); /* release same obj more than one time? */
+				goto EXIT;
+			}
+			dbgobj = dbgobj->prior;
+			if (dbgobj == dbgobj_freetail)
+				break;
+		}
+
+		printf("%s: ################### release non-existent obj %p from %s(%d)\n",
+			__FUNCTION__, obj, caller, line);
+		//ASSERT(0); /* release same obj more than one time? */
+
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_trace_upd(void *obj, void *obj_new)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if (dbgobj->obj == obj) {
+			dbgobj->obj = obj_new;
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+	const char *caller, int line)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if ((dbgobj->obj == obj) &&
+			((!chksn) || (dbgobj->obj_sn == sn))) {
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	dbgobj = dbgobj_freetail;
+	while (dbgobj) {
+		if ((dbgobj->obj == obj) &&
+			((!chksn) || (dbgobj->obj_sn == sn))) {
+			printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n",
+				__FUNCTION__, caller, line,
+				dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state,
+				dbgobj->caller, dbgobj->line);
+			goto EXIT;
+		}
+		else if (dbgobj->obj == NULL) {
+			break;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_freetail)
+			break;
+	}
+
+	printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n",
+		__FUNCTION__, obj, caller, line, chksn ? "yes" : "no", sn);
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n",
+				__FUNCTION__, caller, line,
+				dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line);
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_feature_set(void *obj, uint32 type, uint32 value)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if (dbgobj->obj == obj) {
+			if (type == BCM_OBJECT_FEATURE_FLAG) {
+				if (value & BCM_OBJECT_FEATURE_CLEAR)
+					dbgobj->flag &= ~(value);
+				else
+					dbgobj->flag |= (value);
+			} else if (type == BCM_OBJECT_FEATURE_PKT_STATE) {
+				dbgobj->obj_state = value;
+			}
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+	ASSERT(0);
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+int
+bcm_object_feature_get(void *obj, uint32 type, uint32 value)
+{
+	int rtn = 0;
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if (dbgobj->obj == obj) {
+			if (type == BCM_OBJECT_FEATURE_FLAG) {
+				rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR);
+			}
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+	ASSERT(0);
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return rtn;
+}
+
+#endif /* BCM_OBJECT_TRACE */
+
+uint8 *
+bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
+
+	/* dst buffer should always be valid */
+	ASSERT(dst);
+
+	/* data len must be within valid range */
+	ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
+
+	/* source data buffer pointer should be valid, unless datalen is 0
+	 * meaning no data with this TLV
+	 */
+	ASSERT((data != NULL) || (datalen == 0));
+
+	/* only do work if the inputs are valid
+	 * - must have a dst to write to AND
+	 * - datalen must be within range AND
+	 * - the source data pointer must be non-NULL if datalen is non-zero
+	 * (this last condition detects datalen > 0 with a NULL data pointer)
+	 */
+	if ((dst != NULL) &&
+	    ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+	    ((data != NULL) || (datalen == 0))) {
+
+	        /* write type, len fields */
+		dst_tlv->id = (uint8)type;
+	        dst_tlv->len = (uint8)datalen;
+
+		/* if data is present, copy to the output buffer and update
+		 * pointer to output buffer
+		 */
+		if (datalen > 0) {
+
+			memcpy(dst_tlv->data, data, datalen);
+		}
+
+		/* update the output destination pointer to point past
+		 * the TLV written
+		 */
+		new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+
+	if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+
+		/* if len + tlv hdr len is more than destlen, don't do anything
+		 * just return the buffer untouched
+		 */
+		if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+
+			new_dst = bcm_write_tlv(type, data, datalen, dst);
+		}
+	}
+
+	return (new_dst);
+}
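+
+/*
+ * Usage sketch for the TLV writers above (illustrative; the element id and
+ * payload are made up, and bcm_write_tlv_safe() is simply the bounds-checked
+ * variant of bcm_write_tlv()):
+ *
+ *	uint8 ie_buf[32], payload[2] = { 0x01, 0x02 };
+ *	uint8 *end = bcm_write_tlv_safe(DOT11_MNG_QOS_MAP_ID, payload,
+ *	                                sizeof(payload), ie_buf, sizeof(ie_buf));
+ *	// 'end - ie_buf' is the number of bytes written (0 if it did not fit)
+ */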
+
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+	uint totlen;
+
+	ASSERT(dst && src);
+	if (dst && src) {
+
+		totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+		memcpy(dst, src_tlv, totlen);
+		new_dst = dst + totlen;
+	}
+
+	return (new_dst);
+}
+
+
+uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+
+	ASSERT(src);
+	if (src) {
+		if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+			new_dst = bcm_copy_tlv(src, dst);
+		}
+	}
+
+	return (new_dst);
+}
+
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ *       x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+    0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+    0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+    0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+    0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+    0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+    0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+    0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+    0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+    0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+    0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+    0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+    0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+    0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+    0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+    0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+    0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+    0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+    0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+    0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+    0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+    0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+    0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+    0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+    0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+    0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+    0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+    0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+    0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+    0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+    0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+    0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+    0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+	(c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+	uint8 *pdata,	/* pointer to array of data to process */
+	uint  nbytes,	/* number of input data bytes to process */
+	uint8 crc	/* either CRC8_INIT_VALUE or previous return value */
+)
+{
+	/* hard code the crc loop instead of using CRC_INNER_LOOP macro
+	 * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+	 */
+	while (nbytes-- > 0)
+		crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
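+
+/*
+ * Usage sketch for hndcrc8(), following the convention described in the crc8
+ * comment block above (illustrative; 'frame' and 'frame_len' are hypothetical):
+ *
+ *	// generate: complement the running CRC and append it to the byte stream
+ *	uint8 crc = hndcrc8(frame, frame_len, CRC8_INIT_VALUE);
+ *	frame[frame_len] = ~crc;
+ *
+ *	// check: the CRC over data plus trailer yields the fixed good value
+ *	if (hndcrc8(frame, frame_len + 1, CRC8_INIT_VALUE) == CRC8_GOOD_VALUE) {
+ *		// CRC ok
+ *	}
+ */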
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ *       x^16 + x^12 +x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+    0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+    0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+    0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+    0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+    0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+    0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+    0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+    0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+    0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+    0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+    0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+    0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+    0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+    0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+    0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+    0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+    0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+    0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+    0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+    0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+    0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+    0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+    0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+    0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+    0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+    0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+    0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+    0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+    0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+    0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+    0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+    0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+    uint8 *pdata,  /* pointer to array of data to process */
+    uint nbytes, /* number of input data bytes to process */
+    uint16 crc     /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+	while (nbytes-- > 0)
+		CRC_INNER_LOOP(16, crc, *pdata++);
+	return crc;
+}
+
+static const uint32 crc32_table[256] = {
+    0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+    0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+    0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+    0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+    0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+    0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+    0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+    0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+    0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+    0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+    0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+    0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+    0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+    0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+    0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+    0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+    0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+    0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+    0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+    0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+    0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+    0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+    0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+    0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+    0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+    0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+    0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+    0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+    0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+    0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+    0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+    0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+    0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+    0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+    0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+    0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+    0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+    0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+    0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+    0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+    0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+    0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+    0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+    0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+    0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+    0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+    0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+    0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+    0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+    0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+    0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+    0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+    0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+    0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+    0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+    0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+    0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+    0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+    0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+    0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+    0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+    0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+    0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+    0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+	uint8 *pend;
+	pend = pdata + nbytes;
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+
+	return crc;
+}
+
+#ifdef notdef
+#define CLEN 	1499 	/*  CRC Length */
+#define CBUFSIZ 	(CLEN+4)
+#define CNBUFS		5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+	uint j, k, l;
+	uint8 *buf;
+	uint len[CNBUFS];
+	uint32 crcr;
+	uint32 crc32tv[CNBUFS] =
+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+	/* step through all possible alignments */
+	for (l = 0; l <= 4; l++) {
+		for (j = 0; j < CNBUFS; j++) {
+			len[j] = CLEN;
+			for (k = 0; k < len[j]; k++)
+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+		}
+
+		for (j = 0; j < CNBUFS; j++) {
+			crcr = hndcrc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+			ASSERT(crcr == crc32tv[j]);
+		}
+	}
+
+	MFREE(buf, CBUFSIZ*CNBUFS);
+	return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV's total length (header plus body) if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+	int len;
+
+	/* validate current elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	/* advance to next elt */
+	len = elt->len;
+	elt = (bcm_tlv_t*)(elt->data + len);
+	*buflen -= (TLV_HDR_LEN + len);
+
+	/* validate next elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	if ((elt = (bcm_tlv_t*)buf) == NULL) {
+		return NULL;
+	}
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		int len = elt->len;
+
+		/* validate remaining totlen */
+		if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+
+	return NULL;
+}
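+
+/*
+ * Illustrative usage sketch (not part of the original driver sources): finding
+ * one tagged element and walking all of them.  The buffer and the tag value
+ * (DOT11_MNG_SSID_ID is used only as an example key) are hypothetical.
+ */
+#ifdef notdef
+static void
+example_tlv_walk(uint8 *ies, int ies_len)
+{
+	bcm_tlv_t *elt;
+	int buflen = ies_len;
+
+	/* find the first TLV whose tag matches the key */
+	elt = bcm_parse_tlvs(ies, ies_len, DOT11_MNG_SSID_ID);
+	if (elt != NULL)
+		printf("found: id %u len %u\n", elt->id, elt->len);
+
+	/* walk every TLV; bcm_next_tlv() validates and decrements buflen */
+	for (elt = (bcm_tlv_t *)ies; elt != NULL && bcm_valid_tlv(elt, buflen);
+	     elt = bcm_next_tlv(elt, &buflen))
+		printf("tlv: id %u len %u\n", elt->id, elt->len);
+}
+#endif /* notdef */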
+
+bcm_tlv_t *
+bcm_parse_tlvs_dot11(void *buf, int buflen, uint key, bool id_ext)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		int len = elt->len;
+
+		do {
+			/* validate remaining totlen */
+			if (totlen <  (int)(len + TLV_HDR_LEN))
+				break;
+
+			if (id_ext) {
+				if (!DOT11_MNG_IE_ID_EXT_MATCH(elt, key))
+					break;
+			} else if (elt->id != key) {
+				break;
+			}
+
+			return (elt);
+		} while (0);
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+
+	return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ * return NULL if not found or length field < min_varlen
+ */
+bcm_tlv_t *
+bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen)
+{
+	bcm_tlv_t * ret;
+	ret = bcm_parse_tlvs(buf, buflen, key);
+	if (ret == NULL || ret->len < min_bodylen) {
+		return NULL;
+	}
+	return ret;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.  Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		uint id = elt->id;
+		int len = elt->len;
+
+		/* Punt if we start seeing IDs greater than the target key */
+		if (id > key) {
+			return (NULL);
+		}
+
+		/* validate remaining totlen */
+		if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+	return NULL;
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+	defined(DHD_DEBUG)
+int
+bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len)
+{
+	int i, slen = 0;
+	uint32 bit, mask;
+	const char *name;
+	mask = bd->mask;
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0;  (name = bd->bitfield[i].name) != NULL; i++) {
+		bit = bd->bitfield[i].bit;
+		if ((flags & mask) == bit) {
+			if (len > (int)strlen(name)) {
+				slen = strlen(name);
+				strncpy(buf, name, slen+1);
+			}
+			break;
+		}
+	}
+	return slen;
+}
+
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+	int i;
+	char* p = buf;
+	char hexstr[16];
+	int slen = 0, nlen = 0;
+	uint32 bit;
+	const char* name;
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; flags != 0; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (bit == 0 && flags != 0) {
+			/* print any unnamed bits */
+			snprintf(hexstr, 16, "0x%X", flags);
+			name = hexstr;
+			flags = 0;	/* exit loop */
+		} else if ((flags & bit) == 0)
+			continue;
+		flags &= ~bit;
+		nlen = strlen(name);
+		slen += nlen;
+		/* count btwn flag space */
+		if (flags != 0)
+			slen += 1;
+		/* need NULL char as well */
+		if (len <= slen)
+			break;
+		/* copy NULL char but don't count it */
+		strncpy(p, name, nlen + 1);
+		p += nlen;
+		/* copy btwn flag space and NULL char */
+		if (flags != 0)
+			p += snprintf(p, 2, " ");
+	}
+
+	/* indicate the str was too short */
+	if (flags != 0) {
+		p += snprintf(p, 2, ">");
+	}
+
+	return (int)(p - buf);
+}
+#endif /* WLMSG_PRHDRS || WLMSG_PRPKT || WLMSG_ASSOC || DHD_DEBUG */
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+	int i;
+	char *p = str;
+	const uint8 *src = (const uint8*)bytes;
+
+	for (i = 0; i < len; i++) {
+		p += snprintf(p, 3, "%02X", *src);
+		src++;
+	}
+	return (int)(p - str);
+}
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, volatile uchar *buf, uint nbytes)
+{
+	char line[128], *p;
+	int len = sizeof(line);
+	int nchar;
+	uint i;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	p = line;
+	for (i = 0; i < nbytes; i++) {
+		if (i % 16 == 0) {
+			nchar = snprintf(p, len, "  %04x: ", i);	/* line prefix */
+			p += nchar;
+			len -= nchar;
+		}
+		if (len > 0) {
+			nchar = snprintf(p, len, "%02x ", buf[i]);
+			p += nchar;
+			len -= nchar;
+		}
+
+		if (i % 16 == 15) {
+			printf("%s\n", line);		/* flush line */
+			p = line;
+			len = sizeof(line);
+		}
+	}
+
+	/* flush last partial line */
+	if (p != line)
+		printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+	"NONE",
+	"WEP1",
+	"TKIP",
+	"WEP128",
+	"AES_CCM",
+	"AES_OCB_MSDU",
+	"AES_OCB_MPDU",
+	"NALG",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF"
+	"PMK",
+	"BIP",
+	"AES_GCM",
+	"AES_CCM256",
+	"AES_GCM256",
+	"BIP_CMAC256",
+	"BIP_GMAC",
+	"BIP_GMAC256",
+	"UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+	return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+	if (brev < 0x100)
+		snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+	else
+		snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+	return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+	uint len, max_len;
+	char c;
+
+	len = (uint)strlen(buf);
+
+	max_len = BUFSIZE_TODUMP_ATONCE;
+
+	while (len > max_len) {
+		c = buf[max_len];
+		buf[max_len] = '\0';
+		printf("%s", buf);
+		buf[max_len] = c;
+
+		buf += max_len;
+		len -= max_len;
+	}
+	/* print the remaining string */
+	printf("%s\n", buf);
+	return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+	char *buf, uint32 bufsize)
+{
+	uint  filled_len;
+	int len;
+	struct fielddesc *cur_ptr;
+
+	filled_len = 0;
+	cur_ptr = fielddesc_array;
+
+	while (bufsize > 1) {
+		if (cur_ptr->nameandfmt == NULL)
+			break;
+		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+		               read_rtn(arg0, arg1, cur_ptr->offset));
+		/* check for snprintf overflow or error */
+		if (len < 0 || (uint32)len >= bufsize)
+			len = bufsize - 1;
+		buf += len;
+		bufsize -= len;
+		filled_len += len;
+		cur_ptr++;
+	}
+	return filled_len;
+}
+
+uint
+bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint buflen)
+{
+	uint len;
+
+	len = (uint)strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	strncpy(buf, name, buflen);
+
+	/* append data onto the end of the name string */
+	if (data && datalen != 0) {
+		memcpy(&buf[len], data, datalen);
+		len += datalen;
+	}
+
+	return len;
+}
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153		/* Offset for first entry */
+#define QDBM_TABLE_LEN 40	/* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: 	+0 	+1 	+2 	+3 	+4 	+5 	+6 	+7 */
+/* 153: */      6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
+/* 161: */      10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
+/* 169: */      16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
+/* 177: */      26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
+/* 185: */      42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+	uint factor = 1;
+	int idx = qdbm - QDBM_OFFSET;
+
+	if (idx >= QDBM_TABLE_LEN) {
+		/* clamp to max uint16 mW value */
+		return 0xFFFF;
+	}
+
+	/* scale the qdBm index up to the range of the table 0-40
+	 * where an offset of 40 qdBm equals a factor of 10 mW.
+	 */
+	while (idx < 0) {
+		idx += 40;
+		factor *= 10;
+	}
+
+	/* return the mW value scaled down to the correct factor of 10,
+	 * adding in factor/2 to get proper rounding.
+	 */
+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+	uint8 qdbm;
+	int offset;
+	uint mw_uint = mw;
+	uint boundary;
+
+	/* handle boundary case */
+	if (mw_uint <= 1)
+		return 0;
+
+	offset = QDBM_OFFSET;
+
+	/* move mw into the range of the table */
+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+		mw_uint *= 10;
+		offset -= 40;
+	}
+
+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+		                                    nqdBm_to_mW_map[qdbm])/2;
+		if (mw_uint < boundary) break;
+	}
+
+	qdbm += (uint8)offset;
+
+	return (qdbm);
+}
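+
+/*
+ * Illustrative check (not part of the original driver sources): 64 qdBm is
+ * 16 dBm, about 39.8 mW, so bcm_qdbm_to_mw() rounds it to 40 mW and
+ * bcm_mw_to_qdbm() maps 40 mW back to 64 qdBm.
+ */
+#ifdef notdef
+static void
+example_qdbm_roundtrip(void)
+{
+	ASSERT(bcm_qdbm_to_mw(64) == 40);
+	ASSERT(bcm_mw_to_qdbm(40) == 64);
+}
+#endif /* notdef */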
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+	uint bitcount = 0, i;
+	uint8 tmp;
+	for (i = 0; i < length; i++) {
+		tmp = bitmap[i];
+		while (tmp) {
+			bitcount++;
+			tmp &= (tmp - 1);
+		}
+	}
+	return bitcount;
+}
+
+#if defined(BCMDRIVER) || defined(WL_UNITTEST)
+
+/* triggers bcm_bprintf to print to kernel log */
+bool bcm_bprintf_bypass = FALSE;
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+	b->origsize = b->size = size;
+	b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+	if (bcm_bprintf_bypass == TRUE) {
+		printf("%s", b->buf);
+		goto exit;
+	}
+
+	/* A non-ANSI-C99-compliant vsnprintf returns -1,
+	 * an ANSI/C99-compliant one returns r >= b->size,
+	 * and bcmstdlib returns 0; handle all three cases.
+	 */
+	/* r == 0 is also the case when strlen(fmt) is zero,
+	 * typically when "" is passed as the format argument.
+	 */
+	if ((r == -1) || (r >= (int)b->size)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+exit:
+	va_end(ap);
+
+	return r;
+}
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len)
+{
+	int i;
+
+	if (msg != NULL && msg[0] != '\0')
+		bcm_bprintf(b, "%s", msg);
+	for (i = 0; i < len; i ++)
+		bcm_bprintf(b, "%02X", buf[i]);
+	if (newline)
+		bcm_bprintf(b, "\n");
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+	int i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+{
+	bcm_tlv_t *ie;
+	uint8 ie_len;
+
+	ie = (bcm_tlv_t*)tlvs;
+
+	/* make sure we are looking at a valid IE */
+	if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+		return NULL;
+	}
+
+	/* Walk through the IEs looking for an OUI match */
+	do {
+		ie_len = ie->len;
+		if ((ie->id == DOT11_MNG_PROPR_ID) &&
+		    (ie_len >= (DOT11_OUI_LEN + type_len)) &&
+		    !bcmp(ie->data, voui, DOT11_OUI_LEN))
+		{
+			/* compare optional type */
+			if (type_len == 0 ||
+			    !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+				return (ie);		/* a match */
+			}
+		}
+	} while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
+
+	return NULL;
+}
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN	((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif /* WLTINYDUMP || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT || WLMSG_WSEC */
+
+#endif /* BCMDRIVER || WL_UNITTEST */
+
+/*
+ * ProcessVars: Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * Also accepts nvram files which are already in the format of <var1>=<value1>\0<var2>=<value2>\0.
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs.  End of buffer is marked by two NULs.
+ */
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+	char *dp;
+	bool findNewline;
+	int column;
+	unsigned int buf_len, n;
+	unsigned int pad = 0;
+	char nv_ver[128];
+
+	dp = varbuf;
+
+	findNewline = FALSE;
+	column = 0;
+
+	/* terence 20130914: print out NVRAM version */
+	if (varbuf[0] == '#') {
+		memset(nv_ver, 0x00, sizeof(nv_ver));
+		for (n=1; n<len && n<(sizeof(nv_ver)-1); n++) {
+			if (varbuf[n] == '\n')
+				break;
+			nv_ver[n-1] = varbuf[n];
+		}
+		printk("NVRAM version: %s\n", nv_ver);
+	}
+
+	for (n = 0; n < len; n++) {
+		if (varbuf[n] == '\r')
+			continue;
+		if (findNewline && varbuf[n] != '\n')
+			continue;
+		findNewline = FALSE;
+		if (varbuf[n] == '#') {
+			findNewline = TRUE;
+			continue;
+		}
+		if (varbuf[n] == '\n') {
+			if (column == 0)
+				continue;
+			*dp++ = 0;
+			column = 0;
+			continue;
+		}
+		*dp++ = varbuf[n];
+		column++;
+	}
+	buf_len = (unsigned int)(dp - varbuf);
+	if (buf_len % 4) {
+		pad = 4 - buf_len % 4;
+		if (pad && (buf_len + pad <= len)) {
+			buf_len += pad;
+		}
+	}
+
+	while (dp < varbuf + n)
+		*dp++ = 0;
+
+	return buf_len;
+}
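+
+/*
+ * Worked example (informational only, hypothetical input): given
+ * "# v1.0\nboardtype=0x0712\n\nmacaddr=00:90:4c:c5:12:38\n", the comment and
+ * empty line are dropped and the buffer is rewritten as
+ * "boardtype=0x0712\0macaddr=00:90:4c:c5:12:38\0", padded with NULs to a
+ * 4-byte multiple so that the data ends in two consecutive NULs.
+ */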
+
+/* calculate a * b + c, returning the 64 bit result as high and low 32 bit halves */
+void
+bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c)
+{
+#define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;}
+	uint32 r1, r0;
+	uint32 a1, a0, b1, b0, t, cc = 0;
+
+	a1 = a >> 16;
+	a0 = a & 0xffff;
+	b1 = b >> 16;
+	b0 = b & 0xffff;
+
+	r0 = a0 * b0;
+	FORMALIZE(r0);
+
+	t = (a1 * b0) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	t = (a0 * b1) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	FORMALIZE(c);
+
+	r0 += c;
+	FORMALIZE(r0);
+
+	r0 |= (cc % 2) ? 0x80000000 : 0;
+	r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2);
+
+	*r_high = r1;
+	*r_low = r0;
+}
+
+/* calculate a / b, where a is a 64 bit value given as high and low 32 bit halves */
+void
+bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b < 2)
+		return;
+
+	while (a1 != 0) {
+		r0 += (0xffffffff / b) * a1;
+		bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0);
+	}
+
+	r0 += a0 / b;
+	*r = r0;
+}
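+
+/*
+ * Illustrative check (not part of the original driver sources): dividing the
+ * 64 bit value 0x2_00000000 (8589934592) by 1000 yields 8589934 in the low
+ * 32 bits of the quotient.
+ */
+#ifdef notdef
+static void
+example_uint64_divide(void)
+{
+	uint32 q = 0;
+
+	bcm_uint64_divide(&q, 0x2, 0x0, 1000);
+	ASSERT(q == 8589934);
+}
+#endif /* notdef */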
+
+#ifndef setbit /* As in the header file */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+/* Set bit in byte array. */
+void
+setbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
+}
+
+/* Clear bit in byte array. */
+void
+clrbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+}
+
+/* Test if bit is set in byte array. */
+bool
+isset(const void *array, uint bit)
+{
+	return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+}
+
+/* Test if bit is clear in byte array. */
+bool
+isclr(const void *array, uint bit)
+{
+	return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+}
+#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
+#endif /* setbit */
+
+void
+set_bitrange(void *array, uint start, uint end, uint maxbit)
+{
+	uint startbyte = start/NBBY;
+	uint endbyte = end/NBBY;
+	uint i, startbytelastbit, endbytestartbit;
+
+	if (end >= start) {
+		if (endbyte - startbyte > 1)
+		{
+			startbytelastbit = (startbyte+1)*NBBY - 1;
+			endbytestartbit = endbyte*NBBY;
+			for (i = startbyte+1; i < endbyte; i++)
+				((uint8 *)array)[i] = 0xFF;
+			for (i = start; i <= startbytelastbit; i++)
+				setbit(array, i);
+			for (i = endbytestartbit; i <= end; i++)
+				setbit(array, i);
+		} else {
+			for (i = start; i <= end; i++)
+				setbit(array, i);
+		}
+	}
+	else {
+		set_bitrange(array, start, maxbit, maxbit);
+		set_bitrange(array, 0, end, maxbit);
+	}
+}
+
+void
+bcm_bitprint32(const uint32 u32arg)
+{
+	int i;
+	for (i = NBITS(uint32) - 1; i >= 0; i--) {
+		if (isbitset(u32arg, i)) {
+			printf("1");
+		} else {
+			printf("0");
+		}
+
+		if ((i % NBBY) == 0) printf(" ");
+	}
+	printf("\n");
+}
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16
+bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
+{
+	while (len > 1) {
+		sum += (buf[0] << 8) | buf[1];
+		buf += 2;
+		len -= 2;
+	}
+
+	if (len > 0) {
+		sum += (*buf) << 8;
+	}
+
+	while (sum >> 16) {
+		sum = (sum & 0xffff) + (sum >> 16);
+	}
+
+	return ((uint16)~sum);
+}
+#if defined(BCMDRIVER) && !defined(_CFEZ_)
+/*
+ * Hierarchical Multiword bitmap based small id allocator.
+ *
+ * Multilevel hierarchy bitmap. (maximum 2 levels)
+ * First hierarchy uses a multiword bitmap to identify 32bit words in the
+ * second hierarchy that have at least a single bit set. Each bit in a word of
+ * the second hierarchy represents a unique ID that may be allocated.
+ *
+ * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
+ * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word
+ * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
+ * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap words identifying the first
+ *                       non-zero bitmap word carrying at least one free ID.
+ * BCM_MWBMAP_SHIFT_OP:  Used in MOD, DIV and MUL operations.
+ * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
+ *
+ * Design Notes:
+ * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory.  When defined, the number
+ * of set bits is recomputed on every allocation and deallocation, costing 4
+ * array indexed accesses and 3 arithmetic operations.  When not defined, a
+ * running count of set bits is maintained instead, needing up to 32 bytes per
+ * 1024 IDs (up to 128 bytes per instantiation in a 4K max ID allocator).
+ * In a memory limited system, e.g. dongle builds, the CPU for memory tradeoff
+ * may be chosen by defining BCM_MWBMAP_USE_CNTSETBITS.
+ *
+ * Note: wd_bitmap[] is statically declared and is not ROM friendly: the array
+ * size is fixed.  There is no intention to support larger than 4K index
+ * allocation.  ID allocators for ranges smaller than 4K waste only 12 bytes,
+ * with the saving of not having to use an indirect access had the array been
+ * dynamically allocated.
+ */
+#define BCM_MWBMAP_ITEMS_MAX    (64 * 1024)  /* 64K maximum IDs managed */
+
+#define BCM_MWBMAP_BITS_WORD    (NBITS(uint32))
+#define BCM_MWBMAP_WORDS_MAX    (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_WDMAP_MAX    (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_SHIFT_OP     (5)
+#define BCM_MWBMAP_MODOP(ix)    ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
+#define BCM_MWBMAP_DIVOP(ix)    ((ix) >> BCM_MWBMAP_SHIFT_OP)
+#define BCM_MWBMAP_MULOP(ix)    ((ix) << BCM_MWBMAP_SHIFT_OP)
+
+/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
+#define BCM_MWBMAP_PTR(hdl)		((struct bcm_mwbmap *)(hdl))
+#define BCM_MWBMAP_HDL(ptr)		((void *)(ptr))
+
+#if defined(BCM_MWBMAP_DEBUG)
+#define BCM_MWBMAP_AUDIT(mwb) \
+	do { \
+		ASSERT((mwb != NULL) && \
+		       (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
+		bcm_mwbmap_audit(mwb); \
+	} while (0)
+#define MWBMAP_ASSERT(exp)		ASSERT(exp)
+#define MWBMAP_DBG(x)           printf x
+#else   /* !BCM_MWBMAP_DEBUG */
+#define BCM_MWBMAP_AUDIT(mwb)   do {} while (0)
+#define MWBMAP_ASSERT(exp)		do {} while (0)
+#define MWBMAP_DBG(x)
+#endif  /* !BCM_MWBMAP_DEBUG */
+
+
+typedef struct bcm_mwbmap {     /* Hierarchical multiword bitmap allocator    */
+	uint16 wmaps;               /* Total number of words in free wd bitmap    */
+	uint16 imaps;               /* Total number of words in free id bitmap    */
+	int32  ifree;               /* Count of free indices. Used only in audits */
+	uint16 total;               /* Total indices managed by multiword bitmap  */
+
+	void * magic;               /* Audit handle parameter from user           */
+
+	uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of            */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	int8   wd_count[BCM_MWBMAP_WORDS_MAX];  /* free id running count, 1st lvl */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+	uint32 id_bitmap[0];        /* Second level bitmap                        */
+} bcm_mwbmap_t;
+
+/* Incarnate a hierarchical multiword bitmap based small index allocator. */
+struct bcm_mwbmap *
+bcm_mwbmap_init(osl_t *osh, uint32 items_max)
+{
+	struct bcm_mwbmap * mwbmap_p;
+	uint32 wordix, size, words, extra;
+
+	/* Implementation Constraint: Uses 32bit word bitmap */
+	MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
+	MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
+	MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
+	MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
+
+	ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
+
+	/* Determine the number of words needed in the multiword bitmap */
+	extra = BCM_MWBMAP_MODOP(items_max);
+	words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
+
+	/* Allocate runtime state of multiword bitmap */
+	/* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
+	size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
+	mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
+	if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
+		ASSERT(0);
+		goto error1;
+	}
+	memset(mwbmap_p, 0, size);
+
+	/* Initialize runtime multiword bitmap state */
+	mwbmap_p->imaps = (uint16)words;
+	mwbmap_p->ifree = (int32)items_max;
+	mwbmap_p->total = (uint16)items_max;
+
+	/* Setup magic, for use in audit of handle */
+	mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
+
+	/* Setup the second level bitmap of free indices */
+	/* Mark all indices as available */
+	for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
+		mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Ensure that extra indices are tagged as un-available */
+	if (extra) { /* fixup the free ids in last bitmap and wd_count */
+		uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Setup the first level bitmap hierarchy */
+	extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
+	words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
+
+	mwbmap_p->wmaps = (uint16)words;
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
+		mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
+	if (extra) {
+		uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+	}
+
+	return mwbmap_p;
+
+error1:
+	return BCM_MWBMAP_INVALID_HDL;
+}
+
+/* Release resources used by multiword bitmap based small index allocator. */
+void
+bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
+	                     + (sizeof(uint32) * mwbmap_p->imaps));
+	return;
+}
+
+/* Allocate a unique small index using a multiword bitmap index allocator.    */
+uint32 BCMFASTPATH
+bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	/* Start with the first hierarchy */
+	for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
+
+		if (bitmap != 0U) {
+
+			uint32 count, bitix, *bitmap_p;
+
+			bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+			wordix   = BCM_MWBMAP_MULOP(wordix) + bitix;
+
+			/* Clear bit if wd count is 0, without conditional branch */
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+			count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			mwbmap_p->wd_count[wordix]--;
+			count = mwbmap_p->wd_count[wordix];
+			MWBMAP_ASSERT(count ==
+			              (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			MWBMAP_ASSERT(count >= 0);
+
+			/* clear wd_bitmap bit if id_map count is 0 */
+			bitmap = (count == 0) << bitix;
+
+			MWBMAP_DBG((
+			    "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
+
+			*bitmap_p ^= bitmap;
+
+			/* Use bitix in the second hierarchy */
+			bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+			bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
+			MWBMAP_ASSERT(bitmap != 0U);
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = BCM_MWBMAP_MULOP(wordix)
+			         + (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+
+			mwbmap_p->ifree--; /* decrement system wide free count */
+			MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
+
+			MWBMAP_DBG((
+			    "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+			    mwbmap_p->ifree));
+
+			*bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
+
+			return bitix;
+		}
+	}
+
+	ASSERT(mwbmap_p->ifree == 0);
+
+	return BCM_MWBMAP_INVALID_IDX;
+}
+
+/* Force an index at a specified position to be in use */
+void
+bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == bitmap);
+
+	mwbmap_p->ifree--; /* update free count */
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	/* Update first hierarchy */
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+	count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	mwbmap_p->wd_count[bitix]--;
+	count = mwbmap_p->wd_count[bitix];
+	MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	MWBMAP_ASSERT(count >= 0);
+
+	bitmap   = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+	MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+	           BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+	           (*bitmap_p) ^ bitmap, count));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void BCMFASTPATH
+bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second level hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == 0U);	/* ASSERT not a double free */
+
+	mwbmap_p->ifree++; /* update free count */
+	ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+
+	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p |= bitmap; /* mark as available */
+
+	/* Now update first level hierarchy */
+
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+	{
+		uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[bitix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+		            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+	}
+#endif /* BCM_MWBMAP_DEBUG */
+
+	*bitmap_p |= bitmap;
+
+	return;
+}
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is in use or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+
+	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
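+
+/*
+ * Illustrative usage sketch (not part of the original driver sources): one
+ * allocator per resource pool, allocating and freeing ids on the fast path.
+ * The osh handle and pool size are hypothetical.
+ */
+#ifdef notdef
+static void
+example_mwbmap_usage(osl_t *osh)
+{
+	struct bcm_mwbmap *map;
+	uint32 id;
+
+	map = bcm_mwbmap_init(osh, 256);	/* manage ids 0..255 */
+	if (map == BCM_MWBMAP_INVALID_HDL)
+		return;
+
+	id = bcm_mwbmap_alloc(map);		/* grab any free id */
+	if (id != BCM_MWBMAP_INVALID_IDX) {
+		ASSERT(!bcm_mwbmap_isfree(map, id));
+		bcm_mwbmap_free(map, id);	/* return it to the pool */
+	}
+
+	bcm_mwbmap_fini(osh, map);
+}
+#endif /* notdef */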
+
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+	uint32 ix, count;
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
+	       mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+		printf("\n");
+	}
+	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[ix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+		printf("\n");
+	}
+
+	return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+			if ((*bitmap_p) & (1 << bitix)) {
+				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				count = mwbmap_p->wd_count[idmap_ix];
+				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				ASSERT(count != 0U);
+				free_cnt += count;
+			}
+		}
+	}
+
+	ASSERT((int)free_cnt == mwbmap_p->ifree);
+}
+/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */
+
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+	uint32  failures;  /* count of failures */
+	void    *dbg;      /* debug placeholder */
+	uint16  total;     /* total number of ids managed by allocator */
+	uint16  start;     /* start value of 16bit ids to be managed */
+	int     stack_idx; /* index into stack of available ids */
+	uint16  stack[0];  /* stack of 16 bit ids */
+} id16_map_t;
+
+#define ID16_MAP_SZ(items)      (sizeof(id16_map_t) + \
+	                             (sizeof(uint16) * (items)))
+
+#if defined(BCM_DBG)
+
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
+
+typedef struct id16_map_dbg {
+	uint16  total;
+	bool    avail[0];
+} id16_map_dbg_t;
+#define ID16_MAP_DBG_SZ(items)  (sizeof(id16_map_dbg_t) + \
+	                             (sizeof(bool) * (items)))
+#define ID16_MAP_MSG(x)         printf x
+#else
+#define ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
+
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+	uint16 idx, val16;
+	id16_map_t * id16_map;
+
+	ASSERT(total_ids > 0);
+
+	/* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+	 * with random values.
+	 */
+	ASSERT((start_val16 == ID16_UNDEFINED) ||
+	       (start_val16 + total_ids) < ID16_INVALID);
+
+	id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+	if (id16_map == NULL) {
+		return NULL;
+	}
+
+	id16_map->total = total_ids;
+	id16_map->start = start_val16;
+	id16_map->failures = 0;
+	id16_map->dbg = NULL;
+
+	/*
+	 * Populate stack with 16bit id values, commencing with start_val16.
+	 * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map.
+	 */
+	id16_map->stack_idx = -1;
+
+	if (id16_map->start != ID16_UNDEFINED) {
+		val16 = start_val16;
+
+		for (idx = 0; idx < total_ids; idx++, val16++) {
+			id16_map->stack_idx = idx;
+			id16_map->stack[id16_map->stack_idx] = val16;
+		}
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->start != ID16_UNDEFINED) {
+		id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
+
+		if (id16_map->dbg) {
+			id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+			id16_map_dbg->total = total_ids;
+			for (idx = 0; idx < total_ids; idx++) {
+				id16_map_dbg->avail[idx] = TRUE;
+			}
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return (void *)id16_map;
+}
+
+void * /* Destruct an id16 allocator instance */
+id16_map_fini(osl_t *osh, void * id16_map_hndl)
+{
+	uint16 total_ids;
+	id16_map_t * id16_map;
+
+	if (id16_map_hndl == NULL)
+		return NULL;
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	total_ids = id16_map->total;
+	ASSERT(total_ids > 0);
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
+		id16_map->dbg = NULL;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->total = 0;
+	MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));
+
+	return NULL;
+}
+
+void
+id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16)
+{
+	uint16 idx, val16;
+	id16_map_t * id16_map;
+
+	ASSERT(total_ids > 0);
+	/* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+	 * with random values.
+	 */
+	ASSERT((start_val16 == ID16_UNDEFINED) ||
+	       (start_val16 + total_ids) < ID16_INVALID);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+	if (id16_map == NULL) {
+		return;
+	}
+
+	id16_map->total = total_ids;
+	id16_map->start = start_val16;
+	id16_map->failures = 0;
+
+	/* Populate stack with 16bit id values, commencing with start_val16 */
+	id16_map->stack_idx = -1;
+
+	if (id16_map->start != ID16_UNDEFINED) {
+		val16 = start_val16;
+
+		for (idx = 0; idx < total_ids; idx++, val16++) {
+			id16_map->stack_idx = idx;
+			id16_map->stack[id16_map->stack_idx] = val16;
+		}
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->start != ID16_UNDEFINED) {
+		if (id16_map->dbg) {
+			id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+			id16_map_dbg->total = total_ids;
+			for (idx = 0; idx < total_ids; idx++) {
+				id16_map_dbg->avail[idx] = TRUE;
+			}
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+}
+
+uint16 BCMFASTPATH /* Allocate a unique 16bit id */
+id16_map_alloc(void * id16_map_hndl)
+{
+	uint16 val16;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT(id16_map->total > 0);
+
+	if (id16_map->stack_idx < 0) {
+		id16_map->failures++;
+		return ID16_INVALID;
+	}
+
+	val16 = id16_map->stack[id16_map->stack_idx];
+	id16_map->stack_idx--;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	ASSERT((id16_map->start == ID16_UNDEFINED) ||
+	       (val16 < (id16_map->start + id16_map->total)));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
+		id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return val16;
+}
+
+
+void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */
+id16_map_free(void * id16_map_hndl, uint16 val16)
+{
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	ASSERT((id16_map->start == ID16_UNDEFINED) ||
+	       (val16 < (id16_map->start + id16_map->total)));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+		id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->stack_idx++;
+	id16_map->stack[id16_map->stack_idx] = val16;
+}
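+
+/*
+ * Illustrative usage sketch (not part of the original driver sources): an
+ * id16 allocator handing out ids 0x100..0x1FF.  The osh handle is
+ * hypothetical.  Ids come back LIFO off the internal stack.
+ */
+#ifdef notdef
+static void
+example_id16_usage(osl_t *osh)
+{
+	void *map;
+	uint16 id;
+
+	map = id16_map_init(osh, 256, 0x100);
+	if (map == NULL)
+		return;
+
+	id = id16_map_alloc(map);	/* 0x1FF first, since the stack is LIFO */
+	if (id != ID16_INVALID)
+		id16_map_free(map, id);
+
+	map = id16_map_fini(osh, map);	/* frees the map and returns NULL */
+}
+#endif /* notdef */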
+
+uint32 /* Returns number of failures to allocate an unique id16 */
+id16_map_failures(void * id16_map_hndl)
+{
+	ASSERT(id16_map_hndl != NULL);
+	return ((id16_map_t *)id16_map_hndl)->failures;
+}
+
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+	int idx;
+	int insane = 0;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT(id16_map->stack_idx >= -1);
+	ASSERT(id16_map->stack_idx < (int)id16_map->total);
+
+	if (id16_map->start == ID16_UNDEFINED)
+		goto done;
+
+	for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+		ASSERT(id16_map->stack[idx] >= id16_map->start);
+		ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+		if (id16_map->dbg) {
+			uint16 val16 = id16_map->stack[idx];
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16 - id16_map->start] != TRUE) {
+				insane |= 1;
+				ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+					id16_map_hndl, idx, val16));
+			}
+		}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		uint16 avail = 0; /* Audit available ids counts */
+		for (idx = 0; idx < ((id16_map_dbg_t *)(id16_map->dbg))->total; idx++) {
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx] == TRUE)
+				avail++;
+		}
+		if (avail && (avail != (id16_map->stack_idx + 1))) {
+			insane |= 1;
+			ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+				id16_map_hndl, avail, id16_map->stack_idx));
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+done:
+	/* invoke any other system audits */
+	return (!!insane);
+}
+/* END: Simple id16 allocator */
+
+
+#endif /* BCMDRIVER && !_CFEZ_ */
+
+/* calculate a >> b and return only the lower 32 bits of the result */
+void
+bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b == 0) {
+		r0 = a_low;
+		*r = r0;
+		return;
+	}
+
+	if (b < 32) {
+		a0 = a0 >> b;
+		a1 = a1 & ((1 << b) - 1);
+		a1 = a1 << (32 - b);
+		r0 = a0 | a1;
+		*r = r0;
+		return;
+	} else {
+		r0 = a1 >> (b - 32);
+		*r = r0;
+		return;
+	}
+
+}
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) += offset;
+	if (*r_lo < r1_lo)
+		(*r_hi) ++;
+}
+
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) -= offset;
+	if (*r_lo > r1_lo)
+		(*r_hi) --;
+}
+
+/* Does unsigned 64 bit fixed point multiplication */
+uint64
+fp_mult_64(uint64 val1, uint64 val2, uint8 nf1, uint8 nf2, uint8 nf_res)
+{
+	uint64 mult_out_tmp, mult_out, rnd_val;
+	uint8 shift_amt;
+
+	shift_amt = nf1 + nf2 - nf_res;
+	/* 0.5 in 1.0.shift_amt */
+	rnd_val = bcm_shl_64(1, (shift_amt - 1));
+	rnd_val = (shift_amt == 0) ? 0 : rnd_val;
+	mult_out_tmp = (uint64)((uint64)val1 * (uint64)val2) + (uint64)rnd_val;
+	mult_out = bcm_shr_64(mult_out_tmp, shift_amt);
+
+	return mult_out;
+}
+
+
+/* Does unsigned 64 bit by 32 bit fixed point division */
+uint8
+fp_div_64(uint64 num, uint32 den, uint8 nf_num, uint8 nf_den, uint32 *div_out)
+{
+	uint8 shift_amt1, shift_amt2, shift_amt, nf_res, hd_rm_nr, hd_rm_dr;
+	uint32 num_hi, num_lo;
+	uint64 num_scale;
+
+	/* Worst case shift possible */
+	hd_rm_nr = fp_calc_head_room_64(num);
+	hd_rm_dr = fp_calc_head_room_32(den);
+
+	/* (Nr / Dr) <= 2^32 */
+	shift_amt1 = hd_rm_nr - hd_rm_dr - 1;
+	/* Shift <= 32 + N2 - N1 */
+	shift_amt2 = 31 + nf_den - nf_num;
+	shift_amt = MINIMUM(shift_amt1, shift_amt2);
+
+	/* Scale numerator */
+	num_scale = bcm_shl_64(num, shift_amt);
+
+	/* Do division */
+	num_hi = (uint32)((uint64)num_scale >> 32) & MASK_32_BITS;
+	num_lo = (uint32)(num_scale & MASK_32_BITS);
+	bcm_uint64_divide(div_out, num_hi, num_lo, den);
+
+	/* Result format */
+	nf_res = nf_num - nf_den + shift_amt;
+	return nf_res;
+}
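+
+/*
+ * Illustrative check (not part of the original driver sources): multiplying
+ * 2.5 by 1.5, both in Q4 fixed point (4 fractional bits, so 40 and 24), with
+ * a Q4 result gives 60, i.e. 3.75.
+ */
+#ifdef notdef
+static void
+example_fp_mult(void)
+{
+	ASSERT(fp_mult_64(40, 24, 4, 4, 4) == 60);
+}
+#endif /* notdef */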
+
+/* Finds the number of bits available for shifting in unsigned 64 bit number */
+uint8
+fp_calc_head_room_64(uint64 num)
+{
+	uint8 n_room_bits = 0, msb_pos;
+	uint32 num_hi, num_lo, x;
+
+	num_hi = (uint32)((uint64)num >> 32) & MASK_32_BITS;
+	num_lo = (uint32)(num & MASK_32_BITS);
+
+	if (num_hi > 0) {
+		x = num_hi;
+		n_room_bits = 0;
+	} else {
+		x = num_lo;
+		n_room_bits = 32;
+	}
+
+	msb_pos = (x >> 16) ? ((x >> 24) ? (24 + msb_table[(x >> 24) & MASK_8_BITS])
+			: (16 + msb_table[(x >> 16) & MASK_8_BITS]))
+			: ((x >> 8) ? (8 + msb_table[(x >> 8) & MASK_8_BITS])
+			: msb_table[x & MASK_8_BITS]);
+
+	return (n_room_bits + 32 - msb_pos);
+}
+
+/* Finds the number of bits available for shifting in unsigned 32 bit number */
+uint8
+fp_calc_head_room_32(uint32 x)
+{
+	uint8 msb_pos;
+
+	msb_pos = (x >> 16) ? ((x >> 24) ? (24 + msb_table[(x >> 24) & MASK_8_BITS])
+			: (16 + msb_table[(x >> 16) & MASK_8_BITS]))
+			: ((x >> 8) ? (8 + msb_table[(x >> 8) & MASK_8_BITS])
+			: msb_table[x & MASK_8_BITS]);
+
+	return (32 - msb_pos);
+}
+
+/* Does unsigned 64 bit fixed point floor */
+uint32
+fp_floor_64(uint64 num, uint8 floor_pos)
+{
+	uint32 floor_out;
+
+	floor_out = (uint32)bcm_shr_64(num, floor_pos);
+
+	return floor_out;
+}
+
+/* Does unsigned 32 bit fixed point floor */
+uint32
+fp_floor_32(uint32 num, uint8 floor_pos)
+{
+	return num >> floor_pos;
+}
+
+/* Does unsigned 64 bit fixed point rounding */
+uint32
+fp_round_64(uint64 num, uint8 rnd_pos)
+{
+	uint64 rnd_val, rnd_out_tmp;
+	uint32 rnd_out;
+
+	/* 0.5 in 1.0.rnd_pos */
+	rnd_val = bcm_shl_64(1, (rnd_pos - 1));
+	rnd_val = (rnd_pos == 0) ? 0 : rnd_val;
+	rnd_out_tmp = num + rnd_val;
+	rnd_out = (uint32)bcm_shr_64(rnd_out_tmp, rnd_pos);
+
+	return rnd_out;
+}
+
+/* Does unsigned 32 bit fixed point rounding */
+uint32
+fp_round_32(uint32 num, uint8 rnd_pos)
+{
+	uint32 rnd_val, rnd_out_tmp;
+
+	/* 0.5 in 1.0.rnd_pos */
+	rnd_val = 1 << (rnd_pos - 1);
+	rnd_val = (rnd_pos == 0) ? 0 : rnd_val;
+	rnd_out_tmp = num + rnd_val;
+	return (rnd_out_tmp >> rnd_pos);
+}
+
+/* Does unsigned 64 bit fixed point ceiling */
+uint32
+fp_ceil_64(uint64 num, uint8 ceil_pos)
+{
+	uint64 ceil_val, ceil_out_tmp;
+	uint32 ceil_out;
+
+	/* 0.999... in 1.0.ceil_pos */
+	ceil_val = bcm_shl_64(1, ceil_pos) - 1;
+	ceil_out_tmp = num + ceil_val;
+	ceil_out = (uint32)bcm_shr_64(ceil_out_tmp, ceil_pos);
+
+	return ceil_out;
+}
+
+/* Does left shift of unsigned 64 bit number */
+uint64
+bcm_shl_64(uint64 input, uint8 shift_amt)
+{
+	uint32 in_hi, in_lo;
+	uint32 masked_lo = 0;
+	uint32 mask;
+	uint64 shl_out;
+
+	if (shift_amt == 0) {
+		return input;
+	}
+
+	/* Get hi and lo part */
+	in_hi = (uint32)((uint64)input >> 32) & MASK_32_BITS;
+	in_lo = (uint32)(input & MASK_32_BITS);
+
+	if (shift_amt < 32) {
+		/* Extract bit which belongs to hi part after shifting */
+		mask = ((uint32)~0) << (32 - shift_amt);
+		masked_lo = (in_lo & mask) >> (32 - shift_amt);
+
+		/* Shift hi and lo and prepare output */
+		in_hi = (in_hi << shift_amt) | masked_lo;
+		in_lo = in_lo << shift_amt;
+	} else {
+		/* Extract bit which belongs to hi part after shifting */
+		shift_amt = shift_amt - 32;
+
+		/* Shift hi and lo and prepare output */
+		in_hi = in_lo << shift_amt;
+		in_lo = 0;
+	}
+
+	shl_out = (((uint64)in_hi << 32) | in_lo);
+	return shl_out;
+}
+
+/* Does right shift of unsigned 64 bit number */
+uint64
+bcm_shr_64(uint64 input, uint8 shift_amt)
+{
+	uint32 in_hi, in_lo;
+	uint32 masked_hi = 0;
+	uint32 mask;
+	uint64 shr_out;
+
+	if (shift_amt == 0) {
+		return input;
+	}
+
+	/* Get hi and lo part */
+	in_hi = (uint32)((uint64)input >> 32) & MASK_32_BITS;
+	in_lo = (uint32)(input & MASK_32_BITS);
+
+	if (shift_amt < 32) {
+		/* Extract bit which belongs to lo part after shifting */
+		mask = (1 << shift_amt) - 1;
+		masked_hi = in_hi & mask;
+
+		/* Shift hi and lo and prepare output */
+		in_hi = (uint32)in_hi >> shift_amt;
+		in_lo = ((uint32)in_lo >> shift_amt) | (masked_hi << (32 - shift_amt));
+	} else {
+		shift_amt = shift_amt - 32;
+		in_lo = in_hi >> shift_amt;
+		in_hi = 0;
+	}
+
+	shr_out = (((uint64)in_hi << 32) | in_lo);
+	return shr_out;
+}
+
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+	uint32 now;
+
+	if (!ctr_tbl->enabled)
+		return;
+
+	now = OSL_SYSUPTIME();
+
+	if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+		uint8 i = 0;
+		printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+		for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+			printf(" %u", ctr_tbl->cnt[i]);
+		}
+		printf("\n");
+
+		ctr_tbl->prev_log_print = now;
+		bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+	}
+}
+#else
+/* OSL_SYSUPTIME is not supported so no way to get time */
+#define counter_printlog(a) do {} while (0)
+#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
+#endif /* DEBUG_COUNTER */
+
+#if defined(BCMDRIVER) && !defined(_CFEZ_)
+void
+dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size;
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+	if (pool)
+		MFREE(osh, pool, mem_size);
+}
+dll_pool_t *
+dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size, i;
+	dll_pool_t * dll_pool_p;
+	dll_t * elem_p;
+
+	ASSERT(elem_size > sizeof(dll_t));
+
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+	if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
+		printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
+			elems_max, elem_size);
+		ASSERT(0);
+		return dll_pool_p;
+	}
+
+	dll_init(&dll_pool_p->free_list);
+	dll_pool_p->elems_max = elems_max;
+	dll_pool_p->elem_size = elem_size;
+
+	elem_p = dll_pool_p->elements;
+	for (i = 0; i < elems_max; i++) {
+		dll_append(&dll_pool_p->free_list, elem_p);
+		elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+	}
+
+	dll_pool_p->free_count = elems_max;
+
+	return dll_pool_p;
+}
+
+
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+	dll_t * elem_p;
+
+	if (dll_pool_p->free_count == 0) {
+		ASSERT(dll_empty(&dll_pool_p->free_list));
+		return NULL;
+	}
+
+	elem_p = dll_head_p(&dll_pool_p->free_list);
+	dll_delete(elem_p);
+	dll_pool_p->free_count -= 1;
+
+	return (void *)elem_p;
+}
+
+void
+dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;
+	dll_prepend(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;
+	dll_append(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+#endif /* BCMDRIVER && !_CFEZ_ */
+
+/* calculate partial checksum */
+static uint32
+ip_cksum_partial(uint32 sum, uint8 *val8, uint32 count)
+{
+	uint32 i;
+	uint16 *val16 = (uint16 *)val8;
+
+	ASSERT(val8 != NULL);
+	/* partial chksum calculated on 16-bit values */
+	ASSERT((count % 2) == 0);
+
+	count /= 2;
+
+	for (i = 0; i < count; i++) {
+		sum += *val16++;
+	}
+	return sum;
+}
+
+/* calculate IP checksum */
+static uint16
+ip_cksum(uint32 sum, uint8 *val8, uint32 count)
+{
+	uint16 *val16 = (uint16 *)val8;
+
+	ASSERT(val8 != NULL);
+
+	while (count > 1) {
+		sum += *val16++;
+		count -= 2;
+	}
+	/*  add left-over byte, if any */
+	if (count > 0) {
+		sum += (*(uint8 *)val16);
+	}
+
+	/*  fold 32-bit sum to 16 bits */
+	sum = (sum >> 16) + (sum & 0xffff);
+	sum += (sum >> 16);
+	return ((uint16)~sum);
+}
+
+/* calculate IPv4 header checksum
+ * - input ip points to IP header in network order
+ * - output cksum is in network order
+ */
+uint16
+ipv4_hdr_cksum(uint8 *ip, int ip_len)
+{
+	uint32 sum = 0;
+	uint8 *ptr = ip;
+
+	ASSERT(ip != NULL);
+	ASSERT(ip_len >= IPV4_MIN_HEADER_LEN);
+
+	/* partial cksum skipping the hdr_chksum field */
+	sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct ipv4_hdr, hdr_chksum));
+	ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2;
+
+	/* return calculated chksum */
+	return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip));
+}
+
+/* calculate TCP header checksum using partial sum */
+static uint16
+tcp_hdr_chksum(uint32 sum, uint8 *tcp_hdr, uint16 tcp_len)
+{
+	uint8 *ptr = tcp_hdr;
+
+	ASSERT(tcp_hdr != NULL);
+	ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+
+	/* partial TCP cksum skipping the chksum field */
+	sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct bcmtcp_hdr, chksum));
+	ptr += OFFSETOF(struct bcmtcp_hdr, chksum) + 2;
+
+	/* return calculated chksum */
+	return ip_cksum(sum, ptr, tcp_len - OFFSETOF(struct bcmtcp_hdr, urg_ptr));
+}
+
+struct tcp_pseudo_hdr {
+	uint8   src_ip[IPV4_ADDR_LEN];  /* Source IP Address */
+	uint8   dst_ip[IPV4_ADDR_LEN];  /* Destination IP Address */
+	uint8	zero;
+	uint8	prot;
+	uint16	tcp_size;
+};
+
+/* calculate IPv4 TCP header checksum
+ * - input ip and tcp points to IP and TCP header in network order
+ * - output cksum is in network order
+ */
+uint16
+ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len)
+{
+	struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)ip;
+	struct tcp_pseudo_hdr tcp_ps;
+	uint32 sum = 0;
+
+	ASSERT(ip != NULL);
+	ASSERT(tcp != NULL);
+	ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+
+	/* pseudo header cksum */
+	memset(&tcp_ps, 0, sizeof(tcp_ps));
+	memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN);
+	memcpy(&tcp_ps.src_ip, ip_hdr->src_ip, IPV4_ADDR_LEN);
+	tcp_ps.zero = 0;
+	tcp_ps.prot = ip_hdr->prot;
+	tcp_ps.tcp_size = hton16(tcp_len);
+	sum = ip_cksum_partial(sum, (uint8 *)&tcp_ps, sizeof(tcp_ps));
+
+	/* return calculated TCP header chksum */
+	return tcp_hdr_chksum(sum, tcp, tcp_len);
+}
+
+struct ipv6_pseudo_hdr {
+	uint8  saddr[IPV6_ADDR_LEN];
+	uint8  daddr[IPV6_ADDR_LEN];
+	uint16 payload_len;
+	uint8  zero;
+	uint8  next_hdr;
+};
+
+/* calculate IPv6 TCP header checksum
+ * - input ipv6 and tcp points to IPv6 and TCP header in network order
+ * - output cksum is in network order
+ */
+uint16
+ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len)
+{
+	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)ipv6;
+	struct ipv6_pseudo_hdr ipv6_pseudo;
+	uint32 sum = 0;
+
+	ASSERT(ipv6 != NULL);
+	ASSERT(tcp != NULL);
+	ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
+
+	/* pseudo header cksum */
+	memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo));
+	memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr,
+		sizeof(ipv6_pseudo.saddr));
+	memcpy((char *)ipv6_pseudo.daddr, (char *)ipv6_hdr->daddr.addr,
+		sizeof(ipv6_pseudo.daddr));
+	ipv6_pseudo.payload_len = ipv6_hdr->payload_len;
+	ipv6_pseudo.next_hdr = ipv6_hdr->nexthdr;
+	sum = ip_cksum_partial(sum, (uint8 *)&ipv6_pseudo, sizeof(ipv6_pseudo));
+
+	/* return calculated TCP header chksum */
+	return tcp_hdr_chksum(sum, tcp, tcp_len);
+}
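+
+/* Usage sketch for the checksum helpers above (illustrative only; "ip", "tcp"
+ * and the length arguments are assumed to describe headers already laid out
+ * in network byte order, as the function comments require):
+ *
+ *	uint16 ip_csum = ipv4_hdr_cksum(ip, ip_hdr_len);
+ *	uint16 tcp_csum = ipv4_tcp_hdr_cksum(ip, tcp, tcp_len);
+ *	uint16 tcp6_csum = ipv6_tcp_hdr_cksum(ipv6, tcp, tcp_len);
+ */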
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
new file mode 100644
index 0000000..40fc3f7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
@@ -0,0 +1,1349 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmwifi_channels.c 612483 2016-01-14 03:44:27Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> 	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+/* Definitions for D11AC capable Chanspec type */
+
+/* Chanspec ASCII representation with 802.11ac capability:
+ * [<band> 'g'] <channel> ['/'<bandwidth> [<ctl-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
+ *
+ * <band>:
+ *      (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively.
+ *      Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ *      channel number of the 5MHz, 10MHz, 20MHz channel,
+ *      or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel.
+ * <bandwidth>:
+ *      (optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20.
+ * <primary-sideband>:
+ *      (only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower.
+ *
+ *      For 2.4GHz band 40MHz channels, the same primary channel may be the
+ *      upper sideband for one 40MHz channel, and the lower sideband for an
+ *      overlapping 40MHz channel.  The U/L disambiguates which 40MHz channel
+ *      is being specified.
+ *
+ *      For 40MHz in the 5GHz band and all channel bandwidths greater than
+ *      40MHz, the U/L specification is not allowed since the channels are
+ *      non-overlapping and the primary sub-band is derived from its
+ *      position in the wide bandwidth channel.
+ *
+ * <1st80Channel>:
+ * <2nd80Channel>:
+ *      Required for 80+80, otherwise not allowed.
+ *      Specifies the center channel of the first and second 80MHz band.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><ctl-sideband>
+ *
+ * <channel>:
+ *	primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <ctl-sideband>:
+ * 	"U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      5g8             20MHz     8          -              -
+ *      52              20MHz     52         -              -
+ *      52/40           40MHz     54         52-56          52
+ *      56/40           40MHz     54         52-56          56
+ *      52/80           80MHz     58         52-64          52
+ *      56/80           80MHz     58         52-64          56
+ *      60/80           80MHz     58         52-64          60
+ *      64/80           80MHz     58         52-64          64
+ *      52/160          160MHz    50         36-64          52
+ *      36/160          160MHz    50         36-64          36
+ *      36/80+80/42-106 80+80MHz  42,106     36-48,100-112  36
+ *
+ * 2 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      2g8             20MHz     8          -              -
+ *      8               20MHz     8          -              -
+ *      6               20MHz     6          -              -
+ *      6/40l           40MHz     8          6-10           6
+ *      6l              40MHz     8          6-10           6
+ *      6/40u           40MHz     4          2-6            6
+ *      6u              40MHz     4          2-6            6
+ */
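+
+/* Minimal round-trip sketch of the format above (illustrative only; error
+ * handling is reduced to the checks shown):
+ *
+ *	char buf[CHANSPEC_STR_LEN];
+ *	chanspec_t cs = wf_chspec_aton("36/80");	(80MHz channel, primary 36)
+ *	if (cs != 0 && !wf_chspec_malformed(cs))
+ *		(void)wf_chspec_ntoa(cs, buf);		(formats "36/80" back into buf)
+ */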
+
+/* bandwidth ASCII string */
+static const char *wf_chspec_bw_str[] =
+{
+	"5",
+	"10",
+	"20",
+	"40",
+	"80",
+	"160",
+	"80+80",
+#ifdef WL11ULB
+	"2.5"
+#else /* WL11ULB */
+	"na"
+#endif /* WL11ULB */
+};
+
+static const uint8 wf_chspec_bw_mhz[] =
+{5, 10, 20, 40, 80, 160, 160};
+
+#define WF_NUM_BW \
+	(sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
+
+/* 40MHz channels in 5GHz band */
+static const uint8 wf_5g_40m_chans[] =
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+#define WF_NUM_5G_40M_CHANS \
+	(sizeof(wf_5g_40m_chans)/sizeof(uint8))
+
+/* 80MHz channels in 5GHz band */
+static const uint8 wf_5g_80m_chans[] =
+{42, 58, 106, 122, 138, 155};
+#define WF_NUM_5G_80M_CHANS \
+	(sizeof(wf_5g_80m_chans)/sizeof(uint8))
+
+/* 160MHz channels in 5GHz band */
+static const uint8 wf_5g_160m_chans[] =
+{50, 114};
+#define WF_NUM_5G_160M_CHANS \
+	(sizeof(wf_5g_160m_chans)/sizeof(uint8))
+
+/* opclass and channel information for US. Table E-1 */
+static const uint16 opclass_data[] = {
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)),
+	0,
+	0,
+	0,
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER),
+	(WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER),
+	(WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER),
+	(WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER),
+};
+
+/* convert bandwidth from chanspec to MHz */
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
+{
+	uint bw;
+
+	bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+	return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+/* bw in MHz, return the channel count from the center channel to the
+ * channel at the edge of the band
+ */
+static uint8
+center_chan_to_edge(uint bw)
+{
+	/* edge channels separated by BW - 10MHz on each side
+	 * delta from cf to edge is half of that,
+	 * MHz to channel num conversion is 5MHz/channel
+	 */
+	return (uint8)(((bw - 20) / 2) / 5);
+}
+
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
+static uint8
+channel_low_edge(uint center_ch, uint bw)
+{
+	return (uint8)(center_ch - center_chan_to_edge(bw));
+}
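+
+/* Worked example (illustrative): for an 80MHz channel, center_chan_to_edge(80)
+ * = ((80 - 20) / 2) / 5 = 6, so channel_low_edge(42, 80) = 42 - 6 = 36, the
+ * lowest 20MHz channel of the 80MHz channel centered on 42.
+ */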
+
+/* return side band number given center channel and control channel
+ * return -1 on error
+ */
+static int
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
+{
+	uint lowest = channel_low_edge(center_ch, bw);
+	uint sb;
+
+	if ((ctl_ch - lowest) % 4) {
+		/* bad ctl channel, not mult 4 */
+		return -1;
+	}
+
+	sb = ((ctl_ch - lowest) / 4);
+
+	/* sb must be an index to a 20MHz channel in range */
+	if (sb >= (bw / 20)) {
+		/* ctl_ch must have been too high for the center_ch */
+		return -1;
+	}
+
+	return sb;
+}
+
+/* return control channel given center channel and side band */
+static uint8
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
+{
+	return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+	uint i;
+	for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+		if (ch == wf_5g_80m_chans[i])
+			return i;
+	}
+
+	return -1;
+}
+
+/* wrapper function for wf_chspec_ntoa. In case of an error it puts
+ * the original chanspec in the output buffer, prepended with "invalid".
+ * Can be directly used in print routines as it takes care of null
+ */
+char *
+wf_chspec_ntoa_ex(chanspec_t chspec, char *buf)
+{
+	if (wf_chspec_ntoa(chspec, buf) == NULL)
+		snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec);
+	return buf;
+}
+
+/* given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original pointer buf.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL
+ */
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+	const char *band;
+	uint ctl_chan;
+
+	if (wf_chspec_malformed(chspec))
+		return NULL;
+
+	band = "";
+
+	/* check for non-default band spec */
+	if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
+	    (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
+		band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
+
+	/* ctl channel */
+	ctl_chan = wf_chspec_ctlchan(chspec);
+
+	/* bandwidth and ctl sideband */
+	if (CHSPEC_IS20(chspec)) {
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
+	} else if (!CHSPEC_IS8080(chspec)) {
+		const char *bw;
+		const char *sb = "";
+
+		bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+		/* ctl sideband string if needed for 2g 40MHz */
+		if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+		}
+
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
+#else
+		/* ctl sideband string instead of BW for 40MHz */
+		if (CHSPEC_IS40(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
+		} else {
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
+		}
+#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
+
+	} else {
+		/* 80+80 */
+		uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
+		uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
+
+		/* convert to channel number */
+		chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
+		chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
+
+		/* Outputs a max of CHANSPEC_STR_LEN chars including '\0'  */
+		snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
+	}
+
+	return (buf);
+}
+
+static int
+read_uint(const char **p, unsigned int *num)
+{
+	unsigned long val;
+	char *endp = NULL;
+
+	val = strtoul(*p, &endp, 10);
+	/* if endp is the initial pointer value, then a number was not read */
+	if (endp == *p)
+		return 0;
+
+	/* advance the buffer pointer to the end of the integer string */
+	*p = endp;
+	/* return the parsed integer */
+	*num = (unsigned int)val;
+
+	return 1;
+}
+
+/* given a chanspec string, convert to a chanspec.
+ * On error return 0
+ */
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+	chanspec_t chspec;
+	uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
+	uint num, ctl_ch;
+	uint ch1, ch2;
+	char c, sb_ul = '\0';
+	int i;
+
+	bw = 20;
+	chspec_sb = 0;
+	chspec_ch = ch1 = ch2 = 0;
+
+	/* parse channel num or band */
+	if (!read_uint(&a, &num))
+		return 0;
+	/* if we are looking at a 'g', then the first number was a band */
+	c = tolower((int)a[0]);
+	if (c == 'g') {
+		a++; /* consume the char */
+
+		/* band must be "2" or "5" */
+		if (num == 2)
+			chspec_band = WL_CHANSPEC_BAND_2G;
+		else if (num == 5)
+			chspec_band = WL_CHANSPEC_BAND_5G;
+		else
+			return 0;
+
+		/* read the channel number */
+		if (!read_uint(&a, &ctl_ch))
+			return 0;
+
+		c = tolower((int)a[0]);
+	}
+	else {
+		/* first number is channel, use default for band */
+		ctl_ch = num;
+		chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
+		               WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+	}
+
+	if (c == '\0') {
+		/* default BW of 20MHz */
+		chspec_bw = WL_CHANSPEC_BW_20;
+		goto done_read;
+	}
+
+	a ++; /* consume the 'u','l', or '/' */
+
+	/* check 'u'/'l' */
+	if (c == 'u' || c == 'l') {
+		sb_ul = c;
+		chspec_bw = WL_CHANSPEC_BW_40;
+		goto done_read;
+	}
+
+	/* next letter must be '/' */
+	if (c != '/')
+		return 0;
+
+	/* read bandwidth */
+	if (!read_uint(&a, &bw))
+		return 0;
+
+	/* convert to chspec value */
+	if (bw == 2) {
+		chspec_bw = WL_CHANSPEC_BW_2P5;
+	} else if (bw == 5) {
+		chspec_bw = WL_CHANSPEC_BW_5;
+	} else if (bw == 10) {
+		chspec_bw = WL_CHANSPEC_BW_10;
+	} else if (bw == 20) {
+		chspec_bw = WL_CHANSPEC_BW_20;
+	} else if (bw == 40) {
+		chspec_bw = WL_CHANSPEC_BW_40;
+	} else if (bw == 80) {
+		chspec_bw = WL_CHANSPEC_BW_80;
+	} else if (bw == 160) {
+		chspec_bw = WL_CHANSPEC_BW_160;
+	} else {
+		return 0;
+	}
+
+	/* So far we have <band>g<chan>/<bw>
+	 * Can now be followed by u/l if bw = 40,
+	 * or '+80' if bw = 80, to make '80+80' bw,
+	 * or '.5' if bw = 2, to make '2.5' bw.
+	 */
+
+	c = tolower((int)a[0]);
+
+	/* if we have a 2g/40 channel, we should have a l/u spec now */
+	if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+		if (c == 'u' || c == 'l') {
+			a ++; /* consume the u/l char */
+			sb_ul = c;
+			goto done_read;
+		}
+	}
+
+	/* check for 80+80 */
+	if (c == '+') {
+		/* 80+80 */
+		const char plus80[] = "80/";
+
+		/* must be looking at '+80/'
+		 * check and consume this string.
+		 */
+		chspec_bw = WL_CHANSPEC_BW_8080;
+
+		a ++; /* consume the char '+' */
+
+		/* consume the '80/' string */
+		for (i = 0; i < 3; i++) {
+			if (*a++ != plus80[i]) {
+				return 0;
+			}
+		}
+
+		/* read primary 80MHz channel */
+		if (!read_uint(&a, &ch1))
+			return 0;
+
+		/* must be followed by '-' */
+		if (a[0] != '-')
+			return 0;
+		a ++; /* consume the char */
+
+		/* read secondary 80MHz channel */
+		if (!read_uint(&a, &ch2))
+			return 0;
+	} else if (c == '.') {
+		/* 2.5 */
+		/* must be looking at '.5'
+		 * check and consume this string.
+		 */
+		chspec_bw = WL_CHANSPEC_BW_2P5;
+
+		a ++; /* consume the char '.' */
+
+		/* consume the '5' string */
+		if (*a++ != '5') {
+			return 0;
+		}
+	}
+
+done_read:
+	/* skip trailing white space */
+	while (a[0] == ' ') {
+		a ++;
+	}
+
+	/* must be end of string */
+	if (a[0] != '\0')
+		return 0;
+
+	/* Now have all the chanspec string parts read;
+	 * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2.
+	 * chspec_band and chspec_bw are chanspec values.
+	 * Need to convert ctl_ch, sb_ul, and ch1,ch2 into
+	 * a center channel (or two) and sideband.
+	 */
+
+	/* if an sb u/l string was given, just use that,
+	 * guaranteed to be bw = 40 by the string parsing above.
+	 */
+	if (sb_ul != '\0') {
+		if (sb_ul == 'l') {
+			chspec_ch = UPPER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+		} else if (sb_ul == 'u') {
+			chspec_ch = LOWER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+		}
+	}
+	/* if the bw is 20, center and sideband are trivial */
+	else if (BW_LE20(chspec_bw)) {
+		chspec_ch = ctl_ch;
+		chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
+	}
+	/* if the bw is 40/80/160, not 80+80, a single method
+	 * can be used to find the center and sideband
+	 */
+	else if (chspec_bw != WL_CHANSPEC_BW_8080) {
+		/* figure out ctl sideband based on ctl channel and bandwidth */
+		const uint8 *center_ch = NULL;
+		int num_ch = 0;
+		int sb = -1;
+
+		if (chspec_bw == WL_CHANSPEC_BW_40) {
+			center_ch = wf_5g_40m_chans;
+			num_ch = WF_NUM_5G_40M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+			center_ch = wf_5g_80m_chans;
+			num_ch = WF_NUM_5G_80M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+			center_ch = wf_5g_160m_chans;
+			num_ch = WF_NUM_5G_160M_CHANS;
+		} else {
+			return 0;
+		}
+
+		for (i = 0; i < num_ch; i ++) {
+			sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+			if (sb >= 0) {
+				chspec_ch = center_ch[i];
+				chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+				break;
+			}
+		}
+
+		/* check for no matching sb/center */
+		if (sb < 0) {
+			return 0;
+		}
+	}
+	/* Otherwise, bw is 80+80. Figure out channel pair and sb */
+	else {
+		int ch1_id = 0, ch2_id = 0;
+		int sb;
+
+		/* look up the channel ID for the specified channel numbers */
+		ch1_id = channel_80mhz_to_id(ch1);
+		ch2_id = channel_80mhz_to_id(ch2);
+
+		/* validate channels */
+		if (ch1_id < 0 || ch2_id < 0)
+			return 0;
+
+		/* combine 2 channel IDs in channel field of chspec */
+		chspec_ch = (((uint)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+		             ((uint)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+
+		/* figure out primary 20 MHz sideband */
+
+		/* is the primary channel contained in the 1st 80MHz channel? */
+		sb = channel_to_sb(ch1, ctl_ch, bw);
+		if (sb < 0) {
+			/* no match for primary channel 'ctl_ch' in segment0 80MHz channel */
+			return 0;
+		}
+
+		chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+	}
+
+	chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+
+	if (wf_chspec_malformed(chspec))
+		return 0;
+
+	return chspec;
+}
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	/* must be 2G or 5G band */
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth */
+		if (!BW_LE40(chspec_bw)) {
+			return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint ch1_id, ch2_id;
+
+			/* channel IDs in 80+80 must be in range */
+			ch1_id = CHSPEC_CHAN1(chanspec);
+			ch2_id = CHSPEC_CHAN2(chanspec);
+			if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
+				return TRUE;
+
+		} else if (BW_LE160(chspec_bw)) {
+			if (chspec_ch > MAXCHANNEL) {
+				return TRUE;
+			}
+		} else {
+			/* invalid bandwidth */
+			return TRUE;
+		}
+	} else {
+		/* must be 2G or 5G band */
+		return TRUE;
+	}
+
+	/* side band needs to be consistent with bandwidth */
+	if (BW_LE20(chspec_bw)) {
+		if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_80 ||
+	           chspec_bw == WL_CHANSPEC_BW_8080) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
+			return TRUE;
+	}
+	else if (chspec_bw == WL_CHANSPEC_BW_160) {
+		ASSERT(CHSPEC_CTL_SB(chanspec) <= WL_CHANSPEC_CTL_SB_UUU);
+	}
+	return FALSE;
+}
+
+/*
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ * RETURNS: TRUE if the chanspec is a valid 802.11 channel
+ */
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	if (wf_chspec_malformed(chanspec))
+		return FALSE;
+
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth and channel range */
+		if (BW_LE20(chspec_bw)) {
+			if (chspec_ch >= 1 && chspec_ch <= 14)
+				return TRUE;
+		} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+			if (chspec_ch >= 3 && chspec_ch <= 11)
+				return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint16 ch1, ch2;
+
+			ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
+			ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
+
+			/* the two channels must be separated by more than 80MHz by VHT req */
+			if ((ch2 > ch1 + CH_80MHZ_APART) ||
+			    (ch1 > ch2 + CH_80MHZ_APART))
+				return TRUE;
+		} else {
+			const uint8 *center_ch;
+			uint num_ch, i;
+
+			if (BW_LE40(chspec_bw)) {
+				center_ch = wf_5g_40m_chans;
+				num_ch = WF_NUM_5G_40M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+				center_ch = wf_5g_80m_chans;
+				num_ch = WF_NUM_5G_80M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+				center_ch = wf_5g_160m_chans;
+				num_ch = WF_NUM_5G_160M_CHANS;
+			} else {
+				/* invalid bandwidth */
+				return FALSE;
+			}
+
+			/* check for a valid center channel */
+			if (BW_LE20(chspec_bw)) {
+				/* We don't have an array of legal 20MHz 5G channels, but they are
+				 * each side of the legal 40MHz channels.  Check the chanspec
+				 * channel against either side of the 40MHz channels.
+				 */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
+					    chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
+						break; /* match found */
+				}
+
+				if (i == num_ch) {
+					/* check for channel 165 which is not the side band
+					 * of 40MHz 5G channel
+					 */
+					if (chspec_ch == 165)
+						i = 0;
+
+					/* check for legacy JP channels on failure */
+					if (chspec_ch == 34 || chspec_ch == 38 ||
+					    chspec_ch == 42 || chspec_ch == 46)
+						i = 0;
+				}
+			} else {
+				/* check the chanspec channel to each legal channel */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == center_ch[i])
+						break; /* match found */
+				}
+			}
+
+			if (i < num_ch) {
+				/* match found */
+				return TRUE;
+			}
+		}
+	}
+
+	return FALSE;
+}
+
+/*
+ * This function returns the channel number that control traffic is being sent on. For 20MHz
+ * channels this is just the channel number; for 40MHz, 80MHz, and 160MHz channels it is the
+ * primary 20MHz sideband selected by the chanspec.
+ */
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+	uint center_chan;
+	uint bw_mhz;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (CHSPEC_BW_LE20(chspec)) {
+		return CHSPEC_CHANNEL(chspec);
+	} else {
+		sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		if (CHSPEC_IS8080(chspec)) {
+			/* For an 80+80 MHz channel, the sideband 'sb' field is an 80 MHz sideband
+			 * (LL, LU, UL, UU) for the 80 MHz frequency segment 0.
+			 */
+			uint chan_id = CHSPEC_CHAN1(chspec);
+
+			bw_mhz = 80;
+
+			/* convert from channel index to channel number */
+			center_chan = wf_5g_80m_chans[chan_id];
+		}
+		else {
+			bw_mhz = bw_chspec_to_mhz(chspec);
+			center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+		}
+
+		return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
+	}
+}
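+
+/* Worked example (illustrative): for an 80MHz chanspec with center channel 58
+ * and sideband index 2, the low edge channel is 58 - 6 = 52 and the control
+ * channel is 52 + 2 * 4 = 60.
+ */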
+
+/* given a chanspec, return the bandwidth string */
+const char *
+wf_chspec_to_bw_str(chanspec_t chspec)
+{
+	return wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+}
+
+/*
+ * This function returns the chanspec of the control channel of a given chanspec
+ */
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+	chanspec_t ctl_chspec = chspec;
+	uint8 ctl_chan;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (!CHSPEC_BW_LE20(chspec)) {
+		ctl_chan = wf_chspec_ctlchan(chspec);
+		ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+		ctl_chspec |= CHSPEC_BAND(chspec);
+	}
+	return ctl_chspec;
+}
+
+/* return chanspec given control channel and bandwidth
+ * return 0 on error
+ */
+uint16
+wf_channel2chspec(uint ctl_ch, uint bw)
+{
+	uint16 chspec;
+	const uint8 *center_ch = NULL;
+	int num_ch = 0;
+	int sb = -1;
+	int i = 0;
+
+	chspec = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+
+	chspec |= bw;
+
+	if (bw == WL_CHANSPEC_BW_40) {
+		center_ch = wf_5g_40m_chans;
+		num_ch = WF_NUM_5G_40M_CHANS;
+		bw = 40;
+	} else if (bw == WL_CHANSPEC_BW_80) {
+		center_ch = wf_5g_80m_chans;
+		num_ch = WF_NUM_5G_80M_CHANS;
+		bw = 80;
+	} else if (bw == WL_CHANSPEC_BW_160) {
+		center_ch = wf_5g_160m_chans;
+		num_ch = WF_NUM_5G_160M_CHANS;
+		bw = 160;
+	} else if (BW_LE20(bw)) {
+		chspec |= ctl_ch;
+		return chspec;
+	} else {
+		return 0;
+	}
+
+	for (i = 0; i < num_ch; i ++) {
+		sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+		if (sb >= 0) {
+			chspec |= center_ch[i];
+			chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
+			break;
+		}
+	}
+
+	/* check for no matching sb/center */
+	if (sb < 0) {
+		return 0;
+	}
+
+	return chspec;
+}
+
+/*
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec40 = chspec;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */
+	if (CHSPEC_IS8080(chspec) || CHSPEC_IS160(chspec)) {
+		chspec = wf_chspec_primary80_chspec(chspec);
+	}
+
+	/* determine primary 40 MHz sub-channel of an 80 MHz chanspec */
+	if (CHSPEC_IS80(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_UL) {
+			/* Primary 40MHz is on lower side */
+			center_chan -= CH_20MHZ_APART;
+			/* sideband bits are the same for LL/LU and L/U */
+		} else {
+			/* Primary 40MHz is on upper side */
+			center_chan += CH_20MHZ_APART;
+			/* sideband bits need to be adjusted by UL offset */
+			sb -= WL_CHANSPEC_CTL_SB_UL;
+		}
+
+		/* Create primary 40MHz chanspec */
+		chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+		            sb | center_chan);
+	}
+
+	return chspec40;
+}
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+	int ch = -1;
+	uint base;
+	int offset;
+
+	/* take the default channel start frequency */
+	if (start_factor == 0) {
+		if (freq >= 2400 && freq <= 2500)
+			start_factor = WF_CHAN_FACTOR_2_4_G;
+		else if (freq >= 5000 && freq <= 6000)
+			start_factor = WF_CHAN_FACTOR_5_G;
+	}
+
+	if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+		return 14;
+
+	base = start_factor / 2;
+
+	/* check that the frequency is in 1GHz range of the base */
+	if ((freq < base) || (freq > base + 1000))
+		return -1;
+
+	offset = freq - base;
+	ch = offset / 5;
+
+	/* check that frequency is a 5MHz multiple from the base */
+	if (offset != (ch * 5))
+		return -1;
+
+	/* restricted channel range check for 2.4G */
+	if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+		return -1;
+
+	return ch;
+}
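+
+/* Worked example (illustrative): wf_mhz2channel(5200, WF_CHAN_FACTOR_5_G)
+ * uses base = 10000 / 2 = 5000MHz, so offset = 200MHz and the returned
+ * channel is 200 / 5 = 40.
+ */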
+
+/*
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G
+ * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+	int freq;
+
+	if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+	    (ch > 200))
+		freq = -1;
+	else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+		freq = 2484;
+	else
+		freq = ch * 5 + start_factor / 2;
+
+	return freq;
+}
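+
+/* Worked example (illustrative): wf_channel2mhz(36, WF_CHAN_FACTOR_5_G)
+ * returns 36 * 5 + 10000 / 2 = 5180MHz.
+ */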
+
+static const uint16 sidebands[] = {
+	WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU,
+	WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU,
+	WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU,
+	WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU
+};
+
+/*
+ * Returns the 80MHz chanspec corresponding to the following input
+ * parameters
+ *
+ *	primary_channel - primary 20MHz channel
+ *	center_channel  - center channel number of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_chspec_80(uint8 center_channel, uint8 primary_channel)
+{
+
+	chanspec_t chanspec = INVCHANSPEC;
+	chanspec_t chanspec_cur;
+	uint i;
+
+	for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
+		chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
+		if (primary_channel == wf_chspec_ctlchan(chanspec_cur)) {
+			chanspec = chanspec_cur;
+			break;
+		}
+	}
+	/* If the loop ended early, we are good, otherwise we did not
+	 * find an 80MHz chanspec with the given center_channel that had a primary channel
+	 * matching the given primary_channel.
+	 */
+	return chanspec;
+}
+
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - Primary 20 MHz channel
+ *    chan0 - center channel number of one frequency segment
+ *    chan1 - center channel number of the other frequency segment
+ *
+ * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+chanspec_t
+wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1)
+{
+	int sb = 0;
+	uint16 chanspec = 0;
+	int chan0_id = 0, chan1_id = 0;
+	int seg0, seg1;
+
+	chan0_id = channel_80mhz_to_id(chan0);
+	chan1_id = channel_80mhz_to_id(chan1);
+
+	/* make sure the channel numbers were valid */
+	if (chan0_id == -1 || chan1_id == -1)
+		return INVCHANSPEC;
+
+	/* does the primary channel fit with the 1st 80MHz channel ? */
+	sb = channel_to_sb(chan0, primary_20mhz, 80);
+	if (sb >= 0) {
+		/* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+		seg0 = chan0_id;
+		seg1 = chan1_id;
+	} else {
+		/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+		sb = channel_to_sb(chan1, primary_20mhz, 80);
+		if (sb < 0) {
+			/* no match for ctl_ch to either 80MHz center channel */
+			return INVCHANSPEC;
+		}
+		/* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+		seg0 = chan1_id;
+		seg1 = chan0_id;
+	}
+
+	chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
+	            (seg1 << WL_CHANSPEC_CHAN2_SHIFT) |
+	            (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+	            WL_CHANSPEC_BW_8080 |
+	            WL_CHANSPEC_BAND_5G);
+
+	return chanspec;
+}
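+
+/* Worked example (illustrative): wf_chspec_get8080_chspec(36, 42, 106) finds
+ * primary channel 36 inside the 80MHz channel centered on 42 (sideband 0), so
+ * channel 42 becomes frequency segment 0 and channel 106 segment 1; this is
+ * the "36/80+80/42-106" case from the chanspec string examples above.
+ */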
+
+/*
+ * This function returns the 80MHz channel for the given id.
+ */
+static uint8
+wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id)
+{
+	if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS)
+		return wf_5g_80m_chans[chan_80Mhz_id];
+
+	return 0;
+}
+
+/*
+ * Returns the primary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ *  returns -1 in case the provided chanspec is a 20/40 MHz chanspec
+ */
+
+uint8
+wf_chspec_primary80_channel(chanspec_t chanspec)
+{
+	uint8 primary80_chan;
+
+	if (CHSPEC_IS80(chanspec))	{
+		primary80_chan = CHSPEC_CHANNEL(chanspec);
+	}
+	else if (CHSPEC_IS8080(chanspec)) {
+		/* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+		primary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec));
+	}
+	else if (CHSPEC_IS160(chanspec)) {
+		uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+		uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		/* based on the sb value the primary 80MHz channel can be retrieved:
+		 * if sb is in range 0 to 3, the lower band is the primary 80MHz band
+		 */
+		if (sb < 4) {
+			primary80_chan = center_chan - CH_40MHZ_APART;
+		}
+		/* if sb is in range 4 to 7, the upper band is the primary 80MHz band */
+		else
+		{
+			primary80_chan = center_chan + CH_40MHZ_APART;
+		}
+	}
+	else {
+		/* for 20 and 40 Mhz */
+		primary80_chan = -1;
+	}
+	return primary80_chan;
+}
+
+/*
+ * Returns the secondary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ *  returns -1 in case the provided chanspec is a 20/40/80 MHz chanspec
+ */
+uint8
+wf_chspec_secondary80_channel(chanspec_t chanspec)
+{
+	uint8 secondary80_chan;
+
+	if (CHSPEC_IS8080(chanspec)) {
+		secondary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec));
+	}
+	else if (CHSPEC_IS160(chanspec)) {
+		uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+		uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		/* based on the sb value the secondary 80MHz channel can be retrieved:
+		 * if sb is in range 0 to 3, the upper band is the secondary 80MHz band
+		 */
+		if (sb < 4) {
+			secondary80_chan = center_chan + CH_40MHZ_APART;
+		}
+		/* if sb is in range 4 to 7, the lower band is the secondary 80MHz band */
+		else
+		{
+			secondary80_chan = center_chan - CH_40MHZ_APART;
+		}
+	}
+	else {
+		/* for 20, 40, and 80 Mhz */
+		secondary80_chan = -1;
+	}
+	return secondary80_chan;
+}
+
+/*
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
+ *
+ *    chanspec - Input chanspec for which the primary 80MHz chanspec has to be retrieved
+ *
+ *  returns the input chanspec in case the provided chanspec is an 80 MHz chanspec
+ *  returns INVCHANSPEC in case the provided channel is 20/40 MHz chanspec
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec80;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+	if (CHSPEC_IS80(chspec)) {
+		chspec80 = chspec;
+	}
+	else if (CHSPEC_IS8080(chspec)) {
+
+		/* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+		center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
+
+		sb = CHSPEC_CTL_SB(chspec);
+
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	}
+	else if (CHSPEC_IS160(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+			/* Primary 80MHz is on lower side */
+			center_chan -= CH_40MHZ_APART;
+		}
+		else {
+			/* Primary 80MHz is on upper side */
+			center_chan += CH_40MHZ_APART;
+			sb -= WL_CHANSPEC_CTL_SB_ULL;
+		}
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	}
+	else {
+		chspec80 = INVCHANSPEC;
+	}
+
+	return chspec80;
+}
+
+#ifdef WL11AC_80P80
+uint8
+wf_chspec_channel(chanspec_t chspec)
+{
+	if (CHSPEC_IS8080(chspec)) {
+		return wf_chspec_primary80_channel(chspec);
+	}
+	else {
+		return ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK));
+	}
+}
+#endif /* WL11AC_80P80 */
+
+/* This routine returns the chanspec for a given operating class and
+ * channel number
+ */
+chanspec_t
+wf_channel_create_chspec_frm_opclass(uint8 opclass, uint8 channel)
+{
+	chanspec_t chanspec = 0;
+	uint16 opclass_info = 0;
+	uint16 lookupindex = 0;
+	switch (opclass) {
+		case 115:
+			lookupindex = 1;
+			break;
+		case 124:
+			lookupindex = 3;
+			break;
+		case 125:
+			lookupindex = 5;
+			break;
+		case 81:
+			lookupindex = 12;
+			break;
+		case 116:
+			lookupindex = 22;
+			break;
+		case 119:
+			lookupindex = 23;
+			break;
+		case 126:
+			lookupindex = 25;
+			break;
+		case 83:
+			lookupindex = 32;
+			break;
+		case 84:
+			lookupindex = 33;
+			break;
+		default:
+			lookupindex = 12;
+	}
+
+	if (lookupindex < 33) {
+		opclass_info = opclass_data[lookupindex-1];
+	}
+	else {
+		opclass_info = opclass_data[11];
+	}
+	chanspec = opclass_info | (uint16)channel;
+	return chanspec;
+}
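+
+/* Worked example (illustrative): wf_channel_create_chspec_frm_opclass(115, 36)
+ * maps operating class 115 to lookup index 1, i.e. the first 20MHz 5G entry of
+ * opclass_data[], and returns that entry OR'ed with channel 36.
+ */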
+
+/* This routine returns the opclass for a given chanspec */
+int
+wf_channel_create_opclass_frm_chspec(chanspec_t chspec)
+{
+	BCM_REFERENCE(chspec);
+	/* TODO: Implement this function ! */
+	return 12; /* opclass 12 for basic 2G channels */
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.h b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
new file mode 100644
index 0000000..28c6e27
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
@@ -0,0 +1,645 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * the wl driver, tools, and apps.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmwifi_channels.h 612483 2016-01-14 03:44:27Z $
+ */
+
+#ifndef	_bcmwifi_channels_h_
+#define	_bcmwifi_channels_h_
+
+
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
+typedef uint16 chanspec_t;
+
+/* channel defines */
+#define CH_UPPER_SB			0x01
+#define CH_LOWER_SB			0x02
+#define CH_EWA_VALID			0x04
+#define CH_80MHZ_APART			16
+#define CH_40MHZ_APART			8
+#define CH_20MHZ_APART			4
+#define CH_10MHZ_APART			2
+#define CH_5MHZ_APART			1	/* 2G band channels are 5 Mhz apart */
+#define CH_MAX_2G_CHANNEL		14	/* Max channel in 2G band */
+
+/* maximum # channels the s/w supports */
+#define MAXCHANNEL		224	/* max # supported channels. The max channel no is above,
+					 * this is that + 1 rounded up to a multiple of NBBY (8).
+					 * DO NOT MAKE it > 255: channels are uint8's all over
+					 */
+#define MAXCHANNEL_NUM	(MAXCHANNEL - 1)	/* max channel number */
+
+/* channel bitvec */
+typedef struct {
+	uint8   vec[MAXCHANNEL/8];   /* bitvec of channels */
+} chanvec_t;
+
+/* make sure channel num is within valid range */
+#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM)
+
+#define CHSPEC_CTLOVLP(sp1, sp2, sep)	\
+	(ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep))
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK		0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT		0
+#define WL_CHANSPEC_CHAN1_MASK		0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT		0
+#define WL_CHANSPEC_CHAN2_MASK		0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT		4
+
+#define WL_CHANSPEC_CTL_SB_MASK		0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT	8
+#define WL_CHANSPEC_CTL_SB_LLL		0x0000
+#define WL_CHANSPEC_CTL_SB_LLU		0x0100
+#define WL_CHANSPEC_CTL_SB_LUL		0x0200
+#define WL_CHANSPEC_CTL_SB_LUU		0x0300
+#define WL_CHANSPEC_CTL_SB_ULL		0x0400
+#define WL_CHANSPEC_CTL_SB_ULU		0x0500
+#define WL_CHANSPEC_CTL_SB_UUL		0x0600
+#define WL_CHANSPEC_CTL_SB_UUU		0x0700
+#define WL_CHANSPEC_CTL_SB_LL		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL		WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU		WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER	WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER	WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE		WL_CHANSPEC_CTL_SB_LLL
+
+#define WL_CHANSPEC_BW_MASK		0x3800
+#define WL_CHANSPEC_BW_SHIFT		11
+#define WL_CHANSPEC_BW_5		0x0000
+#define WL_CHANSPEC_BW_10		0x0800
+#define WL_CHANSPEC_BW_20		0x1000
+#define WL_CHANSPEC_BW_40		0x1800
+#define WL_CHANSPEC_BW_80		0x2000
+#define WL_CHANSPEC_BW_160		0x2800
+#define WL_CHANSPEC_BW_8080		0x3000
+#define WL_CHANSPEC_BW_2P5		0x3800
+
+#define WL_CHANSPEC_BAND_MASK		0xc000
+#define WL_CHANSPEC_BAND_SHIFT		14
+#define WL_CHANSPEC_BAND_2G		0x0000
+#define WL_CHANSPEC_BAND_3G		0x4000
+#define WL_CHANSPEC_BAND_4G		0x8000
+#define WL_CHANSPEC_BAND_5G		0xc000
+#define INVCHANSPEC			255
+#define MAX_CHANSPEC				0xFFFF
+
+#define WL_CHANNEL_BAND(ch) (((ch) <= CH_MAX_2G_CHANNEL) ? \
+	WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)
+
+/* channel defines */
+#define LOWER_20_SB(channel)		(((channel) > CH_10MHZ_APART) ? \
+					((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel)		(((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+					((channel) + CH_10MHZ_APART) : 0)
+
+#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
+#define UU_20_SB(channel) 	(((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
+				((channel) + 3 * CH_10MHZ_APART) : 0)
+#define LU_20_SB(channel) LOWER_20_SB(channel)
+#define UL_20_SB(channel) UPPER_20_SB(channel)
+
+#define LOWER_40_SB(channel)		((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel)		((channel) + CH_20MHZ_APART)
+#define CHSPEC_WLCBANDUNIT(chspec)	(CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel)		(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+					(((channel) <= CH_MAX_2G_CHANNEL) ? \
+					WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH2P5MHZ_CHSPEC(channel)	(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_2P5 | \
+						(((channel) <= CH_MAX_2G_CHANNEL) ? \
+						WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH5MHZ_CHSPEC(channel)		(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_5 | \
+						(((channel) <= CH_MAX_2G_CHANNEL) ? \
+						WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH10MHZ_CHSPEC(channel)		(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_10 | \
+						(((channel) <= CH_MAX_2G_CHANNEL) ? \
+						WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel)	(((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+					((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+					((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+					WL_CHANSPEC_BAND_5G))
+#define CH80MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+#define CHBW_CHSPEC(bw, channel)	(chanspec_t)((chanspec_t)(channel) | (bw) | \
+							(((channel) <= CH_MAX_2G_CHANNEL) ? \
+							WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+
+/* simple MACROs to get different fields of chanspec */
+#ifdef WL11AC_80P80
+#define CHSPEC_CHANNEL(chspec)	wf_chspec_channel(chspec)
+#else
+#define CHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#endif
+#define CHSPEC_CHAN1(chspec)	((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT
+#define CHSPEC_CHAN2(chspec)	((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT
+#define CHSPEC_BAND(chspec)		((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec)	((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec)		((chspec) & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+#ifdef WL11ULB
+#define CHSPEC_IS2P5(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5)
+#define CHSPEC_IS5(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5)
+#define CHSPEC_IS10(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#else
+#define CHSPEC_IS2P5(chspec)	0
+#define CHSPEC_IS5(chspec)	0
+#define CHSPEC_IS10(chspec)	0
+#endif
+#define CHSPEC_IS20(chspec)	1
+#define CHSPEC_IS20_2G(chspec)	((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \
+								CHSPEC_IS2G(chspec))
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	0
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	0
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	0
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	0
+#endif
+#define BW_LE20(bw)		TRUE
+#define CHSPEC_ISLE20(chspec)	TRUE
+#else /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS2P5(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5)
+#define CHSPEC_IS5(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5)
+#define CHSPEC_IS10(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#define CHSPEC_IS20_5G(chspec)	((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \
+								CHSPEC_IS5G(chspec))
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#ifdef WL11ULB
+#define BW_LT20(bw)		(((bw) == WL_CHANSPEC_BW_2P5) || \
+				((bw) == WL_CHANSPEC_BW_5) || \
+				((bw) == WL_CHANSPEC_BW_10))
+#define CHSPEC_BW_LT20(chspec)	(BW_LT20(CHSPEC_BW(chspec)))
+/* This MACRO exists strictly to avoid abandons in existing code with the ULB feature and is
+ * in no way optimal to use. It should be replaced with CHSPEC_BW_LE() instead.
+ */
+#define BW_LE20(bw)		(((bw) == WL_CHANSPEC_BW_2P5) || \
+				((bw) == WL_CHANSPEC_BW_5) || \
+				((bw) == WL_CHANSPEC_BW_10) || \
+				((bw) == WL_CHANSPEC_BW_20))
+#define CHSPEC_ISLE20(chspec)	(BW_LE20(CHSPEC_BW(chspec)))
+
+#else /* WL11ULB */
+#define BW_LE20(bw)		((bw) == WL_CHANSPEC_BW_20)
+#define CHSPEC_ISLE20(chspec)	(CHSPEC_IS20(chspec))
+#endif /* WL11ULB */
+#endif /* !WL11N_20MHZONLY */
+
+#define BW_LE40(bw)		(BW_LE20(bw) || ((bw) == WL_CHANSPEC_BW_40))
+#define BW_LE80(bw)		(BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80))
+#define BW_LE160(bw)		(BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160))
+#define CHSPEC_BW_LE20(chspec)	(BW_LE20(CHSPEC_BW(chspec)))
+#define CHSPEC_IS5G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#define CHANSPEC_STR_LEN    20
+
+
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+	CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>). Comparisons can mostly be made
+ * as simple numeric comparisons, with the exception that 160MHz is the same BW as 80+80MHz
+ * but the two have different numeric values (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080).
+ *
+ * The LT/LE/GT/GE macros therefore first check whether both the chspec bandwidth and bw are
+ * 160MHz wide. If not, the plain numeric comparison is made.
+ */
+#ifdef WL11ULB
+#define CHSPEC_BW_GE(chspec, bw) \
+	(((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) >= (bw))) && \
+	(!(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5)))
+#else /* WL11ULB */
+#define CHSPEC_BW_GE(chspec, bw) \
+		((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+		((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+		(CHSPEC_BW(chspec) >= (bw)))
+#endif /* WL11ULB */
+
+#ifdef WL11ULB
+#define CHSPEC_BW_LE(chspec, bw) \
+	(((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) <= (bw))) || \
+	(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5))
+#else /* WL11ULB */
+#define CHSPEC_BW_LE(chspec, bw) \
+		((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+		((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+		(CHSPEC_BW(chspec) <= (bw)))
+#endif /* WL11ULB */
+
+#ifdef WL11ULB
+#define CHSPEC_BW_GT(chspec, bw) \
+	((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) > (bw))) && \
+	(CHSPEC_BW(chspec) != WL_CHANSPEC_BW_2P5))
+#else /* WL11ULB */
+#define CHSPEC_BW_GT(chspec, bw) \
+		(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+		((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+		(CHSPEC_BW(chspec) > (bw)))
+#endif /* WL11ULB */
+
+#ifdef WL11ULB
+#define CHSPEC_BW_LT(chspec, bw) \
+	((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) < (bw))) || \
+	((CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5)))
+#else /* WL11ULB */
+#define CHSPEC_BW_LT(chspec, bw) \
+		(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+		((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+		(CHSPEC_BW(chspec) < (bw)))
+#endif /* WL11ULB */
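+
+/* Example (illustrative): for a plain 160MHz chanspec,
+ * CHSPEC_BW_GE(chspec, WL_CHANSPEC_BW_8080) evaluates to TRUE even though
+ * numerically WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080, because both
+ * bandwidths are 160MHz wide.
+ */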
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
+#define WL_LCHANSPEC_CHAN_MASK		0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT		     0
+
+#define WL_LCHANSPEC_CTL_SB_MASK	0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT	     8
+#define WL_LCHANSPEC_CTL_SB_LOWER	0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER	0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE	0x0300
+
+#define WL_LCHANSPEC_BW_MASK		0x0C00
+#define WL_LCHANSPEC_BW_SHIFT		    10
+#define WL_LCHANSPEC_BW_10		0x0400
+#define WL_LCHANSPEC_BW_20		0x0800
+#define WL_LCHANSPEC_BW_40		0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK		0xf000
+#define WL_LCHANSPEC_BAND_SHIFT		    12
+#define WL_LCHANSPEC_BAND_5G		0x1000
+#define WL_LCHANSPEC_BAND_2G		0x2000
+
+#define LCHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec)	((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec)	((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec)	((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS10(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10)
+#define LCHSPEC_IS20(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb)  ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+	(chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+	WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+	WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
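+/* Illustrative examples (not part of the original header, assuming CH_MAX_2G_CHANNEL
+ * is 14 as elsewhere in this driver):
+ *
+ *	CH20MHZ_LCHSPEC(6)  == 0x2b06   20 MHz, sideband none, 2.4 GHz band, channel 6
+ *	CH20MHZ_LCHSPEC(36) == 0x1b24   20 MHz, sideband none, 5 GHz band, channel 36
+ */
+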
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq (MHz) = chan_factor * 500 kHz + chan_number * 5 MHz
+ */
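+
+/* Worked examples (illustrative, not part of the original header):
+ *	2.4 GHz channel 6:   4814 * 0.5 MHz + 6  * 5 MHz = 2407 + 30  = 2437 MHz
+ *	5 GHz   channel 36:  10000 * 0.5 MHz + 36 * 5 MHz = 5000 + 180 = 5180 MHz
+ */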
+
+/**
+ * Channel Factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G		4814	/* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G		10000	/* 5   GHz band, 5000 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G		8000	/* 4.9 GHz band for Japan */
+
+#define WLC_2G_25MHZ_OFFSET		5	/* 2.4GHz band channel offset */
+
+/**
+ *  Number of sub-band values for the specified MHz chanspec
+ */
+#define WF_NUM_SIDEBANDS_40MHZ   2
+#define WF_NUM_SIDEBANDS_80MHZ   4
+#define WF_NUM_SIDEBANDS_8080MHZ 4
+#define WF_NUM_SIDEBANDS_160MHZ  8
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec		chanspec format
+ * @param	buf		ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		Original chanspec in case of error
+ *
+ * @see		CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf);
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec		chanspec format
+ * @param	buf		ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		NULL in case of error
+ *
+ * @see		CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
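+
+/* Illustrative usage sketch (not part of the original header): convert a chanspec to
+ * its string form using a caller-provided buffer of at least CHANSPEC_STR_LEN bytes.
+ *
+ *	char buf[CHANSPEC_STR_LEN];
+ *	if (wf_chspec_ntoa(chspec, buf) != NULL)
+ *		printf("chanspec: %s\n", buf);
+ */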
+
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param	a     pointer to input string
+ *
+ * @return	a valid (non-zero) chanspec if successful, 0 otherwise
+ */
+extern chanspec_t wf_chspec_aton(const char *a);
+
+/**
+ * Verify the chanspec fields are valid.
+ *
+ * Verify the chanspec is using a legal set of field values, i.e. that the chanspec
+ * specifies a band, bw, ctl_sb and channel, and that the combination could be
+ * legal given some set of circumstances.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is a valid 802.11 channel
+ */
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+/**
+ * Return the primary (control) channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+/*
+ * Return the bandwidth string.
+ *
+ * This function returns the bandwidth string for the passed chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the bandwidth string
+ */
+extern const char *wf_chspec_to_bw_str(chanspec_t chspec);
+
+/**
+ * Return the primary (control) chanspec.
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHZ channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+/**
+ * Return the chanspec for the primary 40MHz of an 80MHz channel.
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	freq          frequency in MHz
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a channel number
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_mhz2channel(uint freq, uint start_factor);
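+
+/* Illustrative examples (values follow from the formula above; not verified against firmware):
+ *	wf_mhz2channel(2437, WF_CHAN_FACTOR_2_4_G) == 6    ((2437 - 2407) / 5)
+ *	wf_mhz2channel(5180, WF_CHAN_FACTOR_5_G)   == 36   ((5180 - 5000) / 5)
+ */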
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	channel       input channel number
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_channel2mhz(uint channel, uint start_factor);
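+
+/* Illustrative examples (values follow from the description above; not verified against firmware):
+ *	wf_channel2mhz(6, WF_CHAN_FACTOR_2_4_G)  == 2437   (2407 + 6 * 5)
+ *	wf_channel2mhz(36, WF_CHAN_FACTOR_5_G)   == 5180   (5000 + 36 * 5)
+ */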
+
+/**
+ * Returns the 80 MHz chanspec corresponding to the following input
+ * parameters
+ *
+ *	primary_channel - primary 20 MHz channel
+ *	center_channel  - center channel number of the 80 MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel);
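+
+/* Illustrative example (not part of the original header): the 80 MHz block centered on
+ * channel 42 covers the 20 MHz primaries 36/40/44/48, so wf_chspec_80(42, 36) yields an
+ * 80 MHz chanspec whose primary 20 MHz channel is 36, while a pairing such as
+ * wf_chspec_80(42, 52) is invalid and returns INVCHANSPEC.
+ */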
+
+/**
+ * Convert ctl chan and bw to chanspec
+ *
+ * @param	ctl_ch		channel
+ * @param	bw	        bandwidth
+ *
+ * @return	> 0 if successful or 0 otherwise
+ *
+ */
+extern uint16 wf_channel2chspec(uint ctl_ch, uint bw);
+
+extern uint wf_channel2freq(uint channel);
+extern uint wf_freq2channel(uint freq);
+
+/*
+ * Returns the 80+80 MHz chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - Primary 20 MHz channel
+ *    chan0_80MHz - center channel number of one frequency segment
+ *    chan1_80MHz - center channel number of the other frequency segment
+ *
+ * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
+	uint8 chan0_80Mhz, uint8 chan1_80Mhz);
+
+/*
+ * Returns the primary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80 MHz primary channel has to be retrieved
+ *
+ *  returns -1 if the provided chanspec is a 20/40 MHz chanspec
+ */
+extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
+
+/*
+ * Returns the secondary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80 MHz secondary channel has to be retrieved
+ *
+ *  returns -1 if the provided chanspec is a 20/40 MHz chanspec
+ */
+extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
+
+/*
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
+ */
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
+
+#ifdef WL11AC_80P80
+/*
+ * This function returns the center channel for the given chanspec.
+ * In case of an 80+80 chanspec it returns the primary 80 MHz center channel.
+ */
+extern uint8 wf_chspec_channel(chanspec_t chspec);
+#endif
+extern chanspec_t wf_channel_create_chspec_frm_opclass(uint8 opclass, uint8 channel);
+extern int wf_channel_create_opclass_frm_chspec(chanspec_t chspec);
+#endif	/* _bcmwifi_channels_h_ */
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
new file mode 100644
index 0000000..542055d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
@@ -0,0 +1,793 @@
+/*
+ * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmwifi_rates.h 612483 2016-01-14 03:44:27Z $
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+#define WL_RATESET_SZ_DSSS		4
+#define WL_RATESET_SZ_OFDM		8
+#define WL_RATESET_SZ_VHT_MCS	10
+#define WL_RATESET_SZ_VHT_MCS_P	12
+
+#if defined(WLPROPRIETARY_11N_RATES)
+#define WL_RATESET_SZ_HT_MCS	WL_RATESET_SZ_VHT_MCS
+#else
+#define WL_RATESET_SZ_HT_MCS	8
+#endif
+
+#define WL_RATESET_SZ_HT_IOCTL	8	/* MAC histogram, compatibility with wl utility */
+
+#define WL_TX_CHAINS_MAX		4
+
+#define WL_RATE_DISABLED		(-128) /* Power value corresponding to unsupported rate */
+
+/* Transmit channel bandwidths */
+typedef enum wl_tx_bw {
+	WL_TX_BW_20,
+	WL_TX_BW_40,
+	WL_TX_BW_80,
+	WL_TX_BW_20IN40,
+	WL_TX_BW_20IN80,
+	WL_TX_BW_40IN80,
+	WL_TX_BW_160,
+	WL_TX_BW_20IN160,
+	WL_TX_BW_40IN160,
+	WL_TX_BW_80IN160,
+	WL_TX_BW_ALL,
+	WL_TX_BW_8080,
+	WL_TX_BW_8080CHAN2,
+	WL_TX_BW_20IN8080,
+	WL_TX_BW_40IN8080,
+	WL_TX_BW_80IN8080,
+	WL_TX_BW_2P5,
+	WL_TX_BW_5,
+	WL_TX_BW_10
+} wl_tx_bw_t;
+
+
+/*
+ * Transmit modes.
+ * Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed
+ */
+typedef enum wl_tx_mode {
+	WL_TX_MODE_NONE,
+	WL_TX_MODE_STBC,
+	WL_TX_MODE_CDD,
+	WL_TX_MODE_TXBF,
+	WL_NUM_TX_MODES
+} wl_tx_mode_t;
+
+
+/* Number of transmit chains */
+typedef enum wl_tx_chains {
+	WL_TX_CHAINS_1 = 1,
+	WL_TX_CHAINS_2,
+	WL_TX_CHAINS_3,
+	WL_TX_CHAINS_4
+} wl_tx_chains_t;
+
+
+/* Number of transmit streams */
+typedef enum wl_tx_nss {
+	WL_TX_NSS_1 = 1,
+	WL_TX_NSS_2,
+	WL_TX_NSS_3,
+	WL_TX_NSS_4
+} wl_tx_nss_t;
+
+
+/* This enum maps each rate to a CLM index */
+
+typedef enum clm_rates {
+	/************
+	* 1 chain  *
+	************
+	*/
+
+	/* 1 Stream */
+	WL_RATE_1X1_DSSS_1            = 0,
+	WL_RATE_1X1_DSSS_2            = 1,
+	WL_RATE_1X1_DSSS_5_5          = 2,
+	WL_RATE_1X1_DSSS_11           = 3,
+
+	WL_RATE_1X1_OFDM_6            = 4,
+	WL_RATE_1X1_OFDM_9            = 5,
+	WL_RATE_1X1_OFDM_12           = 6,
+	WL_RATE_1X1_OFDM_18           = 7,
+	WL_RATE_1X1_OFDM_24           = 8,
+	WL_RATE_1X1_OFDM_36           = 9,
+	WL_RATE_1X1_OFDM_48           = 10,
+	WL_RATE_1X1_OFDM_54           = 11,
+
+	WL_RATE_1X1_MCS0              = 12,
+	WL_RATE_1X1_MCS1              = 13,
+	WL_RATE_1X1_MCS2              = 14,
+	WL_RATE_1X1_MCS3              = 15,
+	WL_RATE_1X1_MCS4              = 16,
+	WL_RATE_1X1_MCS5              = 17,
+	WL_RATE_1X1_MCS6              = 18,
+	WL_RATE_1X1_MCS7              = 19,
+	WL_RATE_P_1X1_MCS87           = 20,
+	WL_RATE_P_1X1_MCS88           = 21,
+
+	WL_RATE_1X1_VHT0SS1           = 12,
+	WL_RATE_1X1_VHT1SS1           = 13,
+	WL_RATE_1X1_VHT2SS1           = 14,
+	WL_RATE_1X1_VHT3SS1           = 15,
+	WL_RATE_1X1_VHT4SS1           = 16,
+	WL_RATE_1X1_VHT5SS1           = 17,
+	WL_RATE_1X1_VHT6SS1           = 18,
+	WL_RATE_1X1_VHT7SS1           = 19,
+	WL_RATE_1X1_VHT8SS1           = 20,
+	WL_RATE_1X1_VHT9SS1           = 21,
+	WL_RATE_P_1X1_VHT10SS1        = 22,
+	WL_RATE_P_1X1_VHT11SS1        = 23,
+
+
+	/************
+	* 2 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 1 */
+	WL_RATE_1X2_DSSS_1            = 24,
+	WL_RATE_1X2_DSSS_2            = 25,
+	WL_RATE_1X2_DSSS_5_5          = 26,
+	WL_RATE_1X2_DSSS_11           = 27,
+
+	WL_RATE_1X2_CDD_OFDM_6        = 28,
+	WL_RATE_1X2_CDD_OFDM_9        = 29,
+	WL_RATE_1X2_CDD_OFDM_12       = 30,
+	WL_RATE_1X2_CDD_OFDM_18       = 31,
+	WL_RATE_1X2_CDD_OFDM_24       = 32,
+	WL_RATE_1X2_CDD_OFDM_36       = 33,
+	WL_RATE_1X2_CDD_OFDM_48       = 34,
+	WL_RATE_1X2_CDD_OFDM_54       = 35,
+
+	WL_RATE_1X2_CDD_MCS0          = 36,
+	WL_RATE_1X2_CDD_MCS1          = 37,
+	WL_RATE_1X2_CDD_MCS2          = 38,
+	WL_RATE_1X2_CDD_MCS3          = 39,
+	WL_RATE_1X2_CDD_MCS4          = 40,
+	WL_RATE_1X2_CDD_MCS5          = 41,
+	WL_RATE_1X2_CDD_MCS6          = 42,
+	WL_RATE_1X2_CDD_MCS7          = 43,
+	WL_RATE_P_1X2_CDD_MCS87       = 44,
+	WL_RATE_P_1X2_CDD_MCS88       = 45,
+
+	WL_RATE_1X2_VHT0SS1           = 36,
+	WL_RATE_1X2_VHT1SS1           = 37,
+	WL_RATE_1X2_VHT2SS1           = 38,
+	WL_RATE_1X2_VHT3SS1           = 39,
+	WL_RATE_1X2_VHT4SS1           = 40,
+	WL_RATE_1X2_VHT5SS1           = 41,
+	WL_RATE_1X2_VHT6SS1           = 42,
+	WL_RATE_1X2_VHT7SS1           = 43,
+	WL_RATE_1X2_VHT8SS1           = 44,
+	WL_RATE_1X2_VHT9SS1           = 45,
+	WL_RATE_P_1X2_VHT10SS1        = 46,
+	WL_RATE_P_1X2_VHT11SS1        = 47,
+
+	/* 2 Streams */
+	WL_RATE_2X2_STBC_MCS0         = 48,
+	WL_RATE_2X2_STBC_MCS1         = 49,
+	WL_RATE_2X2_STBC_MCS2         = 50,
+	WL_RATE_2X2_STBC_MCS3         = 51,
+	WL_RATE_2X2_STBC_MCS4         = 52,
+	WL_RATE_2X2_STBC_MCS5         = 53,
+	WL_RATE_2X2_STBC_MCS6         = 54,
+	WL_RATE_2X2_STBC_MCS7         = 55,
+	WL_RATE_P_2X2_STBC_MCS87      = 56,
+	WL_RATE_P_2X2_STBC_MCS88      = 57,
+
+	WL_RATE_2X2_STBC_VHT0SS1      = 48,
+	WL_RATE_2X2_STBC_VHT1SS1      = 49,
+	WL_RATE_2X2_STBC_VHT2SS1      = 50,
+	WL_RATE_2X2_STBC_VHT3SS1      = 51,
+	WL_RATE_2X2_STBC_VHT4SS1      = 52,
+	WL_RATE_2X2_STBC_VHT5SS1      = 53,
+	WL_RATE_2X2_STBC_VHT6SS1      = 54,
+	WL_RATE_2X2_STBC_VHT7SS1      = 55,
+	WL_RATE_2X2_STBC_VHT8SS1      = 56,
+	WL_RATE_2X2_STBC_VHT9SS1      = 57,
+	WL_RATE_P_2X2_STBC_VHT10SS1   = 58,
+	WL_RATE_P_2X2_STBC_VHT11SS1   = 59,
+
+	WL_RATE_2X2_SDM_MCS8          = 60,
+	WL_RATE_2X2_SDM_MCS9          = 61,
+	WL_RATE_2X2_SDM_MCS10         = 62,
+	WL_RATE_2X2_SDM_MCS11         = 63,
+	WL_RATE_2X2_SDM_MCS12         = 64,
+	WL_RATE_2X2_SDM_MCS13         = 65,
+	WL_RATE_2X2_SDM_MCS14         = 66,
+	WL_RATE_2X2_SDM_MCS15         = 67,
+	WL_RATE_P_2X2_SDM_MCS99       = 68,
+	WL_RATE_P_2X2_SDM_MCS100      = 69,
+
+	WL_RATE_2X2_VHT0SS2           = 60,
+	WL_RATE_2X2_VHT1SS2           = 61,
+	WL_RATE_2X2_VHT2SS2           = 62,
+	WL_RATE_2X2_VHT3SS2           = 63,
+	WL_RATE_2X2_VHT4SS2           = 64,
+	WL_RATE_2X2_VHT5SS2           = 65,
+	WL_RATE_2X2_VHT6SS2           = 66,
+	WL_RATE_2X2_VHT7SS2           = 67,
+	WL_RATE_2X2_VHT8SS2           = 68,
+	WL_RATE_2X2_VHT9SS2           = 69,
+	WL_RATE_P_2X2_VHT10SS2        = 70,
+	WL_RATE_P_2X2_VHT11SS2        = 71,
+
+	/****************************
+	 * TX Beamforming, 2 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 1 */
+	WL_RATE_1X2_TXBF_OFDM_6       = 72,
+	WL_RATE_1X2_TXBF_OFDM_9       = 73,
+	WL_RATE_1X2_TXBF_OFDM_12      = 74,
+	WL_RATE_1X2_TXBF_OFDM_18      = 75,
+	WL_RATE_1X2_TXBF_OFDM_24      = 76,
+	WL_RATE_1X2_TXBF_OFDM_36      = 77,
+	WL_RATE_1X2_TXBF_OFDM_48      = 78,
+	WL_RATE_1X2_TXBF_OFDM_54      = 79,
+
+	WL_RATE_1X2_TXBF_MCS0         = 80,
+	WL_RATE_1X2_TXBF_MCS1         = 81,
+	WL_RATE_1X2_TXBF_MCS2         = 82,
+	WL_RATE_1X2_TXBF_MCS3         = 83,
+	WL_RATE_1X2_TXBF_MCS4         = 84,
+	WL_RATE_1X2_TXBF_MCS5         = 85,
+	WL_RATE_1X2_TXBF_MCS6         = 86,
+	WL_RATE_1X2_TXBF_MCS7         = 87,
+	WL_RATE_P_1X2_TXBF_MCS87      = 88,
+	WL_RATE_P_1X2_TXBF_MCS88      = 89,
+
+	WL_RATE_1X2_TXBF_VHT0SS1      = 80,
+	WL_RATE_1X2_TXBF_VHT1SS1      = 81,
+	WL_RATE_1X2_TXBF_VHT2SS1      = 82,
+	WL_RATE_1X2_TXBF_VHT3SS1      = 83,
+	WL_RATE_1X2_TXBF_VHT4SS1      = 84,
+	WL_RATE_1X2_TXBF_VHT5SS1      = 85,
+	WL_RATE_1X2_TXBF_VHT6SS1      = 86,
+	WL_RATE_1X2_TXBF_VHT7SS1      = 87,
+	WL_RATE_1X2_TXBF_VHT8SS1      = 88,
+	WL_RATE_1X2_TXBF_VHT9SS1      = 89,
+	WL_RATE_P_1X2_TXBF_VHT10SS1   = 90,
+	WL_RATE_P_1X2_TXBF_VHT11SS1   = 91,
+
+	/* 2 Streams */
+	WL_RATE_2X2_TXBF_SDM_MCS8     = 92,
+	WL_RATE_2X2_TXBF_SDM_MCS9     = 93,
+	WL_RATE_2X2_TXBF_SDM_MCS10    = 94,
+	WL_RATE_2X2_TXBF_SDM_MCS11    = 95,
+	WL_RATE_2X2_TXBF_SDM_MCS12    = 96,
+	WL_RATE_2X2_TXBF_SDM_MCS13    = 97,
+	WL_RATE_2X2_TXBF_SDM_MCS14    = 98,
+	WL_RATE_2X2_TXBF_SDM_MCS15    = 99,
+	WL_RATE_P_2X2_TXBF_SDM_MCS99  = 100,
+	WL_RATE_P_2X2_TXBF_SDM_MCS100 = 101,
+
+	WL_RATE_2X2_TXBF_VHT0SS2      = 92,
+	WL_RATE_2X2_TXBF_VHT1SS2      = 93,
+	WL_RATE_2X2_TXBF_VHT2SS2      = 94,
+	WL_RATE_2X2_TXBF_VHT3SS2      = 95,
+	WL_RATE_2X2_TXBF_VHT4SS2      = 96,
+	WL_RATE_2X2_TXBF_VHT5SS2      = 97,
+	WL_RATE_2X2_TXBF_VHT6SS2      = 98,
+	WL_RATE_2X2_TXBF_VHT7SS2      = 99,
+	WL_RATE_2X2_TXBF_VHT8SS2      = 100,
+	WL_RATE_2X2_TXBF_VHT9SS2      = 101,
+	WL_RATE_P_2X2_TXBF_VHT10SS2   = 102,
+	WL_RATE_P_2X2_TXBF_VHT11SS2   = 103,
+
+
+	/************
+	* 3 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 2 */
+	WL_RATE_1X3_DSSS_1            = 104,
+	WL_RATE_1X3_DSSS_2            = 105,
+	WL_RATE_1X3_DSSS_5_5          = 106,
+	WL_RATE_1X3_DSSS_11           = 107,
+
+	WL_RATE_1X3_CDD_OFDM_6        = 108,
+	WL_RATE_1X3_CDD_OFDM_9        = 109,
+	WL_RATE_1X3_CDD_OFDM_12       = 110,
+	WL_RATE_1X3_CDD_OFDM_18       = 111,
+	WL_RATE_1X3_CDD_OFDM_24       = 112,
+	WL_RATE_1X3_CDD_OFDM_36       = 113,
+	WL_RATE_1X3_CDD_OFDM_48       = 114,
+	WL_RATE_1X3_CDD_OFDM_54       = 115,
+
+	WL_RATE_1X3_CDD_MCS0          = 116,
+	WL_RATE_1X3_CDD_MCS1          = 117,
+	WL_RATE_1X3_CDD_MCS2          = 118,
+	WL_RATE_1X3_CDD_MCS3          = 119,
+	WL_RATE_1X3_CDD_MCS4          = 120,
+	WL_RATE_1X3_CDD_MCS5          = 121,
+	WL_RATE_1X3_CDD_MCS6          = 122,
+	WL_RATE_1X3_CDD_MCS7          = 123,
+	WL_RATE_P_1X3_CDD_MCS87       = 124,
+	WL_RATE_P_1X3_CDD_MCS88       = 125,
+
+	WL_RATE_1X3_VHT0SS1           = 116,
+	WL_RATE_1X3_VHT1SS1           = 117,
+	WL_RATE_1X3_VHT2SS1           = 118,
+	WL_RATE_1X3_VHT3SS1           = 119,
+	WL_RATE_1X3_VHT4SS1           = 120,
+	WL_RATE_1X3_VHT5SS1           = 121,
+	WL_RATE_1X3_VHT6SS1           = 122,
+	WL_RATE_1X3_VHT7SS1           = 123,
+	WL_RATE_1X3_VHT8SS1           = 124,
+	WL_RATE_1X3_VHT9SS1           = 125,
+	WL_RATE_P_1X3_VHT10SS1        = 126,
+	WL_RATE_P_1X3_VHT11SS1        = 127,
+
+	/* 2 Streams expanded + 1 */
+	WL_RATE_2X3_STBC_MCS0         = 128,
+	WL_RATE_2X3_STBC_MCS1         = 129,
+	WL_RATE_2X3_STBC_MCS2         = 130,
+	WL_RATE_2X3_STBC_MCS3         = 131,
+	WL_RATE_2X3_STBC_MCS4         = 132,
+	WL_RATE_2X3_STBC_MCS5         = 133,
+	WL_RATE_2X3_STBC_MCS6         = 134,
+	WL_RATE_2X3_STBC_MCS7         = 135,
+	WL_RATE_P_2X3_STBC_MCS87      = 136,
+	WL_RATE_P_2X3_STBC_MCS88      = 137,
+
+	WL_RATE_2X3_STBC_VHT0SS1      = 128,
+	WL_RATE_2X3_STBC_VHT1SS1      = 129,
+	WL_RATE_2X3_STBC_VHT2SS1      = 130,
+	WL_RATE_2X3_STBC_VHT3SS1      = 131,
+	WL_RATE_2X3_STBC_VHT4SS1      = 132,
+	WL_RATE_2X3_STBC_VHT5SS1      = 133,
+	WL_RATE_2X3_STBC_VHT6SS1      = 134,
+	WL_RATE_2X3_STBC_VHT7SS1      = 135,
+	WL_RATE_2X3_STBC_VHT8SS1      = 136,
+	WL_RATE_2X3_STBC_VHT9SS1      = 137,
+	WL_RATE_P_2X3_STBC_VHT10SS1   = 138,
+	WL_RATE_P_2X3_STBC_VHT11SS1   = 139,
+
+	WL_RATE_2X3_SDM_MCS8          = 140,
+	WL_RATE_2X3_SDM_MCS9          = 141,
+	WL_RATE_2X3_SDM_MCS10         = 142,
+	WL_RATE_2X3_SDM_MCS11         = 143,
+	WL_RATE_2X3_SDM_MCS12         = 144,
+	WL_RATE_2X3_SDM_MCS13         = 145,
+	WL_RATE_2X3_SDM_MCS14         = 146,
+	WL_RATE_2X3_SDM_MCS15         = 147,
+	WL_RATE_P_2X3_SDM_MCS99       = 148,
+	WL_RATE_P_2X3_SDM_MCS100      = 149,
+
+	WL_RATE_2X3_VHT0SS2           = 140,
+	WL_RATE_2X3_VHT1SS2           = 141,
+	WL_RATE_2X3_VHT2SS2           = 142,
+	WL_RATE_2X3_VHT3SS2           = 143,
+	WL_RATE_2X3_VHT4SS2           = 144,
+	WL_RATE_2X3_VHT5SS2           = 145,
+	WL_RATE_2X3_VHT6SS2           = 146,
+	WL_RATE_2X3_VHT7SS2           = 147,
+	WL_RATE_2X3_VHT8SS2           = 148,
+	WL_RATE_2X3_VHT9SS2           = 149,
+	WL_RATE_P_2X3_VHT10SS2        = 150,
+	WL_RATE_P_2X3_VHT11SS2        = 151,
+
+	/* 3 Streams */
+	WL_RATE_3X3_SDM_MCS16         = 152,
+	WL_RATE_3X3_SDM_MCS17         = 153,
+	WL_RATE_3X3_SDM_MCS18         = 154,
+	WL_RATE_3X3_SDM_MCS19         = 155,
+	WL_RATE_3X3_SDM_MCS20         = 156,
+	WL_RATE_3X3_SDM_MCS21         = 157,
+	WL_RATE_3X3_SDM_MCS22         = 158,
+	WL_RATE_3X3_SDM_MCS23         = 159,
+	WL_RATE_P_3X3_SDM_MCS101      = 160,
+	WL_RATE_P_3X3_SDM_MCS102      = 161,
+
+	WL_RATE_3X3_VHT0SS3           = 152,
+	WL_RATE_3X3_VHT1SS3           = 153,
+	WL_RATE_3X3_VHT2SS3           = 154,
+	WL_RATE_3X3_VHT3SS3           = 155,
+	WL_RATE_3X3_VHT4SS3           = 156,
+	WL_RATE_3X3_VHT5SS3           = 157,
+	WL_RATE_3X3_VHT6SS3           = 158,
+	WL_RATE_3X3_VHT7SS3           = 159,
+	WL_RATE_3X3_VHT8SS3           = 160,
+	WL_RATE_3X3_VHT9SS3           = 161,
+	WL_RATE_P_3X3_VHT10SS3        = 162,
+	WL_RATE_P_3X3_VHT11SS3        = 163,
+
+
+	/****************************
+	 * TX Beamforming, 3 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 2 */
+	WL_RATE_1X3_TXBF_OFDM_6       = 164,
+	WL_RATE_1X3_TXBF_OFDM_9       = 165,
+	WL_RATE_1X3_TXBF_OFDM_12      = 166,
+	WL_RATE_1X3_TXBF_OFDM_18      = 167,
+	WL_RATE_1X3_TXBF_OFDM_24      = 168,
+	WL_RATE_1X3_TXBF_OFDM_36      = 169,
+	WL_RATE_1X3_TXBF_OFDM_48      = 170,
+	WL_RATE_1X3_TXBF_OFDM_54      = 171,
+
+	WL_RATE_1X3_TXBF_MCS0         = 172,
+	WL_RATE_1X3_TXBF_MCS1         = 173,
+	WL_RATE_1X3_TXBF_MCS2         = 174,
+	WL_RATE_1X3_TXBF_MCS3         = 175,
+	WL_RATE_1X3_TXBF_MCS4         = 176,
+	WL_RATE_1X3_TXBF_MCS5         = 177,
+	WL_RATE_1X3_TXBF_MCS6         = 178,
+	WL_RATE_1X3_TXBF_MCS7         = 179,
+	WL_RATE_P_1X3_TXBF_MCS87      = 180,
+	WL_RATE_P_1X3_TXBF_MCS88      = 181,
+
+	WL_RATE_1X3_TXBF_VHT0SS1      = 172,
+	WL_RATE_1X3_TXBF_VHT1SS1      = 173,
+	WL_RATE_1X3_TXBF_VHT2SS1      = 174,
+	WL_RATE_1X3_TXBF_VHT3SS1      = 175,
+	WL_RATE_1X3_TXBF_VHT4SS1      = 176,
+	WL_RATE_1X3_TXBF_VHT5SS1      = 177,
+	WL_RATE_1X3_TXBF_VHT6SS1      = 178,
+	WL_RATE_1X3_TXBF_VHT7SS1      = 179,
+	WL_RATE_1X3_TXBF_VHT8SS1      = 180,
+	WL_RATE_1X3_TXBF_VHT9SS1      = 181,
+	WL_RATE_P_1X3_TXBF_VHT10SS1   = 182,
+	WL_RATE_P_1X3_TXBF_VHT11SS1   = 183,
+
+	/* 2 Streams expanded + 1 */
+	WL_RATE_2X3_TXBF_SDM_MCS8     = 184,
+	WL_RATE_2X3_TXBF_SDM_MCS9     = 185,
+	WL_RATE_2X3_TXBF_SDM_MCS10    = 186,
+	WL_RATE_2X3_TXBF_SDM_MCS11    = 187,
+	WL_RATE_2X3_TXBF_SDM_MCS12    = 188,
+	WL_RATE_2X3_TXBF_SDM_MCS13    = 189,
+	WL_RATE_2X3_TXBF_SDM_MCS14    = 190,
+	WL_RATE_2X3_TXBF_SDM_MCS15    = 191,
+	WL_RATE_P_2X3_TXBF_SDM_MCS99  = 192,
+	WL_RATE_P_2X3_TXBF_SDM_MCS100 = 193,
+
+	WL_RATE_2X3_TXBF_VHT0SS2      = 184,
+	WL_RATE_2X3_TXBF_VHT1SS2      = 185,
+	WL_RATE_2X3_TXBF_VHT2SS2      = 186,
+	WL_RATE_2X3_TXBF_VHT3SS2      = 187,
+	WL_RATE_2X3_TXBF_VHT4SS2      = 188,
+	WL_RATE_2X3_TXBF_VHT5SS2      = 189,
+	WL_RATE_2X3_TXBF_VHT6SS2      = 190,
+	WL_RATE_2X3_TXBF_VHT7SS2      = 191,
+	WL_RATE_2X3_TXBF_VHT8SS2      = 192,
+	WL_RATE_2X3_TXBF_VHT9SS2      = 193,
+	WL_RATE_P_2X3_TXBF_VHT10SS2   = 194,
+	WL_RATE_P_2X3_TXBF_VHT11SS2   = 195,
+
+	/* 3 Streams */
+	WL_RATE_3X3_TXBF_SDM_MCS16    = 196,
+	WL_RATE_3X3_TXBF_SDM_MCS17    = 197,
+	WL_RATE_3X3_TXBF_SDM_MCS18    = 198,
+	WL_RATE_3X3_TXBF_SDM_MCS19    = 199,
+	WL_RATE_3X3_TXBF_SDM_MCS20    = 200,
+	WL_RATE_3X3_TXBF_SDM_MCS21    = 201,
+	WL_RATE_3X3_TXBF_SDM_MCS22    = 202,
+	WL_RATE_3X3_TXBF_SDM_MCS23    = 203,
+	WL_RATE_P_3X3_TXBF_SDM_MCS101 = 204,
+	WL_RATE_P_3X3_TXBF_SDM_MCS102 = 205,
+
+	WL_RATE_3X3_TXBF_VHT0SS3      = 196,
+	WL_RATE_3X3_TXBF_VHT1SS3      = 197,
+	WL_RATE_3X3_TXBF_VHT2SS3      = 198,
+	WL_RATE_3X3_TXBF_VHT3SS3      = 199,
+	WL_RATE_3X3_TXBF_VHT4SS3      = 200,
+	WL_RATE_3X3_TXBF_VHT5SS3      = 201,
+	WL_RATE_3X3_TXBF_VHT6SS3      = 202,
+	WL_RATE_3X3_TXBF_VHT7SS3      = 203,
+	WL_RATE_3X3_TXBF_VHT8SS3      = 204,
+	WL_RATE_3X3_TXBF_VHT9SS3      = 205,
+	WL_RATE_P_3X3_TXBF_VHT10SS3   = 206,
+	WL_RATE_P_3X3_TXBF_VHT11SS3   = 207,
+
+
+	/************
+	* 4 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 3 */
+	WL_RATE_1X4_DSSS_1            = 208,
+	WL_RATE_1X4_DSSS_2            = 209,
+	WL_RATE_1X4_DSSS_5_5          = 210,
+	WL_RATE_1X4_DSSS_11           = 211,
+
+	WL_RATE_1X4_CDD_OFDM_6        = 212,
+	WL_RATE_1X4_CDD_OFDM_9        = 213,
+	WL_RATE_1X4_CDD_OFDM_12       = 214,
+	WL_RATE_1X4_CDD_OFDM_18       = 215,
+	WL_RATE_1X4_CDD_OFDM_24       = 216,
+	WL_RATE_1X4_CDD_OFDM_36       = 217,
+	WL_RATE_1X4_CDD_OFDM_48       = 218,
+	WL_RATE_1X4_CDD_OFDM_54       = 219,
+
+	WL_RATE_1X4_CDD_MCS0          = 220,
+	WL_RATE_1X4_CDD_MCS1          = 221,
+	WL_RATE_1X4_CDD_MCS2          = 222,
+	WL_RATE_1X4_CDD_MCS3          = 223,
+	WL_RATE_1X4_CDD_MCS4          = 224,
+	WL_RATE_1X4_CDD_MCS5          = 225,
+	WL_RATE_1X4_CDD_MCS6          = 226,
+	WL_RATE_1X4_CDD_MCS7          = 227,
+	WL_RATE_P_1X4_CDD_MCS87       = 228,
+	WL_RATE_P_1X4_CDD_MCS88       = 229,
+
+	WL_RATE_1X4_VHT0SS1           = 220,
+	WL_RATE_1X4_VHT1SS1           = 221,
+	WL_RATE_1X4_VHT2SS1           = 222,
+	WL_RATE_1X4_VHT3SS1           = 223,
+	WL_RATE_1X4_VHT4SS1           = 224,
+	WL_RATE_1X4_VHT5SS1           = 225,
+	WL_RATE_1X4_VHT6SS1           = 226,
+	WL_RATE_1X4_VHT7SS1           = 227,
+	WL_RATE_1X4_VHT8SS1           = 228,
+	WL_RATE_1X4_VHT9SS1           = 229,
+	WL_RATE_P_1X4_VHT10SS1        = 230,
+	WL_RATE_P_1X4_VHT11SS1        = 231,
+
+	/* 2 Streams expanded + 2 */
+	WL_RATE_2X4_STBC_MCS0         = 232,
+	WL_RATE_2X4_STBC_MCS1         = 233,
+	WL_RATE_2X4_STBC_MCS2         = 234,
+	WL_RATE_2X4_STBC_MCS3         = 235,
+	WL_RATE_2X4_STBC_MCS4         = 236,
+	WL_RATE_2X4_STBC_MCS5         = 237,
+	WL_RATE_2X4_STBC_MCS6         = 238,
+	WL_RATE_2X4_STBC_MCS7         = 239,
+	WL_RATE_P_2X4_STBC_MCS87      = 240,
+	WL_RATE_P_2X4_STBC_MCS88      = 241,
+
+	WL_RATE_2X4_STBC_VHT0SS1      = 232,
+	WL_RATE_2X4_STBC_VHT1SS1      = 233,
+	WL_RATE_2X4_STBC_VHT2SS1      = 234,
+	WL_RATE_2X4_STBC_VHT3SS1      = 235,
+	WL_RATE_2X4_STBC_VHT4SS1      = 236,
+	WL_RATE_2X4_STBC_VHT5SS1      = 237,
+	WL_RATE_2X4_STBC_VHT6SS1      = 238,
+	WL_RATE_2X4_STBC_VHT7SS1      = 239,
+	WL_RATE_2X4_STBC_VHT8SS1      = 240,
+	WL_RATE_2X4_STBC_VHT9SS1      = 241,
+	WL_RATE_P_2X4_STBC_VHT10SS1   = 242,
+	WL_RATE_P_2X4_STBC_VHT11SS1   = 243,
+
+	WL_RATE_2X4_SDM_MCS8          = 244,
+	WL_RATE_2X4_SDM_MCS9          = 245,
+	WL_RATE_2X4_SDM_MCS10         = 246,
+	WL_RATE_2X4_SDM_MCS11         = 247,
+	WL_RATE_2X4_SDM_MCS12         = 248,
+	WL_RATE_2X4_SDM_MCS13         = 249,
+	WL_RATE_2X4_SDM_MCS14         = 250,
+	WL_RATE_2X4_SDM_MCS15         = 251,
+	WL_RATE_P_2X4_SDM_MCS99       = 252,
+	WL_RATE_P_2X4_SDM_MCS100      = 253,
+
+	WL_RATE_2X4_VHT0SS2           = 244,
+	WL_RATE_2X4_VHT1SS2           = 245,
+	WL_RATE_2X4_VHT2SS2           = 246,
+	WL_RATE_2X4_VHT3SS2           = 247,
+	WL_RATE_2X4_VHT4SS2           = 248,
+	WL_RATE_2X4_VHT5SS2           = 249,
+	WL_RATE_2X4_VHT6SS2           = 250,
+	WL_RATE_2X4_VHT7SS2           = 251,
+	WL_RATE_2X4_VHT8SS2           = 252,
+	WL_RATE_2X4_VHT9SS2           = 253,
+	WL_RATE_P_2X4_VHT10SS2        = 254,
+	WL_RATE_P_2X4_VHT11SS2        = 255,
+
+	/* 3 Streams expanded + 1 */
+	WL_RATE_3X4_SDM_MCS16         = 256,
+	WL_RATE_3X4_SDM_MCS17         = 257,
+	WL_RATE_3X4_SDM_MCS18         = 258,
+	WL_RATE_3X4_SDM_MCS19         = 259,
+	WL_RATE_3X4_SDM_MCS20         = 260,
+	WL_RATE_3X4_SDM_MCS21         = 261,
+	WL_RATE_3X4_SDM_MCS22         = 262,
+	WL_RATE_3X4_SDM_MCS23         = 263,
+	WL_RATE_P_3X4_SDM_MCS101      = 264,
+	WL_RATE_P_3X4_SDM_MCS102      = 265,
+
+	WL_RATE_3X4_VHT0SS3           = 256,
+	WL_RATE_3X4_VHT1SS3           = 257,
+	WL_RATE_3X4_VHT2SS3           = 258,
+	WL_RATE_3X4_VHT3SS3           = 259,
+	WL_RATE_3X4_VHT4SS3           = 260,
+	WL_RATE_3X4_VHT5SS3           = 261,
+	WL_RATE_3X4_VHT6SS3           = 262,
+	WL_RATE_3X4_VHT7SS3           = 263,
+	WL_RATE_3X4_VHT8SS3           = 264,
+	WL_RATE_3X4_VHT9SS3           = 265,
+	WL_RATE_P_3X4_VHT10SS3        = 266,
+	WL_RATE_P_3X4_VHT11SS3        = 267,
+
+
+	/* 4 Streams */
+	WL_RATE_4X4_SDM_MCS24         = 268,
+	WL_RATE_4X4_SDM_MCS25         = 269,
+	WL_RATE_4X4_SDM_MCS26         = 270,
+	WL_RATE_4X4_SDM_MCS27         = 271,
+	WL_RATE_4X4_SDM_MCS28         = 272,
+	WL_RATE_4X4_SDM_MCS29         = 273,
+	WL_RATE_4X4_SDM_MCS30         = 274,
+	WL_RATE_4X4_SDM_MCS31         = 275,
+	WL_RATE_P_4X4_SDM_MCS103      = 276,
+	WL_RATE_P_4X4_SDM_MCS104      = 277,
+
+	WL_RATE_4X4_VHT0SS4           = 268,
+	WL_RATE_4X4_VHT1SS4           = 269,
+	WL_RATE_4X4_VHT2SS4           = 270,
+	WL_RATE_4X4_VHT3SS4           = 271,
+	WL_RATE_4X4_VHT4SS4           = 272,
+	WL_RATE_4X4_VHT5SS4           = 273,
+	WL_RATE_4X4_VHT6SS4           = 274,
+	WL_RATE_4X4_VHT7SS4           = 275,
+	WL_RATE_4X4_VHT8SS4           = 276,
+	WL_RATE_4X4_VHT9SS4           = 277,
+	WL_RATE_P_4X4_VHT10SS4        = 278,
+	WL_RATE_P_4X4_VHT11SS4        = 279,
+
+
+	/****************************
+	 * TX Beamforming, 4 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 3 */
+	WL_RATE_1X4_TXBF_OFDM_6       = 280,
+	WL_RATE_1X4_TXBF_OFDM_9       = 281,
+	WL_RATE_1X4_TXBF_OFDM_12      = 282,
+	WL_RATE_1X4_TXBF_OFDM_18      = 283,
+	WL_RATE_1X4_TXBF_OFDM_24      = 284,
+	WL_RATE_1X4_TXBF_OFDM_36      = 285,
+	WL_RATE_1X4_TXBF_OFDM_48      = 286,
+	WL_RATE_1X4_TXBF_OFDM_54      = 287,
+
+	WL_RATE_1X4_TXBF_MCS0         = 288,
+	WL_RATE_1X4_TXBF_MCS1         = 289,
+	WL_RATE_1X4_TXBF_MCS2         = 290,
+	WL_RATE_1X4_TXBF_MCS3         = 291,
+	WL_RATE_1X4_TXBF_MCS4         = 292,
+	WL_RATE_1X4_TXBF_MCS5         = 293,
+	WL_RATE_1X4_TXBF_MCS6         = 294,
+	WL_RATE_1X4_TXBF_MCS7         = 295,
+	WL_RATE_P_1X4_TXBF_MCS87      = 296,
+	WL_RATE_P_1X4_TXBF_MCS88      = 297,
+
+	WL_RATE_1X4_TXBF_VHT0SS1      = 288,
+	WL_RATE_1X4_TXBF_VHT1SS1      = 289,
+	WL_RATE_1X4_TXBF_VHT2SS1      = 290,
+	WL_RATE_1X4_TXBF_VHT3SS1      = 291,
+	WL_RATE_1X4_TXBF_VHT4SS1      = 292,
+	WL_RATE_1X4_TXBF_VHT5SS1      = 293,
+	WL_RATE_1X4_TXBF_VHT6SS1      = 294,
+	WL_RATE_1X4_TXBF_VHT7SS1      = 295,
+	WL_RATE_1X4_TXBF_VHT8SS1      = 296,
+	WL_RATE_1X4_TXBF_VHT9SS1      = 297,
+	WL_RATE_P_1X4_TXBF_VHT10SS1   = 298,
+	WL_RATE_P_1X4_TXBF_VHT11SS1   = 299,
+
+	/* 2 Streams expanded + 2 */
+	WL_RATE_2X4_TXBF_SDM_MCS8     = 300,
+	WL_RATE_2X4_TXBF_SDM_MCS9     = 301,
+	WL_RATE_2X4_TXBF_SDM_MCS10    = 302,
+	WL_RATE_2X4_TXBF_SDM_MCS11    = 303,
+	WL_RATE_2X4_TXBF_SDM_MCS12    = 304,
+	WL_RATE_2X4_TXBF_SDM_MCS13    = 305,
+	WL_RATE_2X4_TXBF_SDM_MCS14    = 306,
+	WL_RATE_2X4_TXBF_SDM_MCS15    = 307,
+	WL_RATE_P_2X4_TXBF_SDM_MCS99  = 308,
+	WL_RATE_P_2X4_TXBF_SDM_MCS100 = 309,
+
+	WL_RATE_2X4_TXBF_VHT0SS2      = 300,
+	WL_RATE_2X4_TXBF_VHT1SS2      = 301,
+	WL_RATE_2X4_TXBF_VHT2SS2      = 302,
+	WL_RATE_2X4_TXBF_VHT3SS2      = 303,
+	WL_RATE_2X4_TXBF_VHT4SS2      = 304,
+	WL_RATE_2X4_TXBF_VHT5SS2      = 305,
+	WL_RATE_2X4_TXBF_VHT6SS2      = 306,
+	WL_RATE_2X4_TXBF_VHT7SS2      = 307,
+	WL_RATE_2X4_TXBF_VHT8SS2      = 308,
+	WL_RATE_2X4_TXBF_VHT9SS2      = 309,
+	WL_RATE_P_2X4_TXBF_VHT10SS2   = 310,
+	WL_RATE_P_2X4_TXBF_VHT11SS2   = 311,
+
+	/* 3 Streams expanded + 1 */
+	WL_RATE_3X4_TXBF_SDM_MCS16    = 312,
+	WL_RATE_3X4_TXBF_SDM_MCS17    = 313,
+	WL_RATE_3X4_TXBF_SDM_MCS18    = 314,
+	WL_RATE_3X4_TXBF_SDM_MCS19    = 315,
+	WL_RATE_3X4_TXBF_SDM_MCS20    = 316,
+	WL_RATE_3X4_TXBF_SDM_MCS21    = 317,
+	WL_RATE_3X4_TXBF_SDM_MCS22    = 318,
+	WL_RATE_3X4_TXBF_SDM_MCS23    = 319,
+	WL_RATE_P_3X4_TXBF_SDM_MCS101 = 320,
+	WL_RATE_P_3X4_TXBF_SDM_MCS102 = 321,
+
+	WL_RATE_3X4_TXBF_VHT0SS3      = 312,
+	WL_RATE_3X4_TXBF_VHT1SS3      = 313,
+	WL_RATE_3X4_TXBF_VHT2SS3      = 314,
+	WL_RATE_3X4_TXBF_VHT3SS3      = 315,
+	WL_RATE_3X4_TXBF_VHT4SS3      = 316,
+	WL_RATE_3X4_TXBF_VHT5SS3      = 317,
+	WL_RATE_3X4_TXBF_VHT6SS3      = 318,
+	WL_RATE_3X4_TXBF_VHT7SS3      = 319,
+	WL_RATE_P_3X4_TXBF_VHT8SS3    = 320,
+	WL_RATE_P_3X4_TXBF_VHT9SS3    = 321,
+	WL_RATE_P_3X4_TXBF_VHT10SS3   = 322,
+	WL_RATE_P_3X4_TXBF_VHT11SS3   = 323,
+
+	/* 4 Streams */
+	WL_RATE_4X4_TXBF_SDM_MCS24    = 324,
+	WL_RATE_4X4_TXBF_SDM_MCS25    = 325,
+	WL_RATE_4X4_TXBF_SDM_MCS26    = 326,
+	WL_RATE_4X4_TXBF_SDM_MCS27    = 327,
+	WL_RATE_4X4_TXBF_SDM_MCS28    = 328,
+	WL_RATE_4X4_TXBF_SDM_MCS29    = 329,
+	WL_RATE_4X4_TXBF_SDM_MCS30    = 330,
+	WL_RATE_4X4_TXBF_SDM_MCS31    = 331,
+	WL_RATE_P_4X4_TXBF_SDM_MCS103 = 332,
+	WL_RATE_P_4X4_TXBF_SDM_MCS104 = 333,
+
+	WL_RATE_4X4_TXBF_VHT0SS4      = 324,
+	WL_RATE_4X4_TXBF_VHT1SS4      = 325,
+	WL_RATE_4X4_TXBF_VHT2SS4      = 326,
+	WL_RATE_4X4_TXBF_VHT3SS4      = 327,
+	WL_RATE_4X4_TXBF_VHT4SS4      = 328,
+	WL_RATE_4X4_TXBF_VHT5SS4      = 329,
+	WL_RATE_4X4_TXBF_VHT6SS4      = 330,
+	WL_RATE_4X4_TXBF_VHT7SS4      = 331,
+	WL_RATE_P_4X4_TXBF_VHT8SS4    = 332,
+	WL_RATE_P_4X4_TXBF_VHT9SS4    = 333,
+	WL_RATE_P_4X4_TXBF_VHT10SS4   = 334,
+	WL_RATE_P_4X4_TXBF_VHT11SS4   = 335
+
+} clm_rates_t;
+
+/* Number of rate codes */
+#define WL_NUMRATES 336
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _bcmwifi_rates_h_ */
diff --git a/drivers/net/wireless/bcmdhd/bcmxtlv.c b/drivers/net/wireless/bcmdhd/bcmxtlv.c
new file mode 100644
index 0000000..d6bef6f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmxtlv.c
@@ -0,0 +1,457 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmxtlv.c 628611 2016-03-31 17:53:25Z $
+ */
+
+#include <bcm_cfg.h>
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+#include <stdarg.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#else /* !BCMDRIVER */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+INLINE void* MALLOCZ(void *o, size_t s) { BCM_REFERENCE(o); return calloc(1, s); }
+INLINE void MFREE(void *o, void *p, size_t s) { BCM_REFERENCE(o); BCM_REFERENCE(s); free(p); }
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmutils.h>
+
+static INLINE int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts)
+{
+	return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + BCM_XTLV_HDR_SIZE, 4)
+		: (dlen + BCM_XTLV_HDR_SIZE));
+}
+
+bcm_xtlv_t *
+bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts)
+{
+	int sz;
+	/* advance to next elt */
+	sz = BCM_XTLV_SIZE(elt, opts);
+	elt = (bcm_xtlv_t*)((uint8 *)elt + sz);
+	*buflen -= sz;
+
+	/* validate next elt */
+	if (!bcm_valid_xtlv(elt, *buflen, opts))
+		return NULL;
+
+	return elt;
+}
+
+int
+bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts)
+{
+	if (!tlv_buf || !buf || !len)
+		return BCME_BADARG;
+
+	tlv_buf->opts = opts;
+	tlv_buf->size = len;
+	tlv_buf->head = buf;
+	tlv_buf->buf  = buf;
+	return BCME_OK;
+}
+
+uint16
+bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf)
+{
+	if (tbuf == NULL) return 0;
+	return (uint16)(tbuf->buf - tbuf->head);
+}
+uint16
+bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf)
+{
+	if (tbuf == NULL) return 0;
+	return tbuf->size - bcm_xtlv_buf_len(tbuf);
+}
+uint8 *
+bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf)
+{
+	if (tbuf == NULL) return NULL;
+	return tbuf->buf;
+}
+uint8 *
+bcm_xtlv_head(bcm_xtlvbuf_t *tbuf)
+{
+	if (tbuf == NULL) return NULL;
+	return tbuf->head;
+}
+int
+bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen)
+{
+	bcm_xtlv_t *xtlv;
+	int size;
+
+	if (tbuf == NULL)
+		return BCME_BADARG;
+	size = bcm_xtlv_size_for_data(dlen, tbuf->opts);
+	if (bcm_xtlv_buf_rlen(tbuf) < size)
+		return BCME_NOMEM;
+	xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+	xtlv->id = htol16(type);
+	xtlv->len = htol16(dlen);
+	memcpy(xtlv->data, data, dlen);
+	tbuf->buf += size;
+	return BCME_OK;
+}
+int
+bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data)
+{
+	bcm_xtlv_t *xtlv;
+	int size;
+
+	if (tbuf == NULL)
+		return BCME_BADARG;
+	size = bcm_xtlv_size_for_data(1, tbuf->opts);
+	if (bcm_xtlv_buf_rlen(tbuf) < size)
+		return BCME_NOMEM;
+	xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+	xtlv->id = htol16(type);
+	xtlv->len = htol16(sizeof(data));
+	xtlv->data[0] = data;
+	tbuf->buf += size;
+	return BCME_OK;
+}
+int
+bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data)
+{
+	bcm_xtlv_t *xtlv;
+	int size;
+
+	if (tbuf == NULL)
+		return BCME_BADARG;
+	size = bcm_xtlv_size_for_data(2, tbuf->opts);
+	if (bcm_xtlv_buf_rlen(tbuf) < size)
+		return BCME_NOMEM;
+
+	xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+	xtlv->id = htol16(type);
+	xtlv->len = htol16(sizeof(data));
+	htol16_ua_store(data, xtlv->data);
+	tbuf->buf += size;
+	return BCME_OK;
+}
+int
+bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data)
+{
+	bcm_xtlv_t *xtlv;
+	int size;
+
+	if (tbuf == NULL)
+		return BCME_BADARG;
+	size = bcm_xtlv_size_for_data(4, tbuf->opts);
+	if (bcm_xtlv_buf_rlen(tbuf) < size)
+		return BCME_NOMEM;
+	xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+	xtlv->id = htol16(type);
+	xtlv->len = htol16(sizeof(data));
+	htol32_ua_store(data, xtlv->data);
+	tbuf->buf += size;
+	return BCME_OK;
+}
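+
+/*
+ * Illustrative usage sketch (not part of the original source): build a small xtlv
+ * buffer containing one 32-bit record. The record id 0x10 is a made-up example value.
+ *
+ *	uint8 buf[64];
+ *	bcm_xtlvbuf_t tbuf;
+ *
+ *	if (bcm_xtlv_buf_init(&tbuf, buf, sizeof(buf), BCM_XTLV_OPTION_ALIGN32) == BCME_OK &&
+ *	    bcm_xtlv_put_32(&tbuf, 0x10, 1234) == BCME_OK) {
+ *		uint16 used = bcm_xtlv_buf_len(&tbuf);
+ *	}
+ *
+ * bcm_xtlv_buf_len() then reports the number of bytes consumed so far.
+ */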
+
+/*
+ *  unpacks an xtlv record from buf, checks the type,
+ *  copies the data to the caller's buffer and
+ *  advances the tlv pointer to the next record;
+ *  the caller is responsible for the dst space check
+ */
+int
+bcm_unpack_xtlv_entry(uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst,
+	bcm_xtlv_opts_t opts)
+{
+	bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
+	uint16 len;
+	uint16 type;
+
+	ASSERT(ptlv);
+	/* tlv header is always packed in LE order */
+	len = ltoh16(ptlv->len);
+	type = ltoh16(ptlv->id);
+	if (len == 0) {
+		/* z-len tlv headers: allow, but don't process */
+		printf("z-len, skip unpack\n");
+	} else  {
+		if ((type != xpct_type) ||
+			(len > xpct_len)) {
+			printf("xtlv_unpack Error: found[type:%d,len:%d] != xpct[type:%d,len:%d]\n",
+				type, len, xpct_type, xpct_len);
+			return BCME_BADARG;
+		}
+		/* copy tlv record to caller's buffer */
+		memcpy(dst, ptlv->data, len);	/* use the host-order length computed above */
+	}
+	*tlv_buf = (uint8*)(*tlv_buf) + BCM_XTLV_SIZE(ptlv, opts);
+	return BCME_OK;
+}
+
+/*
+ *  packs user data into a tlv record and
+ *  advances the tlv pointer to the next xtlv slot;
+ *  buflen is used for the tlv_buf space check
+ */
+int
+bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src,
+	bcm_xtlv_opts_t opts)
+{
+	bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
+	int size;
+
+	ASSERT(ptlv);
+	ASSERT(src);
+
+	size = bcm_xtlv_size_for_data(len, opts);
+
+	/* check that the record fits in the caller's tlv buffer */
+	if (size > *buflen) {
+		printf("bcm_pack_xtlv_entry: no space tlv_buf: requested:%d, available:%d\n",
+			size, *buflen);
+		return BCME_BADLEN;
+	}
+	ptlv->id = htol16(type);
+	ptlv->len = htol16(len);
+
+	/* copy callers data */
+	memcpy(ptlv->data, src, len);
+
+	/* advance callers pointer to tlv buff */
+	*tlv_buf = (uint8*)(*tlv_buf) + size;
+	/* decrement the len */
+	*buflen -= (uint16)size;
+	return BCME_OK;
+}
+
+/*
+ *  unpack all xtlv records from the buffer and issue a callback
+ *  to the set function, one call per found tlv record
+ */
+int
+bcm_unpack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+	bcm_xtlv_unpack_cbfn_t *cbfn)
+{
+	uint16 len;
+	uint16 type;
+	int res = BCME_OK;
+	int size;
+	bcm_xtlv_t *ptlv;
+	int sbuflen = buflen;
+
+	ASSERT(!buflen || tlv_buf);
+	ASSERT(!buflen || cbfn);
+
+	while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) {
+		ptlv = (bcm_xtlv_t *)tlv_buf;
+
+		/* tlv header is always packed in LE order */
+		len = ltoh16(ptlv->len);
+		type = ltoh16(ptlv->id);
+
+		size = bcm_xtlv_size_for_data(len, opts);
+
+		sbuflen -= size;
+		/* check for possible buffer overrun */
+		if (sbuflen < 0)
+			break;
+
+		if ((res = cbfn(ctx, ptlv->data, type, len)) != BCME_OK)
+			break;
+		tlv_buf = (uint8*)tlv_buf + size;
+	}
+	return res;
+}
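+
+/*
+ * Illustrative callback sketch (not part of the original source): bcm_unpack_xtlv_buf()
+ * walks the buffer and invokes the callback once per record. The names my_ctx and
+ * my_unpack_cb are made up for illustration only.
+ *
+ *	static int my_unpack_cb(void *ctx, uint8 *data, uint16 type, uint16 len)
+ *	{
+ *		switch (type) { ... copy at most len bytes out of data ... }
+ *		return BCME_OK;
+ *	}
+ *
+ *	bcm_unpack_xtlv_buf(&my_ctx, resp_buf, resp_len, BCM_XTLV_OPTION_ALIGN32, my_unpack_cb);
+ */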
+
+int
+bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+	bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next,
+	int *outlen)
+{
+	int res = BCME_OK;
+	uint16 tlv_id;
+	uint16 tlv_len;
+	uint8 *startp;
+	uint8 *endp;
+	uint8 *buf;
+	bool more;
+	int size;
+
+	ASSERT(get_next && pack_next);
+
+	buf = (uint8 *)tlv_buf;
+	startp = buf;
+	endp = (uint8 *)buf + buflen;
+	more = TRUE;
+	while (more && (buf < endp)) {
+		more = get_next(ctx, &tlv_id, &tlv_len);
+		size = bcm_xtlv_size_for_data(tlv_len, opts);
+		if ((buf + size) > endp) {
+			res = BCME_BUFTOOSHORT;
+			goto done;
+		}
+
+		htol16_ua_store(tlv_id, buf);
+		htol16_ua_store(tlv_len, buf + sizeof(tlv_id));
+		pack_next(ctx, tlv_id, tlv_len, buf + BCM_XTLV_HDR_SIZE);
+		buf += size;
+	}
+
+	if (more)
+		res = BCME_BUFTOOSHORT;
+
+done:
+	if (outlen) {
+		*outlen = (int)(buf - startp);
+	}
+	return res;
+}
+
+/*
+ *  pack xtlv buffer from memory according to xtlv_desc_t
+ */
+int
+bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items,
+	bcm_xtlv_opts_t opts)
+{
+	int res = BCME_OK;
+	uint8 *ptlv = (uint8 *)*tlv_buf;
+
+	while (items->type != 0) {
+		if ((items->len > 0) && (res = bcm_pack_xtlv_entry(&ptlv,
+			buflen, items->type,
+			items->len, items->ptr, opts) != BCME_OK)) {
+			break;
+		}
+		items++;
+	}
+	*tlv_buf = ptlv; /* update the external pointer */
+	return res;
+}
+
+/*
+ *  unpack xtlv buffer to memory according to xtlv_desc_t
+ *
+ */
+int
+bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items, bcm_xtlv_opts_t opts)
+{
+	int res = BCME_OK;
+	bcm_xtlv_t *elt;
+
+	elt =  bcm_valid_xtlv((bcm_xtlv_t *)tlv_buf, *buflen, opts) ? (bcm_xtlv_t *)tlv_buf : NULL;
+	if (!elt || !items) {
+		res = BCME_BADARG;
+		return res;
+	}
+
+	for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) {
+		/*  find matches in desc_t items  */
+		xtlv_desc_t *dst_desc = items;
+		uint16 len = ltoh16(elt->len);
+
+		while (dst_desc->type != 0) {
+			if (ltoh16(elt->id) == dst_desc->type) {
+				if (len != dst_desc->len) {
+					res = BCME_BADLEN;
+				} else {
+					memcpy(dst_desc->ptr, elt->data, len);
+				}
+				break;
+			}
+			dst_desc++;
+		}
+	}
+
+	if (res == BCME_OK && *buflen != 0)
+		res =  BCME_BUFTOOSHORT;
+
+	return res;
+}
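+
+/*
+ * Illustrative usage sketch (not part of the original source), assuming the xtlv_desc_t
+ * fields are ordered type/len/ptr as used above. The ids 0x1 and 0x2 are made-up values.
+ *
+ *	uint32 val32;
+ *	uint16 val16;
+ *	xtlv_desc_t items[] = {
+ *		{ 0x1, sizeof(val32), &val32 },
+ *		{ 0x2, sizeof(val16), &val16 },
+ *		{ 0, 0, NULL }
+ *	};
+ *	int rem = resp_len;
+ *	bcm_unpack_xtlv_buf_to_mem(resp_buf, &rem, items, BCM_XTLV_OPTION_ALIGN32);
+ */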
+
+/*
+ * return data pointer of a given ID from xtlv buffer.
+ * If the specified xTLV ID is found, on return *datalen_out will contain
+ * the data length of that xTLV ID.
+ */
+void *
+bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id,
+	uint16 *datalen_out, bcm_xtlv_opts_t opts)
+{
+	void *retptr = NULL;
+	uint16 type, len;
+	int size;
+	bcm_xtlv_t *ptlv;
+	int sbuflen = buflen;
+
+	while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) {
+		ptlv = (bcm_xtlv_t *)tlv_buf;
+
+		/* tlv header is always packed in LE order */
+		type = ltoh16(ptlv->id);
+		len = ltoh16(ptlv->len);
+		size = bcm_xtlv_size_for_data(len, opts);
+
+		sbuflen -= size;
+		/* check for possible buffer overrun */
+		if (sbuflen < 0) {
+			printf("%s %d: Invalid sbuflen %d\n",
+				__FUNCTION__, __LINE__, sbuflen);
+			break;
+		}
+
+		if (id == type) {
+			retptr = ptlv->data;
+			if (datalen_out) {
+				*datalen_out = len;
+			}
+			break;
+		}
+		tlv_buf += size;
+	}
+
+	return retptr;
+}
+
+int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
+{
+	int size; /* entire size of the XTLV including header, data, and optional padding */
+	int len; /* XTLV's real value length without padding */
+
+	len = BCM_XTLV_LEN(elt);
+
+	size = bcm_xtlv_size_for_data(len, opts);
+
+	return size;
+}
diff --git a/drivers/net/wireless/bcmdhd/dbus.c b/drivers/net/wireless/bcmdhd/dbus.c
new file mode 100644
index 0000000..85daa9b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dbus.c
@@ -0,0 +1,2931 @@
+/** @file dbus.c
+ *
+ * Hides the details of the USB / SDIO / SPI interfaces and of the OS. It is intended to shield the caller from those details and
+ * provide the caller with one common bus interface for all dongle devices. In practice, it is only
+ * used for USB interfaces. DBUS is not a protocol, but an abstraction layer.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus.c 553311 2015-04-29 10:23:08Z $
+ */
+
+
+#include "osl.h"
+#include "dbus.h"
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */
+#include <dhd_wlfc.h>
+#endif
+#include <dhd_config.h>
+
+#if defined(BCM_REQUEST_FW)
+#include <bcmsrom_fmt.h>
+#include <trxhdr.h>
+#include <usbrdl.h>
+#include <bcmendian.h>
+#include <sbpcmcia.h>
+#include <bcmnvram.h>
+#include <bcmdevs.h>
+#endif 
+
+
+
+#if defined(BCM_REQUEST_FW)
+#ifndef VARS_MAX
+#define VARS_MAX            8192
+#endif
+#endif 
+
+#ifdef DBUS_USB_LOOPBACK
+extern bool is_loopback_pkt(void *buf);
+extern int matches_loopback_pkt(void *buf);
+#endif
+
+/** General info for all BUS types */
+typedef struct dbus_irbq {
+	dbus_irb_t *head;
+	dbus_irb_t *tail;
+	int cnt;
+} dbus_irbq_t;
+
+/**
+ * This private structure dhd_bus_t is also declared in dbus_usb_linux.c.
+ * All the fields must be consistent in both declarations.
+ */
+typedef struct dhd_bus {
+	dbus_pub_t   pub; /* MUST BE FIRST */
+	dhd_pub_t *dhd;
+
+	void        *cbarg;
+	dbus_callbacks_t *cbs; /* callbacks to higher level, e.g. dhd_linux.c */
+	void        *bus_info;
+	dbus_intf_t *drvintf;  /* callbacks to lower level, e.g. dbus_usb.c or dbus_usb_linux.c */
+	uint8       *fw;
+	int         fwlen;
+	uint32      errmask;
+	int         rx_low_watermark;  /* avoid rx overflow by filling rx with free IRBs */
+	int         tx_low_watermark;
+	bool        txoff;
+	bool        txoverride;   /* flow control related */
+	bool        rxoff;
+	bool        tx_timer_ticking;
+
+
+	dbus_irbq_t *rx_q;
+	dbus_irbq_t *tx_q;
+
+	uint8        *nvram;
+	int          nvram_len;
+	uint8        *image;  /* buffer for combine fw and nvram */
+	int          image_len;
+	uint8        *orig_fw;
+	int          origfw_len;
+	int          decomp_memsize;
+	dbus_extdl_t extdl;
+	int          nvram_nontxt;
+#if defined(BCM_REQUEST_FW)
+	void         *firmware;
+	void         *nvfile;
+#endif
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+} dhd_bus_t;
+
+struct exec_parms {
+	union {
+		/* Can consolidate same params, if need be, but this shows
+		 * group of parameters per function
+		 */
+		struct {
+			dbus_irbq_t  *q;
+			dbus_irb_t   *b;
+		} qenq;
+
+		struct {
+			dbus_irbq_t  *q;
+		} qdeq;
+	};
+};
+
+#define EXEC_RXLOCK(info, fn, a) \
+	info->drvintf->exec_rxlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a))
+
+#define EXEC_TXLOCK(info, fn, a) \
+	info->drvintf->exec_txlock(dhd_bus->bus_info, ((exec_cb_t)fn), ((struct exec_parms *) a))
+
+/*
+ * Callbacks common for all BUS
+ */
+static void dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb);
+static void dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status);
+static void dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status);
+static void dbus_if_errhandler(void *handle, int err);
+static void dbus_if_ctl_complete(void *handle, int type, int status);
+static void dbus_if_state_change(void *handle, int state);
+static void *dbus_if_pktget(void *handle, uint len, bool send);
+static void dbus_if_pktfree(void *handle, void *p, bool send);
+static struct dbus_irb *dbus_if_getirb(void *cbarg, bool send);
+static void dbus_if_rxerr_indicate(void *handle, bool on);
+
+void * dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype,
+	uint16 bus_no, uint16 slot, uint32 hdrlen);
+void dhd_dbus_disconnect_cb(void *arg);
+void dbus_detach(dhd_bus_t *pub);
+
+/** functions in this file that are called by lower DBUS levels, e.g. dbus_usb.c */
+static dbus_intf_callbacks_t dbus_intf_cbs = {
+	dbus_if_send_irb_timeout,
+	dbus_if_send_irb_complete,
+	dbus_if_recv_irb_complete,
+	dbus_if_errhandler,
+	dbus_if_ctl_complete,
+	dbus_if_state_change,
+	NULL,			/* isr */
+	NULL,			/* dpc */
+	NULL,			/* watchdog */
+	dbus_if_pktget,
+	dbus_if_pktfree,
+	dbus_if_getirb,
+	dbus_if_rxerr_indicate
+};
+
+/*
+ * Need global for probe() and disconnect() since
+ * attach() is not called at probe and detach()
+ * can be called inside disconnect()
+ */
+static dbus_intf_t     *g_busintf = NULL;
+static probe_cb_t      probe_cb = NULL;
+static disconnect_cb_t disconnect_cb = NULL;
+static void            *probe_arg = NULL;
+static void            *disc_arg = NULL;
+
+#if defined(BCM_REQUEST_FW)
+int8 *nonfwnvram = NULL; /* stand-alone multi-nvram given with driver load */
+int nonfwnvramlen = 0;
+#endif /* #if defined(BCM_REQUEST_FW) */
+
+static void* q_enq(dbus_irbq_t *q, dbus_irb_t *b);
+static void* q_enq_exec(struct exec_parms *args);
+static dbus_irb_t*q_deq(dbus_irbq_t *q);
+static void* q_deq_exec(struct exec_parms *args);
+static int   dbus_tx_timer_init(dhd_bus_t *dhd_bus);
+static int   dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout);
+static int   dbus_tx_timer_stop(dhd_bus_t *dhd_bus);
+static int   dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb);
+static int   dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb);
+static int   dbus_rxirbs_fill(dhd_bus_t *dhd_bus);
+static int   dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info);
+static void  dbus_disconnect(void *handle);
+static void *dbus_probe(void *arg, const char *desc, uint32 bustype,
+	uint16 bus_no, uint16 slot, uint32 hdrlen);
+
+#if defined(BCM_REQUEST_FW)
+extern char * dngl_firmware;
+extern unsigned int dngl_fwlen;
+#ifndef EXTERNAL_FW_PATH
+static int dbus_get_nvram(dhd_bus_t *dhd_bus);
+static int dbus_jumbo_nvram(dhd_bus_t *dhd_bus);
+static int dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev);
+static int dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen,
+uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len);
+#endif /* !EXTERNAL_FW_PATH */
+extern int dbus_zlib_decomp(dhd_bus_t *dhd_bus);
+extern void *dbus_zlib_calloc(int num, int size);
+extern void dbus_zlib_free(void *ptr);
+#endif
+
+/* function */
+void
+dbus_flowctrl_tx(void *dbi, bool on)
+{
+	dhd_bus_t *dhd_bus = dbi;
+
+	if (dhd_bus == NULL)
+		return;
+
+	DBUSTRACE(("%s on %d\n", __FUNCTION__, on));
+
+	if (dhd_bus->txoff == on)
+		return;
+
+	dhd_bus->txoff = on;
+
+	if (dhd_bus->cbs && dhd_bus->cbs->txflowcontrol)
+		dhd_bus->cbs->txflowcontrol(dhd_bus->cbarg, on);
+}
+
+/**
+ * if lower level DBUS signaled a rx error, more free rx IRBs should be allocated or flow control
+ * should kick in to make more free rx IRBs available.
+ */
+static void
+dbus_if_rxerr_indicate(void *handle, bool on)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+	DBUSTRACE(("%s, on %d\n", __FUNCTION__, on));
+
+	if (dhd_bus == NULL)
+		return;
+
+	if (dhd_bus->txoverride == on)
+		return;
+
+	dhd_bus->txoverride = on;	/* flow control */
+
+	if (!on)
+		dbus_rxirbs_fill(dhd_bus);
+
+}
+
+/** q_enq()/q_deq() are executed with protection via exec_rxlock()/exec_txlock() */
+static void*
+q_enq(dbus_irbq_t *q, dbus_irb_t *b)
+{
+	ASSERT(q->tail != b);
+	ASSERT(b->next == NULL);
+	b->next = NULL;
+	if (q->tail) {
+		q->tail->next = b;
+		q->tail = b;
+	} else
+		q->head = q->tail = b;
+
+	q->cnt++;
+
+	return b;
+}
+
+static void*
+q_enq_exec(struct exec_parms *args)
+{
+	return q_enq(args->qenq.q, args->qenq.b);
+}
+
+static dbus_irb_t*
+q_deq(dbus_irbq_t *q)
+{
+	dbus_irb_t *b;
+
+	b = q->head;
+	if (b) {
+		q->head = q->head->next;
+		b->next = NULL;
+
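+		/* queue drained: tail must follow head back to NULL */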
+		if (q->head == NULL)
+			q->tail = q->head;
+
+		q->cnt--;
+	}
+	return b;
+}
+
+static void*
+q_deq_exec(struct exec_parms *args)
+{
+	return q_deq(args->qdeq.q);
+}
+
+/**
+ * called during attach phase. Status @ Dec 2012: this function does nothing since for all of the
+ * lower DBUS levels dhd_bus->drvintf->tx_timer_init is NULL.
+ */
+static int
+dbus_tx_timer_init(dhd_bus_t *dhd_bus)
+{
+	if (dhd_bus && dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_init)
+		return dhd_bus->drvintf->tx_timer_init(dhd_bus->bus_info);
+	else
+		return DBUS_ERR;
+}
+
+static int
+dbus_tx_timer_start(dhd_bus_t *dhd_bus, uint timeout)
+{
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	if (dhd_bus->tx_timer_ticking)
+		return DBUS_OK;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_start) {
+		if (dhd_bus->drvintf->tx_timer_start(dhd_bus->bus_info, timeout) == DBUS_OK) {
+			dhd_bus->tx_timer_ticking = TRUE;
+			return DBUS_OK;
+		}
+	}
+
+	return DBUS_ERR;
+}
+
+static int
+dbus_tx_timer_stop(dhd_bus_t *dhd_bus)
+{
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	if (!dhd_bus->tx_timer_ticking)
+		return DBUS_OK;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->tx_timer_stop) {
+		if (dhd_bus->drvintf->tx_timer_stop(dhd_bus->bus_info) == DBUS_OK) {
+			dhd_bus->tx_timer_ticking = FALSE;
+			return DBUS_OK;
+		}
+	}
+
+	return DBUS_ERR;
+}
+
+/** called during attach phase. */
+static int
+dbus_irbq_init(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int nq, int size_irb)
+{
+	int i;
+	dbus_irb_t *irb;
+
+	ASSERT(q);
+	ASSERT(dhd_bus);
+
+	for (i = 0; i < nq; i++) {
+		/* MALLOC dbus_irb_tx or dbus_irb_rx, but cast to simple dbus_irb_t linkedlist */
+		irb = (dbus_irb_t *) MALLOC(dhd_bus->pub.osh, size_irb);
+		if (irb == NULL) {
+			ASSERT(irb);
+			return DBUS_ERR;
+		}
+		bzero(irb, size_irb);
+
+		/* q_enq() does not need to go through EXEC_xxLOCK() during init() */
+		q_enq(q, irb);
+	}
+
+	return DBUS_OK;
+}
+
+/** called during detach phase or when attach failed */
+static int
+dbus_irbq_deinit(dhd_bus_t *dhd_bus, dbus_irbq_t *q, int size_irb)
+{
+	dbus_irb_t *irb;
+
+	ASSERT(q);
+	ASSERT(dhd_bus);
+
+	/* q_deq() does not need to go through EXEC_xxLOCK()
+	 * during deinit(); all callbacks are stopped by this time
+	 */
+	while ((irb = q_deq(q)) != NULL) {
+		MFREE(dhd_bus->pub.osh, irb, size_irb);
+	}
+
+	if (q->cnt)
+		DBUSERR(("deinit: q->cnt=%d > 0\n", q->cnt));
+	return DBUS_OK;
+}
+
+/** multiple code paths require the rx queue to be filled with more free IRBs */
+static int
+dbus_rxirbs_fill(dhd_bus_t *dhd_bus)
+{
+	int err = DBUS_OK;
+
+
+	dbus_irb_rx_t *rxirb;
+	struct exec_parms args;
+
+	ASSERT(dhd_bus);
+	if (dhd_bus->pub.busstate != DBUS_STATE_UP) {
+		DBUSERR(("dbus_rxirbs_fill: DBUS not up \n"));
+		return DBUS_ERR;
+	} else if (!dhd_bus->drvintf || (dhd_bus->drvintf->recv_irb == NULL)) {
+		/* Lower edge bus interface does not support recv_irb().
+		 * No need to pre-submit IRBs in this case.
+		 */
+		return DBUS_ERR;
+	}
+
+	/* The dongle recv callback is free-running without a lock, so multiple callbacks (and this
+	 * refill) can run in parallel. Since the rxoff condition is triggered externally, the
+	 * while loop below has to check it and stop posting more IRBs to avoid RPC rxq overflow.
+	 */
+	args.qdeq.q = dhd_bus->rx_q;
+	while ((!dhd_bus->rxoff) &&
+	       (rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) {
+		err = dhd_bus->drvintf->recv_irb(dhd_bus->bus_info, rxirb);
+		if (err == DBUS_ERR_RXDROP || err == DBUS_ERR_RXFAIL) {
+			/* Add the free rxirb back to the queue
+			 * and wait till later
+			 */
+			bzero(rxirb, sizeof(dbus_irb_rx_t));
+			args.qenq.q = dhd_bus->rx_q;
+			args.qenq.b = (dbus_irb_t *) rxirb;
+			EXEC_RXLOCK(dhd_bus, q_enq_exec, &args);
+			break;
+		} else if (err != DBUS_OK) {
+			int i = 0;
+			while (i++ < 100) {
+				DBUSERR(("%s :: memory leak for rxirb note?\n", __FUNCTION__));
+			}
+		}
+	}
+	return err;
+} /* dbus_rxirbs_fill */
+
+/** called when the DBUS interface state changed. */
+void
+dbus_flowctrl_rx(dbus_pub_t *pub, bool on)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	if (dhd_bus == NULL)
+		return;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus->rxoff == on)
+		return;
+
+	dhd_bus->rxoff = on;
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+		if (!on) {
+			/* post more irbs, resume rx if necessary */
+			dbus_rxirbs_fill(dhd_bus);
+			if (dhd_bus && dhd_bus->drvintf->recv_resume) {
+				dhd_bus->drvintf->recv_resume(dhd_bus->bus_info);
+			}
+		} else {
+			/* ??? cancel posted irbs first */
+
+			if (dhd_bus && dhd_bus->drvintf->recv_stop) {
+				dhd_bus->drvintf->recv_stop(dhd_bus->bus_info);
+			}
+		}
+	}
+}
+
+/**
+ * Several code paths in this file want to send data to the dongle. This function handles sending
+ * either a buffer or a packet.
+ */
+static int
+dbus_send_irb(dbus_pub_t *pub, uint8 *buf, int len, void *pkt, void *info)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_OK;
+	dbus_irb_tx_t *txirb = NULL;
+	int txirb_pending;
+	struct exec_parms args;
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+		dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+		args.qdeq.q = dhd_bus->tx_q;
+		if (dhd_bus->drvintf)
+			txirb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args);
+
+		if (txirb == NULL) {
+			DBUSERR(("Out of tx dbus_bufs\n"));
+			return DBUS_ERR;
+		}
+
+		if (pkt != NULL) {
+			txirb->pkt = pkt;
+			txirb->buf = NULL;
+			txirb->len = 0;
+		} else if (buf != NULL) {
+			txirb->pkt = NULL;
+			txirb->buf = buf;
+			txirb->len = len;
+		} else {
+			ASSERT(0); /* Should not happen */
+		}
+		txirb->info = info;
+		txirb->arg = NULL;
+		txirb->retry_count = 0;
+
+		if (dhd_bus->drvintf && dhd_bus->drvintf->send_irb) {
+			/* call lower DBUS level send_irb function */
+			err = dhd_bus->drvintf->send_irb(dhd_bus->bus_info, txirb);
+			if (err == DBUS_ERR_TXDROP) {
+				/* tx fail and no completion routine to clean up, reclaim irb NOW */
+				DBUSERR(("%s: send_irb failed, status = %d\n", __FUNCTION__, err));
+				bzero(txirb, sizeof(dbus_irb_tx_t));
+				args.qenq.q = dhd_bus->tx_q;
+				args.qenq.b = (dbus_irb_t *) txirb;
+				EXEC_TXLOCK(dhd_bus, q_enq_exec, &args);
+			} else {
+				dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL);
+				txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt;
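+				/* too many tx IRBs outstanding: ask the higher layer to hold off (flow control on) */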
+				if (txirb_pending > (dhd_bus->tx_low_watermark * 3)) {
+					dbus_flowctrl_tx(dhd_bus, TRUE);
+				}
+			}
+		}
+	} else {
+		err = DBUS_ERR_TXFAIL;
+		DBUSTRACE(("%s: bus down, send_irb failed\n", __FUNCTION__));
+	}
+
+	return err;
+} /* dbus_send_irb */
+
+#if defined(BCM_REQUEST_FW)
+
+/**
+ * Before downloading a firmware image into the dongle, the validity of the image must be checked.
+ */
+static int
+check_file(osl_t *osh, unsigned char *headers)
+{
+	struct trx_header *trx;
+	int actual_len = -1;
+
+	/* Extract trx header */
+	trx = (struct trx_header *)headers;
+	if (ltoh32(trx->magic) != TRX_MAGIC) {
+		printf("Error: trx bad hdr %x\n", ltoh32(trx->magic));
+		return -1;
+	}
+
+	headers += SIZEOF_TRX(trx);
+
+	/* TRX V1: get firmware len */
+	/* TRX V2: get firmware len and DSG/CFG lengths */
+	if (ltoh32(trx->flag_version) & TRX_UNCOMP_IMAGE) {
+		actual_len = ltoh32(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]) +
+		                     SIZEOF_TRX(trx);
+#ifdef BCMTRXV2
+		if (ISTRX_V2(trx)) {
+			actual_len += ltoh32(trx->offsets[TRX_OFFSETS_DSG_LEN_IDX]) +
+				ltoh32(trx->offsets[TRX_OFFSETS_CFG_LEN_IDX]);
+		}
+#endif
+		return actual_len;
+	} else {
+		printf("compressed image\n");
+	}
+
+	return -1;
+}
+
+#ifdef EXTERNAL_FW_PATH
+static int
+dbus_get_fw_nvram(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path)
+{
+	int bcmerror = -1, i;
+	uint len, total_len;
+	void *nv_image = NULL, *fw_image = NULL;
+	char *nv_memblock = NULL, *fw_memblock = NULL;
+	char *bufp;
+	bool file_exists;
+	uint8 nvram_words_pad = 0;
+	uint memblock_size = 2048;
+	uint8 *memptr;
+	int	actual_fwlen;
+	struct trx_header *hdr;
+	uint32 img_offset = 0;
+	int offset = 0;
+
+	/* Get nvram */
+	file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	if (file_exists) {
+		nv_image = dhd_os_open_image(pnv_path);
+		if (nv_image == NULL) {
+			printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
+			goto err;
+		}
+	}
+	nv_memblock = MALLOC(dhd_bus->pub.osh, MAX_NVRAMBUF_SIZE);
+	if (nv_memblock == NULL) {
+		DBUSERR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+	len = dhd_os_get_image_block(nv_memblock, MAX_NVRAMBUF_SIZE, nv_image);
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)nv_memblock;
+		bufp[len] = 0;
+		dhd_bus->nvram_len = process_nvram_vars(bufp, len);
+		if (dhd_bus->nvram_len % 4)
+			nvram_words_pad = 4 - dhd_bus->nvram_len % 4;
+	} else {
+		DBUSERR(("%s: error reading nvram file: %d\n", __FUNCTION__, len));
+		bcmerror = DBUS_ERR_NVRAM;
+		goto err;
+	}
+	if (nv_image) {
+		dhd_os_close_image(nv_image);
+		nv_image = NULL;
+	}
+
+	/* Get first block of fw to calculate total_len */
+	file_exists = ((pfw_path != NULL) && (pfw_path[0] != '\0'));
+	if (file_exists) {
+		fw_image = dhd_os_open_image(pfw_path);
+		if (fw_image == NULL) {
+			printf("%s: Open fw file failed %s\n", __FUNCTION__, pfw_path);
+			goto err;
+		}
+	}
+	memptr = fw_memblock = MALLOC(dhd_bus->pub.osh, memblock_size);
+	if (fw_memblock == NULL) {
+		DBUSERR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+			memblock_size));
+		goto err;
+	}
+	len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image);
+	if ((actual_fwlen = check_file(dhd_bus->pub.osh, memptr)) <= 0) {
+		DBUSERR(("%s: bad firmware format!\n", __FUNCTION__));
+		goto err;
+	}
+
+	total_len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad;
+	dhd_bus->image = MALLOC(dhd_bus->pub.osh, total_len);
+	dhd_bus->image_len = total_len;
+	if (dhd_bus->image == NULL) {
+		DBUSERR(("%s: malloc failed!\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Step1: Copy trx header + firmware */
+	memptr = fw_memblock;
+	do {
+		if (len < 0) {
+			DBUSERR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		bcopy(memptr, dhd_bus->image+offset, len);
+		offset += len;
+	} while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, fw_image)));
+	/* Step2: Copy NVRAM + pad */
+	hdr = (struct trx_header *)dhd_bus->image;
+	img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX];
+	bcopy(nv_memblock, (uint8 *)(dhd_bus->image + img_offset),
+		dhd_bus->nvram_len);
+	img_offset += dhd_bus->nvram_len;
+	if (nvram_words_pad) {
+		bzero(&dhd_bus->image[img_offset], nvram_words_pad);
+		img_offset += nvram_words_pad;
+	}
+#ifdef BCMTRXV2
+	/* Step3: Copy DSG/CFG for V2 */
+	if (ISTRX_V2(hdr) &&
+		(hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] ||
+		hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) {
+		DBUSERR(("%s: fix me\n", __FUNCTION__));
+	}
+#endif /* BCMTRXV2 */
+	/* Step4: update TRX header for nvram size */
+	hdr = (struct trx_header *)dhd_bus->image;
+	hdr->len = htol32(total_len);
+	/* Pass the actual fw len */
+	hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] =
+		htol32(dhd_bus->nvram_len + nvram_words_pad);
+	/* Calculate CRC over header */
+	hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version,
+		SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version),
+		CRC32_INIT_VALUE);
+
+	/* Calculate CRC over data */
+	for (i = SIZEOF_TRX(hdr); i < total_len; ++i)
+			hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32);
+	hdr->crc32 = htol32(hdr->crc32);
+
+	bcmerror = DBUS_OK;
+
+err:
+	if (fw_memblock)
+		MFREE(dhd_bus->pub.osh, fw_memblock, memblock_size);
+	if (fw_image)
+		dhd_os_close_image(fw_image);
+	if (nv_memblock)
+		MFREE(dhd_bus->pub.osh, nv_memblock, MAX_NVRAMBUF_SIZE);
+	if (nv_image)
+		dhd_os_close_image(nv_image);
+
+	return bcmerror;
+}
+
+/**
+ * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into
+ * the dongle
+ */
+static int
+dbus_do_download(dhd_bus_t *dhd_bus, char *pfw_path, char *pnv_path)
+{
+	int err = DBUS_OK;
+
+	err = dbus_get_fw_nvram(dhd_bus, pfw_path, pnv_path);
+	if (err) {
+		DBUSERR(("dbus_do_download: fail to get nvram %d\n", err));
+		return err;
+	}
+
+	if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) {
+		err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info,
+			dhd_bus->image, dhd_bus->image_len);
+		if (err == DBUS_OK) {
+			err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info);
+		}
+	} else
+		err = DBUS_ERR;
+
+	if (dhd_bus->image) {
+		MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len);
+		dhd_bus->image = NULL;
+		dhd_bus->image_len = 0;
+	}
+
+	return err;
+} /* dbus_do_download */
+#else
+
+/**
+ * It is easier for the user to pass one jumbo nvram file to the driver than a set of smaller files.
+ * The 'jumbo nvram' file format is essentially a set of nvram files. Before commencing firmware
+ * download, the dongle needs to be probed so that the correct nvram contents within the jumbo nvram
+ * file are selected.
+ */
+static int
+dbus_jumbo_nvram(dhd_bus_t *dhd_bus)
+{
+	int8 *nvram = NULL;
+	int nvram_len = 0;
+	int ret = DBUS_OK;
+	uint16 boardrev = 0xFFFF;
+	uint16 boardtype = 0xFFFF;
+
+	/* read the otp for boardrev & boardtype
+	* if boardtype/rev are present in otp
+	* select nvram data for that boardtype/rev
+	*/
+	dbus_otp(dhd_bus, &boardtype, &boardrev);
+
+	ret = dbus_select_nvram(dhd_bus, dhd_bus->extdl.vars, dhd_bus->extdl.varslen,
+		boardtype, boardrev, &nvram, &nvram_len);
+
+	if (ret == DBUS_JUMBO_BAD_FORMAT)
+			return DBUS_ERR_NVRAM;
+	else if (ret == DBUS_JUMBO_NOMATCH &&
+		(boardtype != 0xFFFF || boardrev  != 0xFFFF)) {
+			DBUSERR(("No matching NVRAM for boardtype 0x%02x boardrev 0x%02x\n",
+				boardtype, boardrev));
+			return DBUS_ERR_NVRAM;
+	}
+	dhd_bus->nvram = nvram;
+	dhd_bus->nvram_len =  nvram_len;
+
+	return DBUS_OK;
+}
+
+/** before commencing fw download, the correct NVRAM image to download has to be picked */
+static int
+dbus_get_nvram(dhd_bus_t *dhd_bus)
+{
+	int len, i;
+	struct trx_header *hdr;
+	int	actual_fwlen;
+	uint32 img_offset = 0;
+
+	dhd_bus->nvram_len = 0;
+	if (dhd_bus->extdl.varslen) {
+		if (DBUS_OK != dbus_jumbo_nvram(dhd_bus))
+			return DBUS_ERR_NVRAM;
+		DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
+	}
+#if defined(BCM_REQUEST_FW)
+	else if (nonfwnvram) {
+		dhd_bus->nvram = nonfwnvram;
+		dhd_bus->nvram_len = nonfwnvramlen;
+		DBUSERR(("NVRAM %d bytes downloaded\n", dhd_bus->nvram_len));
+	}
+#endif
+	if (dhd_bus->nvram) {
+		uint8 nvram_words_pad = 0;
+		/* Validate the format/length etc of the file */
+		if ((actual_fwlen = check_file(dhd_bus->pub.osh, dhd_bus->fw)) <= 0) {
+			DBUSERR(("%s: bad firmware format!\n", __FUNCTION__));
+			return DBUS_ERR_NVRAM;
+		}
+
+		if (!dhd_bus->nvram_nontxt) {
+			/* host supplied nvram could be in .txt format
+			* with all the comments etc...
+			*/
+			dhd_bus->nvram_len = process_nvram_vars(dhd_bus->nvram,
+				dhd_bus->nvram_len);
+		}
+		if (dhd_bus->nvram_len % 4)
+			nvram_words_pad = 4 - dhd_bus->nvram_len % 4;
+
+		len = actual_fwlen + dhd_bus->nvram_len + nvram_words_pad;
+		dhd_bus->image = MALLOC(dhd_bus->pub.osh, len);
+		dhd_bus->image_len = len;
+		if (dhd_bus->image == NULL) {
+			DBUSERR(("%s: malloc failed!\n", __FUNCTION__));
+			return DBUS_ERR_NVRAM;
+		}
+		hdr = (struct trx_header *)dhd_bus->fw;
+		/* Step1: Copy trx header + firmware */
+		img_offset = SIZEOF_TRX(hdr) + hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX];
+		bcopy(dhd_bus->fw, dhd_bus->image, img_offset);
+		/* Step2: Copy NVRAM + pad */
+		bcopy(dhd_bus->nvram, (uint8 *)(dhd_bus->image + img_offset),
+			dhd_bus->nvram_len);
+		img_offset += dhd_bus->nvram_len;
+		if (nvram_words_pad) {
+			bzero(&dhd_bus->image[img_offset],
+				nvram_words_pad);
+			img_offset += nvram_words_pad;
+		}
+#ifdef BCMTRXV2
+		/* Step3: Copy DSG/CFG for V2 */
+		if (ISTRX_V2(hdr) &&
+			(hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] ||
+			hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX])) {
+
+			bcopy(dhd_bus->fw + SIZEOF_TRX(hdr) +
+				hdr->offsets[TRX_OFFSETS_DLFWLEN_IDX] +
+				hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX],
+				dhd_bus->image + img_offset,
+				hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
+				hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX]);
+
+			img_offset += hdr->offsets[TRX_OFFSETS_DSG_LEN_IDX] +
+				hdr->offsets[TRX_OFFSETS_CFG_LEN_IDX];
+		}
+#endif /* BCMTRXV2 */
+		/* Step4: update TRX header for nvram size */
+		hdr = (struct trx_header *)dhd_bus->image;
+		hdr->len = htol32(len);
+		/* Pass the actual fw len */
+		hdr->offsets[TRX_OFFSETS_NVM_LEN_IDX] =
+			htol32(dhd_bus->nvram_len + nvram_words_pad);
+		/* Calculate CRC over header */
+		hdr->crc32 = hndcrc32((uint8 *)&hdr->flag_version,
+			SIZEOF_TRX(hdr) - OFFSETOF(struct trx_header, flag_version),
+			CRC32_INIT_VALUE);
+
+		/* Calculate CRC over data */
+		for (i = SIZEOF_TRX(hdr); i < len; ++i)
+				hdr->crc32 = hndcrc32((uint8 *)&dhd_bus->image[i], 1, hdr->crc32);
+		hdr->crc32 = htol32(hdr->crc32);
+	} else {
+		dhd_bus->image = dhd_bus->fw;
+		dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+	}
+
+	return DBUS_OK;
+} /* dbus_get_nvram */
+
+/**
+ * during driver initialization ('attach') or after PnP 'resume', firmware needs to be loaded into
+ * the dongle
+ */
+static int
+dbus_do_download(dhd_bus_t *dhd_bus)
+{
+	int err = DBUS_OK;
+#ifndef BCM_REQUEST_FW
+	int decomp_override = 0;
+#endif
+#ifdef BCM_REQUEST_FW
+	uint16 boardrev = 0xFFFF, boardtype = 0xFFFF;
+	int8 *temp_nvram;
+	int temp_len;
+#endif
+
+#if defined(BCM_REQUEST_FW)
+	dhd_bus->firmware = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
+		dhd_bus->pub.attrib.chiprev, &dhd_bus->fw, &dhd_bus->fwlen,
+		DBUS_FIRMWARE, 0, 0);
+	if (!dhd_bus->firmware)
+		return DBUS_ERR;
+#endif 
+
+	dhd_bus->image = dhd_bus->fw;
+	dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+
+#ifndef BCM_REQUEST_FW
+	if (UNZIP_ENAB(dhd_bus) && !decomp_override) {
+		err = dbus_zlib_decomp(dhd_bus);
+		if (err) {
+			DBUSERR(("dbus_attach: fw decompress fail %d\n", err));
+			return err;
+		}
+	}
+#endif
+
+#if defined(BCM_REQUEST_FW)
+	/* check if firmware is appended with nvram file */
+	err = dbus_otp(dhd_bus, &boardtype, &boardrev);
+	/* check if nvram is provided as a separate file */
+	nonfwnvram = NULL;
+	nonfwnvramlen = 0;
+	dhd_bus->nvfile = dbus_get_fw_nvfile(dhd_bus->pub.attrib.devid,
+		dhd_bus->pub.attrib.chiprev, (void *)&temp_nvram, &temp_len,
+		DBUS_NVFILE, boardtype, boardrev);
+	if (dhd_bus->nvfile) {
+		int8 *tmp = MALLOC(dhd_bus->pub.osh, temp_len);
+		if (tmp) {
+			bcopy(temp_nvram, tmp, temp_len);
+			nonfwnvram = tmp;
+			nonfwnvramlen = temp_len;
+		} else {
+			err = DBUS_ERR;
+			goto fail;
+		}
+	}
+#endif /* defined(BCM_REQUEST_FW) */
+
+	err = dbus_get_nvram(dhd_bus);
+	if (err) {
+		DBUSERR(("dbus_do_download: fail to get nvram %d\n", err));
+		return err;
+	}
+
+
+	if (dhd_bus->drvintf->dlstart && dhd_bus->drvintf->dlrun) {
+		err = dhd_bus->drvintf->dlstart(dhd_bus->bus_info,
+			dhd_bus->image, dhd_bus->image_len);
+
+		if (err == DBUS_OK)
+			err = dhd_bus->drvintf->dlrun(dhd_bus->bus_info);
+	} else
+		err = DBUS_ERR;
+
+	if (dhd_bus->nvram) {
+		MFREE(dhd_bus->pub.osh, dhd_bus->image, dhd_bus->image_len);
+		dhd_bus->image = dhd_bus->fw;
+		dhd_bus->image_len = (uint32)dhd_bus->fwlen;
+	}
+
+#ifndef BCM_REQUEST_FW
+	if (UNZIP_ENAB(dhd_bus) && (!decomp_override) && dhd_bus->orig_fw) {
+		MFREE(dhd_bus->pub.osh, dhd_bus->fw, dhd_bus->decomp_memsize);
+		dhd_bus->image = dhd_bus->fw = dhd_bus->orig_fw;
+		dhd_bus->image_len = dhd_bus->fwlen = dhd_bus->origfw_len;
+	}
+#endif
+
+#if defined(BCM_REQUEST_FW)
+fail:
+	if (dhd_bus->firmware) {
+		dbus_release_fw_nvfile(dhd_bus->firmware);
+		dhd_bus->firmware = NULL;
+	}
+	if (dhd_bus->nvfile) {
+		dbus_release_fw_nvfile(dhd_bus->nvfile);
+		dhd_bus->nvfile = NULL;
+	}
+	if (nonfwnvram) {
+		MFREE(dhd_bus->pub.osh, nonfwnvram, nonfwnvramlen);
+		nonfwnvram = NULL;
+		nonfwnvramlen = 0;
+	}
+#endif
+	return err;
+} /* dbus_do_download */
+#endif /* EXTERNAL_FW_PATH */
+#endif /* BCM_REQUEST_FW */
+
+/** required for DBUS deregistration */
+static void
+dbus_disconnect(void *handle)
+{
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (disconnect_cb)
+		disconnect_cb(disc_arg);
+}
+
+/**
+ * This function is called when the sent irb times out without a tx response status.
+ * DBUS adds reliability by resending timed out IRBs DBUS_TX_RETRY_LIMIT times.
+ */
+static void
+dbus_if_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+	if ((dhd_bus == NULL) || (dhd_bus->drvintf == NULL) || (txirb == NULL)) {
+		return;
+	}
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	return;
+
+} /* dbus_if_send_irb_timeout */
+
+/**
+ * When lower DBUS level signals that a send IRB completed, either successful or not, the higher
+ * level (e.g. dhd_linux.c) has to be notified, and transmit flow control has to be evaluated.
+ */
+static void BCMFASTPATH
+dbus_if_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+	int txirb_pending;
+	struct exec_parms args;
+	void *pktinfo;
+
+	if ((dhd_bus == NULL) || (txirb == NULL)) {
+		return;
+	}
+
+	DBUSTRACE(("%s: status = %d\n", __FUNCTION__, status));
+
+	dbus_tx_timer_stop(dhd_bus);
+
+	/* re-queue BEFORE calling send_complete which will assume that this irb
+	   is now available.
+	 */
+	pktinfo = txirb->info;
+	bzero(txirb, sizeof(dbus_irb_tx_t));
+	args.qenq.q = dhd_bus->tx_q;
+	args.qenq.b = (dbus_irb_t *) txirb;
+	EXEC_TXLOCK(dhd_bus, q_enq_exec, &args);
+
+	if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) {
+		if ((status == DBUS_OK) || (status == DBUS_ERR_NODEVICE)) {
+			if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+				dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+					status);
+
+			if (status == DBUS_OK) {
+				txirb_pending = dhd_bus->pub.ntxq - dhd_bus->tx_q->cnt;
+				if (txirb_pending)
+					dbus_tx_timer_start(dhd_bus, DBUS_TX_TIMEOUT_INTERVAL);
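+				/* enough tx IRBs are free again: release tx flow control */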
+				if ((txirb_pending < dhd_bus->tx_low_watermark) &&
+					dhd_bus->txoff && !dhd_bus->txoverride) {
+					dbus_flowctrl_tx(dhd_bus, OFF);
+				}
+			}
+		} else {
+			DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__,
+				pktinfo));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC)
+			if (pktinfo)
+				if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+					dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+						status);
+#else
+			dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE);
+#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) */
+		}
+	} else {
+		DBUSERR(("%s: %d WARNING freeing orphan pkt %p\n", __FUNCTION__, __LINE__,
+			pktinfo));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC)
+		if (pktinfo)
+			if (dhd_bus->cbs && dhd_bus->cbs->send_complete)
+				dhd_bus->cbs->send_complete(dhd_bus->cbarg, pktinfo,
+					status);
+#else
+		dbus_if_pktfree(dhd_bus, (void*)pktinfo, TRUE);
+#endif /* defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY) || defined(BCM_RPC_TOC) */
+	}
+} /* dbus_if_send_irb_complete */
+
+/**
+ * When lower DBUS level signals that a receive IRB completed, either successful or not, the higher
+ * level (e.g. dhd_linux.c) has to be notified, and fresh free receive IRBs may have to be given
+ * to lower levels.
+ */
+static void BCMFASTPATH
+dbus_if_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+	int rxirb_pending;
+	struct exec_parms args;
+
+	if ((dhd_bus == NULL) || (rxirb == NULL)) {
+		return;
+	}
+	DBUSTRACE(("%s\n", __FUNCTION__));
+	if (dhd_bus->pub.busstate != DBUS_STATE_DOWN &&
+		dhd_bus->pub.busstate != DBUS_STATE_SLEEP) {
+		if (status == DBUS_OK) {
+			if ((rxirb->buf != NULL) && (rxirb->actual_len > 0)) {
+#ifdef DBUS_USB_LOOPBACK
+				if (is_loopback_pkt(rxirb->buf)) {
+					matches_loopback_pkt(rxirb->buf);
+				} else
+#endif
+				if (dhd_bus->cbs && dhd_bus->cbs->recv_buf) {
+					dhd_bus->cbs->recv_buf(dhd_bus->cbarg, rxirb->buf,
+					rxirb->actual_len);
+				}
+			} else if (rxirb->pkt != NULL) {
+				if (dhd_bus->cbs && dhd_bus->cbs->recv_pkt)
+					dhd_bus->cbs->recv_pkt(dhd_bus->cbarg, rxirb->pkt);
+			} else {
+				ASSERT(0); /* Should not happen */
+			}
+
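+			/* IRBs still posted to the lower level; the -1 excludes this IRB, re-queued below */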
+			rxirb_pending = dhd_bus->pub.nrxq - dhd_bus->rx_q->cnt - 1;
+			if ((rxirb_pending <= dhd_bus->rx_low_watermark) &&
+				!dhd_bus->rxoff) {
+				DBUSTRACE(("Low watermark so submit more %d <= %d \n",
+					dhd_bus->rx_low_watermark, rxirb_pending));
+				dbus_rxirbs_fill(dhd_bus);
+			} else if (dhd_bus->rxoff)
+				DBUSTRACE(("rx flow controlled. not filling more. cut_rxq=%d\n",
+					dhd_bus->rx_q->cnt));
+		} else if (status == DBUS_ERR_NODEVICE) {
+			DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__, status,
+				rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+			if (rxirb->buf) {
+				PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+				PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+			}
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+		} else {
+			if (status != DBUS_ERR_RXZLP)
+				DBUSERR(("%s: %d status = %d, buf %p\n", __FUNCTION__, __LINE__,
+					status, rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+			if (rxirb->buf) {
+				PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+				PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+			}
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+		}
+	} else {
+		DBUSTRACE(("%s: DBUS down, ignoring recv callback. buf %p\n", __FUNCTION__,
+			rxirb->buf));
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+		if (rxirb->buf) {
+			PKTFRMNATIVE(dhd_bus->pub.osh, rxirb->buf);
+			PKTFREE(dhd_bus->pub.osh, rxirb->buf, FALSE);
+		}
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+	}
+	if (dhd_bus->rx_q != NULL) {
+		bzero(rxirb, sizeof(dbus_irb_rx_t));
+		args.qenq.q = dhd_bus->rx_q;
+		args.qenq.b = (dbus_irb_t *) rxirb;
+		EXEC_RXLOCK(dhd_bus, q_enq_exec, &args);
+	} else
+		MFREE(dhd_bus->pub.osh, rxirb, sizeof(dbus_irb_rx_t));
+} /* dbus_if_recv_irb_complete */
+
+/**
+ *  Accumulate errors signaled by lower DBUS levels and signal them to higher (e.g. dhd_linux.c)
+ *  level.
+ */
+static void
+dbus_if_errhandler(void *handle, int err)
+{
+	dhd_bus_t *dhd_bus = handle;
+	uint32 mask = 0;
+
+	if (dhd_bus == NULL)
+		return;
+
+	switch (err) {
+		case DBUS_ERR_TXFAIL:
+			dhd_bus->pub.stats.tx_errors++;
+			mask |= ERR_CBMASK_TXFAIL;
+			break;
+		case DBUS_ERR_TXDROP:
+			dhd_bus->pub.stats.tx_dropped++;
+			mask |= ERR_CBMASK_TXFAIL;
+			break;
+		case DBUS_ERR_RXFAIL:
+			dhd_bus->pub.stats.rx_errors++;
+			mask |= ERR_CBMASK_RXFAIL;
+			break;
+		case DBUS_ERR_RXDROP:
+			dhd_bus->pub.stats.rx_dropped++;
+			mask |= ERR_CBMASK_RXFAIL;
+			break;
+		default:
+			break;
+	}
+
+	if (dhd_bus->cbs && dhd_bus->cbs->errhandler && (dhd_bus->errmask & mask))
+		dhd_bus->cbs->errhandler(dhd_bus->cbarg, err);
+}
+
+/**
+ * When lower DBUS level signals control IRB completed, higher level (e.g. dhd_linux.c) has to be
+ * notified.
+ */
+static void
+dbus_if_ctl_complete(void *handle, int type, int status)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL) {
+		DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd_bus->pub.busstate != DBUS_STATE_DOWN) {
+		if (dhd_bus->cbs && dhd_bus->cbs->ctl_complete)
+			dhd_bus->cbs->ctl_complete(dhd_bus->cbarg, type, status);
+	}
+}
+
+/**
+ * Rx related functionality (flow control, posting of free IRBs to rx queue) is dependent upon the
+ * bus state. When lower DBUS level signals a change in the interface state, take appropriate action
+ * and forward the signaling to the higher (e.g. dhd_linux.c) level.
+ */
+static void
+dbus_if_state_change(void *handle, int state)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+	int old_state;
+
+	if (dhd_bus == NULL)
+		return;
+
+	if (dhd_bus->pub.busstate == state)
+		return;
+	old_state = dhd_bus->pub.busstate;
+	if (state == DBUS_STATE_DISCONNECT) {
+		DBUSERR(("DBUS disconnected\n"));
+	}
+
+	/* Ignore USB SUSPEND while not up yet */
+	if (state == DBUS_STATE_SLEEP && old_state != DBUS_STATE_UP)
+		return;
+
+	DBUSTRACE(("dbus state change from %d to %d\n", old_state, state));
+
+	/* Don't update state if it's PnP firmware re-download */
+	if (state != DBUS_STATE_PNP_FWDL)
+		dhd_bus->pub.busstate = state;
+	else
+		dbus_flowctrl_rx(handle, FALSE);
+	if (state == DBUS_STATE_SLEEP)
+		dbus_flowctrl_rx(handle, TRUE);
+	if (state == DBUS_STATE_UP) {
+		dbus_rxirbs_fill(dhd_bus);
+		dbus_flowctrl_rx(handle, FALSE);
+	}
+
+	if (dhd_bus->cbs && dhd_bus->cbs->state_change)
+		dhd_bus->cbs->state_change(dhd_bus->cbarg, state);
+}
+
+/** Forward request for packet from lower DBUS layer to higher layer (e.g. dhd_linux.c) */
+static void *
+dbus_if_pktget(void *handle, uint len, bool send)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+	void *p = NULL;
+
+	if (dhd_bus == NULL)
+		return NULL;
+
+	if (dhd_bus->cbs && dhd_bus->cbs->pktget)
+		p = dhd_bus->cbs->pktget(dhd_bus->cbarg, len, send);
+	else
+		ASSERT(0);
+
+	return p;
+}
+
+/** Forward request to free packet from lower DBUS layer to higher layer (e.g. dhd_linux.c) */
+static void
+dbus_if_pktfree(void *handle, void *p, bool send)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) handle;
+
+	if (dhd_bus == NULL)
+		return;
+
+	if (dhd_bus->cbs && dhd_bus->cbs->pktfree)
+		dhd_bus->cbs->pktfree(dhd_bus->cbarg, p, send);
+	else
+		ASSERT(0);
+}
+
+/** Lower DBUS level requests either a send or receive IRB */
+static struct dbus_irb*
+dbus_if_getirb(void *cbarg, bool send)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) cbarg;
+	struct exec_parms args;
+	struct dbus_irb *irb;
+
+	if ((dhd_bus == NULL) || (dhd_bus->pub.busstate != DBUS_STATE_UP))
+		return NULL;
+
+	if (send == TRUE) {
+		args.qdeq.q = dhd_bus->tx_q;
+		irb = EXEC_TXLOCK(dhd_bus, q_deq_exec, &args);
+	} else {
+		args.qdeq.q = dhd_bus->rx_q;
+		irb = EXEC_RXLOCK(dhd_bus, q_deq_exec, &args);
+	}
+
+	return irb;
+}
+
+/**
+ * Called as part of DBUS bus registration. Calls back into higher level (e.g. dhd_linux.c) probe
+ * function.
+ */
+static void *
+dbus_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no,
+	uint16 slot, uint32 hdrlen)
+{
+	DBUSTRACE(("%s\n", __FUNCTION__));
+	if (probe_cb) {
+		disc_arg = probe_cb(probe_arg, desc, bustype, bus_no, slot, hdrlen);
+		return disc_arg;
+	}
+
+	return (void *)DBUS_ERR;
+}
+
+/**
+ * As part of initialization, higher level (e.g. dhd_linux.c) requests DBUS to prepare for
+ * action.
+ */
+int
+dhd_bus_register(void)
+{
+	int err;
+
+	DBUSTRACE(("%s: Enter\n", __FUNCTION__));
+
+	probe_cb = dhd_dbus_probe_cb;
+	disconnect_cb = dhd_dbus_disconnect_cb;
+	probe_arg = NULL;
+
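+	/* 0x0a5c is the Broadcom USB vendor ID */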
+	err = dbus_bus_register(0xa5c, 0x48f, dbus_probe, /* call lower DBUS level register function */
+		dbus_disconnect, NULL, &g_busintf, NULL, NULL);
+
+	/* Device not detected */
+	if (err == DBUS_ERR_NODEVICE)
+		err = DBUS_OK;
+
+	return err;
+}
+
+dhd_pub_t *g_pub = NULL;
+void
+dhd_bus_unregister(void)
+{
+	int ret;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	DHD_MUTEX_LOCK();
+	if (g_pub) {
+		g_pub->dhd_remove = TRUE;
+		if (!g_pub->bus) {
+			dhd_dbus_disconnect_cb(g_pub->bus);
+		}
+	}
+	probe_cb = NULL;
+	DHD_MUTEX_UNLOCK();
+	ret = dbus_bus_deregister();
+	disconnect_cb = NULL;
+	probe_arg = NULL;
+}
+
+/** As part of initialization, data structures have to be allocated and initialized */
+dhd_bus_t *
+dbus_attach(osl_t *osh, int rxsize, int nrxq, int ntxq, dhd_pub_t *pub,
+	dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh)
+{
+	dhd_bus_t *dhd_bus;
+	int err;
+
+	if ((g_busintf == NULL) || (g_busintf->attach == NULL) || (cbs == NULL))
+		return NULL;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if ((nrxq <= 0) || (ntxq <= 0))
+		return NULL;
+
+	dhd_bus = MALLOC(osh, sizeof(dhd_bus_t));
+	if (dhd_bus == NULL) {
+		DBUSERR(("%s: malloc failed %zu\n", __FUNCTION__, sizeof(dhd_bus_t)));
+		return NULL;
+	}
+
+	bzero(dhd_bus, sizeof(dhd_bus_t));
+
+	/* BUS-specific driver interface (at a lower DBUS level) */
+	dhd_bus->drvintf = g_busintf;
+	dhd_bus->cbarg = pub;
+	dhd_bus->cbs = cbs;
+
+	dhd_bus->pub.sh = sh;
+	dhd_bus->pub.osh = osh;
+	dhd_bus->pub.rxsize = rxsize;
+
+	dhd_bus->pub.nrxq = nrxq;
+	dhd_bus->rx_low_watermark = nrxq / 2;	/* keep enough posted rx urbs */
+	dhd_bus->pub.ntxq = ntxq;
+	dhd_bus->tx_low_watermark = ntxq / 4;	/* flow control when too many tx urbs posted */
+
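+	/* pre-allocate the tx and rx IRB pools; each queue starts out holding all of its free IRBs */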
+	dhd_bus->tx_q = MALLOC(osh, sizeof(dbus_irbq_t));
+	if (dhd_bus->tx_q == NULL)
+		goto error;
+	else {
+		bzero(dhd_bus->tx_q, sizeof(dbus_irbq_t));
+		err = dbus_irbq_init(dhd_bus, dhd_bus->tx_q, ntxq, sizeof(dbus_irb_tx_t));
+		if (err != DBUS_OK)
+			goto error;
+	}
+
+	dhd_bus->rx_q = MALLOC(osh, sizeof(dbus_irbq_t));
+	if (dhd_bus->rx_q == NULL)
+		goto error;
+	else {
+		bzero(dhd_bus->rx_q, sizeof(dbus_irbq_t));
+		err = dbus_irbq_init(dhd_bus, dhd_bus->rx_q, nrxq, sizeof(dbus_irb_rx_t));
+		if (err != DBUS_OK)
+			goto error;
+	}
+
+
+	dhd_bus->bus_info = (void *)g_busintf->attach(&dhd_bus->pub,
+		dhd_bus, &dbus_intf_cbs);
+	if (dhd_bus->bus_info == NULL)
+		goto error;
+
+	dbus_tx_timer_init(dhd_bus);
+
+#if defined(BCM_REQUEST_FW)
+	/* Need to copy external image for re-download */
+	if (extdl && extdl->fw && (extdl->fwlen > 0)) {
+		dhd_bus->extdl.fw = MALLOC(osh, extdl->fwlen);
+		if (dhd_bus->extdl.fw) {
+			bcopy(extdl->fw, dhd_bus->extdl.fw, extdl->fwlen);
+			dhd_bus->extdl.fwlen = extdl->fwlen;
+		}
+	}
+
+	if (extdl && extdl->vars && (extdl->varslen > 0)) {
+		dhd_bus->extdl.vars = MALLOC(osh, extdl->varslen);
+		if (dhd_bus->extdl.vars) {
+			bcopy(extdl->vars, dhd_bus->extdl.vars, extdl->varslen);
+			dhd_bus->extdl.varslen = extdl->varslen;
+		}
+	}
+#endif 
+
+	return (dhd_bus_t *)dhd_bus;
+
+error:
+	DBUSERR(("%s: Failed\n", __FUNCTION__));
+	dbus_detach(dhd_bus);
+	return NULL;
+} /* dbus_attach */
+
+void
+dbus_detach(dhd_bus_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	osl_t *osh;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return;
+
+	dbus_tx_timer_stop(dhd_bus);
+
+	osh = pub->pub.osh;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->detach)
+		 dhd_bus->drvintf->detach((dbus_pub_t *)dhd_bus, dhd_bus->bus_info);
+
+	if (dhd_bus->tx_q) {
+		dbus_irbq_deinit(dhd_bus, dhd_bus->tx_q, sizeof(dbus_irb_tx_t));
+		MFREE(osh, dhd_bus->tx_q, sizeof(dbus_irbq_t));
+		dhd_bus->tx_q = NULL;
+	}
+
+	if (dhd_bus->rx_q) {
+		dbus_irbq_deinit(dhd_bus, dhd_bus->rx_q, sizeof(dbus_irb_rx_t));
+		MFREE(osh, dhd_bus->rx_q, sizeof(dbus_irbq_t));
+		dhd_bus->rx_q = NULL;
+	}
+
+
+	if (dhd_bus->extdl.fw && (dhd_bus->extdl.fwlen > 0)) {
+		MFREE(osh, dhd_bus->extdl.fw, dhd_bus->extdl.fwlen);
+		dhd_bus->extdl.fw = NULL;
+		dhd_bus->extdl.fwlen = 0;
+	}
+
+	if (dhd_bus->extdl.vars && (dhd_bus->extdl.varslen > 0)) {
+		MFREE(osh, dhd_bus->extdl.vars, dhd_bus->extdl.varslen);
+		dhd_bus->extdl.vars = NULL;
+		dhd_bus->extdl.varslen = 0;
+	}
+
+	MFREE(osh, dhd_bus, sizeof(dhd_bus_t));
+} /* dbus_detach */
+
+int dbus_dlneeded(dhd_bus_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int dlneeded = DBUS_ERR;
+
+	if (!dhd_bus) {
+		DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+		return DBUS_ERR;
+	}
+
+	DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate));
+
+	if (dhd_bus->drvintf->dlneeded) {
+		dlneeded = dhd_bus->drvintf->dlneeded(dhd_bus->bus_info);
+	}
+	printf("%s: dlneeded=%d\n", __FUNCTION__, dlneeded);
+
+	/* dlneeded > 0: need to download
+	 * dlneeded = 0: downloaded
+	 * dlneeded < 0: bus error
+	 */
+	return dlneeded;
+}
+
+#if defined(BCM_REQUEST_FW)
+int dbus_download_firmware(dhd_bus_t *pub, char *pfw_path, char *pnv_path)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_OK;
+
+	if (!dhd_bus) {
+		DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+		return DBUS_ERR;
+	}
+
+	DBUSTRACE(("%s: state %d\n", __FUNCTION__, dhd_bus->pub.busstate));
+
+	dhd_bus->pub.busstate = DBUS_STATE_DL_PENDING;
+#ifdef EXTERNAL_FW_PATH
+	err = dbus_do_download(dhd_bus, pfw_path, pnv_path);
+#else
+	err = dbus_do_download(dhd_bus);
+#endif /* EXTERNAL_FW_PATH */
+	if (err == DBUS_OK) {
+		dhd_bus->pub.busstate = DBUS_STATE_DL_DONE;
+	} else {
+		DBUSERR(("%s: download failed (%d)\n", __FUNCTION__, err));
+	}
+
+	return err;
+}
+#endif 
+
+/**
+ * higher layer requests us to 'up' the interface to the dongle. Prerequisite is that firmware (not
+ * bootloader) must be active in the dongle.
+ */
+int
+dbus_up(struct dhd_bus *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_OK;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL) {
+		DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+		return DBUS_ERR;
+	}
+
+	if ((dhd_bus->pub.busstate == DBUS_STATE_DL_DONE) ||
+		(dhd_bus->pub.busstate == DBUS_STATE_DOWN) ||
+		(dhd_bus->pub.busstate == DBUS_STATE_SLEEP)) {
+		if (dhd_bus->drvintf && dhd_bus->drvintf->up) {
+			err = dhd_bus->drvintf->up(dhd_bus->bus_info);
+
+			if (err == DBUS_OK) {
+				dbus_rxirbs_fill(dhd_bus);
+			}
+		}
+	} else
+		err = DBUS_ERR;
+
+	return err;
+}
+
+/** higher layer requests us to 'down' the interface to the dongle. */
+int
+dbus_down(dbus_pub_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	dbus_tx_timer_stop(dhd_bus);
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+		dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+		if (dhd_bus->drvintf && dhd_bus->drvintf->down)
+			return dhd_bus->drvintf->down(dhd_bus->bus_info);
+	}
+
+	return DBUS_ERR;
+}
+
+int
+dbus_shutdown(dbus_pub_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->shutdown)
+		return dhd_bus->drvintf->shutdown(dhd_bus->bus_info);
+
+	return DBUS_OK;
+}
+
+int
+dbus_stop(struct dhd_bus *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+		dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+		if (dhd_bus->drvintf && dhd_bus->drvintf->stop)
+			return dhd_bus->drvintf->stop(dhd_bus->bus_info);
+	}
+
+	return DBUS_ERR;
+}
+
+int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf)
+{
+	return dbus_send_pkt(dbus, pktbuf, pktbuf /* pktinfo */);
+}
+
+int
+dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info)
+{
+	return dbus_send_irb(pub, buf, len, NULL, info);
+}
+
+int
+dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info)
+{
+	return dbus_send_irb(pub, NULL, 0, pkt, info);
+}
+
+int
+dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	if (dhd_bus == NULL) {
+		DBUSERR(("%s: dhd_bus is NULL\n", __FUNCTION__));
+		return DBUS_ERR;
+	}
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+		dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+		if (dhd_bus->drvintf && dhd_bus->drvintf->send_ctl)
+			return dhd_bus->drvintf->send_ctl(dhd_bus->bus_info, buf, len);
+	} else {
+		DBUSERR(("%s: bustate=%d\n", __FUNCTION__, dhd_bus->pub.busstate));
+	}
+
+	return DBUS_ERR;
+}
+
+int
+dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	if ((dhd_bus == NULL) || (buf == NULL))
+		return DBUS_ERR;
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP ||
+		dhd_bus->pub.busstate == DBUS_STATE_SLEEP) {
+		if (dhd_bus->drvintf && dhd_bus->drvintf->recv_ctl)
+			return dhd_bus->drvintf->recv_ctl(dhd_bus->bus_info, buf, len);
+	}
+
+	return DBUS_ERR;
+}
+
+/** Only called via RPC (Dec 2012) */
+int
+dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	dbus_irb_rx_t *rxirb;
+	struct exec_parms args;
+	int status;
+
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	args.qdeq.q = dhd_bus->rx_q;
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+		if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) {
+			if ((rxirb = (EXEC_RXLOCK(dhd_bus, q_deq_exec, &args))) != NULL) {
+				status = dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info,
+					rxirb, ep_idx);
+				if (status == DBUS_ERR_RXDROP) {
+					bzero(rxirb, sizeof(dbus_irb_rx_t));
+					args.qenq.q = dhd_bus->rx_q;
+					args.qenq.b = (dbus_irb_t *) rxirb;
+					EXEC_RXLOCK(dhd_bus, q_enq_exec, &args);
+				}
+			}
+		}
+	}
+
+	return DBUS_ERR;
+}
+
+/** only called by dhd_cdc.c (Dec 2012) */
+int
+dbus_poll_intr(dbus_pub_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	int status = DBUS_ERR;
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+		if (dhd_bus->drvintf && dhd_bus->drvintf->recv_irb_from_ep) {
+			status = dhd_bus->drvintf->recv_irb_from_ep(dhd_bus->bus_info,
+				NULL, 0xff);
+		}
+	}
+	return status;
+}
+
+/** called by nobody (Dec 2012) */
+void *
+dbus_pktget(dbus_pub_t *pub, int len)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	if ((dhd_bus == NULL) || (len < 0))
+		return NULL;
+
+	return PKTGET(dhd_bus->pub.osh, len, TRUE);
+}
+
+/** called by nobody (Dec 2012) */
+void
+dbus_pktfree(dbus_pub_t *pub, void* pkt)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	if ((dhd_bus == NULL) || (pkt == NULL))
+		return;
+
+	PKTFREE(dhd_bus->pub.osh, pkt, TRUE);
+}
+
+/** called by nobody (Dec 2012) */
+int
+dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	if ((dhd_bus == NULL) || (stats == NULL))
+		return DBUS_ERR;
+
+	bcopy(&dhd_bus->pub.stats, stats, sizeof(dbus_stats_t));
+
+	return DBUS_OK;
+}
+
+int
+dbus_get_attrib(dhd_bus_t *pub, dbus_attrib_t *attrib)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_ERR;
+
+	if ((dhd_bus == NULL) || (attrib == NULL))
+		return DBUS_ERR;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->get_attrib) {
+		err = dhd_bus->drvintf->get_attrib(dhd_bus->bus_info,
+		&dhd_bus->pub.attrib);
+	}
+
+	bcopy(&dhd_bus->pub.attrib, attrib, sizeof(dbus_attrib_t));
+	return err;
+}
+
+int
+dbus_get_device_speed(dbus_pub_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+
+	if (dhd_bus == NULL)
+		return INVALID_SPEED;
+
+	return (dhd_bus->pub.device_speed);
+}
+
+int
+dbus_set_config(dbus_pub_t *pub, dbus_config_t *config)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_ERR;
+
+	if ((dhd_bus == NULL) || (config == NULL))
+		return DBUS_ERR;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->set_config) {
+		err = dhd_bus->drvintf->set_config(dhd_bus->bus_info,
+			config);
+
+		if ((config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) &&
+			(!err) &&
+			(dhd_bus->pub.busstate == DBUS_STATE_UP)) {
+			dbus_rxirbs_fill(dhd_bus);
+		}
+	}
+
+	return err;
+}
+
+int
+dbus_get_config(dbus_pub_t *pub, dbus_config_t *config)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_ERR;
+
+	if ((dhd_bus == NULL) || (config == NULL))
+		return DBUS_ERR;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->get_config) {
+		err = dhd_bus->drvintf->get_config(dhd_bus->bus_info,
+		config);
+	}
+
+	return err;
+}
+
+int
+dbus_set_errmask(dbus_pub_t *pub, uint32 mask)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_OK;
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	dhd_bus->errmask = mask;
+	return err;
+}
+
+int
+dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_ERR;
+	bool fwdl = FALSE;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	if (dhd_bus->pub.busstate == DBUS_STATE_UP) {
+		return DBUS_OK;
+	}
+
+
+
+	if (dhd_bus->drvintf->pnp) {
+		err = dhd_bus->drvintf->pnp(dhd_bus->bus_info,
+			DBUS_PNP_RESUME);
+	}
+
+	if (dhd_bus->drvintf->recv_needed) {
+		if (dhd_bus->drvintf->recv_needed(dhd_bus->bus_info)) {
+			/* Refill after sleep/hibernate */
+			dbus_rxirbs_fill(dhd_bus);
+		}
+	}
+
+
+	if (fw_reload)
+		*fw_reload = fwdl;
+
+	return err;
+} /* dbus_pnp_resume */
+
+int
+dbus_pnp_sleep(dbus_pub_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_ERR;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	dbus_tx_timer_stop(dhd_bus);
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) {
+		err = dhd_bus->drvintf->pnp(dhd_bus->bus_info,
+			DBUS_PNP_SLEEP);
+	}
+
+	return err;
+}
+
+int
+dbus_pnp_disconnect(dbus_pub_t *pub)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) pub;
+	int err = DBUS_ERR;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	dbus_tx_timer_stop(dhd_bus);
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->pnp) {
+		err = dhd_bus->drvintf->pnp(dhd_bus->bus_info,
+			DBUS_PNP_DISCONNECT);
+	}
+
+	return err;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *dhd_bus = (dhd_bus_t *) dhdp->bus;
+	int err = DBUS_ERR;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (dhd_bus == NULL)
+		return DBUS_ERR;
+
+	if (dhd_bus->drvintf && dhd_bus->drvintf->iovar_op) {
+		err = dhd_bus->drvintf->iovar_op(dhd_bus->bus_info,
+			name, params, plen, arg, len, set);
+	}
+
+	return err;
+}
+
+
+void *
+dhd_dbus_txq(const dbus_pub_t *pub)
+{
+	return NULL;
+}
+
+uint
+dhd_dbus_hdrlen(const dbus_pub_t *pub)
+{
+	return 0;
+}
+
+void *
+dbus_get_devinfo(dbus_pub_t *pub)
+{
+	return pub->dev_info;
+}
+
+#if defined(BCM_REQUEST_FW) && !defined(EXTERNAL_FW_PATH)
+static int
+dbus_otp(dhd_bus_t *dhd_bus, uint16 *boardtype, uint16 *boardrev)
+{
+	uint32 value = 0;
+	uint8 *cis;
+	uint16 *otpinfo;
+	uint32 i;
+	bool standard_cis = TRUE;
+	uint8 tup, tlen;
+	bool btype_present = FALSE;
+	bool brev_present = FALSE;
+	int ret;
+	int devid;
+	uint16 btype = 0;
+	uint16 brev = 0;
+	uint32 otp_size = 0, otp_addr = 0, otp_sw_rgn = 0;
+
+	if (dhd_bus == NULL || dhd_bus->drvintf == NULL ||
+		dhd_bus->drvintf->readreg == NULL)
+		return DBUS_ERR;
+
+	devid = dhd_bus->pub.attrib.devid;
+
+	if ((devid == BCM43234_CHIP_ID) || (devid == BCM43235_CHIP_ID) ||
+		(devid == BCM43236_CHIP_ID)) {
+
+		otp_size = BCM_OTP_SIZE_43236;
+		otp_sw_rgn = BCM_OTP_SW_RGN_43236;
+		otp_addr = BCM_OTP_ADDR_43236;
+
+	} else {
+		return DBUS_ERR_NVRAM;
+	}
+
+	cis = MALLOC(dhd_bus->pub.osh, otp_size * 2);
+	if (cis == NULL)
+		return DBUS_ERR;
+
+	otpinfo = (uint16 *) cis;
+
+	for (i = 0; i < otp_size; i++) {
+
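+		/* OTP words are 16 bits wide; << 1 turns the word index into a byte offset for readreg */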
+		ret = dhd_bus->drvintf->readreg(dhd_bus->bus_info,
+			otp_addr + ((otp_sw_rgn + i) << 1), 2, &value);
+
+		if (ret != DBUS_OK) {
+			MFREE(dhd_bus->pub.osh, cis, otp_size * 2);
+			return ret;
+		}
+		otpinfo[i] = (uint16) value;
+	}
+
+	for (i = 0; i < (otp_size << 1); ) {
+
+		if (standard_cis) {
+			tup = cis[i++];
+			if (tup == CISTPL_NULL || tup == CISTPL_END)
+				tlen = 0;
+			else
+				tlen = cis[i++];
+		} else {
+			if (cis[i] == CISTPL_NULL || cis[i] == CISTPL_END) {
+				tlen = 0;
+				tup = cis[i];
+			} else {
+				tlen = cis[i];
+				tup = CISTPL_BRCM_HNBU;
+			}
+			++i;
+		}
+
+		if (tup == CISTPL_END || (i + tlen) >= (otp_size << 1)) {
+			break;
+		}
+
+		switch (tup) {
+
+		case CISTPL_BRCM_HNBU:
+
+			switch (cis[i]) {
+
+			case HNBU_BOARDTYPE:
+
+				btype = (uint16) ((cis[i + 2] << 8) + cis[i + 1]);
+				btype_present = TRUE;
+				DBUSTRACE(("%s: HNBU_BOARDTYPE = 0x%2x\n", __FUNCTION__,
+					(uint32)btype));
+				break;
+
+			case HNBU_BOARDREV:
+
+				if (tlen == 2)
+					brev = (uint16) cis[i + 1];
+				else
+					brev = (uint16) ((cis[i + 2] << 8) + cis[i + 1]);
+				brev_present = TRUE;
+				DBUSTRACE(("%s: HNBU_BOARDREV = 0x%2x\n", __FUNCTION__,
+					(uint32)brev));
+				break;
+
+			case HNBU_HNBUCIS:
+				DBUSTRACE(("%s: HNBU_HNBUCIS\n", __FUNCTION__));
+				tlen++;
+				standard_cis = FALSE;
+				break;
+			}
+			break;
+		}
+
+		i += tlen;
+	}
+
+	MFREE(dhd_bus->pub.osh, cis, otp_size * 2);
+
+	if (btype_present == TRUE && brev_present == TRUE) {
+		*boardtype = btype;
+		*boardrev = brev;
+		DBUSERR(("otp boardtype = 0x%2x boardrev = 0x%2x\n",
+			*boardtype, *boardrev));
+
+		return DBUS_OK;
+	}
+	else
+		return DBUS_ERR;
+} /* dbus_otp */
+
+static int
+dbus_select_nvram(dhd_bus_t *dhd_bus, int8 *jumbonvram, int jumbolen,
+uint16 boardtype, uint16 boardrev, int8 **nvram, int *nvram_len)
+{
+	/* Multi-board nvram file format is a concatenation of nvram sets delimited by \r.
+	 * The file format for concatenated sets is
+	 * \nBroadcom Jumbo Nvram file\nfirst_set\nsecond_set\nthird_set\n
+	 */
+	uint8 *nvram_start = NULL, *nvram_end = NULL;
+	uint8 *nvram_start_prev = NULL, *nvram_end_prev = NULL;
+	uint16 btype = 0, brev = 0;
+	int len  = 0;
+	char *field;
+
+	*nvram = NULL;
+	*nvram_len = 0;
+
+	if (strncmp(BCM_JUMBO_START, jumbonvram, strlen(BCM_JUMBO_START))) {
+		/* single nvram file in the native format */
+		DBUSTRACE(("%s: Non-Jumbo NVRAM File \n", __FUNCTION__));
+		*nvram = jumbonvram;
+		*nvram_len = jumbolen;
+		return DBUS_OK;
+	} else {
+		DBUSTRACE(("%s: Jumbo NVRAM File \n", __FUNCTION__));
+	}
+
+	/* sanity test the end of the config sets for proper ending */
+	if (jumbonvram[jumbolen - 1] != BCM_JUMBO_NVRAM_DELIMIT ||
+		jumbonvram[jumbolen - 2] != '\0') {
+		DBUSERR(("%s: Bad Jumbo NVRAM file format\n", __FUNCTION__));
+		return DBUS_JUMBO_BAD_FORMAT;
+	}
+
+	dhd_bus->nvram_nontxt = DBUS_NVRAM_NONTXT;
+
+	nvram_start = jumbonvram;
+
+	while (*nvram_start != BCM_JUMBO_NVRAM_DELIMIT && len < jumbolen) {
+
+		/* consume the first file info line
+		 * \nBroadcom Jumbo Nvram file\nfile1\n ...
+		 */
+		len ++;
+		nvram_start ++;
+	}
+
+	nvram_end = nvram_start;
+
+	/* search for "boardrev=0xabcd" and "boardtype=0x1234" information in
+	* the concatenated nvram config files /sets
+	*/
+
+	while (len < jumbolen) {
+
+		if (*nvram_end == '\0') {
+			/* end of a config set is marked by multiple null characters */
+			len ++;
+			nvram_end ++;
+			DBUSTRACE(("%s: NULL chr len = %d char = 0x%x\n", __FUNCTION__,
+				len, *nvram_end));
+			continue;
+
+		} else if (*nvram_end == BCM_JUMBO_NVRAM_DELIMIT) {
+
+			/* config set delimiter is reached */
+			/* check if next config set is present or not
+			*  return  if next config is not present
+			*/
+
+			/* start search the next config set */
+			nvram_start_prev = nvram_start;
+			nvram_end_prev = nvram_end;
+
+			nvram_end ++;
+			nvram_start = nvram_end;
+			btype = brev = 0;
+			DBUSTRACE(("%s: going to next record len = %d "
+					"char = 0x%x \n", __FUNCTION__, len, *nvram_end));
+			len ++;
+			if (len >= jumbolen) {
+
+				*nvram = nvram_start_prev;
+				*nvram_len = (int)(nvram_end_prev - nvram_start_prev);
+
+				DBUSTRACE(("%s: no more len = %d nvram_end = 0x%p",
+					__FUNCTION__, len, nvram_end));
+
+				return DBUS_JUMBO_NOMATCH;
+
+			} else {
+				continue;
+			}
+
+		} else {
+
+			DBUSTRACE(("%s: config str = %s\n", __FUNCTION__, nvram_end));
+
+			if (bcmp(nvram_end, "boardtype", strlen("boardtype")) == 0) {
+
+				field = strchr(nvram_end, '=');
+				field++;
+				btype = (uint16)bcm_strtoul(field, NULL, 0);
+
+				DBUSTRACE(("%s: btype = 0x%x boardtype = 0x%x \n", __FUNCTION__,
+					btype, boardtype));
+			}
+
+			if (bcmp(nvram_end, "boardrev", strlen("boardrev")) == 0) {
+
+				field = strchr(nvram_end, '=');
+				field++;
+				brev = (uint16)bcm_strtoul(field, NULL, 0);
+
+				DBUSTRACE(("%s: brev = 0x%x boardrev = 0x%x \n", __FUNCTION__,
+					brev, boardrev));
+			}
+			if (btype == boardtype && brev == boardrev) {
+				/* locate nvram config set end, i.e. find the '\r' char */
+				while (*nvram_end != BCM_JUMBO_NVRAM_DELIMIT)
+					nvram_end ++;
+				*nvram = nvram_start;
+				*nvram_len = (int) (nvram_end - nvram_start);
+				DBUSTRACE(("found len = %d nvram_start = 0x%p "
+					"nvram_end = 0x%p\n", *nvram_len, nvram_start, nvram_end));
+				return DBUS_OK;
+			}
+
+			len += (strlen(nvram_end) + 1);
+			nvram_end += (strlen(nvram_end) + 1);
+		}
+	}
+	return DBUS_JUMBO_NOMATCH;
+} /* dbus_select_nvram */
+
+#endif /* BCM_REQUEST_FW && !EXTERNAL_FW_PATH */
+
+#define DBUS_NRXQ	50
+#define DBUS_NTXQ	100
+
+static void
+dhd_dbus_send_complete(void *handle, void *info, int status)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+	void *pkt = info;
+
+	if ((dhd == NULL) || (pkt == NULL)) {
+		DBUSERR(("dhd or pkt is NULL\n"));
+		return;
+	}
+
+	if (status == DBUS_OK) {
+		dhd->dstats.tx_packets++;
+	} else {
+		DBUSERR(("TX error=%d\n", status));
+		dhd->dstats.tx_errors++;
+	}
+#ifdef PROP_TXSTATUS
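+	/* when proptxstatus handles the completion it owns the packet, so skip the PKTFREE below */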
+	if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) &&
+		(dhd_wlfc_txcomplete(dhd, pkt, status == 0) != WLFC_UNSUPPORTED)) {
+		return;
+	}
+#endif /* PROP_TXSTATUS */
+	PKTFREE(dhd->osh, pkt, TRUE);
+}
+
+static void
+dhd_dbus_recv_pkt(void *handle, void *pkt)
+{
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+	uint pkt_count;
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+	int ifidx = 0;
+
+	if (dhd == NULL) {
+		DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	/* If the protocol uses a data header, check and remove it */
+	if (dhd_prot_hdrpull(dhd, &ifidx, pkt, reorder_info_buf,
+		&reorder_info_len) != 0) {
+		DBUSERR(("rx protocol error\n"));
+		PKTFREE(dhd->osh, pkt, FALSE);
+		dhd->rx_errors++;
+		return;
+	}
+
+	if (reorder_info_len) {
+		/* Reordering info from the firmware */
+		dhd_process_pkt_reorder_info(dhd, reorder_info_buf, reorder_info_len,
+			&pkt, &pkt_count);
+		if (pkt_count == 0)
+			return;
+	}
+	else {
+		pkt_count = 1;
+	}
+	dhd_rx_frame(dhd, ifidx, pkt, pkt_count, 0);
+}
+
+static void
+dhd_dbus_recv_buf(void *handle, uint8 *buf, int len)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+	void *pkt;
+
+	if (dhd == NULL) {
+		DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if ((pkt = PKTGET(dhd->osh, len, FALSE)) == NULL) {
+		DBUSERR(("PKTGET (rx) failed=%d\n", len));
+		return;
+	}
+
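+	/* copy the raw receive buffer into the new packet and hand it up as a pkt */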
+	bcopy(buf, PKTDATA(dhd->osh, pkt), len);
+	dhd_dbus_recv_pkt(dhd, pkt);
+}
+
+static void
+dhd_dbus_txflowcontrol(void *handle, bool onoff)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+	bool wlfc_enabled = FALSE;
+
+	if (dhd == NULL) {
+		DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, onoff, !onoff) != WLFC_UNSUPPORTED);
+#endif
+
+	if (!wlfc_enabled) {
+		dhd_txflowcontrol(dhd, ALL_INTERFACES, onoff);
+	}
+}
+
+static void
+dhd_dbus_errhandler(void *handle, int err)
+{
+}
+
+static void
+dhd_dbus_ctl_complete(void *handle, int type, int status)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+
+	if (dhd == NULL) {
+		DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (type == DBUS_CBCTL_READ) {
+		if (status == DBUS_OK)
+			dhd->rx_ctlpkts++;
+		else
+			dhd->rx_ctlerrs++;
+	} else if (type == DBUS_CBCTL_WRITE) {
+		if (status == DBUS_OK)
+			dhd->tx_ctlpkts++;
+		else
+			dhd->tx_ctlerrs++;
+	}
+
+	dhd_prot_ctl_complete(dhd);
+}
+
+static void
+dhd_dbus_state_change(void *handle, int state)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+
+	if (dhd == NULL) {
+		DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
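+	/* map DBUS bus states onto the DHD bus states used by the upper layers */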
+	switch (state) {
+
+		case DBUS_STATE_DL_NEEDED:
+			DBUSERR(("%s: firmware request cannot be handled\n", __FUNCTION__));
+			break;
+		case DBUS_STATE_DOWN:
+			DBUSTRACE(("%s: DBUS is down\n", __FUNCTION__));
+			dhd->busstate = DHD_BUS_DOWN;
+			break;
+		case DBUS_STATE_UP:
+			DBUSTRACE(("%s: DBUS is up\n", __FUNCTION__));
+			dhd->busstate = DHD_BUS_DATA;
+			break;
+		default:
+			break;
+	}
+
+	DBUSERR(("%s: DBUS current state=%d\n", __FUNCTION__, state));
+}
+
+static void *
+dhd_dbus_pktget(void *handle, uint len, bool send)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+	void *p = NULL;
+
+	if (dhd == NULL) {
+		DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if (send == TRUE) {
+		dhd_os_sdlock_txq(dhd);
+		p = PKTGET(dhd->osh, len, TRUE);
+		dhd_os_sdunlock_txq(dhd);
+	} else {
+		dhd_os_sdlock_rxq(dhd);
+		p = PKTGET(dhd->osh, len, FALSE);
+		dhd_os_sdunlock_rxq(dhd);
+	}
+
+	return p;
+}
+
+static void
+dhd_dbus_pktfree(void *handle, void *p, bool send)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)handle;
+
+	if (dhd == NULL) {
+		DBUSERR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (send == TRUE) {
+#ifdef PROP_TXSTATUS
+		if (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) &&
+			(dhd_wlfc_txcomplete(dhd, p, FALSE) != WLFC_UNSUPPORTED)) {
+			return;
+		}
+#endif /* PROP_TXSTATUS */
+
+		dhd_os_sdlock_txq(dhd);
+		PKTFREE(dhd->osh, p, TRUE);
+		dhd_os_sdunlock_txq(dhd);
+	} else {
+		dhd_os_sdlock_rxq(dhd);
+		PKTFREE(dhd->osh, p, FALSE);
+		dhd_os_sdunlock_rxq(dhd);
+	}
+}
+
+
+static dbus_callbacks_t dhd_dbus_cbs = {
+	dhd_dbus_send_complete,
+	dhd_dbus_recv_buf,
+	dhd_dbus_recv_pkt,
+	dhd_dbus_txflowcontrol,
+	dhd_dbus_errhandler,
+	dhd_dbus_ctl_complete,
+	dhd_dbus_state_change,
+	dhd_dbus_pktget,
+	dhd_dbus_pktfree
+};
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus != NULL);
+	return bus->pub.attrib.devid;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus != NULL);
+	return bus->pub.attrib.chiprev;
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "Bus USB\n");
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+}
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pktbuf)
+{
+	DBUSTRACE(("%s\n", __FUNCTION__));
+	if (bus->txoff) {
+		DBUSTRACE(("txoff\n"));
+		return BCME_EPERM;
+	}
+	return dbus_send_txdata(&bus->pub, pktbuf);
+}
+
+static void
+dhd_dbus_advertise_bus_cleanup(dhd_pub_t *dhdp)
+{
+	unsigned long flags;
+	int timeleft;
+
+	DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+	dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+	DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+	if ((timeleft == 0) || (timeleft == 1)) {
+		DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+				__FUNCTION__, dhdp->dhd_bus_busy_state));
+		ASSERT(0);
+	}
+
+	return;
+}
+
+static void
+dhd_dbus_advertise_bus_remove(dhd_pub_t *dhdp)
+{
+	unsigned long flags;
+	int timeleft;
+
+	DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+	dhdp->busstate = DHD_BUS_REMOVE;
+	DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+	if ((timeleft == 0) || (timeleft == 1)) {
+		DBUSERR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+				__FUNCTION__, dhdp->dhd_bus_busy_state));
+		ASSERT(0);
+	}
+
+	return;
+}
+
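+/*
+ * Summary of the code below, for readability:
+ *   flag == TRUE  - WLAN power-off path: advertise bus cleanup, stop the
+ *                   watchdog and DBUS, and mark the bus DHD_BUS_DOWN.
+ *   flag == FALSE - WLAN power-on path: wait for WIFI_STATUS_FW_READY,
+ *                   bring DBUS up and restart the watchdog timer.
+ */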
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	unsigned long flags;
+	wifi_adapter_info_t *adapter = (wifi_adapter_info_t *)dhdp->adapter;
+
+	if (flag == TRUE) {
+		if (!dhdp->dongle_reset) {
+			DBUSERR(("%s: == Power OFF ==\n", __FUNCTION__));
+			dhd_dbus_advertise_bus_cleanup(dhdp);
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop comes before ifconfig_down */
+			dhd_txflowcontrol(dhdp, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			dbus_stop(dhdp->bus);
+
+			dhdp->dongle_reset = TRUE;
+			dhdp->up = FALSE;
+
+			DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+			dhdp->busstate = DHD_BUS_DOWN;
+			DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+			wifi_clr_adapter_status(adapter, WIFI_STATUS_FW_READY);
+
+			printf("%s:  WLAN OFF DONE\n", __FUNCTION__);
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+		printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__);
+		if (dhdp->dongle_reset) {
+			/* Turn on WLAN */
+			DHD_MUTEX_UNLOCK();
+			wait_event_interruptible_timeout(adapter->status_event,
+				wifi_get_adapter_status(adapter, WIFI_STATUS_FW_READY),
+				msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
+			DHD_MUTEX_LOCK();
+			bcmerror = dbus_up(dhdp->bus);
+			if (bcmerror == BCME_OK) {
+				dhdp->dongle_reset = FALSE;
+				dhdp->up = TRUE;
+#if !defined(IGNORE_ETH0_DOWN)
+				/* Restore flow control  */
+				dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+				dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+				DBUSTRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+			} else {
+				DBUSERR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, bcmerror));
+			}
+		}
+	}
+
+#ifdef PKT_STATICS
+	memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t));
+#endif
+	return bcmerror;
+}
+
+void
+dhd_set_path_params(struct dhd_bus *bus)
+{
+	/* External conf takes precedence if specified */
+	dhd_conf_preinit(bus->dhd);
+
+	if (bus->dhd->conf_path[0] == '\0') {
+		dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
+	}
+	if (bus->dhd->clm_path[0] == '\0') {
+		dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
+	}
+#ifdef CONFIG_PATH_AUTO_SELECT
+	dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
+#endif
+
+	dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
+
+	dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
+	dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
+	dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+
+	printf("Final fw_path=%s\n", bus->fw_path);
+	printf("Final nv_path=%s\n", bus->nv_path);
+	printf("Final clm_path=%s\n", bus->dhd->clm_path);
+	printf("Final conf_path=%s\n", bus->dhd->conf_path);
+
+}
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path,
+	char *pnv_path, char *pclm_path, char *pconf_path)
+{
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		DBUSERR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+	bus->dhd->clm_path = pclm_path;
+	bus->dhd->conf_path = pconf_path;
+
+	dhd_set_path_params(bus);
+
+}
+
+/*
+ * hdrlen is space to reserve in pkt headroom for DBUS
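+ *
+ * Probe flow, summarizing the code below: on the first probe (g_pub == NULL)
+ * attach the OSL and DHD layers, then attach DBUS; if a firmware download is
+ * needed (dlneeded > 0) download it, and finally register the net interface
+ * and publish g_pub.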
+ */
+void *
+dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype,
+	uint16 bus_no, uint16 slot, uint32 hdrlen)
+{
+	osl_t *osh = NULL;
+	dhd_bus_t *bus = NULL;
+	dhd_pub_t *pub = NULL;
+	uint rxsz;
+	int dlneeded = 0;
+	wifi_adapter_info_t *adapter = NULL;
+
+	DBUSTRACE(("%s: Enter\n", __FUNCTION__));
+
+	adapter = dhd_wifi_platform_get_adapter(bustype, bus_no, slot);
+
+	if (!g_pub) {
+		/* Ask the OS interface part for an OSL handle */
+		if (!(osh = osl_attach(NULL, bustype, TRUE))) {
+			DBUSERR(("%s: OSL attach failed\n", __FUNCTION__));
+			goto fail;
+		}
+
+		/* Attach to the dhd/OS interface */
+		if (!(pub = dhd_attach(osh, bus, hdrlen, adapter))) {
+			DBUSERR(("%s: dhd_attach failed\n", __FUNCTION__));
+			goto fail;
+		}
+	} else {
+		pub = g_pub;
+		osh = pub->osh;
+	}
+
+	if (pub->bus) {
+		DBUSERR(("%s: wrong probe\n", __FUNCTION__));
+		goto fail;
+	}
+
+	rxsz = dhd_get_rxsz(pub);
+	bus = dbus_attach(osh, rxsz, DBUS_NRXQ, DBUS_NTXQ, pub, &dhd_dbus_cbs, NULL, NULL);
+	if (bus) {
+		pub->bus = bus;
+		bus->dhd = pub;
+
+		dlneeded = dbus_dlneeded(bus);
+		if (dlneeded >= 0) {
+			if (!g_pub) {
+				dhd_conf_reset(pub);
+				dhd_conf_set_chiprev(pub, bus->pub.attrib.devid, bus->pub.attrib.chiprev);
+				dhd_conf_preinit(pub);
+			}
+		}
+
+		if (g_pub || dhd_download_fw_on_driverload) {
+			if (dlneeded == 0) {
+				wifi_set_adapter_status(adapter, WIFI_STATUS_FW_READY);
+#ifdef BCM_REQUEST_FW
+			} else if (dlneeded > 0) {
+				dhd_set_path(bus->dhd);
+				if (dbus_download_firmware(bus, bus->fw_path, bus->nv_path) != DBUS_OK)
+					goto fail;
+#endif
+			}
+		}
+	} else {
+		DBUSERR(("%s: dbus_attach failed\n", __FUNCTION__));
+	}
+
+	if (!g_pub) {
+		/* Ok, finish the attach to the OS network interface */
+		if (dhd_register_if(pub, 0, TRUE) != 0) {
+			DBUSERR(("%s: dhd_register_if failed\n", __FUNCTION__));
+			goto fail;
+		}
+		pub->hang_report  = TRUE;
+#if defined(MULTIPLE_SUPPLICANT)
+		wl_android_post_init(); /* terence 20120530: fix critical section in dhd_open and dhdsdio_probe */
+#endif
+		g_pub = pub;
+	}
+
+	DBUSTRACE(("%s: Exit\n", __FUNCTION__));
+	wifi_clr_adapter_status(adapter, WIFI_STATUS_DETTACH);
+	wifi_set_adapter_status(adapter, WIFI_STATUS_ATTACH);
+	wake_up_interruptible(&adapter->status_event);
+	/* This is passed to dhd_dbus_disconnect_cb */
+	return bus;
+
+fail:
+	if (pub && pub->bus) {
+		dbus_detach(pub->bus);
+		pub->bus = NULL;
+	}
+	/* Release resources in reverse order */
+	if (!g_pub) {
+		if (pub) {
+			dhd_detach(pub);
+			dhd_free(pub);
+		}
+		if (osh) {
+			osl_detach(osh);
+		}
+	}
+
+	printf("%s: Failed\n", __FUNCTION__);
+	return NULL;
+}
+
+void
+dhd_dbus_disconnect_cb(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)arg;
+	dhd_pub_t *pub = g_pub;
+	osl_t *osh;
+	wifi_adapter_info_t *adapter = NULL;
+
+	if (pub == NULL) {
+		DBUSERR(("%s: pub is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	/* Only dereference pub after the NULL check above */
+	adapter = (wifi_adapter_info_t *)pub->adapter;
+
+	if (!pub->dhd_remove && bus == NULL) {
+		DBUSERR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+	if (!adapter) {
+		DBUSERR(("%s: adapter is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	printf("%s: Enter dhd_remove=%d on %s\n", __FUNCTION__,
+		pub->dhd_remove, adapter->name);
+	if (!pub->dhd_remove) {
+		/* Advertise bus remove during rmmod */
+		dhd_dbus_advertise_bus_remove(bus->dhd);
+		dbus_detach(pub->bus);
+		pub->bus = NULL;
+		wifi_clr_adapter_status(adapter, WIFI_STATUS_ATTACH);
+		wifi_set_adapter_status(adapter, WIFI_STATUS_DETTACH);
+		wake_up_interruptible(&adapter->status_event);
+	} else {
+		osh = pub->osh;
+		dhd_detach(pub);
+		if (pub->bus) {
+			dbus_detach(pub->bus);
+			pub->bus = NULL;
+		}
+		dhd_free(pub);
+		g_pub = NULL;
+		if (MALLOCED(osh)) {
+			DBUSERR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		}
+		osl_detach(osh);
+	}
+
+	DBUSTRACE(("%s: Exit\n", __FUNCTION__));
+}
+
+#ifdef LINUX_EXTERNAL_MODULE_DBUS
+
+static int __init
+bcm_dbus_module_init(void)
+{
+	printf("Inserting bcm_dbus module \n");
+	return 0;
+}
+
+static void __exit
+bcm_dbus_module_exit(void)
+{
+	printf("Removing bcm_dbus module \n");
+	return;
+}
+
+EXPORT_SYMBOL(dbus_pnp_sleep);
+EXPORT_SYMBOL(dbus_get_devinfo);
+EXPORT_SYMBOL(dbus_detach);
+EXPORT_SYMBOL(dbus_get_attrib);
+EXPORT_SYMBOL(dbus_down);
+EXPORT_SYMBOL(dbus_pnp_resume);
+EXPORT_SYMBOL(dbus_set_config);
+EXPORT_SYMBOL(dbus_flowctrl_rx);
+EXPORT_SYMBOL(dbus_up);
+EXPORT_SYMBOL(dbus_get_device_speed);
+EXPORT_SYMBOL(dbus_send_pkt);
+EXPORT_SYMBOL(dbus_recv_ctl);
+EXPORT_SYMBOL(dbus_attach);
+
+MODULE_LICENSE("GPL");
+
+module_init(bcm_dbus_module_init);
+module_exit(bcm_dbus_module_exit);
+
+#endif  /* #ifdef LINUX_EXTERNAL_MODULE_DBUS */
diff --git a/drivers/net/wireless/bcmdhd/dbus_usb.c b/drivers/net/wireless/bcmdhd/dbus_usb.c
new file mode 100644
index 0000000..8a496dd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dbus_usb.c
@@ -0,0 +1,1172 @@
+/*
+ * Dongle BUS interface for USB, OS independent
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus_usb.c 565557 2015-06-22 19:29:44Z $
+ */
+
+/**
+ * @file @brief
+ * This file contains DBUS code that is USB, but not OS specific. DBUS is a Broadcom proprietary
+ * host specific abstraction layer.
+ */
+
+#include <osl.h>
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <dbus.h>
+#include <usbrdl.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+
+uint dbus_msglevel = DBUS_ERROR_VAL;
+module_param(dbus_msglevel, int, 0);
+
+
+#define USB_DLIMAGE_RETRY_TIMEOUT    3000    /* retry Timeout */
+#define USB_SFLASH_DLIMAGE_SPINWAIT  150     /* in unit of ms */
+#define USB_SFLASH_DLIMAGE_LIMIT     2000    /* spinwait limit (ms) */
+#define POSTBOOT_ID                  0xA123  /* ID to detect if dongle has booted up */
+#define USB_RESETCFG_SPINWAIT        1       /* wait after resetcfg (ms) */
+#define USB_DEV_ISBAD(u)             (u->pub->attrib.devid == 0xDEAD)
+#define USB_DLGO_SPINWAIT            100     /* wait after DL_GO (ms) */
+#define TEST_CHIP                    0x4328
+
+typedef struct {
+	dbus_pub_t  *pub;
+
+	void        *cbarg;
+	dbus_intf_callbacks_t *cbs;  /** callbacks into higher DBUS level (dbus.c) */
+	dbus_intf_t *drvintf;
+	void        *usbosl_info;
+	uint32      rdlram_base_addr;
+	uint32      rdlram_size;
+} usb_info_t;
+
+/*
+ * Callbacks common to all USB
+ */
+static void dbus_usb_disconnect(void *handle);
+static void dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb);
+static void dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status);
+static void dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status);
+static void dbus_usb_errhandler(void *handle, int err);
+static void dbus_usb_ctl_complete(void *handle, int type, int status);
+static void dbus_usb_state_change(void *handle, int state);
+static struct dbus_irb* dbus_usb_getirb(void *handle, bool send);
+static void dbus_usb_rxerr_indicate(void *handle, bool on);
+#if !defined(BCM_REQUEST_FW)
+static int dbus_usb_resetcfg(usb_info_t *usbinfo);
+#endif
+static int dbus_usb_iovar_op(void *bus, const char *name,
+	void *params, int plen, void *arg, int len, bool set);
+static int dbus_iovar_process(usb_info_t* usbinfo, const char *name,
+                 void *params, int plen, void *arg, int len, bool set);
+static int dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid,
+	const char *name, void *params, int plen, void *arg, int len, int val_size);
+static int dhdusb_downloadvars(usb_info_t *bus, void *arg, int len);
+
+static int dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen);
+static int dbus_usb_dlstart(void *bus, uint8 *fw, int len);
+static int dbus_usb_dlneeded(void *bus);
+static int dbus_usb_dlrun(void *bus);
+static int dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo);
+
+
+/* OS specific */
+extern bool dbus_usbos_dl_cmd(void *info, uint8 cmd, void *buffer, int buflen);
+extern int dbus_usbos_wait(void *info, uint16 ms);
+extern int dbus_write_membytes(usb_info_t *usbinfo, bool set, uint32 address,
+	uint8 *data, uint size);
+extern bool dbus_usbos_dl_send_bulk(void *info, void *buffer, int len);
+extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size);
+
+/**
+ * These functions are called by the lower DBUS level (dbus_usb_os.c) to notify this DBUS level
+ * (dbus_usb.c) of an event.
+ */
+static dbus_intf_callbacks_t dbus_usb_intf_cbs = {
+	dbus_usb_send_irb_timeout,
+	dbus_usb_send_irb_complete,
+	dbus_usb_recv_irb_complete,
+	dbus_usb_errhandler,
+	dbus_usb_ctl_complete,
+	dbus_usb_state_change,
+	NULL,			/* isr */
+	NULL,			/* dpc */
+	NULL,			/* watchdog */
+	NULL,			/* dbus_if_pktget */
+	NULL, 			/* dbus_if_pktfree */
+	dbus_usb_getirb,
+	dbus_usb_rxerr_indicate
+};
+
+/* IOVar table */
+enum {
+	IOV_SET_DOWNLOAD_STATE = 1,
+	IOV_DBUS_MSGLEVEL,
+	IOV_MEMBYTES,
+	IOV_VARS,
+	IOV_LOOPBACK_TX
+};
+
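+/*
+ * Column order assumed from the bcm_iovar_t definition (in bcmutils.h):
+ * name, variable id, flags, value type, minimum buffer length.
+ */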
+const bcm_iovar_t dhdusb_iovars[] = {
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"dbus_msglevel",	IOV_DBUS_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"usb_lb_txfer", IOV_LOOPBACK_TX, 0,    IOVT_BUFFER,    2 * sizeof(int) },
+	{NULL, 0, 0, 0, 0 }
+};
+
+/*
+ * Need global for probe() and disconnect() since
+ * attach() is not called at probe and detach()
+ * can be called inside disconnect()
+ */
+static probe_cb_t	probe_cb = NULL;
+static disconnect_cb_t	disconnect_cb = NULL;
+static void		*probe_arg = NULL;
+static void		*disc_arg = NULL;
+static dbus_intf_t	*g_dbusintf = NULL;
+static dbus_intf_t	dbus_usb_intf; /** functions called by higher layer DBUS into lower layer */
+
+/*
+ * dbus_intf_t common to all USB
+ * These functions override dbus_usb_<os>.c.
+ */
+static void *dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs);
+static void dbus_usb_detach(dbus_pub_t *pub, void *info);
+static void * dbus_usb_probe(void *arg, const char *desc, uint32 bustype,
+	uint16 bus_no, uint16 slot, uint32 hdrlen);
+
+/* functions */
+
+/**
+ * As part of DBUS initialization/registration, the higher level DBUS (dbus.c) needs to know what
+ * lower level DBUS functions to call (in both dbus_usb.c and dbus_usb_os.c).
+ */
+static void *
+dbus_usb_probe(void *arg, const char *desc, uint32 bustype, uint16 bus_no,
+	uint16 slot, uint32 hdrlen)
+{
+	DBUSTRACE(("%s(): \n", __FUNCTION__));
+	if (probe_cb) {
+
+		if (g_dbusintf != NULL) {
+			/* First, initialize all lower-level functions as default
+			 * so that dbus.c simply calls directly to dbus_usb_os.c.
+			 */
+			bcopy(g_dbusintf, &dbus_usb_intf, sizeof(dbus_intf_t));
+
+			/* Second, selectively override functions we need, if any. */
+			dbus_usb_intf.attach = dbus_usb_attach;
+			dbus_usb_intf.detach = dbus_usb_detach;
+			dbus_usb_intf.iovar_op = dbus_usb_iovar_op;
+			dbus_usb_intf.dlstart = dbus_usb_dlstart;
+			dbus_usb_intf.dlneeded = dbus_usb_dlneeded;
+			dbus_usb_intf.dlrun = dbus_usb_dlrun;
+		}
+
+		disc_arg = probe_cb(probe_arg, "DBUS USB", USB_BUS, bus_no, slot, hdrlen);
+		return disc_arg;
+	}
+
+	return NULL;
+}
+
+/**
+ * On return, *intf contains this or lower-level DBUS functions to be called by higher
+ * level (dbus.c)
+ */
+int
+dbus_bus_register(int vid, int pid, probe_cb_t prcb,
+	disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2)
+{
+	int err;
+
+	DBUSTRACE(("%s(): \n", __FUNCTION__));
+	probe_cb = prcb;
+	disconnect_cb = discb;
+	probe_arg = prarg;
+
+	*intf = &dbus_usb_intf;
+
+	err = dbus_bus_osl_register(vid, pid, dbus_usb_probe,
+		dbus_usb_disconnect, NULL, &g_dbusintf, param1, param2);
+
+	ASSERT(g_dbusintf);
+	return err;
+}
+
+int
+dbus_bus_deregister()
+{
+	DBUSTRACE(("%s(): \n", __FUNCTION__));
+	return dbus_bus_osl_deregister();
+}
+
+/** initialization consists of registration followed by 'attach'. */
+void *
+dbus_usb_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs)
+{
+	usb_info_t *usb_info;
+
+	DBUSTRACE(("%s(): \n", __FUNCTION__));
+
+	if ((g_dbusintf == NULL) || (g_dbusintf->attach == NULL))
+		return NULL;
+
+	/* Sanity check for BUS_INFO() */
+	ASSERT(OFFSETOF(usb_info_t, pub) == 0);
+
+	usb_info = MALLOC(pub->osh, sizeof(usb_info_t));
+	if (usb_info == NULL)
+		return NULL;
+
+	bzero(usb_info, sizeof(usb_info_t));
+
+	usb_info->pub = pub;
+	usb_info->cbarg = cbarg;
+	usb_info->cbs = cbs;
+
+	usb_info->usbosl_info = (dbus_pub_t *)g_dbusintf->attach(pub,
+		usb_info, &dbus_usb_intf_cbs);
+	if (usb_info->usbosl_info == NULL) {
+		MFREE(pub->osh, usb_info, sizeof(usb_info_t));
+		return NULL;
+	}
+
+	/* Save USB OS-specific driver entry points */
+	usb_info->drvintf = g_dbusintf;
+
+	pub->bus = usb_info;
+#if !defined(BCM_REQUEST_FW)
+	if (!dbus_usb_resetcfg(usb_info)) {
+		usb_info->pub->busstate = DBUS_STATE_DL_DONE;
+	}
+#endif
+	/* Return Lower layer info */
+	return (void *) usb_info->usbosl_info;
+}
+
+void
+dbus_usb_detach(dbus_pub_t *pub, void *info)
+{
+	usb_info_t *usb_info = (usb_info_t *) pub->bus;
+	osl_t *osh = pub->osh;
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->drvintf && usb_info->drvintf->detach)
+		usb_info->drvintf->detach(pub, usb_info->usbosl_info);
+
+	MFREE(osh, usb_info, sizeof(usb_info_t));
+}
+
+void
+dbus_usb_disconnect(void *handle)
+{
+	DBUSTRACE(("%s(): \n", __FUNCTION__));
+	if (disconnect_cb)
+		disconnect_cb(disc_arg);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_send_irb_timeout(void *handle, dbus_irb_tx_t *txirb)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->cbs && usb_info->cbs->send_irb_timeout)
+		usb_info->cbs->send_irb_timeout(usb_info->cbarg, txirb);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_send_irb_complete(void *handle, dbus_irb_tx_t *txirb, int status)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->cbs && usb_info->cbs->send_irb_complete)
+		usb_info->cbs->send_irb_complete(usb_info->cbarg, txirb, status);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_recv_irb_complete(void *handle, dbus_irb_rx_t *rxirb, int status)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->cbs && usb_info->cbs->recv_irb_complete)
+		usb_info->cbs->recv_irb_complete(usb_info->cbarg, rxirb, status);
+}
+
+/** Lower DBUS level (dbus_usb_os.c) requests a free IRB. Pass this on to the higher DBUS level. */
+static struct dbus_irb*
+dbus_usb_getirb(void *handle, bool send)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	if (usb_info == NULL)
+		return NULL;
+
+	if (usb_info->cbs && usb_info->cbs->getirb)
+		return usb_info->cbs->getirb(usb_info->cbarg, send);
+
+	return NULL;
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_rxerr_indicate(void *handle, bool on)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->cbs && usb_info->cbs->rxerr_indicate)
+		usb_info->cbs->rxerr_indicate(usb_info->cbarg, on);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_errhandler(void *handle, int err)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->cbs && usb_info->cbs->errhandler)
+		usb_info->cbs->errhandler(usb_info->cbarg, err);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_ctl_complete(void *handle, int type, int status)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (usb_info == NULL) {
+		DBUSERR(("%s: usb_info is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (usb_info->cbs && usb_info->cbs->ctl_complete)
+		usb_info->cbs->ctl_complete(usb_info->cbarg, type, status);
+}
+
+/**
+ * When the lower DBUS level (dbus_usb_os.c) signals this event, the higher DBUS level has to be
+ * notified.
+ */
+static void
+dbus_usb_state_change(void *handle, int state)
+{
+	usb_info_t *usb_info = (usb_info_t *) handle;
+
+	if (usb_info == NULL)
+		return;
+
+	if (usb_info->cbs && usb_info->cbs->state_change)
+		usb_info->cbs->state_change(usb_info->cbarg, state);
+}
+
+/** called by higher DBUS level (dbus.c) */
+static int
+dbus_usb_iovar_op(void *bus, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	int err = DBUS_OK;
+
+	err = dbus_iovar_process((usb_info_t*)bus, name, params, plen, arg, len, set);
+	return err;
+}
+
+/** process iovar request from higher DBUS level */
+static int
+dbus_iovar_process(usb_info_t* usbinfo, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DBUSTRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdusb_iovars, name)) == NULL) {
+		/* Not Supported */
+		bcmerror = BCME_UNSUPPORTED;
+		DBUSTRACE(("%s: IOVAR %s is not supported\n", __FUNCTION__, name));
+		goto exit;
+
+	}
+
+	DBUSTRACE(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dbus_usb_doiovar(usbinfo, vi, actionid,
+		name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+} /* dbus_iovar_process */
+
+static int
+dbus_usb_doiovar(usb_info_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	int32 int_val2 = 0;
+	bool bool_val = 0;
+
+	DBUSTRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	if (plen >= (int)sizeof(int_val) * 2)
+		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	switch (actionid) {
+
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
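+		/*
+		 * Parameter layout handled below (illustrative, hypothetical values):
+		 *   params = [ int32 address ][ int32 size ][ payload, set only ]
+		 * e.g. a 16-byte write to 0x00180000 arrives with plen = 8 + 16 = 24,
+		 * so dsize = plen - 2*sizeof(int) = 16 >= size.
+		 */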
+		address = (uint32)int_val;
+		BCM_REFERENCE(address);
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DBUSTRACE(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		DBUSTRACE(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+		          (set ? "write" : "read"), size, address));
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dbus_usb_dl_writeimage(BUS_INFO(bus, usb_info_t), data, size);
+	}
+		break;
+
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+
+		if (bool_val == TRUE) {
+			bcmerror = dbus_usb_dlneeded(bus);
+			dbus_usb_rdl_dwnld_state(BUS_INFO(bus, usb_info_t));
+		} else {
+			usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+			bcmerror = dbus_usb_dlrun(bus);
+			usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+		}
+		break;
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdusb_downloadvars(BUS_INFO(bus, usb_info_t), arg, len);
+		break;
+
+	case IOV_GVAL(IOV_DBUS_MSGLEVEL):
+		int_val = (int32)dbus_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DBUS_MSGLEVEL):
+		dbus_msglevel = int_val;
+		break;
+
+#ifdef DBUS_USB_LOOPBACK
+	case IOV_SVAL(IOV_LOOPBACK_TX):
+			bcmerror = dbus_usbos_loopback_tx(BUS_INFO(bus, usb_info_t), int_val,
+			  int_val2);
+			break;
+#endif
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	return bcmerror;
+} /* dbus_usb_doiovar */
+
+/** higher DBUS level (dbus.c) wants to set NVRAM variables in dongle */
+static int
+dhdusb_downloadvars(usb_info_t *bus, void *arg, int len)
+{
+	int bcmerror = 0;
+	uint32 varsize;
+	uint32 varaddr;
+	uint32 varsizew;
+
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* RAM size must have been set by dbus_usb_dlneeded() */
+	if (!bus->rdlram_size)
+		bcmerror = BCME_ERROR;
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = len ? ROUNDUP(len, 4) : 0;
+	varaddr = (bus->rdlram_size - 4) - varsize;
+
+	/* Write the vars list */
+	DBUSTRACE(("WriteVars: @%x varsize=%d\n", varaddr, varsize));
+	bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, (varaddr + bus->rdlram_base_addr),
+		arg, varsize);
+
+	/* adjust to the user specified RAM */
+	DBUSTRACE(("Usable memory size: %d\n", bus->rdlram_size));
+	DBUSTRACE(("Vars are at %d, orig varsize is %d\n", varaddr, varsize));
+
+	varsize = ((bus->rdlram_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		varsizew = htol32(varsizew);
+	}
+
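+	/*
+	 * Worked example with hypothetical numbers: varsize = 392 (0x188) bytes
+	 * gives varsizew = 98 (0x62) words, so the token becomes
+	 * (~0x62 << 16) | 0x62 = 0xFF9D0062 before the htol32() conversion.
+	 */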
+	DBUSTRACE(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dbus_write_membytes(bus->usbosl_info, TRUE, ((bus->rdlram_size - 4) +
+		bus->rdlram_base_addr), (uint8*)&varsizew, 4);
+err:
+	return bcmerror;
+} /* dhdusb_downloadvars */
+
+#if !defined(BCM_REQUEST_FW)
+/**
+ * After downloading firmware into dongle and starting it, we need to know if the firmware is
+ * indeed up and running.
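+ *
+ * The check below polls DL_GETVER until the returned bootrom id equals
+ * POSTBOOT_ID (or USB_DLIMAGE_RETRY_TIMEOUT ms elapse), then issues
+ * DL_RESETCFG so the dongle can finalize its configuration.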
+ */
+static int
+dbus_usb_resetcfg(usb_info_t *usbinfo)
+{
+	void *osinfo;
+	bootrom_id_t id;
+	uint16 waittime = 0;
+
+	uint32 starttime = 0;
+	uint32 endtime = 0;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (usbinfo == NULL)
+		return DBUS_ERR;
+
+	osinfo = usbinfo->usbosl_info;
+	ASSERT(osinfo);
+
+	/* Give dongle chance to boot */
+	dbus_usbos_wait(osinfo, USB_SFLASH_DLIMAGE_SPINWAIT);
+	waittime = USB_SFLASH_DLIMAGE_SPINWAIT;
+	while (waittime < USB_DLIMAGE_RETRY_TIMEOUT) {
+
+		starttime = OSL_SYSUPTIME();
+
+		id.chip = 0xDEAD;       /* Get the ID */
+		dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t));
+		id.chip = ltoh32(id.chip);
+
+		endtime = OSL_SYSUPTIME();
+		waittime += (endtime - starttime);
+
+		if (id.chip == POSTBOOT_ID)
+			break;
+	}
+
+	if (id.chip == POSTBOOT_ID) {
+		DBUSERR(("%s: download done. Bootup time = %d ms postboot chip 0x%x/rev 0x%x\n",
+			__FUNCTION__, waittime, id.chip, id.chiprev));
+
+		dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t));
+
+		dbus_usbos_wait(osinfo, USB_RESETCFG_SPINWAIT);
+		return DBUS_OK;
+	} else {
+		DBUSERR(("%s: Cannot talk to Dongle. Wait time = %d ms. Firmware is not UP \n",
+			__FUNCTION__, waittime));
+		return DBUS_ERR;
+	}
+
+	return DBUS_OK;
+}
+#endif
+
+/** before firmware download, the dongle has to be prepared to receive the fw image */
+static int
+dbus_usb_rdl_dwnld_state(usb_info_t *usbinfo)
+{
+	void *osinfo = usbinfo->usbosl_info;
+	rdl_state_t state;
+	int err = DBUS_OK;
+
+	/* 1) Prepare USB boot loader for runtime image */
+	dbus_usbos_dl_cmd(osinfo, DL_START, &state, sizeof(rdl_state_t));
+
+	state.state = ltoh32(state.state);
+	state.bytes = ltoh32(state.bytes);
+
+	/* 2) Check we are in the Waiting state */
+	if (state.state != DL_WAITING) {
+		DBUSERR(("%s: Failed to DL_START\n", __FUNCTION__));
+		err = DBUS_ERR;
+		goto fail;
+	}
+
+fail:
+	return err;
+}
+
+/**
+ * Dongle contains bootcode in ROM but firmware is (partially) contained in dongle RAM. Therefore,
+ * firmware has to be downloaded into dongle RAM.
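+ *
+ * Download flow, summarizing the existing code (no new behaviour):
+ *   1. DL_START      puts the boot loader into DL_WAITING (dbus_usb_rdl_dwnld_state)
+ *   2. bulk OUT      streams the image in chunks of at most dl_trunk_size bytes
+ *   3. DL_GETSTATE   is checked after each chunk until the loader reports it landed
+ *   4. DL_GO         starts the downloaded image (dbus_usb_dlrun)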
+ */
+static int
+dbus_usb_dl_writeimage(usb_info_t *usbinfo, uint8 *fw, int fwlen)
+{
+	osl_t *osh = usbinfo->pub->osh;
+	void *osinfo = usbinfo->usbosl_info;
+	unsigned int sendlen, sent, dllen;
+	char *bulkchunk = NULL, *dlpos;
+	rdl_state_t state;
+	int err = DBUS_OK;
+	bootrom_id_t id;
+	uint16 wait, wait_time;
+	uint32 dl_trunk_size = RDL_CHUNK;
+
+	if (BCM4350_CHIP(usbinfo->pub->attrib.devid))
+		dl_trunk_size = RDL_CHUNK_MAX;
+
+	while (!bulkchunk) {
+		bulkchunk = MALLOC(osh, dl_trunk_size);
+		if (dl_trunk_size == RDL_CHUNK)
+			break;
+		if (!bulkchunk) {
+			dl_trunk_size /= 2;
+			if (dl_trunk_size < RDL_CHUNK)
+				dl_trunk_size = RDL_CHUNK;
+		}
+	}
+
+	if (bulkchunk == NULL) {
+		err = DBUS_ERR;
+		goto fail;
+	}
+
+	sent = 0;
+	dlpos = fw;
+	dllen = fwlen;
+
+	/* Get chip id and rev */
+	id.chip = usbinfo->pub->attrib.devid;
+	id.chiprev = usbinfo->pub->attrib.chiprev;
+
+	DBUSTRACE(("enter %s: fwlen=%d\n", __FUNCTION__, fwlen));
+
+	dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t));
+
+	/* 3) Load the image */
+	while ((sent < dllen)) {
+		/* Wait until the usb device reports it received all the bytes we sent */
+
+		if (sent < dllen) {
+			if ((dllen-sent) < dl_trunk_size)
+				sendlen = dllen-sent;
+			else
+				sendlen = dl_trunk_size;
+
+			/* simply avoid having to send a ZLP by ensuring we never have an even
+			 * multiple of 64
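+			 * (e.g. a hypothetical 1600-byte chunk, an exact multiple of 64,
+			 * is trimmed to 1596 bytes; the remaining 4 bytes simply go out
+			 * with the next chunk, since 'sent' only advances by 'sendlen')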
+			 */
+			if (!(sendlen % 64))
+				sendlen -= 4;
+
+			/* send data */
+			memcpy(bulkchunk, dlpos, sendlen);
+			if (!dbus_usbos_dl_send_bulk(osinfo, bulkchunk, sendlen)) {
+				err = DBUS_ERR;
+				goto fail;
+			}
+
+			dlpos += sendlen;
+			sent += sendlen;
+			DBUSTRACE(("%s: sendlen %d\n", __FUNCTION__, sendlen));
+		}
+
+		wait = 0;
+		wait_time = USB_SFLASH_DLIMAGE_SPINWAIT;
+		while (!dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state,
+			sizeof(rdl_state_t))) {
+			if ((id.chip == 43236) && (id.chiprev == 0)) {
+				DBUSERR(("%s: 43236a0 SFlash delay, waiting for dongle crc check "
+					 "completion!!!\n", __FUNCTION__));
+				dbus_usbos_wait(osinfo, wait_time);
+				wait += wait_time;
+				if (wait >= USB_SFLASH_DLIMAGE_LIMIT) {
+					DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__));
+					err = DBUS_ERR;
+					goto fail;
+				}
+			} else {
+				DBUSERR(("%s: DL_GETSTATE Failed xxxx\n", __FUNCTION__));
+				err = DBUS_ERR;
+				goto fail;
+			}
+		}
+
+		state.state = ltoh32(state.state);
+		state.bytes = ltoh32(state.bytes);
+
+		/* restart if an error is reported */
+		if ((state.state == DL_BAD_HDR) || (state.state == DL_BAD_CRC)) {
+			DBUSERR(("%s: Bad Hdr or Bad CRC\n", __FUNCTION__));
+			err = DBUS_ERR;
+			goto fail;
+		}
+
+	}
+fail:
+	if (bulkchunk)
+		MFREE(osh, bulkchunk, dl_trunk_size);
+
+	return err;
+} /* dbus_usb_dl_writeimage */
+
+/** Higher level DBUS layer (dbus.c) requests this layer to download image into dongle */
+static int
+dbus_usb_dlstart(void *bus, uint8 *fw, int len)
+{
+	usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+	int err;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (usbinfo == NULL)
+		return DBUS_ERR;
+
+	if (USB_DEV_ISBAD(usbinfo))
+		return DBUS_ERR;
+
+	err = dbus_usb_rdl_dwnld_state(usbinfo);
+
+	if (DBUS_OK == err) {
+		err = dbus_usb_dl_writeimage(usbinfo, fw, len);
+		if (err == DBUS_OK)
+			usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+		else
+			usbinfo->pub->busstate = DBUS_STATE_DL_PENDING;
+	} else
+		usbinfo->pub->busstate = DBUS_STATE_DL_PENDING;
+
+	return err;
+}
+
+static bool
+dbus_usb_update_chipinfo(usb_info_t *usbinfo, uint32 chip)
+{
+	bool retval = TRUE;
+	/* based on the CHIP Id, store the ram size which is needed for NVRAM download. */
+	switch (chip) {
+
+		case 0x4319:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_4319;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_4319;
+			break;
+
+		case 0x4329:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_4329;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_4329;
+			break;
+
+		case 43234:
+		case 43235:
+		case 43236:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_43236;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_43236;
+			break;
+
+		case 0x4328:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_4328;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_4328;
+			break;
+
+		case 0x4322:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_4322;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_4322;
+			break;
+
+		case 0x4360:
+		case 0xAA06:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_4360;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_4360;
+			break;
+
+		case 43242:
+		case 43243:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_43242;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_43242;
+			break;
+
+		case 43143:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_43143;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_43143;
+			break;
+
+		case 0x4350:
+		case 43556:
+		case 43558:
+		case 43569:
+			usbinfo->rdlram_size = RDL_RAM_SIZE_4350;
+			usbinfo->rdlram_base_addr = RDL_RAM_BASE_4350;
+			break;
+
+		case POSTBOOT_ID:
+			break;
+
+		default:
+			DBUSERR(("%s: Chip 0x%x Ram size is not known\n", __FUNCTION__, chip));
+			retval = FALSE;
+			break;
+
+	}
+
+	return retval;
+} /* dbus_usb_update_chipinfo */
+
+/** higher DBUS level (dbus.c) wants to know if firmware download is required. */
+static int
+dbus_usb_dlneeded(void *bus)
+{
+	usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+	void *osinfo;
+	bootrom_id_t id;
+	int dl_needed = 1;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (usbinfo == NULL)
+		return DBUS_ERR;
+
+	osinfo = usbinfo->usbosl_info;
+	ASSERT(osinfo);
+
+	/* Check if firmware downloaded already by querying runtime ID */
+	id.chip = 0xDEAD;
+	dbus_usbos_dl_cmd(osinfo, DL_GETVER, &id, sizeof(bootrom_id_t));
+
+	id.chip = ltoh32(id.chip);
+	id.chiprev = ltoh32(id.chiprev);
+
+	if (FALSE == dbus_usb_update_chipinfo(usbinfo, id.chip)) {
+		dl_needed = DBUS_ERR;
+		goto exit;
+	}
+
+	DBUSERR(("%s: chip 0x%x rev 0x%x\n", __FUNCTION__, id.chip, id.chiprev));
+	if (id.chip == POSTBOOT_ID) {
+		/* This code is needed to support two enumerations in the USB 1.1 scenario */
+		DBUSERR(("%s: Firmware already downloaded\n", __FUNCTION__));
+
+		dbus_usbos_dl_cmd(osinfo, DL_RESETCFG, &id, sizeof(bootrom_id_t));
+		dl_needed = DBUS_OK;
+		if (usbinfo->pub->busstate == DBUS_STATE_DL_PENDING)
+			usbinfo->pub->busstate = DBUS_STATE_DL_DONE;
+	} else {
+		usbinfo->pub->attrib.devid = id.chip;
+		usbinfo->pub->attrib.chiprev = id.chiprev;
+	}
+
+exit:
+	return dl_needed;
+}
+
+/** After issuing firmware download, higher DBUS level (dbus.c) wants to start the firmware. */
+static int
+dbus_usb_dlrun(void *bus)
+{
+	usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+	void *osinfo;
+	rdl_state_t state;
+	int err = DBUS_OK;
+
+	DBUSTRACE(("%s\n", __FUNCTION__));
+
+	if (usbinfo == NULL)
+		return DBUS_ERR;
+
+	if (USB_DEV_ISBAD(usbinfo))
+		return DBUS_ERR;
+
+	osinfo = usbinfo->usbosl_info;
+	ASSERT(osinfo);
+
+	/* Check we are runnable */
+	dbus_usbos_dl_cmd(osinfo, DL_GETSTATE, &state, sizeof(rdl_state_t));
+
+	state.state = ltoh32(state.state);
+	state.bytes = ltoh32(state.bytes);
+
+	/* Start the image */
+	if (state.state == DL_RUNNABLE) {
+		DBUSTRACE(("%s: Issue DL_GO\n", __FUNCTION__));
+		dbus_usbos_dl_cmd(osinfo, DL_GO, &state, sizeof(rdl_state_t));
+
+		if (usbinfo->pub->attrib.devid == TEST_CHIP)
+			dbus_usbos_wait(osinfo, USB_DLGO_SPINWAIT);
+
+//		dbus_usb_resetcfg(usbinfo);
+		/* The dongle may go for re-enumeration. */
+	} else {
+		DBUSERR(("%s: Dongle not runnable\n", __FUNCTION__));
+		err = DBUS_ERR;
+	}
+
+	return err;
+}
+
+/**
+ * As preparation for firmware download, higher DBUS level (dbus.c) requests the firmware image
+ * to be used for the type of dongle detected. Directly called by dbus.c (so not via a callback
+ * construction)
+ */
+void
+dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp)
+{
+	usb_info_t *usbinfo = BUS_INFO(bus, usb_info_t);
+	unsigned int devid;
+	unsigned int crev;
+
+	devid = usbinfo->pub->attrib.devid;
+	crev = usbinfo->pub->attrib.chiprev;
+
+	*fw = NULL;
+	*fwlen = 0;
+
+	switch (devid) {
+	case BCM43236_CHIP_ID:
+	case BCM43235_CHIP_ID:
+	case BCM43234_CHIP_ID:
+	case BCM43238_CHIP_ID: {
+		if (crev == 3 || crev == 2 || crev == 1) {
+#ifdef EMBED_IMAGE_43236b
+			*fw = (uint8 *)dlarray_43236b;
+			*fwlen = sizeof(dlarray_43236b);
+
+#endif
+		}
+		} break;
+	case BCM4360_CHIP_ID:
+	case BCM4352_CHIP_ID:
+	case BCM43526_CHIP_ID:
+#ifdef EMBED_IMAGE_43526a
+		if (crev <= 2) {
+			*fw = (uint8 *)dlarray_43526a;
+			*fwlen = sizeof(dlarray_43526a);
+		}
+#endif
+#ifdef EMBED_IMAGE_43526b
+		if (crev > 2) {
+			*fw = (uint8 *)dlarray_43526b;
+			*fwlen = sizeof(dlarray_43526b);
+		}
+#endif
+		break;
+
+	case BCM43242_CHIP_ID:
+#ifdef EMBED_IMAGE_43242a0
+		*fw = (uint8 *)dlarray_43242a0;
+		*fwlen = sizeof(dlarray_43242a0);
+#endif
+		break;
+
+	case BCM43143_CHIP_ID:
+#ifdef EMBED_IMAGE_43143a0
+		*fw = (uint8 *)dlarray_43143a0;
+		*fwlen = sizeof(dlarray_43143a0);
+#endif
+#ifdef EMBED_IMAGE_43143b0
+		*fw = (uint8 *)dlarray_43143b0;
+		*fwlen = sizeof(dlarray_43143b0);
+#endif
+		break;
+
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43570_CHIP_ID:
+	case BCM4358_CHIP_ID:
+#ifdef EMBED_IMAGE_4350a0
+		if (crev == 0) {
+			*fw = (uint8 *)dlarray_4350a0;
+			*fwlen = sizeof(dlarray_4350a0);
+		}
+#endif
+#ifdef EMBED_IMAGE_4350b0
+		if (crev == 1) {
+			*fw = (uint8 *)dlarray_4350b0;
+			*fwlen = sizeof(dlarray_4350b0);
+		}
+#endif
+#ifdef EMBED_IMAGE_4350b1
+		if (crev == 2) {
+			*fw = (uint8 *)dlarray_4350b1;
+			*fwlen = sizeof(dlarray_4350b1);
+		}
+#endif
+#ifdef EMBED_IMAGE_43556b1
+		if (crev == 2) {
+			*fw = (uint8 *)dlarray_43556b1;
+			*fwlen = sizeof(dlarray_43556b1);
+		}
+#endif
+#ifdef EMBED_IMAGE_4350c0
+		if (crev == 3) {
+			*fw = (uint8 *)dlarray_4350c0;
+			*fwlen = sizeof(dlarray_4350c0);
+		}
+#endif /* EMBED_IMAGE_4350c0 */
+#ifdef EMBED_IMAGE_4350c1
+		if (crev == 4) {
+			*fw = (uint8 *)dlarray_4350c1;
+			*fwlen = sizeof(dlarray_4350c1);
+		}
+#endif /* EMBED_IMAGE_4350c1 */
+		break;
+	case BCM43569_CHIP_ID:
+#ifdef EMBED_IMAGE_43569a0
+		if (crev == 0) {
+			*fw = (uint8 *)dlarray_43569a0;
+			*fwlen = sizeof(dlarray_43569a0);
+		}
+#endif /* EMBED_IMAGE_43569a0 */
+		break;
+	default:
+#ifdef EMBED_IMAGE_GENERIC
+		*fw = (uint8 *)dlarray;
+		*fwlen = sizeof(dlarray);
+#endif
+		break;
+	}
+} /* dbus_bus_fw_get */
diff --git a/drivers/net/wireless/bcmdhd/dbus_usb_linux.c b/drivers/net/wireless/bcmdhd/dbus_usb_linux.c
new file mode 100644
index 0000000..10927d8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dbus_usb_linux.c
@@ -0,0 +1,3403 @@
+/*
+ * Dongle BUS interface
+ * USB Linux Implementation
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus_usb_linux.c 564663 2015-06-18 02:34:42Z $
+ */
+
+/**
+ * @file @brief
+ * This file contains DBUS code that is USB *and* OS (Linux) specific. DBUS is a Broadcom
+ * proprietary host specific abstraction layer.
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+/**
+ * DBUS_LINUX_RXDPC was created for router platform performance tuning. A separate thread is
+ * created to handle USB RX, which keeps the call chain short and improves the cache hit rate.
+ *
+ * DBUS_LINUX_RXDPC setting is in wlconfig file.
+ */
+
+/*
+ * If DBUS_LINUX_RXDPC is off, spin_lock_bh() for CTFPOOL in
+ * linux_osl.c has to be changed to spin_lock_irqsave() because
+ * PKTGET/PKTFREE are no longer in bottom half.
+ *
+ * Right now we have another queue rpcq in wl_linux.c. Maybe we
+ * can eliminate that one to reduce the overhead.
+ *
+ * Enabling the 2nd EP together with DBUS_LINUX_RXDPC causes traffic from
+ * both EPs to be queued in the same rx queue. If we want RXDPC to work
+ * with the 2nd EP, the EP for RPC call returns should bypass the dpc and
+ * go directly up.
+ */
+
+/* #define DBUS_LINUX_RXDPC */
+
+/* Dbus histogram for ntxq, nrxq, dpc parameter tuning */
+/* #define DBUS_LINUX_HIST */
+
+#include <usbrdl.h>
+#include <bcmendian.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <dbus.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <linux/usb.h>
+#include <usbrdl.h>
+#include <linux/firmware.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if defined(USBOS_THREAD) || defined(USBOS_TX_THREAD)
+
+/**
+ * The usb-thread is designed to provide concurrency on multiprocessor and SMP Linux kernels. On
+ * dual-core platforms, the WLAN driver without threads executed only on CPU0, consuming almost
+ * 100% of CPU0 while CPU1 remained idle. This behavior was observed on Broadcom's STB.
+ *
+ * The WLAN driver loaded CPU0 rather than CPU1 because tasklets/queues, software IRQs and
+ * hardware IRQs all executed on CPU0 only. CPU0 became the system's bottleneck, lowering
+ * throughput and slowing system responsiveness.
+ *
+ * To improve system responsiveness and throughput, the usb-thread was implemented. Kernel
+ * threads can be scheduled to run on any core, so one core can process data in the usb-layer
+ * while the other processes data in the wl-layer.
+ *
+ * For further info see the [WlThreadAndUsbThread] Twiki.
+ */
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/hardirq.h>
+#include <linux/list.h>
+#include <linux_osl.h>
+#endif /* USBOS_THREAD || USBOS_TX_THREAD */
+
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+/**
+ * Starting with the 3.10 kernel release, dynamic PM support for USB is present whenever
+ * the kernel was built with CONFIG_PM_RUNTIME enabled. The CONFIG_USB_SUSPEND option has
+ * been eliminated.
+ */
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)) && defined(CONFIG_USB_SUSPEND)) \
+	|| ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) && defined(CONFIG_PM_RUNTIME))
+/* For USB power management support, see Linux kernel: Documentation/usb/power-management.txt */
+#define USB_SUSPEND_AVAILABLE
+#endif
+
+/* Define alternate fw/nvram paths used in Android */
+#ifdef OEM_ANDROID
+#define CONFIG_ANDROID_BCMDHD_FW_PATH "broadcom/dhd/firmware/fw.bin.trx"
+#define CONFIG_ANDROID_BCMDHD_NVRAM_PATH "broadcom/dhd/nvrams/nvm.txt"
+#endif /* OEM_ANDROID */
+
+static inline int usb_submit_urb_linux(struct urb *urb)
+{
+
+#ifdef BCM_MAX_URB_LEN
+	if (urb && (urb->transfer_buffer_length > BCM_MAX_URB_LEN)) {
+		DBUSERR(("URB transfer length=%d exceeded %d ra=%p\n", urb->transfer_buffer_length,
+		BCM_MAX_URB_LEN, __builtin_return_address(0)));
+		return DBUS_ERR;
+	}
+#endif
+
+#ifdef KERNEL26
+	return usb_submit_urb(urb, GFP_ATOMIC);
+#else
+	return usb_submit_urb(urb);
+#endif
+
+}
+
+#define USB_SUBMIT_URB(urb) usb_submit_urb_linux(urb)
+
+#ifdef KERNEL26
+
+#define USB_ALLOC_URB()				usb_alloc_urb(0, GFP_ATOMIC)
+#define USB_UNLINK_URB(urb)			(usb_kill_urb(urb))
+#define USB_FREE_URB(urb)			(usb_free_urb(urb))
+#define USB_REGISTER()				usb_register(&dbus_usbdev)
+#define USB_DEREGISTER()			usb_deregister(&dbus_usbdev)
+
+#ifdef USB_SUSPEND_AVAILABLE
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#define USB_AUTOPM_SET_INTERFACE(intf)		usb_autopm_set_interface(intf)
+#else
+#define USB_ENABLE_AUTOSUSPEND(udev)		usb_enable_autosuspend(udev)
+#define USB_DISABLE_AUTOSUSPEND(udev)       usb_disable_autosuspend(udev)
+#endif  /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))  */
+
+#define USB_AUTOPM_GET_INTERFACE(intf)		usb_autopm_get_interface(intf)
+#define USB_AUTOPM_PUT_INTERFACE(intf)		usb_autopm_put_interface(intf)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf)	usb_autopm_get_interface_async(intf)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf)	usb_autopm_put_interface_async(intf)
+#define USB_MARK_LAST_BUSY(dev)			usb_mark_last_busy(dev)
+
+#else /* USB_SUSPEND_AVAILABLE */
+
+#define USB_AUTOPM_GET_INTERFACE(intf)		do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE(intf)		do {} while (0)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf)	do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf)	do {} while (0)
+#define USB_MARK_LAST_BUSY(dev)			do {} while (0)
+#endif /* USB_SUSPEND_AVAILABLE */
+
+#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \
+	usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \
+	(data), (size), (timeout))
+#define USB_BULK_MSG(dev, pipe, data, len, actual_length, timeout) \
+	usb_bulk_msg((dev), (pipe), (data), (len), (actual_length), (timeout))
+#define USB_BUFFER_ALLOC(dev, size, mem, dma)	usb_buffer_alloc(dev, size, mem, dma)
+#define USB_BUFFER_FREE(dev, size, data, dma)	usb_buffer_free(dev, size, data, dma)
+
+#ifdef WL_URB_ZPKT
+#define URB_QUEUE_BULK   URB_ZERO_PACKET
+#else
+#define URB_QUEUE_BULK   0
+#endif /* WL_URB_ZPKT */
+
+#define CALLBACK_ARGS		struct urb *urb, struct pt_regs *regs
+#define CALLBACK_ARGS_DATA	urb, regs
+#define CONFIGDESC(usb)		(&((usb)->actconfig)->desc)
+#define IFPTR(usb, idx)		((usb)->actconfig->interface[idx])
+#define IFALTS(usb, idx)	(IFPTR((usb), (idx))->altsetting[0])
+#define IFDESC(usb, idx)	IFALTS((usb), (idx)).desc
+#define IFEPDESC(usb, idx, ep)	(IFALTS((usb), (idx)).endpoint[ep]).desc
+
+#else /* KERNEL26 */
+
+#define USB_ALLOC_URB()				usb_alloc_urb(0)
+#define USB_UNLINK_URB(urb)			usb_unlink_urb(urb)
+#define USB_FREE_URB(urb)			(usb_free_urb(urb))
+#define USB_REGISTER()				usb_register(&dbus_usbdev)
+#define USB_DEREGISTER()			usb_deregister(&dbus_usbdev)
+#define USB_AUTOPM_GET_INTERFACE(intf)		do {} while (0)
+#define USB_AUTOPM_GET_INTERFACE_ASYNC(intf)	do {} while (0)
+#define USB_AUTOPM_PUT_INTERFACE_ASYNC(intf)	do {} while (0)
+#define USB_MARK_LAST_BUSY(dev)			do {} while (0)
+
+#define USB_CONTROL_MSG(dev, pipe, request, requesttype, value, index, data, size, timeout) \
+	usb_control_msg((dev), (pipe), (request), (requesttype), (value), (index), \
+	(data), (size), (timeout))
+#define USB_BUFFER_ALLOC(dev, size, mem, dma)  kmalloc(size, mem)
+#define USB_BUFFER_FREE(dev, size, data, dma)  kfree(data)
+
+#ifdef WL_URB_ZPKT
+#define URB_QUEUE_BULK   USB_QUEUE_BULK|URB_ZERO_PACKET
+#else
+#define URB_QUEUE_BULK   0
+#endif /*  WL_URB_ZPKT */
+
+#define CALLBACK_ARGS		struct urb *urb
+#define CALLBACK_ARGS_DATA	urb
+#define CONFIGDESC(usb)		((usb)->actconfig)
+#define IFPTR(usb, idx)		(&(usb)->actconfig->interface[idx])
+#define IFALTS(usb, idx)	((usb)->actconfig->interface[idx].altsetting[0])
+#define IFDESC(usb, idx)	IFALTS((usb), (idx))
+#define IFEPDESC(usb, idx, ep)	(IFALTS((usb), (idx)).endpoint[ep])
+
+
+#endif /* KERNEL26 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+#define USB_SPEED_SUPER		5
+#endif  /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) */
+
+#define CONTROL_IF   0
+#define BULK_IF      0
+
+#ifdef BCMUSBDEV_COMPOSITE
+#define USB_COMPIF_MAX       4
+
+#define USB_CLASS_WIRELESS	0xe0
+#define USB_CLASS_MISC		0xef
+#define USB_SUBCLASS_COMMON	0x02
+#define USB_PROTO_IAD		0x01
+#define USB_PROTO_VENDOR	0xff
+
+#define USB_QUIRK_NO_SET_INTF   0x04 /* device does not support set_interface */
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#define USB_SYNC_WAIT_TIMEOUT  300  /* ms */
+
+/* Private data kept in skb */
+#define SKB_PRIV(skb, idx)  (&((void **)skb->cb)[idx])
+#define SKB_PRIV_URB(skb)   (*(struct urb **)SKB_PRIV(skb, 0))
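+/*
+ * Hypothetical usage sketch (assumption for illustration, not taken from the
+ * code below): these macros let a URB pointer be stashed in the skb control
+ * buffer, e.g.
+ *   SKB_PRIV_URB(skb) = urb;               // remember the posted URB
+ *   struct urb *u = SKB_PRIV_URB(skb);     // fetch it again on completion
+ */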
+
+#ifndef DBUS_USB_RXQUEUE_BATCH_ADD
+/* items to add each time within limit */
+#define DBUS_USB_RXQUEUE_BATCH_ADD            8
+#endif
+
+#ifndef DBUS_USB_RXQUEUE_LOWER_WATERMARK
+/* add a new batch req to rx queue when waiting item count reduce to this number */
+#define DBUS_USB_RXQUEUE_LOWER_WATERMARK      4
+#endif
+
+enum usbos_suspend_state {
+	USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow suspend */
+	USBOS_SUSPEND_STATE_SUSPEND_PENDING,   /* Device is idle, can be suspended */
+	                                       /* Waiting for PM to suspend */
+	USBOS_SUSPEND_STATE_SUSPENDED          /* Device suspended */
+};
+
+enum usbos_request_state {
+	USBOS_REQUEST_STATE_UNSCHEDULED = 0,	/* USB TX request not scheduled */
+	USBOS_REQUEST_STATE_SCHEDULED,		/* USB TX request given to TX thread */
+	USBOS_REQUEST_STATE_SUBMITTED		/* USB TX request submitted */
+};
+
+typedef struct {
+	uint32 notification;
+	uint32 reserved;
+} intr_t;
+
+typedef struct {
+	dbus_pub_t *pub;
+
+	void *cbarg;
+	dbus_intf_callbacks_t *cbs;
+
+	/* Imported */
+	struct usb_device *usb;	/* USB device pointer from OS */
+	struct urb *intr_urb; /* URB for interrupt endpoint */
+	struct list_head req_rxfreeq;
+	struct list_head req_txfreeq;
+	struct list_head req_rxpostedq;	/* Posted down to USB driver for RX */
+	struct list_head req_txpostedq;	/* Posted down to USB driver for TX */
+	spinlock_t rxfree_lock; /* Lock for rx free list */
+	spinlock_t txfree_lock; /* Lock for tx free list */
+	spinlock_t rxposted_lock; /* Lock for rx posted list */
+	spinlock_t txposted_lock; /* Lock for tx posted list */
+	uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2; /* Pipe numbers for USB I/O */
+	uint rxbuf_len;
+
+	struct list_head req_rxpendingq; /* RXDPC: Pending for dpc to send up */
+	spinlock_t rxpending_lock;	/* RXDPC: Lock for rx pending list */
+	long dpc_pid;
+	struct semaphore dpc_sem;
+	struct completion dpc_exited;
+	int rxpending;
+
+	struct urb               *ctl_urb;
+	int                      ctl_in_pipe, ctl_out_pipe;
+	struct usb_ctrlrequest   ctl_write;
+	struct usb_ctrlrequest   ctl_read;
+	struct semaphore         ctl_lock;     /* Lock for CTRL transfers via tx_thread */
+#ifdef USBOS_TX_THREAD
+	enum usbos_request_state ctl_state;
+#endif /* USBOS_TX_THREAD */
+
+	spinlock_t rxlock;      /* Lock for rxq management */
+	spinlock_t txlock;      /* Lock for txq management */
+
+	int intr_size;          /* Size of interrupt message */
+	int interval;           /* Interrupt polling interval */
+	intr_t intr;            /* Data buffer for interrupt endpoint */
+
+	int maxps;
+	atomic_t txposted;
+	atomic_t rxposted;
+	atomic_t txallocated;
+	atomic_t rxallocated;
+	bool rxctl_deferrespok;	/* Get a response for setup from dongle */
+
+	wait_queue_head_t wait;
+	bool waitdone;
+	int sync_urb_status;
+
+	struct urb *blk_urb; /* Used for downloading embedded image */
+
+#ifdef USBOS_THREAD
+	spinlock_t              ctrl_lock;
+	spinlock_t              usbos_list_lock;
+	struct list_head        usbos_list;
+	struct list_head        usbos_free_list;
+	atomic_t                usbos_list_cnt;
+	wait_queue_head_t       usbos_queue_head;
+	struct task_struct      *usbos_kt;
+#endif /* USBOS_THREAD */
+
+#ifdef USBOS_TX_THREAD
+	spinlock_t              usbos_tx_list_lock;
+	struct list_head	usbos_tx_list;
+	wait_queue_head_t	usbos_tx_queue_head;
+	struct task_struct      *usbos_tx_kt;
+#endif /* USBOS_TX_THREAD */
+
+	struct dma_pool *qtd_pool; /* QTD pool for USB optimization only */
+	int tx_ep, rx_ep, rx2_ep;  /* EPs for USB optimization */
+	struct usb_device *usb_device; /* USB device for optimization */
+} usbos_info_t;
+
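+/*
+ * Per-URB request wrapper: ties a URB to its packet/buffer and owning
+ * usbos_info_t so completions can move it between the free and posted
+ * lists declared in usbos_info_t above.
+ */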
+typedef struct urb_req {
+	void         *pkt;
+	int          buf_len;
+	struct urb   *urb;
+	void         *arg;
+	usbos_info_t *usbinfo;
+	struct list_head urb_list;
+} urb_req_t;
+
+#ifdef USBOS_THREAD
+typedef struct usbos_list_entry {
+	struct list_head    list;   /* must be first */
+	void               *urb_context;
+	int                 urb_length;
+	int                 urb_status;
+} usbos_list_entry_t;
+
+static void* dbus_usbos_thread_init(usbos_info_t *usbos_info);
+static void  dbus_usbos_thread_deinit(usbos_info_t *usbos_info);
+static void  dbus_usbos_dispatch_schedule(CALLBACK_ARGS);
+static int   dbus_usbos_thread_func(void *data);
+#endif /* USBOS_THREAD */
+
+#ifdef USBOS_TX_THREAD
+void* dbus_usbos_tx_thread_init(usbos_info_t *usbos_info);
+void  dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info);
+int   dbus_usbos_tx_thread_func(void *data);
+#endif /* USBOS_TX_THREAD */
+
+/* Shared Function prototypes */
+bool dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen);
+int dbus_usbos_wait(usbos_info_t *usbinfo, uint16 ms);
+bool dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len);
+int dbus_write_membytes(usbos_info_t *usbinfo, bool set, uint32 address, uint8 *data, uint size);
+
+/* Local function prototypes */
+static void dbus_usbos_send_complete(CALLBACK_ARGS);
+static void dbus_usbos_recv_complete(CALLBACK_ARGS);
+static int  dbus_usbos_errhandler(void *bus, int err);
+static int  dbus_usbos_state_change(void *bus, int state);
+static void dbusos_stop(usbos_info_t *usbos_info);
+
+#ifdef KERNEL26
+static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id);
+static void dbus_usbos_disconnect(struct usb_interface *intf);
+#if defined(USB_SUSPEND_AVAILABLE)
+static int dbus_usbos_resume(struct usb_interface *intf);
+static int dbus_usbos_suspend(struct usb_interface *intf, pm_message_t message);
+/* at the moment, used for full dongle host driver only */
+static int dbus_usbos_reset_resume(struct usb_interface *intf);
+#endif /* USB_SUSPEND_AVAILABLE */
+#else /* KERNEL26 */
+static void *dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum,
+	const struct usb_device_id *id);
+static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr);
+#endif /* KERNEL26 */
+
+
+/**
+ * Have to disable the missing-field-initializers warning: the last element {} triggers it, and
+ * different kernel versions have a different number of members, so it is impossible to spell out
+ * a full initializer. Issuing the warning here is arguably a GCC bug, since the universal zero
+ * initializer {0} is specified by the C99 standard as a correct way to zero an entire struct.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+
+static struct usb_device_id devid_table[] = {
+	{ USB_DEVICE(BCM_DNGL_VID, 0x0000) }, /* Configurable via register() */
+#if defined(BCM_REQUEST_FW)
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4328) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4322) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4319) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43236) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43143) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43242) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4360) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_4350) },
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BL_PID_43569) },
+#endif
+#ifdef EXTENDED_VID_PID
+	EXTENDED_VID_PID,
+#endif /* EXTENDED_VID_PID */
+	{ USB_DEVICE(BCM_DNGL_VID, BCM_DNGL_BDC_PID) }, /* Default BDC */
+	{ }
+};
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic pop
+#endif
+
+MODULE_DEVICE_TABLE(usb, devid_table);
+
+/** functions called by the Linux kernel USB subsystem */
+static struct usb_driver dbus_usbdev = {
+	name:           "dbus_usbdev",
+	probe:          dbus_usbos_probe,
+	disconnect:     dbus_usbos_disconnect,
+	id_table:       devid_table,
+#if defined(USB_SUSPEND_AVAILABLE)
+	suspend:        dbus_usbos_suspend,
+	resume:         dbus_usbos_resume,
+	reset_resume:	dbus_usbos_reset_resume,
+	/* Linux USB core will allow autosuspend for devices bound to this driver */
+	supports_autosuspend: 1
+#endif /* USB_SUSPEND_AVAILABLE */
+};
+
+/**
+ * This stores USB info during Linux probe callback since attach() is not called yet at this point
+ */
+typedef struct {
+	void    *usbos_info;
+	struct usb_device *usb; /* USB device pointer from OS */
+	uint    rx_pipe;   /* Pipe numbers for USB I/O */
+	uint    tx_pipe;   /* Pipe numbers for USB I/O */
+	uint    intr_pipe; /* Pipe numbers for USB I/O */
+	uint    rx_pipe2;  /* Pipe numbers for USB I/O */
+	int     intr_size; /* Size of interrupt message */
+	int     interval;  /* Interrupt polling interval */
+	bool    dldone;
+	int     vid;
+	int     pid;
+	bool    dereged;
+	bool    disc_cb_done;
+	DEVICE_SPEED    device_speed;
+	enum usbos_suspend_state suspend_state;
+	struct usb_interface     *intf;
+} probe_info_t;
+
+/*
+ * USB Linux dbus_intf_t
+ */
+static void *dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs);
+static void dbus_usbos_intf_detach(dbus_pub_t *pub, void *info);
+static int  dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb);
+static int  dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb);
+static int  dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx);
+static int  dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb);
+static int  dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len);
+static int  dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len);
+static int  dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib);
+static int  dbus_usbos_intf_up(void *bus);
+static int  dbus_usbos_intf_down(void *bus);
+static int  dbus_usbos_intf_stop(void *bus);
+static int  dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value);
+extern int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size);
+int dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data);
+static int  dbus_usbos_intf_set_config(void *bus, dbus_config_t *config);
+static bool dbus_usbos_intf_recv_needed(void *bus);
+static void *dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args);
+static void *dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args);
+#ifdef BCMUSBDEV_COMPOSITE
+static int dbus_usbos_intf_wlan(struct usb_device *usb);
+#endif /* BCMUSBDEV_COMPOSITE */
+
+/** functions called by dbus_usb.c */
+static dbus_intf_t dbus_usbos_intf = {
+	.attach = dbus_usbos_intf_attach,
+	.detach = dbus_usbos_intf_detach,
+	.up = dbus_usbos_intf_up,
+	.down = dbus_usbos_intf_down,
+	.send_irb = dbus_usbos_intf_send_irb,
+	.recv_irb = dbus_usbos_intf_recv_irb,
+	.cancel_irb = dbus_usbos_intf_cancel_irb,
+	.send_ctl = dbus_usbos_intf_send_ctl,
+	.recv_ctl = dbus_usbos_intf_recv_ctl,
+	.get_stats = NULL,
+	.get_attrib = dbus_usbos_intf_get_attrib,
+	.remove = NULL,
+	.resume = NULL,
+	.suspend = NULL,
+	.stop = dbus_usbos_intf_stop,
+	.reset = NULL,
+	.pktget = NULL,
+	.pktfree = NULL,
+	.iovar_op = NULL,
+	.dump = NULL,
+	.set_config = dbus_usbos_intf_set_config,
+	.get_config = NULL,
+	.device_exists = NULL,
+	.dlneeded = NULL,
+	.dlstart = NULL,
+	.dlrun = NULL,
+	.recv_needed = dbus_usbos_intf_recv_needed,
+	.exec_rxlock = dbus_usbos_intf_exec_rxlock,
+	.exec_txlock = dbus_usbos_intf_exec_txlock,
+
+	.tx_timer_init = NULL,
+	.tx_timer_start = NULL,
+	.tx_timer_stop = NULL,
+
+	.sched_dpc = NULL,
+	.lock = NULL,
+	.unlock = NULL,
+	.sched_probe_cb = NULL,
+
+	.shutdown = NULL,
+
+	.recv_stop = NULL,
+	.recv_resume = NULL,
+
+	.recv_irb_from_ep = dbus_usbos_intf_recv_irb_from_ep,
+	.readreg = dbus_usbos_readreg
+};
+
+static probe_info_t    g_probe_info;
+static probe_cb_t      probe_cb = NULL;
+static disconnect_cb_t disconnect_cb = NULL;
+static void            *probe_arg = NULL;
+static void            *disc_arg = NULL;
+
+
+
+static volatile int loopback_rx_cnt, loopback_tx_cnt;
+int loopback_size;
+bool is_loopback_pkt(void *buf);
+int matches_loopback_pkt(void *buf);
+
+/**
+ * Multiple code paths in this file dequeue a URB request; this function makes sure that it
+ * happens in a concurrency-safe manner. Don't call this from a sleepable process context.
+ */
+static urb_req_t * BCMFASTPATH
+dbus_usbos_qdeq(struct list_head *urbreq_q, spinlock_t *lock)
+{
+	unsigned long flags;
+	urb_req_t *req;
+
+	ASSERT(urbreq_q != NULL);
+
+	spin_lock_irqsave(lock, flags);
+
+	if (list_empty(urbreq_q)) {
+		req = NULL;
+	} else {
+		ASSERT(urbreq_q->next != NULL);
+		ASSERT(urbreq_q->next != urbreq_q);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		req = list_entry(urbreq_q->next, urb_req_t, urb_list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		list_del_init(&req->urb_list);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return req;
+}
+
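+/** Enqueue a URB request at the tail of the given queue in a concurrency-safe manner. */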
+static void BCMFASTPATH
+dbus_usbos_qenq(struct list_head *urbreq_q, urb_req_t *req, spinlock_t *lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	list_add_tail(&req->urb_list, urbreq_q);
+
+	spin_unlock_irqrestore(lock, flags);
+}
+
+/**
+ * Multiple code paths in this file remove a URB request from a list; this function makes sure
+ * that it happens in a concurrency-safe manner. Don't call this from a sleepable process context.
+ * Unlike dbus_usbos_qdeq(), it removes a specific request rather than dequeuing from the head.
+ */
+static void
+dbus_usbos_req_del(urb_req_t *req, spinlock_t *lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	list_del_init(&req->urb_list);
+
+	spin_unlock_irqrestore(lock, flags);
+}
+
+
+/**
+ * Driver requires a pool of URBs to operate. This function is called during
+ * initialization (attach phase), allocates a number of URBs, and puts them
+ * on the free (req_rxfreeq and req_txfreeq) queue
+ */
+static int
+dbus_usbos_urbreqs_alloc(usbos_info_t *usbos_info, uint32 count, bool is_rx)
+{
+	int i;
+	int allocated = 0;
+	int err = DBUS_OK;
+
+	for (i = 0; i < count; i++) {
+		urb_req_t *req;
+
+		req = MALLOC(usbos_info->pub->osh, sizeof(urb_req_t));
+		if (req == NULL) {
+			DBUSERR(("%s: MALLOC req failed\n", __FUNCTION__));
+			err = DBUS_ERR_NOMEM;
+			goto fail;
+		}
+		bzero(req, sizeof(urb_req_t));
+
+		req->urb = USB_ALLOC_URB();
+		if (req->urb == NULL) {
+			DBUSERR(("%s: USB_ALLOC_URB req->urb failed\n", __FUNCTION__));
+			err = DBUS_ERR_NOMEM;
+			goto fail;
+		}
+
+		INIT_LIST_HEAD(&req->urb_list);
+
+		if (is_rx) {
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+			/* don't allocate now. Do it on demand */
+			req->pkt = NULL;
+#else
+			/* pre-allocate buffers never to be released */
+			req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len);
+			if (req->pkt == NULL) {
+				DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__));
+				err = DBUS_ERR_NOMEM;
+				goto fail;
+			}
+#endif
+			req->buf_len = usbos_info->rxbuf_len;
+			dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+		} else {
+			req->buf_len = 0;
+			dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+		}
+		allocated++;
+		continue;
+
+fail:
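+		/* Partial allocation: release this iteration's partly-built request, keep the
+		 * requests already queued, and report the error after updating the allocated count.
+		 */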
+		if (req) {
+			if (is_rx && req->pkt) {
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+				/* req->pkt is NULL in "NOCOPY" mode */
+#else
+				MFREE(usbos_info->pub->osh, req->pkt, req->buf_len);
+#endif
+			}
+			if (req->urb) {
+				USB_FREE_URB(req->urb);
+			}
+			MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t));
+		}
+		break;
+	}
+
+	atomic_add(allocated, is_rx ? &usbos_info->rxallocated : &usbos_info->txallocated);
+
+	if (is_rx) {
+		DBUSTRACE(("%s: add %d (total %d) rx buf, each has %d bytes\n", __FUNCTION__,
+			allocated, atomic_read(&usbos_info->rxallocated), usbos_info->rxbuf_len));
+	} else {
+		DBUSTRACE(("%s: add %d (total %d) tx req\n", __FUNCTION__,
+			allocated, atomic_read(&usbos_info->txallocated)));
+	}
+
+	return err;
+} /* dbus_usbos_urbreqs_alloc */
+
+/** Typically called during detach or when attach failed. Don't call until all URBs unlinked */
+static int
+dbus_usbos_urbreqs_free(usbos_info_t *usbos_info, bool is_rx)
+{
+	int rtn = 0;
+	urb_req_t *req;
+	struct list_head *req_q;
+	spinlock_t *lock;
+
+	if (is_rx) {
+		req_q = &usbos_info->req_rxfreeq;
+		lock = &usbos_info->rxfree_lock;
+	} else {
+		req_q = &usbos_info->req_txfreeq;
+		lock = &usbos_info->txfree_lock;
+	}
+	while ((req = dbus_usbos_qdeq(req_q, lock)) != NULL) {
+
+		if (is_rx) {
+			if (req->pkt) {
+				/* We do MFREE instead of PKTFREE because the pkt has been
+				 * converted to native already
+				 */
+				MFREE(usbos_info->pub->osh, req->pkt, req->buf_len);
+				req->pkt = NULL;
+				req->buf_len = 0;
+			}
+		} else {
+			/* sending req should not be assigned pkt buffer */
+			ASSERT(req->pkt == NULL);
+		}
+
+		if (req->urb) {
+			USB_FREE_URB(req->urb);
+			req->urb = NULL;
+		}
+		MFREE(usbos_info->pub->osh, req, sizeof(urb_req_t));
+
+		rtn++;
+	}
+	return rtn;
+} /* dbus_usbos_urbreqs_free */
+
+/**
+ * called by Linux kernel on URB completion. Upper DBUS layer (dbus_usb.c) has to be notified of
+ * send completion.
+ */
+void
+dbus_usbos_send_complete(CALLBACK_ARGS)
+{
+	urb_req_t *req = urb->context;
+	dbus_irb_tx_t *txirb = req->arg;
+	usbos_info_t *usbos_info = req->usbinfo;
+	unsigned long flags;
+	int status = DBUS_OK;
+	int txposted;
+
+	USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+
+	spin_lock_irqsave(&usbos_info->txlock, flags);
+
+	dbus_usbos_req_del(req, &usbos_info->txposted_lock);
+	txposted = atomic_dec_return(&usbos_info->txposted);
+	if (unlikely (txposted < 0)) {
+		DBUSERR(("%s ERROR: txposted is negative (%d)!!\n", __FUNCTION__, txposted));
+	}
+	spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+	if (unlikely (urb->status)) {
+		status = DBUS_ERR_TXFAIL;
+		DBUSTRACE(("txfail status %d\n", urb->status));
+	}
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+	/* sending req should not be assigned pkt buffer */
+	ASSERT(req->pkt == NULL);
+#endif
+	/*  txirb should always be set, except for ZLP. ZLP is reusing this callback function. */
+	if (txirb != NULL) {
+		if (txirb->send_buf != NULL) {
+			MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len);
+			txirb->send_buf = NULL;
+			req->buf_len = 0;
+		}
+		if (likely (usbos_info->cbarg && usbos_info->cbs)) {
+			if (likely (usbos_info->cbs->send_irb_complete != NULL))
+			    usbos_info->cbs->send_irb_complete(usbos_info->cbarg, txirb, status);
+		}
+	}
+
+	dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+} /* dbus_usbos_send_complete */
+
+/**
+ * In order to receive USB traffic from the dongle, we need to supply the Linux kernel with a free
+ * URB that is going to contain received data.
+ */
+static int BCMFASTPATH
+dbus_usbos_recv_urb_submit(usbos_info_t *usbos_info, dbus_irb_rx_t *rxirb, uint32 ep_idx)
+{
+	urb_req_t *req;
+	int ret = DBUS_OK;
+	unsigned long flags;
+	void *p;
+	uint rx_pipe;
+	int rxposted;
+
+	BCM_REFERENCE(rxposted);
+
+	if (!(req = dbus_usbos_qdeq(&usbos_info->req_rxfreeq, &usbos_info->rxfree_lock))) {
+		DBUSTRACE(("%s No free URB!\n", __FUNCTION__));
+		return DBUS_ERR_RXDROP;
+	}
+
+	spin_lock_irqsave(&usbos_info->rxlock, flags);
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+	req->pkt = rxirb->pkt = PKTGET(usbos_info->pub->osh, req->buf_len, FALSE);
+	if (!rxirb->pkt) {
+		DBUSERR(("%s: PKTGET failed\n", __FUNCTION__));
+		dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+		ret = DBUS_ERR_RXDROP;
+		goto fail;
+	}
+	/* consider the packet "native" so we don't count it as MALLOCED in the osl */
+	PKTTONATIVE(usbos_info->pub->osh, req->pkt);
+	rxirb->buf = NULL;
+	p = PKTDATA(usbos_info->pub->osh, req->pkt);
+#else
+	if (req->buf_len != usbos_info->rxbuf_len) {
+		ASSERT(req->pkt);
+		MFREE(usbos_info->pub->osh, req->pkt, req->buf_len);
+		DBUSTRACE(("%s: replace rx buff: old len %d, new len %d\n", __FUNCTION__,
+			req->buf_len, usbos_info->rxbuf_len));
+		req->buf_len = 0;
+		req->pkt = MALLOC(usbos_info->pub->osh, usbos_info->rxbuf_len);
+		if (req->pkt == NULL) {
+			DBUSERR(("%s: MALLOC req->pkt failed\n", __FUNCTION__));
+			ret = DBUS_ERR_NOMEM;
+			goto fail;
+		}
+		req->buf_len = usbos_info->rxbuf_len;
+	}
+	rxirb->buf = req->pkt;
+	p = rxirb->buf;
+#endif /* defined(BCM_RPC_NOCOPY) */
+	rxirb->buf_len = req->buf_len;
+	req->usbinfo = usbos_info;
+	req->arg = rxirb;
+	if (ep_idx == 0) {
+		rx_pipe = usbos_info->rx_pipe;
+	} else {
+		rx_pipe = usbos_info->rx_pipe2;
+		ASSERT(usbos_info->rx_pipe2);
+	}
+	/* Prepare the URB */
+	usb_fill_bulk_urb(req->urb, usbos_info->usb, rx_pipe,
+		p,
+		rxirb->buf_len,
+		(usb_complete_t)dbus_usbos_recv_complete, req);
+	req->urb->transfer_flags |= URB_QUEUE_BULK;
+
+	if ((ret = USB_SUBMIT_URB(req->urb))) {
+		DBUSERR(("%s USB_SUBMIT_URB failed. status %d\n", __FUNCTION__, ret));
+		dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+		ret = DBUS_ERR_RXFAIL;
+		goto fail;
+	}
+	rxposted = atomic_inc_return(&usbos_info->rxposted);
+
+	dbus_usbos_qenq(&usbos_info->req_rxpostedq, req, &usbos_info->rxposted_lock);
+fail:
+	spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+	return ret;
+} /* dbus_usbos_recv_urb_submit */
+
+
+/**
+ * Called by the worker thread when a 'receive URB' has completed, or by the Linux kernel when it
+ * returns a URB to this driver.
+ */
+static void BCMFASTPATH
+dbus_usbos_recv_complete_handle(urb_req_t *req, int len, int status)
+{
+	dbus_irb_rx_t *rxirb = req->arg;
+	usbos_info_t *usbos_info = req->usbinfo;
+	unsigned long flags;
+	int rxallocated, rxposted;
+	int dbus_status = DBUS_OK;
+	bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 1 : 0;
+
+	spin_lock_irqsave(&usbos_info->rxlock, flags);
+	dbus_usbos_req_del(req, &usbos_info->rxposted_lock);
+	rxposted = atomic_dec_return(&usbos_info->rxposted);
+	rxallocated = atomic_read(&usbos_info->rxallocated);
+	spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+
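+	/* Top up the rx request pool when the number of posted rx URBs drains to the low
+	 * watermark, so the dongle is not starved of receive buffers under sustained traffic.
+	 */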
+	if ((rxallocated < usbos_info->pub->nrxq) && (!status) &&
+		(rxposted == DBUS_USB_RXQUEUE_LOWER_WATERMARK)) {
+			DBUSTRACE(("%s: need more rx buf: rxallocated %d rxposted %d!\n",
+				__FUNCTION__, rxallocated, rxposted));
+			dbus_usbos_urbreqs_alloc(usbos_info,
+				MIN(DBUS_USB_RXQUEUE_BATCH_ADD,
+				usbos_info->pub->nrxq - rxallocated), TRUE);
+	}
+
+	/* Handle errors */
+	if (status) {
+		/*
+		 * Linux 2.4 disconnect: -ENOENT or -EILSEQ for CRC error; rmmod: -ENOENT
+		 * Linux 2.6 disconnect: -EPROTO, rmmod: -ESHUTDOWN
+		 */
+		if ((status == -ENOENT && (!killed)) || status == -ESHUTDOWN) {
+			/* NOTE: unlink() can not be called from URB callback().
+			 * Do not call dbusos_stop() here.
+			 */
+			DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status));
+			dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+		} else if (status == -EPROTO) {
+			DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status));
+		} else if (killed && (status == -EHOSTUNREACH || status == -ENOENT)) {
+			/* Device is suspended */
+		} else {
+			DBUSTRACE(("%s rx error %d\n", __FUNCTION__, status));
+			dbus_usbos_errhandler(usbos_info, DBUS_ERR_RXFAIL);
+		}
+
+		/* On error, don't submit more URBs yet */
+		rxirb->buf = NULL;
+		rxirb->actual_len = 0;
+		dbus_status = DBUS_ERR_RXFAIL;
+		goto fail;
+	}
+
+	/* Record the received length in the rx IRB */
+	rxirb->actual_len = len;
+
+	if (rxirb->actual_len < sizeof(uint32)) {
+		DBUSTRACE(("small pkt len %d, process as ZLP\n", rxirb->actual_len));
+		dbus_status = DBUS_ERR_RXZLP;
+	}
+
+fail:
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_RXNOCOPY)
+	/* detach the packet from the queue */
+	req->pkt = NULL;
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_RXNOCOPY */
+
+	if (usbos_info->cbarg && usbos_info->cbs) {
+		if (usbos_info->cbs->recv_irb_complete) {
+			usbos_info->cbs->recv_irb_complete(usbos_info->cbarg, rxirb, dbus_status);
+		}
+	}
+
+	dbus_usbos_qenq(&usbos_info->req_rxfreeq, req, &usbos_info->rxfree_lock);
+
+	/* Mark the interface as busy to reset USB autosuspend timer */
+	USB_MARK_LAST_BUSY(usbos_info->usb);
+} /* dbus_usbos_recv_complete_handle */
+
+/** called by Linux kernel when it returns a URB to this driver */
+static void
+dbus_usbos_recv_complete(CALLBACK_ARGS)
+{
+#ifdef USBOS_THREAD
+	dbus_usbos_dispatch_schedule(CALLBACK_ARGS_DATA);
+#else /*  !USBOS_THREAD */
+	dbus_usbos_recv_complete_handle(urb->context, urb->actual_length, urb->status);
+#endif /*  USBOS_THREAD */
+}
+
+
+/**
+ * If Linux notifies our driver that a control read or write URB has completed, we should notify
+ * the DBUS layer above us (dbus_usb.c in this case).
+ */
+static void
+dbus_usbos_ctl_complete(usbos_info_t *usbos_info, int type, int urbstatus)
+{
+	int status = DBUS_ERR;
+
+	if (usbos_info == NULL)
+		return;
+
+	switch (urbstatus) {
+		case 0:
+			status = DBUS_OK;
+		break;
+		case -EINPROGRESS:
+		case -ENOENT:
+		default:
+#ifdef INTR_EP_ENABLE
+			DBUSERR(("%s:%d fail status %d bus:%d susp:%d intr:%d ctli:%d ctlo:%d\n",
+				__FUNCTION__, type, urbstatus,
+				usbos_info->pub->busstate, g_probe_info.suspend_state,
+				usbos_info->intr_urb_submitted, usbos_info->ctlin_urb_submitted,
+				usbos_info->ctlout_urb_submitted));
+#else
+			DBUSERR(("%s: failed with status %d\n", __FUNCTION__, urbstatus));
+			status = DBUS_ERR;
+		break;
+#endif /* INTR_EP_ENABLE */
+	}
+
+	if (usbos_info->cbarg && usbos_info->cbs) {
+		if (usbos_info->cbs->ctl_complete)
+			usbos_info->cbs->ctl_complete(usbos_info->cbarg, type, status);
+	}
+}
+
+/** called by Linux */
+static void
+dbus_usbos_ctlread_complete(CALLBACK_ARGS)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *)urb->context;
+
+	ASSERT(urb);
+	usbos_info = (usbos_info_t *)urb->context;
+
+	dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_READ, urb->status);
+
+#ifdef USBOS_THREAD
+	if (usbos_info->rxctl_deferrespok) {
+		usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS |
+		USB_RECIP_INTERFACE;
+		usbos_info->ctl_read.bRequest = 1;
+	}
+#endif
+
+	up(&usbos_info->ctl_lock);
+
+	USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+}
+
+/** called by Linux */
+static void
+dbus_usbos_ctlwrite_complete(CALLBACK_ARGS)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *)urb->context;
+
+	ASSERT(urb);
+	usbos_info = (usbos_info_t *)urb->context;
+
+	dbus_usbos_ctl_complete(usbos_info, DBUS_CBCTL_WRITE, urb->status);
+
+#ifdef USBOS_TX_THREAD
+	usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+#endif /* USBOS_TX_THREAD */
+
+	up(&usbos_info->ctl_lock);
+
+	USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+}
+
+#ifdef INTR_EP_ENABLE
+/** called by Linux */
+static void
+dbus_usbos_intr_complete(CALLBACK_ARGS)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *)urb->context;
+	bool killed = (g_probe_info.suspend_state == USBOS_SUSPEND_STATE_SUSPEND_PENDING) ? 1 : 0;
+
+	if (usbos_info == NULL || usbos_info->pub == NULL)
+		return;
+	if ((urb->status == -ENOENT && (!killed)) || urb->status == -ESHUTDOWN ||
+		urb->status == -ENODEV) {
+		dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+	}
+
+	if (usbos_info->pub->busstate == DBUS_STATE_DOWN) {
+		DBUSERR(("%s: intr cb when DBUS down, ignoring\n", __FUNCTION__));
+		return;
+	}
+	dbus_usbos_ctl_complete(usbos_info, DBUS_CBINTR_POLL, urb->status);
+}
+#endif	/* INTR_EP_ENABLE */
+
+/**
+ * When the bus is going to sleep or halt, the Linux kernel requires us to take ownership of our
+ * URBs again. Multiple code paths in this file require a list of URBs to be cancelled in a
+ * concurrency-safe manner.
+ */
+static void
+dbus_usbos_unlink(struct list_head *urbreq_q, spinlock_t *lock)
+{
+	urb_req_t *req;
+
+	/* dbus_usbos_recv_complete() adds req back to req_freeq */
+	while ((req = dbus_usbos_qdeq(urbreq_q, lock)) != NULL) {
+		ASSERT(req->urb != NULL);
+		USB_UNLINK_URB(req->urb);
+	}
+}
+
+/** multiple code paths in this file require the bus to stop */
+static void
+dbus_usbos_cancel_all_urbs(usbos_info_t *usbos_info)
+{
+	int rxposted, txposted;
+
+	DBUSTRACE(("%s: unlink all URBs\n", __FUNCTION__));
+
+#ifdef USBOS_TX_THREAD
+	usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+
+	/* Yield the CPU to TX thread so all pending requests are submitted */
+	while (!list_empty(&usbos_info->usbos_tx_list)) {
+		wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+		OSL_SLEEP(10);
+	}
+#endif /* USBOS_TX_THREAD */
+
+	/* tell Linux kernel to cancel a single intr, ctl and blk URB */
+	if (usbos_info->intr_urb)
+		USB_UNLINK_URB(usbos_info->intr_urb);
+	if (usbos_info->ctl_urb)
+		USB_UNLINK_URB(usbos_info->ctl_urb);
+	if (usbos_info->blk_urb)
+		USB_UNLINK_URB(usbos_info->blk_urb);
+
+	dbus_usbos_unlink(&usbos_info->req_txpostedq, &usbos_info->txposted_lock);
+	dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock);
+
+	/* Wait until the callbacks for all submitted URBs have been called, because the
+	 * handler needs to know whether a USB suspend is in progress.
+	 */
+	SPINWAIT((atomic_read(&usbos_info->txposted) != 0 ||
+		atomic_read(&usbos_info->rxposted) != 0), 10000);
+
+	txposted = atomic_read(&usbos_info->txposted);
+	rxposted = atomic_read(&usbos_info->rxposted);
+	if (txposted != 0 || rxposted != 0) {
+		DBUSERR(("%s ERROR: REQs posted, rx=%d tx=%d!\n",
+			__FUNCTION__, rxposted, txposted));
+	}
+} /* dbus_usbos_cancel_all_urbs */
+
+/** multiple code paths require the bus to stop */
+static void
+dbusos_stop(usbos_info_t *usbos_info)
+{
+	urb_req_t *req;
+	int rxposted;
+	req = NULL;
+	BCM_REFERENCE(req);
+
+	ASSERT(usbos_info);
+
+	dbus_usbos_state_change(usbos_info, DBUS_STATE_DOWN);
+
+	dbus_usbos_cancel_all_urbs(usbos_info);
+
+#ifdef USBOS_THREAD
+	/* yield the CPU to rx packet thread */
+	while (1) {
+		if (atomic_read(&usbos_info->usbos_list_cnt) <= 0)
+			break;
+		wake_up_interruptible(&usbos_info->usbos_queue_head);
+		OSL_SLEEP(3);
+	}
+#endif /* USBOS_THREAD */
+
+	rxposted = atomic_read(&usbos_info->rxposted);
+	if (rxposted > 0) {
+		DBUSERR(("%s ERROR: rx REQs posted=%d in stop!\n", __FUNCTION__,
+			rxposted));
+	}
+
+	ASSERT(atomic_read(&usbos_info->txposted) == 0 && rxposted == 0);
+
+} /* dbusos_stop */
+
+#if defined(USB_SUSPEND_AVAILABLE)
+
+/**
+ * Linux kernel sports a 'USB auto suspend' feature. See: http://lwn.net/Articles/373550/
+ * The suspend method is called by the Linux kernel to warn the driver that the device is going to
+ * be suspended.  If the driver returns a negative error code, the suspend will be aborted. If the
+ * driver returns 0, it must cancel all outstanding URBs (usb_kill_urb()) and not submit any more.
+ */
+static int
+dbus_usbos_suspend(struct usb_interface *intf,
+            pm_message_t message)
+{
+	DBUSERR(("%s suspend state: %d\n", __FUNCTION__, g_probe_info.suspend_state));
+	/* DHD for full dongle model */
+	g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPEND_PENDING;
+	dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_SLEEP);
+	dbus_usbos_cancel_all_urbs((usbos_info_t*)g_probe_info.usbos_info);
+	g_probe_info.suspend_state = USBOS_SUSPEND_STATE_SUSPENDED;
+
+	return 0;
+}
+
+/**
+ * The resume method is called to tell the driver that the device has been resumed and the driver
+ * can return to normal operation.  URBs may once more be submitted.
+ */
+static int dbus_usbos_resume(struct usb_interface *intf)
+{
+	DBUSERR(("%s Device resumed\n", __FUNCTION__));
+
+	dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_UP);
+	g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE;
+	return 0;
+}
+
+/**
+ * This function is called directly by the Linux kernel when the suspended device has been reset
+ * instead of being resumed.
+ */
+static int dbus_usbos_reset_resume(struct usb_interface *intf)
+{
+	DBUSERR(("%s Device reset resumed\n", __FUNCTION__));
+
+	/* The device may have lost power, so a firmware download may be required */
+	dbus_usbos_state_change((usbos_info_t*)g_probe_info.usbos_info, DBUS_STATE_DL_NEEDED);
+	g_probe_info.suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE;
+	return 0;
+}
+
+#endif /* USB_SUSPEND_AVAILABLE */
+
+/**
+ * Called by the Linux kernel at initialization time; the kernel wants to know whether our driver
+ * will accept the caller-supplied USB interface. Note that USB drivers are bound to interfaces,
+ * not to USB devices.
+ */
+#ifdef KERNEL26
+#define DBUS_USBOS_PROBE() static int dbus_usbos_probe(struct usb_interface *intf, const struct usb_device_id *id)
+#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_interface *intf)
+#else
+#define DBUS_USBOS_PROBE() static void * dbus_usbos_probe(struct usb_device *usb, unsigned int ifnum, const struct usb_device_id *id)
+#define DBUS_USBOS_DISCONNECT() static void dbus_usbos_disconnect(struct usb_device *usb, void *ptr)
+#endif /* KERNEL26 */
+
+DBUS_USBOS_PROBE()
+{
+	int ep;
+	struct usb_endpoint_descriptor *endpoint;
+	int ret = 0;
+#ifdef KERNEL26
+	struct usb_device *usb = interface_to_usbdev(intf);
+#else
+	int claimed = 0;
+#endif
+	int num_of_eps;
+#ifdef BCMUSBDEV_COMPOSITE
+	int wlan_if = -1;
+	bool intr_ep = FALSE;
+#endif /* BCMUSBDEV_COMPOSITE */
+	wifi_adapter_info_t *adapter;
+
+	DHD_MUTEX_LOCK();
+
+	DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__,
+		usb->bus->busnum, usb->portnum));
+	adapter = dhd_wifi_platform_attach_adapter(USB_BUS, usb->bus->busnum,
+		usb->portnum, WIFI_STATUS_POWER_ON);
+	if (adapter == NULL) {
+		DBUSERR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef BCMUSBDEV_COMPOSITE
+	wlan_if = dbus_usbos_intf_wlan(usb);
+#ifdef KERNEL26
+	if ((wlan_if >= 0) && (IFPTR(usb, wlan_if) == intf))
+#else
+	if (wlan_if == ifnum)
+#endif /* KERNEL26 */
+	{
+#endif /* BCMUSBDEV_COMPOSITE */
+		g_probe_info.usb = usb;
+		g_probe_info.dldone = TRUE;
+#ifdef BCMUSBDEV_COMPOSITE
+	} else {
+		DBUSTRACE(("dbus_usbos_probe: skip probe for non WLAN interface\n"));
+		ret = BCME_UNSUPPORTED;
+		goto fail;
+	}
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#ifdef KERNEL26
+	g_probe_info.intf = intf;
+#endif /* KERNEL26 */
+
+#ifdef BCMUSBDEV_COMPOSITE
+	if (IFDESC(usb, wlan_if).bInterfaceNumber > USB_COMPIF_MAX)
+#else
+	if (IFDESC(usb, CONTROL_IF).bInterfaceNumber)
+#endif /* BCMUSBDEV_COMPOSITE */
+	{
+		ret = -1;
+		goto fail;
+	}
+	if (id != NULL) {
+		g_probe_info.vid = id->idVendor;
+		g_probe_info.pid = id->idProduct;
+	}
+
+#ifdef KERNEL26
+	usb_set_intfdata(intf, &g_probe_info);
+#endif
+
+	/* Check that the device supports only one configuration */
+	if (usb->descriptor.bNumConfigurations != 1) {
+		ret = -1;
+		goto fail;
+	}
+
+	if (usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
+#ifdef BCMUSBDEV_COMPOSITE
+		if ((usb->descriptor.bDeviceClass != USB_CLASS_MISC) &&
+			(usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS)) {
+#endif /* BCMUSBDEV_COMPOSITE */
+			ret = -1;
+			goto fail;
+#ifdef BCMUSBDEV_COMPOSITE
+		}
+#endif /* BCMUSBDEV_COMPOSITE */
+	}
+
+	/*
+	 * Only the BDC interface configuration is supported:
+	 *	Device class: USB_CLASS_VENDOR_SPEC
+	 *	if0 class: USB_CLASS_VENDOR_SPEC
+	 *	if0/ep0: control
+	 *	if0/ep1: bulk in
+	 *	if0/ep2: bulk out (ok if swapped with bulk in)
+	 */
+	if (CONFIGDESC(usb)->bNumInterfaces != 1) {
+#ifdef BCMUSBDEV_COMPOSITE
+		if (CONFIGDESC(usb)->bNumInterfaces > USB_COMPIF_MAX) {
+#endif /* BCMUSBDEV_COMPOSITE */
+			ret = -1;
+			goto fail;
+#ifdef BCMUSBDEV_COMPOSITE
+		}
+#endif /* BCMUSBDEV_COMPOSITE */
+	}
+
+	/* Check interface */
+#ifndef KERNEL26
+#ifdef BCMUSBDEV_COMPOSITE
+	if (usb_interface_claimed(IFPTR(usb, wlan_if)))
+#else
+	if (usb_interface_claimed(IFPTR(usb, CONTROL_IF)))
+#endif /* BCMUSBDEV_COMPOSITE */
+	{
+		ret = -1;
+		goto fail;
+	}
+#endif /* !KERNEL26 */
+
+#ifdef BCMUSBDEV_COMPOSITE
+	if ((IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
+		IFDESC(usb, wlan_if).bInterfaceSubClass != 2 ||
+		IFDESC(usb, wlan_if).bInterfaceProtocol != 0xff) &&
+		(IFDESC(usb, wlan_if).bInterfaceClass != USB_CLASS_MISC ||
+		IFDESC(usb, wlan_if).bInterfaceSubClass != USB_SUBCLASS_COMMON ||
+		IFDESC(usb, wlan_if).bInterfaceProtocol != USB_PROTO_IAD))
+#else
+	if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
+		IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 ||
+		IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff)
+#endif /* BCMUSBDEV_COMPOSITE */
+	{
+#ifdef BCMUSBDEV_COMPOSITE
+			DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n",
+				__FUNCTION__,
+				IFDESC(usb, wlan_if).bInterfaceClass,
+				IFDESC(usb, wlan_if).bInterfaceSubClass,
+				IFDESC(usb, wlan_if).bInterfaceProtocol));
+#else
+			DBUSERR(("%s: invalid control interface: class %d, subclass %d, proto %d\n",
+				__FUNCTION__,
+				IFDESC(usb, CONTROL_IF).bInterfaceClass,
+				IFDESC(usb, CONTROL_IF).bInterfaceSubClass,
+				IFDESC(usb, CONTROL_IF).bInterfaceProtocol));
+#endif /* BCMUSBDEV_COMPOSITE */
+			ret = -1;
+			goto fail;
+	}
+
+	/* Check control endpoint */
+#ifdef BCMUSBDEV_COMPOSITE
+	endpoint = &IFEPDESC(usb, wlan_if, 0);
+#else
+	endpoint = &IFEPDESC(usb, CONTROL_IF, 0);
+#endif /* BCMUSBDEV_COMPOSITE */
+	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) {
+#ifdef BCMUSBDEV_COMPOSITE
+		if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+			USB_ENDPOINT_XFER_BULK) {
+#endif /* BCMUSBDEV_COMPOSITE */
+			DBUSERR(("%s: invalid control endpoint %d\n",
+				__FUNCTION__, endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
+			ret = -1;
+			goto fail;
+#ifdef BCMUSBDEV_COMPOSITE
+		}
+#endif /* BCMUSBDEV_COMPOSITE */
+	}
+
+#ifdef BCMUSBDEV_COMPOSITE
+	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) {
+#endif /* BCMUSBDEV_COMPOSITE */
+		g_probe_info.intr_pipe =
+			usb_rcvintpipe(usb, endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+#ifdef BCMUSBDEV_COMPOSITE
+		intr_ep = TRUE;
+	}
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#ifndef KERNEL26
+	/* Claim interface */
+#ifdef BCMUSBDEV_COMPOSITE
+	usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, wlan_if), &g_probe_info);
+#else
+	usb_driver_claim_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF), &g_probe_info);
+#endif /* BCMUSBDEV_COMPOSITE */
+	claimed = 1;
+#endif /* !KERNEL26 */
+	g_probe_info.rx_pipe = 0;
+	g_probe_info.rx_pipe2 = 0;
+	g_probe_info.tx_pipe = 0;
+#ifdef BCMUSBDEV_COMPOSITE
+	if (intr_ep)
+		ep = 1;
+	else
+		ep = 0;
+	num_of_eps = IFDESC(usb, wlan_if).bNumEndpoints - 1;
+#else
+	num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
+#endif /* BCMUSBDEV_COMPOSITE */
+
+	if ((num_of_eps != 2) && (num_of_eps != 3)) {
+#ifdef BCMUSBDEV_COMPOSITE
+		if (num_of_eps > 7)
+#endif /* BCMUSBDEV_COMPOSITE */
+			ASSERT(0);
+	}
+	/* Check data endpoints and get pipes */
+#ifdef BCMUSBDEV_COMPOSITE
+	for (; ep <= num_of_eps; ep++)
+#else
+	for (ep = 1; ep <= num_of_eps; ep++)
+#endif /* BCMUSBDEV_COMPOSITE */
+	{
+#ifdef BCMUSBDEV_COMPOSITE
+		endpoint = &IFEPDESC(usb, wlan_if, ep);
+#else
+		endpoint = &IFEPDESC(usb, BULK_IF, ep);
+#endif /* BCMUSBDEV_COMPOSITE */
+		if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+		    USB_ENDPOINT_XFER_BULK) {
+			DBUSERR(("%s: invalid data endpoint %d\n",
+			           __FUNCTION__, ep));
+			ret = -1;
+			goto fail;
+		}
+
+		if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
+			/* direction: dongle->host */
+			if (!g_probe_info.rx_pipe) {
+				g_probe_info.rx_pipe = usb_rcvbulkpipe(usb,
+					(endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK));
+			} else {
+				g_probe_info.rx_pipe2 = usb_rcvbulkpipe(usb,
+					(endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK));
+			}
+
+		} else
+			g_probe_info.tx_pipe = usb_sndbulkpipe(usb, (endpoint->bEndpointAddress &
+			     USB_ENDPOINT_NUMBER_MASK));
+	}
+
+	/* Allocate interrupt URB and data buffer */
+	/* RNDIS says 8-byte intr, our old drivers used 4-byte */
+#ifdef BCMUSBDEV_COMPOSITE
+	g_probe_info.intr_size = (IFEPDESC(usb, wlan_if, 0).wMaxPacketSize == 16) ? 8 : 4;
+	g_probe_info.interval = IFEPDESC(usb, wlan_if, 0).bInterval;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21))
+	usb->quirks |= USB_QUIRK_NO_SET_INTF;
+#endif
+#else
+	g_probe_info.intr_size = (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == 16) ? 8 : 4;
+	g_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+#endif /* BCMUSBDEV_COMPOSITE */
+
+#ifndef KERNEL26
+	/* usb_fill_int_urb does the interval decoding in 2.6 */
+	if (usb->speed == USB_SPEED_HIGH)
+		g_probe_info.interval = 1 << (g_probe_info.interval - 1);
+#endif
+	if (usb->speed == USB_SPEED_SUPER) {
+		g_probe_info.device_speed = SUPER_SPEED;
+		DBUSERR(("super speed device detected\n"));
+	} else if (usb->speed == USB_SPEED_HIGH) {
+		g_probe_info.device_speed = HIGH_SPEED;
+		DBUSERR(("high speed device detected\n"));
+	} else {
+		g_probe_info.device_speed = FULL_SPEED;
+		DBUSERR(("full speed device detected\n"));
+	}
+	if (g_probe_info.dereged == FALSE && probe_cb) {
+		disc_arg = probe_cb(probe_arg, "", USB_BUS, usb->bus->busnum, usb->portnum, 0);
+	}
+
+	g_probe_info.disc_cb_done = FALSE;
+
+#ifdef KERNEL26
+	intf->needs_remote_wakeup = 1;
+#endif /* KERNEL26 */
+	DHD_MUTEX_UNLOCK();
+
+	/* Success */
+#ifdef KERNEL26
+	return DBUS_OK;
+#else
+	usb_inc_dev_use(usb);
+	return &g_probe_info;
+#endif
+
+fail:
+	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+#ifdef BCMUSBDEV_COMPOSITE
+	if (ret != BCME_UNSUPPORTED)
+#endif /* BCMUSBDEV_COMPOSITE */
+		DBUSERR(("%s: failed with errno %d\n", __FUNCTION__, ret));
+#ifndef KERNEL26
+	if (claimed)
+#ifdef BCMUSBDEV_COMPOSITE
+		usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if));
+#else
+		usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF));
+#endif /* BCMUSBDEV_COMPOSITE */
+#endif /* !KERNEL26 */
+
+	DHD_MUTEX_UNLOCK();
+#ifdef KERNEL26
+	usb_set_intfdata(intf, NULL);
+	return ret;
+#else
+	return NULL;
+#endif
+} /* dbus_usbos_probe */
+
+/** Called by the Linux kernel; the counterpart of dbus_usbos_probe() */
+DBUS_USBOS_DISCONNECT()
+{
+#ifdef KERNEL26
+	struct usb_device *usb = interface_to_usbdev(intf);
+	probe_info_t *probe_usb_init_data = usb_get_intfdata(intf);
+#else
+	probe_info_t *probe_usb_init_data = (probe_info_t *) ptr;
+#endif
+	usbos_info_t *usbos_info;
+
+	DHD_MUTEX_LOCK();
+
+	DBUSERR(("%s: bus num(busnum)=%d, slot num (portnum)=%d\n", __FUNCTION__,
+		usb->bus->busnum, usb->portnum));
+
+	if (probe_usb_init_data) {
+		usbos_info = (usbos_info_t *) probe_usb_init_data->usbos_info;
+		if (usbos_info) {
+			if ((probe_usb_init_data->dereged == FALSE) && disconnect_cb && disc_arg) {
+				disconnect_cb(disc_arg);
+				disc_arg = NULL;
+				probe_usb_init_data->disc_cb_done = TRUE;
+			}
+		}
+	}
+
+	if (usb) {
+#ifndef KERNEL26
+#ifdef BCMUSBDEV_COMPOSITE
+		usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, wlan_if));
+#else
+		usb_driver_release_interface(&dbus_usbdev, IFPTR(usb, CONTROL_IF));
+#endif /* BCMUSBDEV_COMPOSITE */
+		usb_dec_dev_use(usb);
+#endif /* !KERNEL26 */
+	}
+	DHD_MUTEX_UNLOCK();
+} /* dbus_usbos_disconnect */
+
+#define LOOPBACK_PKT_START 0xBABE1234
+
+bool is_loopback_pkt(void *buf)
+{
+
+	uint32 *buf_ptr = (uint32 *) buf;
+
+	if (*buf_ptr == LOOPBACK_PKT_START)
+		return TRUE;
+	return FALSE;
+
+}
+
+int matches_loopback_pkt(void *buf)
+{
+	int i, j;
+	unsigned char *cbuf = (unsigned char *) buf;
+
+	for (i = 4; i < loopback_size; i++) {
+		if (cbuf[i] != (i % 256)) {
+			printf("%s: mismatch at i=%d %d : ", __FUNCTION__, i, cbuf[i]);
+			for (j = i; ((j < i+ 16) && (j < loopback_size)); j++) {
+				printf("%d ", cbuf[j]);
+			}
+			printf("\n");
+			return 0;
+		}
+	}
+	loopback_rx_cnt++;
+	return 1;
+}
+
+int dbus_usbos_loopback_tx(void *usbos_info_ptr, int cnt, int size)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) usbos_info_ptr;
+	unsigned char *buf;
+	int j;
+	void* p = NULL;
+	int rc, last_rx_cnt;
+	int tx_failed_cnt;
+	int max_size = 1650;
+	int usb_packet_size = 512;
+	int min_packet_size = 10;
+
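+	/* A length that is an exact multiple of the bulk packet size would require a trailing
+	 * zero-length packet to terminate the transfer; shrink it by one byte instead.
+	 */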
+	if (size % usb_packet_size == 0) {
+		size = size - 1;
+		DBUSERR(("%s: overriding size=%d \n", __FUNCTION__, size));
+	}
+
+	if (size < min_packet_size) {
+		size = min_packet_size;
+		DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, min_packet_size));
+	}
+	if (size > max_size) {
+		size = max_size;
+		DBUSERR(("%s: overriding size=%d\n", __FUNCTION__, max_size));
+	}
+
+	loopback_tx_cnt = 0;
+	loopback_rx_cnt = 0;
+	tx_failed_cnt = 0;
+	loopback_size   = size;
+
+	while (loopback_tx_cnt < cnt) {
+		uint32 *x;
+		int pkt_size = loopback_size;
+
+		p = PKTGET(usbos_info->pub->osh, pkt_size, TRUE);
+		if (p == NULL) {
+			DBUSERR(("%s:%d Failed to allocate packet sz=%d\n",
+			       __FUNCTION__, __LINE__, pkt_size));
+			return BCME_ERROR;
+		}
+		x = (uint32*) PKTDATA(usbos_info->pub->osh, p);
+		*x = LOOPBACK_PKT_START;
+		buf = (unsigned char*) x;
+		for (j = 4; j < pkt_size; j++) {
+			buf[j] = j % 256;
+		}
+		rc = dbus_send_buf(usbos_info->pub, buf, pkt_size, p);
+		if (rc != BCME_OK) {
+			DBUSERR(("%s:%d Freeing packet \n", __FUNCTION__, __LINE__));
+			PKTFREE(usbos_info->pub->osh, p, TRUE);
+			dbus_usbos_wait(usbos_info, 1);
+			tx_failed_cnt++;
+		} else {
+			loopback_tx_cnt++;
+			tx_failed_cnt = 0;
+		}
+		if (tx_failed_cnt == 5) {
+			DBUSERR(("%s : Failed to send loopback packets cnt=%d loopback_tx_cnt=%d\n",
+			 __FUNCTION__, cnt, loopback_tx_cnt));
+			break;
+		}
+	}
+	printf("Transmitted %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size);
+
+	last_rx_cnt = loopback_rx_cnt;
+	while (loopback_rx_cnt < loopback_tx_cnt) {
+		dbus_usbos_wait(usbos_info, 1);
+		if (loopback_rx_cnt <= last_rx_cnt) {
+			DBUSERR(("%s: Matched rx cnt stuck at %d \n", __FUNCTION__, last_rx_cnt));
+			return BCME_ERROR;
+		}
+		last_rx_cnt = loopback_rx_cnt;
+	}
+	printf("Received %d loopback packets of size %d\n", loopback_tx_cnt, loopback_size);
+
+	return BCME_OK;
+} /* dbus_usbos_loopback_tx */
+
+/**
+ * Higher layer (dbus_usb.c) wants to transmit an I/O Request Block
+ *     @param[in] txirb txirb->pkt, if non-NULL, contains a single packet or a chain of packets
+ */
+static int
+dbus_usbos_intf_send_irb(void *bus, dbus_irb_tx_t *txirb)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	urb_req_t *req, *req_zlp = NULL;
+	int ret = DBUS_OK;
+	unsigned long flags;
+	void *pkt;
+	uint32 buffer_length;
+	uint8 *buf;
+
+	if ((usbos_info == NULL) || !usbos_info->tx_pipe) {
+		return DBUS_ERR;
+	}
+
+	if (txirb->pkt != NULL) {
+		buffer_length = pkttotlen(usbos_info->pub->osh, txirb->pkt);
+		/* In case of multiple packets the values below may be overwritten */
+		txirb->send_buf = NULL;
+		buf = PKTDATA(usbos_info->pub->osh, txirb->pkt);
+	} else { /* txirb->buf != NULL */
+		ASSERT(txirb->buf != NULL);
+		ASSERT(txirb->send_buf == NULL);
+		buffer_length = txirb->len;
+		buf = txirb->buf;
+	}
+
+	if (!(req = dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) {
+		DBUSERR(("%s No free URB!\n", __FUNCTION__));
+		return DBUS_ERR_TXDROP;
+	}
+
+	/* If not using standard Linux kernel functionality for handling Zero Length Packet(ZLP),
+	 * the dbus needs to generate ZLP when length is multiple of MaxPacketSize.
+	 */
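+	/* e.g. with a 512-byte high-speed bulk MaxPacketSize, a 1024-byte payload must be followed
+	 * by a ZLP so the dongle can tell that the transfer is complete.
+	 */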
+#ifndef WL_URB_ZPKT
+	if (!(buffer_length % usbos_info->maxps)) {
+		if (!(req_zlp =
+			dbus_usbos_qdeq(&usbos_info->req_txfreeq, &usbos_info->txfree_lock))) {
+			DBUSERR(("%s No free URB for ZLP!\n", __FUNCTION__));
+			dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+			return DBUS_ERR_TXDROP;
+		}
+
+		/* No txirb, so that dbus_usbos_send_complete can differentiate between
+		 * DATA and ZLP.
+		 */
+		req_zlp->arg = NULL;
+		req_zlp->usbinfo = usbos_info;
+		req_zlp->buf_len = 0;
+
+		usb_fill_bulk_urb(req_zlp->urb, usbos_info->usb, usbos_info->tx_pipe, NULL,
+			0, (usb_complete_t)dbus_usbos_send_complete, req_zlp);
+
+		req_zlp->urb->transfer_flags |= URB_QUEUE_BULK;
+	}
+#endif /* !WL_URB_ZPKT */
+
+#ifndef USBOS_TX_THREAD
+	/* Disable USB autosuspend until this request completes, request USB resume if needed.
+	 * Because this call runs asynchronously, there is no guarantee the bus is resumed before
+	 * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid
+	 * this.
+	 */
+	USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf);
+#endif /* !USBOS_TX_THREAD */
+
+	spin_lock_irqsave(&usbos_info->txlock, flags);
+
+	req->arg = txirb;
+	req->usbinfo = usbos_info;
+	req->buf_len = 0;
+
+	/* Prepare the URB */
+	if (txirb->pkt != NULL) {
+		uint32 pktlen;
+		uint8 *transfer_buf;
+
+		/* For multiple packets, allocate contiguous buffer and copy packet data to it */
+		if (PKTNEXT(usbos_info->pub->osh, txirb->pkt)) {
+			transfer_buf = MALLOC(usbos_info->pub->osh, buffer_length);
+			if (!transfer_buf) {
+				ret = DBUS_ERR_TXDROP;
+				DBUSERR(("fail to alloc to usb buffer\n"));
+				goto fail;
+			}
+
+			pkt = txirb->pkt;
+			txirb->send_buf = transfer_buf;
+			req->buf_len = buffer_length;
+
+			while (pkt) {
+				pktlen = PKTLEN(usbos_info->pub->osh, pkt);
+				bcopy(PKTDATA(usbos_info->pub->osh, pkt), transfer_buf, pktlen);
+				transfer_buf += pktlen;
+				pkt = PKTNEXT(usbos_info->pub->osh, pkt);
+			}
+
+			ASSERT(((uint8 *) txirb->send_buf + buffer_length) == transfer_buf);
+
+			/* Overwrite buf pointer with pointer to allocated contiguous transfer_buf
+			 */
+			buf = txirb->send_buf;
+		}
+	}
+
+	usb_fill_bulk_urb(req->urb, usbos_info->usb, usbos_info->tx_pipe, buf,
+		buffer_length, (usb_complete_t)dbus_usbos_send_complete, req);
+
+	req->urb->transfer_flags |= URB_QUEUE_BULK;
+
+#ifdef USBOS_TX_THREAD
+	/* Enqueue TX request, the TX thread will resume the bus if needed and submit
+	 * it asynchronously
+	 */
+	dbus_usbos_qenq(&usbos_info->usbos_tx_list, req, &usbos_info->usbos_tx_list_lock);
+	if (req_zlp != NULL) {
+		dbus_usbos_qenq(&usbos_info->usbos_tx_list, req_zlp,
+			&usbos_info->usbos_tx_list_lock);
+	}
+	spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+	wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+	return DBUS_OK;
+#else
+	if ((ret = USB_SUBMIT_URB(req->urb))) {
+		ret = DBUS_ERR_TXDROP;
+		goto fail;
+	}
+
+	dbus_usbos_qenq(&usbos_info->req_txpostedq, req, &usbos_info->txposted_lock);
+	atomic_inc(&usbos_info->txposted);
+
+	if (req_zlp != NULL) {
+		if ((ret = USB_SUBMIT_URB(req_zlp->urb))) {
+			DBUSERR(("failed to submit ZLP URB!\n"));
+			ASSERT(0);
+			ret = DBUS_ERR_TXDROP;
+			goto fail2;
+		}
+
+		dbus_usbos_qenq(&usbos_info->req_txpostedq, req_zlp, &usbos_info->txposted_lock);
+		/* Also increment txposted for zlp packet, as it will be decremented in
+		 * dbus_usbos_send_complete()
+		 */
+		atomic_inc(&usbos_info->txposted);
+	}
+
+	spin_unlock_irqrestore(&usbos_info->txlock, flags);
+	return DBUS_OK;
+#endif /* USBOS_TX_THREAD */
+
+fail:
+	if (txirb->send_buf != NULL) {
+		MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len);
+		txirb->send_buf = NULL;
+		req->buf_len = 0;
+	}
+	dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+#ifndef USBOS_TX_THREAD
+fail2:
+#endif
+	if (req_zlp != NULL) {
+		dbus_usbos_qenq(&usbos_info->req_txfreeq, req_zlp, &usbos_info->txfree_lock);
+	}
+
+	spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+#ifndef USBOS_TX_THREAD
+	USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+#endif /* !USBOS_TX_THREAD */
+
+	return ret;
+} /* dbus_usbos_intf_send_irb */
+
+/** Higher layer (dbus_usb.c) recycles a received (and used) packet. */
+static int
+dbus_usbos_intf_recv_irb(void *bus, dbus_irb_rx_t *rxirb)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	int ret = DBUS_OK;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, 0);
+	return ret;
+}
+
+static int
+dbus_usbos_intf_recv_irb_from_ep(void *bus, dbus_irb_rx_t *rxirb, uint32 ep_idx)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	int ret = DBUS_OK;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+#ifdef INTR_EP_ENABLE
+	/* By specifying the ep_idx value of 0xff, the cdc layer is asking to
+	 * submit an interrupt URB
+	 */
+	if (rxirb == NULL && ep_idx == 0xff) {
+		/* submit intr URB */
+		if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb)) < 0) {
+			DBUSERR(("%s intr USB_SUBMIT_URB failed, status %d\n",
+				__FUNCTION__, ret));
+		}
+		return ret;
+	}
+#else
+	if (rxirb == NULL) {
+		return DBUS_ERR;
+	}
+#endif /* INTR_EP_ENABLE */
+
+	ret = dbus_usbos_recv_urb_submit(usbos_info, rxirb, ep_idx);
+	return ret;
+}
+
+/** Higher layer (dbus_usb.c) wants to cancel an IRB */
+static int
+dbus_usbos_intf_cancel_irb(void *bus, dbus_irb_tx_t *txirb)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
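+	/* Per-IRB cancellation is not implemented by this interface; it always reports an error. */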
+	return DBUS_ERR;
+}
+
+/** Only one CTL transfer can be pending at any time. This function may block. */
+static int
+dbus_usbos_intf_send_ctl(void *bus, uint8 *buf, int len)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	uint16 size;
+#ifndef USBOS_TX_THREAD
+	int status;
+#endif /* USBOS_TX_THREAD */
+
+	if ((usbos_info == NULL) || (buf == NULL) || (len == 0))
+		return DBUS_ERR;
+
+	if (usbos_info->ctl_urb == NULL)
+		return DBUS_ERR;
+
+	/* Block until a pending CTL transfer has completed */
+	if (down_interruptible(&usbos_info->ctl_lock) != 0) {
+		return DBUS_ERR_TXCTLFAIL;
+	}
+
+#ifdef USBOS_TX_THREAD
+	ASSERT(usbos_info->ctl_state == USBOS_REQUEST_STATE_UNSCHEDULED);
+#else
+	/* Disable USB autosuspend until this request completes, request USB resume if needed.
+	 * Because this call runs asynchronously, there is no guarantee the bus is resumed before
+	 * the URB is submitted, and the URB might be dropped. Use USBOS_TX_THREAD to avoid
+	 * this.
+	 */
+	USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf);
+#endif /* USBOS_TX_THREAD */
+
+	size = len;
+	usbos_info->ctl_write.wLength = cpu_to_le16p(&size);
+	usbos_info->ctl_urb->transfer_buffer_length = size;
+
+	usb_fill_control_urb(usbos_info->ctl_urb,
+		usbos_info->usb,
+		usb_sndctrlpipe(usbos_info->usb, 0),
+		(unsigned char *) &usbos_info->ctl_write,
+		buf, size, (usb_complete_t)dbus_usbos_ctlwrite_complete, usbos_info);
+
+#ifdef USBOS_TX_THREAD
+	/* Enqueue CTRL request for transmission by the TX thread. The
+	 * USB bus will first be resumed if needed.
+	 */
+	usbos_info->ctl_state = USBOS_REQUEST_STATE_SCHEDULED;
+	wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+#else
+	status = USB_SUBMIT_URB(usbos_info->ctl_urb);
+	if (status < 0) {
+		DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status));
+		up(&usbos_info->ctl_lock);
+
+		USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+
+		return DBUS_ERR_TXCTLFAIL;
+	}
+#endif /* USBOS_TX_THREAD */
+
+	return DBUS_OK;
+} /* dbus_usbos_intf_send_ctl */
+
+/** This function does not seem to be called by anyone, including dbus_usb.c */
+static int
+dbus_usbos_intf_recv_ctl(void *bus, uint8 *buf, int len)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	int status;
+	uint16 size;
+
+	if ((usbos_info == NULL) || (buf == NULL) || (len == 0))
+		return DBUS_ERR;
+
+	if (usbos_info->ctl_urb == NULL)
+		return DBUS_ERR;
+
+	/* Block until a pending CTRL transfer has completed */
+	if (down_interruptible(&usbos_info->ctl_lock) != 0) {
+		return DBUS_ERR_TXCTLFAIL;
+	}
+
+	/* Disable USB autosuspend until this request completes, request USB resume if needed. */
+	USB_AUTOPM_GET_INTERFACE_ASYNC(g_probe_info.intf);
+
+	size = len;
+	usbos_info->ctl_read.wLength = cpu_to_le16p(&size);
+	usbos_info->ctl_urb->transfer_buffer_length = size;
+
+	if (usbos_info->rxctl_deferrespok) {
+		/* BMAC model */
+		usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR |
+			USB_RECIP_INTERFACE;
+		usbos_info->ctl_read.bRequest = DL_DEFER_RESP_OK;
+	} else {
+		/* full dongle model */
+		usbos_info->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_CLASS |
+			USB_RECIP_INTERFACE;
+		usbos_info->ctl_read.bRequest = 1;
+	}
+
+	usb_fill_control_urb(usbos_info->ctl_urb,
+		usbos_info->usb,
+		usb_rcvctrlpipe(usbos_info->usb, 0),
+		(unsigned char *) &usbos_info->ctl_read,
+		buf, size, (usb_complete_t)dbus_usbos_ctlread_complete, usbos_info);
+
+	status = USB_SUBMIT_URB(usbos_info->ctl_urb);
+	if (status < 0) {
+		DBUSERR(("%s: usb_submit_urb failed %d\n", __FUNCTION__, status));
+		up(&usbos_info->ctl_lock);
+
+		USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+
+		return DBUS_ERR_RXCTLFAIL;
+	}
+
+	return DBUS_OK;
+}
+
+static int
+dbus_usbos_intf_get_attrib(void *bus, dbus_attrib_t *attrib)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if ((usbos_info == NULL) || (attrib == NULL))
+		return DBUS_ERR;
+
+	attrib->bustype = DBUS_USB;
+	attrib->vid = g_probe_info.vid;
+	attrib->pid = g_probe_info.pid;
+	attrib->devid = 0x4322;
+
+	attrib->nchan = 1;
+
+	/* MaxPacketSize for USB hi-speed bulk out is 512 bytes
+	 * and 64-bytes for full-speed.
+	 * When sending pkt > MaxPacketSize, Host SW breaks it
+	 * up into multiple packets.
+	 */
+	attrib->mtu = usbos_info->maxps;
+
+	return DBUS_OK;
+}
+
+/** Called by higher layer (dbus_usb.c) when it wants to 'up' the USB interface to the dongle */
+static int
+dbus_usbos_intf_up(void *bus)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	uint16 ifnum;
+#ifdef BCMUSBDEV_COMPOSITE
+	int wlan_if = 0;
+#endif
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	if (usbos_info->usb == NULL)
+		return DBUS_ERR;
+
+#if defined(INTR_EP_ENABLE)
+	/* full dongle use intr EP, bmac doesn't use it */
+	if (usbos_info->intr_urb) {
+		int ret;
+
+		usb_fill_int_urb(usbos_info->intr_urb, usbos_info->usb,
+			usbos_info->intr_pipe, &usbos_info->intr,
+			usbos_info->intr_size, (usb_complete_t)dbus_usbos_intr_complete,
+			usbos_info, usbos_info->interval);
+
+		if ((ret = USB_SUBMIT_URB(usbos_info->intr_urb))) {
+			DBUSERR(("%s USB_SUBMIT_URB failed with status %d\n", __FUNCTION__, ret));
+			return DBUS_ERR;
+		}
+	}
+#endif /* INTR_EP_ENABLE */
+
+	if (usbos_info->ctl_urb) {
+		usbos_info->ctl_in_pipe = usb_rcvctrlpipe(usbos_info->usb, 0);
+		usbos_info->ctl_out_pipe = usb_sndctrlpipe(usbos_info->usb, 0);
+
+#ifdef BCMUSBDEV_COMPOSITE
+		wlan_if = dbus_usbos_intf_wlan(usbos_info->usb);
+		ifnum = cpu_to_le16(IFDESC(usbos_info->usb, wlan_if).bInterfaceNumber);
+#else
+		ifnum = cpu_to_le16(IFDESC(usbos_info->usb, CONTROL_IF).bInterfaceNumber);
+#endif /* BCMUSBDEV_COMPOSITE */
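+		/* Control transfers use class requests directed at the WLAN interface: bRequest 0
+		 * for host-to-dongle writes and bRequest 1 for dongle-to-host reads, matching the
+		 * CDC encapsulated-command convention.
+		 */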
+		/* CTL Write */
+		usbos_info->ctl_write.bRequestType =
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+		usbos_info->ctl_write.bRequest = 0;
+		usbos_info->ctl_write.wValue = cpu_to_le16(0);
+		usbos_info->ctl_write.wIndex = cpu_to_le16p(&ifnum);
+
+		/* CTL Read */
+		usbos_info->ctl_read.bRequestType =
+			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+		usbos_info->ctl_read.bRequest = 1;
+		usbos_info->ctl_read.wValue = cpu_to_le16(0);
+		usbos_info->ctl_read.wIndex = cpu_to_le16p(&ifnum);
+	}
+
+	/* Success, indicate usbos_info is fully up */
+	dbus_usbos_state_change(usbos_info, DBUS_STATE_UP);
+
+	return DBUS_OK;
+} /* dbus_usbos_intf_up */
+
+static int
+dbus_usbos_intf_down(void *bus)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	dbusos_stop(usbos_info);
+	return DBUS_OK;
+}
+
+static int
+dbus_usbos_intf_stop(void *bus)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	dbusos_stop(usbos_info);
+	return DBUS_OK;
+}
+
+
+/** Called by higher layer (dbus_usb.c) */
+static int
+dbus_usbos_intf_set_config(void *bus, dbus_config_t *config)
+{
+	int err = DBUS_ERR;
+	usbos_info_t* usbos_info = bus;
+
+	if (config->config_id == DBUS_CONFIG_ID_RXCTL_DEFERRES) {
+		usbos_info->rxctl_deferrespok = config->rxctl_deferrespok;
+		err = DBUS_OK;
+	} else if (config->config_id == DBUS_CONFIG_ID_AGGR_LIMIT) {
+		/* DBUS_CONFIG_ID_AGGR_LIMIT shouldn't be called after probe stage */
+		ASSERT(disc_arg == NULL);
+		ASSERT(config->aggr_param.maxrxsf > 0);
+		ASSERT(config->aggr_param.maxrxsize > 0);
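+		/* Growing the rx buffer size requires draining all currently posted rx URBs first,
+		 * since their buffers were sized for the old limit.
+		 */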
+		if (config->aggr_param.maxrxsize > usbos_info->rxbuf_len) {
+			int state = usbos_info->pub->busstate;
+			dbus_usbos_unlink(&usbos_info->req_rxpostedq, &usbos_info->rxposted_lock);
+			while (atomic_read(&usbos_info->rxposted)) {
+				DBUSTRACE(("%s rxposted is %d, delay 1 ms\n", __FUNCTION__,
+					atomic_read(&usbos_info->rxposted)));
+				dbus_usbos_wait(usbos_info, 1);
+			}
+			usbos_info->rxbuf_len = config->aggr_param.maxrxsize;
+			dbus_usbos_state_change(usbos_info, state);
+		}
+		err = DBUS_OK;
+	}
+
+	return err;
+}
+
+
+/** Called by dbus_usb.c when it wants to download firmware into the dongle */
+bool
+dbus_usbos_dl_cmd(usbos_info_t *usbinfo, uint8 cmd, void *buffer, int buflen)
+{
+	int transferred;
+	int index = 0;
+	char *tmpbuf;
+
+	if ((usbinfo == NULL) || (buffer == NULL) || (buflen == 0))
+		return FALSE;
+
+	tmpbuf = (char *) MALLOC(usbinfo->pub->osh, buflen);
+	if (!tmpbuf) {
+		DBUSERR(("%s: Unable to allocate memory \n", __FUNCTION__));
+		return FALSE;
+	}
+
+#ifdef BCM_REQUEST_FW
+	if (cmd == DL_GO) {
+		index = 1;
+	}
+#endif
+
+	/* Disable USB autosuspend until this request completes, request USB resume if needed. */
+	USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+	transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0),
+		cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+		0, index,
+		(void*) tmpbuf, buflen, USB_CTRL_EP_TIMEOUT);
+	if (transferred == buflen) {
+		memcpy(buffer, tmpbuf, buflen);
+	} else {
+		DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred));
+	}
+
+	USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+	MFREE(usbinfo->pub->osh, tmpbuf, buflen);
+	return (transferred == buflen);
+}
+
+/**
+ * Called by dbus_usb.c when it wants to download a buffer into the dongle (e.g. as part of the
+ * download process, when writing nvram variables).
+ */
+int
+dbus_write_membytes(usbos_info_t* usbinfo, bool set, uint32 address, uint8 *data, uint size)
+{
+	hwacc_t hwacc;
+	int write_bytes = 4;
+	int status;
+	int retval = 0;
+
+	DBUSTRACE(("Enter:%s\n", __FUNCTION__));
+
+	/* Read is not supported */
+	if (set == 0) {
+		DBUSERR(("Currently read is not supported!!\n"));
+		return -1;
+	}
+
+	USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+	hwacc.cmd = DL_CMD_WRHW;
+	hwacc.addr = address;
+
+	DBUSTRACE(("Address:%x size:%d", hwacc.addr, size));
+	do {
+		if (size >= 4) {
+			write_bytes = 4;
+		} else if (size >= 2) {
+			write_bytes = 2;
+		} else {
+			write_bytes = 1;
+		}
+
+		hwacc.len = write_bytes;
+
+		while (size >= write_bytes) {
+			hwacc.data = *((unsigned int*)data);
+
+			status = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0),
+				DL_WRHW, (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+				1, 0, (char *)&hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT);
+
+			if (status < 0) {
+				retval = -1;
+				DBUSERR((" Ctrl write hwacc failed w/status %d @ address:%x \n",
+					status, hwacc.addr));
+				goto err;
+			}
+
+			hwacc.addr += write_bytes;
+			data += write_bytes;
+			size -= write_bytes;
+		}
+	} while (size > 0);
+
+err:
+	USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+	return retval;
+}
+
+int
+dbus_usbos_readreg(void *bus, uint32 regaddr, int datalen, uint32 *value)
+{
+	usbos_info_t *usbinfo = (usbos_info_t *) bus;
+	int ret = DBUS_OK;
+	int transferred;
+	uint32 cmd;
+	hwacc_t	hwacc;
+
+	if (usbinfo == NULL)
+		return DBUS_ERR;
+
+	if (datalen == 1)
+		cmd = DL_RDHW8;
+	else if (datalen == 2)
+		cmd = DL_RDHW16;
+	else
+		cmd = DL_RDHW32;
+
+	USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
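+	/* The 32-bit register address is split across the control request:
+	 * wValue carries the low 16 bits, wIndex the high 16 bits.
+	 */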
+	transferred = USB_CONTROL_MSG(usbinfo->usb, usb_rcvctrlpipe(usbinfo->usb, 0),
+		cmd, (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+		(uint16)(regaddr), (uint16)(regaddr >> 16),
+		(void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT);
+
+	if (transferred >= sizeof(hwacc_t)) {
+		*value = hwacc.data;
+	} else {
+		DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred));
+		ret = DBUS_ERR;
+	}
+
+	USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+	return ret;
+}
+
+int
+dbus_usbos_writereg(void *bus, uint32 regaddr, int datalen, uint32 data)
+{
+	usbos_info_t *usbinfo = (usbos_info_t *) bus;
+	int ret = DBUS_OK;
+	int transferred;
+	uint32 cmd = DL_WRHW;
+	hwacc_t	hwacc;
+
+	if (usbinfo == NULL)
+		return DBUS_ERR;
+
+	USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+	hwacc.cmd = DL_WRHW;
+	hwacc.addr = regaddr;
+	hwacc.data = data;
+	hwacc.len = datalen;
+
+	transferred = USB_CONTROL_MSG(usbinfo->usb, usb_sndctrlpipe(usbinfo->usb, 0),
+		cmd, (USB_DIR_OUT| USB_TYPE_VENDOR | USB_RECIP_INTERFACE),
+		1, 0,
+		(void *) &hwacc, sizeof(hwacc_t), USB_CTRL_EP_TIMEOUT);
+
+	if (transferred != sizeof(hwacc_t)) {
+		DBUSERR(("%s: usb_control_msg failed %d\n", __FUNCTION__, transferred));
+		ret = DBUS_ERR;
+	}
+
+	USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+	return ret;
+}
+
+int
+dbus_usbos_wait(usbos_info_t *usbinfo, uint16 ms)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	if (in_interrupt())
+		mdelay(ms);
+	else
+		msleep_interruptible(ms);
+#else
+	wait_ms(ms);
+#endif
+	return DBUS_OK;
+}
+
+/** Called by dbus_usb.c as part of the firmware download process */
+bool
+dbus_usbos_dl_send_bulk(usbos_info_t *usbinfo, void *buffer, int len)
+{
+	bool ret = TRUE;
+	int status;
+	int transferred = 0;
+
+	if (usbinfo == NULL)
+		return FALSE;
+
+	USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+	status = USB_BULK_MSG(usbinfo->usb, usbinfo->tx_pipe,
+		buffer, len,
+		&transferred, USB_BULK_EP_TIMEOUT);
+
+	if (status < 0) {
+		DBUSERR(("%s: usb_bulk_msg failed %d\n", __FUNCTION__, status));
+		ret = FALSE;
+	}
+
+	USB_AUTOPM_PUT_INTERFACE(g_probe_info.intf);
+
+	return ret;
+}
+
+static bool
+dbus_usbos_intf_recv_needed(void *bus)
+{
+	return FALSE;
+}
+
+/**
+ * Higher layer (dbus_usb.c) wants to execute a function on the condition that the rx spin lock has
+ * been acquired.
+ */
+static void*
+dbus_usbos_intf_exec_rxlock(void *bus, exec_cb_t cb, struct exec_parms *args)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	void *ret;
+	unsigned long flags;
+
+	if (usbos_info == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&usbos_info->rxlock, flags);
+	ret = cb(args);
+	spin_unlock_irqrestore(&usbos_info->rxlock, flags);
+
+	return ret;
+}
+
+static void*
+dbus_usbos_intf_exec_txlock(void *bus, exec_cb_t cb, struct exec_parms *args)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+	void *ret;
+	unsigned long flags;
+
+	if (usbos_info == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&usbos_info->txlock, flags);
+	ret = cb(args);
+	spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+	return ret;
+}
+
+/**
+ * if an error condition was detected in this module, the higher DBUS layer (dbus_usb.c) has to
+ * be notified.
+ */
+int
+dbus_usbos_errhandler(void *bus, int err)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	if (usbos_info->cbarg && usbos_info->cbs) {
+		if (usbos_info->cbs->errhandler)
+			usbos_info->cbs->errhandler(usbos_info->cbarg, err);
+	}
+
+	return DBUS_OK;
+}
+
+/**
+ * if a change in bus state was detected in this module, the higher DBUS layer (dbus_usb.c) has to
+ * be notified.
+ */
+int
+dbus_usbos_state_change(void *bus, int state)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) bus;
+
+	if (usbos_info == NULL)
+		return DBUS_ERR;
+
+	if (usbos_info->cbarg && usbos_info->cbs) {
+		if (usbos_info->cbs->state_change)
+			usbos_info->cbs->state_change(usbos_info->cbarg, state);
+	}
+
+	usbos_info->pub->busstate = state;
+	return DBUS_OK;
+}
+
+int
+dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb,
+	disconnect_cb_t discb, void *prarg, dbus_intf_t **intf, void *param1, void *param2)
+{
+	bzero(&g_probe_info, sizeof(probe_info_t));
+
+	probe_cb = prcb;
+	disconnect_cb = discb;
+	probe_arg = prarg;
+
+	devid_table[0].idVendor = vid;
+	devid_table[0].idProduct = pid;
+
+	*intf = &dbus_usbos_intf;
+
+	USB_REGISTER();
+
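+	/* Device discovery happens asynchronously through the registered probe
+	 * callback, so report "no device" to the caller at this point.
+	 */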
+	return DBUS_ERR_NODEVICE;
+}
+
+int
+dbus_bus_osl_deregister()
+{
+	g_probe_info.dereged = TRUE;
+
+	DHD_MUTEX_LOCK();
+	if (disconnect_cb && disc_arg && (g_probe_info.disc_cb_done == FALSE)) {
+		disconnect_cb(disc_arg);
+		disc_arg = NULL;
+	}
+	DHD_MUTEX_UNLOCK();
+
+	USB_DEREGISTER();
+
+	return DBUS_OK;
+}
+
+void *
+dbus_usbos_intf_attach(dbus_pub_t *pub, void *cbarg, dbus_intf_callbacks_t *cbs)
+{
+	usbos_info_t *usbos_info;
+
+	if (g_probe_info.dldone == FALSE) {
+		DBUSERR(("%s: err device not downloaded!\n", __FUNCTION__));
+		return NULL;
+	}
+
+	/* Sanity check for BUS_INFO() */
+	ASSERT(OFFSETOF(usbos_info_t, pub) == 0);
+
+	usbos_info = MALLOC(pub->osh, sizeof(usbos_info_t));
+	if (usbos_info == NULL)
+		return NULL;
+
+	bzero(usbos_info, sizeof(usbos_info_t));
+
+	usbos_info->pub = pub;
+	usbos_info->cbarg = cbarg;
+	usbos_info->cbs = cbs;
+
+	/* Needed for disconnect() */
+	g_probe_info.usbos_info = usbos_info;
+
+	/* Update USB Info */
+	usbos_info->usb = g_probe_info.usb;
+	usbos_info->rx_pipe = g_probe_info.rx_pipe;
+	usbos_info->rx_pipe2 = g_probe_info.rx_pipe2;
+	usbos_info->tx_pipe = g_probe_info.tx_pipe;
+	usbos_info->intr_pipe = g_probe_info.intr_pipe;
+	usbos_info->intr_size = g_probe_info.intr_size;
+	usbos_info->interval = g_probe_info.interval;
+	usbos_info->pub->device_speed = g_probe_info.device_speed;
+	if (usbos_info->rx_pipe2) {
+		usbos_info->pub->attrib.has_2nd_bulk_in_ep = 1;
+	} else {
+		usbos_info->pub->attrib.has_2nd_bulk_in_ep = 0;
+	}
+
+	if (usbos_info->tx_pipe)
+		usbos_info->maxps = usb_maxpacket(usbos_info->usb,
+			usbos_info->tx_pipe, usb_pipeout(usbos_info->tx_pipe));
+
+	INIT_LIST_HEAD(&usbos_info->req_rxfreeq);
+	INIT_LIST_HEAD(&usbos_info->req_txfreeq);
+	INIT_LIST_HEAD(&usbos_info->req_rxpostedq);
+	INIT_LIST_HEAD(&usbos_info->req_txpostedq);
+	spin_lock_init(&usbos_info->rxfree_lock);
+	spin_lock_init(&usbos_info->txfree_lock);
+	spin_lock_init(&usbos_info->rxposted_lock);
+	spin_lock_init(&usbos_info->txposted_lock);
+	spin_lock_init(&usbos_info->rxlock);
+	spin_lock_init(&usbos_info->txlock);
+
+	atomic_set(&usbos_info->rxposted, 0);
+	atomic_set(&usbos_info->txposted, 0);
+
+
+#ifdef USB_DISABLE_INT_EP
+	usbos_info->intr_urb = NULL;
+#else
+	if (!(usbos_info->intr_urb = USB_ALLOC_URB())) {
+		DBUSERR(("%s: usb_alloc_urb (tx) failed\n", __FUNCTION__));
+		goto fail;
+	}
+#endif
+
+	if (!(usbos_info->ctl_urb = USB_ALLOC_URB())) {
+		DBUSERR(("%s: usb_alloc_urb (tx) failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	init_waitqueue_head(&usbos_info->wait);
+
+	if (!(usbos_info->blk_urb = USB_ALLOC_URB())) {	/* for embedded image downloading */
+		DBUSERR(("%s: usb_alloc_urb (tx) failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	usbos_info->rxbuf_len = (uint)usbos_info->pub->rxsize;
+
+
+
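+	/* Pre-allocate the full set of TX URB requests up front; only an initial
+	 * batch (DBUS_USB_RXQUEUE_BATCH_ADD) of RX URB requests is allocated here.
+	 */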
+	atomic_set(&usbos_info->txallocated, 0);
+	if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info,
+		usbos_info->pub->ntxq, FALSE)) {
+		goto fail;
+	}
+
+	atomic_set(&usbos_info->rxallocated, 0);
+	if (DBUS_OK != dbus_usbos_urbreqs_alloc(usbos_info,
+		MIN(DBUS_USB_RXQUEUE_BATCH_ADD, usbos_info->pub->nrxq),
+		TRUE)) {
+		goto fail;
+	}
+
+	sema_init(&usbos_info->ctl_lock, 1);
+
+#ifdef USBOS_THREAD
+	if (dbus_usbos_thread_init(usbos_info) == NULL)
+		goto fail;
+#endif /* USBOS_THREAD */
+
+#ifdef USBOS_TX_THREAD
+	if (dbus_usbos_tx_thread_init(usbos_info) == NULL)
+		goto fail;
+#endif /* USBOS_TX_THREAD */
+
+	pub->dev_info = g_probe_info.usb;
+
+
+	return (void *) usbos_info;
+fail:
+	if (usbos_info->intr_urb) {
+		USB_FREE_URB(usbos_info->intr_urb);
+		usbos_info->intr_urb = NULL;
+	}
+
+	if (usbos_info->ctl_urb) {
+		USB_FREE_URB(usbos_info->ctl_urb);
+		usbos_info->ctl_urb = NULL;
+	}
+
+#if defined(BCM_REQUEST_FW)
+	if (usbos_info->blk_urb) {
+		USB_FREE_URB(usbos_info->blk_urb);
+		usbos_info->blk_urb = NULL;
+	}
+#endif
+
+	dbus_usbos_urbreqs_free(usbos_info, TRUE);
+	atomic_set(&usbos_info->rxallocated, 0);
+	dbus_usbos_urbreqs_free(usbos_info, FALSE);
+	atomic_set(&usbos_info->txallocated, 0);
+
+	g_probe_info.usbos_info = NULL;
+
+	MFREE(pub->osh, usbos_info, sizeof(usbos_info_t));
+	return NULL;
+} /* dbus_usbos_intf_attach */
+
+void
+dbus_usbos_intf_detach(dbus_pub_t *pub, void *info)
+{
+	usbos_info_t *usbos_info = (usbos_info_t *) info;
+	osl_t *osh = pub->osh;
+
+	if (usbos_info == NULL) {
+		return;
+	}
+
+#ifdef USBOS_TX_THREAD
+	dbus_usbos_tx_thread_deinit(usbos_info);
+#endif /* USBOS_TX_THREAD */
+
+	/* Must unlink all URBs prior to driver unload;
+	 * otherwise a URB callback can occur after the driver
+	 * has been de-allocated and rmmod'd.
+	 */
+	dbusos_stop(usbos_info);
+
+	if (usbos_info->intr_urb) {
+		USB_FREE_URB(usbos_info->intr_urb);
+		usbos_info->intr_urb = NULL;
+	}
+
+	if (usbos_info->ctl_urb) {
+		USB_FREE_URB(usbos_info->ctl_urb);
+		usbos_info->ctl_urb = NULL;
+	}
+
+	if (usbos_info->blk_urb) {
+		USB_FREE_URB(usbos_info->blk_urb);
+		usbos_info->blk_urb = NULL;
+	}
+
+	dbus_usbos_urbreqs_free(usbos_info, TRUE);
+	atomic_set(&usbos_info->rxallocated, 0);
+	dbus_usbos_urbreqs_free(usbos_info, FALSE);
+	atomic_set(&usbos_info->txallocated, 0);
+
+#ifdef USBOS_THREAD
+	dbus_usbos_thread_deinit(usbos_info);
+#endif /* USBOS_THREAD */
+
+	g_probe_info.usbos_info = NULL;
+	MFREE(osh, usbos_info, sizeof(usbos_info_t));
+} /* dbus_usbos_intf_detach */
+
+
+#ifdef USBOS_TX_THREAD
+
+void*
+dbus_usbos_tx_thread_init(usbos_info_t *usbos_info)
+{
+	spin_lock_init(&usbos_info->usbos_tx_list_lock);
+	INIT_LIST_HEAD(&usbos_info->usbos_tx_list);
+	init_waitqueue_head(&usbos_info->usbos_tx_queue_head);
+
+	usbos_info->usbos_tx_kt = kthread_create(dbus_usbos_tx_thread_func,
+		usbos_info, "usb-tx-thread");
+
+	if (IS_ERR(usbos_info->usbos_tx_kt)) {
+		DBUSERR(("Thread Creation failed\n"));
+		return (NULL);
+	}
+
+	usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+	wake_up_process(usbos_info->usbos_tx_kt);
+
+	return (usbos_info->usbos_tx_kt);
+}
+
+void
+dbus_usbos_tx_thread_deinit(usbos_info_t *usbos_info)
+{
+	urb_req_t *req;
+
+	if (usbos_info->usbos_tx_kt) {
+		wake_up_interruptible(&usbos_info->usbos_tx_queue_head);
+		kthread_stop(usbos_info->usbos_tx_kt);
+	}
+
+	/* Move pending requests to free queue so they can be freed */
+	while ((req = dbus_usbos_qdeq(
+		&usbos_info->usbos_tx_list, &usbos_info->usbos_tx_list_lock)) != NULL) {
+		dbus_usbos_qenq(&usbos_info->req_txfreeq, req, &usbos_info->txfree_lock);
+	}
+}
+
+/**
+ * Submit CTRL and DATA URBs from a separate thread so that the USB in-band resume is allowed to block.
+ */
+int
+dbus_usbos_tx_thread_func(void *data)
+{
+	usbos_info_t  *usbos_info = (usbos_info_t *)data;
+	urb_req_t     *req;
+	dbus_irb_tx_t *txirb;
+	int           ret;
+	unsigned long flags;
+
+#ifdef WL_THREADNICE
+	set_user_nice(current, WL_THREADNICE);
+#endif
+
+	while (1) {
+		/* Wait until there are URBs to submit */
+		wait_event_interruptible_timeout(
+			usbos_info->usbos_tx_queue_head,
+			!list_empty(&usbos_info->usbos_tx_list) ||
+			usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED,
+			100);
+
+		if (kthread_should_stop())
+			break;
+
+		/* Submit CTRL URB if needed */
+		if (usbos_info->ctl_state == USBOS_REQUEST_STATE_SCHEDULED) {
+
+			/* Disable USB autosuspend until this request completes. If the
+			 * interface was suspended, this call blocks until it has been resumed.
+			 */
+			USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+			usbos_info->ctl_state = USBOS_REQUEST_STATE_SUBMITTED;
+
+			ret = USB_SUBMIT_URB(usbos_info->ctl_urb);
+			if (ret != 0) {
+				DBUSERR(("%s CTRL USB_SUBMIT_URB failed, status %d\n",
+					__FUNCTION__, ret));
+
+				usbos_info->ctl_state = USBOS_REQUEST_STATE_UNSCHEDULED;
+				up(&usbos_info->ctl_lock);
+
+				USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+			}
+		}
+
+		/* Submit all available TX URBs */
+		while ((req = dbus_usbos_qdeq(&usbos_info->usbos_tx_list,
+			&usbos_info->usbos_tx_list_lock)) != NULL) {
+
+			/* Disable USB autosuspend until this request completes. If the
+			 * interface was suspended, this call blocks until it has been resumed.
+			 */
+			USB_AUTOPM_GET_INTERFACE(g_probe_info.intf);
+
+			spin_lock_irqsave(&usbos_info->txlock, flags);
+
+			ret = USB_SUBMIT_URB(req->urb);
+			if (ret == 0) {
+				/* URB submitted successfully */
+				dbus_usbos_qenq(&usbos_info->req_txpostedq, req,
+					&usbos_info->txposted_lock);
+				atomic_inc(&usbos_info->txposted);
+			} else {
+				/* Submitting the URB failed. */
+				DBUSERR(("%s TX USB_SUBMIT_URB failed, status %d\n",
+					__FUNCTION__, ret));
+
+				USB_AUTOPM_PUT_INTERFACE_ASYNC(g_probe_info.intf);
+			}
+
+			spin_unlock_irqrestore(&usbos_info->txlock, flags);
+
+			if (ret != 0) {
+				/* Cleanup and notify higher layers */
+				dbus_usbos_qenq(&usbos_info->req_txfreeq, req,
+					&usbos_info->txfree_lock);
+
+				txirb = req->arg;
+				if (txirb->send_buf) {
+					MFREE(usbos_info->pub->osh, txirb->send_buf, req->buf_len);
+					txirb->send_buf = NULL;
+					req->buf_len = 0;
+				}
+
+				if (likely (usbos_info->cbarg && usbos_info->cbs)) {
+					if (likely (usbos_info->cbs->send_irb_complete != NULL))
+						usbos_info->cbs->send_irb_complete(
+							usbos_info->cbarg, txirb, DBUS_ERR_TXDROP);
+				}
+			}
+		}
+	}
+
+	return 0;
+} /* dbus_usbos_tx_thread_func */
+
+#endif /* USBOS_TX_THREAD */
+
+#ifdef USBOS_THREAD
+
+/**
+ * Increase system performance by creating a USB thread that runs in parallel with other system
+ * activity.
+ */
+static void*
+dbus_usbos_thread_init(usbos_info_t *usbos_info)
+{
+	usbos_list_entry_t  *entry;
+	unsigned long       flags, ii;
+
+	spin_lock_init(&usbos_info->usbos_list_lock);
+	spin_lock_init(&usbos_info->ctrl_lock);
+	INIT_LIST_HEAD(&usbos_info->usbos_list);
+	INIT_LIST_HEAD(&usbos_info->usbos_free_list);
+	init_waitqueue_head(&usbos_info->usbos_queue_head);
+	atomic_set(&usbos_info->usbos_list_cnt, 0);
+
+
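+	/* Pre-allocate one dispatch-list entry per potential outstanding URB
+	 * (nrxq + ntxq) so completed URBs can be queued for the worker thread
+	 * without allocating memory in the completion path.
+	 */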
+	for (ii = 0; ii < (usbos_info->pub->nrxq + usbos_info->pub->ntxq); ii++) {
+		entry = MALLOC(usbos_info->pub->osh, sizeof(usbos_list_entry_t));
+		if (entry) {
+			spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+			list_add_tail((struct list_head*) entry, &usbos_info->usbos_free_list);
+			spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+		} else {
+			DBUSERR(("Failed to create list\n"));
+		}
+	}
+
+	usbos_info->usbos_kt = kthread_create(dbus_usbos_thread_func,
+		usbos_info, "usb-thread");
+
+	if (IS_ERR(usbos_info->usbos_kt)) {
+		DBUSERR(("Thread Creation failed\n"));
+		return (NULL);
+	}
+
+	wake_up_process(usbos_info->usbos_kt);
+
+	return (usbos_info->usbos_kt);
+}
+
+static void
+dbus_usbos_thread_deinit(usbos_info_t *usbos_info)
+{
+	struct list_head    *cur, *next;
+	usbos_list_entry_t  *entry;
+	unsigned long       flags;
+
+	if (usbos_info->usbos_kt) {
+		wake_up_interruptible(&usbos_info->usbos_queue_head);
+		kthread_stop(usbos_info->usbos_kt);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_safe(cur, next, &usbos_info->usbos_list)
+	{
+		entry = list_entry(cur, struct usbos_list_entry, list);
+		/* detach this entry from the list and then free the entry */
+		spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+		list_del(cur);
+		MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t));
+		spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+	}
+
+	list_for_each_safe(cur, next, &usbos_info->usbos_free_list)
+	{
+		entry = list_entry(cur, struct usbos_list_entry, list);
+		/* detach this entry from the list and then free the entry */
+		spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+		list_del(cur);
+		MFREE(usbos_info->pub->osh, entry, sizeof(usbos_list_entry_t));
+		spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+/** Process completed URBs in a worker thread */
+static int
+dbus_usbos_thread_func(void *data)
+{
+	usbos_info_t        *usbos_info = (usbos_info_t *)data;
+	usbos_list_entry_t  *entry;
+	struct list_head    *cur, *next;
+	unsigned long       flags;
+
+#ifdef WL_THREADNICE
+	set_user_nice(current, WL_THREADNICE);
+#endif
+
+	while (1) {
+		/* If the list is empty, then go to sleep */
+		wait_event_interruptible_timeout
+		(usbos_info->usbos_queue_head,
+			atomic_read(&usbos_info->usbos_list_cnt) > 0,
+			100);
+
+		if (kthread_should_stop())
+			break;
+
+		spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+
+		/* For each entry on the list, process it. Remove the entry from
+		 * the list when done.
+		 */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		list_for_each_safe(cur, next, &usbos_info->usbos_list)
+		{
+			urb_req_t           *req;
+			int                 len;
+			int                 stat;
+			usbos_info_t        *usbos_info_local;
+
+			entry = list_entry(cur, struct usbos_list_entry, list);
+			if (entry == NULL)
+				break;
+
+			req = entry->urb_context;
+			len = entry->urb_length;
+			stat = entry->urb_status;
+			usbos_info_local = req->usbinfo;
+
+			/* detach this entry from the list and attach it to the free list */
+			list_del_init(cur);
+			spin_unlock_irqrestore(&usbos_info_local->usbos_list_lock, flags);
+
+			dbus_usbos_recv_complete_handle(req, len, stat);
+
+			spin_lock_irqsave(&usbos_info_local->usbos_list_lock, flags);
+
+			list_add_tail(cur, &usbos_info_local->usbos_free_list);
+
+			atomic_dec(&usbos_info_local->usbos_list_cnt);
+		}
+
+		spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	return 0;
+} /* dbus_usbos_thread_func */
+
+/** Called when Linux invokes the URB completion callback; see dbus_usbos_recv_complete() */
+static void
+dbus_usbos_dispatch_schedule(CALLBACK_ARGS)
+{
+	urb_req_t           *req = urb->context;
+	usbos_info_t        *usbos_info = req->usbinfo;
+	usbos_list_entry_t  *entry;
+	unsigned long       flags;
+	struct list_head    *cur;
+
+	spin_lock_irqsave(&usbos_info->usbos_list_lock, flags);
+
+	cur   = usbos_info->usbos_free_list.next;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	entry = list_entry(cur, struct usbos_list_entry, list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	/* detach this entry from the free list and prepare to insert it into the in-use list */
+	list_del_init(cur);
+
+	if (entry) {
+		entry->urb_context = urb->context;
+		entry->urb_length  = urb->actual_length;
+		entry->urb_status  = urb->status;
+
+		atomic_inc(&usbos_info->usbos_list_cnt);
+		list_add_tail(cur, &usbos_info->usbos_list);
+	} else {
+		DBUSERR(("!!!!!!OUT OF MEMORY!!!!!!!\n"));
+	}
+
+	spin_unlock_irqrestore(&usbos_info->usbos_list_lock, flags);
+
+	/* thread */
+	wake_up_interruptible(&usbos_info->usbos_queue_head);
+} /* dbus_usbos_dispatch_schedule */
+
+#endif /* USBOS_THREAD */
+
+
+
+
+#ifdef BCM_REQUEST_FW
+
+struct request_fw_context {
+	const struct firmware *firmware;
+	struct semaphore lock;
+};
+
+/*
+ * Callback for dbus_request_firmware().
+ */
+static void
+dbus_request_firmware_done(const struct firmware *firmware, void *ctx)
+{
+	struct request_fw_context *context = (struct request_fw_context*)ctx;
+
+	/* Store the received firmware handle in the context and wake requester */
+	context->firmware = firmware;
+	up(&context->lock);
+}
+
+/*
+ * Send a firmware request and wait for completion.
+ *
+ * The use of the asynchronous version of request_firmware() is needed to avoid
+ * kernel oopses when we just come out of system hibernate.
+ */
+static int
+dbus_request_firmware(const char *name, const struct firmware **firmware)
+{
+	struct request_fw_context *context;
+	int ret;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+
+	sema_init(&context->lock, 0);
+
+	ret = request_firmware_nowait(THIS_MODULE, true, name, &g_probe_info.usb->dev,
+	                              GFP_KERNEL, context, dbus_request_firmware_done);
+	if (ret) {
+		kfree(context);
+		return ret;
+	}
+
+	/* Wait for completion */
+	if (down_interruptible(&context->lock) != 0) {
+		kfree(context);
+		return -ERESTARTSYS;
+	}
+
+	*firmware = context->firmware;
+	kfree(context);
+
+	return *firmware != NULL ? 0 : -ENOENT;
+}
+
+static void *
+dbus_get_fwfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev)
+{
+	const struct firmware *firmware = NULL;
+#ifndef OEM_ANDROID
+	s8 *device_id = NULL;
+	s8 *chip_rev = "";
+#endif /* OEM_ANDROID */
+	s8 file_name[64];
+	int ret;
+
+#ifndef OEM_ANDROID
+	switch (devid) {
+		case BCM4350_CHIP_ID:
+		case BCM4354_CHIP_ID:
+		case BCM43556_CHIP_ID:
+		case BCM43558_CHIP_ID:
+		case BCM43566_CHIP_ID:
+		case BCM43568_CHIP_ID:
+		case BCM43570_CHIP_ID:
+		case BCM4358_CHIP_ID:
+			device_id = "4350";
+			break;
+		case BCM43143_CHIP_ID:
+			device_id = "43143";
+			break;
+		case BCM43234_CHIP_ID:
+		case BCM43235_CHIP_ID:
+		case BCM43236_CHIP_ID:
+			device_id = "43236";
+			break;
+		case BCM43242_CHIP_ID:
+			device_id = "43242";
+			break;
+		case BCM43238_CHIP_ID:
+			device_id = "43238";
+			break;
+		case BCM43526_CHIP_ID:
+			device_id = "43526";
+			break;
+		case BCM43569_CHIP_ID:
+			device_id = "43569";
+			switch (chiprev) {
+				case 0:
+					chip_rev = "a0";
+					break;
+				case 2:
+					chip_rev = "a2";
+					break;
+				default:
+					break;
+			}
+			break;
+		default:
+			DBUSERR(("unsupported device %x\n", devid));
+			return NULL;
+	}
+
+	/* Load firmware */
+	snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-firmware.bin", device_id, chip_rev);
+#else
+	snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_FW_PATH);
+#endif /* OEM_ANDROID */
+
+	ret = dbus_request_firmware(file_name, &firmware);
+	if (ret) {
+		DBUSERR(("fail to request firmware %s\n", file_name));
+		return NULL;
+	}
+
+	*fwlen = firmware->size;
+	*fw = (uint8 *)firmware->data;
+	return (void *)firmware;
+
+}
+
+static void *
+dbus_get_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, uint16 boardtype, uint16 boardrev)
+{
+	const struct firmware *firmware = NULL;
+#ifndef OEM_ANDROID
+	s8 *device_id = NULL;
+	s8 *chip_rev = "";
+#endif /* OEM_ANDROID */
+	s8 file_name[64];
+	int ret;
+
+#ifndef OEM_ANDROID
+	switch (devid) {
+		case BCM4350_CHIP_ID:
+		case BCM4354_CHIP_ID:
+		case BCM43556_CHIP_ID:
+		case BCM43558_CHIP_ID:
+		case BCM43566_CHIP_ID:
+		case BCM43568_CHIP_ID:
+		case BCM43570_CHIP_ID:
+		case BCM4358_CHIP_ID:
+			device_id = "4350";
+			break;
+		case BCM43143_CHIP_ID:
+			device_id = "43143";
+			break;
+		case BCM43234_CHIP_ID:
+			device_id = "43234";
+			break;
+		case BCM43235_CHIP_ID:
+			device_id = "43235";
+			break;
+		case BCM43236_CHIP_ID:
+			device_id = "43236";
+			break;
+		case BCM43238_CHIP_ID:
+			device_id = "43238";
+			break;
+		case BCM43242_CHIP_ID:
+			device_id = "43242";
+			break;
+		case BCM43526_CHIP_ID:
+			device_id = "43526";
+			break;
+		case BCM43569_CHIP_ID:
+			device_id = "43569";
+			switch (chiprev) {
+				case 0:
+					chip_rev = "a0";
+					break;
+				case 2:
+					chip_rev = "a2";
+					break;
+				default:
+					break;
+			}
+			break;
+		default:
+			DBUSERR(("unsupported device %x\n", devid));
+			return NULL;
+	}
+
+	/* Load board specific nvram file */
+	snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s-%2x-%2x.nvm",
+	         device_id, chip_rev, boardtype, boardrev);
+#else
+	snprintf(file_name, sizeof(file_name), "%s", CONFIG_ANDROID_BCMDHD_NVRAM_PATH);
+#endif /* OEM_ANDROID */
+
+	ret = dbus_request_firmware(file_name, &firmware);
+	if (ret) {
+		DBUSERR(("fail to request nvram %s\n", file_name));
+
+#ifndef OEM_ANDROID
+		/* Load generic nvram file */
+		snprintf(file_name, sizeof(file_name), "brcm/bcm%s%s.nvm",
+		         device_id, chip_rev);
+
+		ret = dbus_request_firmware(file_name, &firmware);
+#endif /* OEM_ANDROID */
+
+		if (ret) {
+			DBUSERR(("fail to request nvram %s\n", file_name));
+			return NULL;
+		}
+	}
+
+	*fwlen = firmware->size;
+	*fw = (uint8 *)firmware->data;
+	return (void *)firmware;
+}
+
+void *
+dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type, uint16 boardtype,
+	uint16 boardrev)
+{
+	switch (type) {
+		case DBUS_FIRMWARE:
+			return dbus_get_fwfile(devid, chiprev, fw, fwlen, boardtype, boardrev);
+		case DBUS_NVFILE:
+			return dbus_get_nvfile(devid, chiprev, fw, fwlen, boardtype, boardrev);
+		default:
+			return NULL;
+	}
+}
+
+void
+dbus_release_fw_nvfile(void *firmware)
+{
+	release_firmware((struct firmware *)firmware);
+}
+#endif /* BCM_REQUEST_FW */
+
+#ifdef BCMUSBDEV_COMPOSITE
+/**
+ * For a composite device the interface order is not guaranteed; scan the device structure for the
+ * WLAN interface.
+ */
+static int
+dbus_usbos_intf_wlan(struct usb_device *usb)
+{
+	int i, num_of_eps, ep, intf_wlan = -1;
+	int num_intf = CONFIGDESC(usb)->bNumInterfaces;
+	struct usb_endpoint_descriptor *endpoint;
+
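+	/* An interface is treated as the WLAN interface if it is vendor-specific
+	 * and exposes at least one bulk endpoint.
+	 */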
+	for (i = 0; i < num_intf; i++) {
+		if (IFDESC(usb, i).bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+			continue;
+		num_of_eps = IFDESC(usb, i).bNumEndpoints;
+
+		for (ep = 0; ep < num_of_eps; ep++) {
+			endpoint = &IFEPDESC(usb, i, ep);
+			if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+				USB_ENDPOINT_XFER_BULK) {
+				intf_wlan = i;
+				break;
+			}
+		}
+		if (ep < num_of_eps)
+			break;
+	}
+
+	return intf_wlan;
+}
+#endif /* BCMUSBDEV_COMPOSITE */
diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h
new file mode 100644
index 0000000..218f6c2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd.h
@@ -0,0 +1,2588 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd.h 711448 2017-07-18 08:27:03Z $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/types.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+#if defined(BT_OVER_SDIO)
+#include <dhd_bt_interface.h>
+#endif /* defined (BT_OVER_SDIO) */
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+int get_scheduler_policy(struct task_struct *p);
+#define MAX_EVENT	16
+
+#define ALL_INTERFACES	0xff
+
+#include <wlioctl.h>
+#include <wlfc_proto.h>
+#include <hnd_armtrap.h>
+#if defined(DUMP_IOCTL_IOV_LIST) || defined(DHD_DEBUG)
+#include <bcmutils.h>
+#endif /* DUMP_IOCTL_IOV_LIST || DHD_DEBUG */
+#include <hnd_pktq.h>
+
+#if defined(BCMWDF)
+#include <wdf.h>
+#include <WdfMiniport.h>
+#endif /* (BCMWDF)  */
+
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+#define MAX_RESCHED_CNT 600
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < \
+	KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT))
+#define WL_VENDOR_EXT_SUPPORT
+#endif /* 3.18 > KERNEL_VER >= 3.14 || defined(CONFIG_BCMDHD_VENDOR_EXT) */
+
+#if defined(KEEP_ALIVE)
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR	"null_pkt"
+#endif /* KEEP_ALIVE */
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_ioctl;
+struct dhd_dbg;
+struct dhd_ts;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+	DHD_BUS_DOWN,		/* Not ready for frame transfers */
+	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
+	DHD_BUS_DATA,		/* Ready for frame transfers */
+	DHD_BUS_SUSPEND,	/* Bus has been suspended */
+	DHD_BUS_DOWN_IN_PROGRESS,	/* Bus going Down */
+	DHD_BUS_REMOVE,	/* Bus has been removed */
+};
+
+/*
+ * Bit fields indicating clean-up processes that must be waited on until they finish.
+ * Future synchronizable processes can add their bit field below and update
+ * their functionality accordingly.
+ */
+#define DHD_BUS_BUSY_IN_TX                   0x01
+#define DHD_BUS_BUSY_IN_SEND_PKT             0x02
+#define DHD_BUS_BUSY_IN_DPC                  0x04
+#define DHD_BUS_BUSY_IN_WD                   0x08
+#define DHD_BUS_BUSY_IN_IOVAR                0x10
+#define DHD_BUS_BUSY_IN_DHD_IOVAR            0x20
+#define DHD_BUS_BUSY_SUSPEND_IN_PROGRESS     0x40
+#define DHD_BUS_BUSY_RESUME_IN_PROGRESS      0x80
+#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100
+#define DHD_BUS_BUSY_RPM_SUSPEND_DONE        0x200
+#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS  0x400
+#define DHD_BUS_BUSY_RPM_ALL                 (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \
+		DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \
+		DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
+#define DHD_BUS_BUSY_IN_CHECKDIED            0x800
+
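+/* Helper macros to set, clear, and test the busy-state bits above in
+ * dhdp->dhd_bus_busy_state.
+ */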
+#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX
+#define DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT
+#define DHD_BUS_BUSY_SET_IN_DPC(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC
+#define DHD_BUS_BUSY_SET_IN_WD(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD
+#define DHD_BUS_BUSY_SET_IN_IOVAR(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR
+#define DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR
+#define DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE
+#define DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \
+	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED
+
+#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX
+#define DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT
+#define DHD_BUS_BUSY_CLEAR_IN_DPC(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC
+#define DHD_BUS_BUSY_CLEAR_IN_WD(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD
+#define DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR
+#define DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR
+#define DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE
+#define DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
+#define DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(dhdp) \
+	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_CHECKDIED
+
+#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX)
+#define DHD_BUS_BUSY_CHECK_IN_SEND_PKT(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SEND_PKT)
+#define DHD_BUS_BUSY_CHECK_IN_DPC(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DPC)
+#define DHD_BUS_BUSY_CHECK_IN_WD(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_WD)
+#define DHD_BUS_BUSY_CHECK_IN_IOVAR(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_IOVAR)
+#define DHD_BUS_BUSY_CHECK_IN_DHD_IOVAR(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DHD_IOVAR)
+#define DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_SUSPEND_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RESUME_IN_PROGRESS(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RESUME_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE)
+#define DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
+#define DHD_BUS_BUSY_CHECK_RPM_ALL(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL)
+#define DHD_BUS_BUSY_CHECK_IN_CHECKDIED(dhdp) \
+	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_CHECKDIED)
+#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \
+		((dhdp)->dhd_bus_busy_state == 0)
+
+#define DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp) \
+	((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \
+	 DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp))
+
+#define DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp) \
+		(DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \
+		 DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp))
+
+#define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \
+		((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS || \
+		(dhdp)->busstate == DHD_BUS_REMOVE)
+
+#define DHD_BUS_CHECK_REMOVE(dhdp) \
+		((dhdp)->busstate == DHD_BUS_REMOVE)
+
+/* Macro to print an Ethernet address as a string;
+ * expects both arguments as (char *)
+ */
+#define DHD_MAC_TO_STR(mac, str) (snprintf(str, ETHER_ADDR_STR_LEN, \
+					"%02x:%02x:%02x:%02x:%02x:%02x\n", \
+					(uchar)mac[0]&0xff, \
+					(uchar)mac[1]&0xff, \
+					(uchar)mac[2]&0xff, \
+					(uchar)mac[3]&0xff, \
+					(uchar)mac[4]&0xff, \
+					(uchar)mac[5]&0xff))
+
+
+/* Download Types */
+typedef enum download_type {
+	FW,
+	NVRAM,
+	CLM_BLOB
+} download_type_t;
+
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS	16
+#define DHD_DEL_IF	-0xE
+#define DHD_BAD_IF	-0xF
+#define DHD_EVENT_IF	0xFFFF	/* Hack i/f to handle events from INFO Ring */
+
+enum dhd_op_flags {
+/* Firmware requested operation mode */
+	DHD_FLAG_STA_MODE				= (1 << (0)), /* STA only */
+	DHD_FLAG_HOSTAP_MODE				= (1 << (1)), /* SOFTAP only */
+	DHD_FLAG_P2P_MODE				= (1 << (2)), /* P2P Only */
+	/* STA + P2P */
+	DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE),
+	/* STA + SoftAP */
+	DHD_FLAG_CONCURR_STA_HOSTAP_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_HOSTAP_MODE),
+	DHD_FLAG_CONCURR_MULTI_CHAN_MODE		= (1 << (4)), /* STA + P2P */
+	/* Current P2P mode for P2P connection */
+	DHD_FLAG_P2P_GC_MODE				= (1 << (5)),
+	DHD_FLAG_P2P_GO_MODE				= (1 << (6)),
+	DHD_FLAG_MBSS_MODE				= (1 << (7)), /* MBSS in future */
+	DHD_FLAG_IBSS_MODE				= (1 << (8)),
+	DHD_FLAG_MFG_MODE				= (1 << (9)),
+	DHD_FLAG_RSDB_MODE				= (1 << (10)),
+	DHD_FLAG_MP2P_MODE				= (1 << (11))
+};
+
+#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \
+	(dhd ? ((((dhd_pub_t *)dhd)->op_mode)  &  opmode_flag) : -1)
+
+/* Max sequential TX/RX Control timeouts to set HANG event */
+#ifndef MAX_CNTL_TX_TIMEOUT
+#define MAX_CNTL_TX_TIMEOUT 2
+#endif /* MAX_CNTL_TX_TIMEOUT */
+#ifndef MAX_CNTL_RX_TIMEOUT
+#define MAX_CNTL_RX_TIMEOUT 1
+#endif /* MAX_CNTL_RX_TIMEOUT */
+
+#define DHD_SCAN_ASSOC_ACTIVE_TIME	40 /* ms: Embedded default Active setting from DHD */
+#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */
+#define DHD_SCAN_PASSIVE_TIME		130 /* ms: Embedded default Passive setting from DHD */
+#define DHD_SCAN_HOME_TIME		45 /* ms: Embedded default Home time setting from DHD */
+#define DHD_SCAN_HOME_AWAY_TIME	100 /* ms: Embedded default Home Away time setting from DHD */
+
+#ifndef POWERUP_MAX_RETRY
+#define POWERUP_MAX_RETRY	3 /* how many times we retry to power up the chip */
+#endif
+#ifndef POWERUP_WAIT_MS
+#define POWERUP_WAIT_MS		2000 /* ms: time out in waiting wifi to come up */
+#endif
+/*
+ * MAX_NVRAMBUF_SIZE determines the size of the buffer in the DHD that holds
+ * the NVRAM data, i.e. the size of the buffer pointed to by bus->vars.
+ * It has been increased to 16K to support NVRAM sizes larger than 8K.
+ */
+#define MAX_NVRAMBUF_SIZE	(16 * 1024) /* max nvram buf size */
+#define MAX_CLM_BUF_SIZE	(48 * 1024) /* max clm blob size */
+#ifdef DHD_DEBUG
+#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */
+#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */
+#endif
+
+#ifndef CONFIG_BCMDHD_CLM_PATH
+#define CONFIG_BCMDHD_CLM_PATH "/system/etc/wifi/bcmdhd_clm.blob"
+#endif /* CONFIG_BCMDHD_CLM_PATH */
+#define WL_CCODE_NULL_COUNTRY  "#n"
+
+#define FW_VER_STR_LEN	128
+#define CLM_VER_STR_LEN 128
+#define BUS_API_REV_STR_LEN	128
+extern char bus_api_revision[];
+
+enum dhd_bus_wake_state {
+	WAKE_LOCK_OFF,
+	WAKE_LOCK_PRIV,
+	WAKE_LOCK_DPC,
+	WAKE_LOCK_IOCTL,
+	WAKE_LOCK_DOWNLOAD,
+	WAKE_LOCK_TMOUT,
+	WAKE_LOCK_WATCHDOG,
+	WAKE_LOCK_LINK_DOWN_TMOUT,
+	WAKE_LOCK_PNO_FIND_TMOUT,
+	WAKE_LOCK_SOFTAP_SET,
+	WAKE_LOCK_SOFTAP_STOP,
+	WAKE_LOCK_SOFTAP_START,
+	WAKE_LOCK_SOFTAP_THREAD
+};
+
+#ifdef PCIE_INB_DW
+enum dhd_bus_ds_state {
+	DW_DEVICE_DS_INVALID = -1,
+	DW_DEVICE_DS_DEV_SLEEP = 0,
+	DW_DEVICE_DS_DEV_SLEEP_PEND,
+	DW_DEVICE_DS_DISABLED_WAIT,
+	DW_DEVICE_DS_DEV_WAKE,
+	DW_DEVICE_DS_ACTIVE,
+	DW_DEVICE_HOST_SLEEP_WAIT,
+	DW_DEVICE_HOST_SLEEP,
+	DW_DEVICE_HOST_WAKE_WAIT,
+	DW_DEVICE_DS_D3_INFORM_WAIT
+};
+#endif /* PCIE_INB_DW */
+
+enum dhd_prealloc_index {
+	DHD_PREALLOC_PROT = 0,
+	DHD_PREALLOC_RXBUF,
+	DHD_PREALLOC_DATABUF,
+	DHD_PREALLOC_OSL_BUF,
+#if defined(STATIC_WL_PRIV_STRUCT)
+	DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+#endif /* STATIC_WL_PRIV_STRUCT */
+	DHD_PREALLOC_DHD_INFO = 7,
+	DHD_PREALLOC_DHD_WLFC_INFO = 8,
+	DHD_PREALLOC_IF_FLOW_LKUP = 9,
+	/* 10 */
+	DHD_PREALLOC_MEMDUMP_RAM = 11,
+	DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+	DHD_PREALLOC_PKTID_MAP = 13,
+	DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+	DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+	DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+	DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+	DHD_PREALLOC_STAT_REPORT_BUF = 18,
+	DHD_PREALLOC_WL_ESCAN_INFO = 19,
+	DHD_PREALLOC_FW_VERBOSE_RING = 20,
+	DHD_PREALLOC_FW_EVENT_RING = 21,
+	DHD_PREALLOC_DHD_EVENT_RING = 22,
+	DHD_PREALLOC_NAN_EVENT_RING = 23
+};
+
+enum dhd_dongledump_mode {
+	DUMP_DISABLED = 0,
+	DUMP_MEMONLY,
+	DUMP_MEMFILE,
+	DUMP_MEMFILE_BUGON,
+	DUMP_MEMFILE_MAX
+};
+
+enum dhd_dongledump_type {
+	DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
+	DUMP_TYPE_D3_ACK_TIMEOUT,
+	DUMP_TYPE_DONGLE_TRAP,
+	DUMP_TYPE_MEMORY_CORRUPTION,
+	DUMP_TYPE_PKTID_AUDIT_FAILURE,
+	DUMP_TYPE_PKTID_INVALID,
+	DUMP_TYPE_SCAN_TIMEOUT,
+	DUMP_TYPE_JOIN_TIMEOUT,
+	DUMP_TYPE_SCAN_BUSY,
+	DUMP_TYPE_BY_SYSDUMP,
+	DUMP_TYPE_BY_LIVELOCK,
+	DUMP_TYPE_AP_LINKUP_FAILURE,
+	DUMP_TYPE_AP_ABNORMAL_ACCESS,
+	DUMP_TYPE_CFG_VENDOR_TRIGGERED,
+	DUMP_TYPE_RESUMED_ON_TIMEOUT_TX,
+	DUMP_TYPE_RESUMED_ON_TIMEOUT_RX,
+	DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR,
+	DUMP_TYPE_DONGLE_HOST_EVENT,
+	DUMP_TYPE_RESUMED_UNKNOWN,
+	DUMP_TYPE_TRANS_ID_MISMATCH,
+	DUMP_TYPE_HANG_ON_IFACE_OP_FAIL,
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	DUMP_TYPE_READ_SHM_FAIL
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+};
+
+enum dhd_hang_reason {
+	HANG_REASON_MASK = 0x8000,
+	HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
+	HANG_REASON_DONGLE_TRAP = 0x8002,
+	HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
+	HANG_REASON_BUS_DOWN = 0x8004,
+	HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
+	HANG_REASON_IFACE_OP_FAILURE = 0x8007,
+	HANG_REASON_HT_AVAIL_ERROR = 0x8008,
+	HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
+	HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
+	HANG_REASON_PCIE_LINK_DOWN = 0x8805,
+	HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
+	HANG_REASON_UNKNOWN = 0x8807,
+	HANG_REASON_MAX = 0x8808
+};
+
+enum dhd_rsdb_scan_features {
+	/* Downgraded scan feature for AP active */
+	RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01,
+	/* Downgraded scan feature for P2P Discovery */
+	RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02,
+	/* Enable channel pruning for ROAM SCAN */
+	RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10,
+	/* Enable channel pruning for any SCAN */
+	RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL  = 0x20
+};
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+
+/**
+ * DMA-able buffer parameters
+ * - dmaaddr_t is 32bits on a 32bit host.
+ *   dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr
+ * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer.
+ */
+typedef struct dhd_dma_buf {
+	void      *va;      /* virtual address of buffer */
+	uint32    len;      /* user requested buffer length */
+	dmaaddr_t pa;       /* physical address of buffer */
+	void      *dmah;    /* dma mapper handle */
+	void      *secdma;  /* secure dma sec_cma_info handle */
+	uint32    _alloced; /* actual size of buffer allocated with align and pad */
+} dhd_dma_buf_t;
+
+/* Host packet-reordering logic */
+/* Structure holding the reorder buffers (void **p) */
+typedef struct reorder_info {
+	void **p;
+	uint8 flow_id;
+	uint8 cur_idx;
+	uint8 exp_idx;
+	uint8 max_idx;
+	uint8 pend_pkts;
+} reorder_info_t;
+
+#ifdef DHDTCPACK_SUPPRESS
+
+enum {
+	/* TCPACK suppress off */
+	TCPACK_SUP_OFF,
+	/* Replace a TCPACK in the txq when a newly arriving one has a higher ACK number. */
+	TCPACK_SUP_REPLACE,
+	/* TCPACK_SUP_REPLACE plus delayed TCPACK TX unless the ACK is for PSH data.
+	 * This benefits half-duplex bus interfaces (e.g. SDIO) because:
+	 * 1. TCP DATA packets can be read first from the bus.
+	 * 2. TCPACKs that do not need immediate delivery remain longer in the TXQ and can be suppressed.
+	 */
+	TCPACK_SUP_DELAYTX,
+	TCPACK_SUP_HOLD,
+	TCPACK_SUP_LAST_MODE
+};
+
+#ifdef BCMSDIO
+#define TCPACK_SUP_DEFAULT	TCPACK_SUP_DELAYTX
+#elif defined(BCMPCIE)
+#define TCPACK_SUP_DEFAULT	TCPACK_SUP_HOLD
+#else
+#define TCPACK_SUP_DEFAULT	TCPACK_SUP_OFF
+#endif /* BCMSDIO */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(TRAFFIC_MGMT_DWM)
+#define DHD_DWM_TBL_SIZE           57
+/* DSCP WMM AC Mapping macros and structures */
+#define DHD_TRF_MGMT_DWM_FILTER_BIT                 0x8
+#define DHD_TRF_MGMT_DWM_PRIO_BITS                  0x7
+#define DHD_TRF_MGMT_DWM_FAVORED_BIT                0x10
+#define DHD_TRF_MGMT_DWM_PRIO(dwm_tbl_entry) ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_PRIO_BITS)
+#define DHD_TRF_MGMT_DWM_IS_FAVORED_SET(dwm_tbl_entry) \
+	((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry) \
+	((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_tbl_entry) \
+	((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FILTER_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry) \
+	((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FILTER_BIT)
+
+typedef struct {
+	uint8 dhd_dwm_enabled;
+	uint8 dhd_dwm_tbl[DHD_DWM_TBL_SIZE];
+} dhd_trf_mgmt_dwm_tbl_t;
+#endif 
+
+#define DHD_NULL_CHK_AND_RET(cond) \
+	if (!cond) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		return; \
+	}
+
+#define DHD_NULL_CHK_AND_RET_VAL(cond, value) \
+	if (!cond) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		return value; \
+	}
+
+#define DHD_NULL_CHK_AND_GOTO(cond, label) \
+	if (!cond) { \
+		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
+		goto label; \
+	}
+
+/*
+ * Accumulating the queue lengths of all flowring queues in a parent object,
+ * to assert flow control when the cumulative queue length crosses an upper
+ * threshold defined on the parent object. The upper threshold may be maintained
+ * at a station level, at an interface level, or at a dhd instance.
+ *
+ * cumm_ctr_t abstraction:
+ * The cumm_ctr_t abstraction may be enhanced to use an object with a hysteresis
+ * pause on/off threshold callback.
+ * All macros use the address of the cumulative length in the parent objects.
+ *
+ * BCM_GMAC3 builds use a single perimeter lock, as opposed to a per-queue lock.
+ * Cumulative counters in parent objects may be updated without spinlocks.
+ *
+ * In non BCM_GMAC3 builds, if a cumulative queue length is desired across all flows
+ * belonging to either a station, an interface, or a dhd instance, then
+ * an atomic operation is required, using an atomic_t cumulative counter or
+ * a spinlock. BCM_ROUTER_DHD uses the Linux atomic_t construct.
+ */
+
+/* Cumulative length not supported. */
+typedef uint32 cumm_ctr_t;
+#define DHD_CUMM_CTR_PTR(clen)     ((cumm_ctr_t*)(clen))
+#define DHD_CUMM_CTR(clen)         *(DHD_CUMM_CTR_PTR(clen)) /* accessor */
+#define DHD_CUMM_CTR_READ(clen)    DHD_CUMM_CTR(clen) /* read access */
+#define DHD_CUMM_CTR_INIT(clen)                                                \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_INCR(clen)                                                \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_DECR(clen)                                                \
+	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+struct tdls_peer_node {
+	uint8 addr[ETHER_ADDR_LEN];
+	struct tdls_peer_node *next;
+};
+typedef struct tdls_peer_node tdls_peer_node_t;
+typedef struct {
+	tdls_peer_node_t *node;
+	uint8 tdls_peer_count;
+} tdls_peer_tbl_t;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
+#ifdef DHD_LOG_DUMP
+/* below structure describe ring buffer. */
+struct dhd_log_dump_buf
+{
+	spinlock_t lock;
+	unsigned int enable;
+	unsigned int wraparound;
+	unsigned long max;
+	unsigned int remain;
+	char* present;
+	char* front;
+	char* buffer;
+};
+
+#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE	256
+extern void dhd_log_dump_write(int type, const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+#endif /* DHD_LOG_DUMP */
+
+#if defined(CUSTOMER_HW2)
+#define DHD_COMMON_DUMP_PATH	"/data/misc/wifi/"
+#else
+#define DHD_COMMON_DUMP_PATH	"/installmedia/"
+#endif 
+
+struct cntry_locales_custom {
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];      /* ISO 3166-1 country abbreviation */
+	char custom_locale[WLC_CNTRY_BUF_SZ];   /* Custom firmware locale */
+	int32 custom_locale_rev;                /* Custom locale revision, default -1 */
+};
+
+#ifdef REPORT_FATAL_TIMEOUTS
+typedef struct timeout_info {
+	void	*scan_timer_lock;
+	void	*join_timer_lock;
+	void	*cmd_timer_lock;
+	void	*bus_timer_lock;
+	uint32 scan_timeout_val;
+	uint32 join_timeout_val;
+	uint32 cmd_timeout_val;
+	uint32 bus_timeout_val;
+	bool scan_timer_active;
+	bool join_timer_active;
+	bool cmd_timer_active;
+	bool bus_timer_active;
+	osl_timer_t *scan_timer;
+	osl_timer_t *join_timer;
+	osl_timer_t *cmd_timer;
+	osl_timer_t *bus_timer;
+	uint16 cmd_request_id;
+	uint32 cmd;
+	uint32 cmd_join_error;
+} timeout_info_t;
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef HOFFLOAD_MODULES
+/* Metadata structure containing module information */
+struct module_metadata {
+	void *data;	/* module data */
+	uint32_t size;	/* module size */
+	u64 data_addr;	/* address of module data in host */
+};
+#endif
+
+#ifdef DMAMAP_STATS
+typedef struct dmamap_stats {
+	uint64 txdata;
+	uint64 txdata_sz;
+	uint64 rxdata;
+	uint64 rxdata_sz;
+	uint64 ioctl_rx;
+	uint64 ioctl_rx_sz;
+	uint64 event_rx;
+	uint64 event_rx_sz;
+	uint64 info_rx;
+	uint64 info_rx_sz;
+	uint64 tsbuf_rx;
+	uint64 tsbuf_rx_sz;
+} dma_stats_t;
+#endif /* DMAMAP_STATS */
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+	osl_t *osh;		/* OSL handle */
+	struct dhd_bus *bus;	/* Bus module handle */
+	struct dhd_prot *prot;	/* Protocol module handle */
+	struct dhd_info  *info; /* Info module handle */
+	struct dhd_dbg *dbg;	/* Debugability module handle */
+
+	/* Note to NDIS developers: the structure dhd_common is redundant;
+	 * please do NOT merge it back from other branches!
+	 */
+
+#ifdef BCMDBUS
+	struct dbus_pub *dbus;
+#endif /* BCMDBUS */
+
+	/* Internal dhd items */
+	bool up;		/* Driver up/down (to OS) */
+#ifdef WL_CFG80211
+	spinlock_t up_lock;	/* Synchronization with CFG80211 down */
+#endif /* WL_CFG80211 */
+	bool txoff;		/* Transmit flow-controlled */
+	bool dongle_reset;  /* TRUE = DEVRESET put dongle into reset */
+	enum dhd_bus_state busstate;
+	uint dhd_bus_busy_state;	/* Bus busy state */
+	uint hdrlen;		/* Total DHD header length (proto + bus) */
+	uint maxctl;		/* Max size rxctl request from proto to bus */
+	uint rxsz;		/* Rx buffer size bus module should use */
+	uint8 wme_dp;	/* wme discard priority */
+
+	/* Dongle media info */
+	bool iswl;		/* Dongle-resident driver is wl */
+	ulong drv_version;	/* Version of dongle-resident driver */
+	struct ether_addr mac;	/* MAC address obtained from dongle */
+	dngl_stats_t dstats;	/* Stats for dongle-based data */
+
+	/* Additional stats for the bus level */
+	ulong tx_packets;	/* Data packets sent to dongle */
+	ulong tx_dropped;	/* Data packets dropped in dhd */
+	ulong tx_multicast;	/* Multicast data packets sent to dongle */
+	ulong tx_errors;	/* Errors in sending data to dongle */
+	ulong tx_ctlpkts;	/* Control packets sent to dongle */
+	ulong tx_ctlerrs;	/* Errors sending control frames to dongle */
+	ulong rx_packets;	/* Packets sent up the network interface */
+	ulong rx_multicast;	/* Multicast packets sent up the network interface */
+	ulong rx_errors;	/* Errors processing rx data packets */
+	ulong rx_ctlpkts;	/* Control frames processed from dongle */
+	ulong rx_ctlerrs;	/* Errors in processing rx control frames */
+	ulong rx_dropped;	/* Packets dropped locally (no memory) */
+	ulong rx_flushed;  /* Packets flushed due to unscheduled sendup thread */
+	ulong wd_dpc_sched;   /* Number of times dhd dpc scheduled by watchdog timer */
+	ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */
+	ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */
+	ulong rx_readahead_cnt;	/* Number of packets where header read-ahead was used. */
+	ulong tx_realloc;	/* Number of tx packets we had to realloc for headroom */
+	ulong fc_packets;       /* Number of flow control pkts recvd */
+
+#ifdef DMAMAP_STATS
+	/* DMA Mapping statistics */
+	dma_stats_t dma_stats;
+#endif /* DMAMAP_STATS */
+
+	/* Last error return */
+	int bcmerror;
+	uint tickcnt;
+
+	/* Last error from dongle */
+	int dongle_error;
+
+	uint8 country_code[WLC_CNTRY_BUF_SZ];
+
+	/* Suspend disable flag and "in suspend" flag */
+	int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+	int in_suspend;			/* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;			/* pno status : "1" is pno enable */
+	int pno_suspend;		/* pno suspend status : "1" is pno suspended */
+#endif /* PNO_SUPPORT */
+	/* DTIM skip value: the default of 0 (or 1) means wake on each DTIM;
+	 * 3 means skip 2 DTIMs and wake on the 3rd DTIM (the 9th beacon when the AP DTIM is 3).
+	 */
+	int suspend_bcn_li_dtim;         /* bcn_li_dtim value in suspend mode */
+#ifdef PKT_FILTER_SUPPORT
+	int early_suspended;	/* Early suspend status */
+	int dhcp_in_progress;	/* DHCP period */
+#endif
+
+	/* Pkt filter definition */
+	char * pktfilter[100];
+	int pktfilter_count;
+
+	wl_country_t dhd_cspec;		/* Current Locale info */
+#ifdef CUSTOM_COUNTRY_CODE
+	uint dhd_cflags;
+#endif /* CUSTOM_COUNTRY_CODE */
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+	bool is_blob;			/* Checking for existence of Blob file */
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+	bool force_country_change;
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int	op_mode;				/* STA, HostAPD, WFD, SoftAP */
+
+/* Set this to 1 to use a separate interface (p2p0) for p2p operations.
+ *  For ICS MR1 releases it should be disabled to be compatible with the ICS MR1 Framework;
+ *  see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile
+ */
+/* #define WL_ENABLE_P2P_IF		1 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	struct mutex 	wl_start_stop_lock; /* lock/unlock for Android start/stop */
+	struct mutex 	wl_softap_lock;		 /* lock/unlock for any SoftAP/STA settings */
+#endif 
+
+#ifdef PROP_TXSTATUS
+	bool	wlfc_enabled;
+	int	wlfc_mode;
+	void*	wlfc_state;
+	/*
+	 * Mode in which the dhd flow control shall operate. Must be set before
+	 * traffic starts to the device.
+	 * 0 - Do not do any proptxstatus flow control
+	 * 1 - Use implied credit from a packet status
+	 * 2 - Use explicit credit
+	 * 3 - Only AMPDU hostreorder used; no wlfc.
+	 */
+	uint8	proptxstatus_mode;
+	bool	proptxstatus_txoff;
+	bool	proptxstatus_module_ignore;
+	bool	proptxstatus_credit_ignore;
+	bool	proptxstatus_txstatus_ignore;
+
+	bool	wlfc_rxpkt_chk;
+#ifdef LIMIT_BORROW
+	bool wlfc_borrow_allowed;
+#endif /* LIMIT_BORROW */
+	/*
+	 * Implement the functions below per platform if needed.
+	 */
+	/* platform-specific hook deciding whether to skip flow control */
+	bool (*skip_fc)(void * dhdp, uint8 ifx);
+	/* platform-specific hooks for wlfc_enable and wlfc_deinit */
+	void (*plat_init)(void *dhd);
+	void (*plat_deinit)(void *dhd);
+#ifdef DHD_WLFC_THREAD
+	bool                wlfc_thread_go;
+	struct task_struct* wlfc_thread;
+	wait_queue_head_t   wlfc_wqhead;
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+	void *pno_state;
+#endif
+#ifdef RTT_SUPPORT
+	void *rtt_state;
+	bool rtt_supported;
+#endif
+	bool	dongle_isolation;
+	bool	is_pcie_watchdog_reset;
+	bool	dongle_trap_occured;	/* flag for sending HANG event to upper layer */
+	bool	iovar_timeout_occured;	/* flag to indicate iovar resumed on timeout */
+#ifdef PCIE_FULL_DONGLE
+	bool	d3ack_timeout_occured;	/* flag to indicate d3ack resumed on timeout */
+#endif /* PCIE_FULL_DONGLE */
+#ifdef BT_OVER_SDIO
+	bool	is_bt_recovery_required;
+#endif
+	int   hang_was_sent;
+	int   rxcnt_timeout;		/* counter rxcnt timeout to send HANG */
+	int   txcnt_timeout;		/* counter txcnt timeout to send HANG */
+#ifdef BCMPCIE
+	int   d3ackcnt_timeout;		/* counter d3ack timeout to send HANG */
+#endif /* BCMPCIE */
+	bool hang_report;		/* enable hang report by default */
+	uint16 hang_reason;		/* reason codes for HANG event */
+#if defined(DHD_HANG_SEND_UP_TEST)
+	uint req_hang_type;
+#endif /* DHD_HANG_SEND_UP_TEST */
+#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
+	uint hang_counts;
+#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+#ifdef WLTDLS
+	bool tdls_enable;
+#endif
+	struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+	#define WLC_IOCTL_MAXBUF_FWCAP	512
+	char  fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP];
+	#define MAXSKBPEND 1024
+	void *skbbuf[MAXSKBPEND];
+	uint32 store_idx;
+	uint32 sent_idx;
+#ifdef DHDTCPACK_SUPPRESS
+	uint8 tcpack_sup_mode;		/* TCPACK suppress mode */
+	void *tcpack_sup_module;	/* TCPACK suppress module */
+	uint32 tcpack_sup_ratio;
+	uint32 tcpack_sup_delay;
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(ARP_OFFLOAD_SUPPORT)
+	uint32 arp_version;
+#endif
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+	bool dhd_bug_on;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+#ifdef CUSTOM_SET_CPUCORE
+	struct task_struct * current_dpc;
+	struct task_struct * current_rxf;
+	int chan_isvht80;
+#endif /* CUSTOM_SET_CPUCORE */
+
+
+	void    *sta_pool;          /* pre-allocated pool of sta objects */
+	void    *staid_allocator;   /* allocator of sta indexes */
+#ifdef PCIE_FULL_DONGLE
+	bool	flow_rings_inited;	/* set this flag after initializing flow rings */
+#endif /* PCIE_FULL_DONGLE */
+	void    *flowid_allocator;  /* unique flowid allocator */
+	void	*flow_ring_table;   /* flow ring table, include prot and bus info */
+	void	*if_flow_lkup;      /* per interface flowid lkup hash table */
+	void    *flowid_lock;       /* per os lock for flowid info protection */
+	void    *flowring_list_lock;       /* per os lock for flowring list protection */
+	uint32  num_flow_rings;
+	cumm_ctr_t cumm_ctr;        /* cumm queue length placeholder  */
+	cumm_ctr_t l2cumm_ctr;      /* level 2 cumm queue length placeholder */
+	uint32 d2h_sync_mode;       /* D2H DMA completion sync mode */
+	uint8  flow_prio_map[NUMPRIO];
+	uint8	flow_prio_map_type;
+	char enable_log[MAX_EVENT];
+	bool dma_d2h_ring_upd_support;
+	bool dma_h2d_ring_upd_support;
+	bool dma_ring_upd_overwrite;	/* host overwrites support setting */
+
+	bool idma_enable;
+	uint idma_inited;
+	bool idma_retention_ds;		/* Implicit DMA memory retention */
+
+	bool ifrm_enable;			/* implicit frm enable */
+	uint ifrm_inited;			/* implicit frm init */
+
+#ifdef DHD_WMF
+	bool wmf_ucast_igmp;
+#ifdef DHD_IGMP_UCQUERY
+	bool wmf_ucast_igmp_query;
+#endif
+#ifdef DHD_UCAST_UPNP
+	bool wmf_ucast_upnp;
+#endif
+#endif /* DHD_WMF */
+#if defined(TRAFFIC_MGMT_DWM)
+	dhd_trf_mgmt_dwm_tbl_t dhd_tm_dwm_tbl;
+#endif /* TRAFFIC_MGMT_DWM */
+#ifdef DHD_L2_FILTER
+	unsigned long l2_filter_cnt;	/* for L2_FILTER ARP table timeout */
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_SSSR_DUMP
+	bool sssr_inited;
+	sssr_reg_info_t sssr_reg_info;
+	uint8 *sssr_mempool;
+	uint *sssr_d11_before[MAX_NUM_D11CORES];
+	uint *sssr_d11_after[MAX_NUM_D11CORES];
+	bool sssr_d11_outofreset[MAX_NUM_D11CORES];
+	uint *sssr_vasip_buf_before;
+	uint *sssr_vasip_buf_after;
+#endif /* DHD_SSSR_DUMP */
+	uint8 *soc_ram;
+	uint32 soc_ram_length;
+	uint32 memdump_type;
+#ifdef DHD_FW_COREDUMP
+	uint32 memdump_enabled;
+	bool memdump_success;
+#endif /* DHD_FW_COREDUMP */
+#ifdef PCIE_FULL_DONGLE
+#ifdef WLTDLS
+	tdls_peer_tbl_t peer_tbl;
+#endif /* WLTDLS */
+	uint8 tx_in_progress;
+#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_ULP
+	void *dhd_ulp;
+#endif
+#ifdef CACHE_FW_IMAGES
+	char	*cached_fw;
+	int	cached_fw_length;
+	char	*cached_nvram;
+	int	cached_nvram_length;
+	char	*cached_clm;
+	int	cached_clm_length;
+#endif
+#ifdef WLTDLS
+	uint32 tdls_mode;
+#endif
+#ifdef GSCAN_SUPPORT
+	bool lazy_roam_enable;
+#endif
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+	bool apf_set;
+#endif /* PKT_FILTER_SUPPORT && APF */
+#ifdef DHD_WET
+	void *wet_info;
+#endif
+	bool	h2d_phase_supported;
+	bool	force_dongletrap_on_bad_h2d_phase;
+	uint32	dongle_trap_data;
+	bool	cto_enable;		/* enable PCIE CTO Prevention and recovery */
+	uint32	cto_threshold;	/* PCIE CTO timeout threshold */
+	bool	fw_download_done;
+	trap_t	last_trap_info; /* trap info from the last trap */
+	uint8 rand_mac_oui[DOT11_OUI_LEN];
+#ifdef DHD_LOSSLESS_ROAMING
+	uint8 dequeue_prec_map;
+	uint8 prio_8021x;
+#endif
+#ifdef REPORT_FATAL_TIMEOUTS
+	timeout_info_t *timeout_info;
+#endif /* REPORT_FATAL_TIMEOUTS */
+	/* timesync link */
+	struct dhd_ts *ts;
+	bool	d2h_hostrdy_supported;
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+	bool d11_tx_status;
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+	uint16 ndo_version;		/* ND offload version supported */
+#ifdef NDO_CONFIG_SUPPORT
+	bool ndo_enable;		/* ND offload feature enable */
+	bool ndo_host_ip_overflow;	/* set when # of host IP addrs exceeds FW capacity */
+	uint32 ndo_max_host_ip;		/* # of host ip addr supported by FW */
+#endif /* NDO_CONFIG_SUPPORT */
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+	uint8 log_capture_enable;
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+	bool max_dtim_enable;         /* use MAX bcn_li_dtim value in suspend mode */
+#ifdef PCIE_OOB
+	bool	d2h_no_oob_dw;
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+	bool	d2h_inband_dw;
+	enum dhd_bus_ds_state	ds_state;
+#endif /* PCIE_INB_DW */
+#ifdef CUSTOM_SET_ANTNPM
+	uint32 mimo_ant_set;
+#endif /* CUSTOM_SET_ANTNPM */
+#ifdef CUSTOM_SET_OCLOFF
+	bool ocl_off;
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef HOFFLOAD_MODULES
+	struct module_metadata hmem;
+#endif
+	bool wbtext_support;
+#ifdef DUMP_IOCTL_IOV_LIST
+	/* dump iovar list */
+	dll_t dump_iovlist_head;
+	uint8 dump_iovlist_len;
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef DHD_DEBUG
+/* memwaste feature */
+	dll_t mw_list_head; /* memwaste list head */
+	uint32 mw_id; /* memwaste list unique id */
+#endif /* DHD_DEBUG */
+#ifdef WLTDLS
+	spinlock_t tdls_lock;
+#endif /* WLTDLS */
+#ifdef WLADPS_SEAK_AP_WAR
+	uint32 disabled_adps;
+#endif /* WLADPS_SEAK_AP_WAR */
+	bool ext_trap_data_supported;
+	uint32 *extended_trap_data;
+#ifdef DHD_PKT_LOGGING
+	struct dhd_pktlog *pktlog;
+#endif /* DHD_PKT_LOGGING */
+#if defined(STAT_REPORT)
+	void *stat_report_info;
+#endif
+	char *clm_path;		/* module_param: path to clm vars file */
+	char *conf_path;		/* module_param: path to config vars file */
+	struct dhd_conf *conf;	/* Bus module handle */
+	void *adapter;			/* adapter information, interrupt, fw path etc. */
+#ifdef BCMDBUS
+	bool dhd_remove;
+#endif /* BCMDBUS */
+#if defined(WL_WIRELESS_EXT)
+#if defined(WL_ESCAN)
+	void *escan;
+#else
+	void *iscan;
+#endif
+#endif
+#ifdef WL_EXT_IAPSTA
+	void *iapsta_params;
+#endif
+} dhd_pub_t;
+
+typedef struct {
+	uint rxwake;
+	uint rcwake;
+#ifdef DHD_WAKE_RX_STATUS
+	uint rx_bcast;
+	uint rx_arp;
+	uint rx_mcast;
+	uint rx_multi_ipv6;
+	uint rx_icmpv6;
+	uint rx_icmpv6_ra;
+	uint rx_icmpv6_na;
+	uint rx_icmpv6_ns;
+	uint rx_multi_ipv4;
+	uint rx_multi_other;
+	uint rx_ucast;
+#endif /* DHD_WAKE_RX_STATUS */
+#ifdef DHD_WAKE_EVENT_STATUS
+	uint rc_event[WLC_E_LAST];
+#endif /* DHD_WAKE_EVENT_STATUS */
+} wake_counts_t;
+
+#if defined(PCIE_FULL_DONGLE)
+
+/* Packet Tag for PCIE Full Dongle DHD */
+typedef struct dhd_pkttag_fd {
+	uint16    flowid;   /* Flowring Id */
+	uint16    dataoff;  /* start of packet */
+	uint16    dma_len;  /* pkt len for DMA_MAP/UNMAP */
+	dmaaddr_t pa;       /* physical address */
+	void      *dmah;    /* dma mapper handle */
+	void      *secdma; /* secure dma sec_cma_info handle */
+} dhd_pkttag_fd_t;
+
+/* Packet Tag for DHD PCIE Full Dongle */
+#define DHD_PKTTAG_FD(pkt)          ((dhd_pkttag_fd_t *)(PKTTAG(pkt)))
+
+#define DHD_PKT_GET_FLOWID(pkt)     ((DHD_PKTTAG_FD(pkt))->flowid)
+#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \
+	DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid)
+
+#define DHD_PKT_GET_DATAOFF(pkt)    ((DHD_PKTTAG_FD(pkt))->dataoff)
+#define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \
+	DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff)
+
+#define DHD_PKT_GET_DMA_LEN(pkt)    ((DHD_PKTTAG_FD(pkt))->dma_len)
+#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \
+	DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len)
+
+#define DHD_PKT_GET_PA(pkt)         ((DHD_PKTTAG_FD(pkt))->pa)
+#define DHD_PKT_SET_PA(pkt, pkt_pa) \
+	DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa)
+
+#define DHD_PKT_GET_DMAH(pkt)       ((DHD_PKTTAG_FD(pkt))->dmah)
+#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \
+	DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah)
+
+#define DHD_PKT_GET_SECDMA(pkt)    ((DHD_PKTTAG_FD(pkt))->secdma)
+#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
+	DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
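+
+/*
+ * Illustrative sketch (assumed usage): the accessors above operate on the
+ * packet tag of a tx packet, e.g. when queueing it to a flow ring:
+ *
+ *	DHD_PKT_SET_FLOWID(pkt, flowid);
+ *	DHD_PKT_SET_DATAOFF(pkt, 0);
+ */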
+#endif /* PCIE_FULL_DONGLE */
+
+#if defined(BCMWDF)
+typedef struct {
+	dhd_pub_t *dhd_pub;
+} dhd_workitem_context_t;
+
+WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
+#endif /* BCMWDF */
+
+	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+	#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define _DHD_PM_RESUME_WAIT(a, b) do {\
+			int retry = 0; \
+			SMP_RD_BARRIER_DEPENDS(); \
+			while (dhd_mmc_suspend && retry++ != b) { \
+				SMP_RD_BARRIER_DEPENDS(); \
+				wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \
+			} \
+		} while (0)
+	#define DHD_PM_RESUME_WAIT(a) 		_DHD_PM_RESUME_WAIT(a, 200)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a) 	_DHD_PM_RESUME_WAIT(a, ~0)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)   do { \
+			if (dhd_mmc_suspend) { \
+				printf("%s[%d]: mmc is still in suspend state!!!\n", \
+					__FUNCTION__, __LINE__); \
+				return a; \
+			} \
+		} while (0)
+	#define DHD_PM_RESUME_RETURN		do { if (dhd_mmc_suspend) return; } while (0)
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+		uint countdown = (us) + 9999; \
+		while ((exp) && (countdown >= 10000)) { \
+			wait_event_interruptible_timeout(a, FALSE, 1); \
+			countdown -= 10000; \
+		} \
+	} while (0)
+
+	#else
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)
+	#define DHD_PM_RESUME_WAIT(a)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)
+	#define DHD_PM_RESUME_RETURN
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us)  do { \
+		uint countdown = (us) + 9; \
+		while ((exp) && (countdown >= 10)) { \
+			OSL_DELAY(10);  \
+			countdown -= 10;  \
+		} \
+	} while (0)
+
+	#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
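+
+/*
+ * Illustrative sketch (assumed usage pattern; the wait-queue name is
+ * hypothetical): a bus access path would typically declare the wait queue
+ * once and then gate the access on resume with the macros above, e.g.
+ *
+ *	DHD_PM_RESUME_WAIT_INIT(sdio_access_wait);
+ *	...
+ *	DHD_PM_RESUME_WAIT(sdio_access_wait);
+ *	DHD_PM_RESUME_RETURN_ERROR(BCME_ERROR);
+ */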
+
+#ifndef OSL_SLEEP
+#define OSL_SLEEP(ms)		OSL_DELAY(ms*1000)
+#endif /* OSL_SLEEP */
+
+#define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
+
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+
+#ifdef HOFFLOAD_MODULES
+void dhd_linux_get_modfw_address(dhd_pub_t *dhd);
+#endif
+
+/*
+ *  Wake locks are an Android power management concept. Applications and services
+ *  take them to keep the CPU from entering low-power suspend while work is pending
+ *  (see the illustrative sketch after the wake-lock macros below).
+ */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
+extern void dhd_event_wake_lock(dhd_pub_t *pub);
+extern void dhd_event_wake_unlock(dhd_pub_t *pub);
+extern void dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_pm_wake_unlock(dhd_pub_t *pub);
+extern void dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_txfl_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+extern void dhd_os_wake_lock_init(struct dhd_info *dhd);
+extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd);
+#ifdef DHD_USE_SCAN_WAKELOCK
+extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
+static inline void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t *dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+static inline void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t *dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+static inline void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t *dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+#define PRINT_CALL_INFO(str)
+#define PRINT_CALL_INFO_TIMEOUT(str, val)
+#define DHD_OS_WAKE_LOCK(pub) \
+	do { \
+		PRINT_CALL_INFO("call wakelock"); \
+		dhd_os_wake_lock(pub); \
+	} while (0)
+#define DHD_OS_WAKE_UNLOCK(pub) \
+	do { \
+		PRINT_CALL_INFO("call wake_unlock"); \
+		dhd_os_wake_unlock(pub); \
+	} while (0)
+#define DHD_EVENT_WAKE_LOCK(pub) \
+	do { \
+		PRINT_CALL_INFO("call event_wake lock"); \
+		dhd_event_wake_lock(pub); \
+	} while (0)
+#define DHD_EVENT_WAKE_UNLOCK(pub) \
+	do { \
+		PRINT_CALL_INFO("call event_wake unlock"); \
+		dhd_event_wake_unlock(pub); \
+	} while (0)
+#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) \
+	do { \
+		PRINT_CALL_INFO("call pm_wake_timeout enable"); \
+		dhd_pm_wake_lock_timeout(pub, val); \
+	} while (0)
+#define DHD_PM_WAKE_UNLOCK(pub) \
+	do { \
+		PRINT_CALL_INFO("call pm_wake unlock"); \
+		dhd_pm_wake_unlock(pub); \
+	} while (0)
+#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) \
+	do { \
+		PRINT_CALL_INFO("call txfl_wake_timeout enable"); \
+		dhd_txfl_wake_lock_timeout(pub, val); \
+	} while (0)
+#define DHD_TXFL_WAKE_UNLOCK(pub) \
+	do { \
+		PRINT_CALL_INFO("call txfl_wake unlock"); \
+		dhd_txfl_wake_unlock(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) \
+	do { \
+		PRINT_CALL_INFO("call wake_lock_timeout"); \
+		dhd_os_wake_lock_timeout(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+	do { \
+		PRINT_CALL_INFO_TIMEOUT("call wake_lock_rx_timeout_enable", val); \
+		dhd_os_wake_lock_rx_timeout_enable(pub, val); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+	do { \
+		PRINT_CALL_INFO_TIMEOUT("call wake_lock_ctrl_timeout_enable", val); \
+		dhd_os_wake_lock_ctrl_timeout_enable(pub, val); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+	do { \
+		PRINT_CALL_INFO("call wake_lock_ctrl_timeout_cancel"); \
+		dhd_os_wake_lock_ctrl_timeout_cancel(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub) \
+	do { \
+		PRINT_CALL_INFO("call wake_lock_waive"); \
+		dhd_os_wake_lock_waive(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub) \
+	do { \
+		PRINT_CALL_INFO("call wake_lock_restore"); \
+		dhd_os_wake_lock_restore(pub); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_INIT(dhd) \
+	do { \
+		PRINT_CALL_INFO("call wake_lock_init"); \
+		dhd_os_wake_lock_init(dhd); \
+	} while (0)
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd) \
+	do { \
+		PRINT_CALL_INFO("call wake_lock_destroy"); \
+		dhd_os_wake_lock_destroy(dhd); \
+	} while (0)
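+
+/*
+ * Illustrative sketch (not a quote from the driver; dhd_process_rx_work() is
+ * a hypothetical helper): a caller that must keep the CPU awake while it
+ * processes dongle traffic brackets the work with the macros above:
+ *
+ *	DHD_OS_WAKE_LOCK(&dhd->pub);
+ *	dhd_process_rx_work(&dhd->pub);
+ *	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ */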
+
+#define DHD_OS_WD_WAKE_LOCK(pub)		dhd_os_wd_wake_lock(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub)		dhd_os_wd_wake_unlock(pub)
+
+#ifdef DHD_USE_SCAN_WAKELOCK
+#ifdef DHD_DEBUG_SCAN_WAKELOCK
+#define PRINT_SCAN_CALL(str) printf("%s: %s %d\n", \
+		str, __FUNCTION__, __LINE__)
+#else
+#define PRINT_SCAN_CALL(str)
+#endif /* DHD_DEBUG_SCAN_WAKELOCK */
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \
+	do { \
+		PRINT_SCAN_CALL("call wake_lock_scan"); \
+		dhd_os_scan_wake_lock_timeout(pub, val); \
+	} while (0)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \
+	do { \
+		PRINT_SCAN_CALL("call wake_unlock_scan"); \
+		dhd_os_scan_wake_unlock(pub); \
+	} while (0)
+#else
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub)
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+#define OOB_WAKE_LOCK_TIMEOUT 500
+extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
+#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val)	dhd_os_oob_irq_wake_lock_timeout(pub, val)
+#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub)			dhd_os_oob_irq_wake_unlock(pub)
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#define DHD_PACKET_TIMEOUT_MS	500
+#define DHD_EVENT_TIMEOUT_MS	1500
+#define SCAN_WAKE_LOCK_TIMEOUT	10000
+#define MAX_TX_TIMEOUT			500
+
+/* Enum for IOCTL received status */
+typedef enum dhd_ioctl_recieved_status
+{
+	IOCTL_WAIT = 0,
+	IOCTL_RETURN_ON_SUCCESS,
+	IOCTL_RETURN_ON_TRAP,
+	IOCTL_RETURN_ON_BUS_STOP,
+	IOCTL_RETURN_ON_ERROR
+} dhd_ioctl_recieved_status_t;
+
+/* Interface operations (register, remove) must be atomic; use this lock to prevent
+ * races between Wi-Fi on/off and the interface operation functions.
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+#if defined(MULTIPLE_SUPPLICANT)
+extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(MULTIPLE_SUPPLICANT)
+extern struct mutex _dhd_mutex_lock_;
+#define DHD_MUTEX_IS_LOCK_RETURN() \
+	if (mutex_is_locked(&_dhd_mutex_lock_) != 0) { \
+		printf("%s : probe is already running! return.\n", __FUNCTION__); \
+		return 0; \
+	}
+#define DHD_MUTEX_LOCK() \
+	do { \
+		if (mutex_is_locked(&_dhd_mutex_lock_) == 0) { \
+			printf("%s : no mutex held. set lock\n", __FUNCTION__); \
+		} else { \
+			printf("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__); \
+		} \
+		mutex_lock(&_dhd_mutex_lock_); \
+	} while (0)
+#define DHD_MUTEX_UNLOCK() \
+	do { \
+		mutex_unlock(&_dhd_mutex_lock_); \
+		printf("%s : the lock is released.\n", __FUNCTION__); \
+	} while (0)
+#else
+#define DHD_MUTEX_IS_LOCK_RETURN()	do {} while (0)
+#define DHD_MUTEX_LOCK()	do {} while (0)
+#define DHD_MUTEX_UNLOCK()	do {} while (0)
+#endif
+
+typedef enum dhd_attach_states
+{
+	DHD_ATTACH_STATE_INIT = 0x0,
+	DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+	DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+	DHD_ATTACH_STATE_ADD_IF = 0x4,
+	DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+	DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+	DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+	DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+	DHD_ATTACH_STATE_CFG80211 = 0x80,
+	DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+	DHD_ATTACH_TIMESYNC_ATTACH_DONE = 0x200,
+	DHD_ATTACH_LOGTRACE_INIT = 0x400,
+	DHD_ATTACH_STATE_LB_ATTACH_DONE = 0x800,
+	DHD_ATTACH_STATE_DONE = 0x1000
+} dhd_attach_states_t;
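+
+/*
+ * The attach states above are bit flags; progress is typically accumulated as
+ * each stage completes and tested again on teardown. Illustrative (assumed)
+ * pattern:
+ *
+ *	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+ *	...
+ *	if (dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)
+ *		DHD_OS_WAKE_LOCK_DESTROY(dhd);
+ */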
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID 	-1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID	-2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
+#ifdef BCMDBUS
+	, void *adapter
+#endif
+);
+#if defined(WLP2P) && defined(WL_CFG80211)
+/* To allow attach/detach calls corresponding to p2p0 interface  */
+extern int dhd_attach_p2p(dhd_pub_t *);
+extern int dhd_detach_p2p(dhd_pub_t *);
+#endif /* WLP2P && WL_CFG80211 */
+extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+extern void dhd_clear(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Returns the ucode path */
+extern char *dhd_get_ucode_path(dhd_pub_t *dhdp);
+#endif /* DHD_UCODE_DOWNLOAD */
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+#define WIFI_FEATURE_INFRA              0x0001      /* Basic infrastructure mode        */
+#define WIFI_FEATURE_INFRA_5G           0x0002      /* Support for 5 GHz Band           */
+#define WIFI_FEATURE_HOTSPOT            0x0004      /* Support for GAS/ANQP             */
+#define WIFI_FEATURE_P2P                0x0008      /* Wifi-Direct                      */
+#define WIFI_FEATURE_SOFT_AP            0x0010      /* Soft AP                          */
+#define WIFI_FEATURE_GSCAN              0x0020      /* Google-Scan APIs                 */
+#define WIFI_FEATURE_NAN                0x0040      /* Neighbor Awareness Networking    */
+#define WIFI_FEATURE_D2D_RTT            0x0080      /* Device-to-device RTT             */
+#define WIFI_FEATURE_D2AP_RTT           0x0100      /* Device-to-AP RTT                 */
+#define WIFI_FEATURE_BATCH_SCAN         0x0200      /* Batched Scan (legacy)            */
+#define WIFI_FEATURE_PNO                0x0400      /* Preferred network offload        */
+#define WIFI_FEATURE_ADDITIONAL_STA     0x0800      /* Support for two STAs             */
+#define WIFI_FEATURE_TDLS               0x1000      /* Tunnel directed link setup       */
+#define WIFI_FEATURE_TDLS_OFFCHANNEL    0x2000      /* Support for TDLS off channel     */
+#define WIFI_FEATURE_EPR                0x4000      /* Enhanced power reporting         */
+#define WIFI_FEATURE_AP_STA             0x8000      /* Support for AP STA Concurrency   */
+#define WIFI_FEATURE_LINKSTAT           0x10000     /* Support for Linkstats            */
+#define WIFI_FEATURE_LOGGER             0x20000     /* WiFi Logger			*/
+#define WIFI_FEATURE_HAL_EPNO           0x40000     /* WiFi PNO enhanced                */
+#define WIFI_FEATURE_RSSI_MONITOR       0x80000     /* RSSI Monitor                     */
+#define WIFI_FEATURE_MKEEP_ALIVE        0x100000    /* WiFi mkeep_alive			*/
+#define WIFI_FEATURE_CONFIG_NDO         0x200000    /* ND offload configure             */
+#define WIFI_FEATURE_TX_TRANSMIT_POWER  0x400000    /* Capture Tx transmit power levels	*/
+#define WIFI_FEATURE_INVALID            0xFFFFFFFF  /* Invalid Feature                  */
+
+#define MAX_FEATURE_SET_CONCURRRENT_GROUPS  3
+
+extern int dhd_dev_get_feature_set(struct net_device *dev);
+extern int dhd_dev_get_feature_set_matrix(struct net_device *dev, int num);
+extern int dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui);
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs);
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+
+#ifdef NDO_CONFIG_SUPPORT
+#ifndef NDO_MAX_HOST_IP_ENTRIES
+#define NDO_MAX_HOST_IP_ENTRIES	10
+#endif /* NDO_MAX_HOST_IP_ENTRIES */
+extern int dhd_dev_ndo_cfg(struct net_device *dev, u8 enable);
+extern int dhd_dev_ndo_update_inet6addr(struct net_device * dev);
+#endif /* NDO_CONFIG_SUPPORT */
+extern int dhd_set_rand_mac_oui(dhd_pub_t *dhd);
+#ifdef GSCAN_SUPPORT
+extern int dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
+             wlc_roam_exp_params_t *roam_param);
+extern int dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable);
+extern int dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
+       wl_bssid_pref_cfg_t *bssid_pref, uint32 flush);
+extern int dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
+    uint32 len, uint32 flush);
+extern int dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *whitelist,
+    uint32 len, uint32 flush);
+#endif /* GSCAN_SUPPORT */
+
+/* OS independent layer functions */
+extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub);
+extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub);
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool resched);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub);
+extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub);
+#ifdef PCIE_FULL_DONGLE
+extern void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason);
+#else
+static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason)
+{ printf("%s is NOT implemented for SDIO", __FUNCTION__); return; }
+#endif
+#ifdef SHOW_LOGTRACE
+extern int dhd_os_read_file(void *file, char *buf, uint32 size);
+extern int dhd_os_seek_file(void *file, int64 offset);
+#endif /* SHOW_LOGTRACE */
+
+extern void
+dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr);
+extern void wl_dhdpcie_dump_regs(void * context);
+
+#define DHD_OS_IOCTL_RESP_LOCK(x)
+#define DHD_OS_IOCTL_RESP_UNLOCK(x)
+
+
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern int dhd_os_get_image_size(void * image);
+#if defined(BT_OVER_SDIO)
+extern int dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image);
+extern void dhdsdio_bus_usr_cnt_inc(dhd_pub_t *pub);
+extern void dhdsdio_bus_usr_cnt_dec(dhd_pub_t *pub);
+#endif /* (BT_OVER_SDIO) */
+extern void * dhd_os_open_image(char * filename);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern void dhd_os_runtimepm_timer(void *bus, uint tick);
+#endif /* DHD_PCIE_RUNTIMEPM */
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_tracelog(const char *format, ...);
+#ifdef DHDTCPACK_SUPPRESS
+extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub);
+extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags);
+#endif /* DHDTCPACK_SUPPRESS */
+
+extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
+extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
+extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
+#if defined(CUSTOM_COUNTRY_CODE)
+extern void get_customized_country_code(void *adapter, char *country_iso_code,
+	wl_country_t *cspec, u32 flags);
+#else
+extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+extern int dhd_os_send_hang_message(dhd_pub_t *dhdp);
+extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
+extern bool dhd_os_check_if_up(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock_all(dhd_pub_t *pub);
+extern int dhd_get_instance(dhd_pub_t *pub);
+#ifdef CUSTOM_SET_CPUCORE
+extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
+#endif /* CUSTOM_SET_CPUCORE */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+#if defined(DHD_FW_COREDUMP)
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size);
+#endif /* DHD_FW_COREDUMP */
+
+void dhd_schedule_sssr_dump(dhd_pub_t *dhdp);
+
+#ifdef SUPPORT_AP_POWERSAVE
+extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable);
+#endif /* SUPPORT_AP_POWERSAVE */
+
+#ifdef PKT_FILTER_SUPPORT
+#define DHD_UNICAST_FILTER_NUM		0
+#define DHD_BROADCAST_FILTER_NUM	1
+#define DHD_MULTICAST4_FILTER_NUM	2
+#define DHD_MULTICAST6_FILTER_NUM	3
+#define DHD_MDNS_FILTER_NUM		4
+#define DHD_ARP_FILTER_NUM		5
+#define DHD_BROADCAST_ARP_FILTER_NUM	6
+#define DHD_IP4BCAST_DROP_FILTER_NUM	7
+#define DISCARD_IPV4_MCAST	"102 1 6 IP4_H:16 0xf0 0xe0"
+#define DISCARD_IPV6_MCAST	"103 1 6 IP6_H:24 0xff 0xff"
+extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
+extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
+extern int dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num);
+extern int net_os_enable_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
+#endif /* PKT_FILTER_SUPPORT */
+
+
+#if defined(BCMPCIE)
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
+#else
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
+extern bool dhd_support_sta_mode(dhd_pub_t *dhd);
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+
+#ifdef RSSI_MONITOR_SUPPORT
+extern int dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
+             int8 max_rssi, int8 min_rssi);
+#endif /* RSSI_MONITOR_SUPPORT */
+
+#ifdef DHDTCPACK_SUPPRESS
+extern int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#define DHD_RSSI_MONITOR_EVT_VERSION   1
+typedef struct {
+	uint8 version;
+	int8 cur_rssi;
+	struct ether_addr BSSID;
+} dhd_rssi_monitor_evt_t;
+
+typedef struct {
+	uint32 limit;		/* Expiration time (usec) */
+	uint32 increment;	/* Current expiration increment (usec) */
+	uint32 elapsed;		/* Current elapsed time (usec) */
+	uint32 tick;		/* O/S tick time (usec) */
+} dhd_timeout_t;
+
+#ifdef SHOW_LOGTRACE
+typedef struct {
+	int  num_fmts;
+	char **fmts;
+	char *raw_fmts;
+	char *raw_sstr;
+	uint32 fmts_size;
+	uint32 raw_fmts_size;
+	uint32 raw_sstr_size;
+	uint32 ramstart;
+	uint32 rodata_start;
+	uint32 rodata_end;
+	char *rom_raw_sstr;
+	uint32 rom_raw_sstr_size;
+	uint32 rom_ramstart;
+	uint32 rom_rodata_start;
+	uint32 rom_rodata_end;
+} dhd_event_log_t;
+#endif /* SHOW_LOGTRACE */
+
+#ifdef KEEP_ALIVE
+extern int dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
+	uint16 ip_pkt_len, uint8* src_mac_addr, uint8* dst_mac_addr, uint32 period_msec);
+extern int dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id);
+#endif /* KEEP_ALIVE */
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+/*
+ * As per Google's current implementation there is only one APF filter, so
+ * userspace does not pass a filter id; DHD therefore has to manage the
+ * filter id itself.
+ */
+#define PKT_FILTER_APF_ID		200
+#define DHD_APF_LOCK(ndev)		dhd_apf_lock(ndev)
+#define DHD_APF_UNLOCK(ndev)	dhd_apf_unlock(ndev)
+
+extern void dhd_apf_lock(struct net_device *dev);
+extern void dhd_apf_unlock(struct net_device *dev);
+extern int dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version);
+extern int dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len);
+extern int dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
+	uint32 program_len);
+extern int dhd_dev_apf_enable_filter(struct net_device *ndev);
+extern int dhd_dev_apf_disable_filter(struct net_device *ndev);
+extern int dhd_dev_apf_delete_filter(struct net_device *ndev);
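+
+/*
+ * Illustrative sketch (assumed calling convention): since there is a single
+ * APF filter, operations are serialized with the lock helpers above, e.g.
+ *
+ *	DHD_APF_LOCK(ndev);
+ *	ret = dhd_dev_apf_add_filter(ndev, program, program_len);
+ *	DHD_APF_UNLOCK(ndev);
+ */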
+#endif /* PKT_FILTER_SUPPORT && APF */
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
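+
+/*
+ * Illustrative polling sketch (assumed usage; condition_met() is a
+ * hypothetical predicate):
+ *
+ *	dhd_timeout_t tmo;
+ *	dhd_timeout_start(&tmo, 2000000);	// 2 second budget, in usec
+ *	while (!condition_met() && !dhd_timeout_expired(&tmo))
+ *		;	// poll until the condition holds or the budget elapses
+ */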
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(void *pub, int ifidx);
+extern int net_os_send_hang_message(struct net_device *dev);
+extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num);
+extern bool dhd_wowl_cap(void *bus);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen,
+	wl_event_msg_t *, void **data_ptr,  void *);
+extern int wl_process_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen,
+	wl_event_msg_t *, void **data_ptr,  void *);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern int wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu);
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+                            int ifindex);
+extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
+	int cmd, uint8 set, int ifidx);
+extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
+	int cmd, uint8 set, int ifidx);
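+
+/*
+ * Illustrative sketch (assumed usage; the iovar name is only an example and
+ * error handling is omitted): reading an integer iovar through the wrapper
+ * above:
+ *
+ *	uint val = 0;
+ *	dhd_wl_ioctl_get_intiovar(dhd_pub, "lpc", &val, WLC_GET_VAR, FALSE, 0);
+ *	printf("lpc: %u\n", val);
+ */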
+extern void dhd_common_init(osl_t *osh);
+
+extern int dhd_do_driver_init(struct net_device *net);
+extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern int dhd_event_ifchange(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+       char *name, uint8 *mac);
+#ifdef DHD_UPDATE_INTF_MAC
+extern int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx);
+#endif /* DHD_UPDATE_INTF_MAC */
+extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name);
+extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+#ifdef LOG_INTO_TCPDUMP
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
+#endif /* LOG_INTO_TCPDUMP */
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int  dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
+extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval);
+#if defined(BCMSDIO) || defined(BCMPCIE)
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
+#endif /* defined(BCMSDIO) || defined(BCMPCIE) */
+int dhd_bus_get_fw_mode(dhd_pub_t *dhdp);
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+/* OS spin lock API */
+extern void *dhd_os_spin_lock_init(osl_t *osh);
+extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long dhd_os_spin_lock(void *lock);
+void dhd_os_spin_unlock(void *lock, unsigned long flags);
+
+#ifdef DHD_EFI
+extern int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_enter_wake(dhd_pub_t * pub);
+#else
+static INLINE int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition)
+{ printf("%s is Not supported for this platform", __FUNCTION__); return 0; }
+static INLINE int dhd_os_ds_enter_wake(dhd_pub_t * pub)
+{ return 0; }
+#endif /* DHD_EFI */
+
+#ifdef PCIE_INB_DW
+extern int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_exit_wake(dhd_pub_t * pub);
+#endif /* PCIE_INB_DW */
+extern int dhd_os_busbusy_wake(dhd_pub_t * pub);
+extern int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition);
+extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_d3ack_wake(dhd_pub_t * pub);
+
+/*
+ * Manage sta objects in an interface. An interface is identified by an ifindex and
+ * the sta(s) within an interface are managed using the MAC address of the sta.
+ */
+struct dhd_sta;
+extern bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac);
+extern struct dhd_sta *dhd_find_sta(void *pub, int ifidx, void *ea);
+extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea);
+extern void dhd_del_all_sta(void *pub, int ifidx);
+extern void dhd_del_sta(void *pub, int ifidx, void *ea);
+extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+#if defined(BCM_GMAC3)
+extern int dhd_set_dev_def(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif
+extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
+extern struct net_device *dhd_linux_get_primary_netdev(dhd_pub_t *dhdp);
+
+extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
+int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len,
+		char *res_buf, uint res_len, int set);
+extern int dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
+		uint cmd_len, char **resptr, uint resp_len);
+
+#ifdef DHD_MCAST_REGEN
+extern int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif
+typedef enum cust_gpio_modes {
+	WLAN_RESET_ON,
+	WLAN_RESET_OFF,
+	WLAN_POWER_ON,
+	WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+typedef struct dmaxref_mem_map {
+	dhd_dma_buf_t *srcmem;
+	dhd_dma_buf_t *dstmem;
+} dmaxref_mem_map_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+extern bool dhd_os_wd_timer_enabled(void *bus);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern uint dhd_runtimepm_ms;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint android_msg_level;
+extern uint config_msg_level;
+extern uint sd_msglevel;
+#ifdef BCMDBUS
+extern uint dbus_msglevel;
+#endif /* BCMDBUS */
+#ifdef WL_WIRELESS_EXT
+extern uint iw_msg_level;
+#endif
+#ifdef WL_CFG80211
+extern uint wl_dbg_level;
+#endif
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#ifdef DHD_USE_IDLECOUNT
+#define DHD_IDLETIME_TICKS 5
+#else
+#define DHD_IDLETIME_TICKS 1
+#endif /* DHD_USE_IDLECOUNT */
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* triggers bcm_bprintf to print to kernel log */
+extern bool bcm_bprintf_bypass;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+
+/* Default bcn_timeout value is 4 */
+#define DEFAULT_BCN_TIMEOUT_VALUE	4
+#ifndef CUSTOM_BCN_TIMEOUT_SETTING
+#define CUSTOM_BCN_TIMEOUT_SETTING	DEFAULT_BCN_TIMEOUT_VALUE
+#endif
+
+/* Default KEEP_ALIVE period is 55 sec, to prevent the AP from sending keep-alive probe frames */
+#define DEFAULT_KEEP_ALIVE_VALUE 	55000 /* msec */
+#ifndef CUSTOM_KEEP_ALIVE_SETTING
+#define CUSTOM_KEEP_ALIVE_SETTING 	DEFAULT_KEEP_ALIVE_VALUE
+#endif /* CUSTOM_KEEP_ALIVE_SETTING */
+
+#define NULL_PKT_STR	"null_pkt"
+
+/* hooks for custom glom setting option via Makefile */
+#define DEFAULT_GLOM_VALUE 	-1
+#ifndef CUSTOM_GLOM_SETTING
+#define CUSTOM_GLOM_SETTING 	DEFAULT_GLOM_VALUE
+#endif
+#define WL_AUTO_ROAM_TRIGGER -75
+/* hooks for custom Roam Trigger setting via Makefile */
+#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */
+#define DEFAULT_ROAM_TRIGGER_SETTING 	-1
+#ifndef CUSTOM_ROAM_TRIGGER_SETTING
+#define CUSTOM_ROAM_TRIGGER_SETTING 	DEFAULT_ROAM_TRIGGER_VALUE
+#endif
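+
+/*
+ * Illustrative example (assumption about the intended build-time override):
+ * the CUSTOM_* hooks above are meant to be set from the build, e.g. by adding
+ * something like
+ *
+ *	DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-70
+ *
+ * to the driver Makefile; when a hook is left undefined, the corresponding
+ * DEFAULT_* value applies.
+ */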
+
+/* hooks for custom Roam Delta setting via Makefile */
+#define DEFAULT_ROAM_DELTA_VALUE  10 /* dBm default roam delta all band */
+#define DEFAULT_ROAM_DELTA_SETTING 	-1
+#ifndef CUSTOM_ROAM_DELTA_SETTING
+#define CUSTOM_ROAM_DELTA_SETTING 	DEFAULT_ROAM_DELTA_VALUE
+#endif
+
+/* hooks for a custom PNO Event wake lock, to guarantee enough time
+ * for the platform to detect the event before the system is suspended
+ */
+#define DEFAULT_PNO_EVENT_LOCK_xTIME 	2 	/* multiple of DHD_PACKET_TIMEOUT_MS */
+#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
+#define CUSTOM_PNO_EVENT_LOCK_xTIME	 DEFAULT_PNO_EVENT_LOCK_xTIME
+#endif
+/* hooks for custom dhd_dpc_prio setting option via Makefile */
+#define DEFAULT_DHP_DPC_PRIO  1
+#ifndef CUSTOM_DPC_PRIO_SETTING
+#define CUSTOM_DPC_PRIO_SETTING 	DEFAULT_DHP_DPC_PRIO
+#endif
+
+#ifndef CUSTOM_LISTEN_INTERVAL
+#define CUSTOM_LISTEN_INTERVAL 		LISTEN_INTERVAL
+#endif /* CUSTOM_LISTEN_INTERVAL */
+
+#define DEFAULT_SUSPEND_BCN_LI_DTIM		3
+#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
+#define CUSTOM_SUSPEND_BCN_LI_DTIM		DEFAULT_SUSPEND_BCN_LI_DTIM
+#endif
+
+#ifndef BCN_TIMEOUT_IN_SUSPEND
+#define BCN_TIMEOUT_IN_SUSPEND			6 /* bcn timeout value in suspend mode */
+#endif
+
+#ifndef CUSTOM_RXF_PRIO_SETTING
+#define CUSTOM_RXF_PRIO_SETTING		MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
+#endif
+
+#define DEFAULT_WIFI_TURNOFF_DELAY		0
+#define WIFI_TURNOFF_DELAY		DEFAULT_WIFI_TURNOFF_DELAY
+
+#define DEFAULT_WIFI_TURNON_DELAY		200
+#ifndef WIFI_TURNON_DELAY
+#define WIFI_TURNON_DELAY		DEFAULT_WIFI_TURNON_DELAY
+#endif /* WIFI_TURNON_DELAY */
+
+#ifdef BCMSDIO
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	10 /* msec */
+#else
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	0 /* msec */
+#endif
+#ifndef CUSTOM_DHD_WATCHDOG_MS
+#define CUSTOM_DHD_WATCHDOG_MS			DEFAULT_DHD_WATCHDOG_INTERVAL_MS
+#endif /* CUSTOM_DHD_WATCHDOG_MS */
+
+#define DEFAULT_ASSOC_RETRY_MAX			3
+#ifndef CUSTOM_ASSOC_RETRY_MAX
+#define CUSTOM_ASSOC_RETRY_MAX			DEFAULT_ASSOC_RETRY_MAX
+#endif /* CUSTOM_ASSOC_RETRY_MAX */
+
+#if defined(BCMSDIO) || defined(DISABLE_FRAMEBURST)
+#define DEFAULT_FRAMEBURST_SET			0
+#else
+#define DEFAULT_FRAMEBURST_SET			1
+#endif /* BCMSDIO || DISABLE_FRAMEBURST */
+
+#ifndef CUSTOM_FRAMEBURST_SET
+#define CUSTOM_FRAMEBURST_SET			DEFAULT_FRAMEBURST_SET
+#endif /* CUSTOM_FRAMEBURST_SET */
+
+#ifdef WLTDLS
+#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
+#define CUSTOM_TDLS_IDLE_MODE_SETTING  60000 /* 60 sec to tear down a TDLS link if not active */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
+#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
+#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */
+#endif
+#endif /* WLTDLS */
+
+#if defined(VSDB) || defined(ROAM_ENABLE)
+#define DEFAULT_BCN_TIMEOUT            8
+#else
+#define DEFAULT_BCN_TIMEOUT            4
+#endif /* VSDB || ROAM_ENABLE */
+
+#ifndef CUSTOM_BCN_TIMEOUT
+#define CUSTOM_BCN_TIMEOUT             DEFAULT_BCN_TIMEOUT
+#endif
+
+#define MAX_DTIM_SKIP_BEACON_INTERVAL	100 /* max allowed associated-AP beacon interval for DTIM skip */
+#ifndef MAX_DTIM_ALLOWED_INTERVAL
+#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
+#endif
+
+#ifndef MIN_DTIM_FOR_ROAM_THRES_EXTEND
+#define MIN_DTIM_FOR_ROAM_THRES_EXTEND	600 /* minimum dtim interval to extend roam threshold */
+#endif
+
+#define NO_DTIM_SKIP 1
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN	2048
+#define MOD_PARAM_INFOLEN	512
+#define MOD_PARAM_SRLEN	64
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+#ifdef DHD_LEGACY_FILE_PATH
+#define PLATFORM_PATH	"/data/"
+#elif defined(PLATFORM_SLP)
+#define PLATFORM_PATH	"/opt/etc/"
+#else
+#define PLATFORM_PATH	"/data/misc/conn/"
+#endif /* DHD_LEGACY_FILE_PATH */
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+#ifndef BCMDBUS
+extern int allow_delay_fwdl;
+#endif /* !BCMDBUS */
+
+extern int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost);
+extern int dhd_write_file(const char *filepath, char *buf, int buf_len);
+extern int dhd_read_file(const char *filepath, char *buf, int buf_len);
+extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len);
+
+#ifdef READ_MACADDR
+extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_set_macaddr_from_file(dhd_pub_t *dhdp) { return 0; }
+#endif /* READ_MACADDR */
+#ifdef WRITE_MACADDR
+extern int dhd_write_macaddr(struct ether_addr *mac);
+#else
+static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
+#endif /* WRITE_MACADDR */
+static INLINE int dhd_check_module_cid(dhd_pub_t *dhdp) { return 0; }
+#ifdef GET_MAC_FROM_OTP
+extern int dhd_check_module_mac(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_check_module_mac(dhd_pub_t *dhdp) { return 0; }
+#endif /* GET_MAC_FROM_OTP */
+
+#if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(GET_MAC_FROM_OTP)
+#define DHD_USE_CISINFO
+#endif /* READ_MACADDR || WRITE_MACADDR || GET_MAC_FROM_OTP */
+
+#ifdef DHD_USE_CISINFO
+int dhd_read_cis(dhd_pub_t *dhdp);
+void dhd_clear_cis(dhd_pub_t *dhdp);
+#else
+static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; }
+static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { }
+#endif /* DHD_USE_CISINFO */
+
+#define IBSS_COALESCE_DEFAULT 1
+#define IBSS_INITIAL_SCAN_ALLOWED_DEFAULT 1
+
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#define IFLOCK_INIT(lock)       *lock = 0
+#define IFLOCK(lock)    while (InterlockedCompareExchange((lock), 1, 0))	\
+	NdisStallExecution(1);
+#define IFUNLOCK(lock)  InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL))
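+
+/*
+ * Illustrative example (assumed usage): fw_capabilities holds a
+ * space-separated word list, and the surrounding spaces in the macro above
+ * make the match whole-word, e.g.
+ *
+ *	if (FW_SUPPORTED(dhdp, tdls)) {
+ *		// TDLS-capable firmware path
+ *	}
+ */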
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES	8
+void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef WLTDLS
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode);
+#ifdef PCIE_FULL_DONGLE
+int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event);
+int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event);
+int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
+
+/* Neighbor Discovery Offload Support */
+extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
+int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
+int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
+
+/* Enhanced ND offload support */
+uint16 dhd_ndo_get_version(dhd_pub_t *dhdp);
+int dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx);
+int dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx);
+int dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx);
+int dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable);
+
+/* ioctl processing for nl80211 */
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
+
+void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
+											char *pclm_path, char *pconf_path);
+void dhd_set_bus_state(void *bus, uint32 state);
+
+/* Remove proper pkts (either one non-fragmented pkt or a whole set of fragmented pkts) */
+typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
+extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
+
+#ifdef PROP_TXSTATUS
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+extern const uint8 prio2fifo[];
+#endif /* PROP_TXSTATUS */
+
+int dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size);
+int dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size);
+int dhd_common_socram_dump(dhd_pub_t *dhdp);
+
+int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size);
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
+#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
+#else
+#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
+#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+#ifdef USE_WFA_CERT_CONF
+enum {
+	SET_PARAM_BUS_TXGLOM_MODE,
+	SET_PARAM_ROAMOFF,
+#ifdef USE_WL_FRAMEBURST
+	SET_PARAM_FRAMEBURST,
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+	SET_PARAM_TXBF,
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+	SET_PARAM_PROPTX,
+	SET_PARAM_PROPTXMODE,
+#endif /* PROP_TXSTATUS */
+	PARAM_LAST_VALUE
+};
+extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
+#endif /* USE_WFA_CERT_CONF */
+
+#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid)  do {} while (0)
+#define dhd_del_flowid(pub, ifidx, flowid)               do {} while (0)
+bool dhd_wet_chainable(dhd_pub_t *dhdp);
+
+extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/** Miscellaneous DHD Spin Locks */
+
+/* Disable router 3GMAC bypass path perimeter lock */
+#define DHD_PERIM_LOCK(dhdp)              do {} while (0)
+#define DHD_PERIM_UNLOCK(dhdp)            do {} while (0)
+#define DHD_PERIM_LOCK_ALL(processor_id)    do {} while (0)
+#define DHD_PERIM_UNLOCK_ALL(processor_id)  do {} while (0)
+
+/* Enable DHD general spin lock/unlock */
+#define DHD_GENERAL_LOCK(dhdp, flags) \
+	(flags) = dhd_os_general_spin_lock(dhdp)
+#define DHD_GENERAL_UNLOCK(dhdp, flags) \
+	dhd_os_general_spin_unlock((dhdp), (flags))
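+
+/*
+ * Illustrative sketch (assumed usage): the lock macros above hand the saved
+ * flags back through their second argument, so a critical section looks like:
+ *
+ *	unsigned long flags;
+ *	DHD_GENERAL_LOCK(dhdp, flags);
+ *	...	// touch shared DHD state
+ *	DHD_GENERAL_UNLOCK(dhdp, flags);
+ */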
+
+/* Enable DHD timer spin lock/unlock */
+#define DHD_TIMER_LOCK(lock, flags)     (flags) = dhd_os_spin_lock(lock)
+#define DHD_TIMER_UNLOCK(lock, flags)   dhd_os_spin_unlock(lock, (flags))
+
+/* Enable DHD flowring spin lock/unlock */
+#define DHD_FLOWRING_LOCK(lock, flags)     (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWRING_UNLOCK(lock, flags)   dhd_os_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring info spin lock/unlock */
+#define DHD_FLOWID_LOCK(lock, flags)       (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWID_UNLOCK(lock, flags)     dhd_os_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring list spin lock/unlock */
+#define DHD_FLOWRING_LIST_LOCK(lock, flags)       (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWRING_LIST_UNLOCK(lock, flags)     dhd_os_spin_unlock((lock), (flags))
+
+#define DHD_SPIN_LOCK(lock, flags)	(flags) = dhd_os_spin_lock(lock)
+#define DHD_SPIN_UNLOCK(lock, flags)	dhd_os_spin_unlock((lock), (flags))
+
+#define DHD_BUS_INB_DW_LOCK(lock, flags)	(flags) = dhd_os_spin_lock(lock)
+#define DHD_BUS_INB_DW_UNLOCK(lock, flags)	dhd_os_spin_unlock((lock), (flags))
+
+/* Enable DHD TDLS peer list spin lock/unlock */
+#ifdef WLTDLS
+#define DHD_TDLS_LOCK(lock, flags)       (flags) = dhd_os_spin_lock(lock)
+#define DHD_TDLS_UNLOCK(lock, flags)     dhd_os_spin_unlock((lock), (flags))
+#endif /* WLTDLS */
+
+#ifdef DBG_PKT_MON
+/* Enable DHD PKT MON spin lock/unlock */
+#define DHD_PKT_MON_LOCK(lock, flags)     (flags) = dhd_os_spin_lock(lock)
+#define DHD_PKT_MON_UNLOCK(lock, flags)   dhd_os_spin_unlock(lock, (flags))
+#endif /* DBG_PKT_MON */
+
+#define DHD_LINUX_GENERAL_LOCK(dhdp, flags)	DHD_GENERAL_LOCK(dhdp, flags)
+#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags)	DHD_GENERAL_UNLOCK(dhdp, flags)
+
+extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp);
+
+#ifdef BCMDBUS
+extern uint dhd_get_rxsz(dhd_pub_t *pub);
+extern void dhd_set_path(dhd_pub_t *pub);
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+#endif /* BCMDBUS */
+
+#ifdef DHD_L2_FILTER
+extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif /* DHD_L2_FILTER */
+
+
+typedef struct wl_io_pport {
+	dhd_pub_t *dhd_pub;
+	uint ifidx;
+} wl_io_pport_t;
+
+typedef struct wl_evt_pport {
+	dhd_pub_t *dhd_pub;
+	int *ifidx;
+	void *pktdata;
+	uint data_len;
+	void **data_ptr;
+	void *raw_event;
+} wl_evt_pport_t;
+
+extern void *dhd_pub_shim(dhd_pub_t *dhd_pub);
+#ifdef DHD_FW_COREDUMP
+void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length);
+#endif /* DHD_FW_COREDUMP */
+
+#if defined(SET_RPS_CPUS)
+int dhd_rps_cpus_enable(struct net_device *net, int enable);
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
+void custom_rps_map_clear(struct netdev_rx_queue *queue);
+#define PRIMARY_INF 0
+#define VIRTUAL_INF 1
+#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \
+	defined(CONFIG_SOC_EXYNOS8890)
+#define RPS_CPUS_MASK "10"
+#define RPS_CPUS_MASK_P2P "10"
+#define RPS_CPUS_MASK_IBSS "10"
+#define RPS_CPUS_WLAN_CORE_ID 4
+#else
+#define RPS_CPUS_MASK "6"
+#define RPS_CPUS_MASK_P2P "6"
+#define RPS_CPUS_MASK_IBSS "6"
+#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
+#endif /* SET_RPS_CPUS */
+
+int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
+	char **buffer, int *length);
+
+void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length);
+
+int dhd_download_blob(dhd_pub_t *dhd, unsigned char *image,
+		uint32 len, char *iovar);
+
+int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path);
+
+#ifdef SHOW_LOGTRACE
+int dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
+		dhd_event_log_t *event_log);
+int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart,
+		uint32 *rodata_start, uint32 *rodata_end);
+#ifdef PCIE_FULL_DONGLE
+int dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
+		dhd_event_log_t *event_data);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* SHOW_LOGTRACE */
+
+#define dhd_is_device_removed(x) FALSE
+#define dhd_os_ind_firmware_stall(x)
+
+#if defined(DHD_FW_COREDUMP)
+extern void dhd_get_memdump_info(dhd_pub_t *dhd);
+#endif /* defined(DHD_FW_COREDUMP) */
+#ifdef BCMASSERT_LOG
+extern void dhd_get_assert_info(dhd_pub_t *dhd);
+#else
+static INLINE void dhd_get_assert_info(dhd_pub_t *dhd) { }
+#endif /* BCMASSERT_LOG */
+
+#define DMAXFER_FREE(dhdp, dmap) dhd_schedule_dmaxfer_free(dhdp, dmap)
+
+#if defined(PCIE_FULL_DONGLE)
+extern void dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap);
+void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap);
+#endif  /* PCIE_FULL_DONGLE */
+
+#define DHD_LB_STATS_NOOP	do { /* noop */ } while (0)
+#if defined(DHD_LB_STATS)
+#include <bcmutils.h>
+extern void dhd_lb_stats_init(dhd_pub_t *dhd);
+extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp);
+extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp);
+#define DHD_LB_STATS_INIT(dhdp)	dhd_lb_stats_init(dhdp)
+#define DHD_LB_STATS_DEINIT(dhdp) dhd_lb_stats_deinit(dhdp)
+/* Reset is called from common layer so it takes dhd_pub_t as argument */
+#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_init(dhdp)
+#define DHD_LB_STATS_CLR(x)	(x) = 0U
+#define DHD_LB_STATS_INCR(x)	(x) = (x) + 1
+#define DHD_LB_STATS_ADD(x, c)	(x) = (x) + (c)
+#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \
+	{ \
+		int cpu = get_cpu(); put_cpu(); \
+		DHD_LB_STATS_INCR(x[cpu]); \
+	}
+#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x)	dhd_lb_stats_update_napi_histo(dhdp, x)
+#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x)	dhd_lb_stats_update_txc_histo(dhdp, x)
+#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x)	dhd_lb_stats_update_rxc_histo(dhdp, x)
+#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp)	dhd_lb_stats_txc_percpu_cnt_incr(dhdp)
+#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp)	dhd_lb_stats_rxc_percpu_cnt_incr(dhdp)
+#else /* !DHD_LB_STATS */
+#define DHD_LB_STATS_INIT(dhdp)	 DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_DEINIT(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_CLR(x)	 DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_INCR(x)	 DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_ADD(x, c)	 DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_PERCPU_ARR_INCR(x)	 DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
+#endif /* !DHD_LB_STATS */
+
+#ifdef DHD_SSSR_DUMP
+#define DHD_SSSR_MEMPOOL_SIZE	(1024 * 1024) /* 1MB size */
+extern int dhd_sssr_mempool_init(dhd_pub_t *dhd);
+extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd);
+extern int dhd_sssr_dump_init(dhd_pub_t *dhd);
+extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd);
+#define DHD_SSSR_MEMPOOL_INIT(dhdp)	dhd_sssr_mempool_init(dhdp)
+#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp)
+#define DHD_SSSR_DUMP_INIT(dhdp)	dhd_sssr_dump_init(dhdp)
+#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp)
+#else
+#define DHD_SSSR_MEMPOOL_INIT(dhdp)	do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_DEINIT(dhdp)	do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_INIT(dhdp)	do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_DEINIT(dhdp)	do { /* noop */ } while (0)
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef SHOW_LOGTRACE
+void dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *read_buf_info);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMPCIE
+extern int dhd_prot_debug_info_print(dhd_pub_t *dhd);
+#else
+#define dhd_prot_debug_info_print(x)
+#endif /* BCMPCIE */
+
+extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info);
+
+bool dhd_fw_download_status(dhd_pub_t * dhd_pub);
+
+/* Bitmask used for Join Timeout */
+#define WLC_SSID_MASK          0x01
+#define WLC_WPA_MASK           0x02
+
+extern int dhd_start_join_timer(dhd_pub_t *pub);
+extern int dhd_stop_join_timer(dhd_pub_t *pub);
+extern int dhd_start_scan_timer(dhd_pub_t *pub);
+extern int dhd_stop_scan_timer(dhd_pub_t *pub);
+extern int dhd_start_cmd_timer(dhd_pub_t *pub);
+extern int dhd_stop_cmd_timer(dhd_pub_t *pub);
+extern int dhd_start_bus_timer(dhd_pub_t *pub);
+extern int dhd_stop_bus_timer(dhd_pub_t *pub);
+extern uint16 dhd_get_request_id(dhd_pub_t *pub);
+extern int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd);
+extern void dhd_set_join_error(dhd_pub_t *pub, uint32 mask);
+extern void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask);
+extern void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val);
+extern void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val);
+extern void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val);
+extern int dhd_start_timesync_timer(dhd_pub_t *pub);
+extern int dhd_stop_timesync_timer(dhd_pub_t *pub);
+
+#ifdef DHD_PKTID_AUDIT_ENABLED
+void dhd_pktid_error_handler(dhd_pub_t *dhdp);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+extern bool dhd_runtimepm_state(dhd_pub_t *dhd);
+extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr);
+extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr);
+extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp);
+extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp);
+/* Disable the Runtime PM and wake up if the bus is already in suspend */
+#define DHD_DISABLE_RUNTIME_PM(dhdp) \
+do { \
+	dhd_runtime_pm_disable(dhdp); \
+} while (0)
+
+/* Enable the Runtime PM */
+#define DHD_ENABLE_RUNTIME_PM(dhdp) \
+do { \
+	dhd_runtime_pm_enable(dhdp); \
+} while (0)
+#else
+#define DHD_DISABLE_RUNTIME_PM(dhdp)
+#define DHD_ENABLE_RUNTIME_PM(dhdp)
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+/*
+ * Enable this macro if you want to track calls to the wake lock.
+ * The records can be printed with the following command:
+ * cat /sys/bcm-dhd/wklock_trace
+ * DHD_TRACE_WAKE_LOCK is supported on Linux 2.6.0 and later.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#undef DHD_TRACE_WAKE_LOCK
+#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
+#endif
+
+extern bool dhd_query_bus_erros(dhd_pub_t *dhdp);
+
+extern void init_dhd_timeouts(dhd_pub_t *pub);
+extern void deinit_dhd_timeouts(dhd_pub_t *pub);
+
+typedef enum timeout_resons {
+	DHD_REASON_COMMAND_TO,
+	DHD_REASON_JOIN_TO,
+	DHD_REASON_SCAN_TO,
+	DHD_REASON_OQS_TO
+} timeout_reasons_t;
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+extern int dhd_bus_set_device_wake(struct dhd_bus *bus, bool val);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+#ifdef DHD_EFI
+extern void dhd_schedule_reset(dhd_pub_t *dhdp);
+#else
+static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) {;}
+#endif
+
+#ifdef ENABLE_TEMP_THROTTLING
+#ifndef TEMP_THROTTLE_CONTROL_BIT
+#define TEMP_THROTTLE_CONTROL_BIT 0xd
+#endif
+#endif /* ENABLE_TEMP_THROTTLING */
+
+int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size);
+#ifdef REPORT_FATAL_TIMEOUTS
+void dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason);
+#endif
+
+#if defined(CONFIG_64BIT)
+#define DHD_SUPPORT_64BIT
+#elif defined(DHD_EFI)
+#define DHD_SUPPORT_64BIT
+/* 64-bit support is disabled by default on other platforms; define the appropriate macro to enable it */
+#endif /* CONFIG_64BIT || DHD_EFI */
+
+#ifdef SET_PCIE_IRQ_CPU_CORE
+extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set);
+extern void set_irq_cpucore(unsigned int irq, int set);
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+#if defined(DHD_HANG_SEND_UP_TEST)
+extern void dhd_make_hang_with_reason(struct net_device *dev, const char *string_num);
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path);
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+#ifdef DHD_WAKE_STATUS
+wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp);
+#endif /* DHD_WAKE_STATUS */
+
+#ifdef BCM_ASLR_HEAP
+extern uint32 dhd_get_random_number(void);
+#endif /* BCM_ASLR_HEAP */
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h
new file mode 100644
index 0000000..e0f0483
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -0,0 +1,295 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_bus.h 698895 2017-05-11 02:55:17Z $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+extern int dbus_up(struct dhd_bus *pub);
+extern int dbus_stop(struct dhd_bus *pub);
+extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+	char *fw_path, char *nv_path, char *clm_path, char *conf_path);
+#if defined(BT_OVER_SDIO)
+extern int dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, char *btfw_path);
+#endif /* defined (BT_OVER_SDIO) */
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+
+/* Size of Extended Trap data Buffer */
+#ifdef BCMPCIE
+#define BCMPCIE_EXT_TRAP_DATA_MAXLEN  4096
+#endif
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+#ifdef BCMPCIE
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx);
+#else
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+#endif
+
+extern struct device * dhd_bus_to_dev(struct dhd_bus *bus);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
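+
+/*
+ * Illustrative sketch only (not part of the driver): since the caller must
+ * serialize control transactions itself, a typical exchange pairs one txctl
+ * with one rxctl while a caller-owned pending flag or lock is held, e.g.
+ *
+ *	ret = dhd_bus_txctl(dhd->bus, (uchar *)msg, msglen);
+ *	if (ret >= 0)
+ *		ret = dhd_bus_rxctl(dhd->bus, (uchar *)msg, msglen);
+ *
+ * msg/msglen above are placeholders; see the CDC protocol layer (dhd_cdc.c)
+ * for the real call sites.
+ */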
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable);
+extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub);
+extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub);
+extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub);
+
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+
+/* Deferred processing for the bus; a TRUE return requests a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                            void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* return the dongle chiprev */
+extern uint dhd_bus_chiprev(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern const void *dhd_bus_sih(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+#ifdef BCMSDIO
+extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+/* return sdio io status */
+extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus);
+#else
+#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
+#endif
+
+#define DHD_SET_BUS_STATE_DOWN(_bus)  do { \
+	(_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable);
+extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
+	uint32 *slot_num);
+
+#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO))
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+#else
+#define dhd_bus_mem_dump(x)
+#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */
+
+#ifdef BCMPCIE
+enum {
+	/* Scratch buffer configuration update */
+	D2H_DMA_SCRATCH_BUF,
+	D2H_DMA_SCRATCH_BUF_LEN,
+
+	/* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */
+	H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */
+	H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */
+	D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */
+	D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */
+
+	/* DHD sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */
+	H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */
+	H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */
+	D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */
+	D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */
+
+	/* DHD Indices array buffers and update for: H2D flow ring WR */
+	H2D_IFRM_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */
+	H2D_IFRM_INDX_WR_UPD, /* update H2D WR index in the H2D IFRM WR dma indices buf */
+
+	/* H2D and D2H Mailbox data update */
+	H2D_MB_DATA,
+	D2H_MB_DATA,
+
+	/* (Common) MsgBuf Ring configuration update */
+	RING_BUF_ADDR,       /* update ring base address to dongle */
+	RING_ITEM_LEN,       /* update ring item size to dongle */
+	RING_MAX_ITEMS,      /* update ring max items to dongle */
+
+	/* Update of WR or RD index, for a MsgBuf Ring */
+	RING_RD_UPD,         /* update ring read index from/to dongle */
+	RING_WR_UPD,         /* update ring write index from/to dongle */
+
+	TOTAL_LFRAG_PACKET_CNT,
+	MAX_HOST_RXBUFS,
+	HOST_API_VERSION,
+	DNGL_TO_HOST_TRAP_ADDR,
+#ifdef HOFFLOAD_MODULES
+	WRT_HOST_MODULE_ADDR
+#endif
+};
+
+typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
+typedef void (*dhd_mb_ring_2_t) (struct dhd_bus *, uint32, bool);
+extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type,
+	uint16 ringid);
+extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value);
+extern void dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake);
+extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid);
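+
+/*
+ * Illustrative only (not part of the driver): the msgbuf protocol code passes
+ * one of the enum values above to select which shared-memory item is read or
+ * written. The ring/index names below are placeholders, e.g.
+ *
+ *	uint16 wr_idx = ring->wr;
+ *	dhd_bus_cmn_writeshared(bus, &wr_idx, sizeof(wr_idx), RING_WR_UPD, ring->idx);
+ *
+ *	uint16 rd_idx;
+ *	dhd_bus_cmn_readshared(bus, &rd_idx, RING_RD_UPD, ring->idx);
+ */
+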
+extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus);
+extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
+extern void dhd_bus_start_queue(struct dhd_bus *bus);
+extern void dhd_bus_stop_queue(struct dhd_bus *bus);
+extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
+extern dhd_mb_ring_2_t dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus);
+extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus,
+	void * data, uint16 flowid);
+extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus,
+	void * data, uint8 flowid);
+extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status);
+extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus);
+extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+
+extern int dhdpcie_bus_clock_start(struct dhd_bus *bus);
+extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus);
+extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_disable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus);
+extern void dhdpcie_bus_free_resource(struct dhd_bus *bus);
+extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus);
+extern int dhd_bus_release_dongle(struct dhd_bus *bus);
+extern int dhd_bus_request_irq(struct dhd_bus *bus);
+extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq);
+
+extern void dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
+
+
+#ifdef DHD_FW_COREDUMP
+extern struct dhd_bus *g_dhd_bus;
+extern int dhd_dongle_mem_dump(void);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern void dhd_bus_idle_tx_ring_suspend(dhd_pub_t *dhd, uint16 flow_ring_id);
+#endif /* IDLE_TX_FLOW_MGMT */
+extern void dhd_bus_handle_mb_data(struct dhd_bus *bus, uint32 d2h_mb_data);
+#endif /* BCMPCIE */
+
+/* dump the device trap information */
+extern void dhd_bus_dump_trap_info(struct dhd_bus *bus, struct bcmstrbuf *b);
+
+/* Function to set default min res mask */
+extern bool dhd_bus_set_default_min_res_mask(struct dhd_bus *bus);
+
+/* Function to reset PMU registers */
+extern void dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp);
+
+#ifdef DHD_ULP
+extern void dhd_bus_ulp_disable_console(dhd_pub_t *dhdp);
+extern void dhd_bus_ucode_download(struct dhd_bus *bus);
+#endif /* DHD_ULP */
+extern int dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read);
+
+#ifdef BT_OVER_SDIO
+/*
+ * SDIO layer clock control functions exposed to be called from other layers.
+ * This is required especially when the SDIO bus is shared between BT and
+ * WLAN and the clock has to be controlled. Callers of these functions are
+ * expected to hold the sdlock.
+ */
+int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait);
+int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait);
+void dhdsdio_reset_bt_use_count(struct dhd_bus *bus);
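+
+/*
+ * Illustrative call pattern (a sketch, not part of the driver): a caller that
+ * already holds the sdlock keeps the SDIO clock up while it touches the
+ * shared bus, e.g.
+ *
+ *	if (__dhdsdio_clk_enable(bus, owner, TRUE) == BCME_OK) {
+ *		... access the shared SDIO bus ...
+ *		__dhdsdio_clk_disable(bus, owner, TRUE);
+ *	}
+ *
+ * The exact return convention and the bus_owner_t values are defined by the
+ * SDIO layer, so treat the above as pseudo-usage only.
+ */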
+#endif /* BT_OVER_SDIO */
+#ifdef BCMPCIE
+extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus);
+#else
+#define dhd_bus_dump_console_buffer(x)
+#endif /* BCMPCIE */
+
+extern uint16 dhd_get_chipid(dhd_pub_t *dhd);
+
+extern int dhd_get_idletime(dhd_pub_t *dhd);
+
+#ifdef DHD_WAKE_STATUS
+extern wake_counts_t* dhd_bus_get_wakecount(dhd_pub_t *dhd);
+extern int dhd_bus_get_bus_wake(dhd_pub_t * dhd);
+#endif /* DHD_WAKE_STATUS */
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_buzzz.h b/drivers/net/wireless/bcmdhd/dhd_buzzz.h
new file mode 100644
index 0000000..e349a3f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_buzzz.h
@@ -0,0 +1,37 @@
+#ifndef _DHD_BUZZZ_H_INCLUDED_
+#define _DHD_BUZZZ_H_INCLUDED_
+
+/*
+ * Broadcom logging system - Empty implementation
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#define dhd_buzzz_attach()              do { /* noop */ } while (0)
+#define dhd_buzzz_detach()              do { /* noop */ } while (0)
+#define dhd_buzzz_panic(x)              do { /* noop */ } while (0)
+#define BUZZZ_LOG(ID, N, ARG...)    do { /* noop */ } while (0)
+
+#endif /* _DHD_BUZZZ_H_INCLUDED_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c
new file mode 100644
index 0000000..3fb5e45
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -0,0 +1,962 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_cdc.c 699163 2017-05-12 05:18:23Z $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
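+
+/*
+ * Rough shape of the BDC header for reference (a sketch only; the
+ * authoritative definition lives in bcmcdc.h). The fields below are the ones
+ * this file touches in dhd_prot_hdrpush() / dhd_prot_hdrpull():
+ *
+ *	struct bdc_header {
+ *		uint8 flags;       protocol version in the upper bits, checksum flags
+ *		uint8 priority;    802.1d priority (BDC_PRIORITY_MASK bits)
+ *		uint8 flags2;      carries the interface index (BDC_SET_IF_IDX / BDC_GET_IF_IDX)
+ *		uint8 dataOffset;  extra (e.g. WLFC) header length, in 4-byte words
+ *	};
+ */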
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+#ifdef BCMDBUS
+#include <dhd_config.h>
+#endif /* BCMDBUS */
+
+#ifdef DHD_ULP
+#include <dhd_ulp.h>
+#endif /* DHD_ULP */
+
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN	(24+DHD_SDALIGN)	/* Must be at least SDPCM_RESERVE
+				 * defined in dhd_sdio.c (amount of header that might be added)
+				 * plus any space that might be needed for alignment padding.
+				 */
+#define ROUND_UP_MARGIN	2048	/* Largest possible SDIO block size, used to
+				 * round off at the end of the buffer
+				 */
+
+typedef struct dhd_prot {
+	uint16 reqid;
+	uint8 pending;
+	uint32 lastcmd;
+#ifdef BCMDBUS
+	uint ctl_completed;
+#endif /* BCMDBUS */
+	uint8 bus_header[BUS_HEADER_LEN];
+	cdc_ioctl_t msg;
+	unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+#ifdef BCMDBUS
+	int timeout = 0;
+#endif /* BCMDBUS */
+	int err = 0;
+	dhd_prot_t *prot = dhd->prot;
+	int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* NOTE : cdc->msg.len holds the desired length of the buffer to be
+	 *        returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+	 *	  is actually sent to the dongle
+	 */
+	if (len > CDC_MAX_MSG_SIZE)
+		len = CDC_MAX_MSG_SIZE;
+
+	/* Send request */
+#ifdef BCMDBUS
+	DHD_OS_IOCTL_RESP_LOCK(dhd);
+	prot->ctl_completed = FALSE;
+	err = dbus_send_ctl(dhd->bus, (void *)&prot->msg, len);
+	if (err) {
+		DHD_ERROR(("dbus_send_ctl error=%d\n", err));
+		DHD_OS_IOCTL_RESP_UNLOCK(dhd);
+		DHD_OS_WAKE_UNLOCK(dhd);
+		return err;
+	}
+#else
+	err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+#endif /* BCMDBUS */
+
+#ifdef BCMDBUS
+	timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
+	if ((!timeout) || (!prot->ctl_completed)) {
+		DHD_ERROR(("Txctl timeout %d ctl_completed %d\n",
+			timeout, prot->ctl_completed));
+		DHD_ERROR(("Txctl wait timed out\n"));
+		err = -1;
+	}
+	DHD_OS_IOCTL_RESP_UNLOCK(dhd);
+#endif /* BCMDBUS */
+#if defined(BCMDBUS) && defined(INTR_EP_ENABLE)
+	/* If the ctl write is successfully completed, wait for an acknowledgement
+	* that indicates that it is now ok to do ctl read from the dongle
+	*/
+	if (err != -1) {
+		DHD_OS_IOCTL_RESP_LOCK(dhd);
+		prot->ctl_completed = FALSE;
+		if (dbus_poll_intr(dhd->dbus)) {
+			DHD_ERROR(("dbus_poll_intr not submitted\n"));
+		} else {
+			/* interrupt polling was submitted successfully. Wait for the dongle to send
+			* interrupt
+			*/
+			timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
+			if (!timeout) {
+				DHD_ERROR(("intr poll wait timed out\n"));
+			}
+		}
+		DHD_OS_IOCTL_RESP_UNLOCK(dhd);
+	}
+#endif /* defined(BCMDBUS) && defined(INTR_EP_ENABLE) */
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+#ifdef BCMDBUS
+	int timeout = 0;
+#endif /* BCMDBUS */
+	int ret;
+	int cdc_len = len + sizeof(cdc_ioctl_t);
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	do {
+#ifdef BCMDBUS
+		DHD_OS_IOCTL_RESP_LOCK(dhd);
+		prot->ctl_completed = FALSE;
+		ret = dbus_recv_ctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+		if (ret) {
+			DHD_ERROR(("dbus_recv_ctl error=0x%x(%d)\n", ret, ret));
+			DHD_OS_IOCTL_RESP_UNLOCK(dhd);
+			goto done;
+		}
+		timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
+		if ((!timeout) || (!prot->ctl_completed)) {
+			DHD_ERROR(("Rxctl timeout %d ctl_completed %d\n",
+				timeout, prot->ctl_completed));
+			ret = -1;
+			DHD_OS_IOCTL_RESP_UNLOCK(dhd);
+
+			goto done;
+		}
+		DHD_OS_IOCTL_RESP_UNLOCK(dhd);
+
+		ret = cdc_len;
+#else
+		ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+#endif /* BCMDBUS */
+		if (ret < 0)
+			break;
+	} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+#ifdef BCMDBUS
+done:
+#endif /* BCMDBUS */
+	return ret;
+}
+
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0, retries = 0;
+	uint32 id, flags = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		if (!dhd->hang_was_sent)
+			DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if ((id < prot->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Copy info buffer */
+	if (buf)
+	{
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, (void*) prot->buf, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0;
+	uint32 flags, id;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	if (cmd == WLC_SET_PM) {
+		DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+#ifdef DHD_ULP
+	if (buf && (!strncmp(buf, "ulp", sizeof("ulp")))) {
+		/* force all the writes after this point NOT to use the cached sbwad value */
+		dhd_ulp_disable_cached_sbwad(dhd);
+	}
+#endif /* DHD_ULP */
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+#ifdef DHD_ULP
+	/* Temporary, for ULP prototyping */
+	if ((ret = dhd_ulp_check_ulp_request(dhd, buf)) < 0)
+		goto done;
+#endif /* DHD_ULP */
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+#ifdef BCMDBUS
+int
+dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot;
+
+	if (dhd == NULL)
+		return BCME_ERROR;
+
+	prot = dhd->prot;
+
+	ASSERT(prot);
+	DHD_OS_IOCTL_RESP_LOCK(dhd);
+	prot->ctl_completed = TRUE;
+	dhd_os_ioctl_resp_wake(dhd);
+	DHD_OS_IOCTL_RESP_UNLOCK(dhd);
+	return 0;
+}
+#endif /* BCMDBUS */
+
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+	static int error_cnt = 0;
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n",
+				__FUNCTION__, dhd->busstate, dhd->hang_was_sent));
+		goto done;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", buf ? (char*)buf : "\0"));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+	if (action & WL_IOCTL_ACTION_SET)
+		ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	else {
+		ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret - sizeof(cdc_ioctl_t);
+	}
+	// terence 20130805: send hang event to wpa_supplicant
+	if (ret == -EIO) {
+		error_cnt++;
+		if (error_cnt > 2)
+			ret = -ETIMEDOUT;
+	} else
+		error_cnt = 0;
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		cdc_ioctl_t *msg = &prot->msg;
+		ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+	prot->pending = FALSE;
+
+done:
+
+	return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                  void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	if (!dhdp || !dhdp->prot) {
+		return;
+	}
+
+	bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+/*	The FreeBSD PKTPUSH could change the packet buf pointer
+	so we need to make it changeable
+*/
+#define PKTBUF pktbuf
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif /* BDC */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Push BDC header used to convey priority for buses that don't */
+
+	PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN);
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF);
+
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(PKTBUF))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = 0;
+	BDC_SET_IF_IDX(h, ifidx);
+#endif /* BDC */
+}
+#undef PKTBUF	/* Only defined in the above routine */
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+	uint hdrlen = 0;
+#ifdef BDC
+	/* Length of BDC(+WLFC) headers pushed */
+	hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4);
+#endif
+	return hdrlen;
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
+	uint *reorder_info_len)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif
+	uint8 data_offset = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	if (reorder_info_len)
+		*reorder_info_len = 0;
+	/* Pop BDC header used to convey priority for buses that don't */
+
+	if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	if (!ifidx) {
+		/* for tx packet, skip the analysis */
+		data_offset = h->dataOffset;
+		PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+		goto exit;
+	}
+
+	*ifidx = BDC_GET_IF_IDX(h);
+
+	if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+		DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+		           dhd_ifname(dhd, *ifidx), h->flags));
+		if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+			h->dataOffset = 0;
+		else
+			return BCME_ERROR;
+	}
+
+	if (h->flags & BDC_FLAG_SUM_GOOD) {
+		DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+		          dhd_ifname(dhd, *ifidx), h->flags));
+		PKTSETSUMGOOD(pktbuf, TRUE);
+	}
+
+	PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+	data_offset = h->dataOffset;
+	PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+
+#ifdef PROP_TXSTATUS
+	if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
+		/*
+		- parse txstatus only for packets that came from the firmware
+		*/
+		dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2),
+			reorder_buf_info, reorder_info_len);
+
+#ifdef BCMDBUS
+#ifndef DHD_WLFC_THREAD
+		dhd_wlfc_commit_packets(dhd,
+			(f_commitpkt_t)dhd_bus_txdata, dhd->bus, NULL, FALSE);
+#endif /* DHD_WLFC_THREAD */
+#endif /* BCMDBUS */
+	}
+#endif /* PROP_TXSTATUS */
+
+exit:
+	PKTPULL(dhd->osh, pktbuf, (data_offset << 2));
+	return 0;
+}
+
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *cdc;
+
+	if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(cdc, 0, sizeof(dhd_prot_t));
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+		DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+		goto fail;
+	}
+
+	dhd->prot = cdc;
+#ifdef BDC
+	dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+	dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+	return 0;
+
+fail:
+	if (cdc != NULL)
+		DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t));
+	return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore?  Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_deinit(dhd);
+#endif
+	DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
+	dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+	/*  copy bus stats */
+
+	dhd->dstats.tx_packets = dhd->tx_packets;
+	dhd->dstats.tx_errors = dhd->tx_errors;
+	dhd->dstats.rx_packets = dhd->rx_packets;
+	dhd->dstats.rx_errors = dhd->rx_errors;
+	dhd->dstats.rx_dropped = dhd->rx_dropped;
+	dhd->dstats.multicast = dhd->rx_multicast;
+	return;
+}
+
+int
+dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef DHD_FW_COREDUMP
+	/* Check the memdump capability */
+	dhd_get_memdump_info(dhd);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef BCMASSERT_LOG
+	dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0)
+		goto done;
+#if defined(BCMDBUS)
+	if (dhd_download_fw_on_driverload) {
+		dhd_conf_reset(dhd);
+		dhd_conf_set_chiprev(dhd, revinfo.chipnum, revinfo.chiprev);
+		dhd_conf_preinit(dhd);
+		dhd_conf_read_config(dhd, dhd->conf_path);
+	}
+#endif /* BCMDBUS */
+
+
+	DHD_SSSR_DUMP_INIT(dhd);
+
+	dhd_process_cid_mac(dhd, TRUE);
+	ret = dhd_preinit_ioctls(dhd);
+	dhd_process_cid_mac(dhd, FALSE);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+
+done:
+	return ret;
+}
+
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+	return BCME_OK;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+/* Nothing to do for CDC */
+}
+
+
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+	uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+	void *plast = NULL, *p;
+	uint32 pkt_cnt = 0;
+
+	if (ptr->pend_pkts == 0) {
+		DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+		*pplast = NULL;
+		*pkt_count = 0;
+		*pkt = NULL;
+		return;
+	}
+	do {
+		p = (void *)(ptr->p[start]);
+		ptr->p[start] = NULL;
+
+		if (p != NULL) {
+			if (plast == NULL)
+				*pkt = p;
+			else
+				PKTSETNEXT(osh, plast, p);
+
+			plast = p;
+			pkt_cnt++;
+		}
+		start++;
+		if (start > ptr->max_idx)
+			start = 0;
+	} while (start != end);
+	*pplast = plast;
+	*pkt_count = pkt_cnt;
+	ptr->pend_pkts -= (uint8)pkt_cnt;
+}
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count)
+{
+	uint8 flow_id, max_idx, cur_idx, exp_idx;
+	struct reorder_info *ptr;
+	uint8 flags;
+	void *cur_pkt, *plast = NULL;
+	uint32 cnt = 0;
+
+	if (pkt == NULL) {
+		if (pkt_count != NULL)
+			*pkt_count = 0;
+		return 0;
+	}
+
+	flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+	flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+	DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+		reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+	/* validate flags and flow id */
+	if (flags == 0xFF) {
+		DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+		*pkt_count = 1;
+		return 0;
+	}
+
+	cur_pkt = *pkt;
+	*pkt = NULL;
+
+	ptr = dhd->reorder_bufs[flow_id];
+	if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+		uint32 buf_size = sizeof(struct reorder_info);
+
+		DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+			__FUNCTION__, flow_id));
+
+		if (ptr == NULL) {
+			DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n",
+				__FUNCTION__, flow_id));
+			*pkt_count = 1;
+			*pkt = cur_pkt;
+			return 0;
+		}
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+			ptr->exp_idx, ptr->exp_idx);
+		/* set it to the last packet */
+		if (plast) {
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+			cnt++;
+		}
+		else {
+			if (cnt != 0) {
+				DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+					__FUNCTION__, cnt));
+			}
+			*pkt = cur_pkt;
+			cnt = 1;
+		}
+		buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+		MFREE(dhd->osh, ptr, buf_size);
+		dhd->reorder_bufs[flow_id] = NULL;
+		*pkt_count = cnt;
+		return 0;
+	}
+	/* all the other cases depend on the existence of the reorder struct for that flow id */
+	if (ptr == NULL) {
+		uint32 buf_size_alloc = sizeof(reorder_info_t);
+		max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+		buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+		/* allocate space to hold the buffers, index etc */
+
+		DHD_REORDER(("%s: alloc buffer of size %d size, reorder info id %d, maxidx %d\n",
+			__FUNCTION__, buf_size_alloc, flow_id, max_idx));
+		ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+		if (ptr == NULL) {
+			DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+			*pkt_count = 1;
+			return 0;
+		}
+		bzero(ptr, buf_size_alloc);
+		dhd->reorder_bufs[flow_id] = ptr;
+		ptr->p = (void *)(ptr+1);
+		ptr->max_idx = max_idx;
+	}
+	if (flags & WLHOST_REORDERDATA_NEW_HOLE)  {
+		DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__));
+		if (ptr->pend_pkts) {
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, ptr->exp_idx);
+			ptr->pend_pkts = 0;
+		}
+		ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+		ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+		ptr->p[ptr->cur_idx] = cur_pkt;
+		ptr->pend_pkts++;
+		*pkt_count = cnt;
+	}
+	else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+		cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+
+		if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
+			/* still in the current hole */
+			/* enqueue the current on the buffer chain */
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+			ptr->cur_idx = cur_idx;
+			DHD_REORDER(("%s: fill up a hole..pending packets is %d\n",
+				__FUNCTION__, ptr->pend_pkts));
+			*pkt_count = 0;
+			*pkt = NULL;
+		}
+		else if (ptr->exp_idx == cur_idx) {
+			/* got the right one ..flush from cur to exp and update exp */
+			DHD_REORDER(("%s: got the right one now, cur_idx is %d\n",
+				__FUNCTION__, cur_idx));
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: Error buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+
+			ptr->cur_idx = cur_idx;
+			ptr->exp_idx = exp_idx;
+
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				cur_idx, exp_idx);
+			*pkt_count = cnt;
+			DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
+				__FUNCTION__, cnt, ptr->pend_pkts));
+		}
+		else {
+			uint8 end_idx;
+			bool flush_current = FALSE;
+			/* both cur and exp are moved now .. */
+			DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n",
+				__FUNCTION__, flow_id, ptr->cur_idx, cur_idx,
+				ptr->exp_idx, exp_idx));
+			if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+				end_idx = ptr->exp_idx;
+			else
+				end_idx = exp_idx;
+
+			/* flush pkts first */
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, end_idx);
+
+			if (cur_idx == ptr->max_idx) {
+				if (exp_idx == 0)
+					flush_current = TRUE;
+			} else {
+				if (exp_idx == cur_idx + 1)
+					flush_current = TRUE;
+			}
+			if (flush_current) {
+				if (plast)
+					PKTSETNEXT(dhd->osh, plast, cur_pkt);
+				else
+					*pkt = cur_pkt;
+				cnt++;
+			}
+			else {
+				ptr->p[cur_idx] = cur_pkt;
+				ptr->pend_pkts++;
+			}
+			ptr->exp_idx = exp_idx;
+			ptr->cur_idx = cur_idx;
+			*pkt_count = cnt;
+		}
+	}
+	else {
+		uint8 end_idx;
+		/* no real packet but update to exp_seq...that means explicit window move */
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+		DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n",
+			__FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx));
+		if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+			end_idx =  ptr->exp_idx;
+		else
+			end_idx =  exp_idx;
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
+		if (plast)
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+		else
+			*pkt = cur_pkt;
+		cnt++;
+		*pkt_count = cnt;
+		/* set the new expected idx */
+		ptr->exp_idx = exp_idx;
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
new file mode 100644
index 0000000..b98fcd3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
@@ -0,0 +1,271 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_cfg80211.c 699163 2017-05-12 05:18:23Z $
+ */
+
+#include <linux/vmalloc.h>
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+
+#ifdef PKT_FILTER_SUPPORT
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+static int dhd_dongle_up = FALSE;
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <brcm_nl80211.h>
+#include <dhd_cfg80211.h>
+
+static s32 wl_dongle_up(struct net_device *ndev);
+static s32 wl_dongle_down(struct net_device *ndev);
+
+/**
+ * Function implementations
+ */
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (!dhd_dongle_up) {
+		WL_ERR(("Dongle is already down\n"));
+		return err;
+	}
+
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wl_dongle_down(ndev);
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode |= val;
+	WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode));
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* IF P2P is enabled, disable arpoe */
+		dhd_arp_offload_set(dhd, 0);
+		dhd_arp_offload_enable(dhd, false);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE);
+	WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* IF P2P is disabled, enable arpoe back for STA mode. */
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+		dhd_arp_offload_enable(dhd, true);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, const char *name,
+	uint8 *mac, uint8 bssidx, const char *dngl_name)
+{
+	return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name);
+}
+
+int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg,
+	int ifidx, struct net_device* ndev, bool rtnl_lock_reqd)
+{
+	return dhd_register_if(cfg->pub, ifidx, rtnl_lock_reqd);
+}
+
+int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
+	int ifidx, struct net_device* ndev, bool rtnl_lock_reqd)
+{
+	return dhd_remove_if(cfg->pub, ifidx, rtnl_lock_reqd);
+}
+
+struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
+{
+	if (ndev) {
+		if (ndev->ieee80211_ptr) {
+			kfree(ndev->ieee80211_ptr);
+			ndev->ieee80211_ptr = NULL;
+		}
+		free_netdev(ndev);
+		return NULL;
+	}
+
+	return ndev;
+}
+
+void dhd_netdev_free(struct net_device *ndev)
+{
+#ifdef WL_CFG80211
+	ndev = dhd_cfg80211_netdev_free(ndev);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	if (ndev)
+		free_netdev(ndev);
+#endif
+}
+
+static s32
+wl_dongle_up(struct net_device *ndev)
+{
+	s32 err = 0;
+	u32 local_up = 0;
+
+	err = wldev_ioctl_set(ndev, WLC_UP, &local_up, sizeof(local_up));
+	if (unlikely(err)) {
+		WL_ERR(("WLC_UP error (%d)\n", err));
+	}
+	return err;
+}
+
+static s32
+wl_dongle_down(struct net_device *ndev)
+{
+	s32 err = 0;
+	u32 local_down = 0;
+
+	err = wldev_ioctl_set(ndev, WLC_DOWN, &local_down, sizeof(local_down));
+	if (unlikely(err)) {
+		WL_ERR(("WLC_DOWN error (%d)\n", err));
+	}
+	return err;
+}
+
+
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+	struct net_device *ndev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (dhd_dongle_up) {
+		WL_ERR(("Dongle is already up\n"));
+		return err;
+	}
+
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	err = wl_dongle_up(ndev);
+	if (unlikely(err)) {
+		WL_ERR(("wl_dongle_up failed\n"));
+		goto default_conf_out;
+	}
+	dhd_dongle_up = true;
+
+default_conf_out:
+
+	return err;
+
+}
+
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+	const struct bcm_nlmsg_hdr *nlioc, void *buf)
+{
+	struct net_device *ndev = NULL;
+	dhd_pub_t *dhd;
+	dhd_ioctl_t ioc = { 0, NULL, 0, 0, 0, 0, 0};
+	int ret = 0;
+	int8 index;
+
+	WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+
+	dhd = cfg->pub;
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->hang_was_sent) {
+		WL_ERR(("HANG was sent up earlier\n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+		DHD_OS_WAKE_UNLOCK(dhd);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+	index = dhd_net2idx(dhd->info, ndev);
+	if (index == DHD_BAD_IF) {
+		WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+		ret = BCME_ERROR;
+		goto done;
+	}
+
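+	/* Repackage the vendor netlink request into a dhd ioctl and run it
+	 * through the common ioctl processing path below.
+	 */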
+	ioc.cmd = nlioc->cmd;
+	ioc.len = nlioc->len;
+	ioc.set = nlioc->set;
+	ioc.driver = nlioc->magic;
+	ioc.buf = buf;
+	ret = dhd_ioctl_process(dhd, index, &ioc, buf);
+	if (ret) {
+		WL_TRACE(("dhd_ioctl_process return err %d\n", ret));
+		ret = OSL_ERROR(ret);
+		goto done;
+	}
+
+done:
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return ret;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
new file mode 100644
index 0000000..20923ce
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
@@ -0,0 +1,54 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_cfg80211.h 612483 2016-01-14 03:44:27Z $
+ */
+
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <brcm_nl80211.h>
+
+#ifndef WL_ERR
+#define WL_ERR CFG80211_ERR
+#endif
+#ifndef WL_TRACE
+#define WL_TRACE CFG80211_TRACE
+#endif
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg,
+	struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void  *data);
+
+#endif /* __DHD_CFG80211__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
new file mode 100644
index 0000000..4f1af0e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -0,0 +1,6020 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_common.c 710862 2017-07-14 07:43:59Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+#include <dhd_ip.h>
+#include <bcmevent.h>
+
+#ifdef PCIE_FULL_DONGLE
+#include <bcmmsgbuf.h>
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef SHOW_LOGTRACE
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_config.h>
+#include <bcmsdbus.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_mschdbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif
+
+#ifdef DNGL_EVENT_SUPPORT
+#include <dnglevent.h>
+#endif
+
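+/* Host/dongle byte-order conversions: identity macros here, assuming the host is little-endian like the dongle. */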
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef DHD_WMF
+#include <dhd_linux.h>
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+#ifdef DHD_L2_FILTER
+#include <dhd_l2_filter.h>
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_PSTA
+#include <dhd_psta.h>
+#endif /* DHD_PSTA */
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
+
+#ifdef DHD_WET
+#include <dhd_wet.h>
+#endif /* DHD_WET */
+
+#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI)
+#include <nvram_4364.h>
+#endif
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+
+extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
+	bcm_event_msg_u_t *out_event);
+
+/* By default all logs are enabled */
+int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL;
+
+
+#if defined(WL_WLC_SHIM)
+#include <wl_shim.h>
+#else
+#endif /* WL_WLC_SHIM */
+
+#ifdef DHD_ULP
+#include <dhd_ulp.h>
+#endif /* DHD_ULP */
+
+#ifdef DHD_DEBUG
+#include <sdiovar.h>
+#endif /* DHD_DEBUG */
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+
+#ifdef REPORT_FATAL_TIMEOUTS
+/* Default timeout value in ms */
+#define SCAN_TIMEOUT_DEFAULT    1
+#define JOIN_TIMEOUT_DEFAULT    7500
+#ifdef DHD_EFI
+#define BUS_TIMEOUT_DEFAULT     8000000  /* 800ms, in units of 100ns */
+#define CMD_TIMEOUT_DEFAULT     15000000 /* 1.5sec, in units of 100ns */
+#else
+#define BUS_TIMEOUT_DEFAULT     800
+#define CMD_TIMEOUT_DEFAULT     1200
+#endif /* DHD_EFI */
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef SHOW_LOGTRACE
+#define BYTES_AHEAD_NUM		11	/* the address in the map file appears this many bytes before the symbol string */
+#define READ_NUM_BYTES		1000 /* number of bytes to read from the map file per iteration */
+#define GO_BACK_FILE_POS_NUM_BYTES	100 /* number of bytes to rewind the file position by */
+static char *ramstart_str = "text_start"; /* map-file string marking the RAM start address */
+static char *rodata_start_str = "rodata_start"; /* map-file string marking the rodata start address */
+static char *rodata_end_str = "rodata_end"; /* map-file string marking the rodata end address */
+#define RAMSTART_BIT	0x01
+#define RDSTART_BIT		0x02
+#define RDEND_BIT		0x04
+#define ALL_MAP_VAL		(RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
+#endif /* SHOW_LOGTRACE */
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+#if !defined(AP) && defined(WLP2P)
+extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
+#endif
+
+extern int dhd_socram_dump(struct dhd_bus *bus);
+
+#ifdef DNGL_EVENT_SUPPORT
+static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
+	bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
+static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
+	size_t pktlen);
+#endif /* DNGL_EVENT_SUPPORT */
+
+#define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
+
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE        "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#endif /* DHD_DEBUG */
+
+#define CHIPID_MISMATCH	8
+
+#if defined(DHD_DEBUG)
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#else
+const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR;
+#endif 
+char fw_version[FW_VER_STR_LEN] = "\0";
+char clm_version[CLM_VER_STR_LEN] = "\0";
+
+char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+#if defined(TRAFFIC_MGMT_DWM)
+static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
+	trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len);
+#endif
+
+/* IOVar table */
+enum {
+	IOV_VERSION = 1,
+	IOV_WLMSGLEVEL,
+	IOV_MSGLEVEL,
+	IOV_BCMERRORSTR,
+	IOV_BCMERROR,
+	IOV_WDTICK,
+	IOV_DUMP,
+	IOV_CLEARCOUNTS,
+	IOV_LOGDUMP,
+	IOV_LOGCAL,
+	IOV_LOGSTAMP,
+	IOV_GPIOOB,
+	IOV_IOCTLTIMEOUT,
+	IOV_CONS,
+	IOV_DCONSOLE_POLL,
+#if defined(DHD_DEBUG)
+	IOV_DHD_JOIN_TIMEOUT_DBG,
+	IOV_SCAN_TIMEOUT,
+	IOV_MEM_DEBUG,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+	IOV_PROPTXSTATUS_ENABLE,
+	IOV_PROPTXSTATUS_MODE,
+	IOV_PROPTXSTATUS_OPT,
+	IOV_PROPTXSTATUS_MODULE_IGNORE,
+	IOV_PROPTXSTATUS_CREDIT_IGNORE,
+	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
+	IOV_PROPTXSTATUS_RXPKT_CHK,
+#endif /* PROP_TXSTATUS */
+	IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+	IOV_WLPKTDLYSTAT_SZ,
+#endif
+	IOV_CHANGEMTU,
+	IOV_HOSTREORDER_FLOWS,
+#ifdef DHDTCPACK_SUPPRESS
+	IOV_TCPACK_SUPPRESS,
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	IOV_WMF_BSS_ENAB,
+	IOV_WMF_UCAST_IGMP,
+	IOV_WMF_MCAST_DATA_SENDUP,
+#ifdef WL_IGMP_UCQUERY
+	IOV_WMF_UCAST_IGMP_QUERY,
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	IOV_WMF_UCAST_UPNP,
+#endif /* DHD_UCAST_UPNP */
+	IOV_WMF_PSTA_DISABLE,
+#endif /* DHD_WMF */
+#if defined(TRAFFIC_MGMT_DWM)
+	IOV_TRAFFIC_MGMT_DWM,
+#endif 
+	IOV_AP_ISOLATE,
+#ifdef DHD_L2_FILTER
+	IOV_DHCP_UNICAST,
+	IOV_BLOCK_PING,
+	IOV_PROXY_ARP,
+	IOV_GRAT_ARP,
+#endif /* DHD_L2_FILTER */
+	IOV_DHD_IE,
+#ifdef DHD_PSTA
+	IOV_PSTA,
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+	IOV_WET,
+	IOV_WET_HOST_IPV4,
+	IOV_WET_HOST_MAC,
+#endif /* DHD_WET */
+	IOV_CFG80211_OPMODE,
+	IOV_ASSERT_TYPE,
+	IOV_LMTEST,
+#ifdef DHD_MCAST_REGEN
+	IOV_MCAST_REGEN_BSS_ENABLE,
+#endif
+#ifdef SHOW_LOGTRACE
+	IOV_DUMP_TRACE_LOG,
+#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+	IOV_SCAN_TO,
+	IOV_JOIN_TO,
+	IOV_CMD_TO,
+	IOV_OQS_TO,
+#endif /* REPORT_FATAL_TIMEOUTS */
+	IOV_DONGLE_TRAP_TYPE,
+	IOV_DONGLE_TRAP_INFO,
+	IOV_BPADDR,
+	IOV_LAST,
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+	IOV_LOG_CAPTURE_ENABLE,
+	IOV_LOG_DUMP
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+	{"version",	IOV_VERSION,	0,	0,	IOVT_BUFFER,	sizeof(dhd_version) },
+	{"wlmsglevel",	IOV_WLMSGLEVEL,	0,	0,	IOVT_UINT32,	0 },
+#ifdef DHD_DEBUG
+	{"msglevel",	IOV_MSGLEVEL,	0,	0,	IOVT_UINT32,	0 },
+	{"mem_debug",   IOV_MEM_DEBUG,  0,      0,      IOVT_BUFFER,    0 },
+#endif /* DHD_DEBUG */
+	{"bcmerrorstr", IOV_BCMERRORSTR, 0, 0,	IOVT_BUFFER,	BCME_STRLEN },
+	{"bcmerror",	IOV_BCMERROR,	0,	0,	IOVT_INT8,	0 },
+	{"wdtick",	IOV_WDTICK, 0,	0,	IOVT_UINT32,	0 },
+	{"dump",	IOV_DUMP,	0,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"cons",	IOV_CONS,	0,	0,	IOVT_BUFFER,	0 },
+	{"dconpoll",	IOV_DCONSOLE_POLL, 0,	0,	IOVT_UINT32,	0 },
+	{"clearcounts", IOV_CLEARCOUNTS, 0, 0,	IOVT_VOID,	0 },
+	{"gpioob",	IOV_GPIOOB,	0,	0,	IOVT_UINT32,	0 },
+	{"ioctl_timeout",	IOV_IOCTLTIMEOUT,	0,	0,	IOVT_UINT32,	0 },
+#ifdef PROP_TXSTATUS
+	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	0,	IOVT_BOOL,	0 },
+	/*
+	 * Set the proptxstatus operation mode:
+	 * 0 - Do not do any proptxstatus flow control
+	 * 1 - Use implied credit from a packet status
+	 * 2 - Use explicit credit
+	 */
+	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	0, IOVT_UINT32,	0 },
+	{"proptx_opt", IOV_PROPTXSTATUS_OPT,	0,	0, IOVT_UINT32,	0 },
+	{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
+	{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
+	{"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
+	{"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
+#endif /* PROP_TXSTATUS */
+	{"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+	{"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, 0, IOVT_UINT8, 0 },
+#endif
+	{"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
+	{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
+	(WLHOST_REORDERDATA_MAXFLOWS + 1) },
+#ifdef DHDTCPACK_SUPPRESS
+	{"tcpack_suppress",	IOV_TCPACK_SUPPRESS,	0,	0, IOVT_UINT8,	0 },
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	{"wmf_bss_enable", IOV_WMF_BSS_ENAB,	0,	0, IOVT_BOOL,	0 },
+	{"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP,	0,	0, IOVT_BOOL,	0 },
+	{"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP,	0,	0, IOVT_BOOL,	0 },
+#ifdef WL_IGMP_UCQUERY
+	{"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), 0, IOVT_BOOL, 0 },
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	{"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_UCAST_UPNP */
+	{"wmf_psta_disable", IOV_WMF_PSTA_DISABLE, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_WMF */
+#if defined(TRAFFIC_MGMT_DWM)
+	{"trf_mgmt_filters_add", IOV_TRAFFIC_MGMT_DWM, (0), 0, IOVT_BUFFER, 0},
+#endif 
+#ifdef DHD_L2_FILTER
+	{"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_L2_FILTER */
+	{"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
+#ifdef DHD_L2_FILTER
+	{"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
+	{"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
+	{"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
+#endif /* DHD_L2_FILTER */
+	{"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
+#ifdef DHD_PSTA
+	/* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
+	{"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+	/* WET Mode configuration. 0: DISABLED 1: WET */
+	{"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
+	{"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
+	{"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
+#endif /* DHD_WET */
+	{"op_mode",	IOV_CFG80211_OPMODE,	0,	0, IOVT_UINT32,	0 },
+	{"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
+	{"lmtest", IOV_LMTEST,	0,	0, IOVT_UINT32,	0 },
+#ifdef DHD_MCAST_REGEN
+	{"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
+#endif
+#ifdef SHOW_LOGTRACE
+	{"dump_trace_buf", IOV_DUMP_TRACE_LOG,	0, 0, IOVT_BUFFER,	sizeof(trace_buf_info_t) },
+#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+	{"scan_timeout", IOV_SCAN_TO, 0, 0, IOVT_UINT32, 0 },
+	{"join_timeout", IOV_JOIN_TO, 0, 0, IOVT_UINT32, 0 },
+	{"cmd_timeout", IOV_CMD_TO, 0, 0, IOVT_UINT32, 0 },
+	{"oqs_timeout", IOV_OQS_TO, 0, 0, IOVT_UINT32, 0 },
+#endif /* REPORT_FATAL_TIMEOUTS */
+	{"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
+	{"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
+#ifdef DHD_DEBUG
+	{"bpaddr", IOV_BPADDR,	0, 0, IOVT_BUFFER,	sizeof(sdreg_t) },
+#endif /* DHD_DEBUG */
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+	{"log_capture_enable", IOV_LOG_CAPTURE_ENABLE, 0, 0, IOVT_UINT8, 0},
+	{"log_dump", IOV_LOG_DUMP,	0, 0, IOVT_UINT8, 0},
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+	{NULL, 0, 0, 0, 0, 0 }
+};
+
+#define DHD_IOVAR_BUF_SIZE	128
+
+bool
+dhd_query_bus_erros(dhd_pub_t *dhdp)
+{
+	bool ret = FALSE;
+
+	if (dhdp->dongle_reset) {
+		DHD_ERROR(("%s: Dongle Reset occurred, cannot proceed\n",
+			__FUNCTION__));
+		ret = TRUE;
+	}
+
+	if (dhdp->dongle_trap_occured) {
+		DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n",
+			__FUNCTION__));
+		ret = TRUE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
+		dhd_os_send_hang_message(dhdp);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+	}
+
+	if (dhdp->iovar_timeout_occured) {
+		DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
+			__FUNCTION__));
+		ret = TRUE;
+	}
+
+#ifdef PCIE_FULL_DONGLE
+	if (dhdp->d3ack_timeout_occured) {
+		DHD_ERROR(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
+			__FUNCTION__));
+		ret = TRUE;
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+	return ret;
+}
+
+#ifdef DHD_SSSR_DUMP
+int
+dhd_sssr_mempool_init(dhd_pub_t *dhd)
+{
+	dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
+	if (dhd->sssr_mempool == NULL) {
+		DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
+			__FUNCTION__));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
+
+void
+dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
+{
+	if (dhd->sssr_mempool) {
+		MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
+		dhd->sssr_mempool = NULL;
+	}
+}
+
+int
+dhd_get_sssr_reg_info(dhd_pub_t *dhd)
+{
+	int ret = BCME_ERROR;
+
+	DHD_ERROR(("%s: get sssr_reg_info\n", __FUNCTION__));
+	/* get sssr_reg_info from firmware */
+	memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info));
+	if (bcm_mkiovar("sssr_reg_info", 0, 0, (char *)&dhd->sssr_reg_info,
+		sizeof(dhd->sssr_reg_info))) {
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, &dhd->sssr_reg_info,
+			sizeof(dhd->sssr_reg_info), FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: dhd_wl_ioctl_cmd failed (error=%d)\n", __FUNCTION__, ret));
+		}
+	} else {
+		DHD_ERROR(("%s: bcm_mkiovar failed\n", __FUNCTION__));
+	}
+
+	return ret;
+}
+
+uint32
+dhd_get_sssr_bufsize(dhd_pub_t *dhd)
+{
+	int i;
+	uint32 sssr_bufsize = 0;
+	/* Sum the save/restore region sizes across all D11 cores */
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		sssr_bufsize += dhd->sssr_reg_info.mac_regs[i].sr_size;
+	}
+	sssr_bufsize += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
+
+	/* Double the size as different dumps will be saved before and after SR */
+	sssr_bufsize = 2 * sssr_bufsize;
+
+	return sssr_bufsize;
+}
+
+int
+dhd_sssr_dump_init(dhd_pub_t *dhd)
+{
+	int i;
+	uint32 sssr_bufsize;
+	uint32 mempool_used = 0;
+
+	dhd->sssr_inited = FALSE;
+
+	/* check if sssr mempool is allocated */
+	if (dhd->sssr_mempool == NULL) {
+		DHD_ERROR(("%s: sssr_mempool is not allocated\n",
+			__FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Get SSSR reg info */
+	if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
+		DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Validate structure version */
+	if (dhd->sssr_reg_info.version != SSSR_REG_INFO_VER) {
+		DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n",
+			__FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER));
+		return BCME_ERROR;
+	}
+
+	/* Validate structure length */
+	if (dhd->sssr_reg_info.length != sizeof(dhd->sssr_reg_info)) {
+		DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n",
+			__FUNCTION__, (int)dhd->sssr_reg_info.length,
+			(int)sizeof(dhd->sssr_reg_info)));
+		return BCME_ERROR;
+	}
+
+	/* validate fifo size */
+	sssr_bufsize = dhd_get_sssr_bufsize(dhd);
+	if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
+		DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
+			__FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
+		return BCME_ERROR;
+	}
+
+	/* init all pointers to NULL */
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		dhd->sssr_d11_before[i] = NULL;
+		dhd->sssr_d11_after[i] = NULL;
+	}
+	dhd->sssr_vasip_buf_before = NULL;
+	dhd->sssr_vasip_buf_after = NULL;
+
+	/* Allocate memory */
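+	/* Each D11 core (and VASIP) gets a "before" and an "after" SR buffer carved out of the preallocated mempool. */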
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		if (dhd->sssr_reg_info.mac_regs[i].sr_size) {
+			dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
+			mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
+
+			dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
+			mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
+		}
+	}
+
+	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+		dhd->sssr_vasip_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
+		mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
+
+		dhd->sssr_vasip_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
+		mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
+	}
+
+	dhd->sssr_inited = TRUE;
+
+	return BCME_OK;
+
+}
+
+void
+dhd_sssr_dump_deinit(dhd_pub_t *dhd)
+{
+	int i;
+
+	dhd->sssr_inited = FALSE;
+	/* init all pointers to NULL */
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		dhd->sssr_d11_before[i] = NULL;
+		dhd->sssr_d11_after[i] = NULL;
+	}
+	dhd->sssr_vasip_buf_before = NULL;
+	dhd->sssr_vasip_buf_after = NULL;
+
+	return;
+}
+
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_FW_COREDUMP
+void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
+{
+	if (!dhd_pub->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+		dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
+			DHD_PREALLOC_MEMDUMP_RAM, length);
+#else
+		dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+	}
+
+	if (dhd_pub->soc_ram == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
+			__FUNCTION__));
+		dhd_pub->soc_ram_length = 0;
+	} else {
+		memset(dhd_pub->soc_ram, 0, length);
+		dhd_pub->soc_ram_length = length;
+	}
+
+	/* soc_ram free handled in dhd_{free,clear} */
+	return dhd_pub->soc_ram;
+}
+#endif /* DHD_FW_COREDUMP */
+
+/* to NDIS developer, the structure dhd_common is redundant,
+ * please do NOT merge it back from other branches !!!
+ */
+
+int
+dhd_common_socram_dump(dhd_pub_t *dhdp)
+{
+#ifdef BCMDBUS
+	return 0;
+#else
+	return dhd_socram_dump(dhdp->bus);
+#endif /* BCMDBUS */
+}
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	if (!dhdp || !dhdp->prot || !buf) {
+		return BCME_ERROR;
+	}
+
+	bcm_binit(strbuf, buf, buflen);
+
+	/* Base DHD info */
+	bcm_bprintf(strbuf, "%s\n", dhd_version);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+	            dhdp->up, dhdp->txoff, dhdp->busstate);
+	bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
+	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+	            dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
+
+	bcm_bprintf(strbuf, "dongle stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
+	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
+	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+	bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
+
+	bcm_bprintf(strbuf, "bus stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu  tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
+	            dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
+	bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
+	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
+	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+	bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
+	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+	bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
+	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+	bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
+	            dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
+	bcm_bprintf(strbuf, "\n");
+
+#ifdef DMAMAP_STATS
+	/* Add DMA MAP info */
+	bcm_bprintf(strbuf, "DMA MAP stats: \n");
+	bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
+			dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
+			dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
+#ifndef IOCTLRESP_USE_CONSTMEM
+	bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
+			dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+	bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
+			"TSBUF RX: %lu size %luK\n",
+			dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
+			dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
+			dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
+	bcm_bprintf(strbuf, "Total : %luK \n",
+			KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
+			dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
+			dhdp->dma_stats.tsbuf_rx_sz));
+#endif /* DMAMAP_STATS */
+
+	/* Add any prot info */
+	dhd_prot_dump(dhdp, strbuf);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any bus info */
+	dhd_bus_dump(dhdp, strbuf);
+
+
+#if defined(DHD_LB_STATS)
+	dhd_lb_stats_dump(dhdp, strbuf);
+#endif /* DHD_LB_STATS */
+#ifdef DHD_WET
+	if (dhd_get_wet_mode(dhdp)) {
+		bcm_bprintf(strbuf, "Wet Dump:\n");
+		dhd_wet_dump(dhdp, strbuf);
+	}
+#endif /* DHD_WET */
+	return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+void
+dhd_dump_to_kernelog(dhd_pub_t *dhdp)
+{
+	char buf[512];
+
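+	/* With bcm_bprintf_bypass set, the dump output is emitted directly to
+	 * the kernel log rather than only into the small local buffer.
+	 */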
+	DHD_ERROR(("F/W version: %s\n", fw_version));
+	bcm_bprintf_bypass = TRUE;
+	dhd_dump(dhdp, buf, sizeof(buf));
+	bcm_bprintf_bypass = FALSE;
+}
+
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
+{
+	wl_ioctl_t ioc;
+
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
+}
+
+int
+dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
+	int cmd, uint8 set, int ifidx)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	int ret = -1;
+
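+	/* Pack the iovar name (with no payload) into iovbuf, issue the ioctl,
+	 * then read the returned 32-bit value back from the same buffer.
+	 */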
+	memset(iovbuf, 0, sizeof(iovbuf));
+	if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
+		ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
+		if (!ret) {
+			*pval = ltoh32(*((uint*)iovbuf));
+		} else {
+			DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
+				__FUNCTION__, name, ret));
+		}
+	} else {
+		DHD_ERROR(("%s: mkiovar %s failed\n",
+			__FUNCTION__, name));
+	}
+
+	return ret;
+}
+
+int
+dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
+	int cmd, uint8 set, int ifidx)
+{
+	char iovbuf[WLC_IOCTL_SMLEN] = {0};
+	int ret = -1;
+	int lval = htol32(val);
+	uint len;
+
+	len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
+
+	if (len) {
+		ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
+		if (ret) {
+			DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
+				__FUNCTION__, name, ret));
+		}
+	} else {
+		DHD_ERROR(("%s: mkiovar %s failed\n",
+			__FUNCTION__, name));
+	}
+
+	return ret;
+}
+
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
+{
+	int ret = BCME_ERROR;
+	unsigned long flags;
+#ifdef DUMP_IOCTL_IOV_LIST
+	dhd_iov_li_t *iov_li;
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef KEEPIF_ON_DEVICE_RESET
+		if (ioc->cmd == WLC_GET_VAR) {
+			dbus_config_t config;
+			config.general_param = 0;
+			if (!strcmp(buf, "wowl_activate")) {
+				config.general_param = 2; /* becomes 1 (TRUE) after the decrement below */
+			} else if (!strcmp(buf, "wowl_clear")) {
+				config.general_param = 1; /* becomes 0 (FALSE) after the decrement below */
+			}
+			if (config.general_param) {
+				config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
+				config.general_param--;
+				dbus_set_config(dhd_pub->dbus, &config);
+			}
+		}
+#endif /* KEEPIF_ON_DEVICE_RESET */
+
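+	/* Serialize the request: take the proto lock, bail out if the bus is
+	 * down or suspending, and mark the bus busy-in-iovar while the ioctl
+	 * is in flight.
+	 */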
+	if (dhd_os_proto_block(dhd_pub))
+	{
+#ifdef DHD_LOG_DUMP
+		int slen, i, val, rem, lval, min_len;
+		char *pval, *pos, *msg;
+		char tmp[64];
+
+		/* WLC_GET_VAR */
+		if (ioc->cmd == WLC_GET_VAR) {
+			min_len = MIN(sizeof(tmp) - 1, strlen(buf));
+			memset(tmp, 0, sizeof(tmp));
+			bcopy(buf, tmp, min_len);
+			tmp[min_len] = '\0';
+		}
+#endif /* DHD_LOG_DUMP */
+		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
+#ifdef DHD_EFI
+			DHD_INFO(("%s: returning as busstate=%d\n",
+				__FUNCTION__, dhd_pub->busstate));
+#else
+			DHD_ERROR(("%s: returning as busstate=%d\n",
+				__FUNCTION__, dhd_pub->busstate));
+#endif /* DHD_EFI */
+			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+			dhd_os_proto_unblock(dhd_pub);
+			return -ENODEV;
+		}
+		DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
+		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+		dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+		if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
+			DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+				__FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
+			DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
+			dhd_os_busbusy_wake(dhd_pub);
+			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+			dhd_os_proto_unblock(dhd_pub);
+			return -ENODEV;
+		}
+		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#if defined(WL_WLC_SHIM)
+		{
+			struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
+
+			wl_io_pport_t io_pport;
+			io_pport.dhd_pub = dhd_pub;
+			io_pport.ifidx = ifidx;
+
+			ret = wl_shim_ioctl(shim, ioc, len, &io_pport);
+			if (ret != BCME_OK) {
+				DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n",
+					__FUNCTION__, ioc->cmd, ret));
+			}
+		}
+#else
+#ifdef DUMP_IOCTL_IOV_LIST
+		if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
+			if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
+				DHD_ERROR(("iovar dump list item allocation Failed\n"));
+			} else {
+				iov_li->cmd = ioc->cmd;
+				bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
+				dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
+					&iov_li->list);
+			}
+		}
+#endif /* DUMP_IOCTL_IOV_LIST */
+		ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
+#ifdef DUMP_IOCTL_IOV_LIST
+		if (ret == -ETIMEDOUT) {
+			DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
+				IOV_LIST_MAX_LEN));
+			dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
+		}
+#endif /* DUMP_IOCTL_IOV_LIST */
+#endif /* defined(WL_WLC_SHIM) */
+#ifdef DHD_LOG_DUMP
+		if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) {
+			lval = 0;
+			slen = strlen(buf) + 1;
+			msg = (char*)buf;
+			if (len >= slen + sizeof(lval)) {
+				if (ioc->cmd == WLC_GET_VAR) {
+					msg = tmp;
+					lval = *(int*)buf;
+				} else {
+					min_len = MIN(ioc->len - slen, sizeof(int));
+					bcopy((msg + slen), &lval, min_len);
+				}
+			}
+			DHD_ERROR_MEM(("%s: cmd: %d, msg: %s, val: 0x%x, len: %d, set: %d\n",
+				ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
+				ioc->cmd, msg, lval, ioc->len, ioc->set));
+		} else {
+			slen = ioc->len;
+			if (buf != NULL) {
+				val = *(int*)buf;
+				pval = (char*)buf;
+				pos = tmp;
+				rem = sizeof(tmp);
+				memset(tmp, 0, sizeof(tmp));
+				for (i = 0; i < slen; i++) {
+					if (rem <= 3) {
+						/* need room for 2 hex digits, a space and the terminating NUL */
+						break;
+					}
+					pos += snprintf(pos, rem, "%02x ", pval[i]);
+					rem = sizeof(tmp) - (int)(pos - tmp);
+				}
+				/* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
+				if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION)
+					DHD_ERROR_MEM(("WLC_IOCTL: cmd: %d, val: %d(%s), "
+						"len: %d, set: %d\n",
+						ioc->cmd, val, tmp, ioc->len, ioc->set));
+			} else {
+				DHD_ERROR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
+			}
+		}
+#endif /* DHD_LOG_DUMP */
+		if (ret && dhd_pub->up) {
+			/* Send hang event only if dhd_open() was success */
+			dhd_os_check_hang(dhd_pub, ifidx, ret);
+		}
+
+		if (ret == -ETIMEDOUT && !dhd_pub->up) {
+			DHD_ERROR(("%s: 'resumed on timeout' error occurred "
+				"before the interface was brought up\n", __FUNCTION__));
+			dhd_pub->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+		DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
+		dhd_os_busbusy_wake(dhd_pub);
+		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+		dhd_os_proto_unblock(dhd_pub);
+
+	}
+
+	return ret;
+}
+
+uint wl_get_port_num(wl_io_pport_t *io_pport)
+{
+	return 0;
+}
+
+/* Get bssidx from iovar params
+ * Input:   dhd_pub - pointer to dhd_pub_t
+ *	    params  - IOVAR params
+ * Output:  idx	    - BSS index
+ *	    val	    - pointer to the IOVAR arguments
+ */
+static int
+dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
+{
+	char *prefix = "bsscfg:";
+	uint32	bssidx;
+
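+	/* Per-BSS iovars arrive as: "bsscfg:" <iovar name> '\0' <uint32 bssidx> <arguments> */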
+	if (!(strncmp(params, prefix, strlen(prefix)))) {
+		/* per bss setting should be prefixed with 'bsscfg:' */
+		const char *p = params + strlen(prefix);
+
+		/* Skip Name */
+		while (*p != '\0')
+			p++;
+		/* skip the null terminator */
+		p = p + 1;
+		bcopy(p, &bssidx, sizeof(uint32));
+		/* Get corresponding dhd index */
+		bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
+
+		if (bssidx >= DHD_MAX_IFS) {
+			DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+
+		/* skip bss idx */
+		p += sizeof(uint32);
+		*val = p;
+		*idx = bssidx;
+	} else {
+		DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+#if defined(DHD_DEBUG) && defined(BCMDBUS)
+/* USB Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+	DHD_TRACE(("%s \n", __FUNCTION__));
+
+	return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
+
+}
+#endif /* DHD_DEBUG && BCMDBUS  */
+
+#ifdef DHD_DEBUG
+int
+dhd_mem_debug(dhd_pub_t *dhd, char *msg, uint msglen)
+{
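+	/*
+	 * Memory-waste debug commands (console hooks mwalloc/mwquery/mwfree):
+	 *   "query"        - list the tracked <id, size> allocations
+	 *   "alloc <size>" - ask the dongle to allocate <size> KB and track the returned handle
+	 *   "free <id>"    - tell the dongle to release the chunk tracked under <id>
+	 */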
+	unsigned long int_arg = 0;
+	char *p;
+	char *end_ptr = NULL;
+	dhd_dbg_mwli_t *mw_li;
+	dll_t *item, *next;
+	/* check whether mwalloc, mwquery or mwfree was supplied an argument after a space */
+	p = bcmstrstr(msg, " ");
+	if (p != NULL) {
+		/* replace the space with a null so the firmware sees the command and its argument as separate fields */
+		*p = '\0';
+		/* store the argument in int_arg */
+		int_arg = bcm_strtoul(p+1, &end_ptr, 10);
+	}
+
+	if (!p && !strcmp(msg, "query")) {
+		/* let's query the list internally */
+		if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
+			DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
+			/* reset the id */
+			dhd->mw_id = 0;
+		} else {
+			for (item = dll_head_p(&dhd->mw_list_head);
+					!dll_end(&dhd->mw_list_head, item); item = next) {
+				next = dll_next_p(item);
+				mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
+				DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
+			}
+		}
+	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
+		int32 alloc_handle;
+		/* convert the size from KB to bytes and append it as a binary int32 */
+		*((int32 *)(p+1)) = int_arg*1024;
+		*(p+1+sizeof(int32)) = '\0';
+
+		/* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
+		 * 1 byte for the null character
+		 */
+		msglen = strlen(msg) + sizeof(int32) + 1;
+		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen, FALSE, 0) < 0) {
+			DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
+		}
+
+		/* returned allocated handle from dongle, basically address of the allocated unit */
+		alloc_handle = *((int32 *)msg);
+
+		/* add a node in the list with tuple <id, handle, size> */
+		if (alloc_handle == 0) {
+			DHD_ERROR(("Requested size could not be allocated\n"));
+		} else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
+			DHD_ERROR(("mw list item allocation Failed\n"));
+		} else {
+			mw_li->id = dhd->mw_id++;
+			mw_li->handle = alloc_handle;
+			mw_li->size = int_arg;
+			/* append the node in the list */
+			dll_append(&dhd->mw_list_head, &mw_li->list);
+		}
+	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
+		/* inform dongle to free wasted chunk */
+		int handle = 0;
+		int size = 0;
+		for (item = dll_head_p(&dhd->mw_list_head);
+				!dll_end(&dhd->mw_list_head, item); item = next) {
+			next = dll_next_p(item);
+			mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
+
+			if (mw_li->id == (int)int_arg) {
+				handle = mw_li->handle;
+				size = mw_li->size;
+				dll_delete(item);
+				MFREE(dhd->osh, mw_li, sizeof(*mw_li));
+			}
+		}
+		if (handle) {
+			int len;
+			/* append the free handle and the chunk size in first 8 bytes
+			 * after the command and null character
+			 */
+			*((int32 *)(p+1)) = handle;
+			*((int32 *)((p+1)+sizeof(int32))) = size;
+			/* append null as terminator */
+			*(p+1+2*sizeof(int32)) = '\0';
+			/* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size
+			 * + 1 byte for the null character
+			 */
+			len = strlen(msg) + 2*sizeof(int32) + 1;
+			/* send iovar to free the chunk */
+			if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
+				DHD_ERROR(("IOCTL failed for memdebug free\n"));
+			}
+		} else {
+			DHD_ERROR(("specified id does not exist\n"));
+		}
+	} else {
+		/* for all the wrong argument formats */
+		return BCME_BADARG;
+	}
+	return 0;
+}
+
+extern void
+dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
+{
+	dll_t *item;
+	dhd_dbg_mwli_t *mw_li;
+	while (!(dll_empty(list_head))) {
+		item = dll_head_p(list_head);
+		mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
+		dll_delete(item);
+		MFREE(dhd->osh, mw_li, sizeof(*mw_li));
+	}
+}
+#endif /* DHD_DEBUG */
+
+#ifdef PKT_STATICS
+extern pkt_statics_t tx_statics;
+extern void dhdsdio_txpktstatics(void);
+#endif
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+            void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	uint32 dhd_ver_len, bus_api_rev_len;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_VERSION):
+		/* Need to have checked buffer length */
+		dhd_ver_len = strlen(dhd_version);
+		bus_api_rev_len = strlen(bus_api_revision);
+		if (dhd_ver_len)
+			bcm_strncpy_s((char*)arg, dhd_ver_len, dhd_version, dhd_ver_len);
+		if (bus_api_rev_len)
+			bcm_strncat_s((char*)arg + dhd_ver_len, bus_api_rev_len, bus_api_revision,
+				bus_api_rev_len);
+#ifdef PKT_STATICS
+		memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t));
+#endif
+		break;
+
+	case IOV_GVAL(IOV_WLMSGLEVEL):
+		printf("android_msg_level=0x%x\n", android_msg_level);
+		printf("config_msg_level=0x%x\n", config_msg_level);
+#if defined(WL_WIRELESS_EXT)
+		int_val = (int32)iw_msg_level;
+		bcopy(&int_val, arg, val_size);
+		printf("iw_msg_level=0x%x\n", iw_msg_level);
+#endif
+#ifdef WL_CFG80211
+		int_val = (int32)wl_dbg_level;
+		bcopy(&int_val, arg, val_size);
+		printf("cfg_msg_level=0x%x\n", wl_dbg_level);
+#endif
+		break;
+
+	case IOV_SVAL(IOV_WLMSGLEVEL):
+		if (int_val & DHD_ANDROID_VAL) {
+			android_msg_level = (uint)(int_val & 0xFFFF);
+			printf("android_msg_level=0x%x\n", android_msg_level);
+		}
+		if (int_val & DHD_CONFIG_VAL) {
+			config_msg_level = (uint)(int_val & 0xFFFF);
+			printf("config_msg_level=0x%x\n", config_msg_level);
+		}
+#if defined(WL_WIRELESS_EXT)
+		if (int_val & DHD_IW_VAL) {
+			iw_msg_level = (uint)(int_val & 0xFFFF);
+			printf("iw_msg_level=0x%x\n", iw_msg_level);
+		}
+#endif
+#ifdef WL_CFG80211
+		if (int_val & DHD_CFG_VAL) {
+			wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF));
+		}
+#endif
+		break;
+
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)dhd_msg_level;
+		bcopy(&int_val, arg, val_size);
+#ifdef PKT_STATICS
+		dhdsdio_txpktstatics();
+#endif
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		dhd_msg_level = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BCMERRORSTR):
+		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+		((char *)arg)[BCME_STRLEN - 1] = 0x00;
+		break;
+
+	case IOV_GVAL(IOV_BCMERROR):
+		int_val = (int32)dhd_pub->bcmerror;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifndef BCMDBUS
+	case IOV_GVAL(IOV_WDTICK):
+		int_val = (int32)dhd_watchdog_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+#endif /* !BCMDBUS */
+
+	case IOV_SVAL(IOV_WDTICK):
+		if (!dhd_pub->up) {
+			bcmerror = BCME_NOTUP;
+			break;
+		}
+
+		if (CUSTOM_DHD_WATCHDOG_MS == 0 && int_val == 0) {
+			dhd_watchdog_ms = (uint)int_val;
+		}
+
+		dhd_os_wd_timer(dhd_pub, (uint)int_val);
+		break;
+
+	case IOV_GVAL(IOV_DUMP):
+		bcmerror = dhd_dump(dhd_pub, arg, len);
+		break;
+
+#ifndef BCMDBUS
+	case IOV_GVAL(IOV_DCONSOLE_POLL):
+		int_val = (int32)dhd_console_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DCONSOLE_POLL):
+		dhd_console_ms = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_CONS):
+		if (len > 0)
+			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+		break;
+#endif /* !BCMDBUS */
+
+	case IOV_SVAL(IOV_CLEARCOUNTS):
+		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+		dhd_pub->tx_dropped = 0;
+		dhd_pub->rx_dropped = 0;
+		dhd_pub->tx_pktgetfail = 0;
+		dhd_pub->rx_pktgetfail = 0;
+		dhd_pub->rx_readahead_cnt = 0;
+		dhd_pub->tx_realloc = 0;
+		dhd_pub->wd_dpc_sched = 0;
+		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+		dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+		/* clear proptxstatus related counters */
+		dhd_wlfc_clear_counts(dhd_pub);
+#endif /* PROP_TXSTATUS */
+#if defined(DHD_LB_STATS)
+		DHD_LB_STATS_RESET(dhd_pub);
+#endif /* DHD_LB_STATS */
+		break;
+
+
+	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+		break;
+	}
+
+#ifdef PROP_TXSTATUS
+	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		int_val = wlfc_enab ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+
+		/* wlfc is already set as desired */
+		if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
+			goto exit;
+
+		if (int_val == TRUE)
+			bcmerror = dhd_wlfc_init(dhd_pub);
+		else
+			bcmerror = dhd_wlfc_deinit(dhd_pub);
+
+		break;
+	}
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
+		bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+		dhd_wlfc_set_mode(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		dhd_wlfc_set_module_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+		bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+		dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
+		break;
+
+#endif /* PROP_TXSTATUS */
+
+	case IOV_GVAL(IOV_BUS_TYPE):
+		/* The dhd application queries the driver to check whether the bus is USB, SDIO or PCIe. */
+#ifdef BCMDBUS
+		int_val = BUS_TYPE_USB;
+#endif /* BCMDBUS */
+#ifdef BCMSDIO
+		int_val = BUS_TYPE_SDIO;
+#endif
+#ifdef PCIE_FULL_DONGLE
+		int_val = BUS_TYPE_PCIE;
+#endif
+		bcopy(&int_val, arg, val_size);
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+		int_val = dhd_pub->htsfdlystat_sz;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+		dhd_pub->htsfdlystat_sz = int_val & 0xff;
+		printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+		break;
+#endif
+	case IOV_SVAL(IOV_CHANGEMTU):
+		int_val &= 0xffff;
+		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
+	{
+		uint i = 0;
+		uint8 *ptr = (uint8 *)arg;
+		uint8 count = 0;
+
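+		/* Returned buffer layout: byte 0 holds the flow count, followed by one flow_id byte per active reorder flow. */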
+		ptr++;
+		for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
+			if (dhd_pub->reorder_bufs[i] != NULL) {
+				*ptr = dhd_pub->reorder_bufs[i]->flow_id;
+				ptr++;
+				count++;
+			}
+		}
+		ptr = (uint8 *)arg;
+		*ptr = count;
+		break;
+	}
+#ifdef DHDTCPACK_SUPPRESS
+	case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
+		int_val = (uint32)dhd_pub->tcpack_sup_mode;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
+		bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
+		break;
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	case IOV_GVAL(IOV_WMF_BSS_ENAB): {
+		uint32	bssidx;
+		dhd_wmf_t *wmf;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		int_val = wmf->wmf_enable ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_WMF_BSS_ENAB): {
+		/* Enable/Disable WMF */
+		uint32	bssidx;
+		dhd_wmf_t *wmf;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		if (wmf->wmf_enable == int_val)
+			break;
+		if (int_val) {
+			/* Enable WMF */
+			if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
+				DHD_ERROR(("%s: Error in creating WMF instance\n",
+				__FUNCTION__));
+				break;
+			}
+			if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
+				DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
+				break;
+			}
+			wmf->wmf_enable = TRUE;
+		} else {
+			/* Disable WMF */
+			wmf->wmf_enable = FALSE;
+			dhd_wmf_stop(dhd_pub, bssidx);
+			dhd_wmf_instance_del(dhd_pub, bssidx);
+		}
+		break;
+	}
+	case IOV_GVAL(IOV_WMF_UCAST_IGMP):
+		int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_IGMP):
+		if (dhd_pub->wmf_ucast_igmp == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_igmp = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+	case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
+		int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
+		dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
+		break;
+
+#ifdef WL_IGMP_UCQUERY
+	case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
+		int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
+		if (dhd_pub->wmf_ucast_igmp_query == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_igmp_query = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	case IOV_GVAL(IOV_WMF_UCAST_UPNP):
+		int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_UPNP):
+		if (dhd_pub->wmf_ucast_upnp == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_upnp = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+#endif /* DHD_UCAST_UPNP */
+
+	case IOV_GVAL(IOV_WMF_PSTA_DISABLE): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_psta_disable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		int_val = dhd_get_wmf_psta_disable(dhd_pub, bssidx);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+
+	case IOV_SVAL(IOV_WMF_PSTA_DISABLE): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_psta_disable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		dhd_set_wmf_psta_disable(dhd_pub, bssidx, int_val);
+		break;
+	}
+#endif /* DHD_WMF */
+
+#if defined(TRAFFIC_MGMT_DWM)
+	case IOV_SVAL(IOV_TRAFFIC_MGMT_DWM): {
+			trf_mgmt_filter_list_t   *trf_mgmt_filter_list =
+				(trf_mgmt_filter_list_t *)(arg);
+			bcmerror = traffic_mgmt_add_dwm_filter(dhd_pub, trf_mgmt_filter_list, len);
+		}
+		break;
+#endif 
+
+#ifdef DHD_L2_FILTER
+	case IOV_GVAL(IOV_DHCP_UNICAST): {
+		uint32 bssidx;
+		const char *val;
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
+				__FUNCTION__, name));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
+		memcpy(arg, &int_val, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_DHCP_UNICAST): {
+		uint32	bssidx;
+		const char *val;
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
+				__FUNCTION__, name));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		memcpy(&int_val, val, sizeof(int_val));
+		bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
+		break;
+	}
+	case IOV_GVAL(IOV_BLOCK_PING): {
+		uint32 bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
+		memcpy(arg, &int_val, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_BLOCK_PING): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		memcpy(&int_val, val, sizeof(int_val));
+		bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
+		break;
+	}
+	case IOV_GVAL(IOV_PROXY_ARP): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = dhd_get_parp_status(dhd_pub, bssidx);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_PROXY_ARP): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		bcopy(val, &int_val, sizeof(int_val));
+
+		/* Issue an iovar request to WL to update the proxy ARP capability bit
+		 * in the Extended Capability IE of beacons/probe responses.
+		 */
+		bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
+				NULL, 0, TRUE);
+		if (bcmerror == BCME_OK) {
+			dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
+		}
+		break;
+	}
+	case IOV_GVAL(IOV_GRAT_ARP): {
+		uint32 bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
+		memcpy(arg, &int_val, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_GRAT_ARP): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		memcpy(&int_val, val, sizeof(int_val));
+		bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
+		break;
+	}
+#endif /* DHD_L2_FILTER */
+	case IOV_SVAL(IOV_DHD_IE): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		break;
+	}
+	case IOV_GVAL(IOV_AP_ISOLATE): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_AP_ISOLATE): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
+		break;
+	}
+#ifdef DHD_PSTA
+	case IOV_GVAL(IOV_PSTA): {
+		int_val = dhd_get_psta_mode(dhd_pub);
+		bcopy(&int_val, arg, val_size);
+		break;
+		}
+	case IOV_SVAL(IOV_PSTA): {
+		if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
+			dhd_set_psta_mode(dhd_pub, int_val);
+		} else {
+			bcmerror = BCME_RANGE;
+		}
+		break;
+		}
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+	case IOV_GVAL(IOV_WET):
+		 int_val = dhd_get_wet_mode(dhd_pub);
+		 bcopy(&int_val, arg, val_size);
+		 break;
+
+	case IOV_SVAL(IOV_WET):
+		 if (int_val == 0 || int_val == 1) {
+			 dhd_set_wet_mode(dhd_pub, int_val);
+			 /* Delete the WET DB when disabled */
+			 if (!int_val) {
+				 dhd_wet_sta_delete_list(dhd_pub);
+			 }
+		 } else {
+			 bcmerror = BCME_RANGE;
+		 }
+		 break;
+	case IOV_SVAL(IOV_WET_HOST_IPV4):
+			dhd_set_wet_host_ipv4(dhd_pub, params, plen);
+			break;
+	case IOV_SVAL(IOV_WET_HOST_MAC):
+			dhd_set_wet_host_mac(dhd_pub, params, plen);
+		break;
+#endif /* DHD_WET */
+#ifdef DHD_MCAST_REGEN
+	case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+
+	case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
+		uint32	bssidx;
+		const char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
+		break;
+	}
+#endif /* DHD_MCAST_REGEN */
+
+	case IOV_GVAL(IOV_CFG80211_OPMODE): {
+		int_val = (int32)dhd_pub->op_mode;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+		}
+	case IOV_SVAL(IOV_CFG80211_OPMODE): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_pub->op_mode = int_val;
+		break;
+	}
+
+	case IOV_GVAL(IOV_ASSERT_TYPE):
+		int_val = g_assert_type;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_ASSERT_TYPE):
+		g_assert_type = (uint32)int_val;
+		break;
+
+
+#if !defined(MACOSX_DHD)
+	case IOV_GVAL(IOV_LMTEST): {
+		*(uint32 *)arg = (uint32)lmtest;
+		break;
+	}
+
+	case IOV_SVAL(IOV_LMTEST): {
+		uint32 val = *(uint32 *)arg;
+		if (val > 50)
+			bcmerror = BCME_BADARG;
+		else {
+			lmtest = (uint)val;
+			DHD_ERROR(("%s: lmtest %s\n",
+				__FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
+		}
+		break;
+	}
+#endif /* !MACOSX_DHD */
+
+#ifdef SHOW_LOGTRACE
+	case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
+		trace_buf_info_t *trace_buf_info;
+
+		trace_buf_info = (trace_buf_info_t *)MALLOC(dhd_pub->osh,
+				sizeof(trace_buf_info_t));
+		if (trace_buf_info != NULL) {
+			dhd_get_read_buf_ptr(dhd_pub, trace_buf_info);
+			memcpy((void*)arg, (void*)trace_buf_info, sizeof(trace_buf_info_t));
+			MFREE(dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
+		} else {
+			DHD_ERROR(("Memory allocation Failed\n"));
+			bcmerror = BCME_NOMEM;
+		}
+		break;
+	}
+#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+	case IOV_GVAL(IOV_SCAN_TO): {
+		dhd_get_scan_to_val(dhd_pub, (uint32 *)&int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_SCAN_TO): {
+		dhd_set_scan_to_val(dhd_pub, (uint32)int_val);
+		break;
+	}
+	case IOV_GVAL(IOV_JOIN_TO): {
+		dhd_get_join_to_val(dhd_pub, (uint32 *)&int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_JOIN_TO): {
+		dhd_set_join_to_val(dhd_pub, (uint32)int_val);
+		break;
+	}
+	case IOV_GVAL(IOV_CMD_TO): {
+		dhd_get_cmd_to_val(dhd_pub, (uint32 *)&int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_CMD_TO): {
+		dhd_set_cmd_to_val(dhd_pub, (uint32)int_val);
+		break;
+	}
+	case IOV_GVAL(IOV_OQS_TO): {
+		dhd_get_bus_to_val(dhd_pub, (uint32 *)&int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_OQS_TO): {
+		dhd_set_bus_to_val(dhd_pub, (uint32)int_val);
+		break;
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+#ifdef DHD_DEBUG
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
+		if (dhd_pub->dongle_trap_occured)
+			int_val = ltoh32(dhd_pub->last_trap_info.type);
+		else
+			int_val = 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
+	{
+		struct bcmstrbuf strbuf;
+		bcm_binit(&strbuf, arg, len);
+		if (dhd_pub->dongle_trap_occured == FALSE) {
+			bcm_bprintf(&strbuf, "no trap recorded\n");
+			break;
+		}
+		dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
+		break;
+	}
+
+	case IOV_GVAL(IOV_BPADDR):
+		{
+			sdreg_t sdreg;
+			uint32 addr, size;
+
+			memcpy(&sdreg, params, sizeof(sdreg));
+
+			addr = sdreg.offset;
+			size = sdreg.func;
+
+			bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
+				(uint *)&int_val, TRUE);
+
+			memcpy(arg, &int_val, sizeof(int32));
+
+			break;
+		}
+
+	case IOV_SVAL(IOV_BPADDR):
+		{
+			sdreg_t sdreg;
+			uint32 addr, size;
+
+			memcpy(&sdreg, params, sizeof(sdreg));
+
+			addr = sdreg.offset;
+			size = sdreg.func;
+
+			bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
+				(uint *)&sdreg.value,
+				FALSE);
+
+			break;
+		}
+#endif /* BCMSDIO || BCMPCIE */
+	case IOV_SVAL(IOV_MEM_DEBUG):
+		if (len > 0) {
+			bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
+		}
+		break;
+#endif /* DHD_DEBUG */
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+	case IOV_GVAL(IOV_LOG_CAPTURE_ENABLE):
+		{
+			int_val = dhd_pub->log_capture_enable;
+			bcopy(&int_val, arg, val_size);
+			break;
+		}
+
+	case IOV_SVAL(IOV_LOG_CAPTURE_ENABLE):
+		{
+			dhd_pub->log_capture_enable = (uint8)int_val;
+			break;
+		}
+
+	case IOV_GVAL(IOV_LOG_DUMP):
+		{
+			dhd_prot_debug_info_print(dhd_pub);
+			dhd_bus_mem_dump(dhd_pub);
+			break;
+		}
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+	return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+	      dhd_conn_event == WLC_E_PRUNE)) {
+		dhd_conn_event = event;
+		dhd_conn_status = status;
+		dhd_conn_reason = reason;
+	}
+}
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	void *p;
+	int eprec = -1;		/* precedence to evict from */
+	bool discard_oldest;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(q, prec))
+		eprec = prec;
+	else if (pktq_full(q)) {
+		p = pktq_peek_tail(q, &eprec);
+		ASSERT(p);
+		if (eprec > prec || eprec < 0)
+			return FALSE;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(q, eprec));
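+		/* dhdp->wme_dp is the discard-policy bitmap: a set bit for this
+		 * precedence selects evicting the oldest (head) packet; otherwise
+		 * the newest (tail) packet is evicted.
+		 */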
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+		if (eprec == prec && !discard_oldest)
+			return FALSE;		/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+		ASSERT(p);
+#ifdef DHDTCPACK_SUPPRESS
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+		PKTFREE(dhdp->osh, p, TRUE);
+	}
+
+	/* Enqueue */
+	p = pktq_penq(q, prec, pkt);
+	ASSERT(p);
+
+	return TRUE;
+}
+
+/*
+ * Drop the most appropriate packet(s) from the queue:
+ *	If any packet in the queue is non-fragmented, drop only the first non-fragmented packet.
+ *	If all packets in the queue are fragmented, find and drop one complete set of fragments.
+ *	If neither of the above cases matches, drop the first packet anyway.
+ */
+bool
+dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
+{
+	struct pktq_prec *q = NULL;
+	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
+	pkt_frag_t frag_info;
+
+	ASSERT(dhdp && pq);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	if (p == NULL)
+		return FALSE;
+
+	while (p) {
+		frag_info = pkt_frag_info(dhdp->osh, p);
+		if (frag_info == DHD_PKT_FRAG_NONE) {
+			break;
+		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
+			if (first) {
+				/* No last frag pkt, use prev as last */
+				last = prev;
+				break;
+			} else {
+				first = p;
+				prev_first = prev;
+			}
+		} else if (frag_info == DHD_PKT_FRAG_LAST) {
+			if (first) {
+				last = p;
+				break;
+			}
+		}
+
+		prev = p;
+		p = PKTLINK(p);
+	}
+
+	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
+		/* Not found matching pkts, use oldest */
+		prev = NULL;
+		p = q->head;
+		frag_info = 0;
+	}
+
+	if (frag_info == DHD_PKT_FRAG_NONE) {
+		first = last = p;
+		prev_first = prev;
+	}
+
+	p = first;
+	while (p) {
+		next = PKTLINK(p);
+		q->len--;
+		pq->len--;
+
+		PKTSETLINK(p, NULL);
+
+		if (fn)
+			fn(dhdp, prec, p, TRUE);
+
+		if (p == last)
+			break;
+
+		p = next;
+	}
+
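+	/* unlink the dropped run [first..last] from the precedence queue list */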
+	if (prev_first == NULL) {
+		if ((q->head = next) == NULL)
+			q->tail = NULL;
+	} else {
+		PKTSETLINK(prev_first, next);
+		if (!next)
+			q->tail = prev_first;
+	}
+
+	return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+	int val_size;
+	const bcm_iovar_t *vi = NULL;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
+{
+	int bcmerror = 0;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) {
+		return BCME_BADARG;
+	}
+
+	dhd_os_dhdiovar_lock(dhd_pub);
+	switch (ioc->cmd) {
+		case DHD_GET_MAGIC:
+			if (buflen < sizeof(int))
+				bcmerror = BCME_BUFTOOSHORT;
+			else
+				*(int*)buf = DHD_IOCTL_MAGIC;
+			break;
+
+		case DHD_GET_VERSION:
+			if (buflen < sizeof(int))
+				bcmerror = BCME_BUFTOOSHORT;
+			else
+				*(int*)buf = DHD_IOCTL_VERSION;
+			break;
+
+		case DHD_GET_VAR:
+		case DHD_SET_VAR:
+			{
+				char *arg;
+				uint arglen;
+
+				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+				if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
+					/* On platforms like FC19 the FW download is done via
+					 * IOCTL, so do not return an error for IOCTLs issued
+					 * before the FW download has completed.
+					 */
+					if (dhd_fw_download_status(dhd_pub)) {
+						DHD_ERROR(("%s: returning as busstate=%d\n",
+								__FUNCTION__, dhd_pub->busstate));
+						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+						dhd_os_dhdiovar_unlock(dhd_pub);
+						return -ENODEV;
+					}
+				}
+				DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
+				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+				dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+				if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
+					/* If Suspend/Resume is tested via pcie_suspend IOVAR
+					 * then continue to execute the IOVAR, return from here for
+					 * other IOVARs, also include pciecfgreg and devreset to go
+					 * through.
+					 */
+#ifdef DHD_EFI
+					if (bcmstricmp((char *)buf, "pcie_suspend") &&
+						bcmstricmp((char *)buf, "pciecfgreg") &&
+						bcmstricmp((char *)buf, "devreset") &&
+						bcmstricmp((char *)buf, "sdio_suspend") &&
+						bcmstricmp((char *)buf, "control_signal"))
+#else
+					if (bcmstricmp((char *)buf, "pcie_suspend") &&
+					    bcmstricmp((char *)buf, "pciecfgreg") &&
+					    bcmstricmp((char *)buf, "devreset") &&
+					    bcmstricmp((char *)buf, "sdio_suspend"))
+#endif /* DHD_EFI */
+					{
+						DHD_ERROR(("%s: bus is in suspend(%d)"
+							"or suspending(0x%x) state\n",
+							__FUNCTION__, dhd_pub->busstate,
+							dhd_pub->dhd_bus_busy_state));
+						DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+						dhd_os_busbusy_wake(dhd_pub);
+						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+						dhd_os_dhdiovar_unlock(dhd_pub);
+						return -ENODEV;
+					}
+				}
+				/* During the devreset ioctl we call dhdpcie_advertise_bus_cleanup,
+				 * which waits for all busy contexts to finish within a set time and
+				 * calls ASSERT on timeout. Since DHD_BUS_BUSY_SET_IN_DHD_IOVAR was
+				 * set for this ioctl, clear the IOVAR busy state here to avoid that
+				 * ASSERT. The "devreset" ioctl is not used on production platforms,
+				 * only on FC19 setups.
+				 */
+				if (!bcmstricmp((char *)buf, "devreset")) {
+					DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+				}
+				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+
+				/* scan past the name to any arguments */
+				for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+					;
+
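+				/* if no NUL terminator was found within the buffer, the name is malformed */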
+				if (*arg) {
+					bcmerror = BCME_BUFTOOSHORT;
+					goto unlock_exit;
+				}
+
+				/* account for the NUL terminator */
+				arg++, arglen--;
+				/* call with the appropriate arguments */
+				if (ioc->cmd == DHD_GET_VAR) {
+					bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+							buf, buflen, IOV_GET);
+				} else {
+					bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
+							arg, arglen, IOV_SET);
+				}
+				if (bcmerror != BCME_UNSUPPORTED) {
+					goto unlock_exit;
+				}
+
+				/* not in generic table, try protocol module */
+				if (ioc->cmd == DHD_GET_VAR) {
+					bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+							arglen, buf, buflen, IOV_GET);
+				} else {
+					bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+							NULL, 0, arg, arglen, IOV_SET);
+				}
+				if (bcmerror != BCME_UNSUPPORTED) {
+					goto unlock_exit;
+				}
+
+				/* if still not found, try bus module */
+				if (ioc->cmd == DHD_GET_VAR) {
+					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+							arg, arglen, buf, buflen, IOV_GET);
+				} else {
+					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+							NULL, 0, arg, arglen, IOV_SET);
+				}
+				if (bcmerror != BCME_UNSUPPORTED) {
+					goto unlock_exit;
+				}
+
+#ifdef DHD_TIMESYNC
+				/* check TS module */
+				if (ioc->cmd == DHD_GET_VAR)
+					bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, arg,
+						arglen, buf, buflen, IOV_GET);
+				else
+					bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf,
+						NULL, 0, arg, arglen, IOV_SET);
+#endif /* DHD_TIMESYNC */
+			}
+			goto unlock_exit;
+
+		default:
+			bcmerror = BCME_UNSUPPORTED;
+	}
+	dhd_os_dhdiovar_unlock(dhd_pub);
+	return bcmerror;
+
+unlock_exit:
+	DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
+	DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
+	dhd_os_busbusy_wake(dhd_pub);
+	DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+	dhd_os_dhdiovar_unlock(dhd_pub);
+	return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+static void
+wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
+	void *raw_event_ptr, char *eventmask)
+{
+	uint i, status, reason;
+	bool group = FALSE, flush_txq = FALSE, link = FALSE;
+	bool host_data = FALSE; /* print event data after the case when set */
+	const char *auth_str;
+	const char *event_name;
+	uchar *buf;
+	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+	uint event_type, flags, auth_type, datalen;
+
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	reason = ntoh32(event->reason);
+	BCM_REFERENCE(reason);
+	auth_type = ntoh32(event->auth_type);
+	datalen = ntoh32(event->datalen);
+
+	/* debug dump of event messages */
+	snprintf(eabuf, sizeof(eabuf), "%02x:%02x:%02x:%02x:%02x:%02x",
+	        (uchar)event->addr.octet[0]&0xff,
+	        (uchar)event->addr.octet[1]&0xff,
+	        (uchar)event->addr.octet[2]&0xff,
+	        (uchar)event->addr.octet[3]&0xff,
+	        (uchar)event->addr.octet[4]&0xff,
+	        (uchar)event->addr.octet[5]&0xff);
+
+	event_name = bcmevent_get_name(event_type);
+	BCM_REFERENCE(event_name);
+
+	if (flags & WLC_EVENT_MSG_LINK)
+		link = TRUE;
+	if (flags & WLC_EVENT_MSG_GROUP)
+		group = TRUE;
+	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+		flush_txq = TRUE;
+
+	switch (event_type) {
+	case WLC_E_START:
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC:
+	case WLC_E_REASSOC:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+			       event_name, eabuf, (int)reason));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+			       event_name, eabuf, (int)status));
+		}
+		break;
+
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else {
+			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+			       event_name, eabuf, auth_str, (int)reason));
+		}
+		BCM_REFERENCE(auth_str);
+
+		break;
+
+	case WLC_E_JOIN:
+	case WLC_E_ROAM:
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+#ifdef REPORT_FATAL_TIMEOUTS
+			dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+		} else {
+#ifdef REPORT_FATAL_TIMEOUTS
+			dhd_set_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
+			if (status == WLC_E_STATUS_FAIL) {
+				DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+			} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+				DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+			} else {
+				DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+					event_name, (int)status));
+			}
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+		BCM_REFERENCE(link);
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+		       event_name, eabuf, group, flush_txq));
+		BCM_REFERENCE(group);
+		BCM_REFERENCE(flush_txq);
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+		       event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
+		break;
+
+	case WLC_E_ASSOC_REQ_IE:
+	case WLC_E_ASSOC_RESP_IE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+#ifdef REPORT_FATAL_TIMEOUTS
+		dhd_stop_scan_timer(dhd_pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
+		break;
+	case WLC_E_RSSI_LQM:
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+	case WLC_E_PFN_SCAN_NONE:
+	case WLC_E_PFN_SCAN_ALLGONE:
+	case WLC_E_PFN_GSCAN_FULL_RESULT:
+	case WLC_E_PFN_SSID_EXT:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+		           event_name, (int)status, (int)reason));
+#ifdef REPORT_FATAL_TIMEOUTS
+		if ((status == WLC_E_STATUS_SUCCESS || status == WLC_E_STATUS_UNSOLICITED) &&
+				(reason == WLC_E_SUP_OTHER)) {
+			dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+		} else {
+			dhd_set_join_error(dhd_pub, WLC_WPA_MASK);
+		}
+#endif /* REPORT_FATAL_TIMEOUTS */
+		break;
+
+#ifdef WIFI_ACT_FRAME
+	case WLC_E_ACTION_FRAME:
+		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+		break;
+#endif /* WIFI_ACT_FRAME */
+
+#ifdef SHOW_LOGTRACE
+	case WLC_E_TRACE:
+		DHD_EVENT(("MACEVENT: %s Logtrace\n", event_name));
+		dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
+		break;
+#endif /* SHOW_LOGTRACE */
+
+	case WLC_E_RSSI:
+		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		break;
+
+	case WLC_E_SERVICE_FOUND:
+	case WLC_E_P2PO_ADD_DEVICE:
+	case WLC_E_P2PO_DEL_DEVICE:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+#ifdef BT_WIFI_HANDOBER
+	case WLC_E_BT_WIFI_HANDOVER_REQ:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+#endif
+
+	case WLC_E_CCA_CHAN_QUAL:
+		if (datalen) {
+			buf = (uchar *) event_data;
+			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d, "
+				"channel 0x%02x \n", event_name, event_type, eabuf, (int)status,
+				(int)reason, (int)auth_type, *(buf + 4)));
+		}
+		break;
+	case WLC_E_ESCAN_RESULT:
+	{
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n",
+		       event_name, event_type, eabuf, (int)status));
+	}
+		break;
+	case WLC_E_PSK_AUTH:
+		DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
+		event_name, eabuf, status, reason));
+		break;
+	case WLC_E_IF:
+	{
+		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
+		BCM_REFERENCE(ifevent);
+
+		DHD_EVENT(("MACEVENT: %s, opcode:0x%d  ifidx:%d\n",
+		event_name, ifevent->opcode, ifevent->ifidx));
+		break;
+	}
+
+#ifdef SHOW_LOGTRACE
+	case WLC_E_MSCH:
+	{
+		wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
+		break;
+	}
+#endif /* SHOW_LOGTRACE */
+
+	default:
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+		       event_name, event_type, eabuf, (int)status, (int)reason,
+		       (int)auth_type));
+		break;
+	}
+
+	/* show any appended data if message level is set to bytes or host_data is set */
+	if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
+		buf = (uchar *) event_data;
+		BCM_REFERENCE(buf);
+		DHD_EVENT((" data (%d) : ", datalen));
+		for (i = 0; i < datalen; i++) {
+			DHD_EVENT((" 0x%02x ", buf[i]));
+		}
+		DHD_EVENT(("\n"));
+	}
+}
+#endif /* SHOW_EVENTS */
+
+#ifdef DNGL_EVENT_SUPPORT
+/* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
+int
+dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
+{
+	bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
+
+	dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
+	return BCME_OK;
+}
+
+void
+dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
+	bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
+{
+	uint8 *p = (uint8 *)(event + 1);
+	uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
+	uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
+	uint16 version = ntoh16_ua((void *)&dngl_event->version);
+
+	DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
+	if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
+		return;
+	}
+	if (version != BCM_DNGL_EVENT_MSG_VERSION) {
+		DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
+			version, BCM_DNGL_EVENT_MSG_VERSION));
+		return;
+	}
+	switch (type) {
+	   case DNGL_E_SOCRAM_IND:
+		{
+		   bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
+		   uint16 tag = ltoh32(socramind_ptr->tag);
+		   uint16 taglen = ltoh32(socramind_ptr->length);
+		   p = (uint8 *)socramind_ptr->value;
+		   DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
+		   switch (tag) {
+			case SOCRAM_IND_ASSERT_TAG:
+			    {
+				/*
+				* The payload consists of -
+				* null terminated function name padded till 32 bit boundary +
+				* Line number - (32 bits)
+				* Caller address (32 bits)
+				*/
+				char *fnname = (char *)p;
+				if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
+					sizeof(uint32) * 2)) {
+					DHD_ERROR(("Wrong length:%d\n", datalen));
+					return;
+				}
+				DHD_EVENT(("ASSRT Function:%s ", p));
+				p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
+				DHD_EVENT(("Line:%d ", *(uint32 *)p));
+				p += sizeof(uint32);
+				DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
+				break;
+			    }
+			case SOCRAM_IND_TAG_HEALTH_CHECK:
+			   {
+				bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
+				DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d\n",
+				ltoh32(dngl_hc->top_module_tag), ltoh32(dngl_hc->top_module_len)));
+				if (DHD_EVENT_ON()) {
+					prhex("HEALTHCHECK", p, ltoh32(dngl_hc->top_module_len));
+				}
+				p = (uint8 *)dngl_hc->value;
+
+				switch (ltoh32(dngl_hc->top_module_tag)) {
+					case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
+					   {
+						bcm_dngl_pcie_hc_t *pcie_hc;
+						pcie_hc = (bcm_dngl_pcie_hc_t *)p;
+						BCM_REFERENCE(pcie_hc);
+						if (ltoh32(dngl_hc->top_module_len) <
+								sizeof(bcm_dngl_pcie_hc_t)) {
+							DHD_ERROR(("Wrong length:%d\n",
+								ltoh32(dngl_hc->top_module_len)));
+							return;
+						}
+						DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
+							" control:0x%x\n",
+							ltoh32(pcie_hc->version),
+							ltoh32(pcie_hc->pcie_err_ind_type),
+							ltoh32(pcie_hc->pcie_flag),
+							ltoh32(pcie_hc->pcie_control_reg)));
+						break;
+					   }
+					default:
+						DHD_ERROR(("%s:Unknown module TAG:%d\n",
+						  __FUNCTION__,
+						  ltoh32(dngl_hc->top_module_tag)));
+						break;
+				}
+				break;
+			   }
+			default:
+			   DHD_ERROR(("%s:Unknown TAG", __FUNCTION__));
+			   if (p && DHD_EVENT_ON()) {
+				   prhex("SOCRAMIND", p, taglen);
+			   }
+			   break;
+		   }
+		   break;
+		}
+	   default:
+		DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__, type));
+		if (p && DHD_EVENT_ON()) {
+			prhex("SOCRAMIND", p, datalen);
+		}
+		break;
+	}
+#ifdef DHD_FW_COREDUMP
+	dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
+#endif /* DHD_FW_COREDUMP */
+#ifndef BCMDBUS
+	if (dhd_socram_dump(dhdp->bus)) {
+		DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
+	} else {
+		/* Notify framework */
+		dhd_dbg_send_urgent_evt(dhdp, p, datalen);
+	}
+#endif /* !BCMDBUS */
+}
+#endif /* DNGL_EVENT_SUPPORT */
+
+/* Stub for now. Will become real function as soon as shim
+ * is being integrated to Android, Linux etc.
+ */
+int
+wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
+{
+	return BCME_OK;
+}
+
+int
+wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+	uint pktlen, void **data_ptr, void *raw_event)
+{
+	wl_evt_pport_t evt_pport;
+	wl_event_msg_t event;
+	bcm_event_msg_u_t evu;
+	int ret;
+
+	/* make sure it is a BRCM event pkt and record event data */
+	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
+	if (ret != BCME_OK) {
+		return ret;
+	}
+
+	memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
+
+	/* convert event from network order to host order */
+	wl_event_to_host_order(&event);
+
+	/* record event params to evt_pport */
+	evt_pport.dhd_pub = dhd_pub;
+	evt_pport.ifidx = ifidx;
+	evt_pport.pktdata = pktdata;
+	evt_pport.data_ptr = data_ptr;
+	evt_pport.raw_event = raw_event;
+	evt_pport.data_len = pktlen;
+
+#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS)
+	{
+		struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
+		if (shim) {
+			ret = wl_shim_event_process(shim, &event, &evt_pport);
+		} else {
+			/* events can come even before shim is initialized
+			 * (when waiting for "wlc_ver" response);
+			 * handle them in a non-shim way.
+			 */
+			DHD_ERROR(("%s: Events coming before shim initialization!\n",
+				__FUNCTION__));
+			ret = wl_event_process_default(&event, &evt_pport);
+		}
+	}
+#else
+	ret = wl_event_process_default(&event, &evt_pport);
+#endif /* WL_WLC_SHIM && WL_WLC_SHIM_EVENTS */
+
+	return ret;
+}
+
+/* Check whether packet is a BRCM event pkt. If it is, record event data. */
+int
+wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
+{
+	int ret;
+
+	ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: Invalid event frame, err = %d\n",
+			__FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
+int
+wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
+	wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+	bcm_event_msg_u_t evu;
+	uint8 *event_data;
+	uint32 type, status, datalen;
+	uint16 flags;
+	uint evlen;
+	int ret;
+	uint16 usr_subtype;
+	char macstr[ETHER_ADDR_STR_LEN];
+
+	BCM_REFERENCE(macstr);
+
+	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
+	if (ret != BCME_OK) {
+		return ret;
+	}
+
+	usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
+	switch (usr_subtype) {
+	case BCMILCP_BCM_SUBTYPE_EVENT:
+		memcpy(event, &evu.event, sizeof(wl_event_msg_t));
+		*data_ptr = &pvt_data[1];
+		break;
+	case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
+#ifdef DNGL_EVENT_SUPPORT
+		/* If it is a DNGL event process it first */
+		if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
+			/*
+			 * Return error purposely to prevent DNGL event being processed
+			 * as BRCM event
+			 */
+			return BCME_ERROR;
+		}
+#endif /* DNGL_EVENT_SUPPORT */
+		return BCME_NOTFOUND;
+	default:
+		return BCME_NOTFOUND;
+	}
+
+	/* start wl_event_msg process */
+	event_data = *data_ptr;
+	type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+	datalen = ntoh32_ua((void *)&event->datalen);
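+	/* length pushed to upper layers: BCM event header plus event payload */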
+	evlen = datalen + sizeof(bcm_event_t);
+
+	switch (type) {
+#ifdef PROP_TXSTATUS
+	case WLC_E_FIFO_CREDIT_MAP:
+		dhd_wlfc_enable(dhd_pub);
+		dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
+		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+			event_data[2],
+			event_data[3], event_data[4], event_data[5]));
+		break;
+
+	case WLC_E_BCMC_CREDIT_SUPPORT:
+		dhd_wlfc_BCMCCredit_support_event(dhd_pub);
+		break;
+#ifdef LIMIT_BORROW
+	case WLC_E_ALLOW_CREDIT_BORROW:
+		dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
+		break;
+#endif /* LIMIT_BORROW */
+#endif /* PROP_TXSTATUS */
+
+
+	case WLC_E_ULP:
+#ifdef DHD_ULP
+	{
+		wl_ulp_event_t *ulp_evt = (wl_ulp_event_t *)event_data;
+
+		/* Flush and disable console messages */
+		if (ulp_evt->ulp_dongle_action == WL_ULP_DISABLE_CONSOLE) {
+#ifdef DHD_ULP_NOT_USED
+			dhd_bus_ulp_disable_console(dhd_pub);
+#endif /* DHD_ULP_NOT_USED */
+		}
+		if (ulp_evt->ulp_dongle_action == WL_ULP_UCODE_DOWNLOAD) {
+			dhd_bus_ucode_download(dhd_pub->bus);
+		}
+	}
+#endif /* DHD_ULP */
+		break;
+	case WLC_E_TDLS_PEER_EVENT:
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+		{
+			dhd_tdls_event_handler(dhd_pub, event);
+		}
+#endif
+		break;
+
+	case WLC_E_IF:
+		{
+		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
+
+		/* Ignore the event if NOIF is set */
+		if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
+			DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
+			return (BCME_UNSUPPORTED);
+		}
+#ifdef PCIE_FULL_DONGLE
+		dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
+			ifevent->opcode, ifevent->role);
+#endif
+#ifdef PROP_TXSTATUS
+		{
+			uint8* ea = pvt_data->eth.ether_dhost;
+			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+						  "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+						  ifevent->ifidx,
+						  ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
+						  ((ifevent->role == 0) ? "STA":"AP "),
+						  ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+			(void)ea;
+
+			if (ifevent->opcode == WLC_E_IF_CHANGE)
+				dhd_wlfc_interface_event(dhd_pub,
+					eWLFC_MAC_ENTRY_ACTION_UPDATE,
+					ifevent->ifidx, ifevent->role, ea);
+			else
+				dhd_wlfc_interface_event(dhd_pub,
+					((ifevent->opcode == WLC_E_IF_ADD) ?
+					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+					ifevent->ifidx, ifevent->role, ea);
+
+			/* dhd already has created an interface by default, for 0 */
+			if (ifevent->ifidx == 0)
+				break;
+		}
+#endif /* PROP_TXSTATUS */
+
+		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+			if (ifevent->opcode == WLC_E_IF_ADD) {
+				if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet)) {
+
+					DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d  %s\n",
+						__FUNCTION__, ifevent->ifidx, event->ifname));
+					return (BCME_ERROR);
+				}
+			} else if (ifevent->opcode == WLC_E_IF_DEL) {
+#ifdef PCIE_FULL_DONGLE
+				/* Delete flowrings unconditionally for i/f delete */
+				dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+					event->ifname));
+#endif /* PCIE_FULL_DONGLE */
+				dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet);
+				/* Return ifidx (for virtual i/f, it will be > 0)
+				 * so that no other operations on deleted interface
+				 * are carried out
+				 */
+				ret = ifevent->ifidx;
+				goto exit;
+			} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
+#ifdef WL_CFG80211
+				dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet);
+#endif /* WL_CFG80211 */
+			}
+		} else {
+#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
+			DHD_INFO(("%s: Invalid ifidx %d for %s\n",
+			   __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
+		}
+		/* send up the if event: btamp user needs it */
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		break;
+	}
+
+#ifdef WLMEDIA_HTSF
+	case WLC_E_HTSFSYNC:
+		htsf_update(dhd_pub->info, event_data);
+		break;
+#endif /* WLMEDIA_HTSF */
+	case WLC_E_NDIS_LINK:
+		break;
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
+	case WLC_E_PFN_NET_LOST:
+		break;
+#if defined(PNO_SUPPORT)
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BEST_BATCHING:
+		dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
+		break;
+#endif /* PNO_SUPPORT */
+#if defined(RTT_SUPPORT)
+	case WLC_E_PROXD:
+		dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
+		break;
+#endif /* RTT_SUPPORT */
+		/* These are what external supplicant/authenticator wants */
+	case WLC_E_ASSOC_IND:
+	case WLC_E_AUTH_IND:
+	case WLC_E_REASSOC_IND:
+		dhd_findadd_sta(dhd_pub,
+			dhd_ifname2idx(dhd_pub->info, event->ifname),
+			&event->addr.octet);
+		break;
+#ifndef BCMDBUS
+#if defined(DHD_FW_COREDUMP)
+	case WLC_E_PSM_WATCHDOG:
+		DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
+		if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
+			DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
+		}
+	break;
+#endif
+#endif /* !BCMDBUS */
+#ifdef DHD_WMF
+	case WLC_E_PSTA_PRIMARY_INTF_IND:
+		dhd_update_psta_interface_for_sta(dhd_pub, event->ifname,
+			(void *)(event->addr.octet), (void*) event_data);
+		break;
+#endif
+	case WLC_E_LINK:
+#ifdef PCIE_FULL_DONGLE
+		DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+			__FUNCTION__, type, flags, status));
+		if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+			event->ifname), (uint8)flags) != BCME_OK) {
+			DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
+				__FUNCTION__));
+			break;
+		}
+		if (!flags) {
+			DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
+				__FUNCTION__));
+			/* Delete all sta and flowrings */
+			dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
+			dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+				event->ifname));
+		}
+		/* fall through */
+#endif /* PCIE_FULL_DONGLE */
+	case WLC_E_DEAUTH:
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC:
+	case WLC_E_DISASSOC_IND:
+#ifdef PCIE_FULL_DONGLE
+		if (type != WLC_E_LINK) {
+			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
+			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
+			uint8 del_sta = TRUE;
+#ifdef WL_CFG80211
+			if (role == WLC_E_IF_ROLE_STA &&
+				!wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
+					!wl_cfg80211_is_event_from_connected_bssid(
+						dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
+				del_sta = FALSE;
+			}
+#endif /* WL_CFG80211 */
+			DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
+				__FUNCTION__, type, flags, status, role, del_sta));
+
+			if (del_sta) {
+				DHD_MAC_TO_STR((event->addr.octet), macstr);
+				DHD_EVENT(("%s: Deleting STA %s\n", __FUNCTION__, macstr));
+
+				dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
+					event->ifname), &event->addr.octet);
+				/* Delete all flowrings for STA and P2P Client */
+				if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
+					dhd_flow_rings_delete(dhd_pub, ifindex);
+				} else {
+					dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+						(char *)&event->addr.octet[0]);
+				}
+			}
+		}
+#endif /* PCIE_FULL_DONGLE */
+		/* fall through */
+
+	default:
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+#ifdef DHD_UPDATE_INTF_MAC
+		if ((WLC_E_LINK == type) && (WLC_EVENT_MSG_LINK & flags)) {
+			dhd_event_ifchange(dhd_pub->info,
+				(struct wl_event_data_if *)event,
+				event->ifname,
+				event->addr.octet);
+		}
+#endif /* DHD_UPDATE_INTF_MAC */
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+			__FUNCTION__, type, flags, status));
+		BCM_REFERENCE(flags);
+		BCM_REFERENCE(status);
+
+		break;
+	}
+#if defined(STBAP)
+	/* For routers, EAPD will be working on these events.
+	 * Overwrite the interface name so that the event is pushed
+	 * to the host with its registered interface name
+	 */
+	memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
+#endif
+
+exit:
+
+#ifdef SHOW_EVENTS
+	if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
+		wl_show_host_event(dhd_pub, event,
+			(void *)event_data, raw_event, dhd_pub->enable_log);
+	}
+#endif /* SHOW_EVENTS */
+
+	return ret;
+}
+
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
+	wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+	return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
+			raw_event);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+	int i, j = 0;
+	unsigned char *buf = pbuf;
+
+	if (bytes_per_line == 0) {
+		bytes_per_line = len;
+	}
+
+	for (i = 0; i < len; i++) {
+		printf("%2.2x", *buf++);
+		j++;
+		if (j == bytes_per_line) {
+			printf("\n");
+			j = 0;
+		} else {
+			printf(":");
+		}
+	}
+	printf("\n");
+#endif /* DHD_DEBUG */
+}
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif
+
+#if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
+/* Convert user's input in hex pattern to byte-size mask */
+int
+wl_pattern_atoh(char *src, char *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 &&
+	    strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2; /* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
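+	/* convert each pair of hex digits into one output byte */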
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		bcm_strncpy_s(num, sizeof(num), src, 2);
+		num[2] = '\0';
+		dst[i] = (uint8)strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+#endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
+
+#ifdef PKT_FILTER_SUPPORT
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+	char				*argv[8];
+	int					i = 0;
+	const char			*str;
+	int					buf_len;
+	int					str_len;
+	char				*arg_save = 0, *arg_org = 0;
+	int					rc;
+	char				buf[32] = {0};
+	wl_pkt_filter_enable_t	enable_parm;
+	wl_pkt_filter_enable_t	* pkt_filterp;
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	arg_org = arg_save;
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_enable";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
+	buf[ sizeof(buf) - 1 ] = '\0';
+	buf_len = str_len + 1;
+
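+	/* iovar buffer layout: "pkt_filter_enable" name, NUL, then the enable structure */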
+	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+	if (dhd_conf_del_pkt_filter(dhd, enable_parm.id))
+		goto fail;
+
+	/* Parse enable/disable value. */
+	enable_parm.enable = htod32(enable);
+
+	buf_len += sizeof(enable_parm);
+	memcpy((char *)pkt_filterp,
+	       &enable_parm,
+	       sizeof(enable_parm));
+
+	/* Enable/disable the specified filter. */
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
+		__FUNCTION__, enable?"enable":"disable", arg, rc));
+	else
+		DHD_TRACE(("%s: successfully %s pktfilter %s\n",
+		__FUNCTION__, enable?"enable":"disable", arg));
+
+	/* Control the master mode */
+	rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
+		master_mode, WLC_SET_VAR, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
+			__FUNCTION__, master_mode, rc));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
+/* Packet filter section: extended filters have named offsets, add table here */
+typedef struct {
+	char *name;
+	uint16 base;
+} wl_pfbase_t;
+
+static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
+
+static int
+wl_pkt_filter_base_parse(char *name)
+{
+	uint i;
+	char *bname, *uname;
+
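+	/* case-insensitive match of the user name against the table of known base names */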
+	for (i = 0; i < ARRAYSIZE(basenames); i++) {
+		bname = basenames[i].name;
+		for (uname = name; *uname; bname++, uname++) {
+			if (*bname != bcm_toupper(*uname)) {
+				break;
+			}
+		}
+		if (!*uname && !*bname) {
+			break;
+		}
+	}
+
+	if (i < ARRAYSIZE(basenames)) {
+		return basenames[i].base;
+	} else {
+		return -1;
+	}
+}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+	const char 			*str;
+	wl_pkt_filter_t		pkt_filter;
+	wl_pkt_filter_t		*pkt_filterp;
+	int					buf_len;
+	int					str_len;
+	int 				rc;
+	uint32				mask_size;
+	uint32				pattern_size;
+	char				*argv[16], * buf = 0;
+	int					i = 0;
+	char				*arg_save = 0, *arg_org = 0;
+#define BUF_SIZE		2048
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	arg_org = arg_save;
+
+	if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(buf, 0, BUF_SIZE);
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	if (strlen(arg) > BUF_SIZE) {
+		DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+		goto fail;
+	}
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+	while (argv[i++])
+		argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+	buf[ str_len ] = '\0';
+	buf_len = str_len + 1;
+
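+	/* iovar buffer layout: "pkt_filter_add" name, NUL, then the wl_pkt_filter_t payload */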
+	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+	if (dhd_conf_del_pkt_filter(dhd, pkt_filter.id))
+		goto fail;
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Polarity not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Filter type not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+	if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
+		if (argv[++i] == NULL) {
+			DHD_ERROR(("Offset not provided\n"));
+			goto fail;
+		}
+
+		/* Parse pattern filter offset. */
+		pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+		if (argv[++i] == NULL) {
+			DHD_ERROR(("Bitmask not provided\n"));
+			goto fail;
+		}
+
+		/* Parse pattern filter mask. */
+		mask_size =
+			htod32(wl_pattern_atoh(argv[i],
+			(char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+		if (argv[++i] == NULL) {
+			DHD_ERROR(("Pattern not provided\n"));
+			goto fail;
+		}
+
+		/* Parse pattern filter pattern. */
+		pattern_size =
+			htod32(wl_pattern_atoh(argv[i],
+			(char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+		if (mask_size != pattern_size) {
+			DHD_ERROR(("Mask and pattern not the same size\n"));
+			goto fail;
+		}
+
+		pkt_filter.u.pattern.size_bytes = mask_size;
+		buf_len += WL_PKT_FILTER_FIXED_LEN;
+		buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+		/* The filter attributes are set in a local variable (pkt_filter), and
+		 * then memcpy'ed into the buffer (pkt_filterp) since there is no
+		 * guarantee that the buffer is properly aligned.
+		 */
+		memcpy((char *)pkt_filterp,
+			&pkt_filter,
+			WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+	} else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
+		int list_cnt = 0;
+		char *endptr = NULL;
+		wl_pkt_filter_pattern_listel_t *pf_el = &pkt_filterp->u.patlist.patterns[0];
+
+		while (argv[++i] != NULL) {
+			/* Parse pattern filter base and offset. */
+			if (bcm_isdigit(*argv[i])) {
+				/* Numeric base */
+				rc = strtoul(argv[i], &endptr, 0);
+			} else {
+				endptr = strchr(argv[i], ':');
+				if (endptr) {
+					*endptr = '\0';
+					rc = wl_pkt_filter_base_parse(argv[i]);
+					if (rc == -1) {
+						 printf("Invalid base %s\n", argv[i]);
+						goto fail;
+					}
+					*endptr = ':';
+				} else {
+					printf("Invalid [base:]offset format: %s\n", argv[i]);
+					goto fail;
+				}
+			}
+
+			if (*endptr == ':') {
+				pkt_filter.u.patlist.patterns[0].base_offs = htod16(rc);
+				rc = strtoul(endptr + 1, &endptr, 0);
+			} else {
+				/* Must have had a numeric offset only */
+				pkt_filter.u.patlist.patterns[0].base_offs = htod16(0);
+			}
+
+			if (*endptr) {
+				printf("Invalid [base:]offset format: %s\n", argv[i]);
+				goto fail;
+			}
+			if (rc > 0x0000FFFF) {
+				printf("Offset too large\n");
+				goto fail;
+			}
+			pkt_filter.u.patlist.patterns[0].rel_offs = htod16(rc);
+
+			/* Clear match_flag (may be set in parsing which follows) */
+			pkt_filter.u.patlist.patterns[0].match_flags = htod16(0);
+
+			/* Parse pattern filter mask and pattern directly into ioctl buffer */
+			if (argv[++i] == NULL) {
+				printf("Bitmask not provided\n");
+				goto fail;
+			}
+			rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
+			if (rc == -1) {
+				printf("Rejecting: %s\n", argv[i]);
+				goto fail;
+			}
+			mask_size = htod16(rc);
+
+			if (argv[++i] == NULL) {
+				printf("Pattern not provided\n");
+				goto fail;
+			}
+
+			if (*argv[i] == '!') {
+				pkt_filter.u.patlist.patterns[0].match_flags =
+					htod16(WL_PKT_FILTER_MFLAG_NEG);
+				(argv[i])++;
+			}
+			if (*argv[i] == '\0') {
+				printf("Pattern not provided\n");
+				goto fail;
+			}
+			rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]);
+			if (rc == -1) {
+				printf("Rejecting: %s\n", argv[i]);
+				goto fail;
+			}
+			pattern_size = htod16(rc);
+
+			if (mask_size != pattern_size) {
+				printf("Mask and pattern not the same size\n");
+				goto fail;
+			}
+
+			pkt_filter.u.patlist.patterns[0].size_bytes = mask_size;
+
+			/* Account for the size of this pattern element */
+			buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
+
+			/* And the pattern element fields that were put in a local for
+			 * alignment purposes now get copied to the ioctl buffer.
+			 */
+			memcpy((char*)pf_el, &pkt_filter.u.patlist.patterns[0],
+				WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+			/* Move to next element location in ioctl buffer */
+			pf_el = (wl_pkt_filter_pattern_listel_t*)
+				((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
+
+			/* Count list element */
+			list_cnt++;
+		}
+
+		/* Account for initial fixed size, and copy initial fixed fields */
+		buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
+
+		/* Update list count and total size */
+		pkt_filter.u.patlist.list_cnt = list_cnt;
+		pkt_filter.u.patlist.PAD1[0] = 0;
+		pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
+		pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
+
+		memcpy((char *)pkt_filterp, &pkt_filter,
+			WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
+	} else {
+		DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
+		goto fail;
+	}
+
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+	if (buf)
+		MFREE(dhd->osh, buf, BUF_SIZE);
+}
+
+void
+dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
+{
+	int ret;
+
+	ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
+		id, WLC_SET_VAR, TRUE, 0);
+	if (ret < 0) {
+		DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
+			__FUNCTION__, id, ret));
+	}
+	else
+		DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
+		__FUNCTION__, id));
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+	int retcode;
+
+	retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
+		arp_mode, WLC_SET_VAR, TRUE, 0);
+
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+			__FUNCTION__, arp_mode, retcode));
+	else
+		DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
+			__FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+	int retcode;
+
+	retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
+		arp_enable, WLC_SET_VAR, TRUE, 0);
+
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+			__FUNCTION__, arp_enable, retcode));
+	else
+		DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
+			__FUNCTION__, arp_enable));
+	if (arp_enable) {
+		uint32 version;
+		retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
+			&version, WLC_GET_VAR, FALSE, 0);
+		if (retcode) {
+			DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
+				__FUNCTION__, retcode));
+			dhd->arp_version = 1;
+		}
+		else {
+			DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
+			dhd->arp_version = version;
+		}
+	}
+}
+
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+
+	if (dhd == NULL) return;
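+	/* ARP offload version 1 is not per-interface; always use interface index 0 */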
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
+{
+	int ret;
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
+			NULL, 0, TRUE);
+	if (ret)
+		DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
+	else
+		DHD_ARPOE(("%s: sARP H ipaddr entry added \n",
+			__FUNCTION__));
+}
+
+int
+dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
+{
+	int ret, i;
+	uint32 *ptr32 = buf;
+	bool clr_bottom = FALSE;
+
+	if (!buf)
+		return -1;
+	if (dhd == NULL) return -1;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
+			FALSE);
+	if (ret) {
+		DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
+		__FUNCTION__, ret));
+
+		return -1;
+	}
+
+	/* clean up the buffer: zero out every entry after the first empty slot */
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (!clr_bottom) {
+			if (*ptr32 == 0)
+				clr_bottom = TRUE;
+		} else {
+			*ptr32 = 0;
+		}
+		ptr32++;
+	}
+
+	return 0;
+}
+#endif /* ARP_OFFLOAD_SUPPORT  */
+
+/*
+ * Neighbor Discovery Offload: enable NDO feature
+ * Called by ipv6 event handler when interface comes up/goes down
+ */
+int
+dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
+{
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
+		ndo_enable, WLC_SET_VAR, TRUE, 0);
+	if (retcode)
+		DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
+			__FUNCTION__, ndo_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
+			__FUNCTION__, ndo_enable));
+
+	return retcode;
+}
+
+/*
+ * Neighbor Discovery Offload: add a host IPv6 address
+ * Called by ipv6 event handler when interface comes up
+ */
+int
+dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE] = {0};
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
+		IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry added \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+
+/*
+ * Neighbor Discovery Offload: clear host IPv6 addresses
+ * Called by ipv6 event handler when interface goes down
+ */
+int
+dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE] = {0};
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
+		0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry removed \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+
+/* Enhanced ND offload */
+uint16
+dhd_ndo_get_version(dhd_pub_t *dhdp)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	wl_nd_hostip_t ndo_get_ver;
+	int iov_len;
+	int retcode;
+	uint16 ver = 0;
+
+	if (dhdp == NULL) {
+		return BCME_ERROR;
+	}
+
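+	/* query the nd_hostip iovar version; firmware without support returns an error, treated as version 0 */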
+	memset(&iovbuf, 0, sizeof(iovbuf));
+	ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
+	ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
+	ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
+	ndo_get_ver.u.version = 0;
+	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
+			WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return BCME_ERROR;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
+	if (retcode) {
+		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+		/* ver iovar not supported. NDO version is 0 */
+		ver = 0;
+	} else {
+		wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;
+
+		if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
+				(dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
+				(dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
+					+ sizeof(uint16))) {
+			/* nd_hostip iovar version */
+			ver = dtoh16(ndo_ver_ret->u.version);
+		}
+
+		DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
+	}
+
+	return ver;
+}
+
+int
+dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	wl_nd_hostip_t ndo_add_addr;
+	int iov_len;
+	int retcode;
+
+	if (dhdp == NULL || ipv6addr == 0) {
+		return BCME_ERROR;
+	}
+
+	/* wl_nd_hostip_t fixed param */
+	ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
+	ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
+	ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
+	/* wl_nd_host_ip_addr_t param for add */
+	memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
+	ndo_add_addr.u.host_ip.type = type;
+
+	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
+		WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return BCME_ERROR;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+	if (retcode) {
+		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+#ifdef NDO_CONFIG_SUPPORT
+		if (retcode == BCME_NORESOURCE) {
+			/* number of host IP addresses exceeds FW capacity; deactivate ND offload */
+			DHD_INFO(("%s: Host IP count exceeds device capacity, "
+				"ND offload deactivated\n", __FUNCTION__));
+			dhdp->ndo_host_ip_overflow = TRUE;
+			dhd_ndo_enable(dhdp, 0);
+		}
+#endif /* NDO_CONFIG_SUPPORT */
+	} else {
+		DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
+	}
+
+	return retcode;
+}
+
+int
+dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	wl_nd_hostip_t ndo_del_addr;
+	int iov_len;
+	int retcode;
+
+	if (dhdp == NULL || ipv6addr == 0) {
+		return BCME_ERROR;
+	}
+
+	/* wl_nd_hostip_t fixed param */
+	ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
+	ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
+	ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
+	/* wl_nd_host_ip_addr_t param for del */
+	memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
+	ndo_del_addr.u.host_ip.type = 0;	/* don't care */
+
+	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
+		WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return BCME_ERROR;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+	if (retcode) {
+		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+	} else {
+		DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
+	}
+
+	return retcode;
+}
+
+int
+dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	wl_nd_hostip_t ndo_del_addr;
+	int iov_len;
+	int retcode;
+
+	if (dhdp == NULL) {
+		return BCME_ERROR;
+	}
+
+	/* wl_nd_hostip_t fixed param */
+	ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
+	if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
+		ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
+	} else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
+		ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
+	} else {
+		return BCME_BADARG;
+	}
+	ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
+
+	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
+			iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return BCME_ERROR;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+	if (retcode) {
+		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
+	} else {
+		DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
+	}
+
+	return retcode;
+}
+
+int
+dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iov_len;
+	int retcode;
+
+	if (dhdp == NULL) {
+		return BCME_ERROR;
+	}
+
+	iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
+			iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return BCME_ERROR;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+	if (retcode)
+		DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
+			__FUNCTION__, enable, retcode));
+	else {
+		DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
+			__FUNCTION__, enable));
+	}
+
+	return retcode;
+}
+
+
+/*
+ * Returns TRUE if associated, FALSE otherwise.
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
+{
+	char bssid[6], zbuf[6];
+	int ret = -1;
+
+	bzero(bssid, 6);
+	bzero(zbuf, 6);
+
+	ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
+		ETHER_ADDR_LEN, FALSE, ifidx);
+	DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+	if (ret == BCME_NOTASSOCIATED) {
+		DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
+	}
+
+	if (retval)
+		*retval = ret;
+
+	if (ret < 0)
+		return FALSE;
+
+	if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
+		DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+		return FALSE;
+	}
+	return TRUE;
+}
+
+/* Function to estimate possible DTIM_SKIP value */
+#if defined(BCMPCIE)
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
+{
+	int bcn_li_dtim = 1; /* default: no DTIM skip */
+	int ret = -1;
+	int allowed_skip_dtim_cnt = 0;
+
+	/* Check if associated */
+	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+		return bcn_li_dtim;
+	}
+
+	if (dtim_period == NULL || bcn_interval == NULL)
+		return bcn_li_dtim;
+
+	/* read associated AP beacon interval */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+		bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+		return bcn_li_dtim;
+	}
+
+	/* read associated AP dtim setup */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+		dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+		return bcn_li_dtim;
+	}
+
+	/* if not associated just return */
+	if (*dtim_period == 0) {
+		return bcn_li_dtim;
+	}
+
+	if (dhd->max_dtim_enable) {
+		bcn_li_dtim =
+			(int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
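+		/*
+		 * Skip as many DTIMs as fit into MAX_DTIM_ALLOWED_INTERVAL.
+		 * Illustrative numbers only: a DTIM period of 2, a beacon
+		 * interval of 100 and a limit of 4000 would give
+		 * 4000 / (2 * 100) = 20 DTIM intervals per wakeup.
+		 */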
+		if (bcn_li_dtim == 0) {
+			bcn_li_dtim = 1;
+		}
+	} else {
+		/* attempt to use the platform-defined DTIM skip interval */
+		bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+		/* check if sta listen interval fits into AP dtim */
+		if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
+			/* AP DTIM too big for our listen interval: no DTIM skipping */
+			bcn_li_dtim = NO_DTIM_SKIP;
+			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+				__FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
+			return bcn_li_dtim;
+		}
+
+		if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+			allowed_skip_dtim_cnt =
+				MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
+			bcn_li_dtim =
+				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+		}
+
+		if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
+			/* Round up dtim_skip to fit into STAs Listen Interval */
+			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
+			DHD_TRACE(("%s adjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+		}
+	}
+
+	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+		__FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+	return bcn_li_dtim;
+}
+#else /* BCMPCIE */
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
+{
+	int bcn_li_dtim = 1; /* default: no DTIM skip */
+	int ret = -1;
+	int dtim_period = 0;
+	int ap_beacon = 0;
+	int allowed_skip_dtim_cnt = 0;
+	/* Check if associated */
+	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
+		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* read associated AP beacon interval */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* read associated ap's dtim setup */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* if not associated just exit */
+	if (dtim_period == 0) {
+		goto exit;
+	}
+
+	if (dhd->max_dtim_enable) {
+		bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
+		if (bcn_li_dtim == 0) {
+			bcn_li_dtim = 1;
+		}
+		bcn_li_dtim = MAX(dhd->suspend_bcn_li_dtim, bcn_li_dtim);
+	} else {
+		/* attempt to use the platform-defined DTIM skip interval */
+		bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+		/* check if sta listen interval fits into AP dtim */
+		if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
+			/* AP DTIM too big for our listen interval: no DTIM skipping */
+			bcn_li_dtim = NO_DTIM_SKIP;
+			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+				__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
+			goto exit;
+		}
+
+		if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+			allowed_skip_dtim_cnt =
+				MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
+			bcn_li_dtim =
+				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+		}
+
+		if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
+			/* Round up dtim_skip to fit into STAs Listen Interval */
+			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
+			DHD_TRACE(("%s adjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+		}
+	}
+
+	if (dhd->conf->suspend_bcn_li_dtim >= 0)
+		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
+	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+exit:
+	return bcn_li_dtim;
+}
+#endif /* BCMPCIE */
+
+/* Check if the mode supports STA MODE */
+bool dhd_support_sta_mode(dhd_pub_t *dhd)
+{
+
+#ifdef  WL_CFG80211
+	if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+		return FALSE;
+	else
+#endif /* WL_CFG80211 */
+		return TRUE;
+}
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd)
+{
+	char				buf[32] = {0};
+	const char			*str;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
+	int					buf_len;
+	int					str_len;
+	int res					= -1;
+
+	if (!dhd_support_sta_mode(dhd))
+		return res;
+
+	DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, sizeof(buf) - 1);
+	buf[ sizeof(buf) - 1 ] = '\0';
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+	mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+	/* Setup keep alive zero for null packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+	bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
+	/* Keep-alive attributes are set in local	variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+	return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+
+#define CSCAN_TLV_TYPE_SSID_IE	'S'
+/*
+ *  SSIDs list parsing from cscan tlv list
+ */
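+/*
+ * Each TLV entry is the byte 'S' (CSCAN_TLV_TYPE_SSID_IE), followed by a
+ * length byte and then that many SSID bytes; a zero length denotes a
+ * broadcast (wildcard) SSID.
+ */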
+int
+wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
+		*bytes_left -= 1;
+		str += 1;
+		ssid[idx].rssi_thresh = 0;
+		ssid[idx].flags = 0;
+		if (str[0] == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
+		}
+		else if (str[0] <= DOT11_MAX_SSID_LEN) {
+			/* Get proper SSID size */
+			ssid[idx].SSID_len = str[0];
+			*bytes_left -= 1;
+			str += 1;
+
+			/* Get SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s SSID len=%d exceeds remaining %d bytes\n",
+				__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return -1;
+			}
+
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+			ssid[idx].hidden = TRUE;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		}
+		else {
+			DHD_ERROR(("### SSID length %d exceeds maximum\n", str[0]));
+			return -1;
+		}
+
+		if (idx++ > max) {
+			DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+#if defined(WL_WIRELESS_EXT)
+/* Android ComboSCAN support */
+
+/*
+ *  data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+                     int input_size, int *bytes_left)
+{
+	char* str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	/* Clean all dest bytes */
+	memset(dst, 0, dst_size);
+	while (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ *  channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+                             int channel_num, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+			*list_str = str;
+			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* All channels */
+			channel_list[idx] = 0x0;
+		}
+		else {
+			channel_list[idx] = (uint16)str[0];
+			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
+		}
+		*bytes_left -= 1;
+		str += 1;
+
+		if (idx++ > 255) {
+			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx.  max specifies the size of the ssid array.  Parses the SSIDs
+ * and returns the updated idx; if the returned idx >= max, not all SSIDs fit
+ * and the excess were not copied.  Returns -1 on a null list or an SSID that
+ * is too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+	char* str, *ptr;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	for (str = *list_str; str != NULL; str = ptr) {
+
+		/* check for next TAG */
+		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+			*list_str	 = str + strlen(GET_CHANNEL);
+			return idx;
+		}
+
+		if ((ptr = strchr(str, ',')) != NULL) {
+			*ptr++ = '\0';
+		}
+
+		if (strlen(str) > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (strlen(str) == 0)
+			ssid[idx].SSID_len = 0;
+
+		if (idx < max) {
+			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
+			strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
+			ssid[idx].SSID_len = strlen(str);
+		}
+		idx++;
+	}
+	return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+	int num;
+	int val;
+	char* str;
+	char* endptr = NULL;
+
+	if ((list_str == NULL)||(*list_str == NULL))
+		return -1;
+
+	str = *list_str;
+	num = 0;
+	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+		val = (int)strtoul(str, &endptr, 0);
+		if (endptr == str) {
+			printf("could not parse channel number starting at"
+				" substring \"%s\" in list:\n%s\n",
+				str, *list_str);
+			return -1;
+		}
+		str = endptr + strspn(endptr, " ,");
+
+		if (num == channel_num) {
+			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+				channel_num, *list_str));
+			return -1;
+		}
+
+		channel_list[num++] = (uint16)val;
+	}
+	*list_str = str;
+	return num;
+}
+
+#endif /* WL_WIRELESS_EXT */
+
+#if defined(TRAFFIC_MGMT_DWM)
+static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
+	trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len)
+{
+	int ret = 0;
+	uint32              i;
+	trf_mgmt_filter_t   *trf_mgmt_filter;
+	uint8               dwm_tbl_entry;
+	uint32              dscp = 0;
+	uint16              dwm_filter_enabled = 0;
+
+
+	/* Check parameter length is adequate */
+	if (len < (OFFSETOF(trf_mgmt_filter_list_t, filter) +
+		trf_mgmt_filter_list->num_filters * sizeof(trf_mgmt_filter_t))) {
+		ret = BCME_BUFTOOSHORT;
+		return ret;
+	}
+
+	bzero(&dhd->dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
+
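+	/*
+	 * Each filter with TRF_FILTER_DWM set contributes one table entry,
+	 * indexed by its DSCP value: the entry carries the WMM AC priority,
+	 * a "filter set" marker and optionally the "favored" bit.
+	 */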
+	for (i = 0; i < trf_mgmt_filter_list->num_filters; i++) {
+		trf_mgmt_filter = &trf_mgmt_filter_list->filter[i];
+
+		dwm_filter_enabled = (trf_mgmt_filter->flags & TRF_FILTER_DWM);
+
+		if (dwm_filter_enabled) {
+			dscp = trf_mgmt_filter->dscp;
+			if (dscp >= DHD_DWM_TBL_SIZE) {
+				ret = BCME_BADARG;
+				return ret;
+			}
+		}
+
+		dhd->dhd_tm_dwm_tbl.dhd_dwm_enabled = 1;
+		/* set WMM AC bits */
+		dwm_tbl_entry = (uint8) trf_mgmt_filter->priority;
+		DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry);
+
+		/* set favored bits */
+		if (trf_mgmt_filter->flags & TRF_FILTER_FAVORED)
+			DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry);
+
+		dhd->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp] =  dwm_tbl_entry;
+	}
+	return ret;
+}
+#endif /* TRAFFIC_MGMT_DWM */
+
+/* Given a filename and a download type, returns a buffer pointer and length
+ * for download to firmware. Type can be FW, NVRAM or CLM_BLOB.
+ */
+int dhd_get_download_buffer(dhd_pub_t	*dhd, char *file_path, download_type_t component,
+	char ** buffer, int *length)
+
+{
+	int ret = BCME_ERROR;
+	int len = 0;
+	int file_len;
+	void *image = NULL;
+	uint8 *buf = NULL;
+
+	/* Point to cache if available. */
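+	/*
+	 * With CACHE_FW_IMAGES the first successful load of each component is
+	 * kept in dhd->cached_* and reused on later calls; in that build
+	 * dhd_free_download_buffer() below is a no-op so the cache survives.
+	 */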
+#ifdef CACHE_FW_IMAGES
+	if (component == FW) {
+		if (dhd->cached_fw_length) {
+			len = dhd->cached_fw_length;
+			buf = dhd->cached_fw;
+		}
+	}
+	else if (component == NVRAM) {
+		if (dhd->cached_nvram_length) {
+			len = dhd->cached_nvram_length;
+			buf = dhd->cached_nvram;
+		}
+	}
+	else if (component == CLM_BLOB) {
+		if (dhd->cached_clm_length) {
+			len = dhd->cached_clm_length;
+			buf = dhd->cached_clm;
+		}
+	} else {
+		return ret;
+	}
+#endif /* CACHE_FW_IMAGES */
+	/* No Valid cache found on this call */
+	if (!len) {
+		file_len = *length;
+		*length = 0;
+
+		if (file_path) {
+			image = dhd_os_open_image(file_path);
+			if (image == NULL) {
+				printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
+				goto err;
+			}
+		}
+
+		buf = MALLOCZ(dhd->osh, file_len);
+		if (buf == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+				__FUNCTION__, file_len));
+			goto err;
+		}
+
+		/* Download image */
+#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI)
+		if (!image) {
+			memcpy(buf, nvram_arr, sizeof(nvram_arr));
+			len = sizeof(nvram_arr);
+		} else {
+			len = dhd_os_get_image_block((char *)buf, file_len, image);
+			if ((len <= 0 || len > file_len)) {
+				MFREE(dhd->osh, buf, file_len);
+				goto err;
+			}
+		}
+#else
+		len = dhd_os_get_image_block((char *)buf, file_len, image);
+		if ((len <= 0 || len > file_len)) {
+			MFREE(dhd->osh, buf, file_len);
+			goto err;
+		}
+#endif /* BCMEMBEDIMAGE && DHD_EFI */
+	}
+
+	ret = BCME_OK;
+	*length = len;
+	*buffer = (char *)buf;
+
+	/* Cache if first call. */
+#ifdef CACHE_FW_IMAGES
+	if (component == FW) {
+		if (!dhd->cached_fw_length) {
+			dhd->cached_fw = buf;
+			dhd->cached_fw_length = len;
+		}
+	}
+	else if (component == NVRAM) {
+		if (!dhd->cached_nvram_length) {
+			dhd->cached_nvram = buf;
+			dhd->cached_nvram_length = len;
+		}
+	}
+	else if (component == CLM_BLOB) {
+		if (!dhd->cached_clm_length) {
+			 dhd->cached_clm = buf;
+			 dhd->cached_clm_length = len;
+		}
+	}
+#endif /* CACHE_FW_IMAGES */
+
+err:
+	if (image)
+		dhd_os_close_image(image);
+
+	return ret;
+}
+
+int
+dhd_download_2_dongle(dhd_pub_t	*dhd, char *iovar, uint16 flag, uint16 dload_type,
+	unsigned char *dload_buf, int len)
+{
+	struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
+	int err = 0;
+	int dload_data_offset;
+	static char iovar_buf[WLC_IOCTL_MEDLEN];
+	int iovar_len;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+
+	dload_data_offset = OFFSETOF(wl_dload_data_t, data);
+	dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
+	dload_ptr->dload_type = dload_type;
+	dload_ptr->len = htod32(len - dload_data_offset);
+	dload_ptr->crc = 0;
+	len = ROUNDUP(len, 8);
+
+	iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
+		(uint)len, iovar_buf, sizeof(iovar_buf));
+	if (iovar_len == 0) {
+		DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
+		           __FUNCTION__, iovar));
+		return BCME_BUFTOOSHORT;
+	}
+
+	err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
+			iovar_len, IOV_SET, 0);
+
+	return err;
+}
+
+int
+dhd_download_blob(dhd_pub_t	*dhd, unsigned char *image,
+		uint32 len, char *iovar)
+{
+	int chunk_len;
+	int size2alloc;
+	unsigned char *new_buf;
+	int err = 0, data_offset;
+	uint16 dl_flag = DL_BEGIN;
+
+	data_offset = OFFSETOF(wl_dload_data_t, data);
+	size2alloc = data_offset + MAX_CHUNK_LEN;
+	size2alloc = ROUNDUP(size2alloc, 8);
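+	/*
+	 * The blob is pushed to the dongle in MAX_CHUNK_LEN pieces, each
+	 * wrapped in a wl_dload_data_t header: the first chunk carries
+	 * DL_BEGIN, the last one DL_END, so the firmware can reassemble
+	 * the image.
+	 */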
+
+	if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
+		do {
+			chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
+				MAX_CHUNK_LEN, image);
+			if (chunk_len < 0) {
+				DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
+					__FUNCTION__, chunk_len));
+				err = BCME_ERROR;
+				goto exit;
+			}
+
+			if (len - chunk_len == 0)
+				dl_flag |= DL_END;
+
+			err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
+				new_buf, data_offset + chunk_len);
+
+			dl_flag &= ~DL_BEGIN;
+
+			len = len - chunk_len;
+		} while ((len > 0) && (err == 0));
+	} else {
+		err = BCME_NOMEM;
+	}
+exit:
+	if (new_buf) {
+		MFREE(dhd->osh, new_buf, size2alloc);
+	}
+
+	return err;
+}
+
+int
+dhd_check_current_clm_data(dhd_pub_t *dhd)
+{
+	char iovbuf[WLC_IOCTL_SMLEN] = {0};
+	wl_country_t *cspec;
+	int err = BCME_OK;
+
+	bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	if (err) {
+		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
+		return err;
+	}
+
+	cspec = (wl_country_t *)iovbuf;
+	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
+		DHD_ERROR(("%s: ----- This FW does not include CLM data -----\n",
+			__FUNCTION__));
+		return FALSE;
+	}
+	DHD_ERROR(("%s: ----- This FW includes CLM data -----\n",
+		__FUNCTION__));
+	return TRUE;
+}
+
+int
+dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
+{
+	char *clm_blob_path;
+	int len;
+	unsigned char *imgbuf = NULL;
+	int err = BCME_OK;
+	char iovbuf[WLC_IOCTL_SMLEN] = {0};
+	int status = FALSE;
+
+	if (clm_path && clm_path[0] != '\0') {
+		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
+			DHD_ERROR(("clm path exceeds max len\n"));
+			return BCME_ERROR;
+		}
+		clm_blob_path = clm_path;
+		DHD_TRACE(("clm path from module param:%s\n", clm_path));
+	} else {
+		clm_blob_path = CONFIG_BCMDHD_CLM_PATH;
+	}
+
+	/* If CLM blob file is found on the filesystem, download the file.
+	 * After CLM file download, or if the blob file is not present,
+	 * validate the country code before proceeding with the initialization.
+	 * If country code is not valid, fail the initialization.
+	 */
+
+	imgbuf = dhd_os_open_image((char *)clm_blob_path);
+	if (imgbuf == NULL) {
+		printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_blob_path);
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+		if (dhd->is_blob) {
+			err = BCME_ERROR;
+		} else {
+			status = dhd_check_current_clm_data(dhd);
+			if (status == TRUE) {
+				err = BCME_OK;
+			} else {
+				err = BCME_ERROR;
+			}
+		}
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+		goto exit;
+	}
+
+	len = dhd_os_get_image_size(imgbuf);
+
+	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) {
+		status = dhd_check_current_clm_data(dhd);
+		if (status == TRUE) {
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+			if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
+				if (dhd->is_blob) {
+					err = BCME_ERROR;
+				}
+				goto exit;
+			}
+#else
+			DHD_ERROR(("%s: CLM data already exists in F/W, "
+				"new CLM data will be appended to the existing CLM data!\n",
+				__FUNCTION__));
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+		}
+
+		/* Found blob file. Download the file */
+		DHD_ERROR(("clm file download from %s \n", clm_blob_path));
+		err = dhd_download_blob(dhd, imgbuf, len, "clmload");
+		if (err) {
+			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
+			/* Retrieve clmload_status and print */
+			bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
+			err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+			if (err) {
+				DHD_ERROR(("%s: clmload_status get failed err=%d \n",
+					__FUNCTION__, err));
+			} else {
+				DHD_ERROR(("%s: clmload_status: %d \n",
+					__FUNCTION__, *((int *)iovbuf)));
+				if (*((int *)iovbuf) == CHIPID_MISMATCH) {
+					DHD_ERROR(("Chip ID mismatch error \n"));
+				}
+			}
+			err = BCME_ERROR;
+			goto exit;
+		} else {
+			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
+		}
+	} else {
+		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf));
+	}
+
+	/* Verify country code */
+	status = dhd_check_current_clm_data(dhd);
+
+	if (status != TRUE) {
+		/* Country code not initialized or CLM download not proper */
+		DHD_ERROR(("country code not initialized\n"));
+		err = BCME_ERROR;
+	}
+exit:
+
+	if (imgbuf) {
+		dhd_os_close_image(imgbuf);
+	}
+
+	return err;
+}
+
+void dhd_free_download_buffer(dhd_pub_t	*dhd, void *buffer, int length)
+{
+#ifndef CACHE_FW_IMAGES
+	MFREE(dhd->osh, buffer, length);
+#endif /* CACHE_FW_IMAGES */
+}
+
+#if defined(DHD_8021X_DUMP)
+#define EAP_PRINT(str) \
+	DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \
+	ifname, direction ? "TX" : "RX"));
+#else
+#define EAP_PRINT(str)
+#endif /* DHD_8021X_DUMP */
+/* Parse EAPOL 4 way handshake messages */
+void
+dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname,
+	char *dump_data, bool direction)
+{
+	unsigned char type;
+	int pair, ack, mic, kerr, req, sec, install;
+	unsigned short us_tmp;
+
+	type = dump_data[15];
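+	/*
+	 * dump_data points at the Ethernet frame. Offsets used below:
+	 * [14] EAPOL version, [15] EAPOL packet type (0 = EAP packet, 3 = EAPOL-Key),
+	 * [18] EAP code (EAP packets) or key descriptor type (EAPOL-Key),
+	 * [22] EAP type (1 = Identity, 254 = expanded/WSC), [30] WSC opcode,
+	 * [41] WPS message type (4 = M1 ... 12 = M8).
+	 */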
+	if (type == 0) {
+		if ((dump_data[22] == 1) && (dump_data[18] == 1)) {
+			dhd->conf->eapol_status = EAPOL_STATUS_WPS_REQID;
+			EAP_PRINT("EAP Packet, Request, Identity");
+		} else if ((dump_data[22] == 1) && (dump_data[18] == 2)) {
+			dhd->conf->eapol_status = EAPOL_STATUS_WPS_RSPID;
+			EAP_PRINT("EAP Packet, Response, Identity");
+		} else if (dump_data[22] == 254) {
+			if (dump_data[30] == 1) {
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_WSC_START;
+				EAP_PRINT("EAP Packet, WSC Start");
+			} else if (dump_data[30] == 4) {
+				if (dump_data[41] == 4) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M1;
+					EAP_PRINT("EAP Packet, WPS M1");
+				} else if (dump_data[41] == 5) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M2;
+					EAP_PRINT("EAP Packet, WPS M2");
+				} else if (dump_data[41] == 7) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M3;
+					EAP_PRINT("EAP Packet, WPS M3");
+				} else if (dump_data[41] == 8) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M4;
+					EAP_PRINT("EAP Packet, WPS M4");
+				} else if (dump_data[41] == 9) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M5;
+					EAP_PRINT("EAP Packet, WPS M5");
+				} else if (dump_data[41] == 10) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M6;
+					EAP_PRINT("EAP Packet, WPS M6");
+				} else if (dump_data[41] == 11) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M7;
+					EAP_PRINT("EAP Packet, WPS M7");
+				} else if (dump_data[41] == 12) {
+					dhd->conf->eapol_status = EAPOL_STATUS_WPS_M8;
+					EAP_PRINT("EAP Packet, WPS M8");
+				}
+			} else if (dump_data[30] == 5) {
+				dhd->conf->eapol_status = EAPOL_STATUS_WPS_DONE;
+				EAP_PRINT("EAP Packet, WSC Done");
+			}
+		} else {
+			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+				ifname, direction ? "TX" : "RX",
+				dump_data[14], dump_data[15], dump_data[30]));
+		}
+	} else if (type == 3 && dump_data[18] == 2) {
+		us_tmp = (dump_data[19] << 8) | dump_data[20];
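+		/*
+		 * EAPOL-Key "Key Information" bits (IEEE 802.11):
+		 * 0x08 Key Type (pairwise), 0x40 Install, 0x80 Key Ack,
+		 * 0x100 Key MIC, 0x200 Secure, 0x400 Error, 0x800 Request.
+		 * The flag combinations below identify handshake messages M1-M4.
+		 */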
+		pair =  0 != (us_tmp & 0x08);
+		ack = 0  != (us_tmp & 0x80);
+		mic = 0  != (us_tmp & 0x100);
+		kerr =  0 != (us_tmp & 0x400);
+		req = 0  != (us_tmp & 0x800);
+		sec = 0  != (us_tmp & 0x200);
+		install  = 0 != (us_tmp & 0x40);
+
+		if (!sec && !mic && ack && !install && pair && !kerr && !req) {
+			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M1;
+			EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
+		} else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
+			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M2;
+			EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
+		} else if (pair && ack && mic && sec && !kerr && !req) {
+			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M3;
+			EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
+		} else if (pair && !install && !ack && mic && sec && !req && !kerr) {
+			dhd->conf->eapol_status = EAPOL_STATUS_WPA_M4;
+			EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
+		} else {
+			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+				ifname, direction ? "TX" : "RX",
+				dump_data[14], dump_data[15], dump_data[30]));
+		}
+	} else {
+		DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+			ifname, direction ? "TX" : "RX",
+			dump_data[14], dump_data[15], dump_data[30]));
+	}
+}
+
+#ifdef REPORT_FATAL_TIMEOUTS
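+/*
+ * Fatal-timeout support: four watchdog timers (scan, join, cmd and bus) catch
+ * a firmware that stops responding. On expiry the handler logs the event,
+ * stops the timer and, unless a bus error is already pending, asks the
+ * firmware to trap so a memory dump can be collected.
+ */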
+void init_dhd_timeouts(dhd_pub_t *pub)
+{
+	pub->timeout_info = MALLOC(pub->osh, sizeof(timeout_info_t));
+	if (pub->timeout_info == NULL) {
+		DHD_ERROR(("%s: Failed to alloc timeout_info\n", __FUNCTION__));
+	} else {
+		DHD_INFO(("Initializing dhd_timeouts\n"));
+		pub->timeout_info->scan_timer_lock = dhd_os_spin_lock_init(pub->osh);
+		pub->timeout_info->join_timer_lock = dhd_os_spin_lock_init(pub->osh);
+		pub->timeout_info->bus_timer_lock = dhd_os_spin_lock_init(pub->osh);
+		pub->timeout_info->cmd_timer_lock = dhd_os_spin_lock_init(pub->osh);
+		pub->timeout_info->scan_timeout_val = SCAN_TIMEOUT_DEFAULT;
+		pub->timeout_info->join_timeout_val = JOIN_TIMEOUT_DEFAULT;
+		pub->timeout_info->cmd_timeout_val = CMD_TIMEOUT_DEFAULT;
+		pub->timeout_info->bus_timeout_val = BUS_TIMEOUT_DEFAULT;
+		pub->timeout_info->scan_timer_active = FALSE;
+		pub->timeout_info->join_timer_active = FALSE;
+		pub->timeout_info->cmd_timer_active = FALSE;
+		pub->timeout_info->bus_timer_active = FALSE;
+		pub->timeout_info->cmd_join_error = WLC_SSID_MASK;
+		pub->timeout_info->cmd_request_id = 0;
+	}
+}
+
+void
+deinit_dhd_timeouts(dhd_pub_t *pub)
+{
+	/* stop the join, scan, bus and cmd timers,
+	 * as failing to do so may cause a kernel panic
+	 * when the module is removed (rmmod)
+	 */
+	if (!pub->timeout_info) {
+		DHD_ERROR(("timeout_info pointer is NULL\n"));
+		ASSERT(0);
+		return;
+	}
+	if (dhd_stop_scan_timer(pub)) {
+		DHD_ERROR(("dhd_stop_scan_timer failed\n"));
+		ASSERT(0);
+	}
+	if (dhd_stop_bus_timer(pub)) {
+		DHD_ERROR(("dhd_stop_bus_timer failed\n"));
+		ASSERT(0);
+	}
+	if (dhd_stop_cmd_timer(pub)) {
+		DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
+		ASSERT(0);
+	}
+	if (dhd_stop_join_timer(pub)) {
+		DHD_ERROR(("dhd_stop_join_timer failed\n"));
+		ASSERT(0);
+	}
+
+	dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->scan_timer_lock);
+	dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->join_timer_lock);
+	dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->bus_timer_lock);
+	dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->cmd_timer_lock);
+	MFREE(pub->osh, pub->timeout_info, sizeof(timeout_info_t));
+	pub->timeout_info = NULL;
+}
+
+static void
+dhd_cmd_timeout(void *ctx)
+{
+	dhd_pub_t *pub = (dhd_pub_t *)ctx;
+	unsigned long flags;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ASSERT(0);
+		return;
+	}
+
+	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+	if (pub->timeout_info && pub->timeout_info->cmd_timer_active) {
+		DHD_ERROR(("\nERROR COMMAND TIMEOUT TO:%d\n", pub->timeout_info->cmd_timeout_val));
+		DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+#ifdef PCIE_OOB
+		/* Assert device_wake so that UART_Rx is available */
+		if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
+			DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
+			ASSERT(0);
+		}
+#endif /* PCIE_OOB */
+		if (dhd_stop_cmd_timer(pub)) {
+			DHD_ERROR(("%s: dhd_stop_cmd_timer() failed\n", __FUNCTION__));
+			ASSERT(0);
+		}
+		dhd_wakeup_ioctl_event(pub, IOCTL_RETURN_ON_ERROR);
+		if (!dhd_query_bus_erros(pub))
+			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_COMMAND_TO);
+	} else {
+		DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+	}
+}
+
+int
+dhd_start_cmd_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags = 0;
+	uint32 cmd_to_ms;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ret = BCME_ERROR;
+		ASSERT(0);
+		goto exit_null;
+	}
+	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+	cmd_to_ms = pub->timeout_info->cmd_timeout_val;
+
+	if (pub->timeout_info->cmd_timeout_val == 0) {
+		/* Disable Command timer timeout */
+		DHD_INFO(("DHD: Command Timeout Disabled\n"));
+		goto exit;
+	}
+	if (pub->timeout_info->cmd_timer_active) {
+		DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+		ret = BCME_ERROR;
+		ASSERT(0);
+	} else {
+		pub->timeout_info->cmd_timer = osl_timer_init(pub->osh,
+			"cmd_timer", dhd_cmd_timeout, pub);
+		osl_timer_update(pub->osh, pub->timeout_info->cmd_timer,
+			cmd_to_ms, 0);
+		pub->timeout_info->cmd_timer_active = TRUE;
+	}
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s Cmd Timer started\n", __FUNCTION__));
+	}
+exit:
+	DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+exit_null:
+	return ret;
+}
+
+int
+dhd_stop_cmd_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags = 0;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ret = BCME_ERROR;
+		ASSERT(0);
+		goto exit;
+	}
+	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+
+	if (pub->timeout_info->cmd_timer_active) {
+		osl_timer_del(pub->osh, pub->timeout_info->cmd_timer);
+		pub->timeout_info->cmd_timer_active = FALSE;
+	}
+	else {
+		DHD_INFO(("DHD: CMD timer is not active\n"));
+	}
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s Cmd Timer Stopped\n", __FUNCTION__));
+	}
+	DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+exit:
+	return ret;
+}
+
+static int
+__dhd_stop_join_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ASSERT(0);
+		return BCME_ERROR;
+	}
+
+	if (pub->timeout_info->join_timer_active) {
+		osl_timer_del(pub->osh, pub->timeout_info->join_timer);
+		pub->timeout_info->join_timer_active = FALSE;
+	} else {
+		DHD_INFO(("DHD: JOIN timer is not active\n"));
+	}
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s: Join Timer Stopped\n", __FUNCTION__));
+	}
+	return ret;
+}
+
+static void
+dhd_join_timeout(void *ctx)
+{
+	dhd_pub_t *pub = (dhd_pub_t *)ctx;
+	unsigned long flags;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ASSERT(0);
+		return;
+	}
+
+	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+	if (pub->timeout_info->join_timer_active) {
+		DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+		if (dhd_stop_join_timer(pub)) {
+			DHD_ERROR(("%s: dhd_stop_join_timer() failed\n", __FUNCTION__));
+			ASSERT(0);
+		}
+		if (pub->timeout_info->cmd_join_error) {
+			DHD_ERROR(("\nERROR JOIN TIMEOUT TO:%d:0x%x\n",
+				pub->timeout_info->join_timeout_val,
+				pub->timeout_info->cmd_join_error));
+#ifdef DHD_FW_COREDUMP
+				/* collect core dump and crash */
+				pub->memdump_enabled = DUMP_MEMFILE_BUGON;
+				pub->memdump_type = DUMP_TYPE_JOIN_TIMEOUT;
+				dhd_bus_mem_dump(pub);
+#endif /* DHD_FW_COREDUMP */
+
+		}
+	} else {
+		DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+	}
+}
+
+int
+dhd_start_join_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags = 0;
+	uint32 join_to_ms;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ret = BCME_ERROR;
+		ASSERT(0);
+		goto exit;
+	}
+
+	join_to_ms = pub->timeout_info->join_timeout_val;
+	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+	if (pub->timeout_info->join_timer_active) {
+		DHD_ERROR(("%s:Stopping active timer\n", __FUNCTION__));
+		__dhd_stop_join_timer(pub);
+	}
+	if (pub->timeout_info->join_timeout_val == 0) {
+		/* Disable Join timer timeout */
+		DHD_INFO(("DHD: Join Timeout Disabled\n"));
+	} else {
+		pub->timeout_info->join_timer = osl_timer_init(pub->osh,
+			"join_timer", dhd_join_timeout, pub);
+		osl_timer_update(pub->osh, pub->timeout_info->join_timer, join_to_ms, 0);
+		pub->timeout_info->join_timer_active = TRUE;
+		pub->timeout_info->cmd_join_error |= WLC_SSID_MASK;
+	}
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s:Join Timer started 0x%x\n", __FUNCTION__,
+			pub->timeout_info->cmd_join_error));
+	}
+	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+exit:
+	return ret;
+}
+
+int
+dhd_stop_join_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags;
+
+	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+	ret = __dhd_stop_join_timer(pub);
+	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+	return ret;
+}
+
+static void
+dhd_scan_timeout(void *ctx)
+{
+	dhd_pub_t *pub = (dhd_pub_t *)ctx;
+	unsigned long flags;
+
+	 if (pub->timeout_info == NULL) {
+		DHD_ERROR(("timeout_info pointer is NULL\n"));
+		ASSERT(0);
+		return;
+	 }
+
+	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+	if (pub->timeout_info && pub->timeout_info->scan_timer_active) {
+		DHD_ERROR(("\nERROR SCAN TIMEOUT TO:%d\n", pub->timeout_info->scan_timeout_val));
+		DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+		dhd_stop_scan_timer(pub);
+		if (!dhd_query_bus_erros(pub))
+			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_SCAN_TO);
+	} else {
+		DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+	}
+}
+
+int
+dhd_start_scan_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags = 0;
+	uint32 scan_to_ms;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ret = BCME_ERROR;
+		ASSERT(0);
+		goto exit_null;
+	}
+	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+	scan_to_ms = pub->timeout_info->scan_timeout_val;
+
+	if (pub->timeout_info->scan_timer_active) {
+		/* NOTE : New scan timeout value will be effective
+		 * only once current scan is completed.
+		 */
+		DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+		ret = BCME_ERROR;
+		goto exit;
+	}
+
+	if (pub->timeout_info->scan_timeout_val == 0) {
+		/* Disable Scan timer timeout */
+		DHD_INFO(("DHD: Scan Timeout Disabled\n"));
+	} else {
+		pub->timeout_info->scan_timer = osl_timer_init(pub->osh, "scan_timer",
+			dhd_scan_timeout, pub);
+		pub->timeout_info->scan_timer_active = TRUE;
+		osl_timer_update(pub->osh, pub->timeout_info->scan_timer, scan_to_ms, 0);
+	}
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s Scan Timer started\n", __FUNCTION__));
+	}
+exit:
+	DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+exit_null:
+	return ret;
+}
+
+int
+dhd_stop_scan_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags = 0;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ret = BCME_ERROR;
+		ASSERT(0);
+		goto exit;
+	}
+	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+
+	if (pub->timeout_info->scan_timer_active) {
+		osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
+		pub->timeout_info->scan_timer_active = FALSE;
+	}
+	else {
+		DHD_INFO(("DHD: SCAN timer is not active\n"));
+	}
+
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
+	}
+	DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+exit:
+	return ret;
+}
+
+static void
+dhd_bus_timeout(void *ctx)
+{
+	dhd_pub_t *pub = (dhd_pub_t *)ctx;
+	unsigned long flags;
+
+	if (pub->timeout_info == NULL) {
+		DHD_ERROR(("timeout_info pointer is NULL\n"));
+		ASSERT(0);
+		return;
+	}
+
+	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+	if (pub->timeout_info->bus_timer_active) {
+		DHD_ERROR(("\nERROR BUS TIMEOUT TO:%d\n", pub->timeout_info->bus_timeout_val));
+		DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+#ifdef PCIE_OOB
+		/* Assert device_wake so that UART_Rx is available */
+		if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
+			DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
+			ASSERT(0);
+		}
+#endif /* PCIE_OOB */
+		if (dhd_stop_bus_timer(pub)) {
+			DHD_ERROR(("%s: dhd_stop_bus_timer() failed\n", __FUNCTION__));
+			ASSERT(0);
+		}
+		if (!dhd_query_bus_erros(pub))
+			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_OQS_TO);
+	} else {
+		DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+	}
+}
+
+int
+dhd_start_bus_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags = 0;
+	uint32 bus_to_ms;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ret = BCME_ERROR;
+		ASSERT(0);
+		goto exit_null;
+	}
+	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+	bus_to_ms = pub->timeout_info->bus_timeout_val;
+
+	if (pub->timeout_info->bus_timeout_val == 0) {
+		/* Disable Bus timer timeout */
+		DHD_INFO(("DHD: Bus Timeout Disabled\n"));
+		goto exit;
+	}
+	if (pub->timeout_info->bus_timer_active) {
+		DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+		ret = BCME_ERROR;
+		ASSERT(0);
+	} else {
+		pub->timeout_info->bus_timer = osl_timer_init(pub->osh,
+			"bus_timer", dhd_bus_timeout, pub);
+		pub->timeout_info->bus_timer_active = TRUE;
+		osl_timer_update(pub->osh, pub->timeout_info->bus_timer, bus_to_ms, 0);
+	}
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s: BUS Timer started\n", __FUNCTION__));
+	}
+exit:
+	DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+exit_null:
+	return ret;
+}
+
+int
+dhd_stop_bus_timer(dhd_pub_t *pub)
+{
+	int ret = BCME_OK;
+	unsigned long flags = 0;
+
+	if (!pub->timeout_info) {
+		DHD_ERROR(("DHD: timeout_info NULL\n"));
+		ret = BCME_ERROR;
+		ASSERT(0);
+		goto exit;
+	}
+	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+
+	if (pub->timeout_info->bus_timer_active) {
+		osl_timer_del(pub->osh, pub->timeout_info->bus_timer);
+		pub->timeout_info->bus_timer_active = FALSE;
+	}
+	else {
+		DHD_INFO(("DHD: BUS timer is not active\n"));
+	}
+	if (ret == BCME_OK) {
+		DHD_INFO(("%s: Bus Timer Stopped\n", __FUNCTION__));
+	}
+	DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+exit:
+	return ret;
+}
+
+int
+dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd)
+{
+	DHD_INFO(("%s: id:%d\n", __FUNCTION__, id));
+	if (pub->timeout_info) {
+		pub->timeout_info->cmd_request_id = id;
+		pub->timeout_info->cmd = cmd;
+		return BCME_OK;
+	} else {
+		return BCME_ERROR;
+	}
+}
+
+uint16
+dhd_get_request_id(dhd_pub_t *pub)
+{
+	if (pub->timeout_info) {
+		return (pub->timeout_info->cmd_request_id);
+	} else {
+		return 0;
+	}
+}
+
+void
+dhd_set_join_error(dhd_pub_t *pub, uint32 mask)
+{
+	DHD_INFO(("Setting join Error %d\n", mask));
+	if (pub->timeout_info) {
+		pub->timeout_info->cmd_join_error |= mask;
+	}
+}
+
+void
+dhd_clear_join_error(dhd_pub_t *pub, uint32 mask)
+{
+	DHD_INFO(("clear join Error %d\n", mask));
+	if (pub->timeout_info) {
+		pub->timeout_info->cmd_join_error &= ~mask;
+	}
+}
+
+void
+dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+	if (pub->timeout_info) {
+		*to_val = pub->timeout_info->scan_timeout_val;
+	} else {
+		*to_val = 0;
+	}
+}
+
+void
+dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+	if (pub->timeout_info) {
+		DHD_INFO(("Setting TO val:%d\n", to_val));
+		pub->timeout_info->scan_timeout_val = to_val;
+	}
+}
+
+void
+dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+	if (pub->timeout_info) {
+		*to_val = pub->timeout_info->join_timeout_val;
+	} else {
+		*to_val = 0;
+	}
+}
+
+void
+dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+	if (pub->timeout_info) {
+		DHD_INFO(("Setting TO val:%d\n", to_val));
+		pub->timeout_info->join_timeout_val = to_val;
+	}
+}
+
+void
+dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+	if (pub->timeout_info) {
+		*to_val = pub->timeout_info->cmd_timeout_val;
+	} else {
+		*to_val = 0;
+	}
+}
+
+void
+dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+	if (pub->timeout_info) {
+		DHD_INFO(("Setting TO val:%d\n", to_val));
+		pub->timeout_info->cmd_timeout_val = to_val;
+	}
+}
+
+void
+dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val)
+{
+	if (pub->timeout_info) {
+		*to_val = pub->timeout_info->bus_timeout_val;
+	} else {
+		*to_val = 0;
+	}
+}
+
+void
+dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val)
+{
+	if (pub->timeout_info) {
+		DHD_INFO(("Setting TO val:%d\n", to_val));
+		pub->timeout_info->bus_timeout_val = to_val;
+	}
+}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef SHOW_LOGTRACE
+int
+dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
+		dhd_event_log_t *event_log)
+{
+	logstr_header_t *hdr = NULL;
+	uint32 *lognums = NULL;
+	char *logstrs = NULL;
+	int ram_index = 0;
+	char **fmts;
+	int num_fmts = 0;
+	int32 i = 0;
+
+	/* Remember header from the logstrs.bin file */
+	hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
+		sizeof(logstr_header_t));
+
+	if (hdr->log_magic == LOGSTRS_MAGIC) {
+		/*
+		 * New-format logstrs.bin: a trailing header describes the layout.
+		 */
+		num_fmts =	hdr->rom_logstrs_offset / sizeof(uint32);
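+		/*
+		 * Everything before the first ROM format string is lognum
+		 * entries (ROM then RAM), so rom_logstrs_offset / sizeof(uint32)
+		 * gives the total number of format strings.
+		 */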
+		ram_index = (hdr->ram_lognums_offset -
+			hdr->rom_lognums_offset) / sizeof(uint32);
+		lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
+		logstrs = (char *)	 &raw_fmts[hdr->rom_logstrs_offset];
+	} else {
+		/*
+		 * Legacy logstrs.bin format without header.
+		 */
+		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
+		if (num_fmts == 0) {
+			/* Legacy ROM/RAM logstrs.bin format:
+			  *  - ROM 'lognums' section
+			  *   - RAM 'lognums' section
+			  *   - ROM 'logstrs' section.
+			  *   - RAM 'logstrs' section.
+			  *
+			  * 'lognums' is an array of indexes for the strings in the
+			  * 'logstrs' section. The first uint32 is 0 (index of first
+			  * string in ROM 'logstrs' section).
+			  *
+			  * The 4324b5 is the only ROM that uses this legacy format. Use the
+			  * fixed number of ROM fmtnums to find the start of the RAM
+			  * 'lognums' section. Use the fixed first ROM string ("Con\n") to
+			  * find the ROM 'logstrs' section.
+			  */
+			#define NUM_4324B5_ROM_FMTS	186
+			#define FIRST_4324B5_ROM_LOGSTR "Con\n"
+			ram_index = NUM_4324B5_ROM_FMTS;
+			lognums = (uint32 *) raw_fmts;
+			num_fmts =	ram_index;
+			logstrs = (char *) &raw_fmts[num_fmts << 2];
+			while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
+				num_fmts++;
+				logstrs = (char *) &raw_fmts[num_fmts << 2];
+			}
+		} else {
+				/* Legacy RAM-only logstrs.bin format:
+				 *	  - RAM 'lognums' section
+				 *	  - RAM 'logstrs' section.
+				 *
+				 * 'lognums' is an array of indexes for the strings in the
+				 * 'logstrs' section. The first uint32 is an index to the
+				 * start of 'logstrs'. Therefore, if this index is divided
+				 * by 'sizeof(uint32)' it provides the number of logstr
+				 *	entries.
+				 */
+				ram_index = 0;
+				lognums = (uint32 *) raw_fmts;
+				logstrs = (char *)	&raw_fmts[num_fmts << 2];
+			}
+	}
+	fmts = MALLOC(osh, num_fmts  * sizeof(char *));
+	if (fmts == NULL) {
+		DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	event_log->fmts_size = num_fmts  * sizeof(char *);
+
+	for (i = 0; i < num_fmts; i++) {
+		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
+		* (they are 0-indexed relative to 'rom_logstrs_offset').
+		*
+		* RAM lognums are already indexed to point to the correct RAM logstrs (they
+		* are 0-indexed relative to the start of the logstrs.bin file).
+		*/
+		if (i == ram_index) {
+			logstrs = raw_fmts;
+		}
+		fmts[i] = &logstrs[lognums[i]];
+	}
+	event_log->fmts = fmts;
+	event_log->raw_fmts_size = logstrs_size;
+	event_log->raw_fmts = raw_fmts;
+	event_log->num_fmts = num_fmts;
+
+	return BCME_OK;
+}
+
+int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
+		uint32 *rodata_end)
+{
+	char *raw_fmts =  NULL;
+	uint32 read_size = READ_NUM_BYTES;
+	int error = 0;
+	char * cptr = NULL;
+	char c;
+	uint8 count = 0;
+
+	*ramstart = 0;
+	*rodata_start = 0;
+	*rodata_end = 0;
+
+	/* Allocate 1 byte more than read_size to terminate it with NULL */
+	raw_fmts = MALLOC(osh, read_size + 1);
+	if (raw_fmts == NULL) {
+		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* read ram start, rodata_start and rodata_end values from map  file */
+	while (count != ALL_MAP_VAL)
+	{
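+		/*
+		 * 'count' accumulates RAMSTART_BIT, RDSTART_BIT and RDEND_BIT;
+		 * keep reading until all three addresses are found (ALL_MAP_VAL)
+		 * or EOF is reached.
+		 */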
+		error = dhd_os_read_file(file, raw_fmts, read_size);
+		if (error < 0) {
+			DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
+					error));
+			goto fail;
+		}
+
+		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
+		raw_fmts[read_size] = '\0';
+
+		/* Get ramstart address */
+		if ((cptr = strstr(raw_fmts, ramstart_str))) {
+			cptr = cptr - BYTES_AHEAD_NUM;
+			sscanf(cptr, "%x %c text_start", ramstart, &c);
+			count |= RAMSTART_BIT;
+		}
+
+		/* Get ram rodata start address */
+		if ((cptr = strstr(raw_fmts, rodata_start_str))) {
+			cptr = cptr - BYTES_AHEAD_NUM;
+			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
+			count |= RDSTART_BIT;
+		}
+
+		/* Get ram rodata end address */
+		if ((cptr = strstr(raw_fmts, rodata_end_str))) {
+			cptr = cptr - BYTES_AHEAD_NUM;
+			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
+			count |= RDEND_BIT;
+		}
+
+		if (error < (int)read_size) {
+			/*
+			 * A short read means we have reached EOF, so stop.
+			 * (We deliberately seek back GO_BACK_FILE_POS_NUM_BYTES
+			 * after each full read so a string split across two
+			 * reads is not missed.)
+			 */
+			break;
+		}
+		memset(raw_fmts, 0, read_size);
+		/*
+		 * Seek back a predefined number of bytes so that a string and
+		 * its address are not missed even if they are split across the
+		 * next read.
+		 */
+		dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
+	}
+
+
+fail:
+	if (raw_fmts) {
+		MFREE(osh, raw_fmts, read_size + 1);
+		raw_fmts = NULL;
+	}
+	if (count == ALL_MAP_VAL)
+		return BCME_OK;
+	else {
+		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
+				count));
+		return BCME_ERROR;
+	}
+
+}
+
+#ifdef PCIE_FULL_DONGLE
+int
+dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
+		dhd_event_log_t *event_data)
+{
+	uint32 infobuf_version;
+	info_buf_payload_hdr_t *payload_hdr_ptr;
+	uint16 payload_hdr_type;
+	uint16 payload_hdr_length;
+
+	DHD_TRACE(("%s:Enter\n", __FUNCTION__));
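+	/*
+	 * Layout of a v1 info buffer: a 4-byte version word, then an
+	 * info_buf_payload_hdr_t (16-bit type, 16-bit length), then
+	 * 'length' bytes of log-trace payload.
+	 */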
+
+	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
+		DHD_ERROR(("%s: infobuf too small for version field\n",
+			__FUNCTION__));
+		goto exit;
+	}
+	infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
+	PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
+	if (infobuf_version != PCIE_INFOBUF_V1) {
+		DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
+			__FUNCTION__, infobuf_version));
+		goto exit;
+	}
+
+	/* Version 1 infobuf has a single type/length (and then value) field */
+	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
+		DHD_ERROR(("%s: infobuf too small for v1 type/length  fields\n",
+			__FUNCTION__));
+		goto exit;
+	}
+	/* Process/parse the common info payload header (type/length) */
+	payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
+	payload_hdr_type = ltoh16(payload_hdr_ptr->type);
+	payload_hdr_length = ltoh16(payload_hdr_ptr->length);
+	if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
+		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
+			__FUNCTION__, payload_hdr_type));
+		goto exit;
+	}
+	PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
+
+	/* Validate that the specified length isn't bigger than the
+	 * provided data.
+	 */
+	if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
+		DHD_ERROR(("%s: infobuf logtrace length is bigger"
+			" than actual buffer data\n", __FUNCTION__));
+		goto exit;
+	}
+	dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
+		event_data, payload_hdr_length);
+
+	return BCME_OK;
+
+exit:
+	return BCME_ERROR;
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif /* SHOW_LOGTRACE */
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+
+/* To handle the TDLS event in the dhd_common.c
+ */
+int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
+{
+	int ret = BCME_OK;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	ret = dhd_tdls_update_peer_info(dhd_pub, event);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	return ret;
+}
+
+int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
+{
+	tdls_peer_node_t *cur = NULL, *prev = NULL;
+	if (!dhd_pub)
+		return BCME_ERROR;
+	cur = dhd_pub->peer_tbl.node;
+
+	if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
+		return BCME_ERROR;
+
+	while (cur != NULL) {
+		prev = cur;
+		cur = cur->next;
+		MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
+	}
+	dhd_pub->peer_tbl.tdls_peer_count = 0;
+	dhd_pub->peer_tbl.node = NULL;
+	return BCME_OK;
+}
+#endif	/* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
+#ifdef DUMP_IOCTL_IOV_LIST
+void
+dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
+{
+	dll_t *item;
+	dhd_iov_li_t *iov_li;
+	dhd->dump_iovlist_len++;
+
+	if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
+		item = dll_head_p(list_head);
+		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+		dll_delete(item);
+		MFREE(dhd->osh, iov_li, sizeof(*iov_li));
+		dhd->dump_iovlist_len--;
+	}
+	dll_append(list_head, node);
+}
+
+void
+dhd_iov_li_print(dll_t *list_head)
+{
+	dhd_iov_li_t *iov_li;
+	dll_t *item, *next;
+	uint8 index = 0;
+	for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
+		next = dll_next_p(item);
+		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+		index++;
+		DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", index, iov_li->buff, iov_li->cmd));
+	}
+}
+
+void
+dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
+{
+	dll_t *item;
+	dhd_iov_li_t *iov_li;
+	while (!(dll_empty(list_head))) {
+		item = dll_head_p(list_head);
+		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+		dll_delete(item);
+		MFREE(dhd->osh, iov_li, sizeof(*iov_li));
+	}
+}
+#endif /* DUMP_IOCTL_IOV_LIST */
diff --git a/drivers/net/wireless/bcmdhd/dhd_config.c b/drivers/net/wireless/bcmdhd/dhd_config.c
new file mode 100644
index 0000000..f7818c9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_config.c
@@ -0,0 +1,3000 @@
+

+#include <typedefs.h>

+#include <osl.h>

+

+#include <bcmutils.h>

+#include <hndsoc.h>

+#include <bcmsdbus.h>

+#if defined(HW_OOB) || defined(FORCE_WOWLAN)

+#include <bcmdefs.h>

+#include <bcmsdh.h>

+#include <sdio.h>

+#include <sbchipc.h>

+#endif

+

+#include <dhd_config.h>

+#include <dhd_dbg.h>

+

+/* message levels */

+#define CONFIG_ERROR_LEVEL	0x0001

+#define CONFIG_TRACE_LEVEL	0x0002

+

+uint config_msg_level = CONFIG_ERROR_LEVEL;

+

+#define CONFIG_ERROR(x) \

+	do { \

+		if (config_msg_level & CONFIG_ERROR_LEVEL) { \

+			printk(KERN_ERR "CONFIG-ERROR) ");	\

+			printk x; \

+		} \

+	} while (0)

+#define CONFIG_TRACE(x) \

+	do { \

+		if (config_msg_level & CONFIG_TRACE_LEVEL) { \

+			printk(KERN_ERR "CONFIG-TRACE) ");	\

+			printk x; \

+		} \

+	} while (0)

+

+#define MAXSZ_BUF		1000

+#define	MAXSZ_CONFIG	4096

+

+#define htod32(i) i

+#define htod16(i) i

+#define dtoh32(i) i

+#define dtoh16(i) i

+#define htodchanspec(i) i

+#define dtohchanspec(i) i

+

+#define MAX_EVENT_BUF_NUM 16

+typedef struct eventmsg_buf {

+	u16 num;

+	struct {

+		u16 type;

+		bool set;

+	} event [MAX_EVENT_BUF_NUM];

+} eventmsg_buf_t;

+

+typedef struct cihp_name_map_t {

+	uint chip;

+	uint chiprev;

+	uint ag_type;

+	bool clm;

+	char *chip_name;

+	char *module_name;

+} cihp_name_map_t;

+

+/* Map of chip ID/revision to chip name and module name */

+#define DONT_CARE	9999

+const cihp_name_map_t chip_name_map [] = {

+	/* ChipID			Chiprev	AG	 	CLM		ChipName	ModuleName  */

+#ifdef BCMSDIO

+	{BCM43362_CHIP_ID,	0,	DONT_CARE,	FALSE,	"bcm40181a0",		""},

+	{BCM43362_CHIP_ID,	1,	DONT_CARE,	FALSE,	"bcm40181a2",		""},

+	{BCM4330_CHIP_ID,	4,	FW_TYPE_G,	FALSE,	"bcm40183b2",		""},

+	{BCM4330_CHIP_ID,	4,	FW_TYPE_AG,	FALSE,	"bcm40183b2_ag",	""},

+	{BCM43430_CHIP_ID,	0,	DONT_CARE,	FALSE,	"bcm43438a0",		""},

+	{BCM43430_CHIP_ID,	1,	DONT_CARE,	FALSE,	"bcm43438a1",		""},

+	{BCM43430_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm43436b0",		""},

+	{BCM43012_CHIP_ID,	1,	DONT_CARE,	TRUE,	"bcm43013b0",		""},

+	{BCM4334_CHIP_ID,	3,	DONT_CARE,	FALSE,	"bcm4334b1_ag",		""},

+	{BCM43340_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm43341b0_ag",	""},

+	{BCM43341_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm43341b0_ag",	""},

+	{BCM4324_CHIP_ID,	5,	DONT_CARE,	FALSE,	"bcm43241b4_ag",	""},

+	{BCM4335_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm4339a0_ag",		""},

+	{BCM4339_CHIP_ID,	1,	DONT_CARE,	FALSE,	"bcm4339a0_ag",		""},

+	{BCM4345_CHIP_ID,	6,	DONT_CARE,	FALSE,	"bcm43455c0_ag",	""},

+	{BCM43454_CHIP_ID,	6,	DONT_CARE,	FALSE,	"bcm43455c0_ag",	""},

+	{BCM4345_CHIP_ID,	9,	DONT_CARE,	FALSE,	"bcm43456c5_ag",	""},

+	{BCM43454_CHIP_ID,	9,	DONT_CARE,	FALSE,	"bcm43456c5_ag",	""},

+	{BCM4354_CHIP_ID,	1,	DONT_CARE,	FALSE,	"bcm4354a1_ag",		""},

+	{BCM4354_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm4356a2_ag",		""},

+	{BCM4356_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm4356a2_ag",		""},

+	{BCM4371_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm4356a2_ag",		""},

+	{BCM43569_CHIP_ID,	3,	DONT_CARE,	FALSE,	"bcm4358a3_ag",		""},

+	{BCM4359_CHIP_ID,	5,	DONT_CARE,	FALSE,	"bcm4359b1_ag",		""},

+	{BCM4359_CHIP_ID,	9,	DONT_CARE,	FALSE,	"bcm4359c0_ag",		""},

+	{BCM4362_CHIP_ID,	0,	DONT_CARE,	TRUE,	"bcm43752a0_ag",	""},

+#endif

+#ifdef BCMPCIE

+	{BCM4354_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm4356a2_pcie_ag",	""},

+	{BCM4356_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm4356a2_pcie_ag",	""},

+	{BCM4359_CHIP_ID,	9,	DONT_CARE,	FALSE,	"bcm4359c0_pcie_ag",	""},

+	{BCM4362_CHIP_ID,	0,	DONT_CARE,	TRUE,	"bcm43752a0_pcie_ag",	""},

+#endif

+#ifdef BCMDBUS

+	{BCM43143_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm43143b0",		""},

+	{BCM43242_CHIP_ID,	1,	DONT_CARE,	FALSE,	"bcm43242a1_ag",	""},

+	{BCM43569_CHIP_ID,	2,	DONT_CARE,	FALSE,	"bcm4358u_ag",		""},

+#endif

+};

+

+#ifdef BCMSDIO

+void

+dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list)

+{

+	int i;

+

+	CONFIG_TRACE(("%s called\n", __FUNCTION__));

+	if (mac_list->m_mac_list_head) {

+		for (i=0; i<mac_list->count; i++) {

+			if (mac_list->m_mac_list_head[i].mac) {

+				CONFIG_TRACE(("%s Free mac %p\n", __FUNCTION__, mac_list->m_mac_list_head[i].mac));

+				kfree(mac_list->m_mac_list_head[i].mac);

+			}

+		}

+		CONFIG_TRACE(("%s Free m_mac_list_head %p\n", __FUNCTION__, mac_list->m_mac_list_head));

+		kfree(mac_list->m_mac_list_head);

+	}

+	mac_list->count = 0;

+}

+

+void

+dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list)

+{

+	CONFIG_TRACE(("%s called\n", __FUNCTION__));

+

+	if (chip_nv_list->m_chip_nv_path_head) {

+		CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, chip_nv_list->m_chip_nv_path_head));

+		kfree(chip_nv_list->m_chip_nv_path_head);

+	}

+	chip_nv_list->count = 0;

+}

+

+#if defined(HW_OOB) || defined(FORCE_WOWLAN)

+void

+dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip)

+{

+	uint32 gpiocontrol, addr;

+

+	if (CHIPID(chip) == BCM43362_CHIP_ID) {

+		printf("%s: Enable HW OOB for 43362\n", __FUNCTION__);

+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol);

+		gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);

+		gpiocontrol |= 0x2;

+		bcmsdh_reg_write(sdh, addr, 4, gpiocontrol);

+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL);

+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL);

+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL);

+	}

+}

+#endif

+

+#define SBSDIO_CIS_SIZE_LIMIT		0x200

+#define F0_BLOCK_SIZE 32

+int

+dhd_conf_set_blksize(bcmsdh_info_t *sdh)

+{

+	int err = 0;

+	uint fn, numfn;

+	int32 blksize = 0, cur_blksize = 0;

+	uint8 cisd;

+

+	numfn = bcmsdh_query_iofnum(sdh);

+

+	for (fn = 0; fn <= numfn; fn++) {

+		if (!fn)

+			blksize = F0_BLOCK_SIZE;

+		else {

+			bcmsdh_cisaddr_read(sdh, fn, &cisd, 24);

+			blksize = cisd;

+			bcmsdh_cisaddr_read(sdh, fn, &cisd, 25);

+			blksize |= cisd << 8;

+		}

+#ifdef CUSTOM_SDIO_F2_BLKSIZE

+		if (fn == 2 && blksize > CUSTOM_SDIO_F2_BLKSIZE) {

+			blksize = CUSTOM_SDIO_F2_BLKSIZE;

+		}

+#endif

+		bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32),

+			&cur_blksize, sizeof(int32), FALSE);

+		if (cur_blksize != blksize) {

+			printf("%s: fn=%d, blksize=%d, cur_blksize=%d\n", __FUNCTION__,

+				fn, blksize, cur_blksize);

+			blksize |= (fn<<16);

+			if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &blksize,

+				sizeof(blksize), TRUE) != BCME_OK) {

+				DHD_ERROR(("%s: fail on %s set\n", __FUNCTION__, "sd_blocksize"));

+				err = -1;

+			}

+		}

+	}

+

+	return err;

+}

+

+int

+dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac)

+{

+	int i, err = -1;

+	uint8 *ptr = 0;

+	unsigned char tpl_code, tpl_link='\0';

+	uint8 header[3] = {0x80, 0x07, 0x19};

+	uint8 *cis;

+

+	if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) {

+		CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__));

+		return err;

+	}

+	bzero(cis, SBSDIO_CIS_SIZE_LIMIT);

+

+	if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) {

+		CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err));

+		MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);

+		return err;

+	}

+	err = -1; // reset err;

+	ptr = cis;

+	do {

+		/* 0xff means we're done */

+		tpl_code = *ptr;

+		ptr++;

+		if (tpl_code == 0xff)

+			break;

+

+		/* null entries have no link field or data */

+		if (tpl_code == 0x00)

+			continue;

+

+		tpl_link = *ptr;

+		ptr++;

+		/* a size of 0xff also means we're done */

+		if (tpl_link == 0xff)

+			break;

+		if (config_msg_level & CONFIG_TRACE_LEVEL) {

+			printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n",

+				__FUNCTION__, tpl_code, tpl_link, *ptr);

+			printk("%s: value:", __FUNCTION__);

+			for (i=0; i<tpl_link-1; i++) {

+				printk("%02x ", ptr[i+1]);

+				if ((i+1) % 16 == 0)

+					printk("\n");

+			}

+			printk("\n");

+		}

+

+		if (tpl_code == 0x80 && tpl_link == 0x07 && *ptr == 0x19)

+			break;

+

+		ptr += tpl_link;

+	} while (1);

+

+	if (tpl_code == 0x80 && tpl_link == 0x07 && *ptr == 0x19) {

+		/* Normal OTP */

+		memcpy(mac, ptr+1, 6);

+		err = 0;

+	} else {

+		ptr = cis;

+		/* Special OTP */

+		if (bcmsdh_reg_read(sdh, SI_ENUM_BASE, 4) == 0x16044330) {

+			for (i=0; i<SBSDIO_CIS_SIZE_LIMIT; i++) {

+				if (!memcmp(header, ptr, 3)) {

+					memcpy(mac, ptr+3, 6);

+					err = 0;

+					break;

+				}

+				ptr++;

+			}

+		}

+	}

+

+	ASSERT(cis);

+	MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);

+

+	return err;

+}

+

+void

+dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path)

+{

+	int i, j;

+	uint8 mac[6]={0};

+	int fw_num=0, mac_num=0;

+	uint32 oui, nic;

+	wl_mac_list_t *mac_list;

+	wl_mac_range_t *mac_range;

+	int fw_type, fw_type_new;

+	char *name_ptr;

+

+	mac_list = dhd->conf->fw_by_mac.m_mac_list_head;

+	fw_num = dhd->conf->fw_by_mac.count;

+	if (!mac_list || !fw_num)

+		return;

+

+	if (dhd_conf_get_mac(dhd, sdh, mac)) {

+		CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));

+		return;

+	}

+	oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);

+	nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);

+

+	/* find out the last '/' */

+	i = strlen(fw_path);

+	while (i > 0) {

+		if (fw_path[i] == '/') {

+			i++;

+			break;

+		}

+		i--;

+	}

+	name_ptr = &fw_path[i];

+

+	if (strstr(name_ptr, "_apsta"))

+		fw_type = FW_TYPE_APSTA;

+	else if (strstr(name_ptr, "_p2p"))

+		fw_type = FW_TYPE_P2P;

+	else if (strstr(name_ptr, "_mesh"))

+		fw_type = FW_TYPE_MESH;

+	else if (strstr(name_ptr, "_es"))

+		fw_type = FW_TYPE_ES;

+	else if (strstr(name_ptr, "_mfg"))

+		fw_type = FW_TYPE_MFG;

+	else

+		fw_type = FW_TYPE_STA;

+

+	for (i=0; i<fw_num; i++) {

+		mac_num = mac_list[i].count;

+		mac_range = mac_list[i].mac;

+		if (strstr(mac_list[i].name, "_apsta"))

+			fw_type_new = FW_TYPE_APSTA;

+		else if (strstr(mac_list[i].name, "_p2p"))

+			fw_type_new = FW_TYPE_P2P;

+		else if (strstr(mac_list[i].name, "_mesh"))

+			fw_type_new = FW_TYPE_MESH;

+		else if (strstr(mac_list[i].name, "_es"))

+			fw_type_new = FW_TYPE_ES;

+		else if (strstr(mac_list[i].name, "_mfg"))

+			fw_type_new = FW_TYPE_MFG;

+		else

+			fw_type_new = FW_TYPE_STA;

+		if (fw_type != fw_type_new) {

+			printf("%s: fw_type=%d != fw_type_new=%d\n", __FUNCTION__, fw_type, fw_type_new);

+			continue;

+		}

+		for (j=0; j<mac_num; j++) {

+			if (oui == mac_range[j].oui) {

+				if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {

+					strcpy(name_ptr, mac_list[i].name);

+					printf("%s: matched oui=0x%06X, nic=0x%06X\n",

+						__FUNCTION__, oui, nic);

+					printf("%s: fw_path=%s\n", __FUNCTION__, fw_path);

+					return;

+				}

+			}

+		}

+	}

+}

+

+void

+dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path)

+{

+	int i, j;

+	uint8 mac[6]={0};

+	int nv_num=0, mac_num=0;

+	uint32 oui, nic;

+	wl_mac_list_t *mac_list;

+	wl_mac_range_t *mac_range;

+	char *pnv_name;

+

+	mac_list = dhd->conf->nv_by_mac.m_mac_list_head;

+	nv_num = dhd->conf->nv_by_mac.count;

+	if (!mac_list || !nv_num)

+		return;

+

+	if (dhd_conf_get_mac(dhd, sdh, mac)) {

+		CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));

+		return;

+	}

+	oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);

+	nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);

+

+	/* find out the last '/' */

+	i = strlen(nv_path);

+	while (i > 0) {

+		if (nv_path[i] == '/') break;

+		i--;

+	}

+	pnv_name = &nv_path[i+1];

+

+	for (i=0; i<nv_num; i++) {

+		mac_num = mac_list[i].count;

+		mac_range = mac_list[i].mac;

+		for (j=0; j<mac_num; j++) {

+			if (oui == mac_range[j].oui) {

+				if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {

+					strcpy(pnv_name, mac_list[i].name);

+					printf("%s: matched oui=0x%06X, nic=0x%06X\n",

+						__FUNCTION__, oui, nic);

+					printf("%s: nv_path=%s\n", __FUNCTION__, nv_path);

+					return;

+				}

+			}

+		}

+	}

+}

+#endif

+

+void

+dhd_conf_free_country_list(conf_country_list_t *country_list)

+{

+	int i;

+

+	CONFIG_TRACE(("%s called\n", __FUNCTION__));

+	for (i=0; i<country_list->count; i++) {

+		if (country_list->cspec[i]) {

+			CONFIG_TRACE(("%s Free cspec %p\n", __FUNCTION__, country_list->cspec[i]));

+			kfree(country_list->cspec[i]);

+		}

+	}

+	country_list->count = 0;

+}

+

+void

+dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)

+{

+	int fw_type, ag_type;

+	uint chip, chiprev;

+	int i;

+	char *name_ptr;

+

+	chip = dhd->conf->chip;

+	chiprev = dhd->conf->chiprev;

+

+	if (fw_path[0] == '\0') {

+#ifdef CONFIG_BCMDHD_FW_PATH

+		bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);

+		if (fw_path[0] == '\0')

+#endif

+		{

+			printf("firmware path is null\n");

+			return;

+		}

+	}

+#ifndef FW_PATH_AUTO_SELECT

+	return;

+#endif

+

+	/* find out the last '/' */

+	i = strlen(fw_path);

+	while (i > 0) {

+		if (fw_path[i] == '/') {

+			i++;

+			break;

+		}

+		i--;

+	}

+	name_ptr = &fw_path[i];

+#ifdef BAND_AG

+	ag_type = FW_TYPE_AG;

+#else

+	ag_type = strstr(name_ptr, "_ag") ? FW_TYPE_AG : FW_TYPE_G;

+#endif

+	if (strstr(name_ptr, "_apsta"))

+		fw_type = FW_TYPE_APSTA;

+	else if (strstr(name_ptr, "_p2p"))

+		fw_type = FW_TYPE_P2P;

+	else if (strstr(name_ptr, "_mesh"))

+		fw_type = FW_TYPE_MESH;

+	else if (strstr(name_ptr, "_es"))

+		fw_type = FW_TYPE_ES;

+	else if (strstr(name_ptr, "_mfg"))

+		fw_type = FW_TYPE_MFG;

+	else

+		fw_type = FW_TYPE_STA;

+

+	for (i = 0;  i < sizeof(chip_name_map)/sizeof(chip_name_map[0]);  i++) {

+		const cihp_name_map_t* row = &chip_name_map[i];

+		if (row->chip == chip && row->chiprev == chiprev &&

+			(row->ag_type == ag_type || row->ag_type == DONT_CARE)) {

+			strcpy(name_ptr, "fw_");

+			strcat(fw_path, row->chip_name);

+#ifdef BCMUSBDEV_COMPOSITE

+			strcat(fw_path, "_cusb");

+#endif

+			if (fw_type == FW_TYPE_APSTA)

+				strcat(fw_path, "_apsta.bin");

+			else if (fw_type == FW_TYPE_P2P)

+				strcat(fw_path, "_p2p.bin");

+			else if (fw_type == FW_TYPE_MESH)

+				strcat(fw_path, "_mesh.bin");

+			else if (fw_type == FW_TYPE_ES)

+				strcat(fw_path, "_es.bin");

+			else if (fw_type == FW_TYPE_MFG)

+				strcat(fw_path, "_mfg.bin");

+			else

+				strcat(fw_path, ".bin");

+		}

+	}

+

+	dhd->conf->fw_type = fw_type;

+

+	CONFIG_TRACE(("%s: firmware_path=%s\n", __FUNCTION__, fw_path));

+}

+

+void

+dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path)

+{

+	uint chip, chiprev;

+	int i;

+	char *name_ptr;

+

+	chip = dhd->conf->chip;

+	chiprev = dhd->conf->chiprev;

+

+	if (clm_path[0] == '\0') {

+		printf("clm path is null\n");

+		return;

+	}

+

+	/* find out the last '/' */

+	i = strlen(clm_path);

+	while (i > 0) {

+		if (clm_path[i] == '/') {

+			i++;

+			break;

+		}

+		i--;

+	}

+	name_ptr = &clm_path[i];

+

+	for (i = 0;  i < sizeof(chip_name_map)/sizeof(chip_name_map[0]);  i++) {

+		const cihp_name_map_t* row = &chip_name_map[i];

+		if (row->chip == chip && row->chiprev == chiprev && row->clm) {

+			strcpy(name_ptr, "clm_");

+			strcat(clm_path, row->chip_name);

+			strcat(clm_path, ".blob");

+		}

+	}

+

+	CONFIG_TRACE(("%s: clm_path=%s\n", __FUNCTION__, clm_path));

+}

+

+void

+dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path)

+{

+	uint chip, chiprev;

+	int i;

+	char *name_ptr;

+

+	chip = dhd->conf->chip;

+	chiprev = dhd->conf->chiprev;

+

+	if (nv_path[0] == '\0') {

+#ifdef CONFIG_BCMDHD_NVRAM_PATH

+		bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);

+		if (nv_path[0] == '\0')

+#endif

+		{

+			printf("nvram path is null\n");

+			return;

+		}

+	}

+

+	/* find out the last '/' */

+	i = strlen(nv_path);

+	while (i > 0) {

+		if (nv_path[i] == '/') {

+			i++;

+			break;

+		}

+		i--;

+	}

+	name_ptr = &nv_path[i];

+

+	for (i = 0;  i < sizeof(chip_name_map)/sizeof(chip_name_map[0]);  i++) {

+		const cihp_name_map_t* row = &chip_name_map[i];

+		if (row->chip == chip && row->chiprev == chiprev && strlen(row->module_name)) {

+			strcpy(name_ptr, "nvram_");

+			strcat(name_ptr, row->module_name);

+#ifdef BCMUSBDEV_COMPOSITE

+			strcat(name_ptr, "_cusb");

+#endif

+			strcat(name_ptr, ".txt");

+		}

+	}

+

+	for (i=0; i<dhd->conf->nv_by_chip.count; i++) {

+		if (chip==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip &&

+				chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) {

+			strcpy(name_ptr, dhd->conf->nv_by_chip.m_chip_nv_path_head[i].name);

+			break;

+		}

+	}

+

+	CONFIG_TRACE(("%s: nvram_path=%s\n", __FUNCTION__, nv_path));

+}

+

+void

+dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path)

+{

+	int i;

+

+	if (src_path[0] == '\0') {

+		printf("src_path is null\n");

+		return;

+	} else

+		strcpy(dst_path, src_path);

+

+	/* find out the last '/' */

+	i = strlen(dst_path);

+	while (i > 0) {

+		if (dst_path[i] == '/') {

+			i++;

+			break;

+		}

+		i--;

+	}

+	strcpy(&dst_path[i], dst_name);

+

+	CONFIG_TRACE(("%s: dst_path=%s\n", __FUNCTION__, dst_path));

+}

+

+#ifdef CONFIG_PATH_AUTO_SELECT

+void

+dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path)

+{

+	uint chip, chiprev;

+	int i;

+	char *name_ptr;

+

+	chip = dhd->conf->chip;

+	chiprev = dhd->conf->chiprev;

+

+	if (conf_path[0] == '\0') {

+		printf("config path is null\n");

+		return;

+	}

+

+	/* find out the last '/' */

+	i = strlen(conf_path);

+	while (i > 0) {

+		if (conf_path[i] == '/') {

+			i++;

+			break;

+		}

+		i--;

+	}

+	name_ptr = &conf_path[i];

+

+	for (i = 0;  i < sizeof(chip_name_map)/sizeof(chip_name_map[0]);  i++) {

+		const cihp_name_map_t* row = &chip_name_map[i];

+		if (row->chip == chip && row->chiprev == chiprev) {

+			strcpy(name_ptr, "config_");

+			strcat(conf_path, row->chip_name);

+			strcat(conf_path, ".txt");

+		}

+	}

+

+	CONFIG_TRACE(("%s: config_path=%s\n", __FUNCTION__, conf_path));

+}

+#endif

+

+int

+dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val,

+	int def, bool down)

+{

+	int ret = -1;

+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */

+

+	if (val >= def) {

+		if (down) {

+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)

+				CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret));

+		}

+		if (cmd == WLC_SET_VAR) {

+			CONFIG_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val));

+			bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));

+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)

+				CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));

+		} else {

+			CONFIG_TRACE(("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val));

+			if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0)

+				CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));

+		}

+	}

+

+	return ret;

+}

+

+int

+dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf,

+	int len, bool down)

+{

+	char iovbuf[WLC_IOCTL_SMLEN];

+	int ret = -1;

+

+	if (down) {

+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)

+			CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret));

+	}

+

+	if (cmd == WLC_SET_VAR) {

+		bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf));

+		if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)

+			CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));

+	} else {

+		if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, 0)) < 0)

+			CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));

+	}

+

+	return ret;

+}

+

+int

+dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx)

+{

+	char iovbuf[WLC_IOCTL_SMLEN];

+	int ret = -1;

+

+	if (cmd == WLC_GET_VAR) {

+		if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {

+			ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), FALSE, ifidx);

+			if (!ret) {

+				memcpy(buf, iovbuf, len);

+			} else {

+				CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret));

+			}

+		} else {

+			CONFIG_ERROR(("%s: mkiovar %s failed\n", __FUNCTION__, name));

+		}

+	} else {

+		ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0);

+		if (ret < 0)

+			CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret));

+	}

+

+	return ret;

+}

+

+uint

+dhd_conf_get_band(dhd_pub_t *dhd)

+{

+	int band = -1;

+

+	if (dhd && dhd->conf)

+		band = dhd->conf->band;

+	else

+		CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));

+

+	return band;

+}

+

+int

+dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec)

+{

+	int bcmerror = -1;

+

+	memset(cspec, 0, sizeof(wl_country_t));

+	bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t));

+	if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0)

+		CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror));

+

+	return bcmerror;

+}

+

+int

+dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec)

+{

+	int bcmerror = -1, i;

+	struct dhd_conf *conf = dhd->conf;

+	conf_country_list_t *country_list = &conf->country_list;

+

+	for (i = 0; i < country_list->count; i++) {

+		if (!strncmp(cspec->country_abbrev, country_list->cspec[i]->country_abbrev, 2)) {

+			memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ);

+			cspec->rev = country_list->cspec[i]->rev;

+			bcmerror = 0;

+		}

+	}

+

+	if (!bcmerror)

+		printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev);

+

+	return bcmerror;

+}

+

+int

+dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec)

+{

+	int bcmerror = -1;

+

+	memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t));

+

+	printf("%s: set country %s, revision %d\n", __FUNCTION__, cspec->ccode, cspec->rev);

+	bcmerror = dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "country", (char *)cspec,

+		sizeof(wl_country_t), FALSE);

+	dhd_conf_get_country(dhd, cspec);

+	printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev);

+

+	return bcmerror;

+}

+

+int

+dhd_conf_fix_country(dhd_pub_t *dhd)

+{

+	int bcmerror = -1;

+	uint band;

+	wl_uint32_list_t *list;

+	u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];

+	wl_country_t cspec;

+

+	if (!(dhd && dhd->conf)) {

+		return bcmerror;

+	}

+

+	memset(valid_chan_list, 0, sizeof(valid_chan_list));

+	list = (wl_uint32_list_t *)(void *) valid_chan_list;

+	list->count = htod32(WL_NUMCHANNELS);

+	if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) {

+		CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror));

+	}

+

+	band = dhd_conf_get_band(dhd);

+

+	if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G) &&

+			dtoh32(list->count)<11)) {

+		CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n",

+			__FUNCTION__, bcmerror, dtoh32(list->count)));

+		dhd_conf_map_country_list(dhd, &dhd->conf->cspec);

+		if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) {

+			strcpy(cspec.country_abbrev, "US");

+			cspec.rev = 0;

+			strcpy(cspec.ccode, "US");

+			dhd_conf_map_country_list(dhd, &cspec);

+			dhd_conf_set_country(dhd, &cspec);

+		}

+	}

+

+	return bcmerror;

+}

+

+bool

+dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel)

+{

+	int i;

+	bool match = false;

+

+	if (dhd && dhd->conf) {

+		if (dhd->conf->channels.count == 0)

+			return true;

+		for (i=0; i<dhd->conf->channels.count; i++) {

+			if (channel == dhd->conf->channels.channel[i])

+				match = true;

+		}

+	} else {

+		match = true;

+		CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));

+	}

+

+	return match;

+}

+

+int

+dhd_conf_set_roam(dhd_pub_t *dhd)

+{

+	int bcmerror = -1;

+	struct dhd_conf *conf = dhd->conf;

+

+	dhd_roam_disable = conf->roam_off;

+	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE);

+

+	if (!conf->roam_off || !conf->roam_off_suspend) {

+		printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER",

+				(char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE);

+

+		printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD",

+				(char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE);

+

+		printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA",

+				(char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE);

+

+		dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE);

+	}

+

+	return bcmerror;

+}

+

+void

+dhd_conf_add_to_eventbuffer(struct eventmsg_buf *ev, u16 event, bool set)

+{

+	if (!ev || (event > WLC_E_LAST))

+		return;

+

+	if (ev->num < MAX_EVENT_BUF_NUM) {

+		ev->event[ev->num].type = event;

+		ev->event[ev->num].set = set;

+		ev->num++;

+	} else {

+		CONFIG_ERROR(("eventbuffer doesn't support > %u events. Update"

+			" the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM));

+		ASSERT(0);

+	}

+}

+

+s32

+dhd_conf_apply_eventbuffer(dhd_pub_t *dhd, eventmsg_buf_t *ev)

+{

+	char eventmask[WL_EVENTING_MASK_LEN];

+	int i, ret = 0;

+

+	if (!ev || (!ev->num))

+		return -EINVAL;

+

+	/* Read event_msgs mask */

+	ret = dhd_conf_get_iovar(dhd, WLC_GET_VAR, "event_msgs", eventmask, sizeof(eventmask), 0);

+	if (unlikely(ret)) {

+		CONFIG_ERROR(("Get event_msgs error (%d)\n", ret));

+		goto exit;

+	}

+

+	/* apply the set bits */

+	for (i = 0; i < ev->num; i++) {

+		if (ev->event[i].set)

+			setbit(eventmask, ev->event[i].type);

+		else

+			clrbit(eventmask, ev->event[i].type);

+	}

+

+	/* Write updated Event mask */

+	ret = dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", eventmask,

+		sizeof(eventmask), FALSE);

+	if (unlikely(ret)) {

+		CONFIG_ERROR(("Set event_msgs error (%d)\n", ret));

+	}

+

+exit:

+	return ret;

+}

+

+int

+dhd_conf_enable_roam_offload(dhd_pub_t *dhd, int enable)

+{

+	int err;

+	eventmsg_buf_t ev_buf;

+

+	if (dhd->conf->roam_off_suspend)

+		return 0;

+

+	err = dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_offload", enable, 0, FALSE);

+	if (err)

+		return err;

+

+	bzero(&ev_buf, sizeof(eventmsg_buf_t));

+	dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);

+	dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);

+	dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);

+	dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);

+	dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);

+	dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);

+	err = dhd_conf_apply_eventbuffer(dhd, &ev_buf);

+

+	CONFIG_TRACE(("%s: roam_offload %d\n", __FUNCTION__, enable));

+

+	return err;

+}

+

+void

+dhd_conf_set_bw_cap(dhd_pub_t *dhd)

+{

+	struct {

+		u32 band;

+		u32 bw_cap;

+	} param = {0, 0};

+

+	if (dhd->conf->bw_cap[0] >= 0) {

+		memset(&param, 0, sizeof(param));

+		param.band = WLC_BAND_2G;

+		param.bw_cap = (uint)dhd->conf->bw_cap[0];

+		printf("%s: set bw_cap 2g 0x%x\n", __FUNCTION__, param.bw_cap);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)&param, sizeof(param), TRUE);

+	}

+

+	if (dhd->conf->bw_cap[1] >= 0) {

+		memset(&param, 0, sizeof(param));

+		param.band = WLC_BAND_5G;

+		param.bw_cap = (uint)dhd->conf->bw_cap[1];

+		printf("%s: set bw_cap 5g 0x%x\n", __FUNCTION__, param.bw_cap);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)&param, sizeof(param), TRUE);

+	}

+}

+

+void

+dhd_conf_get_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acp)

+{

+	int bcmerror = -1;

+	char iovbuf[WLC_IOCTL_SMLEN];

+	edcf_acparam_t *acparam;

+

+	bzero(iovbuf, sizeof(iovbuf));

+

+	/*

+	 * Get current acparams, using buf as an input buffer.

+	 * Return data is array of 4 ACs of wme params.

+	 */

+	if (mode == 0)

+		bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf));

+	else

+		bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf));

+	if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {

+		CONFIG_ERROR(("%s: wme_ac_sta getting failed %d\n", __FUNCTION__, bcmerror));

+		return;

+	}

+	memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT);

+

+	acparam = &acp[AC_BK];

+	CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",

+		__FUNCTION__,

+		acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,

+		acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,

+		acparam->TXOP));

+	acparam = &acp[AC_BE];

+	CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",

+		__FUNCTION__,

+		acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,

+		acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,

+		acparam->TXOP));

+	acparam = &acp[AC_VI];

+	CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",

+		__FUNCTION__,

+		acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,

+		acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,

+		acparam->TXOP));

+	acparam = &acp[AC_VO];

+	CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",

+		__FUNCTION__,

+		acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,

+		acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,

+		acparam->TXOP));

+

+	return;

+}

+

+void

+dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int aci)

+{

+	int aifsn, ecwmin, ecwmax, txop;

+	edcf_acparam_t *acp;

+	struct dhd_conf *conf = dhd->conf;

+	wme_param_t *wme;

+

+	if (mode == 0)

+		wme = &conf->wme_sta;

+	else

+		wme = &conf->wme_ap;

+

+	/* Default value */

+	aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK;

+	ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK;

+	ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT;

+	txop = acparam_cur->TXOP;

+

+	/* Modified value */

+	if (wme->aifsn[aci] > 0)

+		aifsn = wme->aifsn[aci];

+	if (wme->ecwmin[aci] > 0)

+		ecwmin = wme->ecwmin[aci];

+	if (wme->ecwmax[aci] > 0)

+		ecwmax = wme->ecwmax[aci];

+	if (wme->txop[aci] > 0)

+		txop = wme->txop[aci];

+

+	if (!(wme->aifsn[aci] || wme->ecwmin[aci] ||

+			wme->ecwmax[aci] || wme->txop[aci]))

+		return;

+

+	/* Update */

+	acp = acparam_cur;

+	acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK);

+	acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK);

+	acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK));

+	acp->TXOP = txop;

+

+	printf("%s: wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",

+		__FUNCTION__, mode?"ap":"sta",

+		acp->ACI, acp->ACI&EDCF_AIFSN_MASK,

+		acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,

+		acp->TXOP);

+

+	/*

+	* Now use buf as an output buffer.

+	* Put WME acparams after "wme_ac\0" in buf.

+	* NOTE: only one of the four ACs can be set at a time.

+	*/

+	if (mode == 0)

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE);

+	else

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_ap", (char *)acp, sizeof(edcf_acparam_t), FALSE);

+

+}

+

+void

+dhd_conf_set_wme(dhd_pub_t *dhd, int mode)

+{

+	edcf_acparam_t acparam_cur[AC_COUNT];

+

+	if (dhd && dhd->conf) {

+		if (!dhd->conf->force_wme_ac) {

+			CONFIG_TRACE(("%s: force_wme_ac is not enabled %d\n",

+				__FUNCTION__, dhd->conf->force_wme_ac));

+			return;

+		}

+

+		CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__));

+		dhd_conf_get_wme(dhd, mode, acparam_cur);

+

+		dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BK], AC_BK);

+		dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BE], AC_BE);

+		dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VI], AC_VI);

+		dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VO], AC_VO);

+

+		CONFIG_TRACE(("%s: After change:\n", __FUNCTION__));

+		dhd_conf_get_wme(dhd, mode, acparam_cur);

+	} else {

+		CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));

+	}

+

+	return;

+}

+

+void

+dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int p2p_mode, int miracast_mode)

+{

+	int i;

+	struct dhd_conf *conf = dhd->conf;

+	bool set = true;

+

+	for (i=0; i<MCHAN_MAX_NUM; i++) {

+		set = true;

+		set &= (conf->mchan[i].bw >= 0);

+		set &= ((conf->mchan[i].p2p_mode == -1) || (conf->mchan[i].p2p_mode == p2p_mode));

+		set &= ((conf->mchan[i].miracast_mode == -1) || (conf->mchan[i].miracast_mode == miracast_mode));

+		if (set) {

+			dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mchan_bw", conf->mchan[i].bw, 0, FALSE);

+		}

+	}

+

+	return;

+}

+

+#ifdef PKT_FILTER_SUPPORT

+void

+dhd_conf_add_pkt_filter(dhd_pub_t *dhd)

+{

+	int i, j;

+	char str[16];

+#define MACS "%02x%02x%02x%02x%02x%02x"

+

+	/*

+	 * Filter in fewer packet types: ARP (0x0806, ID 105), BRCM (0x886C), 802.1X (0x888E)

+	 *   1) dhd_master_mode=1

+	 *   2) pkt_filter_del=100, 102, 103, 104, 105

+	 *   3) pkt_filter_add=131 0 0 12 0xFFFF 0x886C, 132 0 0 12 0xFFFF 0x888E

+	 *   4) magic_pkt_filter_add=141 0 1 12

+	 */

+	for(i=0; i<dhd->conf->pkt_filter_add.count; i++) {

+		dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i];

+		printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]);

+	}

+	dhd->pktfilter_count += i;

+

+	if (dhd->conf->magic_pkt_filter_add) {

+		strcat(dhd->conf->magic_pkt_filter_add, " 0x");

+		strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");

+		for (j=0; j<16; j++)

+			strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");

+		strcat(dhd->conf->magic_pkt_filter_add, " 0x");

+		strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");

+		sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet));

+		for (j=0; j<16; j++)

+			strncat(dhd->conf->magic_pkt_filter_add, str, 12);

+		dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->magic_pkt_filter_add;

+		dhd->pktfilter_count += 1;

+	}

+}

+

+bool

+dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id)

+{

+	int i;

+

+	if (dhd && dhd->conf) {

+		for (i=0; i<dhd->conf->pkt_filter_del.count; i++) {

+			if (id == dhd->conf->pkt_filter_del.id[i]) {

+				printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]);

+				return true;

+			}

+		}

+		return false;

+	}

+	return false;

+}

+

+void

+dhd_conf_discard_pkt_filter(dhd_pub_t *dhd)

+{

+	dhd->pktfilter_count = 6;

+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL;

+	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";

+	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E";

+	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333";

+	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;

+	/* Do not add ARP to the pkt filter if dhd_master_mode is false. */

+	dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL;

+

+	/* IPv4 broadcast address XXX.XXX.XXX.255 */

+	dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF";

+	dhd->pktfilter_count++;

+	/* discard IPv4 multicast address 224.0.0.0/4 */

+	dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0";

+	dhd->pktfilter_count++;

+	/* discard IPv6 multicast address FF00::/8 */

+	dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF";

+	dhd->pktfilter_count++;

+	/* discard Netbios pkt */

+	dhd->pktfilter[dhd->pktfilter_count] = "121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089";

+	dhd->pktfilter_count++;

+

+}

+#endif /* PKT_FILTER_SUPPORT */

+

+int

+dhd_conf_get_pm(dhd_pub_t *dhd)

+{

+	if (dhd && dhd->conf) {

+		return dhd->conf->pm;

+	}

+	return -1;

+}

+

+uint

+dhd_conf_get_insuspend(dhd_pub_t *dhd)

+{

+	uint mode = 0;

+

+	if (dhd->op_mode & DHD_FLAG_STA_MODE) {

+		mode = dhd->conf->insuspend &

+			(NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | ROAM_OFFLOAD_IN_SUSPEND);

+	} else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {

+		mode = dhd->conf->insuspend &

+			(NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | AP_DOWN_IN_SUSPEND);

+	}

+

+	return mode;

+}

+

+int

+dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend)

+{

+	uint mode = 0, wl_down = 1;

+	struct dhd_conf *conf = dhd->conf;

+

+	mode = dhd_conf_get_insuspend(dhd);

+	if (mode)

+		printf("%s: op_mode %d, suspend %d, mode 0x%x\n", __FUNCTION__,

+			dhd->op_mode, suspend, mode);

+

+	if (suspend) {

+		if (dhd->op_mode & DHD_FLAG_STA_MODE) {

+			if (mode & ROAM_OFFLOAD_IN_SUSPEND)

+				dhd_conf_enable_roam_offload(dhd, 1);

+		} else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {

+			if (mode & AP_DOWN_IN_SUSPEND) {

+				dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,

+					sizeof(wl_down), TRUE, 0);

+			}

+		}

+#ifdef SUSPEND_EVENT

+		if (mode & NO_EVENT_IN_SUSPEND) {

+			char suspend_eventmask[WL_EVENTING_MASK_LEN];

+			if (!conf->suspended) {

+				dhd_conf_get_iovar(dhd, WLC_GET_VAR, "event_msgs",

+					conf->resume_eventmask, sizeof(conf->resume_eventmask), 0);

+			}

+			memset(suspend_eventmask, 0, sizeof(suspend_eventmask));

+			dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs",

+				suspend_eventmask, sizeof(suspend_eventmask), FALSE);

+		}

+#endif

+		conf->suspended = TRUE;

+	} else {

+#ifdef SUSPEND_EVENT

+		if (mode & NO_EVENT_IN_SUSPEND) {

+			dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs",

+				conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE);

+			if (dhd->op_mode & DHD_FLAG_STA_MODE) {

+				struct ether_addr bssid;

+				int ret = 0;

+				ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid,

+					sizeof(struct ether_addr), FALSE, 0);

+				if (ret != BCME_NOTASSOCIATED && !memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {

+					CONFIG_TRACE(("%s: send disassoc\n", __FUNCTION__));

+					dhd_conf_set_intiovar(dhd, WLC_DISASSOC, "WLC_DISASSOC", 0, 0, FALSE);

+				}

+			}

+		}

+#endif

+		if (dhd->op_mode & DHD_FLAG_STA_MODE) {

+			if (mode & ROAM_OFFLOAD_IN_SUSPEND)

+				dhd_conf_enable_roam_offload(dhd, 0);

+		} else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {

+			if (mode & AP_DOWN_IN_SUSPEND) {

+				wl_down = 0;

+				dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&wl_down,

+					sizeof(wl_down), TRUE, 0);

+			}

+		}

+		conf->suspended = FALSE;

+	}

+

+	return 0;

+}

+

+#ifdef PROP_TXSTATUS

+int

+dhd_conf_get_disable_proptx(dhd_pub_t *dhd)

+{

+	struct dhd_conf *conf = dhd->conf;

+	int disable_proptx = -1;

+	int fw_proptx = 0;

+

+	/* check fw proptx priority:

+	  * 1st: check fw support by wl cap

+	  * 2nd: 4334/43340/43341/43241 support proptx but do not report it in wl cap, so enable it by default;

+	  * 	   to disable it, set disable_proptx=1 in config.txt

+	  * 3rd: disable when proptxstatus is not supported in wl cap

+	  */

+	if (FW_SUPPORTED(dhd, proptxstatus)) {

+		fw_proptx = 1;

+	} else if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||

+			conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {

+		fw_proptx = 1;

+	} else {

+		fw_proptx = 0;

+	}

+

+	/* returned disable_proptx value:

+	  * -1: disable in STA and enable in P2P(follow original dhd settings when PROP_TXSTATUS_VSDB enabled)

+	  * 0: depend on fw support

+	  * 1: always disable proptx

+	  */

+	if (conf->disable_proptx == 0) {

+		// check fw support as well

+		if (fw_proptx)

+			disable_proptx = 0;

+		else

+			disable_proptx = 1;

+	} else if (conf->disable_proptx >= 1) {

+		disable_proptx = 1;

+	} else {

+		// check fw support as well

+		if (fw_proptx)

+			disable_proptx = -1;

+		else

+			disable_proptx = 1;

+	}

+

+	printf("%s: fw_proptx=%d, disable_proptx=%d\n", __FUNCTION__, fw_proptx, disable_proptx);

+

+	return disable_proptx;

+}

+#endif

+

+uint

+pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf)

+{

+	bool findNewline, changenewline=FALSE, pick=FALSE;

+	int column;

+	uint n, pick_column=0;

+

+	findNewline = FALSE;

+	column = 0;

+

+	if (start_pos >= len) {

+		CONFIG_ERROR(("%s: wrong start pos\n", __FUNCTION__));

+		return 0;

+	}

+

+	for (n = start_pos; n < len; n++) {

+		if (varbuf[n] == '\r')

+			continue;

+		if ((findNewline || changenewline) && varbuf[n] != '\n')

+			continue;

+		findNewline = FALSE;

+		if (varbuf[n] == '#') {

+			findNewline = TRUE;

+			continue;

+		}

+		if (varbuf[n] == '\\') {

+			changenewline = TRUE;

+			continue;

+		}

+		if (!changenewline && varbuf[n] == '\n') {

+			if (column == 0)

+				continue;

+			column = 0;

+			continue;

+		}

+		if (changenewline && varbuf[n] == '\n') {

+			changenewline = FALSE;

+			continue;

+		}

+

+		if (column==0 && !pick) { // start to pick

+			pick = TRUE;

+			column++;

+			pick_column = 0;

+		} else {

+			if (pick && column==0) { // stop to pick

+				pick = FALSE;

+				break;

+			} else

+				column++;

+		}

+		if (pick) {

+			if (varbuf[n] == 0x9)

+				continue;

+			pickbuf[pick_column] = varbuf[n];

+			pick_column++;

+		}

+	}

+

+	return n; // return current position

+}

+

+bool

+dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	char *data = full_param+len_param;

+

+	if (!strncmp("dhd_msg_level=", full_param, len_param)) {

+		dhd_msg_level = (int)simple_strtol(data, NULL, 0);

+		printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level);

+	}

+#ifdef BCMSDIO

+	else if (!strncmp("sd_msglevel=", full_param, len_param)) {

+		sd_msglevel = (int)simple_strtol(data, NULL, 0);

+		printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel);

+	}

+#endif

+#ifdef BCMDBUS

+	else if (!strncmp("dbus_msglevel=", full_param, len_param)) {

+		dbus_msglevel = (int)simple_strtol(data, NULL, 0);

+		printf("%s: dbus_msglevel = 0x%X\n", __FUNCTION__, dbus_msglevel);

+	}

+#endif

+	else if (!strncmp("android_msg_level=", full_param, len_param)) {

+		android_msg_level = (int)simple_strtol(data, NULL, 0);

+		printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level);

+	}

+	else if (!strncmp("config_msg_level=", full_param, len_param)) {

+		config_msg_level = (int)simple_strtol(data, NULL, 0);

+		printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level);

+	}

+#ifdef WL_CFG80211

+	else if (!strncmp("wl_dbg_level=", full_param, len_param)) {

+		wl_dbg_level = (int)simple_strtol(data, NULL, 0);

+		printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level);

+	}

+#endif

+#if defined(WL_WIRELESS_EXT)

+	else if (!strncmp("iw_msg_level=", full_param, len_param)) {

+		iw_msg_level = (int)simple_strtol(data, NULL, 0);

+		printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level);

+	}

+#endif

+#if defined(DHD_DEBUG)

+	else if (!strncmp("dhd_console_ms=", full_param, len_param)) {

+		dhd_console_ms = (int)simple_strtol(data, NULL, 0);

+		printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd_console_ms);

+	}

+#endif

+	else

+		return false;

+

+	return true;

+}

+

+void

+dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val)

+{

+	char *pick_tmp, *pch;

+

+	pick_tmp = pick;

+	pch = bcmstrstr(pick_tmp, "aifsn ");

+	if (pch) {

+		wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0);

+		printf("%s: ac_val=%d, aifsn=%d\n", __FUNCTION__, ac_val, wme->aifsn[ac_val]);

+	}

+	pick_tmp = pick;

+	pch = bcmstrstr(pick_tmp, "ecwmin ");

+	if (pch) {

+		wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0);

+		printf("%s: ac_val=%d, ecwmin=%d\n", __FUNCTION__, ac_val, wme->ecwmin[ac_val]);

+	}

+	pick_tmp = pick;

+	pch = bcmstrstr(pick_tmp, "ecwmax ");

+	if (pch) {

+		wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0);

+		printf("%s: ac_val=%d, ecwmax=%d\n", __FUNCTION__, ac_val, wme->ecwmax[ac_val]);

+	}

+	pick_tmp = pick;

+	pch = bcmstrstr(pick_tmp, "txop ");

+	if (pch) {

+		wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0);

+		printf("%s: ac_val=%d, txop=0x%x\n", __FUNCTION__, ac_val, wme->txop[ac_val]);

+	}

+

+}

+

+bool

+dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	// wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e

+	// wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e

+

+	if (!strncmp("force_wme_ac=", full_param, len_param)) {

+		conf->force_wme_ac = (int)simple_strtol(data, NULL, 10);

+		printf("%s: force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac);

+	}

+	else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE);

+	}

+	else if (!strncmp("wme_ac_sta_bk=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BK);

+	}

+	else if (!strncmp("wme_ac_sta_vi=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VI);

+	}

+	else if (!strncmp("wme_ac_sta_vo=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VO);

+	}

+	else if (!strncmp("wme_ac_ap_be=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BE);

+	}

+	else if (!strncmp("wme_ac_ap_bk=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BK);

+	}

+	else if (!strncmp("wme_ac_ap_vi=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VI);

+	}

+	else if (!strncmp("wme_ac_ap_vo=", full_param, len_param)) {

+		dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VO);

+	}

+	else

+		return false;

+

+	return true;

+}

+

+bool

+dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	int i, j;

+	char *pch, *pick_tmp;

+	wl_mac_list_t *mac_list;

+	wl_mac_range_t *mac_range;

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	/* Process fw_by_mac:

+	 * fw_by_mac=[fw_mac_num] \

+	 *  [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \

+	 *                                    [oui1-1] [nic_start1-1] [nic_end1-1]... \

+	 *                                    [oui1-n] [nic_start1-n] [nic_end1-n] \

+	 *  [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \

+	 *                                    [oui2-1] [nic_start2-1] [nic_end2-1]... \

+	 *                                    [oui2-n] [nic_start2-n] [nic_end2-n] \

+	 * Ex: fw_by_mac=2 \

+	 *  fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \

+	 *  fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \

+	 *                           0x983B16 0x916157 0x916487

+	 */

+

+	if (!strncmp("fw_by_mac=", full_param, len_param)) {

+		pick_tmp = data;

+		pch = bcmstrtok(&pick_tmp, " ", 0);

+		conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);

+		if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) {

+			conf->fw_by_mac.count = 0;

+			CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+		}

+		printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count);

+		conf->fw_by_mac.m_mac_list_head = mac_list;

+		for (i=0; i<conf->fw_by_mac.count; i++) {

+			pch = bcmstrtok(&pick_tmp, " ", 0);

+			strcpy(mac_list[i].name, pch);

+			pch = bcmstrtok(&pick_tmp, " ", 0);

+			mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);

+			printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,

+				mac_list[i].name, mac_list[i].count);

+			if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {

+				mac_list[i].count = 0;

+				CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+				break;

+			}

+			mac_list[i].mac = mac_range;

+			for (j=0; j<mac_list[i].count; j++) {

+				pch = bcmstrtok(&pick_tmp, " ", 0);

+				mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);

+				pch = bcmstrtok(&pick_tmp, " ", 0);

+				mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);

+				pch = bcmstrtok(&pick_tmp, " ", 0);

+				mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);

+				printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",

+					__FUNCTION__, mac_range[j].oui,

+					mac_range[j].nic_start, mac_range[j].nic_end);

+			}

+		}

+	}

+	else

+		return false;

+

+	return true;

+}

+

+bool

+dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	int i, j;

+	char *pch, *pick_tmp;

+	wl_mac_list_t *mac_list;

+	wl_mac_range_t *mac_range;

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	/* Process nv_by_mac:

+	 * [nv_by_mac]: The same format as fw_by_mac

+	 */

+	if (!strncmp("nv_by_mac=", full_param, len_param)) {

+		pick_tmp = data;

+		pch = bcmstrtok(&pick_tmp, " ", 0);

+		conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);

+		if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) {

+			conf->nv_by_mac.count = 0;

+			CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+		}

+		printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count);

+		conf->nv_by_mac.m_mac_list_head = mac_list;

+		for (i=0; i<conf->nv_by_mac.count; i++) {

+			pch = bcmstrtok(&pick_tmp, " ", 0);

+			strcpy(mac_list[i].name, pch);

+			pch = bcmstrtok(&pick_tmp, " ", 0);

+			mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);

+			printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,

+				mac_list[i].name, mac_list[i].count);

+			if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {

+				mac_list[i].count = 0;

+				CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+				break;

+			}

+			mac_list[i].mac = mac_range;

+			for (j=0; j<mac_list[i].count; j++) {

+				pch = bcmstrtok(&pick_tmp, " ", 0);

+				mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);

+				pch = bcmstrtok(&pick_tmp, " ", 0);

+				mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);

+				pch = bcmstrtok(&pick_tmp, " ", 0);

+				mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);

+				printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",

+					__FUNCTION__, mac_range[j].oui,

+					mac_range[j].nic_start, mac_range[j].nic_end);

+			}

+		}

+	}

+	else

+		return false;

+

+	return true;

+}

+

+bool

+dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	int i;

+	char *pch, *pick_tmp;

+	wl_chip_nv_path_t *chip_nv_path;

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	/* Process nv_by_chip:

+	 * nv_by_chip=[nv_chip_num] \

+	 *  [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \

+	 * Ex: nv_by_chip=2 \

+	 *  43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \

+	 */

+	if (!strncmp("nv_by_chip=", full_param, len_param)) {

+		pick_tmp = data;

+		pch = bcmstrtok(&pick_tmp, " ", 0);

+		conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0);

+		if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, GFP_KERNEL))) {

+			conf->nv_by_chip.count = 0;

+			CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+		}

+		printf("%s: nv_by_chip_count=%d\n", __FUNCTION__, conf->nv_by_chip.count);

+		conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path;

+		for (i=0; i<conf->nv_by_chip.count; i++) {

+			pch = bcmstrtok(&pick_tmp, " ", 0);

+			chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0);

+			pch = bcmstrtok(&pick_tmp, " ", 0);

+			chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0);

+			pch = bcmstrtok(&pick_tmp, " ", 0);

+			strcpy(chip_nv_path[i].name, pch);

+			printf("%s: chip=0x%x, chiprev=%d, name=%s\n", __FUNCTION__,

+				chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name);

+		}

+	}

+	else

+		return false;

+

+	return true;

+}

+

+bool

+dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	if (!strncmp("roam_off=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->roam_off = 0;

+		else

+			conf->roam_off = 1;

+		printf("%s: roam_off = %d\n", __FUNCTION__, conf->roam_off);

+	}

+	else if (!strncmp("roam_off_suspend=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->roam_off_suspend = 0;

+		else

+			conf->roam_off_suspend = 1;

+		printf("%s: roam_off_suspend = %d\n", __FUNCTION__, conf->roam_off_suspend);

+	}

+	else if (!strncmp("roam_trigger=", full_param, len_param)) {

+		conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10);

+		printf("%s: roam_trigger = %d\n", __FUNCTION__,

+			conf->roam_trigger[0]);

+	}

+	else if (!strncmp("roam_scan_period=", full_param, len_param)) {

+		conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10);

+		printf("%s: roam_scan_period = %d\n", __FUNCTION__,

+			conf->roam_scan_period[0]);

+	}

+	else if (!strncmp("roam_delta=", full_param, len_param)) {

+		conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10);

+		printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]);

+	}

+	else if (!strncmp("fullroamperiod=", full_param, len_param)) {

+		conf->fullroamperiod = (int)simple_strtol(data, NULL, 10);

+		printf("%s: fullroamperiod = %d\n", __FUNCTION__,

+			conf->fullroamperiod);

+	} else

+		return false;

+

+	return true;

+}

+

+bool

+dhd_conf_read_country_list(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	int i;

+	char *pch, *pick_tmp, *pick_tmp2;

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+	wl_country_t *cspec;

+	conf_country_list_t *country_list = NULL;

+

+	/* Process country_list:

+	 * country_list=[country1]:[ccode1]/[regrev1],

+	 * [country2]:[ccode2]/[regrev2] \

+	 * Ex: country_list=US:US/0, TW:TW/1

+	 */

+	if (!strncmp("country_list=", full_param, len_param)) {

+		country_list = &dhd->conf->country_list;

+	}

+	if (country_list) {

+		pick_tmp = data;

+		for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {

+			pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);

+			if (!pick_tmp2)

+				break;

+			pch = bcmstrtok(&pick_tmp2, ":", 0);

+			if (!pch)

+				break;

+			cspec = NULL;

+			if (!(cspec = kmalloc(sizeof(wl_country_t), GFP_KERNEL))) {

+				CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+				break;

+			}

+			memset(cspec, 0, sizeof(wl_country_t));

+

+			strcpy(cspec->country_abbrev, pch);

+			pch = bcmstrtok(&pick_tmp2, "/", 0);

+			if (!pch) {

+				kfree(cspec);

+				break;

+			}

+			memcpy(cspec->ccode, pch, 2);

+			pch = bcmstrtok(&pick_tmp2, "/", 0);

+			if (!pch) {

+				kfree(cspec);

+				break;

+			}

+			cspec->rev = (int32)simple_strtol(pch, NULL, 10);

+			country_list->count++;

+			country_list->cspec[i] = cspec;

+			CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__,

+				cspec->country_abbrev, cspec->ccode, cspec->rev));

+		}

+		if (!strncmp("country_list=", full_param, len_param)) {

+			printf("%s: %d country in list\n", __FUNCTION__, conf->country_list.count);

+		}

+	}

+	else

+		return false;

+

+	return true;

+}

+

+bool

+dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	int i;

+	char *pch, *pick_tmp, *pick_tmp2;

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	/* Process mchan_bw:

+	 * mchan_bw=[val]/[any/go/gc]/[any/source/sink]

+	 * Ex: mchan_bw=80/go/source, 30/gc/sink

+	 */

+	if (!strncmp("mchan_bw=", full_param, len_param)) {

+		pick_tmp = data;

+		for (i=0; i<MCHAN_MAX_NUM; i++) {

+			pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);

+			if (!pick_tmp2)

+				break;

+			pch = bcmstrtok(&pick_tmp2, "/", 0);

+			if (!pch) {

+				break;

+			} else {

+				conf->mchan[i].bw = (int)simple_strtol(pch, NULL, 0);

+				if (conf->mchan[i].bw < 0 || conf->mchan[i].bw > 100) {

+					CONFIG_ERROR(("%s: wrong bw %d\n", __FUNCTION__, conf->mchan[i].bw));

+					conf->mchan[i].bw = 0;

+					break;

+				}

+			}

+			pch = bcmstrtok(&pick_tmp2, "/", 0);

+			if (!pch) {

+				break;

+			} else {

+				if (bcmstrstr(pch, "any")) {

+					conf->mchan[i].p2p_mode = -1;

+				} else if (bcmstrstr(pch, "go")) {

+					conf->mchan[i].p2p_mode = WL_P2P_IF_GO;

+				} else if (bcmstrstr(pch, "gc")) {

+					conf->mchan[i].p2p_mode = WL_P2P_IF_CLIENT;

+				}

+			}

+			pch = bcmstrtok(&pick_tmp2, "/", 0);

+			if (!pch) {

+				break;

+			} else {

+				if (bcmstrstr(pch, "any")) {

+					conf->mchan[i].miracast_mode = -1;

+				} else if (bcmstrstr(pch, "source")) {

+					conf->mchan[i].miracast_mode = MIRACAST_SOURCE;

+				} else if (bcmstrstr(pch, "sink")) {

+					conf->mchan[i].miracast_mode = MIRACAST_SINK;

+				}

+			}

+		}

+		for (i=0; i<MCHAN_MAX_NUM; i++) {

+			if (conf->mchan[i].bw >= 0)

+				printf("%s: mchan_bw=%d/%d/%d\n", __FUNCTION__,

+					conf->mchan[i].bw, conf->mchan[i].p2p_mode, conf->mchan[i].miracast_mode);

+		}

+	}

+	else

+		return false;

+

+	return true;

+}

+

+#ifdef PKT_FILTER_SUPPORT

+bool

+dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+	char *pch, *pick_tmp;

+	int i;

+

+	/* Process pkt filter:

+	 * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000

+	 * 2) pkt_filter_del=100, 102, 103, 104, 105

+	 * 3) magic_pkt_filter_add=141 0 1 12

+	 */

+	if (!strncmp("dhd_master_mode=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			dhd_master_mode = FALSE;

+		else

+			dhd_master_mode = TRUE;

+		printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode);

+	}

+	else if (!strncmp("pkt_filter_add=", full_param, len_param)) {

+		pick_tmp = data;

+		pch = bcmstrtok(&pick_tmp, ",.-", 0);

+		i=0;

+		while (pch != NULL && i<DHD_CONF_FILTER_MAX) {

+			strcpy(&conf->pkt_filter_add.filter[i][0], pch);

+			printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]);

+			pch = bcmstrtok(&pick_tmp, ",.-", 0);

+			i++;

+		}

+		conf->pkt_filter_add.count = i;

+	}

+	else if (!strncmp("pkt_filter_del=", full_param, len_param)) {

+		pick_tmp = data;

+		pch = bcmstrtok(&pick_tmp, " ,.-", 0);

+		i=0;

+		while (pch != NULL && i<DHD_CONF_FILTER_MAX) {

+			conf->pkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10);

+			pch = bcmstrtok(&pick_tmp, " ,.-", 0);

+			i++;

+		}

+		conf->pkt_filter_del.count = i;

+		printf("%s: pkt_filter_del id = ", __FUNCTION__);

+		for (i=0; i<conf->pkt_filter_del.count; i++)

+			printf("%d ", conf->pkt_filter_del.id[i]);

+		printf("\n");

+	}

+	else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) {

+		if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) {

+			CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+		} else {

+			memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN);

+			strcpy(conf->magic_pkt_filter_add, data);

+			printf("%s: magic_pkt_filter_add = %s\n", __FUNCTION__, conf->magic_pkt_filter_add);

+		}

+	}

+	else

+		return false;

+

+	return true;

+}

+#endif

+

+#ifdef ISAM_PREINIT

+/*

+ * isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1]

+ * isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x]

+		 hidden [y|n] maxassoc [x]

+		 amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]

+		 emode [none|wep|tkip|aes|tkipaes]

+		 key [xxxxx]

+ * isam_enable=ifname [wlan0|wlan1]

+*/

+bool

+dhd_conf_read_isam(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	if (!strncmp("isam_init=", full_param, len_param)) {

+		sprintf(conf->isam_init, "isam_init %s", data);

+		printf("%s: isam_init=%s\n", __FUNCTION__, conf->isam_init);

+	}

+	else if (!strncmp("isam_config=", full_param, len_param)) {

+		sprintf(conf->isam_config, "isam_config %s", data);

+		printf("%s: isam_config=%s\n", __FUNCTION__, conf->isam_config);

+	}

+	else if (!strncmp("isam_enable=", full_param, len_param)) {

+		sprintf(conf->isam_enable, "isam_enable %s", data);

+		printf("%s: isam_enable=%s\n", __FUNCTION__, conf->isam_enable);

+	}

+	else

+		return false;

+

+	return true;

+}

+#endif

+

+#ifdef IDHCP

+bool

+dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+	struct ipv4_addr ipa_set;

+

+	if (!strncmp("dhcpc_enable=", full_param, len_param)) {

+		conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10);

+		printf("%s: dhcpc_enable = %d\n", __FUNCTION__, conf->dhcpc_enable);

+	}

+	else if (!strncmp("dhcpd_enable=", full_param, len_param)) {

+		conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10);

+		printf("%s: dhcpd_enable = %d\n", __FUNCTION__, conf->dhcpd_enable);

+	}

+	else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) {

+		if (!bcm_atoipv4(data, &ipa_set))

+			printf("%s: dhcpd_ip_addr address setting failed.\n", __FUNCTION__);

+		conf->dhcpd_ip_addr = ipa_set;

+		printf("%s: dhcpd_ip_addr = %s\n",__FUNCTION__, data);

+	}

+	else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) {

+		if (!bcm_atoipv4(data, &ipa_set))

+			printf("%s: dhcpd_ip_mask address setting failed.\n", __FUNCTION__);

+		conf->dhcpd_ip_mask = ipa_set;

+		printf("%s: dhcpd_ip_mask = %s\n",__FUNCTION__, data);

+	}

+	else if (!strncmp("dhcpd_ip_start=", full_param, len_param)) {

+		if (!bcm_atoipv4(data, &ipa_set))

+			printf("%s: dhcpd_ip_start address setting failed.\n", __FUNCTION__);

+		conf->dhcpd_ip_start = ipa_set;

+		printf("%s: dhcpd_ip_start = %s\n",__FUNCTION__, data);

+	}

+	else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) {

+		if (!bcm_atoipv4(data, &ipa_set))

+			printf("%s: dhcpd_ip_end address setting failed.\n", __FUNCTION__);

+		conf->dhcpd_ip_end = ipa_set;

+		printf("%s: dhcpd_ip_end = %s\n",__FUNCTION__, data);

+	}

+	else

+		return false;

+

+	return true;

+}

+#endif

+

+#ifdef BCMSDIO

+bool

+dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	if (!strncmp("dhd_doflow=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			dhd_doflow = FALSE;

+		else

+			dhd_doflow = TRUE;

+		printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow);

+	}

+	else if (!strncmp("dhd_slpauto=", full_param, len_param) ||

+			!strncmp("kso_enable=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			dhd_slpauto = FALSE;

+		else

+			dhd_slpauto = TRUE;

+		printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto);

+	}

+	else if (!strncmp("use_rxchain=", full_param, len_param)) {

+		conf->use_rxchain = (int)simple_strtol(data, NULL, 10);

+		printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain);

+	}

+	else if (!strncmp("dhd_txminmax=", full_param, len_param)) {

+		conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10);

+		printf("%s: dhd_txminmax = %d\n", __FUNCTION__, conf->dhd_txminmax);

+	}

+	else if (!strncmp("txinrx_thres=", full_param, len_param)) {

+		conf->txinrx_thres = (int)simple_strtol(data, NULL, 10);

+		printf("%s: txinrx_thres = %d\n", __FUNCTION__, conf->txinrx_thres);

+	}

+	else if (!strncmp("sd_f2_blocksize=", full_param, len_param)) {

+		conf->sd_f2_blocksize = (int)simple_strtol(data, NULL, 10);

+		printf("%s: sd_f2_blocksize = %d\n", __FUNCTION__, conf->sd_f2_blocksize);

+	}

+#if defined(HW_OOB)

+	else if (!strncmp("oob_enabled_later=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->oob_enabled_later = FALSE;

+		else

+			conf->oob_enabled_later = TRUE;

+		printf("%s: oob_enabled_later = %d\n", __FUNCTION__, conf->oob_enabled_later);

+	}

+#endif

+	else if (!strncmp("dpc_cpucore=", full_param, len_param)) {

+		conf->dpc_cpucore = (int)simple_strtol(data, NULL, 10);

+		printf("%s: dpc_cpucore = %d\n", __FUNCTION__, conf->dpc_cpucore);

+	}

+	else if (!strncmp("rxf_cpucore=", full_param, len_param)) {

+		conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10);

+		printf("%s: rxf_cpucore = %d\n", __FUNCTION__, conf->rxf_cpucore);

+	}

+	else if (!strncmp("orphan_move=", full_param, len_param)) {

+		conf->orphan_move = (int)simple_strtol(data, NULL, 10);

+		printf("%s: orphan_move = %d\n", __FUNCTION__, conf->orphan_move);

+	}

+#if defined(BCMSDIOH_TXGLOM)

+	else if (!strncmp("txglomsize=", full_param, len_param)) {

+		conf->txglomsize = (uint)simple_strtol(data, NULL, 10);

+		if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)

+			conf->txglomsize = SDPCM_MAXGLOM_SIZE;

+		printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize);

+	}

+	else if (!strncmp("txglom_ext=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->txglom_ext = FALSE;

+		else

+			conf->txglom_ext = TRUE;

+		printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext);

+		if (conf->txglom_ext) {

+			if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID))

+				conf->txglom_bucket_size = 1680;

+			else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||

+					conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID)

+				conf->txglom_bucket_size = 1684;

+		}

+		printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size);

+	}

+	else if (!strncmp("bus:rxglom=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->bus_rxglom = FALSE;

+		else

+			conf->bus_rxglom = TRUE;

+		printf("%s: bus:rxglom = %d\n", __FUNCTION__, conf->bus_rxglom);

+	}

+	else if (!strncmp("deferred_tx_len=", full_param, len_param)) {

+		conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10);

+		printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len);

+	}

+	else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) {

+		conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0);

+		printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix);

+	}

+	else if (!strncmp("tx_max_offset=", full_param, len_param)) {

+		conf->tx_max_offset = (int)simple_strtol(data, NULL, 10);

+		printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset);

+	}

+	else if (!strncmp("txglom_mode=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->txglom_mode = FALSE;

+		else

+			conf->txglom_mode = TRUE;

+		printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode);

+	}

+#endif

+	else

+		return false;

+

+	return true;

+}

+#endif

+

+#ifdef BCMPCIE

+bool

+dhd_conf_read_pcie_params(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	if (!strncmp("bus:deepsleep_disable=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->bus_deepsleep_disable = 0;

+		else

+			conf->bus_deepsleep_disable = 1;

+		printf("%s: bus:deepsleep_disable = %d\n", __FUNCTION__, conf->bus_deepsleep_disable);

+	}

+	else

+		return false;

+

+	return true;

+}

+#endif

+

+bool

+dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+

+	if (!strncmp("deepsleep=", full_param, len_param)) {

+		if (!strncmp(data, "1", 1))

+			conf->deepsleep = TRUE;

+		else

+			conf->deepsleep = FALSE;

+		printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep);

+	}

+	else if (!strncmp("PM=", full_param, len_param)) {

+		conf->pm = (int)simple_strtol(data, NULL, 10);

+		printf("%s: PM = %d\n", __FUNCTION__, conf->pm);

+	}

+	else if (!strncmp("pm_in_suspend=", full_param, len_param)) {

+		conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10);

+		printf("%s: pm_in_suspend = %d\n", __FUNCTION__, conf->pm_in_suspend);

+	}

+	else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) {

+		conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10);

+		printf("%s: suspend_bcn_li_dtim = %d\n", __FUNCTION__, conf->suspend_bcn_li_dtim);

+	}

+	else if (!strncmp("xmit_in_suspend=", full_param, len_param)) {

+		if (!strncmp(data, "1", 1))

+			conf->insuspend &= ~NO_TXDATA_IN_SUSPEND;

+		else

+			conf->insuspend |= NO_TXDATA_IN_SUSPEND;

+		printf("%s: insuspend = 0x%x\n", __FUNCTION__, conf->insuspend);

+	}

+	else if (!strncmp("insuspend=", full_param, len_param)) {

+		conf->insuspend = (int)simple_strtol(data, NULL, 0);

+		printf("%s: insuspend = 0x%x\n", __FUNCTION__, conf->insuspend);

+	}

+	else

+		return false;

+

+	return true;

+}

+

+bool

+dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char *data = full_param+len_param;

+	uint len_data = strlen(data);

+	char *pch, *pick_tmp;

+	int i;

+

+	if (!strncmp("dhd_poll=", full_param, len_param)) {

+		if (!strncmp(data, "0", 1))

+			conf->dhd_poll = 0;

+		else

+			conf->dhd_poll = 1;

+		printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll);

+	}

+	else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) {

+		dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10);

+		printf("%s: dhd_watchdog_ms = %d\n", __FUNCTION__, dhd_watchdog_ms);

+	}

+	else if (!strncmp("band=", full_param, len_param)) {

+		/* Process band:

+		 * band=a for 5GHz only and band=b for 2.4GHz only

+		 */

+		if (!strcmp(data, "b"))

+			conf->band = WLC_BAND_2G;

+		else if (!strcmp(data, "a"))

+			conf->band = WLC_BAND_5G;

+		else

+			conf->band = WLC_BAND_AUTO;

+		printf("%s: band = %d\n", __FUNCTION__, conf->band);

+	}

+	else if (!strncmp("bw_cap_2g=", full_param, len_param)) {

+		conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0);

+		printf("%s: bw_cap_2g = %d\n", __FUNCTION__, conf->bw_cap[0]);

+	}

+	else if (!strncmp("bw_cap_5g=", full_param, len_param)) {

+		conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0);

+		printf("%s: bw_cap_5g = %d\n", __FUNCTION__, conf->bw_cap[1]);

+	}

+	else if (!strncmp("bw_cap=", full_param, len_param)) {

+		pick_tmp = data;

+		pch = bcmstrtok(&pick_tmp, " ,.-", 0);

+		if (pch != NULL) {

+			conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0);

+			printf("%s: bw_cap 2g = %d\n", __FUNCTION__, conf->bw_cap[0]);

+		}

+		pch = bcmstrtok(&pick_tmp, " ,.-", 0);

+		if (pch != NULL) {

+			conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0);

+			printf("%s: bw_cap 5g = %d\n", __FUNCTION__, conf->bw_cap[1]);

+		}

+	}

+	else if (!strncmp("ccode=", full_param, len_param)) {

+		memset(&conf->cspec, 0, sizeof(wl_country_t));

+		memcpy(conf->cspec.country_abbrev, data, len_data);

+		memcpy(conf->cspec.ccode, data, len_data);

+		printf("%s: ccode = %s\n", __FUNCTION__, conf->cspec.ccode);

+	}

+	else if (!strncmp("regrev=", full_param, len_param)) {

+		conf->cspec.rev = (int32)simple_strtol(data, NULL, 10);

+		printf("%s: regrev = %d\n", __FUNCTION__, conf->cspec.rev);

+	}

+	else if (!strncmp("channels=", full_param, len_param)) {

+		pick_tmp = data;

+		pch = bcmstrtok(&pick_tmp, " ,.-", 0);

+		i=0;

+		while (pch != NULL && i<WL_NUMCHANNELS) {

+			conf->channels.channel[i] = (uint32)simple_strtol(pch, NULL, 10);

+			pch = bcmstrtok(&pick_tmp, " ,.-", 0);

+			i++;

+		}

+		conf->channels.count = i;

+		printf("%s: channels = ", __FUNCTION__);

+		for (i=0; i<conf->channels.count; i++)

+			printf("%d ", conf->channels.channel[i]);

+		printf("\n");

+	}

+	else if (!strncmp("keep_alive_period=", full_param, len_param)) {

+		conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10);

+		printf("%s: keep_alive_period = %d\n", __FUNCTION__,

+			conf->keep_alive_period);

+	}

+	else if (!strncmp("phy_oclscdenable=", full_param, len_param)) {

+		conf->phy_oclscdenable = (int)simple_strtol(data, NULL, 10);

+		printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable);

+	}

+	else if (!strncmp("srl=", full_param, len_param)) {

+		conf->srl = (int)simple_strtol(data, NULL, 10);

+		printf("%s: srl = %d\n", __FUNCTION__, conf->srl);

+	}

+	else if (!strncmp("lrl=", full_param, len_param)) {

+		conf->lrl = (int)simple_strtol(data, NULL, 10);

+		printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl);

+	}

+	else if (!strncmp("bcn_timeout=", full_param, len_param)) {

+		conf->bcn_timeout = (uint)simple_strtol(data, NULL, 10);

+		printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout);

+	}

+	else if (!strncmp("txbf=", full_param, len_param)) {

+		conf->txbf = (int)simple_strtol(data, NULL, 10);

+		printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf);

+	}

+	else if (!strncmp("frameburst=", full_param, len_param)) {

+		conf->frameburst = (int)simple_strtol(data, NULL, 10);

+		printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst);

+	}

+	else if (!strncmp("disable_proptx=", full_param, len_param)) {

+		conf->disable_proptx = (int)simple_strtol(data, NULL, 10);

+		printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx);

+	}

+#ifdef DHDTCPACK_SUPPRESS

+	else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) {

+		conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10);

+		printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode);

+	}

+#endif

+	else if (!strncmp("pktprio8021x=", full_param, len_param)) {

+		conf->pktprio8021x = (int)simple_strtol(data, NULL, 10);

+		printf("%s: pktprio8021x = %d\n", __FUNCTION__, conf->pktprio8021x);

+	}

+#if defined(BCMSDIO) || defined(BCMPCIE)

+	else if (!strncmp("dhd_txbound=", full_param, len_param)) {

+		dhd_txbound = (uint)simple_strtol(data, NULL, 10);

+		printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound);

+	}

+	else if (!strncmp("dhd_rxbound=", full_param, len_param)) {

+		dhd_rxbound = (uint)simple_strtol(data, NULL, 10);

+		printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound);

+	}

+#endif

+	else if (!strncmp("tsq=", full_param, len_param)) {

+		conf->tsq = (int)simple_strtol(data, NULL, 10);

+		printf("%s: tsq = %d\n", __FUNCTION__, conf->tsq);

+	}

+	else if (!strncmp("ctrl_resched=", full_param, len_param)) {

+		conf->ctrl_resched = (int)simple_strtol(data, NULL, 10);

+		printf("%s: ctrl_resched = %d\n", __FUNCTION__, conf->ctrl_resched);

+	}

+	else if (!strncmp("dhd_ioctl_timeout_msec=", full_param, len_param)) {

+		conf->dhd_ioctl_timeout_msec = (int)simple_strtol(data, NULL, 10);

+		printf("%s: dhd_ioctl_timeout_msec = %d\n", __FUNCTION__, conf->dhd_ioctl_timeout_msec);

+	}

+	else if (!strncmp("in4way=", full_param, len_param)) {

+		conf->in4way = (int)simple_strtol(data, NULL, 0);

+		printf("%s: in4way = 0x%x\n", __FUNCTION__, conf->in4way);

+	}

+	else if (!strncmp("max_wait_gc_time=", full_param, len_param)) {

+		conf->max_wait_gc_time = (int)simple_strtol(data, NULL, 0);

+		printf("%s: max_wait_gc_time = %d\n", __FUNCTION__, conf->max_wait_gc_time);

+	}

+	else if (!strncmp("wl_preinit=", full_param, len_param)) {

+		if (!(conf->wl_preinit = kmalloc(len_data+1, GFP_KERNEL))) { /* size by the value length, not the key length */

+			CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));

+		} else {

+			memset(conf->wl_preinit, 0, len_data+1);

+			strcpy(conf->wl_preinit, data);

+			printf("%s: wl_preinit = %s\n", __FUNCTION__, conf->wl_preinit);

+		}

+	}

+	else

+		return false;

+

+	return true;

+}

+

+int

+dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path)

+{

+	int bcmerror = -1;

+	uint len = 0, start_pos=0;

+	void * image = NULL;

+	char * memblock = NULL;

+	char *bufp, *pick = NULL, *pch;

+	bool conf_file_exists;

+	uint len_param;

+

+	conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0'));

+	if (!conf_file_exists) {

+		printf("%s: config path %s\n", __FUNCTION__, conf_path);

+		return (0);

+	}

+

+	if (conf_file_exists) {

+		image = dhd_os_open_image(conf_path);

+		if (image == NULL) {

+			printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path);

+			goto err;

+		}

+	}

+

+	memblock = MALLOC(dhd->osh, MAXSZ_CONFIG);

+	if (memblock == NULL) {

+		CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",

+			__FUNCTION__, MAXSZ_CONFIG));

+		goto err;

+	}

+

+	pick = MALLOC(dhd->osh, MAXSZ_BUF);

+	if (!pick) {

+		CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",

+			__FUNCTION__, MAXSZ_BUF));

+		goto err;

+	}

+

+	/* Read variables */

+	if (conf_file_exists) {

+		len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image);

+	}

+	if (len > 0 && len < MAXSZ_CONFIG) {

+		bufp = (char *)memblock;

+		bufp[len] = 0;

+

+		while (start_pos < len) {

+			memset(pick, 0, MAXSZ_BUF);

+			start_pos = pick_config_vars(bufp, len, start_pos, pick);

+			pch = strchr(pick, '=');

+			if (pch != NULL) {

+				len_param = pch-pick+1;

+				if (len_param == strlen(pick)) {

+					CONFIG_ERROR(("%s: not a valid parameter %s\n", __FUNCTION__, pick));

+					continue;

+				}

+			} else {

+				CONFIG_ERROR(("%s: not a valid parameter %s\n", __FUNCTION__, pick));

+				continue;

+			}

+

+			if (dhd_conf_read_log_level(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_roam_params(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_country_list(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_mchan_params(dhd, pick, len_param))

+				continue;

+#ifdef PKT_FILTER_SUPPORT

+			else if (dhd_conf_read_pkt_filter(dhd, pick, len_param))

+				continue;

+#endif /* PKT_FILTER_SUPPORT */

+#ifdef ISAM_PREINIT

+			else if (dhd_conf_read_isam(dhd, pick, len_param))

+				continue;

+#endif /* ISAM_PREINIT */

+#ifdef IDHCP

+			else if (dhd_conf_read_dhcp_params(dhd, pick, len_param))

+				continue;

+#endif /* IDHCP */

+#ifdef BCMSDIO

+			else if (dhd_conf_read_sdio_params(dhd, pick, len_param))

+				continue;

+#endif /* BCMSDIO */

+#ifdef BCMPCIE

+			else if (dhd_conf_read_pcie_params(dhd, pick, len_param))

+				continue;

+#endif /* BCMPCIE */

+			else if (dhd_conf_read_pm_params(dhd, pick, len_param))

+				continue;

+			else if (dhd_conf_read_others(dhd, pick, len_param))

+				continue;

+			else

+				continue;

+		}

+

+		bcmerror = 0;

+	} else {

+		CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len));

+		bcmerror = BCME_SDIO_ERROR;

+	}

+

+err:

+	if (pick)

+		MFREE(dhd->osh, pick, MAXSZ_BUF);

+

+	if (memblock)

+		MFREE(dhd->osh, memblock, MAXSZ_CONFIG);

+

+	if (image)

+		dhd_os_close_image(image);

+

+	return bcmerror;

+}
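dhd_conf_read_config() loads the file at conf_path and hands each name=value token to the reader functions dispatched above. A minimal, hypothetical config file exercising some of those readers might look like the following (values are illustrative only; the country_list, mchan_bw and wl_preinit forms follow the format comments earlier in this file):

	ccode=US
	regrev=0
	band=b
	PM=2
	keep_alive_period=30000
	country_list=US:US/0, TW:TW/1
	mchan_bw=80/go/source, 30/gc/sink
	wl_preinit=86=0, mpc=0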

+

+int

+dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev)

+{

+	printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev);

+	dhd->conf->chip = chip;

+	dhd->conf->chiprev = chiprev;

+	return 0;

+}

+

+uint

+dhd_conf_get_chip(void *context)

+{

+	dhd_pub_t *dhd = context;

+

+	if (dhd && dhd->conf)

+		return dhd->conf->chip;

+	return 0;

+}

+

+uint

+dhd_conf_get_chiprev(void *context)

+{

+	dhd_pub_t *dhd = context;

+

+	if (dhd && dhd->conf)

+		return dhd->conf->chiprev;

+	return 0;

+}

+

+#ifdef BCMSDIO

+void

+dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable)

+{

+	struct dhd_conf *conf = dhd->conf;

+

+	if (enable) {

+#if defined(BCMSDIOH_TXGLOM_EXT)

+		if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||

+				conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||

+				conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {

+			conf->txglom_mode = SDPCM_TXGLOM_CPY;

+		}

+#endif

+		// other parameters set in preinit or config.txt

+	} else {

+		// clear txglom parameters

+		conf->txglom_ext = FALSE;

+		conf->txglom_bucket_size = 0;

+		conf->txglomsize = 0;

+		conf->deferred_tx_len = 0;

+	}

+	if (conf->txglom_ext)

+		printf("%s: txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__,

+			conf->txglom_ext, conf->txglom_bucket_size);

+	printf("%s: txglom_mode=%s\n", __FUNCTION__,

+		conf->txglom_mode == SDPCM_TXGLOM_MDESC ? "multi-desc" : "copy");

+	printf("%s: txglomsize=%d, deferred_tx_len=%d\n", __FUNCTION__,

+		conf->txglomsize, conf->deferred_tx_len);

+	printf("%s: txinrx_thres=%d, dhd_txminmax=%d\n", __FUNCTION__,

+		conf->txinrx_thres, conf->dhd_txminmax);

+	printf("%s: tx_max_offset=%d, txctl_tmo_fix=%d\n", __FUNCTION__,

+		conf->tx_max_offset, conf->txctl_tmo_fix);

+

+}

+#endif

+

+static int

+dhd_conf_rsdb_mode(dhd_pub_t *dhd, char *buf)

+{

+	char *pch;

+	wl_config_t rsdb_mode_cfg = {1, 0};

+

+	pch = buf;

+	rsdb_mode_cfg.config = (int)simple_strtol(pch, NULL, 0);

+

+	if (pch) {

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg,

+			sizeof(rsdb_mode_cfg), TRUE);

+		printf("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config);

+	}

+

+	return 0;

+}

+

+typedef int (tpl_parse_t)(dhd_pub_t *dhd, char *buf);

+

+typedef struct iovar_tpl_t {

+	int cmd;

+	char *name;

+	tpl_parse_t *parse;

+} iovar_tpl_t;

+

+const iovar_tpl_t iovar_tpl_list[] = {

+	{WLC_SET_VAR,	"rsdb_mode",	dhd_conf_rsdb_mode},

+};

+

+static int iovar_tpl_parse(const iovar_tpl_t *tpl, int tpl_count,

+	dhd_pub_t *dhd, int cmd, char *name, char *buf)

+{

+	int i, ret = 0;

+

+	/* look for a matching code in the table */

+	for (i = 0; i < tpl_count; i++, tpl++) {

+		if (tpl->cmd == cmd && !strcmp(tpl->name, name))

+			break;

+	}

+	if (i < tpl_count && tpl->parse) {

+		ret = tpl->parse(dhd, buf);

+	} else {

+		ret = -1;

+	}

+

+	return ret;

+}

+

+bool

+dhd_conf_set_wl_preinit(dhd_pub_t *dhd, char *data)

+{

+	int cmd, val, ret = 0;

+	char name[32], *pch, *pick_tmp, *pick_tmp2;

+

+	/* Process wl_preinit:

+	 * wl_preinit=[cmd]=[val], [cmd]=[val]

+	 * Ex: wl_preinit=86=0, mpc=0

+	 */

+	pick_tmp = data;

+	while (pick_tmp && (pick_tmp2 = bcmstrtok(&pick_tmp, ",", 0)) != NULL) {

+		pch = bcmstrtok(&pick_tmp2, "=", 0);

+		if (!pch)

+			break;

+		if (*pch == ' ') {

+			pch++;

+		}

+		memset(name, 0, sizeof(name));

+		cmd = (int)simple_strtol(pch, NULL, 0);

+		if (cmd == 0) {

+			cmd = WLC_SET_VAR;

+			strcpy(name, pch);

+		}

+		pch = bcmstrtok(&pick_tmp2, ",", 0);

+		if (!pch) {

+			break;

+		}

+		ret = iovar_tpl_parse(iovar_tpl_list, ARRAY_SIZE(iovar_tpl_list),

+			dhd, cmd, name, pch);

+		if (ret) {

+			val = (int)simple_strtol(pch, NULL, 0);

+			dhd_conf_set_intiovar(dhd, cmd, name, val, -1, TRUE);

+		}

+	}

+

+	return true;

+}

+

+void

+dhd_conf_postinit_ioctls(dhd_pub_t *dhd)

+{

+	struct dhd_conf *conf = dhd->conf;

+	char wl_preinit[] = "assoc_retry_max=30";

+

+	dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE);

+	dhd_conf_map_country_list(dhd, &conf->cspec);

+	dhd_conf_set_country(dhd, &conf->cspec);

+	dhd_conf_fix_country(dhd);

+	dhd_conf_get_country(dhd, &dhd->dhd_cspec);

+

+	dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE);

+	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE);

+	dhd_conf_set_intiovar(dhd, WLC_SET_PM, "PM", conf->pm, 0, FALSE);

+	dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, FALSE);

+	dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE);

+	dhd_conf_set_bw_cap(dhd);

+	dhd_conf_set_roam(dhd);

+

+#if defined(BCMPCIE)

+	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bus:deepsleep_disable",

+		conf->bus_deepsleep_disable, 0, FALSE);

+#endif /* defined(BCMPCIE) */

+

+#ifdef IDHCP

+	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable, 0, FALSE);

+	if (dhd->conf->dhcpd_enable >= 0) {

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_addr",

+			(char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_mask",

+			(char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_start",

+			(char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), FALSE);

+		dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_end",

+			(char *)&conf->dhcpd_ip_end, sizeof(conf->dhcpd_ip_end), FALSE);

+		dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpd_enable",

+			conf->dhcpd_enable, 0, FALSE);

+	}

+#endif

+	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", conf->txbf, 0, FALSE);

+	dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", conf->frameburst, 0, FALSE);

+

+	dhd_conf_set_wl_preinit(dhd, wl_preinit);

+	dhd_conf_set_wl_preinit(dhd, conf->wl_preinit);

+

+#ifndef WL_CFG80211

+	dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE);

+#endif

+

+}

+

+int

+dhd_conf_preinit(dhd_pub_t *dhd)

+{

+	struct dhd_conf *conf = dhd->conf;

+	int i;

+

+	CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));

+

+#ifdef BCMSDIO

+	dhd_conf_free_mac_list(&conf->fw_by_mac);

+	dhd_conf_free_mac_list(&conf->nv_by_mac);

+	dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);

+#endif

+	dhd_conf_free_country_list(&conf->country_list);

+	if (conf->magic_pkt_filter_add) {

+		kfree(conf->magic_pkt_filter_add);

+		conf->magic_pkt_filter_add = NULL;

+	}

+	if (conf->wl_preinit) {

+		kfree(conf->wl_preinit);

+		conf->wl_preinit = NULL;

+	}

+	memset(&conf->country_list, 0, sizeof(conf_country_list_t));

+	conf->band = -1;

+	memset(&conf->bw_cap, -1, sizeof(conf->bw_cap));

+	if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {

+		strcpy(conf->cspec.country_abbrev, "ALL");

+		strcpy(conf->cspec.ccode, "ALL");

+		conf->cspec.rev = 0;

+	} else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||

+			conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||

+			conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||

+			conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||

+			conf->chip == BCM4362_CHIP_ID) {

+		strcpy(conf->cspec.country_abbrev, "US");

+		strcpy(conf->cspec.ccode, "US");

+		conf->cspec.rev = 988;

+	} else {

+		strcpy(conf->cspec.country_abbrev, "US");

+		strcpy(conf->cspec.ccode, "US");

+		conf->cspec.rev = 0;

+	}

+	memset(&conf->channels, 0, sizeof(wl_channel_list_t));

+	conf->roam_off = 1;

+	conf->roam_off_suspend = 1;

+#ifdef CUSTOM_ROAM_TRIGGER_SETTING

+	conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING;

+#else

+	conf->roam_trigger[0] = -65;

+#endif

+	conf->roam_trigger[1] = WLC_BAND_ALL;

+	conf->roam_scan_period[0] = 10;

+	conf->roam_scan_period[1] = WLC_BAND_ALL;

+#ifdef CUSTOM_ROAM_DELTA_SETTING

+	conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING;

+#else

+	conf->roam_delta[0] = 15;

+#endif

+	conf->roam_delta[1] = WLC_BAND_ALL;

+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC

+	conf->fullroamperiod = 60;

+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */

+	conf->fullroamperiod = 120;

+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */

+#ifdef CUSTOM_KEEP_ALIVE_SETTING

+	conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING;

+#else

+	conf->keep_alive_period = 28000;

+#endif

+	conf->force_wme_ac = 0;

+	memset(&conf->wme_sta, 0, sizeof(wme_param_t));

+	memset(&conf->wme_ap, 0, sizeof(wme_param_t));

+	conf->phy_oclscdenable = -1;

+#ifdef PKT_FILTER_SUPPORT

+	memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t));

+	memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t));

+#endif

+	conf->srl = -1;

+	conf->lrl = -1;

+	conf->bcn_timeout = 16;

+	conf->txbf = -1;

+	conf->disable_proptx = -1;

+	conf->dhd_poll = -1;

+#ifdef BCMSDIO

+	conf->use_rxchain = 0;

+	conf->bus_rxglom = TRUE;

+	conf->txglom_ext = FALSE;

+	conf->tx_max_offset = 0;

+	conf->txglomsize = SDPCM_DEFGLOM_SIZE;

+	conf->txctl_tmo_fix = 300;

+	conf->txglom_mode = SDPCM_TXGLOM_MDESC;

+	conf->deferred_tx_len = 0;

+	conf->dhd_txminmax = 1;

+	conf->txinrx_thres = -1;

+	conf->sd_f2_blocksize = 0;

+#if defined(HW_OOB)

+	conf->oob_enabled_later = FALSE;

+#endif

+	conf->orphan_move = 0;

+#endif

+#ifdef BCMPCIE

+	conf->bus_deepsleep_disable = 1;

+#endif

+	conf->dpc_cpucore = -1;

+	conf->rxf_cpucore = -1;

+	conf->frameburst = -1;

+	conf->deepsleep = FALSE;

+	conf->pm = -1;

+	conf->pm_in_suspend = -1;

+	conf->suspend_bcn_li_dtim = -1;

+	conf->insuspend = 0;

+#ifdef SUSPEND_EVENT

+	memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask));

+#endif

+#ifdef IDHCP

+	conf->dhcpc_enable = -1;

+	conf->dhcpd_enable = -1;

+#endif

+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))

+	conf->tsq = 10;

+#else

+	conf->tsq = 0;

+#endif

+#ifdef DHDTCPACK_SUPPRESS

+#ifdef BCMPCIE

+	conf->tcpack_sup_mode = TCPACK_SUP_DEFAULT;

+#else

+	conf->tcpack_sup_mode = TCPACK_SUP_OFF;

+#endif

+#endif

+	conf->pktprio8021x = -1;

+	conf->ctrl_resched = 2;

+	conf->dhd_ioctl_timeout_msec = 0;

+	conf->in4way = NO_SCAN_IN4WAY | WAIT_DISCONNECTED;

+	conf->max_wait_gc_time = 300;

+#ifdef ISAM_PREINIT

+	memset(conf->isam_init, 0, sizeof(conf->isam_init));

+	memset(conf->isam_config, 0, sizeof(conf->isam_config));

+	memset(conf->isam_enable, 0, sizeof(conf->isam_enable));

+#endif

+	for (i=0; i<MCHAN_MAX_NUM; i++) {

+		memset(&conf->mchan[i], -1, sizeof(mchan_params_t));

+	}

+	if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||

+			conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||

+			conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||

+			conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||

+			conf->chip == BCM4362_CHIP_ID) {

+#ifdef DHDTCPACK_SUPPRESS

+#ifdef BCMSDIO

+		conf->tcpack_sup_mode = TCPACK_SUP_REPLACE;

+#endif

+#endif

+#if defined(BCMSDIO) || defined(BCMPCIE)

+		dhd_rxbound = 128;

+		dhd_txbound = 64;

+#endif

+		conf->txbf = 1;

+		conf->frameburst = 1;

+#ifdef BCMSDIO

+		conf->dhd_txminmax = -1;

+		conf->txinrx_thres = 128;

+		conf->sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;

+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))

+		conf->orphan_move = 1;

+#else

+		conf->orphan_move = 0;

+#endif

+#endif

+	}

+

+#ifdef BCMSDIO

+#if defined(BCMSDIOH_TXGLOM_EXT)

+	if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||

+			conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||

+			conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {

+		conf->txglom_ext = TRUE;

+	} else {

+		conf->txglom_ext = FALSE;

+	}

+	if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {

+		conf->txglom_bucket_size = 1680; // fixed value, don't change

+		conf->txglomsize = 6;

+	}

+	if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||

+			conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {

+		conf->txglom_bucket_size = 1684; // fixed value, don't change

+		conf->txglomsize = 16;

+	}

+#endif

+	if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)

+		conf->txglomsize = SDPCM_MAXGLOM_SIZE;

+#endif

+

+	return 0;

+}

+

+int

+dhd_conf_reset(dhd_pub_t *dhd)

+{

+#ifdef BCMSDIO

+	dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);

+	dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);

+	dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip);

+#endif

+	dhd_conf_free_country_list(&dhd->conf->country_list);

+	if (dhd->conf->magic_pkt_filter_add) {

+		kfree(dhd->conf->magic_pkt_filter_add);

+		dhd->conf->magic_pkt_filter_add = NULL;

+	}

+	if (dhd->conf->wl_preinit) {

+		kfree(dhd->conf->wl_preinit);

+		dhd->conf->wl_preinit = NULL;

+	}

+	memset(dhd->conf, 0, sizeof(dhd_conf_t));

+	return 0;

+}

+

+int

+dhd_conf_attach(dhd_pub_t *dhd)

+{

+	dhd_conf_t *conf;

+

+	CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));

+

+	if (dhd->conf != NULL) {

+		printf("%s: config is already attached\n", __FUNCTION__);

+		return 0;

+	}

+	/* Allocate private bus interface state */

+	if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) {

+		CONFIG_ERROR(("%s: MALLOC failed\n", __FUNCTION__));

+		goto fail;

+	}

+	memset(conf, 0, sizeof(dhd_conf_t));

+

+	dhd->conf = conf;

+

+	return 0;

+

+fail:

+	if (conf != NULL)

+		MFREE(dhd->osh, conf, sizeof(dhd_conf_t));

+	return BCME_NOMEM;

+}

+

+void

+dhd_conf_detach(dhd_pub_t *dhd)

+{

+	CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));

+

+	if (dhd->conf) {

+#ifdef BCMSDIO

+		dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);

+		dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);

+		dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip);

+#endif

+		dhd_conf_free_country_list(&dhd->conf->country_list);

+		if (dhd->conf->magic_pkt_filter_add) {

+			kfree(dhd->conf->magic_pkt_filter_add);

+			dhd->conf->magic_pkt_filter_add = NULL;

+		}

+		if (dhd->conf->wl_preinit) {

+			kfree(dhd->conf->wl_preinit);

+			dhd->conf->wl_preinit = NULL;

+		}

+		MFREE(dhd->osh, dhd->conf, sizeof(dhd_conf_t));

+	}

+	dhd->conf = NULL;

+}

diff --git a/drivers/net/wireless/bcmdhd/dhd_config.h b/drivers/net/wireless/bcmdhd/dhd_config.h
new file mode 100644
index 0000000..f752e10
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_config.h
@@ -0,0 +1,289 @@
+
+#ifndef _dhd_config_
+#define _dhd_config_
+
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <wlioctl.h>
+#include <802.11.h>
+
+#define FW_TYPE_STA     0
+#define FW_TYPE_APSTA   1
+#define FW_TYPE_P2P     2
+#define FW_TYPE_MESH    3
+#define FW_TYPE_ES      4
+#define FW_TYPE_MFG     5
+#define FW_TYPE_G       0
+#define FW_TYPE_AG      1
+
+#define FW_PATH_AUTO_SELECT 1
+//#define CONFIG_PATH_AUTO_SELECT
+extern char firmware_path[MOD_PARAM_PATHLEN];
+#if defined(BCMSDIO) || defined(BCMPCIE)
+extern uint dhd_rxbound;
+extern uint dhd_txbound;
+#endif
+#ifdef BCMSDIO
+#define TXGLOM_RECV_OFFSET 8
+extern uint dhd_doflow;
+extern uint dhd_slpauto;
+#endif
+
+typedef struct wl_mac_range {
+	uint32 oui;
+	uint32 nic_start;
+	uint32 nic_end;
+} wl_mac_range_t;
+
+typedef struct wl_mac_list {
+	int count;
+	wl_mac_range_t *mac;
+	char name[MOD_PARAM_PATHLEN];
+} wl_mac_list_t;
+
+typedef struct wl_mac_list_ctrl {
+	int count;
+	struct wl_mac_list *m_mac_list_head;
+} wl_mac_list_ctrl_t;
+
+typedef struct wl_chip_nv_path {
+	uint chip;
+	uint chiprev;
+	char name[MOD_PARAM_PATHLEN];
+} wl_chip_nv_path_t;
+
+typedef struct wl_chip_nv_path_list_ctrl {
+	int count;
+	struct wl_chip_nv_path *m_chip_nv_path_head;
+} wl_chip_nv_path_list_ctrl_t;
+
+typedef struct wl_channel_list {
+	uint32 count;
+	uint32 channel[WL_NUMCHANNELS];
+} wl_channel_list_t;
+
+typedef struct wmes_param {
+	int aifsn[AC_COUNT];
+	int ecwmin[AC_COUNT];
+	int ecwmax[AC_COUNT];
+	int txop[AC_COUNT];
+} wme_param_t;
+
+#ifdef PKT_FILTER_SUPPORT
+#define DHD_CONF_FILTER_MAX	8
+#define PKT_FILTER_LEN 300
+#define MAGIC_PKT_FILTER_LEN 450
+typedef struct conf_pkt_filter_add {
+	uint32 count;
+	char filter[DHD_CONF_FILTER_MAX][PKT_FILTER_LEN];
+} conf_pkt_filter_add_t;
+
+typedef struct conf_pkt_filter_del {
+	uint32 count;
+	uint32 id[DHD_CONF_FILTER_MAX];
+} conf_pkt_filter_del_t;
+#endif
+
+#define CONFIG_COUNTRY_LIST_SIZE 100
+typedef struct conf_country_list {
+	uint32 count;
+	wl_country_t *cspec[CONFIG_COUNTRY_LIST_SIZE];
+} conf_country_list_t;
+
+/* mchan_params */
+#define MCHAN_MAX_NUM 4
+#define MIRACAST_SOURCE	1
+#define MIRACAST_SINK	2
+typedef struct mchan_params {
+	int bw;
+	int p2p_mode;
+	int miracast_mode;
+} mchan_params_t;
+
+enum in4way_flags {
+	NO_SCAN_IN4WAY	= (1 << (0)),
+	NO_BTC_IN4WAY	= (1 << (1)),
+	DONT_DELETE_GC_AFTER_WPS	= (1 << (2)),
+	WAIT_DISCONNECTED	= (1 << (3)),
+};
+
+enum in_suspend_flags {
+	NO_EVENT_IN_SUSPEND	= (1 << (0)),
+	NO_TXDATA_IN_SUSPEND	= (1 << (1)),
+	AP_DOWN_IN_SUSPEND	= (1 << (2)),
+	ROAM_OFFLOAD_IN_SUSPEND	= (1 << (3)),
+};
+
+enum eapol_status {
+	EAPOL_STATUS_NONE = 0,
+	EAPOL_STATUS_WPS_REQID,
+	EAPOL_STATUS_WPS_RSPID,
+	EAPOL_STATUS_WPS_WSC_START,
+	EAPOL_STATUS_WPS_M1,
+	EAPOL_STATUS_WPS_M2,
+	EAPOL_STATUS_WPS_M3,
+	EAPOL_STATUS_WPS_M4,
+	EAPOL_STATUS_WPS_M5,
+	EAPOL_STATUS_WPS_M6,
+	EAPOL_STATUS_WPS_M7,
+	EAPOL_STATUS_WPS_M8,
+	EAPOL_STATUS_WPS_DONE,
+	EAPOL_STATUS_WPA_START,
+	EAPOL_STATUS_WPA_M1,
+	EAPOL_STATUS_WPA_M2,
+	EAPOL_STATUS_WPA_M3,
+	EAPOL_STATUS_WPA_M4,
+	EAPOL_STATUS_WPA_END
+};
+
+typedef struct dhd_conf {
+	uint chip;
+	uint chiprev;
+	int fw_type;
+	wl_mac_list_ctrl_t fw_by_mac;
+	wl_mac_list_ctrl_t nv_by_mac;
+	wl_chip_nv_path_list_ctrl_t nv_by_chip;
+	conf_country_list_t country_list;
+	int band;
+	int bw_cap[2];
+	wl_country_t cspec;
+	wl_channel_list_t channels;
+	uint roam_off;
+	uint roam_off_suspend;
+	int roam_trigger[2];
+	int roam_scan_period[2];
+	int roam_delta[2];
+	int fullroamperiod;
+	uint keep_alive_period;
+	int force_wme_ac;
+	wme_param_t wme_sta;
+	wme_param_t wme_ap;
+	int phy_oclscdenable;
+#ifdef PKT_FILTER_SUPPORT
+	conf_pkt_filter_add_t pkt_filter_add;
+	conf_pkt_filter_del_t pkt_filter_del;
+	char *magic_pkt_filter_add;
+#endif
+	int srl;
+	int lrl;
+	uint bcn_timeout;
+	int txbf;
+	int disable_proptx;
+	int dhd_poll;
+#ifdef BCMSDIO
+	int use_rxchain;
+	bool bus_rxglom;
+	bool txglom_ext; /* Only for 43362/4330/43340/43341/43241 */
+	/* terence 20161011:
+	    1) conf->tx_max_offset = 1 to fix credit issue in adaptivity testing
+	    2) conf->tx_max_offset = 1 causes UDP Tx to stop working when rxglom is supported,
+	        but this does not happen with sw txglom
+	*/
+	int tx_max_offset;
+	uint txglomsize;
+	int txctl_tmo_fix;
+	bool txglom_mode;
+	uint deferred_tx_len;
+	/*txglom_bucket_size:
+	 * 43362/4330: 1680
+	 * 43340/43341/43241: 1684
+	 */
+	int txglom_bucket_size;
+	int txinrx_thres;
+	int dhd_txminmax; // -1=DATABUFCNT(bus)
+	uint sd_f2_blocksize;
+	bool oob_enabled_later;
+	int orphan_move;
+#endif
+#ifdef BCMPCIE
+	int bus_deepsleep_disable;
+#endif
+	int dpc_cpucore;
+	int rxf_cpucore;
+	int frameburst;
+	bool deepsleep;
+	int pm;
+	int pm_in_suspend;
+	int suspend_bcn_li_dtim;
+#ifdef DHDTCPACK_SUPPRESS
+	uint8 tcpack_sup_mode;
+#endif
+	int pktprio8021x;
+	uint insuspend;
+	bool suspended;
+#ifdef SUSPEND_EVENT
+	char resume_eventmask[WL_EVENTING_MASK_LEN];
+#endif
+#ifdef IDHCP
+	int dhcpc_enable;
+	int dhcpd_enable;
+	struct ipv4_addr dhcpd_ip_addr;
+	struct ipv4_addr dhcpd_ip_mask;
+	struct ipv4_addr dhcpd_ip_start;
+	struct ipv4_addr dhcpd_ip_end;
+#endif
+#ifdef ISAM_PREINIT
+	char isam_init[50];
+	char isam_config[300];
+	char isam_enable[50];
+#endif
+	int ctrl_resched;
+	int dhd_ioctl_timeout_msec;
+	struct mchan_params mchan[MCHAN_MAX_NUM];
+	char *wl_preinit;
+	int tsq;
+	uint eapol_status;
+	uint in4way;
+	uint max_wait_gc_time;
+} dhd_conf_t;
+
+#ifdef BCMSDIO
+int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac);
+void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path);
+void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path);
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip);
+#endif
+void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable);
+int dhd_conf_set_blksize(bcmsdh_info_t *sdh);
+#endif
+void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path);
+void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path);
+void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path);
+void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path);
+#ifdef CONFIG_PATH_AUTO_SELECT
+void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path);
+#endif
+int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down);
+int dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, int len, bool down);
+uint dhd_conf_get_band(dhd_pub_t *dhd);
+int dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec);
+int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec);
+int dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec);
+int dhd_conf_fix_country(dhd_pub_t *dhd);
+bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel);
+void dhd_conf_set_wme(dhd_pub_t *dhd, int mode);
+void dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int go, int source);
+void dhd_conf_add_pkt_filter(dhd_pub_t *dhd);
+bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id);
+void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd);
+int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path);
+int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev);
+uint dhd_conf_get_chip(void *context);
+uint dhd_conf_get_chiprev(void *context);
+int dhd_conf_get_pm(dhd_pub_t *dhd);
+
+#ifdef PROP_TXSTATUS
+int dhd_conf_get_disable_proptx(dhd_pub_t *dhd);
+#endif
+uint dhd_conf_get_insuspend(dhd_pub_t *dhd);
+int dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend);
+void dhd_conf_postinit_ioctls(dhd_pub_t *dhd);
+int dhd_conf_preinit(dhd_pub_t *dhd);
+int dhd_conf_reset(dhd_pub_t *dhd);
+int dhd_conf_attach(dhd_pub_t *dhd);
+void dhd_conf_detach(dhd_pub_t *dhd);
+void *dhd_get_pub(struct net_device *dev);
+void *dhd_get_conf(struct net_device *dev);
+#endif /* _dhd_config_ */
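The header above only declares the configuration entry points; their call sites live elsewhere in the driver and are outside this hunk. A hedged sketch of how a bus driver would plausibly sequence them, inferred from the names and from what dhd_conf_preinit() and dhd_conf_read_config() allocate and free (the ordering is an assumption, not taken from this patch):

/* Sketch only: call sites and ordering are assumptions. */
dhd_conf_attach(dhd);                      /* allocate and zero dhd->conf            */
dhd_conf_set_chiprev(dhd, chip, chiprev);  /* record chip id used to pick defaults   */
dhd_conf_preinit(dhd);                     /* reset every field to its default value */
dhd_conf_read_config(dhd, conf_path);      /* override defaults from the config file */
/* ... firmware download, interface up ... */
dhd_conf_postinit_ioctls(dhd);             /* push country/band/PM/roam to firmware  */
/* ... on driver unload ... */
dhd_conf_detach(dhd);                      /* free dhd->conf                         */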
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
new file mode 100644
index 0000000..0aa51ca
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -0,0 +1,299 @@
+/*
+ * Customer code to add GPIO control during WLAN start/stop
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_custom_gpio.c 664997 2016-10-14 11:56:35Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+
+#include <wlioctl.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC)  */
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function will return:
+ *  1) return :  Host gpio interrupt number per customer platform
+ *  2) irq_flags_ptr : Type of Host interrupt as Level or Edge
+ *
+ *  NOTE :
+ *  Customer should check his platform definitions
+ *  and his Host Interrupt spec
+ *  to figure out the proper setting for his platform.
+ *  Broadcom provides just reference settings as example.
+ *
+ */
+int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr)
+{
+	int  host_oob_irq = 0;
+
+#if defined(CUSTOMER_HW2)
+	host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+	if (dhd_oob_gpio_num < 0) {
+		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+	}
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+	if (dhd_oob_gpio_num < 0) {
+		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+		__FUNCTION__));
+		return (dhd_oob_gpio_num);
+	}
+
+	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+	         __FUNCTION__, dhd_oob_gpio_num));
+
+#endif 
+
+	return (host_oob_irq);
+}
+#endif 
+
+/* Customer function to control hw specific wlan gpios */
+int
+dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff)
+{
+	int err = 0;
+
+	return err;
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(void *adapter, unsigned char *buf)
+{
+	int ret = 0;
+
+	WL_TRACE(("%s Enter\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+
+	/* Customer access to MAC address stored outside of DHD driver */
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+	ret = wifi_platform_get_mac_addr(adapter, buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+	/* EXAMPLE code */
+	{
+		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+	}
+#endif /* EXAMPLE_GET_MAC */
+
+	return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+	{"",   "XY", 4},  /* Universal if Country code is unknown or empty */
+	{"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+	{"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+	{"EU", "EU", 5},  /* European union countries to : EU regrev 05 */
+	{"AT", "EU", 5},
+	{"BE", "EU", 5},
+	{"BG", "EU", 5},
+	{"CY", "EU", 5},
+	{"CZ", "EU", 5},
+	{"DK", "EU", 5},
+	{"EE", "EU", 5},
+	{"FI", "EU", 5},
+	{"FR", "EU", 5},
+	{"DE", "EU", 5},
+	{"GR", "EU", 5},
+	{"HU", "EU", 5},
+	{"IE", "EU", 5},
+	{"IT", "EU", 5},
+	{"LV", "EU", 5},
+	{"LI", "EU", 5},
+	{"LT", "EU", 5},
+	{"LU", "EU", 5},
+	{"MT", "EU", 5},
+	{"NL", "EU", 5},
+	{"PL", "EU", 5},
+	{"PT", "EU", 5},
+	{"RO", "EU", 5},
+	{"SK", "EU", 5},
+	{"SI", "EU", 5},
+	{"ES", "EU", 5},
+	{"SE", "EU", 5},
+	{"GB", "EU", 5},
+	{"KR", "XY", 3},
+	{"AU", "XY", 3},
+	{"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+	{"TW", "XY", 3},
+	{"AR", "XY", 3},
+	{"MX", "XY", 3},
+	{"IL", "IL", 0},
+	{"CH", "CH", 0},
+	{"TR", "TR", 0},
+	{"NO", "NO", 0},
+#endif /* EXAMPLE_TABLE */
+#if defined(CUSTOMER_HW2)
+#if defined(BCM4335_CHIP)
+	{"",   "XZ", 11},  /* Universal if Country code is unknown or empty */
+#endif
+	{"AE", "AE", 1},
+	{"AR", "AR", 1},
+	{"AT", "AT", 1},
+	{"AU", "AU", 2},
+	{"BE", "BE", 1},
+	{"BG", "BG", 1},
+	{"BN", "BN", 1},
+	{"CA", "CA", 2},
+	{"CH", "CH", 1},
+	{"CY", "CY", 1},
+	{"CZ", "CZ", 1},
+	{"DE", "DE", 3},
+	{"DK", "DK", 1},
+	{"EE", "EE", 1},
+	{"ES", "ES", 1},
+	{"FI", "FI", 1},
+	{"FR", "FR", 1},
+	{"GB", "GB", 1},
+	{"GR", "GR", 1},
+	{"HR", "HR", 1},
+	{"HU", "HU", 1},
+	{"IE", "IE", 1},
+	{"IS", "IS", 1},
+	{"IT", "IT", 1},
+	{"ID", "ID", 1},
+	{"JP", "JP", 8},
+	{"KR", "KR", 24},
+	{"KW", "KW", 1},
+	{"LI", "LI", 1},
+	{"LT", "LT", 1},
+	{"LU", "LU", 1},
+	{"LV", "LV", 1},
+	{"MA", "MA", 1},
+	{"MT", "MT", 1},
+	{"MX", "MX", 1},
+	{"NL", "NL", 1},
+	{"NO", "NO", 1},
+	{"PL", "PL", 1},
+	{"PT", "PT", 1},
+	{"PY", "PY", 1},
+	{"RO", "RO", 1},
+	{"SE", "SE", 1},
+	{"SI", "SI", 1},
+	{"SK", "SK", 1},
+	{"TR", "TR", 7},
+	{"TW", "TW", 1},
+	{"IR", "XZ", 11},	/* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */
+	{"SD", "XZ", 11},	/* Universal if Country code is SUDAN */
+	{"SY", "XZ", 11},	/* Universal if Country code is SYRIAN ARAB REPUBLIC */
+	{"GL", "XZ", 11},	/* Universal if Country code is GREENLAND */
+	{"PS", "XZ", 11},	/* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */
+	{"TL", "XZ", 11},	/* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */
+	{"MH", "XZ", 11},	/* Universal if Country code is MARSHALL ISLANDS */
+#ifdef BCM4330_CHIP
+	{"RU", "RU", 1},
+	{"US", "US", 5}
+#endif
+#endif 
+};
+
+
+/* Customized Locale convertor
+*  input : ISO 3166-1 country abbreviation
+*  output: customized cspec
+*/
+void
+#ifdef CUSTOM_COUNTRY_CODE
+get_customized_country_code(void *adapter, char *country_iso_code,
+	wl_country_t *cspec, u32 flags)
+#else
+get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec)
+#endif /* CUSTOM_COUNTRY_CODE */
+{
+#if (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+	struct cntry_locales_custom *cloc_ptr;
+
+	if (!cspec)
+		return;
+#ifdef CUSTOM_COUNTRY_CODE
+	cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, flags);
+#else
+	cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code);
+#endif /* CUSTOM_COUNTRY_CODE */
+
+	if (cloc_ptr) {
+		strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+		cspec->rev = cloc_ptr->custom_locale_rev;
+	}
+	return;
+#else
+	int size, i;
+
+	size = ARRAYSIZE(translate_custom_table);
+
+	if (cspec == 0)
+		 return;
+
+	if (size == 0)
+		 return;
+
+	for (i = 0; i < size; i++) {
+		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+			memcpy(cspec->ccode,
+				translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+			cspec->rev = translate_custom_table[i].custom_locale_rev;
+			return;
+		}
+	}
+#ifdef EXAMPLE_TABLE
+	/* if no country code matched return first universal code from translate_custom_table */
+	memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+	cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXAMPLE_TABLE */
+	return;
+#endif /* (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_memprealloc.c b/drivers/net/wireless/bcmdhd/dhd_custom_memprealloc.c
new file mode 100644
index 0000000..1f593a4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_memprealloc.c
@@ -0,0 +1,513 @@
+/*
+ * Platform Dependent file for usage of Preallocated Memory
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_custom_memprealloc.c 707595 2017-06-28 08:28:30Z $
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <linux/unistd.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+
+#define WLAN_STATIC_SCAN_BUF0		5
+#define WLAN_STATIC_SCAN_BUF1		6
+#define WLAN_STATIC_DHD_INFO_BUF	7
+#define WLAN_STATIC_DHD_WLFC_BUF	8
+#define WLAN_STATIC_DHD_IF_FLOW_LKUP	9
+#define WLAN_STATIC_DHD_MEMDUMP_RAM	11
+#define WLAN_STATIC_DHD_WLFC_HANGER	12
+#define WLAN_STATIC_DHD_PKTID_MAP	13
+#define WLAN_STATIC_DHD_PKTID_IOCTL_MAP	14
+#define WLAN_STATIC_DHD_LOG_DUMP_BUF	15
+#define WLAN_STATIC_DHD_LOG_DUMP_BUF_EX	16
+#define WLAN_STATIC_DHD_PKTLOG_DUMP_BUF	17
+#define WLAN_STATIC_STAT_REPORT_BUF		18
+
+#define WLAN_SCAN_BUF_SIZE		(64 * 1024)
+
+#if defined(CONFIG_64BIT)
+#define WLAN_DHD_INFO_BUF_SIZE		(32 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE		(64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE	(64 * 1024)
+#else
+#define WLAN_DHD_INFO_BUF_SIZE		(32 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE		(16 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE	(20 * 1024)
+#endif /* CONFIG_64BIT */
+#define WLAN_DHD_MEMDUMP_SIZE		(1536 * 1024)
+
+#define PREALLOC_WLAN_SEC_NUM		4
+#define PREALLOC_WLAN_BUF_NUM		160
+#define PREALLOC_WLAN_SECTION_HEADER	24
+
+#ifdef CONFIG_BCMDHD_PCIE
+#define DHD_SKB_1PAGE_BUFSIZE	(PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE	(PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE	(PAGE_SIZE*4)
+
+#define WLAN_SECTION_SIZE_0	(PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_1	0
+#define WLAN_SECTION_SIZE_2	0
+#define WLAN_SECTION_SIZE_3	(PREALLOC_WLAN_BUF_NUM * 1024)
+
+#define DHD_SKB_1PAGE_BUF_NUM	0
+#define DHD_SKB_2PAGE_BUF_NUM	128
+#define DHD_SKB_4PAGE_BUF_NUM	0
+
+#else
+#define DHD_SKB_HDRSIZE		336
+#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+
+#define WLAN_SECTION_SIZE_0	(PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_1	(PREALLOC_WLAN_BUF_NUM * 128)
+#define WLAN_SECTION_SIZE_2	(PREALLOC_WLAN_BUF_NUM * 512)
+#define WLAN_SECTION_SIZE_3	(PREALLOC_WLAN_BUF_NUM * 1024)
+
+#define DHD_SKB_1PAGE_BUF_NUM	8
+#define DHD_SKB_2PAGE_BUF_NUM	8
+#define DHD_SKB_4PAGE_BUF_NUM	1
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#define WLAN_SKB_1_2PAGE_BUF_NUM	((DHD_SKB_1PAGE_BUF_NUM) + \
+		(DHD_SKB_2PAGE_BUF_NUM))
+#define WLAN_SKB_BUF_NUM	((WLAN_SKB_1_2PAGE_BUF_NUM) + \
+		(DHD_SKB_4PAGE_BUF_NUM))
+
+#define WLAN_MAX_PKTID_ITEMS		(8192)
+#define WLAN_DHD_PKTID_MAP_HDR_SIZE	(20 + 4*(WLAN_MAX_PKTID_ITEMS + 1))
+#define WLAN_DHD_PKTID_MAP_ITEM_SIZE	(32)
+#define WLAN_DHD_PKTID_MAP_SIZE		((WLAN_DHD_PKTID_MAP_HDR_SIZE) + \
+		((WLAN_MAX_PKTID_ITEMS+1) * WLAN_DHD_PKTID_MAP_ITEM_SIZE))
+
+#define WLAN_MAX_PKTID_IOCTL_ITEMS	(32)
+#define WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE	(20 + 4*(WLAN_MAX_PKTID_IOCTL_ITEMS + 1))
+#define WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE	(32)
+#define WLAN_DHD_PKTID_IOCTL_MAP_SIZE		((WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE) + \
+		((WLAN_MAX_PKTID_IOCTL_ITEMS+1) * WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE))
+
+#define DHD_LOG_DUMP_BUF_SIZE	(1024 * 1024)
+#define DHD_LOG_DUMP_BUF_EX_SIZE	(8 * 1024)
+
+#define DHD_PKTLOG_DUMP_BUF_SIZE	(64 * 1024)
+
+#define DHD_STAT_REPORT_BUF_SIZE	(128 * 1024)
+
+#define WLAN_DHD_WLFC_HANGER_MAXITEMS		3072
+#define WLAN_DHD_WLFC_HANGER_ITEM_SIZE		32
+#define WLAN_DHD_WLFC_HANGER_SIZE	((WLAN_DHD_WLFC_HANGER_ITEM_SIZE) + \
+	((WLAN_DHD_WLFC_HANGER_MAXITEMS) * (WLAN_DHD_WLFC_HANGER_ITEM_SIZE)))
+
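+/* Pool of skbs reserved at init time and handed out for section PREALLOC_WLAN_SEC_NUM */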
+static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];
+
+struct wlan_mem_prealloc {
+	void *mem_ptr;
+	unsigned long size;
+};
+
+static struct wlan_mem_prealloc wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = {
+	{NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)},
+	{NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)},
+	{NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)},
+	{NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)}
+};
+
+static void *wlan_static_scan_buf0 = NULL;
+static void *wlan_static_scan_buf1 = NULL;
+static void *wlan_static_dhd_info_buf = NULL;
+static void *wlan_static_dhd_wlfc_buf = NULL;
+static void *wlan_static_if_flow_lkup = NULL;
+static void *wlan_static_dhd_memdump_ram = NULL;
+static void *wlan_static_dhd_wlfc_hanger = NULL;
+static void *wlan_static_dhd_pktid_map = NULL;
+static void *wlan_static_dhd_pktid_ioctl_map = NULL;
+static void *wlan_static_dhd_log_dump_buf = NULL;
+static void *wlan_static_dhd_log_dump_buf_ex = NULL;
+static void *wlan_static_dhd_pktlog_dump_buf = NULL;
+static void *wlan_static_stat_report_buf = NULL;
+
+#define GET_STATIC_BUF(section, config_size, req_size, buf) ({\
+	void *__ret; \
+	if (req_size > config_size) {\
+		pr_err("request " #section " size(%lu) is bigger than" \
+				" static size(%d)\n", \
+				req_size, config_size); \
+		__ret = NULL; \
+	} else { __ret = buf;} \
+	__ret; \
+})
+
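+/*
+ * Return the buffer reserved for 'section', or NULL when the requested size
+ * exceeds the statically configured size for that section.
+ */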
+void
+*dhd_wlan_mem_prealloc(int section, unsigned long size)
+{
+	if (section == PREALLOC_WLAN_SEC_NUM) {
+		return wlan_static_skb;
+	}
+
+	if (section == WLAN_STATIC_SCAN_BUF0) {
+		return wlan_static_scan_buf0;
+	}
+
+	if (section == WLAN_STATIC_SCAN_BUF1) {
+		return wlan_static_scan_buf1;
+	}
+
+	if (section == WLAN_STATIC_DHD_INFO_BUF) {
+		if (size > WLAN_DHD_INFO_BUF_SIZE) {
+			pr_err("request DHD_INFO size(%lu) is bigger than"
+				" static size(%d).\n", size,
+				WLAN_DHD_INFO_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_info_buf;
+	}
+
+	if (section == WLAN_STATIC_DHD_WLFC_BUF)  {
+		if (size > WLAN_DHD_WLFC_BUF_SIZE) {
+			pr_err("request DHD_WLFC size(%lu) is bigger than"
+				" static size(%d).\n",
+				size, WLAN_DHD_WLFC_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_wlfc_buf;
+	}
+
+	if (section == WLAN_STATIC_DHD_WLFC_HANGER) {
+		if (size > WLAN_DHD_WLFC_HANGER_SIZE) {
+			pr_err("request DHD_WLFC_HANGER size(%lu) is bigger than"
+				" static size(%d).\n",
+				size, WLAN_DHD_WLFC_HANGER_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_wlfc_hanger;
+	}
+
+	if (section == WLAN_STATIC_DHD_IF_FLOW_LKUP)  {
+		if (size > WLAN_DHD_IF_FLOW_LKUP_SIZE) {
+			pr_err("request DHD_WLFC size(%lu) is bigger than"
+				" static size(%d).\n",
+				size, WLAN_DHD_WLFC_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_if_flow_lkup;
+	}
+
+	if (section == WLAN_STATIC_DHD_MEMDUMP_RAM) {
+		if (size > WLAN_DHD_MEMDUMP_SIZE) {
+			pr_err("request DHD_MEMDUMP_RAM size(%lu) is bigger"
+				" than static size(%d).\n",
+				size, WLAN_DHD_MEMDUMP_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_memdump_ram;
+	}
+
+	if (section == WLAN_STATIC_DHD_PKTID_MAP)  {
+		if (size > WLAN_DHD_PKTID_MAP_SIZE) {
+			pr_err("request DHD_PKTID_MAP size(%lu) is bigger than"
+				" static size(%d).\n",
+				size, WLAN_DHD_PKTID_MAP_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_pktid_map;
+	}
+
+
+	if (section == WLAN_STATIC_DHD_PKTID_IOCTL_MAP)  {
+		if (size > WLAN_DHD_PKTID_IOCTL_MAP_SIZE) {
+			pr_err("request DHD_PKTID_IOCTL_MAP size(%lu) is bigger than"
+				" static size(%d).\n",
+				size, WLAN_DHD_PKTID_IOCTL_MAP_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_pktid_ioctl_map;
+	}
+
+	if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF) {
+		if (size > DHD_LOG_DUMP_BUF_SIZE) {
+			pr_err("request DHD_LOG_DUMP_BUF size(%lu) is bigger then"
+				" static size(%d).\n",
+				size, DHD_LOG_DUMP_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_log_dump_buf;
+	}
+
+	if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF_EX) {
+		if (size > DHD_LOG_DUMP_BUF_EX_SIZE) {
+			pr_err("request DHD_LOG_DUMP_BUF_EX size(%lu) is bigger then"
+				" static size(%d).\n",
+				size, DHD_LOG_DUMP_BUF_EX_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_log_dump_buf_ex;
+	}
+
+	if (section == WLAN_STATIC_DHD_PKTLOG_DUMP_BUF) {
+		if (size > DHD_PKTLOG_DUMP_BUF_SIZE) {
+			pr_err("request DHD_PKTLOG_DUMP_BUF size(%lu) is bigger then"
+					" static size(%d).\n",
+					size, DHD_PKTLOG_DUMP_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_pktlog_dump_buf;
+	}
+
+	if (section == WLAN_STATIC_STAT_REPORT_BUF) {
+		return GET_STATIC_BUF(WLAN_STATIC_STAT_REPORT_BUF,
+			DHD_STAT_REPORT_BUF_SIZE, size, wlan_static_stat_report_buf);
+	}
+
+	if ((section < 0) || (section >= PREALLOC_WLAN_SEC_NUM)) {
+		return NULL;
+	}
+
+	if (wlan_mem_array[section].size < size) {
+		return NULL;
+	}
+
+	return wlan_mem_array[section].mem_ptr;
+}
+EXPORT_SYMBOL(dhd_wlan_mem_prealloc);
+
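+/*
+ * Allocate all static buffers (skbs, section buffers, scan/log/dump buffers)
+ * up front; on any failure, free everything allocated so far and return -ENOMEM.
+ */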
+int
+dhd_init_wlan_mem(void)
+{
+	int i;
+	int j;
+
+	for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
+		if (!wlan_static_skb[i]) {
+			goto err_skb_alloc;
+		}
+	}
+
+	for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
+		if (!wlan_static_skb[i]) {
+			goto err_skb_alloc;
+		}
+	}
+
+#if !defined(CONFIG_BCMDHD_PCIE)
+	wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
+	if (!wlan_static_skb[i]) {
+		goto err_skb_alloc;
+	}
+#endif /* !CONFIG_BCMDHD_PCIE */
+
+	for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) {
+		if (wlan_mem_array[i].size > 0) {
+			wlan_mem_array[i].mem_ptr =
+				kmalloc(wlan_mem_array[i].size, GFP_KERNEL);
+
+			if (!wlan_mem_array[i].mem_ptr) {
+				goto err_mem_alloc;
+			}
+		}
+	}
+
+	wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_scan_buf0) {
+		pr_err("Failed to alloc wlan_static_scan_buf0\n");
+		goto err_mem_alloc;
+	}
+
+	wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_scan_buf1) {
+		pr_err("Failed to alloc wlan_static_scan_buf1\n");
+		goto err_mem_alloc;
+	}
+
+	wlan_static_dhd_log_dump_buf = kmalloc(DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_log_dump_buf) {
+		pr_err("Failed to alloc wlan_static_dhd_log_dump_buf\n");
+		goto err_mem_alloc;
+	}
+
+	wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_log_dump_buf_ex) {
+		pr_err("Failed to alloc wlan_static_dhd_log_dump_buf_ex\n");
+		goto err_mem_alloc;
+	}
+
+	wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_info_buf) {
+		pr_err("Failed to alloc wlan_static_dhd_info_buf\n");
+		goto err_mem_alloc;
+	}
+
+#ifdef CONFIG_BCMDHD_PCIE
+	wlan_static_if_flow_lkup = kmalloc(WLAN_DHD_IF_FLOW_LKUP_SIZE,
+		GFP_KERNEL);
+	if (!wlan_static_if_flow_lkup) {
+		pr_err("Failed to alloc wlan_static_if_flow_lkup\n");
+		goto err_mem_alloc;
+	}
+
+#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP
+	wlan_static_dhd_pktid_map = kmalloc(WLAN_DHD_PKTID_MAP_SIZE,
+		GFP_KERNEL);
+	if (!wlan_static_dhd_pktid_map) {
+		pr_err("Failed to alloc wlan_static_dhd_pktid_map\n");
+		goto err_mem_alloc;
+	}
+
+	wlan_static_dhd_pktid_ioctl_map = kmalloc(WLAN_DHD_PKTID_IOCTL_MAP_SIZE,
+		GFP_KERNEL);
+	if (!wlan_static_dhd_pktid_ioctl_map) {
+		pr_err("Failed to alloc wlan_static_dhd_pktid_ioctl_map\n");
+		goto err_mem_alloc;
+	}
+#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */
+#else
+	wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE,
+		GFP_KERNEL);
+	if (!wlan_static_dhd_wlfc_buf) {
+		pr_err("Failed to alloc wlan_static_dhd_wlfc_buf\n");
+		goto err_mem_alloc;
+	}
+
+	wlan_static_dhd_wlfc_hanger = kmalloc(WLAN_DHD_WLFC_HANGER_SIZE,
+		GFP_KERNEL);
+	if (!wlan_static_dhd_wlfc_hanger) {
+		pr_err("Failed to alloc wlan_static_dhd_wlfc_hanger\n");
+		goto err_mem_alloc;
+	}
+#endif /* CONFIG_BCMDHD_PCIE */
+
+#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP
+	wlan_static_dhd_memdump_ram = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_memdump_ram) {
+		pr_err("Failed to alloc wlan_static_dhd_memdump_ram\n");
+		goto err_mem_alloc;
+	}
+#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */
+
+	wlan_static_dhd_pktlog_dump_buf = kmalloc(DHD_PKTLOG_DUMP_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_pktlog_dump_buf) {
+		pr_err("Failed to alloc wlan_static_dhd_pktlog_dump_buf\n");
+		goto err_mem_alloc;
+	}
+
+	wlan_static_stat_report_buf = kmalloc(DHD_STAT_REPORT_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_stat_report_buf) {
+		pr_err("Failed to alloc wlan_static_stat_report_buf\n");
+		goto err_mem_alloc;
+	}
+
+	pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__);
+	return 0;
+
+err_mem_alloc:
+#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP
+	if (wlan_static_dhd_memdump_ram) {
+		kfree(wlan_static_dhd_memdump_ram);
+	}
+
+#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */
+
+#ifdef CONFIG_BCMDHD_PCIE
+	if (wlan_static_if_flow_lkup) {
+		kfree(wlan_static_if_flow_lkup);
+	}
+
+#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP
+	if (wlan_static_dhd_pktid_map) {
+		kfree(wlan_static_dhd_pktid_map);
+	}
+
+	if (wlan_static_dhd_pktid_ioctl_map) {
+		kfree(wlan_static_dhd_pktid_ioctl_map);
+	}
+#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */
+#else
+	if (wlan_static_dhd_wlfc_buf) {
+		kfree(wlan_static_dhd_wlfc_buf);
+	}
+
+	if (wlan_static_dhd_wlfc_hanger) {
+		kfree(wlan_static_dhd_wlfc_hanger);
+	}
+#endif /* CONFIG_BCMDHD_PCIE */
+	if (wlan_static_dhd_info_buf) {
+		kfree(wlan_static_dhd_info_buf);
+	}
+
+	if (wlan_static_dhd_log_dump_buf) {
+		kfree(wlan_static_dhd_log_dump_buf);
+	}
+
+	if (wlan_static_dhd_log_dump_buf_ex) {
+		kfree(wlan_static_dhd_log_dump_buf_ex);
+	}
+
+	if (wlan_static_scan_buf1) {
+		kfree(wlan_static_scan_buf1);
+	}
+
+	if (wlan_static_scan_buf0) {
+		kfree(wlan_static_scan_buf0);
+	}
+
+	if (wlan_static_dhd_pktlog_dump_buf) {
+		kfree(wlan_static_dhd_pktlog_dump_buf);
+	}
+
+	if (wlan_static_stat_report_buf) {
+		kfree(wlan_static_stat_report_buf);
+	}
+
+	pr_err("Failed to mem_alloc for WLAN\n");
+
+	for (j = 0; j < i; j++) {
+		kfree(wlan_mem_array[j].mem_ptr);
+	}
+
+	i = WLAN_SKB_BUF_NUM;
+
+err_skb_alloc:
+	pr_err("Failed to skb_alloc for WLAN\n");
+	for (j = 0; j < i; j++) {
+		dev_kfree_skb(wlan_static_skb[j]);
+	}
+
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(dhd_init_wlan_mem);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_msm.c b/drivers/net/wireless/bcmdhd/dhd_custom_msm.c
new file mode 100644
index 0000000..27399d0d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_msm.c
@@ -0,0 +1,253 @@
+/*
+ * Platform Dependent file for Qualcomm MSM/APQ
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_custom_msm.c 674523 2016-12-09 04:05:27Z $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/skbuff.h>
+#include <linux/wlan_plat.h>
+#include <linux/mmc/host.h>
+#include <linux/msm_pcie.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/of_gpio.h>
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998)
+#include <linux/msm_pcie.h>
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+extern int dhd_init_wlan_mem(void);
+extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+#define WIFI_TURNON_DELAY       200
+static int wlan_reg_on = -1;
+#define DHD_DT_COMPAT_ENTRY		"android,bcmdhd_wlan"
+#ifdef CUSTOMER_HW2
+#define WIFI_WL_REG_ON_PROPNAME		"wl_reg_on"
+#else
+#define WIFI_WL_REG_ON_PROPNAME		"wlan-en-gpio"
+#endif /* CUSTOMER_HW2 */
+
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998)
+#define MSM_PCIE_CH_NUM			0
+#else
+#define MSM_PCIE_CH_NUM			1
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+static int wlan_host_wake_up = -1;
+static int wlan_host_wake_irq = 0;
+#ifdef CUSTOMER_HW2
+#define WIFI_WLAN_HOST_WAKE_PROPNAME    "wl_host_wake"
+#else
+#define WIFI_WLAN_HOST_WAKE_PROPNAME    "wlan-host-wake-gpio"
+#endif /* CUSTOMER_HW2 */
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
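+/*
+ * Look up the WLAN node in the device tree, request and assert WL_REG_ON,
+ * and (when OOB host wake is enabled) map the host-wake GPIO to an IRQ.
+ */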
+int __init
+dhd_wifi_init_gpio(void)
+{
+	char *wlan_node = DHD_DT_COMPAT_ENTRY;
+	struct device_node *root_node = NULL;
+
+	root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+	if (!root_node) {
+		WARN(1, "failed to get device node of BRCM WLAN\n");
+		return -ENODEV;
+	}
+
+	/* ========== WLAN_PWR_EN ============ */
+	wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0);
+	printk(KERN_INFO "%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on);
+
+	if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_LOW, "WL_REG_ON")) {
+		printk(KERN_ERR "%s: Faiiled to request gpio %d for WL_REG_ON\n",
+			__FUNCTION__, wlan_reg_on);
+	} else {
+		printk(KERN_ERR "%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n",
+			__FUNCTION__, wlan_reg_on);
+	}
+
+	if (gpio_direction_output(wlan_reg_on, 1)) {
+		printk(KERN_ERR "%s: WL_REG_ON failed to pull up\n", __FUNCTION__);
+	} else {
+		printk(KERN_ERR "%s: WL_REG_ON is pulled up\n", __FUNCTION__);
+	}
+
+	if (gpio_get_value(wlan_reg_on)) {
+		printk(KERN_INFO "%s: Initial WL_REG_ON: [%d]\n",
+			__FUNCTION__, gpio_get_value(wlan_reg_on));
+	}
+
+	/* Wait WIFI_TURNON_DELAY ms for the power rail to stabilize */
+	msleep(WIFI_TURNON_DELAY);
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+	/* ========== WLAN_HOST_WAKE ============ */
+	wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0);
+	printk(KERN_INFO "%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up);
+
+#ifndef CUSTOMER_HW2
+	if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) {
+		printk(KERN_ERR "%s: Failed to request gpio %d for WLAN_HOST_WAKE\n",
+			__FUNCTION__, wlan_host_wake_up);
+		return -ENODEV;
+	} else {
+		printk(KERN_ERR "%s: gpio_request WLAN_HOST_WAKE done"
+			" - WLAN_HOST_WAKE: GPIO %d\n",
+			__FUNCTION__, wlan_host_wake_up);
+	}
+#endif /* !CUSTOMER_HW2 */
+
+	gpio_direction_input(wlan_host_wake_up);
+	wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#if defined(CONFIG_BCM4359) || defined(CONFIG_BCM4361)
+	printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__);
+	msm_pcie_enumerate(MSM_PCIE_CH_NUM);
+#endif /* CONFIG_BCM4359 || CONFIG_BCM4361 */
+
+	return 0;
+}
+
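+/* Drive WL_REG_ON high or low to power the WLAN chip on or off */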
+int
+dhd_wlan_power(int onoff)
+{
+	printk(KERN_INFO"------------------------------------------------");
+	printk(KERN_INFO"------------------------------------------------\n");
+	printk(KERN_INFO"%s Enter: power %s\n", __func__, onoff ? "on" : "off");
+
+	if (onoff) {
+		if (gpio_direction_output(wlan_reg_on, 1)) {
+			printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
+			return -EIO;
+		}
+		if (gpio_get_value(wlan_reg_on)) {
+			printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
+				gpio_get_value(wlan_reg_on));
+		} else {
+			printk("[%s] gpio value is 0. We need reinit.\n", __func__);
+			if (gpio_direction_output(wlan_reg_on, 1)) {
+				printk(KERN_ERR "%s: WL_REG_ON is "
+					"failed to pull up\n", __func__);
+			}
+		}
+	} else {
+		if (gpio_direction_output(wlan_reg_on, 0)) {
+			printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
+			return -EIO;
+		}
+		if (gpio_get_value(wlan_reg_on)) {
+			printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
+				gpio_get_value(wlan_reg_on));
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dhd_wlan_power);
+
+static int
+dhd_wlan_reset(int onoff)
+{
+	return 0;
+}
+
+static int
+dhd_wlan_set_carddetect(int val)
+{
+	return 0;
+}
+
+struct resource dhd_wlan_resources = {
+	.name	= "bcmdhd_wlan_irq",
+	.start	= 0, /* Dummy */
+	.end	= 0, /* Dummy */
+	.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE |
+#ifdef CONFIG_BCMDHD_PCIE
+	IORESOURCE_IRQ_HIGHEDGE,
+#else
+	IORESOURCE_IRQ_HIGHLEVEL,
+#endif /* CONFIG_BCMDHD_PCIE */
+};
+EXPORT_SYMBOL(dhd_wlan_resources);
+
+struct wifi_platform_data dhd_wlan_control = {
+	.set_power	= dhd_wlan_power,
+	.set_reset	= dhd_wlan_reset,
+	.set_carddetect	= dhd_wlan_set_carddetect,
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+	.mem_prealloc	= dhd_wlan_mem_prealloc,
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+};
+EXPORT_SYMBOL(dhd_wlan_control);
+
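+/*
+ * Platform init entry point: set up the GPIOs, record the host-wake IRQ in
+ * dhd_wlan_resources (when OOB host wake is enabled) and, when reserved
+ * memory is enabled, preallocate the static WLAN buffers.
+ */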
+int __init
+dhd_wlan_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO"%s: START.......\n", __FUNCTION__);
+	ret = dhd_wifi_init_gpio();
+	if (ret < 0) {
+		printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n",
+			__FUNCTION__, ret);
+		goto fail;
+	}
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+	dhd_wlan_resources.start = wlan_host_wake_irq;
+	dhd_wlan_resources.end = wlan_host_wake_irq;
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+	ret = dhd_init_wlan_mem();
+	if (ret < 0) {
+		printk(KERN_ERR "%s: failed to alloc reserved memory,"
+			" ret=%d\n", __FUNCTION__, ret);
+	}
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+fail:
+	printk(KERN_INFO"%s: FINISH.......\n", __FUNCTION__);
+	return ret;
+}
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+deferred_module_init(dhd_wlan_init);
+#else
+late_initcall(dhd_wlan_init);
+#endif /* CONFIG_DEFERRED_INITCALLS */
+#else
+device_initcall(dhd_wlan_init);
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */
diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h
new file mode 100644
index 0000000..3f90c52
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -0,0 +1,347 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_dbg.h 667145 2016-10-26 04:27:53Z $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+extern void dhd_log_dump_print(const char *fmt, ...);
+extern void dhd_log_dump_print_drv(const char *fmt, ...);
+#endif
+
+#if defined(DHD_DEBUG)
+
+#ifdef DHD_LOG_DUMP
+extern void dhd_log_dump_write(int type, const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+#ifndef _DHD_LOG_DUMP_DEFINITIONS_
+#define _DHD_LOG_DUMP_DEFINITIONS_
+#define DLD_BUF_TYPE_GENERAL    0
+#define DLD_BUF_TYPE_SPECIAL    1
+#define DHD_LOG_DUMP_WRITE(fmt, ...)	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_EX(fmt, ...)	dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, fmt, ##__VA_ARGS__)
+#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
+
+#ifdef DHD_EFI
+#define DHD_ERROR(args)	\
+do {	\
+	if (dhd_msg_level & DHD_ERROR_VAL) {	\
+		printf args;	\
+		dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+		dhd_log_dump_print_drv args; \
+	}	\
+} while (0)
+#define DHD_INFO(args) \
+do {	\
+	if (dhd_msg_level & DHD_INFO_VAL) {	\
+		printf args;	\
+		dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+		dhd_log_dump_print_drv args; \
+	}	\
+} while (0)
+#else /* DHD_EFI */
+#define DHD_ERROR(args)	\
+do {	\
+	if (dhd_msg_level & DHD_ERROR_VAL) {	\
+		printf args;	\
+		DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__);	\
+		DHD_LOG_DUMP_WRITE args;	\
+	}	\
+} while (0)
+#define DHD_INFO(args)		do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#endif /* DHD_EFI */
+#else /* DHD_LOG_DUMP */
+#define DHD_ERROR(args)		do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0)
+#define DHD_INFO(args)		do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
+#define DHD_TRACE(args)		do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+
+#define DHD_DATA(args)		do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args)		do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args)		do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args)		do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args)		do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args)		do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args)		do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#ifdef DHD_LOG_DUMP
+#ifndef DHD_EFI
+#define DHD_EVENT(args) \
+do {	\
+	if (dhd_msg_level & DHD_EVENT_VAL) {	\
+		printf args;	\
+		DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__);	\
+		DHD_LOG_DUMP_WRITE args;	\
+	}	\
+} while (0)
+#else
+#define DHD_EVENT(args) \
+do {	\
+	if (dhd_msg_level & DHD_EVENT_VAL) {	\
+		dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__);	\
+		dhd_log_dump_print args;	\
+	}	\
+} while (0)
+#endif /* !DHD_EFI */
+#else
+#define DHD_EVENT(args)		do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
+#define DHD_ISCAN(args)		do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args)		do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args)	do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+#define DHD_PNO(args)		do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
+#define DHD_RTT(args)		do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0)
+#define DHD_PKT_MON(args)	do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0)
+#ifdef DHD_LOG_DUMP
+#ifndef DHD_EFI
+#define DHD_MSGTRACE_LOG(args)	\
+do {	\
+	if (dhd_msg_level & DHD_MSGTRACE_VAL) {	\
+		printf args;	\
+		DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__);	\
+		DHD_LOG_DUMP_WRITE args;	\
+	}   \
+} while (0)
+#else
+#define DHD_MSGTRACE_LOG(args)	\
+do {	\
+	if (dhd_msg_level & DHD_MSGTRACE_VAL) {	\
+		dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__);	\
+		dhd_log_dump_print args;	\
+	}   \
+} while (0)
+#endif /* !DHD_EFI	*/
+#else
+#define DHD_MSGTRACE_LOG(args)  do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
+
+#if defined(DHD_LOG_DUMP) && defined(DHD_EFI)
+#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args)
+#else
+
+#define DHD_FWLOG(args)		do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP & DHD_EFI */
+#define DHD_DBGIF(args)		do {if (dhd_msg_level & DHD_DBGIF_VAL) printf args;} while (0)
+
+#ifdef DHD_LOG_DUMP
+#ifdef DHD_EFI
+#define DHD_ERROR_MEM(args)					\
+do {                                        \
+	if (dhd_msg_level & DHD_ERROR_VAL) {    \
+		dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+		dhd_log_dump_print_drv args;	\
+	}	\
+} while (0)
+#define DHD_ERROR_EX(args)	DHD_ERROR(args)
+#else
+#define DHD_ERROR_MEM(args)					\
+do {                                        \
+	if (dhd_msg_level & DHD_ERROR_VAL) {    \
+			if (dhd_msg_level & DHD_ERROR_MEM_VAL) {	\
+				printf args;	\
+			}	\
+		DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__);	\
+		DHD_LOG_DUMP_WRITE args;	\
+	}	\
+} while (0)
+#define DHD_ERROR_EX(args)					\
+do {                                        \
+	if (dhd_msg_level & DHD_ERROR_VAL) {    \
+		printf args;	\
+		DHD_LOG_DUMP_WRITE_EX("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__);	\
+		DHD_LOG_DUMP_WRITE_EX args;	\
+	}	\
+} while (0)
+#endif /* DHD_EFI */
+#else
+#define DHD_ERROR_MEM(args)	DHD_ERROR(args)
+#define DHD_ERROR_EX(args)	DHD_ERROR(args)
+#endif /* DHD_LOG_DUMP */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4	DHD_ERROR
+#define DHD_INFO_HW4	DHD_ERROR
+#else
+#define DHD_TRACE_HW4	DHD_TRACE
+#define DHD_INFO_HW4	DHD_INFO
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#define DHD_ERROR_ON()		(dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON()		(dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON()		(dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON()		(dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON()		(dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON()		(dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON()		(dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON()		(dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON()		(dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON()		(dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON()		(dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_ISCAN_ON()		(dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON()		(dhd_msg_level & DHD_ARPOE_VAL)
+#define DHD_REORDER_ON()	(dhd_msg_level & DHD_REORDER_VAL)
+#define DHD_NOCHECKDIED_ON()	(dhd_msg_level & DHD_NOCHECKDIED_VAL)
+#define DHD_PNO_ON()		(dhd_msg_level & DHD_PNO_VAL)
+#define DHD_RTT_ON()		(dhd_msg_level & DHD_RTT_VAL)
+#define DHD_MSGTRACE_ON()	(dhd_msg_level & DHD_MSGTRACE_VAL)
+#define DHD_FWLOG_ON()		(dhd_msg_level & DHD_FWLOG_VAL)
+#define DHD_DBGIF_ON()		(dhd_msg_level & DHD_DBGIF_VAL)
+#define DHD_PKT_MON_ON()	(dhd_msg_level & DHD_PKT_MON_VAL)
+#define DHD_PKT_MON_DUMP_ON()	(dhd_msg_level & DHD_PKT_MON_DUMP_VAL)
+
+#else /* DHD_DEBUG */
+
+#if defined(DHD_EFI)
+extern void dhd_log_dump_print_drv(const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+#define DHD_ERROR(args)	\
+do {	\
+	if (dhd_msg_level & DHD_ERROR_VAL) {	\
+		printf args;	\
+		dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+		dhd_log_dump_print_drv args; \
+	}	\
+} while (0)
+#define DHD_INFO(args) \
+do {	\
+	if (dhd_msg_level & DHD_INFO_VAL) {	\
+		printf args;	\
+		dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+		dhd_log_dump_print_drv args; \
+	}	\
+} while (0)
+#define DHD_TRACE(args)
+#else /* DHD_EFI */
+
+#define DHD_ERROR(args)		do {if (dhd_msg_level & DHD_ERROR_VAL) \
+								printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#endif /* DHD_EFI */
+
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_EVENT(args) \
+do {	\
+	if (dhd_msg_level & DHD_EVENT_VAL) {	\
+		dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__);	\
+		dhd_log_dump_print args;	\
+	}	\
+} while (0)
+#else
+#define DHD_EVENT(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+#define DHD_REORDER(args)
+#define DHD_PNO(args)
+#define DHD_RTT(args)
+#define DHD_PKT_MON(args)
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_MSGTRACE_LOG(args)	\
+do {	\
+	if (dhd_msg_level & DHD_MSGTRACE_VAL) {	\
+		dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__);	\
+		dhd_log_dump_print args;	\
+	}   \
+} while (0)
+#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args)
+#else
+#define DHD_MSGTRACE_LOG(args)
+#define DHD_FWLOG(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+
+#define DHD_DBGIF(args)
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_ERROR_MEM(args)                  \
+do {                                        \
+	if (dhd_msg_level & DHD_ERROR_VAL) {    \
+		dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__);	\
+		dhd_log_dump_print args;	\
+	}	\
+} while (0)
+#define DHD_ERROR_EX(args)	DHD_ERROR(args)
+#else
+#define DHD_ERROR_MEM(args) DHD_ERROR(args)
+#define DHD_ERROR_EX(args)	DHD_ERROR(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4	DHD_ERROR
+#define DHD_INFO_HW4	DHD_ERROR
+#else
+#define DHD_TRACE_HW4	DHD_TRACE
+#define DHD_INFO_HW4	DHD_INFO
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#define DHD_ERROR_ON()		0
+#define DHD_TRACE_ON()		0
+#define DHD_INFO_ON()		0
+#define DHD_DATA_ON()		0
+#define DHD_CTL_ON()		0
+#define DHD_TIMER_ON()		0
+#define DHD_HDRS_ON()		0
+#define DHD_BYTES_ON()		0
+#define DHD_INTR_ON()		0
+#define DHD_GLOM_ON()		0
+#define DHD_EVENT_ON()		0
+#define DHD_ISCAN_ON()		0
+#define DHD_ARPOE_ON()		0
+#define DHD_REORDER_ON()	0
+#define DHD_NOCHECKDIED_ON()	0
+#define DHD_PNO_ON()		0
+#define DHD_RTT_ON()		0
+#define DHD_PKT_MON_ON()	0
+#define DHD_PKT_MON_DUMP_ON()	0
+#define DHD_MSGTRACE_ON()	0
+#define DHD_FWLOG_ON()		0
+#define DHD_DBGIF_ON()		0
+#endif /* DHD_DEBUG */
+
+#define DHD_LOG(args)
+
+#define DHD_BLOG(cp, size)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_debug.c b/drivers/net/wireless/bcmdhd/dhd_debug.c
new file mode 100644
index 0000000..310749e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_debug.c
@@ -0,0 +1,2335 @@
+/*
+ * DHD debugability support
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_debug.c 711908 2017-07-20 10:37:34Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_mschdbg.h>
+
+#include <event_log.h>
+#include <event_trace.h>
+#include <msgtrace.h>
+
+#if defined(DHD_EFI)
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#define container_of(ptr, type, member) \
+		((type *)((char *)(ptr) - offsetof(type, member)))
+#endif /* DHD_EFI */
+
+#define DBGRING_FLUSH_THRESHOLD(ring)		(ring->ring_size / 3)
+#define RING_STAT_TO_STATUS(ring, status) \
+	do {               \
+		strncpy(status.name, ring->name, \
+			sizeof(status.name) - 1);  \
+		status.ring_id = ring->id;     \
+		status.ring_buffer_byte_size = ring->ring_size;  \
+		status.written_bytes = ring->stat.written_bytes; \
+		status.written_records = ring->stat.written_records; \
+		status.read_bytes = ring->stat.read_bytes; \
+		status.verbose_level = ring->log_level; \
+	} while (0)
+
+#define DHD_PKT_INFO DHD_ERROR
+struct map_table {
+	uint16 fw_id;
+	uint16 host_id;
+	char *desc;
+};
+
+struct map_table event_map[] = {
+	{WLC_E_AUTH, WIFI_EVENT_AUTH_COMPLETE, "AUTH_COMPLETE"},
+	{WLC_E_ASSOC, WIFI_EVENT_ASSOC_COMPLETE, "ASSOC_COMPLETE"},
+	{TRACE_FW_AUTH_STARTED, WIFI_EVENT_FW_AUTH_STARTED, "AUTH STARTED"},
+	{TRACE_FW_ASSOC_STARTED, WIFI_EVENT_FW_ASSOC_STARTED, "ASSOC STARTED"},
+	{TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_FW_RE_ASSOC_STARTED, "REASSOC STARTED"},
+	{TRACE_G_SCAN_STARTED, WIFI_EVENT_G_SCAN_STARTED, "GSCAN STARTED"},
+	{WLC_E_PFN_SCAN_COMPLETE, WIFI_EVENT_G_SCAN_COMPLETE, "GSCAN COMPLETE"},
+	{WLC_E_DISASSOC, WIFI_EVENT_DISASSOCIATION_REQUESTED, "DIASSOC REQUESTED"},
+	{WLC_E_REASSOC, WIFI_EVENT_RE_ASSOCIATION_REQUESTED, "REASSOC REQUESTED"},
+	{TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_REQUESTED, "ROAM REQUESTED"},
+	{WLC_E_BEACON_FRAME_RX, WIFI_EVENT_BEACON_RECEIVED, "BEACON Received"},
+	{TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_SCAN_STARTED, "ROAM SCAN STARTED"},
+	{TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"},
+	{TRACE_ROAM_AUTH_STARTED, WIFI_EVENT_ROAM_AUTH_STARTED, "ROAM AUTH STARTED"},
+	{WLC_E_AUTH, WIFI_EVENT_ROAM_AUTH_COMPLETE, "ROAM AUTH COMPLETED"},
+	{TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_ROAM_ASSOC_STARTED, "ROAM ASSOC STARTED"},
+	{WLC_E_ASSOC, WIFI_EVENT_ROAM_ASSOC_COMPLETE, "ROAM ASSOC COMPLETED"},
+	{TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"},
+	{TRACE_BT_COEX_BT_SCO_START, WIFI_EVENT_BT_COEX_BT_SCO_START, "BT SCO START"},
+	{TRACE_BT_COEX_BT_SCO_STOP, WIFI_EVENT_BT_COEX_BT_SCO_STOP, "BT SCO STOP"},
+	{TRACE_BT_COEX_BT_SCAN_START, WIFI_EVENT_BT_COEX_BT_SCAN_START, "BT COEX SCAN START"},
+	{TRACE_BT_COEX_BT_SCAN_STOP, WIFI_EVENT_BT_COEX_BT_SCAN_STOP, "BT COEX SCAN STOP"},
+	{TRACE_BT_COEX_BT_HID_START, WIFI_EVENT_BT_COEX_BT_HID_START, "BT HID START"},
+	{TRACE_BT_COEX_BT_HID_STOP, WIFI_EVENT_BT_COEX_BT_HID_STOP, "BT HID STOP"},
+	{WLC_E_EAPOL_MSG, WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, "FW EAPOL PKT RECEIVED"},
+	{TRACE_FW_EAPOL_FRAME_TRANSMIT_START, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START,
+	"FW EAPOL PKT TRANSMITED"},
+	{TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP,
+	"FW EAPOL PKT TX STOPPED"},
+	{TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE, WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE,
+	"BLOCK ACK NEGO COMPLETED"},
+};
+
+struct map_table event_tag_map[] = {
+	{TRACE_TAG_VENDOR_SPECIFIC, WIFI_TAG_VENDOR_SPECIFIC, "VENDOR SPECIFIC DATA"},
+	{TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"},
+	{TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"},
+	{TRACE_TAG_SSID, WIFI_TAG_SSID, "SSID"},
+	{TRACE_TAG_STATUS, WIFI_TAG_STATUS, "STATUS"},
+	{TRACE_TAG_CHANNEL_SPEC, WIFI_TAG_CHANNEL_SPEC, "CHANSPEC"},
+	{TRACE_TAG_WAKE_LOCK_EVENT, WIFI_TAG_WAKE_LOCK_EVENT, "WAKELOCK EVENT"},
+	{TRACE_TAG_ADDR1, WIFI_TAG_ADDR1, "ADDR_1"},
+	{TRACE_TAG_ADDR2, WIFI_TAG_ADDR2, "ADDR_2"},
+	{TRACE_TAG_ADDR3, WIFI_TAG_ADDR3, "ADDR_3"},
+	{TRACE_TAG_ADDR4, WIFI_TAG_ADDR4, "ADDR_4"},
+	{TRACE_TAG_TSF, WIFI_TAG_TSF, "TSF"},
+	{TRACE_TAG_IE, WIFI_TAG_IE, "802.11 IE"},
+	{TRACE_TAG_INTERFACE, WIFI_TAG_INTERFACE, "INTERFACE"},
+	{TRACE_TAG_REASON_CODE, WIFI_TAG_REASON_CODE, "REASON CODE"},
+	{TRACE_TAG_RATE_MBPS, WIFI_TAG_RATE_MBPS, "RATE"},
+};
+
+/* define log level per ring type */
+struct log_level_table fw_verbose_level_map[] = {
+	{1, EVENT_LOG_TAG_PCI_ERROR, EVENT_LOG_SET_BUS, "PCI_ERROR"},
+	{1, EVENT_LOG_TAG_PCI_WARN, EVENT_LOG_SET_BUS, "PCI_WARN"},
+	{2, EVENT_LOG_TAG_PCI_INFO, EVENT_LOG_SET_BUS, "PCI_INFO"},
+	{3, EVENT_LOG_TAG_PCI_DBG, EVENT_LOG_SET_BUS, "PCI_DEBUG"},
+	{3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON_LOG"},
+	{2, EVENT_LOG_TAG_WL_ASSOC_LOG, EVENT_LOG_SET_WL, "ASSOC_LOG"},
+	{2, EVENT_LOG_TAG_WL_ROAM_LOG, EVENT_LOG_SET_WL, "ROAM_LOG"},
+	{1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"},
+	{1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"},
+#ifdef CUSTOMER_HW4_DEBUG
+	{3, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"},
+#else
+	{1, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"},
+#endif /* CUSTOMER_HW4_DEBUG */
+	{1, EVENT_LOG_TAG_SCAN_ERROR, EVENT_LOG_SET_WL, "SCAN_ERROR"},
+	{2, EVENT_LOG_TAG_SCAN_TRACE_LOW, EVENT_LOG_SET_WL, "SCAN_TRACE_LOW"},
+	{2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, EVENT_LOG_SET_WL, "SCAN_TRACE_HIGH"}
+};
+
+struct log_level_table fw_event_level_map[] = {
+	{1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"},
+	{1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"},
+#ifdef CUSTOMER_HW4_DEBUG
+	{3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"},
+#else
+	{2, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"},
+#endif /* CUSTOMER_HW4_DEBUG */
+};
+
+struct map_table nan_event_map[] = {
+	{TRACE_NAN_CLUSTER_STARTED, NAN_EVENT_CLUSTER_STARTED, "NAN_CLUSTER_STARTED"},
+	{TRACE_NAN_CLUSTER_JOINED, NAN_EVENT_CLUSTER_JOINED, "NAN_CLUSTER_JOINED"},
+	{TRACE_NAN_CLUSTER_MERGED, NAN_EVENT_CLUSTER_MERGED, "NAN_CLUSTER_MERGED"},
+	{TRACE_NAN_ROLE_CHANGED, NAN_EVENT_ROLE_CHANGED, "NAN_ROLE_CHANGED"},
+	{TRACE_NAN_SCAN_COMPLETE, NAN_EVENT_SCAN_COMPLETE, "NAN_SCAN_COMPLETE"},
+	{TRACE_NAN_STATUS_CHNG, NAN_EVENT_STATUS_CHNG, "NAN_STATUS_CHNG"},
+};
+
+struct log_level_table nan_event_level_map[] = {
+	{1, EVENT_LOG_TAG_NAN_ERROR, 0, "NAN_ERROR"},
+	{2, EVENT_LOG_TAG_NAN_INFO, 0, "NAN_INFO"},
+	{3, EVENT_LOG_TAG_NAN_DBG, 0, "NAN_DEBUG"},
+};
+
+struct map_table nan_evt_tag_map[] = {
+	{TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"},
+	{TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"},
+};
+
+/* reference tag table */
+uint ref_tag_tbl[EVENT_LOG_TAG_MAX + 1] = {0};
+
+typedef struct dhddbg_loglist_item {
+	dll_t list;
+	event_log_hdr_t *hdr;
+} loglist_item_t;
+
+typedef struct dhbdbg_pending_item {
+	dll_t list;
+	dhd_dbg_ring_status_t ring_status;
+	dhd_dbg_ring_entry_t *ring_entry;
+} pending_item_t;
+
+/* trace log entry header user space processing */
+struct tracelog_header {
+	int magic_num;
+	int buf_size;
+	int seq_num;
+};
+#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06
+
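+/*
+ * Copy one entry from the ring into 'data' (optionally without the entry
+ * header), advance the read pointer and skip any tail padding.
+ * Returns the number of bytes copied, or 0 if nothing is available.
+ */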
+int
+dhd_dbg_ring_pull_single(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
+	bool strip_header)
+{
+	dhd_dbg_ring_t *ring;
+	dhd_dbg_ring_entry_t *r_entry;
+	uint32 rlen;
+	char *buf;
+
+	if (!dhdp || !dhdp->dbg) {
+		return 0;
+	}
+
+	ring = &dhdp->dbg->dbg_rings[ring_id];
+
+	if (ring->state != RING_ACTIVE) {
+		return 0;
+	}
+
+	if (ring->rp == ring->wp) {
+		return 0;
+	}
+
+	r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp);
+
+	/* Boundary Check */
+	rlen = ENTRY_LENGTH(r_entry);
+	if ((ring->rp + rlen) > ring->ring_size) {
+		DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d,"
+			" current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen,
+			ring->ring_size, ring->id, ring->name, ring->rp));
+		return 0;
+	}
+
+	if (strip_header) {
+		rlen = r_entry->len;
+		buf = (char *)r_entry + DBG_RING_ENTRY_SIZE;
+	} else {
+		rlen = ENTRY_LENGTH(r_entry);
+		buf = (char *)r_entry;
+	}
+	if (rlen > buf_len) {
+		DHD_ERROR(("%s: buf len %d is too small for entry len %d\n",
+			__FUNCTION__, buf_len, rlen));
+		DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n",
+			__FUNCTION__, ring->id, ring->name, ring->ring_size,
+			ring->wp, ring->rp));
+		ASSERT(0);
+		return 0;
+	}
+
+	memcpy(data, buf, rlen);
+	/* update ring context */
+	ring->rp += ENTRY_LENGTH(r_entry);
+	/* skip padding if there is one */
+	if (ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) {
+		DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n",
+			__FUNCTION__, ring->id, ring->name, ring->rp, ring->wp));
+		ring->rp = 0;
+		ring->tail_padded = FALSE;
+		ring->rem_len = 0;
+	}
+	if (ring->rp >= ring->ring_size) {
+		DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary,"
+			" rp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
+			ring->name, ring->rp, ring->ring_size));
+		ASSERT(0);
+	}
+	ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+	DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__,
+		ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp));
+
+	return rlen;
+}
+
+int
+dhd_dbg_ring_pull(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len)
+{
+	int32 r_len, total_r_len = 0;
+	dhd_dbg_ring_t *ring;
+
+	if (!dhdp || !dhdp->dbg)
+		return 0;
+	ring = &dhdp->dbg->dbg_rings[ring_id];
+	if (ring->state != RING_ACTIVE)
+		return 0;
+
+	while (buf_len > 0) {
+		r_len = dhd_dbg_ring_pull_single(dhdp, ring_id, data, buf_len, FALSE);
+		if (r_len == 0)
+			break;
+		data = (uint8 *)data + r_len;
+		buf_len -= r_len;
+		total_r_len += r_len;
+	}
+
+	return total_r_len;
+}
+
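+/*
+ * Append one entry (header + payload) to the ring.  If there is not enough
+ * room, pad the tail and/or discard the oldest entries until the new entry
+ * fits, then schedule a pull once the pending data crosses the ring threshold.
+ */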
+int
+dhd_dbg_ring_push(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data)
+{
+	unsigned long flags;
+	uint32 pending_len;
+	uint32 w_len;
+	uint32 avail_size;
+	dhd_dbg_ring_t *ring;
+	dhd_dbg_ring_entry_t *w_entry, *r_entry;
+
+	if (!dhdp || !dhdp->dbg) {
+		return BCME_BADADDR;
+	}
+
+	ring = &dhdp->dbg->dbg_rings[ring_id];
+
+	if (ring->state != RING_ACTIVE) {
+		return BCME_OK;
+	}
+
+	flags = dhd_os_spin_lock(ring->lock);
+
+	w_len = ENTRY_LENGTH(hdr);
+
+	if (w_len > ring->ring_size) {
+		dhd_os_spin_unlock(ring->lock, flags);
+		return BCME_ERROR;
+	}
+
+	/* Claim the space */
+	do {
+		avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size);
+		if (avail_size <= w_len) {
+			/* Prepare the space */
+			if (ring->rp <= ring->wp) {
+				ring->tail_padded = TRUE;
+				ring->rem_len = ring->ring_size - ring->wp;
+				DHD_DBGIF(("%s: RING%d[%s] Insuffient tail space,"
+					" rp=%d, wp=%d, rem_len=%d, ring_size=%d,"
+					" avail_size=%d, w_len=%d\n", __FUNCTION__,
+					ring->id, ring->name, ring->rp, ring->wp,
+					ring->rem_len, ring->ring_size, avail_size,
+					w_len));
+
+				/* 0 pad insufficient tail space */
+				memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len);
+				if (ring->rp == ring->wp) {
+					ring->rp = 0;
+				}
+				ring->wp = 0;
+			} else {
+				/* Not enough space for new entry, free some up */
+				r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf +
+					ring->rp);
+				ring->rp += ENTRY_LENGTH(r_entry);
+				/* skip padding if there is one */
+				if (ring->tail_padded &&
+					((ring->rp + ring->rem_len) == ring->ring_size)) {
+					DHD_DBGIF(("%s: RING%d[%s] Found padding,"
+						" avail_size=%d, w_len=%d\n", __FUNCTION__,
+						ring->id, ring->name, avail_size, w_len));
+					ring->rp = 0;
+					ring->tail_padded = FALSE;
+					ring->rem_len = 0;
+				}
+				if (ring->rp >= ring->ring_size) {
+					DHD_ERROR(("%s: RING%d[%s] rp points out of boundary,"
+						" ring->rp = %d, ring->ring_size=%d\n",
+						__FUNCTION__, ring->id, ring->name, ring->rp,
+						ring->ring_size));
+					ASSERT(0);
+				}
+				ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+				DHD_DBGIF(("%s: RING%d[%s] read_bytes  %d, wp=%d, rp=%d\n",
+					__FUNCTION__, ring->id, ring->name, ring->stat.read_bytes,
+					ring->wp, ring->rp));
+			}
+		} else {
+			break;
+		}
+	} while (TRUE);
+
+	w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp);
+	/* header */
+	memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE);
+	w_entry->len = hdr->len;
+	/* payload */
+	memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, data, w_entry->len);
+	/* update write pointer */
+	ring->wp += w_len;
+	if (ring->wp >= ring->ring_size) {
+		DHD_ERROR(("%s: RING%d[%s] wp pointed out of ring boundary, "
+			"wp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
+			ring->name, ring->wp, ring->ring_size));
+		ASSERT(0);
+	}
+	/* update statistics */
+	ring->stat.written_records++;
+	ring->stat.written_bytes += w_len;
+	DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d,"
+		" ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name,
+		ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes,
+		ring->threshold, ring->wp, ring->rp));
+
+	/* Calculate current pending size */
+	if (ring->stat.written_bytes > ring->stat.read_bytes) {
+		pending_len = ring->stat.written_bytes - ring->stat.read_bytes;
+	} else if (ring->stat.written_bytes < ring->stat.read_bytes) {
+		pending_len = 0xFFFFFFFF - ring->stat.read_bytes + ring->stat.written_bytes;
+	} else {
+		pending_len = 0;
+	}
+
+	/* if the current pending size is bigger than threshold */
+	if (ring->threshold > 0 &&
+	   (pending_len >= ring->threshold) && ring->sched_pull) {
+		dhdp->dbg->pullreq(dhdp->dbg->private, ring->id);
+		ring->sched_pull = FALSE;
+	}
+	dhd_os_spin_unlock(ring->lock, flags);
+	return BCME_OK;
+}
+
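+/* Check the msgtrace sequence number; returns -1 for a duplicate trace */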
+static int
+dhd_dbg_msgtrace_seqchk(uint32 *prev, uint32 cur)
+{
+	/* normal case including wrap around */
+	if ((cur == 0 && *prev == 0xFFFFFFFF) || ((cur - *prev) == 1)) {
+		goto done;
+	} else if (cur == *prev) {
+		DHD_EVENT(("%s duplicate trace\n", __FUNCTION__));
+		return -1;
+	} else if (cur > *prev) {
+		DHD_EVENT(("%s lost %d packets\n", __FUNCTION__, cur - *prev));
+	} else {
+		DHD_EVENT(("%s seq out of order, dhd %d, dongle %d\n",
+			__FUNCTION__, *prev, cur));
+	}
+done:
+	*prev = cur;
+	return 0;
+}
+
+#ifndef MACOSX_DHD
+static void
+dhd_dbg_msgtrace_msg_parser(void *event_data)
+{
+	msgtrace_hdr_t *hdr;
+	char *data, *s;
+	static uint32 seqnum_prev = 0;
+
+	hdr = (msgtrace_hdr_t *)event_data;
+	data = (char *)event_data + MSGTRACE_HDRLEN;
+
+	/* There are 2 bytes available at the end of data */
+	data[ntoh16(hdr->len)] = '\0';
+
+	if (ntoh32(hdr->discarded_bytes) || ntoh32(hdr->discarded_printf)) {
+		DHD_DBGIF(("WLC_E_TRACE: [Discarded traces in dongle -->"
+			"discarded_bytes %d discarded_printf %d]\n",
+			ntoh32(hdr->discarded_bytes),
+			ntoh32(hdr->discarded_printf)));
+	}
+
+	if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum)))
+		return;
+
+	/* Display the trace buffer. Advance from newline to newline to
+	 * avoid printing one large buffer in a single call (an issue
+	 * with the Linux printk).
+	 */
+	while (*data != '\0' && (s = strstr(data, "\n")) != NULL) {
+		*s = '\0';
+		DHD_FWLOG(("[FWLOG] %s\n", data));
+		data = s+1;
+	}
+	if (*data)
+		DHD_FWLOG(("[FWLOG] %s", data));
+}
+#endif /* MACOSX_DHD */
+#ifdef SHOW_LOGTRACE
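+/* Walk a TLV list and return a pointer to the TLV whose tag matches 'id' */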
+static const uint8 *
+event_get_tlv(uint16 id, const char* tlvs, uint tlvs_len)
+{
+	const uint8 *pos = (const uint8 *)tlvs;
+	const uint8 *end = pos + tlvs_len;
+	const tlv_log *tlv;
+	int rest;
+
+	while (pos + 1 < end) {
+		if (pos + 4 + pos[1] > end)
+			break;
+		tlv = (const tlv_log *) pos;
+		if (tlv->tag == id)
+			return pos;
+		rest = tlv->len % 4; /* padding values */
+		pos += 4 + tlv->len + rest;
+	}
+	return NULL;
+}
+
+#define DATA_UNIT_FOR_LOG_CNT 4
+/* #pragma used as a WAR to fix build failure,
+ * ignore dropping of 'const' qualifier in tlv_data assignment
+ * this pragma disables the warning only for the following function
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
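+/*
+ * Translate a firmware NAN event log record into a log_nan_event_t (mapping
+ * firmware event/tag IDs to host IDs) and push it onto the NAN event ring.
+ */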
+static int
+dhd_dbg_nan_event_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data)
+{
+	int ret = BCME_OK;
+	wl_event_log_id_ver_t nan_hdr;
+	log_nan_event_t *evt_payload;
+	uint16 evt_payload_len = 0, tot_payload_len = 0;
+	dhd_dbg_ring_entry_t msg_hdr;
+	bool evt_match = FALSE;
+	event_log_hdr_t *ts_hdr;
+	uint32 *ts_data;
+	char *tlvs, *dest_tlvs;
+	tlv_log *tlv_data;
+	int tlv_len = 0;
+	int i = 0, evt_idx = 0;
+	char eaddr_buf[ETHER_ADDR_STR_LEN];
+
+	BCM_REFERENCE(eaddr_buf);
+
+	nan_hdr.t = *data;
+	DHD_DBGIF(("%s: version %u event %x\n", __FUNCTION__, nan_hdr.version,
+		nan_hdr.event));
+
+	if (nan_hdr.version != DIAG_VERSION) {
+		DHD_ERROR(("Event payload version %u mismatch with current version %u\n",
+			nan_hdr.version, DIAG_VERSION));
+		return BCME_VERSION;
+	}
+
+	/* A NAN event log record should at least contain a wl_event_log_id_ver_t
+	 * header and an ARM cycle count.
+	 */
+	if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) {
+		return BCME_BADLEN;
+	}
+
+	memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+	ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t));
+	if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
+		ts_data = (uint32 *)ts_hdr - ts_hdr->count;
+		msg_hdr.timestamp = (uint64)ts_data[0];
+		msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
+	}
+	msg_hdr.type = DBG_RING_ENTRY_NAN_EVENT_TYPE;
+	for (i = 0; i < ARRAYSIZE(nan_event_map); i++) {
+		if (nan_event_map[i].fw_id == nan_hdr.event) {
+			evt_match = TRUE;
+			evt_idx = i;
+			break;
+		}
+	}
+	if (evt_match) {
+		DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, nan_event_map[evt_idx].desc));
+		/* payload length for nan event data */
+		evt_payload_len = sizeof(log_nan_event_t) +
+			(hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
+		if ((evt_payload = MALLOC(dhdp->osh, evt_payload_len)) == NULL) {
+			DHD_ERROR(("Memory allocation failed for nan evt log (%u)\n",
+				evt_payload_len));
+			return BCME_NOMEM;
+		}
+		evt_payload->version = NAN_EVENT_VERSION;
+		evt_payload->event = nan_event_map[evt_idx].host_id;
+		dest_tlvs = (char *)evt_payload->tlvs;
+		tot_payload_len = sizeof(log_nan_event_t);
+		tlvs = (char *)(&data[1]);
+		tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
+		for (i = 0; i < ARRAYSIZE(nan_evt_tag_map); i++) {
+			tlv_data = (tlv_log *)event_get_tlv(nan_evt_tag_map[i].fw_id,
+				tlvs, tlv_len);
+			if (tlv_data) {
+				DHD_DBGIF(("NAN evt tlv.tag(%s), tlv.len : %d, tlv.data :  ",
+					nan_evt_tag_map[i].desc, tlv_data->len));
+				memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
+				tot_payload_len += tlv_data->len + sizeof(tlv_log);
+				switch (tlv_data->tag) {
+					case TRACE_TAG_BSSID:
+					case TRACE_TAG_ADDR:
+						DHD_DBGIF(("%s\n",
+						bcm_ether_ntoa(
+							(const struct ether_addr *)tlv_data->value,
+							eaddr_buf)));
+					break;
+					default:
+						if (DHD_DBGIF_ON()) {
+							prhex(NULL, &tlv_data->value[0],
+								tlv_data->len);
+						}
+					break;
+				}
+				dest_tlvs += tlv_data->len + sizeof(tlv_log);
+			}
+		}
+		msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
+		msg_hdr.len = tot_payload_len;
+		dhd_dbg_ring_push(dhdp, NAN_EVENT_RING_ID, &msg_hdr, evt_payload);
+		MFREE(dhdp->osh, evt_payload, evt_payload_len);
+	}
+	return ret;
+}
+
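+/*
+ * Translate a firmware connectivity event log record into a log_conn_event_t,
+ * filter it against the ring's verbose level and push it onto the FW event ring.
+ */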
+static int
+dhd_dbg_custom_evnt_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data)
+{
+	int i = 0, match_idx = 0;
+	int payload_len, tlv_len;
+	uint16 tot_payload_len = 0;
+	int ret = BCME_OK;
+	int log_level;
+	wl_event_log_id_ver_t wl_log_id;
+	dhd_dbg_ring_entry_t msg_hdr;
+	log_conn_event_t *event_data;
+	bool evt_match = FALSE;
+	event_log_hdr_t *ts_hdr;
+	uint32 *ts_data;
+	char *tlvs, *dest_tlvs;
+	tlv_log *tlv_data;
+	static uint64 ts_saved = 0;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	char chanbuf[CHANSPEC_STR_LEN];
+
+	BCM_REFERENCE(eabuf);
+	BCM_REFERENCE(chanbuf);
+	/* get a event type and version */
+	wl_log_id.t = *data;
+	if (wl_log_id.version != DIAG_VERSION)
+		return BCME_VERSION;
+
+	/* A custom event log record should at least contain a wl_event_log_id_ver_t
+	 * header and an ARM cycle count.
+	 */
+	if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) {
+		return BCME_BADLEN;
+	}
+
+	ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t));
+	if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
+		ts_data = (uint32 *)ts_hdr - ts_hdr->count;
+		ts_saved = (uint64)ts_data[0];
+	}
+	memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+	msg_hdr.timestamp = ts_saved;
+
+	DHD_DBGIF(("Android Event ver %d, payload %d words, ts %llu\n",
+		(*data >> 16), hdr->count - 1, ts_saved));
+
+	/* Endian conversion is disabled here; just dump the raw payload words */
+	for (i = 0; i < hdr->count; i++) {
+		/* *(data + i) = ntoh32(*(data + i)); */
+		DHD_DATA(("%08x ", *(data + i)));
+	}
+	DHD_DATA(("\n"));
+	msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
+	msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
+	msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE;
+
+	/* convert the data to log_conn_event_t format */
+	for (i = 0; i < ARRAYSIZE(event_map); i++) {
+		if (event_map[i].fw_id == wl_log_id.event) {
+			evt_match = TRUE;
+			match_idx = i;
+			break;
+		}
+	}
+	if (evt_match) {
+		log_level = dhdp->dbg->dbg_rings[FW_EVENT_RING_ID].log_level;
+		/* filter the data based on log_level */
+		for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) {
+			if ((fw_event_level_map[i].tag == hdr->tag) &&
+				(fw_event_level_map[i].log_level > log_level)) {
+				return BCME_OK;
+			}
+		}
+		DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, event_map[match_idx].desc));
+		/* get the payload length for event data (skip : log header + timestamp) */
+		payload_len = sizeof(log_conn_event_t) + DATA_UNIT_FOR_LOG_CNT * (hdr->count - 2);
+		event_data = MALLOC(dhdp->osh, payload_len);
+		if (!event_data) {
+			DHD_ERROR(("failed to allocate the log_conn_event_t with length(%d)\n",
+				payload_len));
+			return BCME_NOMEM;
+		}
+		event_data->event = event_map[match_idx].host_id;
+		dest_tlvs = (char *)event_data->tlvs;
+		tot_payload_len = sizeof(log_conn_event_t);
+		tlvs = (char *)(&data[1]);
+		tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
+		for (i = 0; i < ARRAYSIZE(event_tag_map); i++) {
+			tlv_data = (tlv_log *)event_get_tlv(event_tag_map[i].fw_id,
+			         tlvs, tlv_len);
+			if (tlv_data) {
+				DHD_DBGIF(("tlv.tag(%s), tlv.len : %d, tlv.data :  ",
+					event_tag_map[i].desc, tlv_data->len));
+				memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
+				tot_payload_len += tlv_data->len + sizeof(tlv_log);
+				switch (tlv_data->tag) {
+				case TRACE_TAG_BSSID:
+				case TRACE_TAG_ADDR:
+				case TRACE_TAG_ADDR1:
+				case TRACE_TAG_ADDR2:
+				case TRACE_TAG_ADDR3:
+				case TRACE_TAG_ADDR4:
+					DHD_DBGIF(("%s\n",
+					bcm_ether_ntoa((const struct ether_addr *)tlv_data->value,
+							eabuf)));
+					break;
+				case TRACE_TAG_SSID:
+					DHD_DBGIF(("%s\n", tlv_data->value));
+					break;
+				case TRACE_TAG_STATUS:
+					DHD_DBGIF(("%d\n", ltoh32_ua(&tlv_data->value[0])));
+					break;
+				case TRACE_TAG_REASON_CODE:
+					DHD_DBGIF(("%d\n", ltoh16_ua(&tlv_data->value[0])));
+					break;
+				case TRACE_TAG_RATE_MBPS:
+					DHD_DBGIF(("%d Kbps\n",
+						ltoh16_ua(&tlv_data->value[0]) * 500));
+					break;
+				case TRACE_TAG_CHANNEL_SPEC:
+					DHD_DBGIF(("%s\n",
+						wf_chspec_ntoa(
+							ltoh16_ua(&tlv_data->value[0]), chanbuf)));
+					break;
+				default:
+					if (DHD_DBGIF_ON()) {
+						prhex(NULL, &tlv_data->value[0], tlv_data->len);
+					}
+				}
+				dest_tlvs += tlv_data->len + sizeof(tlv_log);
+			}
+		}
+		msg_hdr.len = tot_payload_len;
+		dhd_dbg_ring_push(dhdp, FW_EVENT_RING_ID, &msg_hdr, event_data);
+		MFREE(dhdp->osh, event_data, payload_len);
+	}
+	return ret;
+}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+/* Identify format specifiers of the form %Ns, where the optional width N
+ * consists only of digits (e.g. "%s" and "%16s" are valid, "%-5s" is not)
+ */
+bool
+check_valid_string_format(char *curr_ptr)
+{
+	char *next_ptr;
+	if ((next_ptr = bcmstrstr(curr_ptr, "s")) != NULL) {
+		/* Default %s format */
+		if (curr_ptr == next_ptr) {
+			return TRUE;
+		}
+
+		/* Verify each character between '%' and 's' is a digit */
+		while (curr_ptr < next_ptr) {
+			if (bcm_isdigit(*curr_ptr) == FALSE) {
+				return FALSE;
+			}
+			curr_ptr++;
+		}
+
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+#define MAX_NO_OF_ARG	16
+#define FMTSTR_SIZE	132
+#define ROMSTR_SIZE	200
+#define SIZE_LOC_STR	50
+static uint64 verboselog_ts_saved = 0;
+static void
+dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+		void *raw_event_ptr)
+{
+	event_log_hdr_t *ts_hdr;
+	uint32 *log_ptr = (uint32 *)hdr - hdr->count;
+	char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
+	uint32 rom_str_len = 0;
+	uint32 *ts_data;
+
+	if (!raw_event_ptr) {
+		return;
+	}
+
+	/* Get time stamp if it's updated */
+	ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t));
+	if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
+		ts_data = (uint32 *)ts_hdr - ts_hdr->count;
+		verboselog_ts_saved = (uint64)ts_data[0];
+		DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n",
+			ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1]));
+	}
+
+	if (hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) {
+		rom_str_len = (hdr->count - 1) * sizeof(uint32);
+		if (rom_str_len >= (ROMSTR_SIZE -1))
+			rom_str_len = ROMSTR_SIZE - 1;
+
+		/* copy all ascii data for ROM printf to local string */
+		memcpy(fmtstr_loc_buf, log_ptr, rom_str_len);
+		/* null-terminate the local copy */
+		fmtstr_loc_buf[rom_str_len] = '\0';
+
+		DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s",
+				log_ptr[hdr->count - 1], fmtstr_loc_buf));
+
+		/* Add newline if missing */
+		if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n')
+			DHD_MSGTRACE_LOG(("\n"));
+
+		return;
+	}
+
+	if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE || hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) {
+		wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, hdr->tag, log_ptr);
+		return;
+	}
+
+	/* print the message out in a logprint  */
+	dhd_dbg_verboselog_printf(dhdp, hdr, raw_event_ptr, log_ptr);
+}
+
+void
+dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+	void *raw_event_ptr, uint32 *log_ptr)
+{
+	dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr;
+	uint16 count;
+	int log_level, id;
+	char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
+	char (*str_buf)[SIZE_LOC_STR] = NULL;
+	char *str_tmpptr = NULL;
+	uint32 addr = 0;
+	typedef union {
+		uint32 val;
+		char * addr;
+	} u_arg;
+	u_arg arg[MAX_NO_OF_ARG] = {{0}};
+	char *c_ptr = NULL;
+
+	BCM_REFERENCE(arg);
+
+	if (!raw_event) {
+		return;
+	}
+
+	/* print the message out in a logprint  */
+	if (!(raw_event->fmts) || hdr->fmt_num == 0xffff) {
+		if (dhdp->dbg) {
+			log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level;
+			for (id = 0; id < ARRAYSIZE(fw_verbose_level_map); id++) {
+				if ((fw_verbose_level_map[id].tag == hdr->tag) &&
+					(fw_verbose_level_map[id].log_level > log_level))
+					return;
+			}
+		}
+
+		DHD_EVENT(("%d.%d EL:tag=%d len=%d fmt=0x%x",
+			(uint32)verboselog_ts_saved / 1000,
+			(uint32)verboselog_ts_saved % 1000,
+			hdr->tag,
+			hdr->count,
+			hdr->fmt_num));
+
+		for (count = 0; count < (hdr->count-1); count++) {
+			if (count % 8 == 0)
+				DHD_EVENT(("\n\t%08x", log_ptr[count]));
+			else
+				DHD_EVENT((" %08x", log_ptr[count]));
+		}
+		DHD_EVENT(("\n"));
+
+		return;
+	}
+
+	str_buf = MALLOCZ(dhdp->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR));
+	if (!str_buf) {
+		DHD_ERROR(("%s: malloc failed str_buf\n", __FUNCTION__));
+		return;
+	}
+
+	if ((hdr->fmt_num >> 2) < raw_event->num_fmts) {
+		if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
+			snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s",
+				raw_event->fmts[hdr->fmt_num >> 2]);
+			hdr->count++;
+		} else {
+			snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E: %6d.%3d %s",
+				log_ptr[hdr->count-1]/1000, (log_ptr[hdr->count - 1] % 1000),
+				raw_event->fmts[hdr->fmt_num >> 2]);
+		}
+		c_ptr = fmtstr_loc_buf;
+	} else {
+		DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__));
+		goto exit;
+	}
+
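+	/* Walk the format string: for each conversion found after a '%', decide
+	 * whether the matching log word is a pointer into RAM/ROM rodata (a
+	 * static string), an unresolvable string address, or a plain value.
+	 */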
+	for (count = 0; count < (hdr->count - 1); count++) {
+		if (c_ptr != NULL)
+			if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL)
+				c_ptr++;
+
+		if (c_ptr != NULL) {
+			if (check_valid_string_format(c_ptr)) {
+				if ((raw_event->raw_sstr) &&
+					((log_ptr[count] > raw_event->rodata_start) &&
+					(log_ptr[count] < raw_event->rodata_end))) {
+					/* ram static string */
+					addr = log_ptr[count] - raw_event->rodata_start;
+					str_tmpptr = raw_event->raw_sstr + addr;
+					memcpy(str_buf[count], str_tmpptr,
+						SIZE_LOC_STR);
+					str_buf[count][SIZE_LOC_STR-1] = '\0';
+					arg[count].addr = str_buf[count];
+				} else if ((raw_event->rom_raw_sstr) &&
+						((log_ptr[count] >
+						raw_event->rom_rodata_start) &&
+						(log_ptr[count] <
+						raw_event->rom_rodata_end))) {
+					/* rom static string */
+					addr = log_ptr[count] - raw_event->rom_rodata_start;
+					str_tmpptr = raw_event->rom_raw_sstr + addr;
+					memcpy(str_buf[count], str_tmpptr,
+						SIZE_LOC_STR);
+					str_buf[count][SIZE_LOC_STR-1] = '\0';
+					arg[count].addr = str_buf[count];
+				} else {
+					/*
+					*  Dynamic string OR
+					* No data for static string.
+					* So store all string's address as string.
+					*/
+					snprintf(str_buf[count], SIZE_LOC_STR,
+						"(s)0x%x", log_ptr[count]);
+					arg[count].addr = str_buf[count];
+				}
+			} else {
+				/* Other than string */
+				arg[count].val = log_ptr[count];
+			}
+		}
+	}
+
+	/* Print FW logs */
+	DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+		arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
+		arg[11], arg[12], arg[13], arg[14], arg[15]));
+
+exit:
+	MFREE(dhdp->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR));
+}
+
+static void
+dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data,
+	void *raw_event_ptr, uint datalen)
+{
+	msgtrace_hdr_t *hdr;
+	char *data;
+	int id;
+	uint32 log_hdr_len = sizeof(event_log_hdr_t);
+	uint32 log_pyld_len;
+	static uint32 seqnum_prev = 0;
+	event_log_hdr_t *log_hdr;
+	bool msg_processed = FALSE;
+	uint32 *log_ptr =  NULL;
+	dll_t list_head, *cur;
+	loglist_item_t *log_item;
+	int32 nan_evt_ring_log_level = 0;
+	dhd_dbg_ring_entry_t msg_hdr;
+	char *logbuf;
+	struct tracelog_header *logentry_header;
+
+	/* log trace event consists of:
+	 * msgtrace header
+	 * event log block header
+	 * event log payload
+	 */
+	if (datalen <= MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_HDRLEN) {
+		return;
+	}
+	hdr = (msgtrace_hdr_t *)event_data;
+	data = (char *)event_data + MSGTRACE_HDRLEN;
+	datalen -= MSGTRACE_HDRLEN;
+
+	if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum)))
+		return;
+
+	/* Save the whole message to event log ring */
+	memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+	logbuf = VMALLOC(dhdp->osh, sizeof(*logentry_header) + datalen);
+	if (logbuf == NULL)
+		return;
+	logentry_header = (struct tracelog_header *)logbuf;
+	logentry_header->magic_num = TRACE_LOG_MAGIC_NUMBER;
+	logentry_header->buf_size = datalen;
+	logentry_header->seq_num = hdr->seqnum;
+	msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE;
+
+	if ((sizeof(*logentry_header) + datalen) > PAYLOAD_MAX_LEN) {
+		DHD_ERROR(("%s:Payload len=%u exceeds max len\n", __FUNCTION__,
+			((uint)sizeof(*logentry_header) + datalen)));
+		VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen);
+		return;
+	}
+
+	msg_hdr.len = sizeof(*logentry_header) + datalen;
+	memcpy(logbuf + sizeof(*logentry_header), data, datalen);
+	dhd_dbg_ring_push(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf);
+	VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen);
+
+	/* Print sequence number, originating set and length of received
+	 * event log buffer. Refer to event log buffer structure in
+	 * event_log.h
+	 */
+	DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n",
+		ltoh16(*((uint16 *)(data+2))), ltoh32(*((uint32 *)(data + 4))),
+		ltoh16(*((uint16 *)(data)))));
+	data += EVENT_LOG_BLOCK_HDRLEN;
+	datalen -= EVENT_LOG_BLOCK_HDRLEN;
+
+	/* start parsing from the tail of packet
+	 * Sample format of a message
+	 * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639
+	 * 001d3c54 00000064 00000064 035d6d89 0c580439
+	 * 0x0c580439 -- 39 is tag, 04 is count, 580c is format number
+	 * all these uint32 values come in reverse order, grouped as EL data
+	 * while decoding we can only parse from last to first
+	 * |<-                     datalen                     ->|
+	 * |----(payload and maybe more logs)----|event_log_hdr_t|
+	 * data                                  log_hdr
+	 */
+	dll_init(&list_head);
+	while (datalen > log_hdr_len) {
+		log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len);
+		/* skip zero padding at end of frame */
+		if (log_hdr->tag == EVENT_LOG_TAG_NULL) {
+			datalen -= log_hdr_len;
+			continue;
+		}
+		/* Check argument count, any event log should contain at least
+		 * one argument (4 bytes) for arm cycle count and up to 16
+		 * arguments when the format is valid
+		 */
+		if (log_hdr->count == 0) {
+			break;
+		}
+		if ((log_hdr->count > MAX_NO_OF_ARG) && (log_hdr->fmt_num != 0xffff)) {
+			break;
+		}
+
+		log_pyld_len = log_hdr->count * DATA_UNIT_FOR_LOG_CNT;
+		/* log data should not cross the event data boundary */
+		if ((char *)log_hdr - data < log_pyld_len)
+			break;
+		/* skip 4 bytes time stamp packet */
+		if (log_hdr->tag == EVENT_LOG_TAG_TS) {
+			datalen -= log_pyld_len + log_hdr_len;
+			continue;
+		}
+		if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) {
+			DHD_ERROR(("%s allocating log list item failed\n",
+				__FUNCTION__));
+			break;
+		}
+		log_item->hdr = log_hdr;
+		dll_insert(&log_item->list, &list_head);
+		datalen -= (log_pyld_len + log_hdr_len);
+	}
+
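+	/* Second pass: pop each collected header and dispatch it, trying the NAN
+	 * event ring first, then the firmware event ring, and finally falling
+	 * back to the verbose log handler if nothing consumed it.
+	 */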
+	while (!dll_empty(&list_head)) {
+		msg_processed = FALSE;
+		cur = dll_head_p(&list_head);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		log_hdr = log_item->hdr;
+		log_ptr = (uint32 *)log_hdr - log_hdr->count;
+		dll_delete(cur);
+		MFREE(dhdp->osh, log_item, sizeof(*log_item));
+
+		/* Before DHD debuggability was implemented, WLC_E_TRACE was
+		 * used to carry verbose logging from firmware. We need to
+		 * be able to handle those messages even without an initialized
+		 * debug layer.
+		 */
+		if (dhdp->dbg) {
+			/* check the data for the NAN event ring first (smaller table) */
+			/* process only if the user configured this ring to log */
+			nan_evt_ring_log_level = dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level;
+			if (dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level) {
+				for (id = 0; id < ARRAYSIZE(nan_event_level_map); id++) {
+					if (nan_event_level_map[id].tag == log_hdr->tag) {
+						/* don't process if the tag log level is greater
+						 * than ring log level
+						 */
+						if (nan_event_level_map[id].log_level >
+							nan_evt_ring_log_level) {
+							msg_processed = TRUE;
+							break;
+						}
+						/* In case of BCME_VERSION error,
+						 * this is not NAN event type data
+						 */
+						if (dhd_dbg_nan_event_handler(dhdp,
+							log_hdr, log_ptr) != BCME_VERSION) {
+							msg_processed = TRUE;
+						}
+						break;
+					}
+				}
+			}
+			if (!msg_processed) {
+				/* check the data for event ring */
+				for (id = 0; id < ARRAYSIZE(fw_event_level_map); id++) {
+					if (fw_event_level_map[id].tag == log_hdr->tag) {
+						/* In case of BCME_VERSION error,
+						 * this is not event type data
+						 */
+						if (dhd_dbg_custom_evnt_handler(dhdp,
+							log_hdr, log_ptr) != BCME_VERSION) {
+							msg_processed = TRUE;
+						}
+						break;
+					}
+				}
+			}
+		}
+		if (!msg_processed)
+			dhd_dbg_verboselog_handler(dhdp, log_hdr, raw_event_ptr);
+
+	}
+}
+#else /* !SHOW_LOGTRACE */
+static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp,
+	event_log_hdr_t *hdr, void *raw_event_ptr) {}
+static INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp,
+	void *event_data, void *raw_event_ptr, uint datalen) {}
+#endif /* SHOW_LOGTRACE */
+#ifndef MACOSX_DHD
+void
+dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
+		void *raw_event_ptr, uint datalen)
+{
+	msgtrace_hdr_t *hdr;
+
+	hdr = (msgtrace_hdr_t *)event_data;
+
+	if (hdr->version != MSGTRACE_VERSION) {
+		DHD_DBGIF(("%s unsupported MSGTRACE version, dhd %d, dongle %d\n",
+			__FUNCTION__, MSGTRACE_VERSION, hdr->version));
+		return;
+	}
+
+	if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG)
+		dhd_dbg_msgtrace_msg_parser(event_data);
+	else if (hdr->trace_type == MSGTRACE_HDR_TYPE_LOG)
+		dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen);
+}
+#endif /* MACOSX_DHD */
+static int
+dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
+		uint32 ring_sz, int section)
+{
+	void *buf;
+	unsigned long flags;
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	buf = DHD_OS_PREALLOC(dhdp, section, ring_sz);
+#else
+	buf = MALLOCZ(dhdp->osh, ring_sz);
+#endif
+	if (!buf)
+		return BCME_NOMEM;
+
+	ring->lock = dhd_os_spin_lock_init(dhdp->osh);
+
+	flags = dhd_os_spin_lock(ring->lock);
+	ring->id = id;
+	strncpy(ring->name, name, DBGRING_NAME_MAX);
+	ring->name[DBGRING_NAME_MAX - 1] = 0;
+	ring->ring_size = ring_sz;
+	ring->wp = ring->rp = 0;
+	ring->ring_buf = buf;
+	ring->threshold = DBGRING_FLUSH_THRESHOLD(ring);
+	ring->state = RING_SUSPEND;
+	ring->sched_pull = TRUE;
+	ring->rem_len = 0;
+	dhd_os_spin_unlock(ring->lock, flags);
+
+	return BCME_OK;
+}
+
+static void
+dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring)
+{
+	void *buf;
+	uint32 ring_sz;
+	unsigned long flags;
+
+	if (!ring->ring_buf)
+		return;
+
+	flags = dhd_os_spin_lock(ring->lock);
+	ring->id = 0;
+	ring->name[0] = 0;
+	ring_sz = ring->ring_size;
+	ring->ring_size = 0;
+	ring->wp = ring->rp = 0;
+	buf = ring->ring_buf;
+	ring->ring_buf = NULL;
+	memset(&ring->stat, 0, sizeof(ring->stat));
+	ring->threshold = 0;
+	ring->state = RING_STOP;
+	dhd_os_spin_unlock(ring->lock, flags);
+
+	dhd_os_spin_lock_deinit(dhdp->osh, ring->lock);
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+	MFREE(dhdp->osh, buf, ring_sz);
+#endif
+}
+
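+/*
+ * dhd_dbg_find_sets_by_tag : return the union of the event-log set bits
+ * registered for a tag in the verbose and event level maps.
+ */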
+uint8
+dhd_dbg_find_sets_by_tag(uint16 tag)
+{
+	uint i;
+	uint8 sets = 0;
+
+	for (i = 0; i < ARRAYSIZE(fw_verbose_level_map); i++) {
+		if (fw_verbose_level_map[i].tag == tag) {
+			sets |= fw_verbose_level_map[i].sets;
+		}
+	}
+
+	for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) {
+		if (fw_event_level_map[i].tag == tag) {
+			sets |= fw_event_level_map[i].sets;
+		}
+	}
+
+	return sets;
+}
+
+/*
+ * dhd_dbg_set_event_log_tag : modify the state of an event log tag
+ */
+void
+dhd_dbg_set_event_log_tag(dhd_pub_t *dhdp, uint16 tag, uint8 set)
+{
+	wl_el_tag_params_t pars;
+	char *cmd = "event_log_tag_control";
+	char iovbuf[WLC_IOCTL_SMLEN] = { 0 };
+	int ret;
+
+	memset(&pars, 0, sizeof(pars));
+	pars.tag = tag;
+	pars.set = dhd_dbg_find_sets_by_tag(tag);
+	pars.flags = set ? EVENT_LOG_TAG_FLAG_LOG : EVENT_LOG_TAG_FLAG_NONE;
+
+	if (!bcm_mkiovar(cmd, (char *)&pars, sizeof(pars), iovbuf, sizeof(iovbuf))) {
+		DHD_ERROR(("%s mkiovar failed\n", __FUNCTION__));
+		return;
+	}
+
+	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	if (ret) {
+		DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret));
+	}
+}
+
+int
+dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags, uint32 threshold)
+{
+	dhd_dbg_ring_t *ring;
+	uint8 set = 1;
+	unsigned long lock_flags;
+	int i, array_len = 0;
+	struct log_level_table *log_level_tbl = NULL;
+	if (!dhdp || !dhdp->dbg)
+		return BCME_BADADDR;
+
+	ring = &dhdp->dbg->dbg_rings[ring_id];
+
+	if (ring->state == RING_STOP)
+		return BCME_UNSUPPORTED;
+
+	lock_flags = dhd_os_spin_lock(ring->lock);
+	if (log_level == 0)
+		ring->state = RING_SUSPEND;
+	else
+		ring->state = RING_ACTIVE;
+	ring->log_level = log_level;
+
+	ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring));
+	dhd_os_spin_unlock(ring->lock, lock_flags);
+	if (log_level > 0)
+		set = TRUE;
+
+	if (ring->id == FW_EVENT_RING_ID) {
+		log_level_tbl = fw_event_level_map;
+		array_len = ARRAYSIZE(fw_event_level_map);
+	} else if (ring->id == FW_VERBOSE_RING_ID) {
+		log_level_tbl = fw_verbose_level_map;
+		array_len = ARRAYSIZE(fw_verbose_level_map);
+	} else if (ring->id == NAN_EVENT_RING_ID) {
+		log_level_tbl = nan_event_level_map;
+		array_len = ARRAYSIZE(nan_event_level_map);
+	}
+
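+	/* ref_tag_tbl keeps one bit per ring for every tag: a tag stays enabled
+	 * in firmware as long as at least one ring still references it at the
+	 * requested log level.
+	 */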
+	for (i = 0; i < array_len; i++) {
+		if (log_level == 0 || (log_level_tbl[i].log_level > log_level)) {
+			/* clear the reference per ring */
+			ref_tag_tbl[log_level_tbl[i].tag] &= ~(1 << ring_id);
+		} else {
+			/* set the reference per ring */
+			ref_tag_tbl[log_level_tbl[i].tag] |= (1 << ring_id);
+		}
+		set = (ref_tag_tbl[log_level_tbl[i].tag])? 1 : 0;
+		DHD_DBGIF(("%s TAG(%s) is %s for the ring(%s)\n", __FUNCTION__,
+			log_level_tbl[i].desc, (set)? "SET" : "CLEAR", ring->name));
+		dhd_dbg_set_event_log_tag(dhdp, log_level_tbl[i].tag, set);
+	}
+	return BCME_OK;
+}
+
+/*
+ * dhd_dbg_get_ring_status : get the ring status from the corresponding ring buffer
+ * Return: An error code or 0 on success.
+ */
+
+int
+dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status)
+{
+	int ret = BCME_OK;
+	int id = 0;
+	dhd_dbg_t *dbg;
+	dhd_dbg_ring_t *dbg_ring;
+	dhd_dbg_ring_status_t ring_status;
+	if (!dhdp || !dhdp->dbg)
+		return BCME_BADADDR;
+	dbg = dhdp->dbg;
+
+	memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t));
+	for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
+		dbg_ring = &dbg->dbg_rings[id];
+		if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
+			RING_STAT_TO_STATUS(dbg_ring, ring_status);
+			*dbg_ring_status = ring_status;
+			break;
+		}
+	}
+	if (!VALID_RING(id)) {
+		DHD_ERROR(("%s : cannot find the ring_id : %d\n", __FUNCTION__, ring_id));
+		ret = BCME_NOTFOUND;
+	}
+	return ret;
+}
+
+/*
+ * dhd_dbg_find_ring_id : return the ring_id matching ring_name
+ * Return: a valid ring id on success, or an invalid ring id on failure.
+ */
+
+int
+dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name)
+{
+	int id;
+	dhd_dbg_t *dbg;
+	dhd_dbg_ring_t *ring;
+
+	if (!dhdp || !dhdp->dbg)
+		return BCME_BADADDR;
+
+	dbg = dhdp->dbg;
+	for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
+		ring = &dbg->dbg_rings[id];
+		if (!strncmp((char *)ring->name, ring_name, sizeof(ring->name) - 1))
+			break;
+	}
+	return id;
+}
+
+/*
+ * dhd_dbg_get_priv : get the private data of the dhd debuggability module
+ * Return : NULL on failure or a valid data address
+ */
+void *
+dhd_dbg_get_priv(dhd_pub_t *dhdp)
+{
+	if (!dhdp || !dhdp->dbg)
+		return NULL;
+	return dhdp->dbg->private;
+}
+
+/*
+ * dhd_dbg_start : start or stop all of the ring buffers
+ * Return: An error code or 0 on success.
+ */
+int
+dhd_dbg_start(dhd_pub_t *dhdp, bool start)
+{
+	int ret = BCME_OK;
+	int ring_id;
+	dhd_dbg_t *dbg;
+	dhd_dbg_ring_t *dbg_ring;
+	if (!dhdp)
+		return BCME_BADARG;
+	dbg = dhdp->dbg;
+
+	for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+		dbg_ring = &dbg->dbg_rings[ring_id];
+		if (!start) {
+			if (VALID_RING(dbg_ring->id)) {
+				/* Initialize the information for the ring */
+				dbg_ring->state = RING_SUSPEND;
+				dbg_ring->log_level = 0;
+				dbg_ring->rp = dbg_ring->wp = 0;
+				dbg_ring->threshold = 0;
+				memset(&dbg_ring->stat, 0, sizeof(struct ring_statistics));
+				memset(dbg_ring->ring_buf, 0, dbg_ring->ring_size);
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * dhd_dbg_send_urgent_evt: send the health check event to the upper layer
+ *
+ * Return: An error code or 0 on success.
+ */
+
+int
+dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len)
+{
+	dhd_dbg_t *dbg;
+	int ret = BCME_OK;
+	if (!dhdp || !dhdp->dbg)
+		return BCME_BADADDR;
+
+	dbg = dhdp->dbg;
+	if (dbg->urgent_notifier) {
+		dbg->urgent_notifier(dhdp, data, len);
+	}
+	return ret;
+}
+
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
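+/*
+ * __dhd_dbg_pkt_hash : fold the packet pointer and packet id into one 32-bit
+ * hash. Both values are first mapped to non-negative integers (zig-zag style)
+ * and then combined with an elegant-pairing style function so that distinct
+ * (pkt, pktid) pairs are unlikely to collide.
+ */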
+uint32
+__dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid)
+{
+	uint32 __pkt;
+	uint32 __pktid;
+
+	__pkt = ((int)pkt) >= 0 ? (2 * pkt) : (-2 * pkt - 1);
+	__pktid = ((int)pktid) >= 0 ? (2 * pktid) : (-2 * pktid - 1);
+
+	return (__pkt >= __pktid ? (__pkt * __pkt + __pkt + __pktid) :
+			(__pkt + __pktid * __pktid));
+}
+
+#define __TIMESPEC_TO_US(ts) \
+	(((uint32)(ts).tv_sec * USEC_PER_SEC) + ((ts).tv_nsec / NSEC_PER_USEC))
+
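+/*
+ * __dhd_dbg_driver_ts_usec : boottime timestamp truncated to 32 bits of
+ * microseconds (wraps roughly every 71 minutes); used as the driver-side
+ * timestamp for monitored packets.
+ */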
+uint32
+__dhd_dbg_driver_ts_usec(void)
+{
+	struct timespec64 ts;
+
+	ktime_get_boottime_ts64(&ts);
+	return ((uint32)(__TIMESPEC_TO_US(ts)));
+}
+
+wifi_tx_packet_fate
+__dhd_dbg_map_tx_status_to_pkt_fate(uint16 status)
+{
+	wifi_tx_packet_fate pkt_fate;
+
+	switch (status) {
+		case WLFC_CTL_PKTFLAG_DISCARD:
+			pkt_fate = TX_PKT_FATE_ACKED;
+			break;
+		case WLFC_CTL_PKTFLAG_D11SUPPRESS:
+			/* intentional fall through */
+		case WLFC_CTL_PKTFLAG_WLSUPPRESS:
+			pkt_fate = TX_PKT_FATE_FW_QUEUED;
+			break;
+		case WLFC_CTL_PKTFLAG_TOSSED_BYWLC:
+			pkt_fate = TX_PKT_FATE_FW_DROP_INVALID;
+			break;
+		case WLFC_CTL_PKTFLAG_DISCARD_NOACK:
+			pkt_fate = TX_PKT_FATE_SENT;
+			break;
+		default:
+			pkt_fate = TX_PKT_FATE_FW_DROP_OTHER;
+			break;
+	}
+
+	return pkt_fate;
+}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+
+#ifdef DBG_PKT_MON
+static int
+__dhd_dbg_free_tx_pkts(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkts,
+	uint16 pkt_count)
+{
+	uint16 count;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	count = 0;
+	while ((count < pkt_count) && tx_pkts) {
+		if (tx_pkts->info.pkt)
+			PKTFREE(dhdp->osh, tx_pkts->info.pkt, TRUE);
+		tx_pkts++;
+		count++;
+	}
+
+	return BCME_OK;
+}
+
+static int
+__dhd_dbg_free_rx_pkts(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkts,
+	uint16 pkt_count)
+{
+	uint16 count;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	count = 0;
+	while ((count < pkt_count) && rx_pkts) {
+		if (rx_pkts->info.pkt)
+			PKTFREE(dhdp->osh, rx_pkts->info.pkt, TRUE);
+		rx_pkts++;
+		count++;
+	}
+
+	return BCME_OK;
+}
+
+void
+__dhd_dbg_dump_pkt_info(dhd_pub_t *dhdp, dhd_dbg_pkt_info_t *info)
+{
+	if (DHD_PKT_MON_DUMP_ON()) {
+		DHD_PKT_MON(("payload type   = %d\n", info->payload_type));
+		DHD_PKT_MON(("driver ts      = %u\n", info->driver_ts));
+		DHD_PKT_MON(("firmware ts    = %u\n", info->firmware_ts));
+		DHD_PKT_MON(("packet hash    = %u\n", info->pkt_hash));
+		DHD_PKT_MON(("packet length  = %zu\n", info->pkt_len));
+		DHD_PKT_MON(("packet address = %p\n", info->pkt));
+		DHD_PKT_MON(("packet data    = \n"));
+		if (DHD_PKT_MON_ON()) {
+			prhex(NULL, PKTDATA(dhdp->osh, info->pkt), info->pkt_len);
+		}
+	}
+}
+
+void
+__dhd_dbg_dump_tx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkt,
+	uint16 count)
+{
+	if (DHD_PKT_MON_DUMP_ON()) {
+		DHD_PKT_MON(("\nTX (count: %d)\n", ++count));
+		DHD_PKT_MON(("packet fate    = %d\n", tx_pkt->fate));
+		__dhd_dbg_dump_pkt_info(dhdp, &tx_pkt->info);
+	}
+}
+
+void
+__dhd_dbg_dump_rx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkt,
+	uint16 count)
+{
+	if (DHD_PKT_MON_DUMP_ON()) {
+		DHD_PKT_MON(("\nRX (count: %d)\n", ++count));
+		DHD_PKT_MON(("packet fate    = %d\n", rx_pkt->fate));
+		__dhd_dbg_dump_pkt_info(dhdp, &rx_pkt->info);
+	}
+}
+
+int
+dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
+		dbg_mon_tx_pkts_t tx_pkt_mon,
+		dbg_mon_tx_status_t tx_status_mon,
+		dbg_mon_rx_pkts_t rx_pkt_mon)
+{
+
+	dhd_dbg_tx_report_t *tx_report = NULL;
+	dhd_dbg_rx_report_t *rx_report = NULL;
+	dhd_dbg_tx_info_t *tx_pkts = NULL;
+	dhd_dbg_rx_info_t *rx_pkts = NULL;
+	dhd_dbg_pkt_mon_state_t tx_pkt_state;
+	dhd_dbg_pkt_mon_state_t tx_status_state;
+	dhd_dbg_pkt_mon_state_t rx_pkt_state;
+	gfp_t kflags;
+	uint32 alloc_len;
+	int ret = BCME_OK;
+	unsigned long flags;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+	if (PKT_MON_ATTACHED(tx_pkt_state) || PKT_MON_ATTACHED(tx_status_state) ||
+			PKT_MON_ATTACHED(rx_pkt_state)) {
+		DHD_PKT_MON(("%s(): packet monitor is already attached, "
+			"tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+			__FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+		/* return success as the intention was to initialize packet monitor */
+		return BCME_OK;
+	}
+
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	/* allocate and initialize tx packet monitoring */
+	alloc_len = sizeof(*tx_report);
+	tx_report = (dhd_dbg_tx_report_t *)kzalloc(alloc_len, kflags);
+	if (unlikely(!tx_report)) {
+		DHD_ERROR(("%s(): could not allocate memory for - "
+			"dhd_dbg_tx_report_t\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
+	tx_pkts = (dhd_dbg_tx_info_t *)kzalloc(alloc_len, kflags);
+	if (unlikely(!tx_pkts)) {
+		DHD_ERROR(("%s(): could not allocate memory for - "
+			"dhd_dbg_tx_info_t\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto fail;
+	}
+	dhdp->dbg->pkt_mon.tx_report = tx_report;
+	dhdp->dbg->pkt_mon.tx_report->tx_pkts = tx_pkts;
+	dhdp->dbg->pkt_mon.tx_pkt_mon = tx_pkt_mon;
+	dhdp->dbg->pkt_mon.tx_status_mon = tx_status_mon;
+	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_ATTACHED;
+	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_ATTACHED;
+
+	/* allocate and initialize rx packet monitoring */
+	alloc_len = sizeof(*rx_report);
+	rx_report = (dhd_dbg_rx_report_t *)kzalloc(alloc_len, kflags);
+	if (unlikely(!rx_report)) {
+		DHD_ERROR(("%s(): could not allocate memory for - "
+			"dhd_dbg_rx_report_t\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
+	rx_pkts = (dhd_dbg_rx_info_t *)kzalloc(alloc_len, kflags);
+	if (unlikely(!rx_pkts)) {
+		DHD_ERROR(("%s(): could not allocate memory for - "
+			"dhd_dbg_rx_info_t\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto fail;
+	}
+	dhdp->dbg->pkt_mon.rx_report = rx_report;
+	dhdp->dbg->pkt_mon.rx_report->rx_pkts = rx_pkts;
+	dhdp->dbg->pkt_mon.rx_pkt_mon = rx_pkt_mon;
+	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_ATTACHED;
+
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+	DHD_PKT_MON(("%s(): packet monitor attach succeeded\n", __FUNCTION__));
+	return ret;
+
+fail:
+	/* tx packet monitoring */
+	if (tx_pkts) {
+		kfree(tx_pkts);
+	}
+	if (tx_report) {
+		kfree(tx_report);
+	}
+	dhdp->dbg->pkt_mon.tx_report = NULL;
+	dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
+	dhdp->dbg->pkt_mon.tx_status_mon = NULL;
+	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
+	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
+
+	/* rx packet monitoring */
+	if (rx_pkts) {
+		kfree(rx_pkts);
+	}
+	if (rx_report) {
+		kfree(rx_report);
+	}
+	dhdp->dbg->pkt_mon.rx_report = NULL;
+	dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
+	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
+
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+	DHD_ERROR(("%s(): packet monitor attach failed\n", __FUNCTION__));
+	return ret;
+}
+
+int
+dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp)
+{
+	dhd_dbg_tx_report_t *tx_report;
+	dhd_dbg_rx_report_t *rx_report;
+	dhd_dbg_pkt_mon_state_t tx_pkt_state;
+	dhd_dbg_pkt_mon_state_t tx_status_state;
+	dhd_dbg_pkt_mon_state_t rx_pkt_state;
+	unsigned long flags;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+	if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+			PKT_MON_DETACHED(rx_pkt_state)) {
+		DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
+			"tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+			__FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+		return -EINVAL;
+	}
+
+	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTING;
+	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTING;
+	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTING;
+
+	tx_report = dhdp->dbg->pkt_mon.tx_report;
+	rx_report = dhdp->dbg->pkt_mon.rx_report;
+	if (!tx_report || !rx_report) {
+		DHD_PKT_MON(("%s(): tx_report=%p, rx_report=%p\n",
+			__FUNCTION__, tx_report, rx_report));
+		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+		return -EINVAL;
+	}
+
+	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+	/* Safe to free packets as state pkt_state is STARTING */
+	__dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, tx_report->pkt_pos);
+
+	__dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, rx_report->pkt_pos);
+
+	/* reset array positions */
+	tx_report->pkt_pos = 0;
+	tx_report->status_pos = 0;
+	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTED;
+	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTED;
+
+	rx_report->pkt_pos = 0;
+	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTED;
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+
+	DHD_PKT_MON(("%s(): packet monitor started\n", __FUNCTION__));
+	return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid)
+{
+	dhd_dbg_tx_report_t *tx_report;
+	dhd_dbg_tx_info_t *tx_pkts;
+	dhd_dbg_pkt_mon_state_t tx_pkt_state;
+	uint32 pkt_hash, driver_ts;
+	uint16 pkt_pos;
+	unsigned long flags;
+
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	if (PKT_MON_STARTED(tx_pkt_state)) {
+		tx_report = dhdp->dbg->pkt_mon.tx_report;
+		pkt_pos = tx_report->pkt_pos;
+
+		if (!PKT_MON_PKT_FULL(pkt_pos)) {
+			tx_pkts = tx_report->tx_pkts;
+			pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+			driver_ts = __dhd_dbg_driver_ts_usec();
+
+			tx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
+			tx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
+			tx_pkts[pkt_pos].info.pkt_hash = pkt_hash;
+			tx_pkts[pkt_pos].info.driver_ts = driver_ts;
+			tx_pkts[pkt_pos].info.firmware_ts = 0U;
+			tx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
+			tx_pkts[pkt_pos].fate = TX_PKT_FATE_DRV_QUEUED;
+
+			tx_report->pkt_pos++;
+		} else {
+			dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED;
+			DHD_PKT_MON(("%s(): tx pkt logging stopped, reached "
+				"max limit\n", __FUNCTION__));
+		}
+	}
+
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+	return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
+		uint16 status)
+{
+	dhd_dbg_tx_report_t *tx_report;
+	dhd_dbg_tx_info_t *tx_pkt;
+	dhd_dbg_pkt_mon_state_t tx_status_state;
+	wifi_tx_packet_fate pkt_fate;
+	uint32 pkt_hash, temp_hash;
+	uint16 pkt_pos, status_pos;
+	int16 count;
+	bool found = FALSE;
+	unsigned long flags;
+
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+	if (PKT_MON_STARTED(tx_status_state)) {
+		tx_report = dhdp->dbg->pkt_mon.tx_report;
+		pkt_pos = tx_report->pkt_pos;
+		status_pos = tx_report->status_pos;
+
+		if (!PKT_MON_STATUS_FULL(pkt_pos, status_pos)) {
+			pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
+			pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status);
+
+			/* best bet (in-order tx completion) */
+			count = status_pos;
+			tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + status_pos);
+			while ((count < pkt_pos) && tx_pkt) {
+				temp_hash = tx_pkt->info.pkt_hash;
+				if (temp_hash == pkt_hash) {
+					tx_pkt->fate = pkt_fate;
+					tx_report->status_pos++;
+					found = TRUE;
+					break;
+				}
+				tx_pkt++;
+				count++;
+			}
+
+			/* search until beginning (handles out-of-order completion) */
+			if (!found) {
+				count = status_pos - 1;
+				tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + count);
+				while ((count >= 0) && tx_pkt) {
+					temp_hash = tx_pkt->info.pkt_hash;
+					if (temp_hash == pkt_hash) {
+						tx_pkt->fate = pkt_fate;
+						tx_report->status_pos++;
+						found = TRUE;
+						break;
+					}
+					tx_pkt--;
+					count--;
+				}
+
+				if (!found) {
+					/* still couldn't match tx_status */
+					DHD_ERROR(("%s(): couldn't match tx_status, pkt_pos=%u, "
+						"status_pos=%u, pkt_fate=%u\n", __FUNCTION__,
+						pkt_pos, status_pos, pkt_fate));
+				}
+			}
+		} else {
+			dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED;
+			DHD_PKT_MON(("%s(): tx_status logging stopped, reached "
+				"max limit\n", __FUNCTION__));
+		}
+	}
+
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+	return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt)
+{
+	dhd_dbg_rx_report_t *rx_report;
+	dhd_dbg_rx_info_t *rx_pkts;
+	dhd_dbg_pkt_mon_state_t rx_pkt_state;
+	uint32 driver_ts;
+	uint16 pkt_pos;
+	unsigned long flags;
+
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+	if (PKT_MON_STARTED(rx_pkt_state)) {
+		rx_report = dhdp->dbg->pkt_mon.rx_report;
+		pkt_pos = rx_report->pkt_pos;
+
+		if (!PKT_MON_PKT_FULL(pkt_pos)) {
+			rx_pkts = rx_report->rx_pkts;
+			driver_ts = __dhd_dbg_driver_ts_usec();
+
+			rx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt);
+			rx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt);
+			rx_pkts[pkt_pos].info.pkt_hash = 0U;
+			rx_pkts[pkt_pos].info.driver_ts = driver_ts;
+			rx_pkts[pkt_pos].info.firmware_ts = 0U;
+			rx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II;
+			rx_pkts[pkt_pos].fate = RX_PKT_FATE_SUCCESS;
+
+			rx_report->pkt_pos++;
+		} else {
+			dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED;
+			DHD_PKT_MON(("%s(): rx pkt logging stopped, reached "
+					"max limit\n", __FUNCTION__));
+		}
+	}
+
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+	return BCME_OK;
+}
+
+int
+dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp)
+{
+	dhd_dbg_pkt_mon_state_t tx_pkt_state;
+	dhd_dbg_pkt_mon_state_t tx_status_state;
+	dhd_dbg_pkt_mon_state_t rx_pkt_state;
+	unsigned long flags;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+	if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+			PKT_MON_DETACHED(rx_pkt_state)) {
+		DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
+			"tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+			__FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+		return -EINVAL;
+	}
+	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED;
+	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED;
+	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED;
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+
+	DHD_PKT_MON(("%s(): packet monitor stopped\n", __FUNCTION__));
+	return BCME_OK;
+}
+
+#define __COPY_TO_USER(to, from, n) \
+	do { \
+		int __ret; \
+		__ret = copy_to_user((void __user *)(to), (void *)(from), \
+				(unsigned long)(n)); \
+		if (unlikely(__ret)) { \
+			DHD_ERROR(("%s():%d: copy_to_user failed, ret=%d\n", \
+				__FUNCTION__, __LINE__, __ret)); \
+			return __ret; \
+		} \
+	} while (0)
+
+int
+dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+		uint16 req_count, uint16 *resp_count)
+{
+	dhd_dbg_tx_report_t *tx_report;
+	dhd_dbg_tx_info_t *tx_pkt;
+	wifi_tx_report_t *ptr;
+	compat_wifi_tx_report_t *cptr;
+	dhd_dbg_pkt_mon_state_t tx_pkt_state;
+	dhd_dbg_pkt_mon_state_t tx_status_state;
+	uint16 pkt_count, count;
+	unsigned long flags;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	BCM_REFERENCE(ptr);
+	BCM_REFERENCE(cptr);
+
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+	if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state)) {
+		DHD_PKT_MON(("%s(): packet monitor is not yet enabled, "
+			"tx_pkt_state=%d, tx_status_state=%d\n", __FUNCTION__,
+			tx_pkt_state, tx_status_state));
+		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+		return -EINVAL;
+	}
+
+	count = 0;
+	tx_report = dhdp->dbg->pkt_mon.tx_report;
+	tx_pkt = tx_report->tx_pkts;
+	pkt_count = MIN(req_count, tx_report->status_pos);
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+	if (in_compat_syscall())
+#else
+	if (is_compat_task())
+#endif
+	{
+		cptr = (compat_wifi_tx_report_t *)user_buf;
+		while ((count < pkt_count) && tx_pkt && cptr) {
+			compat_wifi_tx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr);
+			compat_dhd_dbg_pkt_info_t compat_tx_pkt;
+			__dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count);
+			__COPY_TO_USER(&comp_ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate));
+
+			compat_tx_pkt.payload_type = tx_pkt->info.payload_type;
+			compat_tx_pkt.pkt_len = tx_pkt->info.pkt_len;
+			compat_tx_pkt.driver_ts = tx_pkt->info.driver_ts;
+			compat_tx_pkt.firmware_ts = tx_pkt->info.firmware_ts;
+			compat_tx_pkt.pkt_hash = tx_pkt->info.pkt_hash;
+			__COPY_TO_USER(&comp_ptr->frame_inf.payload_type,
+				&compat_tx_pkt.payload_type,
+				OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash));
+			__COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii,
+				PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len);
+
+			cptr++;
+			tx_pkt++;
+			count++;
+		}
+	} else
+#endif /* CONFIG_COMPAT */
+
+	{
+		ptr = (wifi_tx_report_t *)user_buf;
+		while ((count < pkt_count) && tx_pkt && ptr) {
+			__dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count);
+			__COPY_TO_USER(&ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate));
+			__COPY_TO_USER(&ptr->frame_inf.payload_type,
+				&tx_pkt->info.payload_type,
+				OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash));
+			__COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii,
+				PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len);
+
+			ptr++;
+			tx_pkt++;
+			count++;
+		}
+	}
+	*resp_count = pkt_count;
+
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+	if (!pkt_count) {
+		DHD_ERROR(("%s(): no tx_status in tx completion messages, "
+			"make sure that 'd11status' is enabled in firmware, "
+			"status_pos=%u\n", __FUNCTION__, pkt_count));
+	}
+
+	return BCME_OK;
+}
+
+int
+dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+		uint16 req_count, uint16 *resp_count)
+{
+	dhd_dbg_rx_report_t *rx_report;
+	dhd_dbg_rx_info_t *rx_pkt;
+	wifi_rx_report_t *ptr;
+	compat_wifi_rx_report_t *cptr;
+	dhd_dbg_pkt_mon_state_t rx_pkt_state;
+	uint16 pkt_count, count;
+	unsigned long flags;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	BCM_REFERENCE(ptr);
+	BCM_REFERENCE(cptr);
+
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+	if (PKT_MON_DETACHED(rx_pkt_state)) {
+		DHD_PKT_MON(("%s(): packet fetch is not allowed , "
+			"rx_pkt_state=%d\n", __FUNCTION__, rx_pkt_state));
+		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+		return -EINVAL;
+	}
+
+	count = 0;
+	rx_report = dhdp->dbg->pkt_mon.rx_report;
+	rx_pkt = rx_report->rx_pkts;
+	pkt_count = MIN(req_count, rx_report->pkt_pos);
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+	if (in_compat_syscall())
+#else
+	if (is_compat_task())
+#endif
+	{
+		cptr = (compat_wifi_rx_report_t *)user_buf;
+		while ((count < pkt_count) && rx_pkt && cptr) {
+			compat_wifi_rx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr);
+			compat_dhd_dbg_pkt_info_t compat_rx_pkt;
+			__dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count);
+			__COPY_TO_USER(&comp_ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate));
+
+			compat_rx_pkt.payload_type = rx_pkt->info.payload_type;
+			compat_rx_pkt.pkt_len = rx_pkt->info.pkt_len;
+			compat_rx_pkt.driver_ts = rx_pkt->info.driver_ts;
+			compat_rx_pkt.firmware_ts = rx_pkt->info.firmware_ts;
+			compat_rx_pkt.pkt_hash = rx_pkt->info.pkt_hash;
+			__COPY_TO_USER(&comp_ptr->frame_inf.payload_type,
+				&compat_rx_pkt.payload_type,
+				OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash));
+			__COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii,
+				PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len);
+
+			cptr++;
+			rx_pkt++;
+			count++;
+		}
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		ptr = (wifi_rx_report_t *)user_buf;
+		while ((count < pkt_count) && rx_pkt && ptr) {
+			__dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count);
+
+			__COPY_TO_USER(&ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate));
+			__COPY_TO_USER(&ptr->frame_inf.payload_type,
+				&rx_pkt->info.payload_type,
+				OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash));
+			__COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii,
+				PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len);
+
+			ptr++;
+			rx_pkt++;
+			count++;
+		}
+	}
+
+	*resp_count = pkt_count;
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+
+	return BCME_OK;
+}
+
+int
+dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp)
+{
+	dhd_dbg_tx_report_t *tx_report;
+	dhd_dbg_rx_report_t *rx_report;
+	dhd_dbg_pkt_mon_state_t tx_pkt_state;
+	dhd_dbg_pkt_mon_state_t tx_status_state;
+	dhd_dbg_pkt_mon_state_t rx_pkt_state;
+	unsigned long flags;
+
+	DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
+	if (!dhdp || !dhdp->dbg) {
+		DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__,
+			dhdp, (dhdp ? dhdp->dbg : NULL)));
+		return -EINVAL;
+	}
+
+	DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags);
+	tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
+	tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
+	rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
+
+	if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) ||
+			PKT_MON_DETACHED(rx_pkt_state)) {
+		DHD_PKT_MON(("%s(): packet monitor is already detached, "
+			"tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n",
+			__FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state));
+		DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+		return -EINVAL;
+	}
+
+	tx_report = dhdp->dbg->pkt_mon.tx_report;
+	rx_report = dhdp->dbg->pkt_mon.rx_report;
+
+	/* free and de-initialize tx packet monitoring */
+	dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED;
+	dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED;
+	if (tx_report) {
+		if (tx_report->tx_pkts) {
+			__dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts,
+				tx_report->pkt_pos);
+			kfree(tx_report->tx_pkts);
+			dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL;
+		}
+		kfree(tx_report);
+		dhdp->dbg->pkt_mon.tx_report = NULL;
+	}
+	dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
+	dhdp->dbg->pkt_mon.tx_status_mon = NULL;
+
+	/* free and de-initialize rx packet monitoring */
+	dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED;
+	if (rx_report) {
+		if (rx_report->rx_pkts) {
+			__dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts,
+				rx_report->pkt_pos);
+			kfree(rx_report->rx_pkts);
+			dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL;
+		}
+		kfree(rx_report);
+		dhdp->dbg->pkt_mon.rx_report = NULL;
+	}
+	dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
+
+	DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags);
+	DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__));
+	return BCME_OK;
+}
+#endif /* DBG_PKT_MON */
+
+/*
+ * dhd_dbg_attach: initialization of the dhd debuggability module
+ *
+ * Return: An error code or 0 on success.
+ */
+int
+dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq,
+	dbg_urgent_noti_t os_urgent_notifier, void *os_priv)
+{
+	dhd_dbg_t *dbg;
+	int ret, ring_id;
+
+	dbg = MALLOCZ(dhdp->osh, sizeof(dhd_dbg_t));
+	if (!dbg)
+		return BCME_NOMEM;
+
+	ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID,
+			(uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, DHD_PREALLOC_FW_VERBOSE_RING);
+	if (ret)
+		goto error;
+
+	ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_EVENT_RING_ID], FW_EVENT_RING_ID,
+			(uint8 *)FW_EVENT_RING_NAME, FW_EVENT_RING_SIZE, DHD_PREALLOC_FW_EVENT_RING);
+	if (ret)
+		goto error;
+
+	ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID,
+			(uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, DHD_PREALLOC_DHD_EVENT_RING);
+	if (ret)
+		goto error;
+
+	ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[NAN_EVENT_RING_ID], NAN_EVENT_RING_ID,
+			(uint8 *)NAN_EVENT_RING_NAME, NAN_EVENT_RING_SIZE, DHD_PREALLOC_NAN_EVENT_RING);
+	if (ret)
+		goto error;
+
+	dbg->private = os_priv;
+	dbg->pullreq = os_pullreq;
+	dbg->urgent_notifier = os_urgent_notifier;
+	dhdp->dbg = dbg;
+
+	return BCME_OK;
+
+error:
+	for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+		if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
+			dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]);
+		}
+	}
+	/* dhdp->dbg has not been assigned yet on this path; free the local allocation */
+	MFREE(dhdp->osh, dbg, sizeof(dhd_dbg_t));
+
+	return ret;
+}
+
+/*
+ * dhd_dbg_detach: clean up the dhd debuggability module
+ */
+void
+dhd_dbg_detach(dhd_pub_t *dhdp)
+{
+	int ring_id;
+	dhd_dbg_t *dbg;
+	if (!dhdp->dbg)
+		return;
+	dbg = dhdp->dbg;
+	for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+		if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
+			dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]);
+		}
+	}
+	MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t));
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_debug.h b/drivers/net/wireless/bcmdhd/dhd_debug.h
new file mode 100644
index 0000000..9337113
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_debug.h
@@ -0,0 +1,868 @@
+/*
+ * DHD debuggability header file
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_debug.h 705824 2017-06-19 13:58:39Z $
+ */
+
+#ifndef _dhd_debug_h_
+#define _dhd_debug_h_
+#include <event_log.h>
+#include <bcmutils.h>
+
+enum {
+	DEBUG_RING_ID_INVALID	= 0,
+	FW_VERBOSE_RING_ID,
+	FW_EVENT_RING_ID,
+	DHD_EVENT_RING_ID,
+	NAN_EVENT_RING_ID,
+	/* add new id here */
+	DEBUG_RING_ID_MAX
+};
+
+enum {
+	/* Feature set */
+	DBG_MEMORY_DUMP_SUPPORTED = (1 << (0)), /* Memory dump of FW */
+	DBG_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), /* PKT Status */
+	DBG_CONNECT_EVENT_SUPPORTED = (1 << (2)), /* Connectivity Event */
+	DBG_POWER_EVENT_SUPOORTED = (1 << (3)), /* POWER of Driver */
+	DBG_WAKE_LOCK_SUPPORTED = (1 << (4)), /* WAKE LOCK of Driver */
+	DBG_VERBOSE_LOG_SUPPORTED = (1 << (5)), /* verbose log of FW */
+	DBG_HEALTH_CHECK_SUPPORTED = (1 << (6)), /* monitor the health of FW */
+	DBG_DRIVER_DUMP_SUPPORTED = (1 << (7)), /* dumps driver state */
+	DBG_PACKET_FATE_SUPPORTED = (1 << (8)), /* tracks connection packets' fate */
+	DBG_NAN_EVENT_SUPPORTED = (1 << (9)), /* NAN Events */
+};
+
+enum {
+	/* set for binary entries */
+	DBG_RING_ENTRY_FLAGS_HAS_BINARY = (1 << (0)),
+	/* set if 64 bits timestamp is present */
+	DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP = (1 << (1))
+};
+
+#define DBGRING_NAME_MAX		32
+/* firmware verbose ring, ring id 1 */
+#define FW_VERBOSE_RING_NAME		"fw_verbose"
+#define FW_VERBOSE_RING_SIZE		(64 * 1024)
+/* firmware event ring, ring id 2 */
+#define FW_EVENT_RING_NAME		"fw_event"
+#define FW_EVENT_RING_SIZE		(64 * 1024)
+/* DHD connection event ring, ring id 3 */
+#define DHD_EVENT_RING_NAME		"dhd_event"
+#define DHD_EVENT_RING_SIZE		(64 * 1024)
+
+/* NAN event ring, ring id 4 */
+#define NAN_EVENT_RING_NAME		"nan_event"
+#define NAN_EVENT_RING_SIZE		(64 * 1024)
+
+#define TLV_LOG_SIZE(tlv) ((tlv) ? (sizeof(tlv_log) + (tlv)->len) : 0)
+
+#define TLV_LOG_NEXT(tlv) \
+	((tlv) ? ((tlv_log *)((uint8 *)tlv + TLV_LOG_SIZE(tlv))) : 0)
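+/* Illustrative walk over a packed TLV list with the two macros above
+ * (a sketch only; buf and buf_len are placeholders, not code used in this driver):
+ *
+ *	tlv_log *tlv = (tlv_log *)buf;
+ *	uint32 off = 0;
+ *	while ((off + sizeof(tlv_log) <= buf_len) &&
+ *		(off + TLV_LOG_SIZE(tlv) <= buf_len)) {
+ *		off += TLV_LOG_SIZE(tlv);
+ *		tlv = TLV_LOG_NEXT(tlv);
+ *	}
+ */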
+
+#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
+
+#define VALID_RING(id)	\
+	((id > DEBUG_RING_ID_INVALID) && (id < DEBUG_RING_ID_MAX))
+
+#ifdef DEBUGABILITY
+#define DBG_RING_ACTIVE(dhdp, ring_id) \
+	((dhdp)->dbg->dbg_rings[(ring_id)].state == RING_ACTIVE)
+#else
+#define DBG_RING_ACTIVE(dhdp, ring_id) 0
+#endif /* DEBUGABILITY */
+
+#define TXACTIVESZ(r, w, d)			(((r) <= (w)) ? ((w) - (r)) : ((d) - (r) + (w)))
+#define DBG_RING_READ_AVAIL_SPACE(w, r, d)	(((w) >= (r)) ? ((w) - (r)) : ((d) - (r)))
+#define DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d)	(((w) >= (r)) ? ((d) - (w)) : ((r) - (w)))
+#define DBG_RING_WRITE_SPACE_AVAIL(r, w, d)	(d - (TXACTIVESZ(r, w, d)))
+#define DBG_RING_CHECK_WRITE_SPACE(r, w, d)	\
+	MIN(DBG_RING_WRITE_SPACE_AVAIL(r, w, d), DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d))
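+/* Worked example for the macros above with ring size d = 100, read pointer
+ * r = 70 and write pointer w = 20:
+ *	TXACTIVESZ(70, 20, 100)                      = 100 - 70 + 20 = 50 used bytes
+ *	DBG_RING_WRITE_SPACE_AVAIL(70, 20, 100)      = 100 - 50      = 50 writable
+ *	DBG_RING_WRITE_SPACE_AVAIL_CONT(70, 20, 100) = 70 - 20       = 50 contiguous writable
+ *	DBG_RING_READ_AVAIL_SPACE(20, 70, 100)       = 100 - 70      = 30 contiguous readable
+ */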
+
+enum {
+	/* driver receive association command from kernel */
+	WIFI_EVENT_ASSOCIATION_REQUESTED	= 0,
+	WIFI_EVENT_AUTH_COMPLETE,
+	WIFI_EVENT_ASSOC_COMPLETE,
+	/* received firmware event indicating auth frames are sent */
+	WIFI_EVENT_FW_AUTH_STARTED,
+	/* received firmware event indicating assoc frames are sent */
+	WIFI_EVENT_FW_ASSOC_STARTED,
+	/* received firmware event indicating reassoc frames are sent */
+	WIFI_EVENT_FW_RE_ASSOC_STARTED,
+	WIFI_EVENT_DRIVER_SCAN_REQUESTED,
+	WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND,
+	WIFI_EVENT_DRIVER_SCAN_COMPLETE,
+	WIFI_EVENT_G_SCAN_STARTED,
+	WIFI_EVENT_G_SCAN_COMPLETE,
+	WIFI_EVENT_DISASSOCIATION_REQUESTED,
+	WIFI_EVENT_RE_ASSOCIATION_REQUESTED,
+	WIFI_EVENT_ROAM_REQUESTED,
+	/* received beacon from AP (event enabled only in verbose mode) */
+	WIFI_EVENT_BEACON_RECEIVED,
+	/* firmware has triggered a roam scan (not g-scan) */
+	WIFI_EVENT_ROAM_SCAN_STARTED,
+	/* firmware has completed a roam scan (not g-scan) */
+	WIFI_EVENT_ROAM_SCAN_COMPLETE,
+	/* firmware has started searching for roam candidates (with reason =xx) */
+	WIFI_EVENT_ROAM_SEARCH_STARTED,
+	/* firmware has stopped searching for roam candidates (with reason =xx) */
+	WIFI_EVENT_ROAM_SEARCH_STOPPED,
+	WIFI_EVENT_UNUSED_0,
+	/* received channel switch announcement from AP */
+	WIFI_EVENT_CHANNEL_SWITCH_ANOUNCEMENT,
+	/* fw start transmit eapol frame, with EAPOL index 1-4 */
+	WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START,
+	/* fw gives up eapol frame, with rate, success/failure and number retries */
+	WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP,
+	/* kernel queue EAPOL for transmission in driver with EAPOL index 1-4 */
+	WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED,
+	/* with rate, regardless of the fact that EAPOL frame is accepted or
+	 * rejected by firmware
+	 */
+	WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED,
+	WIFI_EVENT_UNUSED_1,
+	/* with rate, and eapol index, driver has received */
+	/* EAPOL frame and will queue it up to wpa_supplicant */
+	WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED,
+	/* with success/failure, parameters */
+	WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE,
+	WIFI_EVENT_BT_COEX_BT_SCO_START,
+	WIFI_EVENT_BT_COEX_BT_SCO_STOP,
+	/* for paging/scan etc..., when BT starts transmitting twice per BT slot */
+	WIFI_EVENT_BT_COEX_BT_SCAN_START,
+	WIFI_EVENT_BT_COEX_BT_SCAN_STOP,
+	WIFI_EVENT_BT_COEX_BT_HID_START,
+	WIFI_EVENT_BT_COEX_BT_HID_STOP,
+	/* firmware sends auth frame in roaming to next candidate */
+	WIFI_EVENT_ROAM_AUTH_STARTED,
+	/* firmware receive auth confirm from ap */
+	WIFI_EVENT_ROAM_AUTH_COMPLETE,
+	/* firmware sends assoc/reassoc frame in */
+	WIFI_EVENT_ROAM_ASSOC_STARTED,
+	/* firmware receive assoc/reassoc confirm from ap */
+	WIFI_EVENT_ROAM_ASSOC_COMPLETE,
+	/* firmware sends stop G_SCAN */
+	WIFI_EVENT_G_SCAN_STOP,
+	/* firmware indicates G_SCAN scan cycle started */
+	WIFI_EVENT_G_SCAN_CYCLE_STARTED,
+	/* firmware indicates G_SCAN scan cycle completed */
+	WIFI_EVENT_G_SCAN_CYCLE_COMPLETED,
+	/* firmware indicates G_SCAN scan start for a particular bucket */
+	WIFI_EVENT_G_SCAN_BUCKET_STARTED,
+	/* firmware indicates G_SCAN scan completed for particular bucket */
+	WIFI_EVENT_G_SCAN_BUCKET_COMPLETED,
+	/* Event received from firmware about G_SCAN scan results being available */
+	WIFI_EVENT_G_SCAN_RESULTS_AVAILABLE,
+	/* Event received from firmware with G_SCAN capabilities */
+	WIFI_EVENT_G_SCAN_CAPABILITIES,
+	/* Event received from firmware when eligible candidate is found */
+	WIFI_EVENT_ROAM_CANDIDATE_FOUND,
+	/* Event received from firmware when roam scan configuration gets enabled or disabled */
+	WIFI_EVENT_ROAM_SCAN_CONFIG,
+	/* firmware/driver timed out authentication */
+	WIFI_EVENT_AUTH_TIMEOUT,
+	/* firmware/driver timed out association */
+	WIFI_EVENT_ASSOC_TIMEOUT,
+	/* firmware/driver encountered allocation failure */
+	WIFI_EVENT_MEM_ALLOC_FAILURE,
+	/* driver added a PNO network in firmware */
+	WIFI_EVENT_DRIVER_PNO_ADD,
+	/* driver removed a PNO network in firmware */
+	WIFI_EVENT_DRIVER_PNO_REMOVE,
+	/* driver received PNO networks found indication from firmware */
+	WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND,
+	/* driver triggered a scan for PNO networks */
+	WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED,
+	/* driver received scan results of PNO networks */
+	WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND,
+	/* driver updated scan results from PNO candidates to cfg */
+	WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE
+};
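+/*
+ * Note: the connectivity event IDs above appear to mirror the Android wifi
+ * logger HAL event list. They are pushed to user space through the
+ * DHD_EVENT_RING_ID debug ring (see DBG_EVENT_LOG() and
+ * dhd_os_push_push_ring_data() below), optionally followed by TLV-encoded
+ * parameters using the WIFI_TAG_* values defined next.
+ */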
+
+enum {
+	WIFI_TAG_VENDOR_SPECIFIC = 0, /* takes a byte stream as parameter */
+	WIFI_TAG_BSSID, /* takes a 6-byte MAC address as parameter */
+	WIFI_TAG_ADDR, /* takes a 6-byte MAC address as parameter */
+	WIFI_TAG_SSID, /* takes a 32-byte SSID as parameter */
+	WIFI_TAG_STATUS, /* takes an integer as parameter */
+	WIFI_TAG_CHANNEL_SPEC, /* takes one or more wifi_channel_spec as parameter */
+	WIFI_TAG_WAKE_LOCK_EVENT, /* takes a wake_lock_event struct as parameter */
+	WIFI_TAG_ADDR1, /* takes a 6-byte MAC address as parameter */
+	WIFI_TAG_ADDR2, /* takes a 6-byte MAC address as parameter */
+	WIFI_TAG_ADDR3, /* takes a 6-byte MAC address as parameter */
+	WIFI_TAG_ADDR4, /* takes a 6-byte MAC address as parameter */
+	WIFI_TAG_TSF, /* takes a 64-bit TSF value as parameter */
+	WIFI_TAG_IE,
+	/* takes one or more specific 802.11 IEs as parameter; IEs are in turn
+	 * indicated in TLV format as per the 802.11 spec
+	 */
+	WIFI_TAG_INTERFACE,	/* take interface name as parameter */
+	WIFI_TAG_REASON_CODE,	/* take a reason code as per 802.11 as parameter */
+	WIFI_TAG_RATE_MBPS,	/* take a wifi rate in 0.5 mbps */
+	WIFI_TAG_REQUEST_ID,	/* take an integer as parameter */
+	WIFI_TAG_BUCKET_ID,	/* take an integer as parameter */
+	WIFI_TAG_GSCAN_PARAMS,	/* takes a wifi_scan_cmd_params struct as parameter */
+	WIFI_TAG_GSCAN_CAPABILITIES, /* takes a wifi_gscan_capabilities struct as parameter */
+	WIFI_TAG_SCAN_ID,	/* take an integer as parameter */
+	WIFI_TAG_RSSI,		/* takes s16 as parameter */
+	WIFI_TAG_CHANNEL,	/* takes u16 as parameter */
+	WIFI_TAG_LINK_ID,	/* take an integer as parameter */
+	WIFI_TAG_LINK_ROLE,	/* take an integer as parameter */
+	WIFI_TAG_LINK_STATE,	/* take an integer as parameter */
+	WIFI_TAG_LINK_TYPE,	/* take an integer as parameter */
+	WIFI_TAG_TSCO,		/* take an integer as parameter */
+	WIFI_TAG_RSCO,		/* take an integer as parameter */
+	WIFI_TAG_EAPOL_MESSAGE_TYPE /* take an integer as parameter */
+};
+
+/* NAN  events */
+typedef enum {
+	NAN_EVENT_INVALID = 0,
+	NAN_EVENT_CLUSTER_STARTED = 1,
+	NAN_EVENT_CLUSTER_JOINED = 2,
+	NAN_EVENT_CLUSTER_MERGED = 3,
+	NAN_EVENT_ROLE_CHANGED = 4,
+	NAN_EVENT_SCAN_COMPLETE = 5,
+	NAN_EVENT_STATUS_CHNG  = 6,
+	/* ADD new events before this line */
+	NAN_EVENT_MAX
+} nan_event_id_t;
+
+typedef struct {
+    uint16 tag;
+    uint16 len; /* length of value */
+    uint8 value[0];
+} tlv_log;
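+/*
+ * Illustrative layout (a sketch, not normative): a TLV is this 4-byte header
+ * immediately followed by 'len' bytes of value, so e.g. a WIFI_TAG_BSSID
+ * parameter occupies 2 + 2 + 6 = 10 bytes:
+ *
+ *     tag = WIFI_TAG_BSSID, len = 6, value[] = <6-byte BSSID>
+ *
+ * Several such TLVs may be concatenated after a log_conn_event header.
+ */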
+
+typedef struct per_packet_status_entry {
+    uint8 flags;
+    uint8 tid; /* transmit or receive TID */
+    uint16 MCS; /* modulation and bandwidth */
+	/*
+	* TX: RSSI of ACK for that packet
+	* RX: RSSI of packet
+	*/
+    uint8 rssi;
+    uint8 num_retries; /* number of attempted retries */
+    uint16 last_transmit_rate; /* last transmit rate in .5 mbps */
+	 /* transmit/receive sequence for that MPDU packet */
+    uint16 link_layer_transmit_sequence;
+	/*
+	* TX: firmware timestamp (us) when packet is queued within firmware buffer
+	* for SDIO/HSIC or into PCIe buffer
+	* RX : firmware receive timestamp
+	*/
+    uint64 firmware_entry_timestamp;
+	/*
+	* firmware timestamp (us) when packet starts contending for the
+	* medium for the first time, at head of its AC queue,
+	* or as part of an MPDU or A-MPDU. This timestamp is not updated
+	* for each retry, only the first transmit attempt.
+	*/
+    uint64 start_contention_timestamp;
+	/*
+	* firmware timestamp (us) when packet is successfully transmitted
+	* or aborted because it has exhausted its maximum number of retries
+	*/
+	uint64 transmit_success_timestamp;
+	/*
+	* packet data. The length of packet data is determined by the entry_size field of
+	* the wifi_ring_buffer_entry structure. It is expected that first bytes of the
+	* packet, or packet headers only (up to TCP or RTP/UDP headers) will be copied into the ring
+	*/
+    uint8 *data;
+} per_packet_status_entry_t;
+
+#define PACKED_STRUCT __attribute__ ((packed))
+typedef struct log_conn_event {
+    uint16 event;
+    tlv_log *tlvs;
+	/*
+	* A separate parameter structure per event is to be provided, along with optional data.
+	* The event_data is expected to include an official Android part, with parameters
+	* such as transmit rate, num retries, num scan results found, etc., as well as a
+	* vendor proprietary part which is understood by the developer only.
+	*/
+} PACKED_STRUCT log_conn_event_t;
+
+/*
+ * Ring buffer name for power events ring. Note that power events are extremely frequent
+ * and thus should be stored in their own ring/file so as not to clobber connectivity events.
+ */
+
+typedef struct wake_lock_event {
+    uint32 status; /* 0 taken, 1 released */
+    uint32 reason; /* reason why this wake lock is taken */
+    char *name; /* null terminated */
+} wake_lock_event_t;
+
+typedef struct wifi_power_event {
+    uint16 event;
+    tlv_log *tlvs;
+} wifi_power_event_t;
+
+#define NAN_EVENT_VERSION 1
+typedef struct log_nan_event {
+    uint8 version;
+    uint8 pad;
+    uint16 event;
+    tlv_log *tlvs;
+} log_nan_event_t;
+
+/* entry type */
+enum {
+	DBG_RING_ENTRY_EVENT_TYPE = 1,
+	DBG_RING_ENTRY_PKT_TYPE,
+	DBG_RING_ENTRY_WAKE_LOCK_EVENT_TYPE,
+	DBG_RING_ENTRY_POWER_EVENT_TYPE,
+	DBG_RING_ENTRY_DATA_TYPE,
+	DBG_RING_ENTRY_NAN_EVENT_TYPE
+};
+
+typedef struct dhd_dbg_ring_entry {
+	uint16 len; /* payload length excluding the header */
+	uint8 flags;
+	uint8 type; /* Per ring specific */
+	uint64 timestamp; /* present if has_timestamp bit is set. */
+} PACKED_STRUCT dhd_dbg_ring_entry_t;
+
+#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t))
+
+#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE)
+
+#define PAYLOAD_MAX_LEN 65535
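+/*
+ * Each record stored in a debug ring is a dhd_dbg_ring_entry_t header
+ * immediately followed by 'len' bytes of payload; ENTRY_LENGTH() therefore
+ * gives the total space a record occupies, and readers walk the ring one
+ * ENTRY_LENGTH() at a time.
+ */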
+
+typedef struct dhd_dbg_ring_status {
+	uint8 name[DBGRING_NAME_MAX];
+	uint32 flags;
+	int ring_id; /* unique integer representing the ring */
+	/* total memory size allocated for the buffer */
+	uint32 ring_buffer_byte_size;
+	uint32 verbose_level;
+	/* number of bytes that was written to the buffer by driver */
+	uint32 written_bytes;
+	/* number of bytes that was read from the buffer by user land */
+	uint32 read_bytes;
+	/* number of records that was written to the buffer by driver */
+	uint32 written_records;
+} dhd_dbg_ring_status_t;
+
+struct log_level_table {
+	int log_level;
+	uint16 tag;
+	uint8 sets;
+	char *desc;
+};
+
+#ifdef DEBUGABILITY
+#define DBG_EVENT_LOG(dhdp, connect_state)					\
+{										\
+	do {									\
+		uint16 state = connect_state;					\
+		if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID))			\
+			dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,	\
+				&state, sizeof(state));				\
+	} while (0);								\
+}
+#else
+#define DBG_EVENT_LOG(dhdp, connect_state)
+#endif /* DEBUGABILITY */
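+/*
+ * Illustrative usage (a sketch, not taken from the original source):
+ *
+ *     DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
+ *
+ * which, when DHD_EVENT_RING_ID is active, pushes the 16-bit event id onto
+ * that ring via dhd_os_push_push_ring_data().
+ */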
+
+
+#define MD5_PREFIX_LEN				4
+#define MAX_FATE_LOG_LEN			32
+
+#define MAX_FRAME_LEN_ETHERNET		1518
+#define MAX_FRAME_LEN_80211_MGMT	2352 /* 802.11-2012 Fig. 8-34 */
+
+typedef enum {
+	/* Sent over air and ACKed. */
+	TX_PKT_FATE_ACKED,
+
+	/* Sent over air but not ACKed. (Normal for broadcast/multicast.) */
+	TX_PKT_FATE_SENT,
+
+	/* Queued within firmware, but not yet sent over air. */
+	TX_PKT_FATE_FW_QUEUED,
+
+	/*
+	 * Dropped by firmware as invalid. E.g. bad source address,
+	 * bad checksum, or invalid for current state.
+	 */
+	TX_PKT_FATE_FW_DROP_INVALID,
+
+	/* Dropped by firmware due to lack of buffer space. */
+	TX_PKT_FATE_FW_DROP_NOBUFS,
+
+	/*
+	 * Dropped by firmware for any other reason. Includes
+	 * frames that were sent by driver to firmware, but
+	 * unaccounted for by firmware.
+	 */
+	TX_PKT_FATE_FW_DROP_OTHER,
+
+	/* Queued within driver, not yet sent to firmware. */
+	TX_PKT_FATE_DRV_QUEUED,
+
+	/*
+	 * Dropped by driver as invalid. E.g. bad source address,
+	 * or invalid for current state.
+	 */
+	TX_PKT_FATE_DRV_DROP_INVALID,
+
+	/* Dropped by driver due to lack of buffer space. */
+	TX_PKT_FATE_DRV_DROP_NOBUFS,
+
+	/*  Dropped by driver for any other reason. */
+	TX_PKT_FATE_DRV_DROP_OTHER,
+
+	} wifi_tx_packet_fate;
+
+typedef enum {
+	/* Valid and delivered to network stack (e.g., netif_rx()). */
+	RX_PKT_FATE_SUCCESS,
+
+	/* Queued within firmware, but not yet sent to driver. */
+	RX_PKT_FATE_FW_QUEUED,
+
+	/* Dropped by firmware due to host-programmable filters. */
+	RX_PKT_FATE_FW_DROP_FILTER,
+
+	/*
+	 * Dropped by firmware as invalid. E.g. bad checksum,
+	 * decrypt failed, or invalid for current state.
+	 */
+	RX_PKT_FATE_FW_DROP_INVALID,
+
+	/* Dropped by firmware due to lack of buffer space. */
+	RX_PKT_FATE_FW_DROP_NOBUFS,
+
+	/* Dropped by firmware for any other reason. */
+	RX_PKT_FATE_FW_DROP_OTHER,
+
+	/* Queued within driver, not yet delivered to network stack. */
+	RX_PKT_FATE_DRV_QUEUED,
+
+	/* Dropped by driver due to filter rules. */
+	RX_PKT_FATE_DRV_DROP_FILTER,
+
+	/* Dropped by driver as invalid. E.g. not permitted in current state. */
+	RX_PKT_FATE_DRV_DROP_INVALID,
+
+	/* Dropped by driver due to lack of buffer space. */
+	RX_PKT_FATE_DRV_DROP_NOBUFS,
+
+	/* Dropped by driver for any other reason. */
+	RX_PKT_FATE_DRV_DROP_OTHER,
+
+	} wifi_rx_packet_fate;
+
+typedef enum {
+	FRAME_TYPE_UNKNOWN,
+	FRAME_TYPE_ETHERNET_II,
+	FRAME_TYPE_80211_MGMT,
+	} frame_type;
+
+typedef struct wifi_frame_info {
+	/*
+	 * The type of MAC-layer frame that this frame_info holds.
+	 * - For data frames, use FRAME_TYPE_ETHERNET_II.
+	 * - For management frames, use FRAME_TYPE_80211_MGMT.
+	 * - If the type of the frame is unknown, use FRAME_TYPE_UNKNOWN.
+	 */
+	frame_type payload_type;
+
+	/*
+	 * The number of bytes included in |frame_content|. If the frame
+	 * contents are missing (e.g. RX frame dropped in firmware),
+	 * |frame_len| should be set to 0.
+	 */
+	size_t frame_len;
+
+	/*
+	 * Host clock when this frame was received by the driver (either
+	 *	outbound from the host network stack, or inbound from the
+	 *	firmware).
+	 *	- The timestamp should be taken from a clock which includes time
+	 *	  the host spent suspended (e.g. ktime_get_boottime()).
+	 *	- If no host timestamp is available (e.g. RX frame was dropped in
+	 *	  firmware), this field should be set to 0.
+	 */
+	uint32 driver_timestamp_usec;
+
+	/*
+	 * Firmware clock when this frame was received by the firmware
+	 *	(either outbound from the host, or inbound from a remote
+	 *	station).
+	 *	- The timestamp should be taken from a clock which includes time
+	 *	  firmware spent suspended (if applicable).
+	 *	- If no firmware timestamp is available (e.g. TX frame was
+	 *	  dropped by driver), this field should be set to 0.
+	 *	- Consumers of |frame_info| should _not_ assume any
+	 *	  synchronization between driver and firmware clocks.
+	 */
+	uint32 firmware_timestamp_usec;
+
+	/*
+	 * Actual frame content.
+	 * - Should be provided for TX frames originated by the host.
+	 * - Should be provided for RX frames received by the driver.
+	 * - Optionally provided for TX frames originated by firmware. (At
+	 *   discretion of HAL implementation.)
+	 * - Optionally provided for RX frames dropped in firmware. (At
+	 *   discretion of HAL implementation.)
+	 * - If frame content is not provided, |frame_len| should be set
+	 *   to 0.
+	 */
+	union {
+		char ethernet_ii[MAX_FRAME_LEN_ETHERNET];
+		char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT];
+	} frame_content;
+} wifi_frame_info_t;
+
+typedef struct wifi_tx_report {
+	/*
+	 * Prefix of MD5 hash of |frame_inf.frame_content|. If frame
+	 * content is not provided, prefix of MD5 hash over the same data
+	 * that would be in frame_content, if frame content were provided.
+	 */
+	char md5_prefix[MD5_PREFIX_LEN];
+	wifi_tx_packet_fate fate;
+	wifi_frame_info_t frame_inf;
+} wifi_tx_report_t;
+
+typedef struct wifi_rx_report {
+	/*
+	 * Prefix of MD5 hash of |frame_inf.frame_content|. If frame
+	 * content is not provided, prefix of MD5 hash over the same data
+	 * that would be in frame_content, if frame content were provided.
+	 */
+	char md5_prefix[MD5_PREFIX_LEN];
+	wifi_rx_packet_fate fate;
+	wifi_frame_info_t frame_inf;
+} wifi_rx_report_t;
+
+typedef struct compat_wifi_frame_info {
+	frame_type payload_type;
+
+	uint32 frame_len;
+
+	uint32 driver_timestamp_usec;
+
+	uint32 firmware_timestamp_usec;
+
+	union {
+		char ethernet_ii[MAX_FRAME_LEN_ETHERNET];
+		char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT];
+	} frame_content;
+} compat_wifi_frame_info_t;
+
+
+typedef struct compat_wifi_tx_report {
+	char md5_prefix[MD5_PREFIX_LEN];
+	wifi_tx_packet_fate fate;
+	compat_wifi_frame_info_t frame_inf;
+} compat_wifi_tx_report_t;
+
+typedef struct compat_wifi_rx_report {
+	char md5_prefix[MD5_PREFIX_LEN];
+	wifi_rx_packet_fate fate;
+	compat_wifi_frame_info_t frame_inf;
+} compat_wifi_rx_report_t;
+
+
+/*
+ * Packet logging - internal data
+ */
+
+typedef enum dhd_dbg_pkt_mon_state {
+	PKT_MON_INVALID = 0,
+	PKT_MON_ATTACHED,
+	PKT_MON_STARTING,
+	PKT_MON_STARTED,
+	PKT_MON_STOPPING,
+	PKT_MON_STOPPED,
+	PKT_MON_DETACHED,
+	} dhd_dbg_pkt_mon_state_t;
+
+typedef struct dhd_dbg_pkt_info {
+	frame_type payload_type;
+	size_t pkt_len;
+	uint32 driver_ts;
+	uint32 firmware_ts;
+	uint32 pkt_hash;
+	void *pkt;
+} dhd_dbg_pkt_info_t;
+
+typedef struct compat_dhd_dbg_pkt_info {
+	frame_type payload_type;
+	uint32 pkt_len;
+	uint32 driver_ts;
+	uint32 firmware_ts;
+	uint32 pkt_hash;
+	void *pkt;
+} compat_dhd_dbg_pkt_info_t;
+
+typedef struct dhd_dbg_tx_info
+{
+	wifi_tx_packet_fate fate;
+	dhd_dbg_pkt_info_t info;
+} dhd_dbg_tx_info_t;
+
+typedef struct dhd_dbg_rx_info
+{
+	wifi_rx_packet_fate fate;
+	dhd_dbg_pkt_info_t info;
+} dhd_dbg_rx_info_t;
+
+typedef struct dhd_dbg_tx_report
+{
+	dhd_dbg_tx_info_t *tx_pkts;
+	uint16 pkt_pos;
+	uint16 status_pos;
+} dhd_dbg_tx_report_t;
+
+typedef struct dhd_dbg_rx_report
+{
+	dhd_dbg_rx_info_t *rx_pkts;
+	uint16 pkt_pos;
+} dhd_dbg_rx_report_t;
+
+typedef void (*dbg_pullreq_t)(void *os_priv, const int ring_id);
+typedef void (*dbg_urgent_noti_t) (dhd_pub_t *dhdp, const void *data, const uint32 len);
+typedef int (*dbg_mon_tx_pkts_t) (dhd_pub_t *dhdp, void *pkt, uint32 pktid);
+typedef int (*dbg_mon_tx_status_t) (dhd_pub_t *dhdp, void *pkt,
+	uint32 pktid, uint16 status);
+typedef int (*dbg_mon_rx_pkts_t) (dhd_pub_t *dhdp, void *pkt);
+
+typedef struct dhd_dbg_pkt_mon
+{
+	dhd_dbg_tx_report_t *tx_report;
+	dhd_dbg_rx_report_t *rx_report;
+	dhd_dbg_pkt_mon_state_t tx_pkt_state;
+	dhd_dbg_pkt_mon_state_t tx_status_state;
+	dhd_dbg_pkt_mon_state_t rx_pkt_state;
+
+	/* call backs */
+	dbg_mon_tx_pkts_t tx_pkt_mon;
+	dbg_mon_tx_status_t tx_status_mon;
+	dbg_mon_rx_pkts_t rx_pkt_mon;
+} dhd_dbg_pkt_mon_t;
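+/*
+ * Packet fate monitoring keeps at most MAX_FATE_LOG_LEN (32) TX and RX
+ * records per session; pkt_pos/status_pos count how many packets and TX
+ * statuses have been captured, and the PKT_MON_*_FULL() macros below are
+ * used to detect when those limits are reached.
+ */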
+
+enum dbg_ring_state {
+	RING_STOP       = 0,    /* ring is not initialized */
+	RING_ACTIVE,    /* ring is live and logging */
+	RING_SUSPEND    /* ring is initialized but not logging */
+};
+
+struct ring_statistics {
+	/* number of bytes that was written to the buffer by driver */
+	uint32 written_bytes;
+	/* number of bytes that was read from the buffer by user land */
+	uint32 read_bytes;
+	/* number of records that was written to the buffer by driver */
+	uint32 written_records;
+};
+
+typedef struct dhd_dbg_ring {
+	int     id;             /* ring id */
+	uint8   name[DBGRING_NAME_MAX]; /* name string */
+	uint32  ring_size;      /* ring buffer size in bytes */
+	uint32  wp;             /* write pointer */
+	uint32  rp;             /* read pointer */
+	uint32  log_level; /* log_level */
+	uint32  threshold; /* threshold bytes */
+	void *  ring_buf;       /* pointer to the actual ring buffer */
+	void *  lock;           /* spin lock for ring access */
+	struct ring_statistics stat; /* statistics */
+	enum dbg_ring_state state;      /* ring state enum */
+	bool tail_padded;		/* writer does not have enough space */
+	uint32 rem_len;		/* number of bytes from wp_pad to end */
+	bool sched_pull;	/* schedule reader immediately */
+} dhd_dbg_ring_t;
+
+typedef struct dhd_dbg {
+	dhd_dbg_ring_t dbg_rings[DEBUG_RING_ID_MAX];
+	void *private;          /* os private_data */
+	dhd_dbg_pkt_mon_t pkt_mon;
+	void *pkt_mon_lock; /* spin lock for packet monitoring */
+	dbg_pullreq_t pullreq;
+	dbg_urgent_noti_t urgent_notifier;
+} dhd_dbg_t;
+
+#define PKT_MON_ATTACHED(state) \
+		(((state) > PKT_MON_INVALID) && ((state) < PKT_MON_DETACHED))
+#define PKT_MON_DETACHED(state) \
+		(((state) == PKT_MON_INVALID) || ((state) == PKT_MON_DETACHED))
+#define PKT_MON_STARTED(state) ((state) == PKT_MON_STARTED)
+#define PKT_MON_STOPPED(state) ((state) == PKT_MON_STOPPED)
+#define PKT_MON_NOT_OPERATIONAL(state) \
+	(((state) != PKT_MON_STARTED) && ((state) != PKT_MON_STOPPED))
+#define PKT_MON_SAFE_TO_FREE(state) \
+	(((state) == PKT_MON_STARTING) || ((state) == PKT_MON_STOPPED))
+#define PKT_MON_PKT_FULL(pkt_count) ((pkt_count) >= MAX_FATE_LOG_LEN)
+#define PKT_MON_STATUS_FULL(pkt_count, status_count) \
+		(((status_count) >= (pkt_count)) || ((status_count) >= MAX_FATE_LOG_LEN))
+
+#ifdef DBG_PKT_MON
+#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) \
+	do { \
+		if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_pkt_mon && (pkt)) { \
+			(dhdp)->dbg->pkt_mon.tx_pkt_mon((dhdp), (pkt), (pktid)); \
+		} \
+	} while (0);
+#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) \
+	do { \
+		if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_status_mon && (pkt)) { \
+			(dhdp)->dbg->pkt_mon.tx_status_mon((dhdp), (pkt), (pktid), (status)); \
+		} \
+	} while (0);
+#define DHD_DBG_PKT_MON_RX(dhdp, pkt) \
+	do { \
+		if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.rx_pkt_mon && (pkt)) { \
+			if (ntoh16((pkt)->protocol) != ETHER_TYPE_BRCM) { \
+				(dhdp)->dbg->pkt_mon.rx_pkt_mon((dhdp), (pkt)); \
+			} \
+		} \
+	} while (0);
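+/* Note: the RX hook above skips Broadcom-proprietary event frames
+ * (ETHER_TYPE_BRCM) so that only genuine data traffic is recorded.
+ */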
+
+#define DHD_DBG_PKT_MON_START(dhdp) \
+		dhd_os_dbg_start_pkt_monitor((dhdp));
+#define DHD_DBG_PKT_MON_STOP(dhdp) \
+		dhd_os_dbg_stop_pkt_monitor((dhdp));
+#else
+#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid)
+#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status)
+#define DHD_DBG_PKT_MON_RX(dhdp, pkt)
+#define DHD_DBG_PKT_MON_START(dhdp)
+#define DHD_DBG_PKT_MON_STOP(dhdp)
+#endif /* DBG_PKT_MON */
+
+#ifdef DUMP_IOCTL_IOV_LIST
+typedef struct dhd_iov_li {
+	dll_t list;
+	uint32 cmd;
+	char buff[100];
+} dhd_iov_li_t;
+
+#define IOV_LIST_MAX_LEN 5
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+#ifdef DHD_DEBUG
+typedef struct {
+	dll_t list;
+	uint32 id; /* wasted chunk id */
+	uint32 handle; /* wasted chunk handle */
+	uint32 size; /* wasted chunk size */
+} dhd_dbg_mwli_t;
+#endif /* DHD_DEBUG */
+
+/* dhd_dbg functions */
+extern void dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
+		void *raw_event_ptr, uint datalen);
+extern int dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq,
+	dbg_urgent_noti_t os_urgent_notifier, void *os_priv);
+extern void dhd_dbg_detach(dhd_pub_t *dhdp);
+extern int dhd_dbg_start(dhd_pub_t *dhdp, bool start);
+extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id,
+		int log_level, int flags, uint32 threshold);
+extern int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id,
+		dhd_dbg_ring_status_t *dbg_ring_status);
+extern int dhd_dbg_ring_push(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data);
+extern int dhd_dbg_ring_pull(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len);
+extern int dhd_dbg_ring_pull_single(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
+		bool strip_header);
+extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name);
+extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp);
+extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len);
+extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+	void *raw_event_ptr, uint32 *log_ptr);
+
+#ifdef DBG_PKT_MON
+extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
+		dbg_mon_tx_pkts_t tx_pkt_mon,
+		dbg_mon_tx_status_t tx_status_mon,
+		dbg_mon_rx_pkts_t rx_pkt_mon);
+extern int dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid);
+extern int dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt,
+		uint32 pktid, uint16 status);
+extern int dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt);
+extern int dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+		uint16 req_count, uint16 *resp_count);
+extern int dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+		uint16 req_count, uint16 *resp_count);
+extern int dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp);
+#endif /* DBG_PKT_MON */
+
+/* os wrapper function */
+extern int dhd_os_dbg_attach(dhd_pub_t *dhdp);
+extern void dhd_os_dbg_detach(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_register_callback(int ring_id,
+	void (*dbg_ring_sub_cb)(void *ctx, const int ring_id, const void *data,
+		const uint32 len, const dhd_dbg_ring_status_t dbg_ring_status));
+extern int dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp,
+	void (*urgent_noti)(void *ctx, const void *data, const uint32 len, const uint32 fw_len));
+
+extern int dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level,
+		int flags, int time_intval, int threshold);
+extern int dhd_os_reset_logging(dhd_pub_t *dhdp);
+extern int dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress);
+
+extern int dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id,
+		dhd_dbg_ring_status_t *dbg_ring_status);
+extern int dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name);
+extern int dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len);
+extern int dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features);
+
+#ifdef DBG_PKT_MON
+extern int dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt,
+	uint32 pktid);
+extern int dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt,
+	uint32 pktid, uint16 status);
+extern int dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt);
+extern int dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp);
+extern int dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp,
+	void __user *user_buf, uint16 req_count, uint16 *resp_count);
+extern int dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp,
+	void __user *user_buf, uint16 req_count, uint16 *resp_count);
+extern int dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp);
+#endif /* DBG_PKT_MON */
+
+#ifdef DUMP_IOCTL_IOV_LIST
+extern void dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node);
+extern void dhd_iov_li_print(dll_t *list_head);
+extern void dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head);
+#endif /* DUMP_IOCTL_IOV_LIST */
+
+#ifdef DHD_DEBUG
+extern void dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head);
+#endif /* DHD_DEBUG */
+#endif /* _dhd_debug_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_debug_linux.c b/drivers/net/wireless/bcmdhd/dhd_debug_linux.c
new file mode 100644
index 0000000..7137529
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_debug_linux.c
@@ -0,0 +1,512 @@
+/*
+ * DHD debuggability Linux OS layer
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_debug_linux.c 710862 2017-07-14 07:43:59Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+
+#include <net/cfg80211.h>
+#include <wl_cfgvendor.h>
+
+typedef void (*dbg_ring_send_sub_t)(void *ctx, const int ring_id, const void *data,
+	const uint32 len, const dhd_dbg_ring_status_t ring_status);
+typedef void (*dbg_urgent_noti_sub_t)(void *ctx, const void *data,
+	const uint32 len, const uint32 fw_len);
+
+static dbg_ring_send_sub_t ring_send_sub_cb[DEBUG_RING_ID_MAX];
+static dbg_urgent_noti_sub_t urgent_noti_sub_cb;
+typedef struct dhd_dbg_os_ring_info {
+	dhd_pub_t *dhdp;
+	int ring_id;
+	int log_level;
+	unsigned long interval;
+	struct delayed_work work;
+	uint64 tsoffset;
+} linux_dbgring_info_t;
+
+struct log_level_table dhd_event_map[] = {
+	{1, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, 0, "DRIVER EAPOL TX REQ"},
+	{1, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, 0, "DRIVER EAPOL RX"},
+	{2, WIFI_EVENT_DRIVER_SCAN_REQUESTED, 0, "SCAN_REQUESTED"},
+	{2, WIFI_EVENT_DRIVER_SCAN_COMPLETE, 0, "SCAN COMPLETE"},
+	{3, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, 0, "SCAN RESULT FOUND"},
+	{2, WIFI_EVENT_DRIVER_PNO_ADD, 0, "PNO ADD"},
+	{2, WIFI_EVENT_DRIVER_PNO_REMOVE, 0, "PNO REMOVE"},
+	{2, WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, 0, "PNO NETWORK FOUND"},
+	{2, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, 0, "PNO SCAN_REQUESTED"},
+	{1, WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, 0, "PNO SCAN RESULT FOUND"},
+	{1, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE, 0, "PNO SCAN COMPLETE"}
+};
+
+static void
+debug_data_send(dhd_pub_t *dhdp, int ring_id, const void *data, const uint32 len,
+	const dhd_dbg_ring_status_t ring_status)
+{
+	struct net_device *ndev;
+	dbg_ring_send_sub_t ring_sub_send;
+	ndev = dhd_linux_get_primary_netdev(dhdp);
+	if (!ndev)
+		return;
+	if (ring_send_sub_cb[ring_id]) {
+		ring_sub_send = ring_send_sub_cb[ring_id];
+		ring_sub_send(ndev, ring_id, data, len, ring_status);
+	}
+}
+
+static void
+dhd_os_dbg_urgent_notifier(dhd_pub_t *dhdp, const void *data, const uint32 len)
+{
+	struct net_device *ndev;
+	ndev = dhd_linux_get_primary_netdev(dhdp);
+	if (!ndev)
+		return;
+	if (urgent_noti_sub_cb) {
+		urgent_noti_sub_cb(ndev, data, len, dhdp->soc_ram_length);
+	}
+}
+
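+/*
+ * Ring poll worker: drains whatever sits between the ring's read and write
+ * pointers, rebases firmware timestamps to host time using tsoffset, hands
+ * each entry to the registered per-ring callback via debug_data_send(), and
+ * re-arms itself at the configured interval once the ring has been fully
+ * read.
+ */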
+static void
+dbg_ring_poll_worker(struct work_struct *work)
+{
+	struct delayed_work *d_work = to_delayed_work(work);
+	bool sched = TRUE;
+	dhd_dbg_ring_t *ring;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	linux_dbgring_info_t *ring_info =
+		container_of(d_work, linux_dbgring_info_t, work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	dhd_pub_t *dhdp = ring_info->dhdp;
+	int ringid = ring_info->ring_id;
+	dhd_dbg_ring_status_t ring_status;
+	void *buf;
+	dhd_dbg_ring_entry_t *hdr;
+	uint32 buflen, rlen;
+	unsigned long flags;
+
+	ring = &dhdp->dbg->dbg_rings[ringid];
+	flags = dhd_os_spin_lock(ring->lock);
+	dhd_dbg_get_ring_status(dhdp, ringid, &ring_status);
+
+	if (ring->wp > ring->rp) {
+		buflen = ring->wp - ring->rp;
+	} else if (ring->wp < ring->rp) {
+		buflen = ring->ring_size - ring->rp + ring->wp;
+	} else {
+		goto exit;
+	}
+
+	if (buflen > ring->ring_size) {
+		goto exit;
+	}
+
+	buf = MALLOCZ(dhdp->osh, buflen);
+	if (!buf) {
+		DHD_ERROR(("%s failed to allocate read buf\n", __FUNCTION__));
+		sched = FALSE;
+		goto exit;
+	}
+
+	rlen = dhd_dbg_ring_pull(dhdp, ringid, buf, buflen);
+	if (!ring->sched_pull) {
+		ring->sched_pull = TRUE;
+	}
+
+	hdr = (dhd_dbg_ring_entry_t *)buf;
+	while (rlen > 0) {
+		ring_status.read_bytes += ENTRY_LENGTH(hdr);
+		/* offset fw ts to host ts */
+		hdr->timestamp += ring_info->tsoffset;
+		debug_data_send(dhdp, ringid, hdr, ENTRY_LENGTH(hdr),
+			ring_status);
+		rlen -= ENTRY_LENGTH(hdr);
+		hdr = (dhd_dbg_ring_entry_t *)((char *)hdr + ENTRY_LENGTH(hdr));
+	}
+	MFREE(dhdp->osh, buf, buflen);
+
+exit:
+	if (sched) {
+		/* retrigger the work at same interval */
+		if ((ring_status.written_bytes == ring_status.read_bytes) &&
+				(ring_info->interval)) {
+			schedule_delayed_work(d_work, ring_info->interval);
+		}
+	}
+
+	dhd_os_spin_unlock(ring->lock, flags);
+
+	return;
+}
+
+int
+dhd_os_dbg_register_callback(int ring_id, dbg_ring_send_sub_t callback)
+{
+	if (!VALID_RING(ring_id))
+		return BCME_RANGE;
+
+	ring_send_sub_cb[ring_id] = callback;
+	return BCME_OK;
+}
+
+int
+dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp, dbg_urgent_noti_sub_t urgent_noti_sub)
+{
+	if (!dhdp || !urgent_noti_sub)
+		return BCME_BADARG;
+	urgent_noti_sub_cb = urgent_noti_sub;
+
+	return BCME_OK;
+}
+
+int
+dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level,
+	int flags, int time_intval, int threshold)
+{
+	int ret = BCME_OK;
+	int ring_id;
+	linux_dbgring_info_t *os_priv, *ring_info;
+	uint32 ms;
+
+	ring_id = dhd_dbg_find_ring_id(dhdp, ring_name);
+	if (!VALID_RING(ring_id))
+		return BCME_UNSUPPORTED;
+
+	DHD_DBGIF(("%s , log_level : %d, time_intval : %d, threshold %d Bytes\n",
+		__FUNCTION__, log_level, time_intval, threshold));
+
+	/* change the configuration */
+	ret = dhd_dbg_set_configuration(dhdp, ring_id, log_level, flags, threshold);
+	if (ret) {
+		DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret));
+		return ret;
+	}
+
+	os_priv = dhd_dbg_get_priv(dhdp);
+	if (!os_priv)
+		return BCME_ERROR;
+	ring_info = &os_priv[ring_id];
+	ring_info->log_level = log_level;
+	if (ring_id == FW_VERBOSE_RING_ID || ring_id == FW_EVENT_RING_ID) {
+		ring_info->tsoffset = local_clock();
+		if (dhd_wl_ioctl_get_intiovar(dhdp, "rte_timesync", &ms, WLC_GET_VAR,
+				FALSE, 0))
+			DHD_ERROR(("%s rte_timesync failed\n", __FUNCTION__));
+		do_div(ring_info->tsoffset, 1000000);
+		ring_info->tsoffset -= ms;
+	}
+	if (time_intval == 0 || log_level == 0) {
+		ring_info->interval = 0;
+		cancel_delayed_work_sync(&ring_info->work);
+	} else {
+		ring_info->interval = msecs_to_jiffies(time_intval * MSEC_PER_SEC);
+		cancel_delayed_work_sync(&ring_info->work);
+		schedule_delayed_work(&ring_info->work, ring_info->interval);
+	}
+
+	return ret;
+}
+
+int
+dhd_os_reset_logging(dhd_pub_t *dhdp)
+{
+	int ret = BCME_OK;
+	int ring_id;
+	linux_dbgring_info_t *os_priv, *ring_info;
+
+	os_priv = dhd_dbg_get_priv(dhdp);
+	if (!os_priv)
+		return BCME_ERROR;
+
+	/* Stop all rings */
+	for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+		DHD_DBGIF(("%s: Stop ring buffer %d\n", __FUNCTION__, ring_id));
+
+		ring_info = &os_priv[ring_id];
+		/* cancel any pending work */
+		cancel_delayed_work_sync(&ring_info->work);
+		/* log level zero stops logging on that ring */
+		ring_info->log_level = 0;
+		ring_info->interval = 0;
+		/* change the configuration */
+		ret = dhd_dbg_set_configuration(dhdp, ring_id, 0, 0, 0);
+		if (ret) {
+			DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret));
+			return ret;
+		}
+	}
+	return ret;
+}
+
+#define SUPPRESS_LOG_LEVEL 1
+int
+dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress)
+{
+	int ret = BCME_OK;
+	int max_log_level;
+	int enable = (suppress) ? 0 : 1;
+	linux_dbgring_info_t *os_priv;
+
+	os_priv = dhd_dbg_get_priv(dhdp);
+	if (!os_priv)
+		return BCME_ERROR;
+
+	max_log_level = MAX(os_priv[FW_VERBOSE_RING_ID].log_level,
+		os_priv[FW_EVENT_RING_ID].log_level);
+	if (max_log_level == SUPPRESS_LOG_LEVEL) {
+		/* suppress logging in FW so as not to wake up the host while the device is suspended */
+		ret = dhd_iovar(dhdp, 0, "logtrace", (char *)&enable, sizeof(enable), NULL, 0,
+				TRUE);
+		if (ret < 0 && (ret != BCME_UNSUPPORTED)) {
+			DHD_ERROR(("logtrace is failed : %d\n", ret));
+		}
+	}
+
+	return ret;
+}
+
+int
+dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status)
+{
+	return dhd_dbg_get_ring_status(dhdp, ring_id, dbg_ring_status);
+}
+
+int
+dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name)
+{
+	int ret = BCME_OK;
+	int ring_id;
+	linux_dbgring_info_t *os_priv, *ring_info;
+	ring_id = dhd_dbg_find_ring_id(dhdp, ring_name);
+	if (!VALID_RING(ring_id))
+		return BCME_UNSUPPORTED;
+	os_priv = dhd_dbg_get_priv(dhdp);
+	if (os_priv) {
+		ring_info = &os_priv[ring_id];
+		if (ring_info->interval) {
+			cancel_delayed_work_sync(&ring_info->work);
+		}
+		schedule_delayed_work(&ring_info->work, 0);
+	} else {
+		DHD_ERROR(("%s : os_priv is NULL\n", __FUNCTION__));
+		ret = BCME_ERROR;
+	}
+	return ret;
+}
+
+int
+dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len)
+{
+	int ret = BCME_OK, i;
+	dhd_dbg_ring_entry_t msg_hdr;
+	log_conn_event_t *event_data = (log_conn_event_t *)data;
+	linux_dbgring_info_t *os_priv, *ring_info = NULL;
+
+	if (!VALID_RING(ring_id))
+		return BCME_UNSUPPORTED;
+	os_priv = dhd_dbg_get_priv(dhdp);
+
+	if (os_priv) {
+		ring_info = &os_priv[ring_id];
+	} else
+		return BCME_NORESOURCE;
+
+	memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+
+	if (ring_id == DHD_EVENT_RING_ID) {
+		msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE;
+		msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
+		msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
+		msg_hdr.timestamp = local_clock();
+		/* convert to ms */
+		do_div(msg_hdr.timestamp, 1000000);
+		msg_hdr.len = data_len;
+		/* filter the event for higher log level with current log level */
+		for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) {
+			if ((dhd_event_map[i].tag == event_data->event) &&
+				dhd_event_map[i].log_level > ring_info->log_level) {
+				return ret;
+			}
+		}
+	}
+	ret = dhd_dbg_ring_push(dhdp, ring_id, &msg_hdr, event_data);
+	if (ret) {
+		DHD_ERROR(("%s : failed to push data into the ring (%d) with ret(%d)\n",
+			__FUNCTION__, ring_id, ret));
+	}
+
+	return ret;
+}
+
+#ifdef DBG_PKT_MON
+int
+dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp)
+{
+	return dhd_dbg_attach_pkt_monitor(dhdp, dhd_os_dbg_monitor_tx_pkts,
+		dhd_os_dbg_monitor_tx_status, dhd_os_dbg_monitor_rx_pkts);
+}
+
+int
+dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp)
+{
+	return dhd_dbg_start_pkt_monitor(dhdp);
+}
+
+int
+dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid)
+{
+	return dhd_dbg_monitor_tx_pkts(dhdp, pkt, pktid);
+}
+
+int
+dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
+	uint16 status)
+{
+	return dhd_dbg_monitor_tx_status(dhdp, pkt, pktid, status);
+}
+
+int
+dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt)
+{
+	return dhd_dbg_monitor_rx_pkts(dhdp, pkt);
+}
+
+int
+dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp)
+{
+	return dhd_dbg_stop_pkt_monitor(dhdp);
+}
+
+int
+dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+	uint16 req_count, uint16 *resp_count)
+{
+	return dhd_dbg_monitor_get_tx_pkts(dhdp, user_buf, req_count, resp_count);
+}
+
+int
+dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf,
+	uint16 req_count, uint16 *resp_count)
+{
+	return dhd_dbg_monitor_get_rx_pkts(dhdp, user_buf, req_count, resp_count);
+}
+
+int
+dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp)
+{
+	return dhd_dbg_detach_pkt_monitor(dhdp);
+}
+#endif /* DBG_PKT_MON */
+
+int
+dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features)
+{
+	int ret = BCME_OK;
+	*features = 0;
+	*features |= DBG_MEMORY_DUMP_SUPPORTED;
+	if (FW_SUPPORTED(dhdp, logtrace)) {
+		*features |= DBG_CONNECT_EVENT_SUPPORTED;
+		*features |= DBG_VERBOSE_LOG_SUPPORTED;
+	}
+	if (FW_SUPPORTED(dhdp, hchk)) {
+		*features |= DBG_HEALTH_CHECK_SUPPORTED;
+	}
+#ifdef DBG_PKT_MON
+	if (FW_SUPPORTED(dhdp, d11status)) {
+		*features |= DBG_PACKET_FATE_SUPPORTED;
+	}
+#endif /* DBG_PKT_MON */
+	return ret;
+}
+
+static void
+dhd_os_dbg_pullreq(void *os_priv, int ring_id)
+{
+	linux_dbgring_info_t *ring_info;
+
+	ring_info = &((linux_dbgring_info_t *)os_priv)[ring_id];
+	cancel_delayed_work(&ring_info->work);
+	schedule_delayed_work(&ring_info->work, 0);
+}
+
+int
+dhd_os_dbg_attach(dhd_pub_t *dhdp)
+{
+	int ret = BCME_OK;
+	linux_dbgring_info_t *os_priv, *ring_info;
+	int ring_id;
+
+	/* os_dbg data */
+	os_priv = MALLOCZ(dhdp->osh, sizeof(*os_priv) * DEBUG_RING_ID_MAX);
+	if (!os_priv)
+		return BCME_NOMEM;
+
+	for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX;
+	     ring_id++) {
+		ring_info = &os_priv[ring_id];
+		INIT_DELAYED_WORK(&ring_info->work, dbg_ring_poll_worker);
+		ring_info->dhdp = dhdp;
+		ring_info->ring_id = ring_id;
+	}
+
+	ret = dhd_dbg_attach(dhdp, dhd_os_dbg_pullreq, dhd_os_dbg_urgent_notifier, os_priv);
+	if (ret)
+		MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX);
+
+	return ret;
+}
+
+void
+dhd_os_dbg_detach(dhd_pub_t *dhdp)
+{
+	linux_dbgring_info_t *os_priv, *ring_info;
+	int ring_id;
+	/* free os_dbg data */
+	os_priv = dhd_dbg_get_priv(dhdp);
+	if (!os_priv)
+		return;
+	/* abort any pending job */
+	for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+		ring_info = &os_priv[ring_id];
+		if (ring_info->interval) {
+			ring_info->interval = 0;
+			cancel_delayed_work_sync(&ring_info->work);
+		}
+	}
+	MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX);
+
+	return dhd_dbg_detach(dhdp);
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.c b/drivers/net/wireless/bcmdhd/dhd_flowring.c
new file mode 100644
index 0000000..17a9dc3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_flowring.c
@@ -0,0 +1,1063 @@
+/*
+ * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
+ *
+ * Flow rings are entities related to transmit traffic (i.e. traffic propagating towards the antenna)
+ *
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_flowring.c 710862 2017-07-14 07:43:59Z $
+ */
+
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <ethernet.h>
+#include <bcmevent.h>
+#include <dngl_stats.h>
+
+#include <dhd.h>
+
+#include <dhd_flowring.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <802.1d.h>
+#include <pcie_core.h>
+#include <bcmmsgbuf.h>
+#include <dhd_pcie.h>
+
+static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);
+
+static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
+                                     uint8 prio, char *sa, char *da);
+
+static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
+                                      uint8 prio, char *sa, char *da);
+
+static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+                                uint8 prio, char *sa, char *da, uint16 *flowid);
+int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);
+
+#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
+#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))
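+/* The per-flowring backup queue is a singly linked FIFO threaded through the
+ * packets' own link pointers (PKTLINK/PKTSETLINK), so enqueueing a packet
+ * requires no additional allocation.
+ */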
+
+#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
+#else
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
+#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
+const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
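+/*
+ * prio2ac[] maps 802.1D priorities to WMM access categories and prio2tid[]
+ * is an identity map to TIDs. The EAPOL_PKT_PRIO / DHD_LOSSLESS_ROAMING
+ * variant presumably keeps priority 7 in a slot of its own so that
+ * EAPOL/roaming frames end up on a dedicated flow ring.
+ */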
+
+/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
+static INLINE int
+dhd_flow_queue_throttle(flow_queue_t *queue)
+{
+	return DHD_FLOW_QUEUE_FULL(queue);
+}
+
+int BCMFASTPATH
+dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
+{
+	return BCME_NORESOURCE;
+}
+
+/** Returns flow ring given a flowid */
+flow_ring_node_t *
+dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
+{
+	flow_ring_node_t * flow_ring_node;
+
+	ASSERT(dhdp != (dhd_pub_t*)NULL);
+	ASSERT(flowid < dhdp->num_flow_rings);
+
+	flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
+
+	ASSERT(flow_ring_node->flowid == flowid);
+	return flow_ring_node;
+}
+
+/** Returns 'backup' queue given a flowid */
+flow_queue_t *
+dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
+{
+	flow_ring_node_t * flow_ring_node;
+
+	flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+	return &flow_ring_node->queue;
+}
+
+/* Flow ring's queue management functions */
+
+/** Reinitialize a flow ring's queue. */
+void
+dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+	ASSERT((queue != NULL) && (max > 0));
+
+	queue->head = queue->tail = NULL;
+	queue->len = 0;
+
+	/* Set queue's threshold and queue's parent cumulative length counter */
+	ASSERT(max > 1);
+	DHD_FLOW_QUEUE_SET_MAX(queue, max);
+	DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
+	DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
+	DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);
+
+	queue->failures = 0U;
+	queue->cb = &dhd_flow_queue_overflow;
+}
+
+/** Initialize a flow ring's queue, called on driver initialization. */
+void
+dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+	ASSERT((queue != NULL) && (max > 0));
+
+	dll_init(&queue->list);
+	dhd_flow_queue_reinit(dhdp, queue, max);
+}
+
+/** Register an enqueue overflow callback handler */
+void
+dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
+{
+	ASSERT(queue != NULL);
+	queue->cb = cb;
+}
+
+/**
+ * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on
+ * to the flow ring itself.
+ */
+int BCMFASTPATH
+dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+	int ret = BCME_OK;
+
+	ASSERT(queue != NULL);
+
+	if (dhd_flow_queue_throttle(queue)) {
+		queue->failures++;
+		ret = (*queue->cb)(queue, pkt);
+		goto done;
+	}
+
+	if (queue->head) {
+		FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
+	} else {
+		queue->head = pkt;
+	}
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
+
+	queue->tail = pkt; /* at tail */
+
+	queue->len++;
+	/* increment parent's cumulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* increment grandparent's cumulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+
+done:
+	return ret;
+}
+
+/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
+void * BCMFASTPATH
+dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
+{
+	void * pkt;
+
+	ASSERT(queue != NULL);
+
+	pkt = queue->head; /* from head */
+
+	if (pkt == NULL) {
+		ASSERT((queue->len == 0) && (queue->tail == NULL));
+		goto done;
+	}
+
+	queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
+	if (queue->head == NULL)
+		queue->tail = NULL;
+
+	queue->len--;
+	/* decrement parent's cumulative length */
+	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* decrement grandparent's cumulative length */
+	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */
+
+done:
+	return pkt;
+}
+
+/** Reinsert a dequeued 802.3 packet back at the head */
+void BCMFASTPATH
+dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+	if (queue->head == NULL) {
+		queue->tail = pkt;
+	}
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
+	queue->head = pkt;
+	queue->len++;
+	/* increment parent's cumulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
+	/* increment grandparent's cumulative length */
+	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
+}
+
+/** Fetch the backup queue for a flowring, and assign flow control thresholds */
+void
+dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+                     int queue_budget, int cumm_threshold, void *cumm_ctr,
+                     int l2cumm_threshold, void *l2cumm_ctr)
+{
+	flow_queue_t * queue;
+
+	ASSERT(dhdp != (dhd_pub_t*)NULL);
+	ASSERT(queue_budget > 1);
+	ASSERT(cumm_threshold > 1);
+	ASSERT(cumm_ctr != (void*)NULL);
+	ASSERT(l2cumm_threshold > 1);
+	ASSERT(l2cumm_ctr != (void*)NULL);
+
+	queue = dhd_flow_queue(dhdp, flowid);
+
+	DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
+
+	/* Set the queue's parent threshold and cumulative counter */
+	DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
+	DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
+
+	/* Set the queue's grandparent threshold and cumulative counter */
+	DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
+	DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
+}
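+/*
+ * Flow control is thus layered: a per-flowring queue budget, a parent
+ * threshold tracked against a shared cumulative counter (by default
+ * dhdp->cumm_ctr) and a grandparent threshold tracked against a second,
+ * wider counter (by default dhdp->l2cumm_ctr). dhd_flow_queue_enqueue()
+ * throttles via dhd_flow_queue_throttle()/DHD_FLOW_QUEUE_FULL(), which is
+ * expected to honour these limits.
+ */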
+
+/** Initializes data structures of multiple flow rings */
+int
+dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
+{
+	uint32 idx;
+	uint32 flow_ring_table_sz;
+	uint32 if_flow_lkup_sz = 0;
+	void * flowid_allocator;
+	flow_ring_table_t *flow_ring_table = NULL;
+	if_flow_lkup_t *if_flow_lkup = NULL;
+	void *lock = NULL;
+	void *list_lock = NULL;
+	unsigned long flags;
+
+	DHD_INFO(("%s\n", __FUNCTION__));
+
+	/* Construct a 16bit flowid allocator */
+	flowid_allocator = id16_map_init(dhdp->osh,
+	                       num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED);
+	if (flowid_allocator == NULL) {
+		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+
+	/* Allocate a flow ring table comprising the requested number of rings */
+	flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
+	flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
+	if (flow_ring_table == NULL) {
+		DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Initialize flow ring table state */
+	DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
+	DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
+	bzero((uchar *)flow_ring_table, flow_ring_table_sz);
+	for (idx = 0; idx < num_flow_rings; idx++) {
+		flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
+		flow_ring_table[idx].flowid = (uint16)idx;
+		flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
+#ifdef IDLE_TX_FLOW_MGMT
+		flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
+#endif /* IDLE_TX_FLOW_MGMT */
+		if (flow_ring_table[idx].lock == NULL) {
+			DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		dll_init(&flow_ring_table[idx].list);
+
+		/* Initialize the per flow ring backup queue */
+		dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
+		                    FLOW_RING_QUEUE_THRESHOLD);
+	}
+
+	/* Allocate per interface hash table (for fast lookup from interface to flow ring) */
+	if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+	if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
+		DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
+	if (if_flow_lkup == NULL) {
+		DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Initialize per interface hash table */
+	for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+		int hash_ix;
+		if_flow_lkup[idx].status = 0;
+		if_flow_lkup[idx].role = 0;
+		for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
+			if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
+	}
+
+	lock = dhd_os_spin_lock_init(dhdp->osh);
+	if (lock == NULL)
+		goto fail;
+
+	list_lock = dhd_os_spin_lock_init(dhdp->osh);
+	if (list_lock == NULL)
+		goto lock_fail;
+
+	dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
+	bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+#ifdef DHD_LOSSLESS_ROAMING
+	dhdp->dequeue_prec_map = ALLPRIO;
+#endif
+	/* Now populate into dhd pub */
+	DHD_FLOWID_LOCK(lock, flags);
+	dhdp->num_flow_rings = num_flow_rings;
+	dhdp->flowid_allocator = (void *)flowid_allocator;
+	dhdp->flow_ring_table = (void *)flow_ring_table;
+	dhdp->if_flow_lkup = (void *)if_flow_lkup;
+	dhdp->flowid_lock = lock;
+	dhdp->flow_rings_inited = TRUE;
+	dhdp->flowring_list_lock = list_lock;
+	DHD_FLOWID_UNLOCK(lock, flags);
+
+	DHD_INFO(("%s done\n", __FUNCTION__));
+	return BCME_OK;
+
+lock_fail:
+	/* deinit the spinlock */
+	dhd_os_spin_lock_deinit(dhdp->osh, lock);
+
+fail:
+	/* Destruct the per interface flow lkup table */
+	if (if_flow_lkup != NULL) {
+		DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
+	}
+	if (flow_ring_table != NULL) {
+		for (idx = 0; idx < num_flow_rings; idx++) {
+			if (flow_ring_table[idx].lock != NULL)
+				dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+		}
+		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+	}
+	id16_map_fini(dhdp->osh, flowid_allocator);
+
+	return BCME_NOMEM;
+}
+
+/** Deinit Flow Ring specific data structures */
+void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
+{
+	uint16 idx;
+	uint32 flow_ring_table_sz;
+	uint32 if_flow_lkup_sz;
+	flow_ring_table_t *flow_ring_table;
+	unsigned long flags;
+	void *lock;
+
+	DHD_INFO(("dhd_flow_rings_deinit\n"));
+
+	if (!(dhdp->flow_rings_inited)) {
+		DHD_ERROR(("dhd_flow_rings not initialized!\n"));
+		return;
+	}
+
+	if (dhdp->flow_ring_table != NULL) {
+
+		ASSERT(dhdp->num_flow_rings > 0);
+
+		DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+		flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+		dhdp->flow_ring_table = NULL;
+		DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+		for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
+			if (flow_ring_table[idx].active) {
+				dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
+			}
+			ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));
+
+			/* Deinit flow ring queue locks before destroying flow ring table */
+			if (flow_ring_table[idx].lock != NULL) {
+				dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+			}
+			flow_ring_table[idx].lock = NULL;
+
+		}
+
+		/* Destruct the flow ring table */
+		flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
+		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+
+	/* Destruct the per interface flow lkup table */
+	if (dhdp->if_flow_lkup != NULL) {
+		if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+		bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
+		DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
+		dhdp->if_flow_lkup = NULL;
+	}
+
+	/* Destruct the flowid allocator */
+	if (dhdp->flowid_allocator != NULL)
+		dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
+
+	dhdp->num_flow_rings = 0U;
+	bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	lock = dhdp->flowid_lock;
+	dhdp->flowid_lock = NULL;
+
+	if (lock) {
+		DHD_FLOWID_UNLOCK(lock, flags);
+		dhd_os_spin_lock_deinit(dhdp->osh, lock);
+	}
+
+	dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
+	dhdp->flowring_list_lock = NULL;
+
+	ASSERT(dhdp->if_flow_lkup == NULL);
+	ASSERT(dhdp->flowid_allocator == NULL);
+	ASSERT(dhdp->flow_ring_table == NULL);
+	dhdp->flow_rings_inited = FALSE;
+}
+
+/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
+uint8
+dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+	ASSERT(if_flow_lkup);
+	return if_flow_lkup[ifindex].role;
+}
+
+#ifdef WLTDLS
+bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
+{
+	unsigned long flags;
+	tdls_peer_node_t *cur = NULL;
+
+	DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+	cur = dhdp->peer_tbl.node;
+
+	while (cur != NULL) {
+		if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+			DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+			return TRUE;
+		}
+		cur = cur->next;
+	}
+	DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+	return FALSE;
+}
+#endif /* WLTDLS */
+
+/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
+static INLINE uint16
+dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+	int hash;
+	bool ismcast = FALSE;
+	flow_hash_info_t *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	ASSERT(if_flow_lkup);
+
+	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
+			(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
+#ifdef WLTDLS
+		if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
+			is_tdls_destination(dhdp, da)) {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+			cur = if_flow_lkup[ifindex].fl_hash[hash];
+			while (cur != NULL) {
+				if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
+					DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+					return cur->flowid;
+				}
+				cur = cur->next;
+			}
+			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+			return FLOWID_INVALID;
+		}
+#endif /* WLTDLS */
+		/* For STA non TDLS dest and WDS dest flow ring id is mapped based on prio only */
+		cur = if_flow_lkup[ifindex].fl_hash[prio];
+		if (cur) {
+			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+			return cur->flowid;
+		}
+	} else {
+
+		if (ETHER_ISMULTI(da)) {
+			ismcast = TRUE;
+			hash = 0;
+		} else {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+		}
+
+		cur = if_flow_lkup[ifindex].fl_hash[hash];
+
+		while (cur) {
+			if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
+				(!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
+				(cur->flow_info.tid == prio))) {
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				return cur->flowid;
+			}
+			cur = cur->next;
+		}
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
+	return FLOWID_INVALID;
+} /* dhd_flowid_find */
+
+/** Create unique Flow ID, called when a flow ring is created. */
+static INLINE uint16
+dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+	flow_hash_info_t *fl_hash_node, *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	int hash;
+	uint16 flowid;
+	unsigned long flags;
+
+	fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
+	if (fl_hash_node == NULL) {
+		DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	ASSERT(dhdp->flowid_allocator != NULL);
+	flowid = id16_map_alloc(dhdp->flowid_allocator);
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	if (flowid == FLOWID_INVALID) {
+		MFREE(dhdp->osh, fl_hash_node,  sizeof(flow_hash_info_t));
+		DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+
+	fl_hash_node->flowid = flowid;
+	fl_hash_node->flow_info.tid = prio;
+	fl_hash_node->flow_info.ifindex = ifindex;
+	fl_hash_node->next = NULL;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
+			(if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
+		/* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
+#ifdef WLTDLS
+		if (dhdp->peer_tbl.tdls_peer_count &&
+			(is_tdls_destination(dhdp, da))) {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+			cur = if_flow_lkup[ifindex].fl_hash[hash];
+			if (cur) {
+				while (cur->next) {
+					cur = cur->next;
+				}
+				cur->next = fl_hash_node;
+			} else {
+				if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+			}
+		} else
+#endif /* WLTDLS */
+			if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
+	} else {
+
+		/* For bcast/mcast assign the first slot in the interface */
+		hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
+		cur = if_flow_lkup[ifindex].fl_hash[hash];
+		if (cur) {
+			while (cur->next) {
+				cur = cur->next;
+			}
+			cur->next = fl_hash_node;
+		} else
+			if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+
+	return fl_hash_node->flowid;
+} /* dhd_flowid_alloc */
+
+/** Get flow ring ID; if not present, try to create one */
+static INLINE int
+dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+                  uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+	uint16 id;
+	flow_ring_node_t *flow_ring_node;
+	flow_ring_table_t *flow_ring_table;
+	unsigned long flags;
+	int ret;
+	bool is_sta_assoc;
+
+	DHD_INFO(("%s\n", __FUNCTION__));
+	if (!dhdp->flow_ring_table) {
+		return BCME_ERROR;
+	}
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
+
+	if (id == FLOWID_INVALID) {
+
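+		/* No flow ring exists yet for this destination/prio; validate the
+		 * interface state before trying to allocate one.
+		 */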
+		if_flow_lkup_t *if_flow_lkup;
+		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+		if (!if_flow_lkup[ifindex].status)
+			return BCME_ERROR;
+		BCM_REFERENCE(is_sta_assoc);
+#if defined(PCIE_FULL_DONGLE)
+		is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da);
+		DHD_ERROR(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__,
+			ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role,
+			is_sta_assoc));
+		if (!ETHER_ISMULTI(da) &&
+		    ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) ||
+		    (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) &&
+		    (!is_sta_assoc))
+			return BCME_ERROR;
+#endif /* PCIE_FULL_DONGLE */
+
+		id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
+		if (id == FLOWID_INVALID) {
+			DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+			           __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
+			return BCME_ERROR;
+		}
+
+		ASSERT(id < dhdp->num_flow_rings);
+
+		/* register this flowid in dhd_pub */
+		dhd_add_flowid(dhdp, ifindex, prio, da, id);
+
+		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		/* Init Flow info */
+		memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
+		memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
+		flow_ring_node->flow_info.tid = prio;
+		flow_ring_node->flow_info.ifindex = ifindex;
+		flow_ring_node->active = TRUE;
+		flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;
+
+#ifdef DEVICE_TX_STUCK_DETECT
+		flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME();
+		flow_ring_node->stuck_count = 0;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+		/* Create and inform device about the new flow */
+		if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
+				!= BCME_OK) {
+			DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
+			return BCME_ERROR;
+		}
+
+		*flowid = id;
+		return BCME_OK;
+	} else {
+		/* if the Flow id was found in the hash */
+		ASSERT(id < dhdp->num_flow_rings);
+
+		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		/*
+		 * If the flow_ring_node is in the open state or in create-pending state then
+		 * we can return the flow id to the caller. If the flow_ring_node is in
+		 * FLOW_RING_STATUS_CREATE_PENDING, the creation is still in progress and
+		 * hence the packets should be queued.
+		 *
+		 * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
+		 * FLOW_RING_STATUS_CLOSED, then we should return an error.
+		 * Note that if the flow ring is being deleted we would mark it as
+		 * FLOW_RING_STATUS_DELETE_PENDING. Before the dongle responds, and before we
+		 * mark it as FLOW_RING_STATUS_CLOSED, we could still get tx packets;
+		 * we should drop the packets in that case.
+		 * The decision to return OK should NOT be based on the 'active' variable, because
+		 * 'active' is set TRUE when a flow_ring_node gets allocated and set FALSE when
+		 * the flow ring gets removed, so it does not reflect the true state of the
+		 * flow ring.
+		 * If IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring states.
+		 * If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid is
+		 * returned and the flowring is resumed again from dhd_bus_txdata.
+		 * The status FLOW_RING_STATUS_RESUME_PENDING is equivalent to
+		 * FLOW_RING_STATUS_CREATE_PENDING.
+		 */
+		if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
+			flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
+			*flowid = FLOWID_INVALID;
+			ret = BCME_ERROR;
+		} else {
+			*flowid = id;
+			ret = BCME_OK;
+		}
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		return ret;
+	} /* Flow Id found in the hash */
+} /* dhd_flowid_lookup */
+
+/**
+ * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
+ * select the flowring to send the packet to the dongle.
+ */
+int BCMFASTPATH
+dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+	struct ether_header *eh = (struct ether_header *)pktdata;
+	uint16 flowid;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+
+	if (ifindex >= DHD_MAX_IFS) {
+		return BCME_BADARG;
+	}
+
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
+		&flowid) != BCME_OK) {
+		return BCME_ERROR;
+	}
+
+	DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
+
+	/* Tag the packet with flowid */
+	DHD_PKT_SET_FLOWID(pktbuf, flowid);
+	return BCME_OK;
+}
+
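+/**
+ * Removes the hash entry for the given flowid, deregisters it from dhd_pub and
+ * returns the id to the flowid allocator.
+ */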
+void
+dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+	int hashix;
+	bool found = FALSE;
+	flow_hash_info_t *cur, *prev;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
+
+		cur = if_flow_lkup[ifindex].fl_hash[hashix];
+
+		if (cur) {
+			if (cur->flowid == flowid) {
+				found = TRUE;
+			}
+
+			prev = NULL;
+			while (!found && cur) {
+				if (cur->flowid == flowid) {
+					found = TRUE;
+					break;
+				}
+				prev = cur;
+				cur = cur->next;
+			}
+			if (found) {
+				if (!prev) {
+					if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
+				} else {
+					prev->next = cur->next;
+				}
+
+				/* deregister flowid from dhd_pub. */
+				dhd_del_flowid(dhdp, ifindex, flowid);
+
+				id16_map_free(dhdp->flowid_allocator, flowid);
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
+
+				return;
+			}
+		}
+	}
+
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+	DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
+	           __FUNCTION__, flowid));
+} /* dhd_flowid_free */
+
+/**
+ * Delete all flow rings associated with the given interface. Called when, for example, the
+ * dongle indicates that a wireless link has gone down.
+ */
+void
+dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+			                                 (void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+void
+dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+			dhd_bus_flow_ring_flush_request(dhdp->bus,
+			                                 (void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
+void
+dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
+			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
+			DHD_ERROR(("%s: deleting flowid %d\n",
+				__FUNCTION__, flow_ring_table[id].flowid));
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+				(void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+/** Handles interface ADD, CHANGE, DEL indications from the dongle */
+void
+dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+                               uint8 op, uint8 role)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	DHD_ERROR(("%s: ifindex %u op %u role is %u \n",
+	          __FUNCTION__, ifindex, op, role));
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
+
+		if_flow_lkup[ifindex].role = role;
+
+		if (role == WLC_E_IF_ROLE_WDS) {
+			/**
+			 * The WDS role does not send a WLC_E_LINK event after the interface is up.
+			 * So to create flowrings for WDS, set the status to TRUE in WLC_E_IF itself.
+			 * The same applies when setting the status to FALSE.
+			 * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that all
+			 * interfaces are handled uniformly.
+			 */
+			if_flow_lkup[ifindex].status = TRUE;
+			DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
+			          __FUNCTION__, ifindex, role));
+		}
+	} else	if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
+		if_flow_lkup[ifindex].status = FALSE;
+		DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
+		          __FUNCTION__, ifindex, role));
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+}
+
+/** Handles a STA 'link' indication from the dongle */
+int
+dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return BCME_BADARG;
+
+	DHD_ERROR(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (status) {
+		if_flow_lkup[ifindex].status = TRUE;
+	} else {
+		if_flow_lkup[ifindex].status = FALSE;
+	}
+
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return BCME_OK;
+}
+
+/** Update flow priority mapping, called on IOVAR */
+int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
+{
+	uint16 flowid;
+	flow_ring_node_t *flow_ring_node;
+
+	if (map > DHD_FLOW_PRIO_LLR_MAP)
+		return BCME_BADOPTION;
+
+	/* Check if we need to change prio map */
+	if (map == dhdp->flow_prio_map_type)
+		return BCME_OK;
+
+	/* If any ring is active we cannot change priority mapping for flow rings */
+	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+		if (flow_ring_node->active)
+			return BCME_EPERM;
+	}
+
+	/* Inform firmware about new mapping type */
+	if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
+		return BCME_ERROR;
+
+	/* update internal structures */
+	dhdp->flow_prio_map_type = map;
+	if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
+		bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+	else
+		bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	return BCME_OK;
+}
+
+/** Inform firmware on updated flow priority mapping, called on IOVAR */
+int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
+{
+	uint8 iovbuf[24] = {0};
+	if (!set) {
+		bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+			DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		*map = iovbuf[0];
+		return BCME_OK;
+	}
+	bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set fl_prio_map \n",
+			__FUNCTION__));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.h b/drivers/net/wireless/bcmdhd/dhd_flowring.h
new file mode 100644
index 0000000..d5faf0a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_flowring.h
@@ -0,0 +1,275 @@
+/*
+ * @file Header file describing the DHD flow ring interfaces.
+ *
+ * Flow rings are entities related to transmit traffic (i.e. traffic propagating towards the antenna).
+ *
+ * Provides type definitions and function prototypes used to create, delete and manage flow rings
+ * at a high level.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_flowring.h 672438 2016-11-28 12:35:24Z $
+ */
+
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_flowrings_h_
+#define _dhd_flowrings_h_
+
+/* Max pkts held in a flow ring's backup queue */
+#define FLOW_RING_QUEUE_THRESHOLD       (2048)
+
+/* Number of H2D common rings */
+#define FLOW_RING_COMMON                BCMPCIE_H2D_COMMON_MSGRINGS
+
+#define FLOWID_INVALID                  (ID16_INVALID)
+#define FLOWID_RESERVED                 (FLOW_RING_COMMON)
+
+#define FLOW_RING_STATUS_OPEN           0
+#define FLOW_RING_STATUS_CREATE_PENDING	1
+#define FLOW_RING_STATUS_CLOSED         2
+#define FLOW_RING_STATUS_DELETE_PENDING 3
+#define FLOW_RING_STATUS_FLUSH_PENDING  4
+
+#ifdef IDLE_TX_FLOW_MGMT
+#define FLOW_RING_STATUS_SUSPENDED	5
+#define FLOW_RING_STATUS_RESUME_PENDING	6
+#endif /* IDLE_TX_FLOW_MGMT */
+#define FLOW_RING_STATUS_STA_FREEING    7
+
+#ifdef DHD_EFI
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ	1600
+#else
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ	2048
+#endif
+
+#define DHD_FLOW_PRIO_AC_MAP		0
+#define DHD_FLOW_PRIO_TID_MAP		1
+/* Flow ring priority map for lossless roaming */
+#define DHD_FLOW_PRIO_LLR_MAP		2
+
+/* Hashing a MAC address for lookup into a per-interface flow hash table */
+#define DHD_FLOWRING_HASH_SIZE    256
+#define	DHD_FLOWRING_HASHINDEX(ea, prio) \
+	       ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \
+		% DHD_FLOWRING_HASH_SIZE)
+
+#define DHD_IF_ROLE(pub, idx)		(((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
+#define DHD_IF_ROLE_AP(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
+#define DHD_IF_ROLE_STA(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
+#define DHD_IF_ROLE_P2PGO(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
+#define DHD_IF_ROLE_WDS(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS)
+#define DHD_FLOW_RING(dhdp, flowid) \
+	(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
+
+struct flow_queue;
+
+/* Flow Ring Queue Enqueue overflow callback */
+typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt);
+
+/**
+ * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred
+ * between queue and ring. A packet from the host stack is first added to the queue, and in a later
+ * stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets in the
+ * flow ring are device owned.
+ */
+typedef struct flow_queue {
+	dll_t  list;                /* manage a flowring queue in a doubly linked list */
+	void * head;                /* first packet in the queue */
+	void * tail;                /* last packet in the queue */
+	uint16 len;                 /* number of packets in the queue */
+	uint16 max;                 /* maximum or min budget (used in cumm) */
+	uint32 threshold;           /* parent's cumulative length threshold */
+	void * clen_ptr;            /* parent's cumulative length counter */
+	uint32 failures;            /* enqueue failures due to queue overflow */
+	flow_queue_cb_t cb;         /* callback invoked on threshold crossing */
+	uint32 l2threshold;         /* grandparent's (level 2) cumulative length threshold */
+	void * l2clen_ptr;          /* grandparent's (level 2) cumulative length counter */
+} flow_queue_t;
+
+#define DHD_FLOW_QUEUE_LEN(queue)       ((int)(queue)->len)
+#define DHD_FLOW_QUEUE_MAX(queue)       ((int)(queue)->max)
+#define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold)
+#define DHD_FLOW_QUEUE_L2THRESHOLD(queue) ((int)(queue)->l2threshold)
+#define DHD_FLOW_QUEUE_EMPTY(queue)     ((queue)->len == 0)
+#define DHD_FLOW_QUEUE_FAILURES(queue)  ((queue)->failures)
+
+#define DHD_FLOW_QUEUE_AVAIL(queue)     ((int)((queue)->max - (queue)->len))
+#define DHD_FLOW_QUEUE_FULL(queue)      ((queue)->len >= (queue)->max)
+
+#define DHD_FLOW_QUEUE_OVFL(queue, budget)  \
+	(((queue)->len) > budget)
+
+#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \
+	((queue)->max) = ((budget) - 1)
+
+/* Queue's cumulative threshold. */
+#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \
+	((queue)->threshold) = ((cumm_threshold) - 1)
+
+/* Queue's cumulative length object accessor. */
+#define DHD_FLOW_QUEUE_CLEN_PTR(queue)  ((queue)->clen_ptr)
+
+/* Set a queue's cumm_len pointer to a parent's cumm_ctr_t cumulative length */
+#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr)  \
+	((queue)->clen_ptr) = (void *)(parent_clen_ptr)
+
+/* Queue's level 2 cumulative threshold. */
+#define DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold) \
+	((queue)->l2threshold) = ((l2cumm_threshold) - 1)
+
+/* Queue's level 2 cumulative length object accessor. */
+#define DHD_FLOW_QUEUE_L2CLEN_PTR(queue)  ((queue)->l2clen_ptr)
+
+/* Set a queue's level 2 cumm_len pointer to a grandparent's cumm_ctr_t cumulative length */
+#define DHD_FLOW_QUEUE_SET_L2CLEN(queue, grandparent_clen_ptr)  \
+	((queue)->l2clen_ptr) = (void *)(grandparent_clen_ptr)
+
+/*  see wlfc_proto.h for tx status details */
+#define DHD_FLOWRING_MAXSTATUS_MSGS	5
+#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus)
+
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_pkttag_fr {
+	uint16  flowid;
+	uint16  ifid;
+	int     dataoff;
+	dmaaddr_t physaddr;
+	uint32 pa_len;
+} dhd_pkttag_fr_t;
+
+#define DHD_PKTTAG_SET_IFID(tag, idx)       ((tag)->ifid = (uint16)(idx))
+#define DHD_PKTTAG_SET_PA(tag, pa)          ((tag)->physaddr = (pa))
+#define DHD_PKTTAG_SET_PA_LEN(tag, palen)   ((tag)->pa_len = (palen))
+#define DHD_PKTTAG_IFID(tag)                ((tag)->ifid)
+#define DHD_PKTTAG_PA(tag)                  ((tag)->physaddr)
+#define DHD_PKTTAG_PA_LEN(tag)              ((tag)->pa_len)
+
+
+/** each flow ring is dedicated to a tid/sa/da combination */
+typedef struct flow_info {
+	uint8		tid;
+	uint8		ifindex;
+	char		sa[ETHER_ADDR_LEN];
+	char		da[ETHER_ADDR_LEN];
+} flow_info_t;
+
+/** a flow ring is used for outbound (towards antenna) 802.3 packets */
+typedef struct flow_ring_node {
+	dll_t		list;  /* manage a constructed flowring in a dll; must be the first member */
+	flow_queue_t	queue; /* queues packets before they enter the flow ring, flow control */
+	bool		active;
+	uint8		status;
+	/*
+	 * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For
+	 * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation.
+	 */
+	uint16		flowid;
+	flow_info_t	flow_info;
+	void		*prot_info;
+	void		*lock; /* lock for flowring access protection */
+
+#ifdef IDLE_TX_FLOW_MGMT
+	uint64		last_active_ts; /* contains last active timestamp */
+#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef DEVICE_TX_STUCK_DETECT
+	/* Time stamp (msec) of the last Tx packet completion received on this flow ring */
+	uint32		tx_cmpl;
+	/*
+	 * Holds the tx_cmpl which was read during the previous
+	 * iteration of the stuck detection algo
+	 */
+	uint32		tx_cmpl_prev;
+	/* counter to decide if this particular flow is stuck or not */
+	uint32		stuck_count;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+} flow_ring_node_t;
+
+typedef flow_ring_node_t flow_ring_table_t;
+
+typedef struct flow_hash_info {
+	uint16			flowid;
+	flow_info_t		flow_info;
+	struct flow_hash_info	*next;
+} flow_hash_info_t;
+
+typedef struct if_flow_lkup {
+	bool		status;
+	uint8		role; /* Interface role: STA/AP */
+	flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */
+} if_flow_lkup_t;
+
+static INLINE flow_ring_node_t *
+dhd_constlist_to_flowring(dll_t *item)
+{
+	return ((flow_ring_node_t *)item);
+}
+
+/* Exported API */
+
+/* Flow ring's queue management functions */
+extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid);
+extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid);
+
+extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb);
+extern int  dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue);
+extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+
+extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+                          int queue_budget, int cumm_threshold, void *cumm_ctr,
+                          int l2cumm_threshold, void *l2cumm_ctr);
+extern int  dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings);
+
+extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp);
+
+extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
+                void *pktbuf);
+
+extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
+
+extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex);
+extern void dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex);
+
+extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex,
+                char *addr);
+
+/* Handle Interface ADD, DEL operations */
+extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+                uint8 op, uint8 role);
+
+/* Handle a STA interface link status update */
+extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex,
+                uint8 status);
+extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set);
+extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map);
+
+extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex);
+#endif /* _dhd_flowrings_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_gpio.c b/drivers/net/wireless/bcmdhd/dhd_gpio.c
new file mode 100644
index 0000000..bd706bf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_gpio.c
@@ -0,0 +1,331 @@
+
+#include <osl.h>
+#include <dhd_linux.h>
+#include <linux/gpio.h>
+
+#ifdef CUSTOMER_HW_PLATFORM
+#include <plat/sdhci.h>
+#define	sdmmc_channel	sdmmc_device_mmc0
+#endif /* CUSTOMER_HW_PLATFORM */
+
+#if defined(BUS_POWER_RESTORE) && defined(BCMSDIO)
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#endif /* defined(BUS_POWER_RESTORE) && defined(BCMSDIO) */
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+static int gpio_wl_reg_on = -1; // WL_REG_ON is input pin of WLAN module
+#ifdef CUSTOMER_OOB
+static int gpio_wl_host_wake = -1; // WL_HOST_WAKE is output pin of WLAN module
+#endif
+
+static int
+dhd_wlan_set_power(int on
+#ifdef BUS_POWER_RESTORE
+, wifi_adapter_info_t *adapter
+#endif /* BUS_POWER_RESTORE */
+)
+{
+	int err = 0;
+
+	if (on) {
+		printf("======== PULL WL_REG_ON(%d) HIGH! ========\n", gpio_wl_reg_on);
+		if (gpio_wl_reg_on >= 0) {
+			err = gpio_direction_output(gpio_wl_reg_on, 1);
+			if (err) {
+				printf("%s: WL_REG_ON didn't output high\n", __FUNCTION__);
+				return -EIO;
+			}
+		}
+#if defined(BUS_POWER_RESTORE)
+#if defined(BCMSDIO)
+		if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
+			printf("======== mmc_power_restore_host! ========\n");
+			mmc_power_restore_host(adapter->sdio_func->card->host);
+		}
+#elif defined(BCMPCIE)
+		OSL_SLEEP(50); /* delay needed to be able to restore PCIe configuration registers */
+		if (adapter->pci_dev) {
+			printf("======== pci_set_power_state PCI_D0! ========\n");
+			pci_set_power_state(adapter->pci_dev, PCI_D0);
+			if (adapter->pci_saved_state)
+				pci_load_and_free_saved_state(adapter->pci_dev, &adapter->pci_saved_state);
+			pci_restore_state(adapter->pci_dev);
+			err = pci_enable_device(adapter->pci_dev);
+			if (err < 0)
+				printf("%s: PCI enable device failed", __FUNCTION__);
+			pci_set_master(adapter->pci_dev);
+		}
+#endif /* BCMPCIE */
+#endif /* BUS_POWER_RESTORE */
+		/* Allow the platform power supply to stabilize */
+		mdelay(100);
+	} else {
+#if defined(BUS_POWER_RESTORE)
+#if defined(BCMSDIO)
+		if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
+			printf("======== mmc_power_save_host! ========\n");
+			mmc_power_save_host(adapter->sdio_func->card->host);
+		}
+#elif defined(BCMPCIE)
+		if (adapter->pci_dev) {
+			printf("======== pci_set_power_state PCI_D3hot! ========\n");
+			pci_save_state(adapter->pci_dev);
+			adapter->pci_saved_state = pci_store_saved_state(adapter->pci_dev);
+			if (pci_is_enabled(adapter->pci_dev))
+				pci_disable_device(adapter->pci_dev);
+			pci_set_power_state(adapter->pci_dev, PCI_D3hot);
+		}
+#endif /* BCMPCIE */
+#endif /* BUS_POWER_RESTORE */
+		printf("======== PULL WL_REG_ON(%d) LOW! ========\n", gpio_wl_reg_on);
+		if (gpio_wl_reg_on >= 0) {
+			err = gpio_direction_output(gpio_wl_reg_on, 0);
+			if (err) {
+				printf("%s: WL_REG_ON didn't output low\n", __FUNCTION__);
+				return -EIO;
+			}
+		}
+	}
+
+	return err;
+}
+
+static int dhd_wlan_set_reset(int onoff)
+{
+	return 0;
+}
+
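+/* Platform callback to announce card insertion or removal to the host bus controller */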
+static int dhd_wlan_set_carddetect(int present)
+{
+	int err = 0;
+
+#if !defined(BUS_POWER_RESTORE)
+	if (present) {
+#if defined(BCMSDIO)
+		printf("======== Card detection to detect SDIO card! ========\n");
+#ifdef CUSTOMER_HW_PLATFORM
+		err = sdhci_force_presence_change(&sdmmc_channel, 1);
+#endif /* CUSTOMER_HW_PLATFORM */
+#elif defined(BCMPCIE)
+		printf("======== Card detection to detect PCIE card! ========\n");
+#endif
+	} else {
+#if defined(BCMSDIO)
+		printf("======== Card detection to remove SDIO card! ========\n");
+#ifdef CUSTOMER_HW_PLATFORM
+		err = sdhci_force_presence_change(&sdmmc_channel, 0);
+#endif /* CUSTOMER_HW_PLATFORM */
+#elif defined(BCMPCIE)
+		printf("======== Card detection to remove PCIE card! ========\n");
+#endif
+	}
+#endif /* BUS_POWER_RESTORE */
+
+	return err;
+}
+
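+/* Platform callback to provide the WLAN MAC address; fills buf only when an
+ * EXAMPLE_GET_MAC variant is compiled in.
+ */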
+static int dhd_wlan_get_mac_addr(unsigned char *buf)
+{
+	int err = 0;
+
+	printf("======== %s ========\n", __FUNCTION__);
+#ifdef EXAMPLE_GET_MAC
+	/* EXAMPLE code */
+	{
+		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+	}
+#endif /* EXAMPLE_GET_MAC */
+#ifdef EXAMPLE_GET_MAC_VER2
+	/* EXAMPLE code */
+	{
+		char macpad[56]= {
+		0x00,0xaa,0x9c,0x84,0xc7,0xbc,0x9b,0xf6,
+		0x02,0x33,0xa9,0x4d,0x5c,0xb4,0x0a,0x5d,
+		0xa8,0xef,0xb0,0xcf,0x8e,0xbf,0x24,0x8a,
+		0x87,0x0f,0x6f,0x0d,0xeb,0x83,0x6a,0x70,
+		0x4a,0xeb,0xf6,0xe6,0x3c,0xe7,0x5f,0xfc,
+		0x0e,0xa7,0xb3,0x0f,0x00,0xe4,0x4a,0xaf,
+		0x87,0x08,0x16,0x6d,0x3a,0xe3,0xc7,0x80};
+		bcopy(macpad, buf+6, sizeof(macpad));
+	}
+#endif /* EXAMPLE_GET_MAC_VER2 */
+
+	return err;
+}
+
+static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = {
+	/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+	{"",   "XT", 49},  /* Universal if Country code is unknown or empty */
+	{"US", "US", 0},
+#endif /* EXAMPLE_TABLE */
+};
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+struct cntry_locales_custom brcm_wlan_translate_nodfs_table[] = {
+#ifdef EXAMPLE_TABLE
+	{"",   "XT", 50},  /* Universal if Country code is unknown or empty */
+	{"US", "US", 0},
+#endif /* EXAMPLE_TABLE */
+};
+#endif
+
+static void *dhd_wlan_get_country_code(char *ccode
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+	, u32 flags
+#endif
+)
+{
+	struct cntry_locales_custom *locales;
+	int size;
+	int i;
+
+	if (!ccode)
+		return NULL;
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+	if (flags & WLAN_PLAT_NODFS_FLAG) {
+		locales = brcm_wlan_translate_nodfs_table;
+		size = ARRAY_SIZE(brcm_wlan_translate_nodfs_table);
+	} else {
+#endif
+		locales = brcm_wlan_translate_custom_table;
+		size = ARRAY_SIZE(brcm_wlan_translate_custom_table);
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+	}
+#endif
+
+	for (i = 0; i < size; i++)
+		if (strcmp(ccode, locales[i].iso_abbrev) == 0)
+			return &locales[i];
+	return NULL;
+}
+
+struct resource dhd_wlan_resources[] = {
+	[0] = {
+		.name	= "bcmdhd_wlan_irq",
+		.start	= 0, /* Dummy */
+		.end	= 0, /* Dummy */
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE
+			| IORESOURCE_IRQ_HIGHLEVEL, /* Dummy */
+	},
+};
+
+struct wifi_platform_data dhd_wlan_control = {
+	.set_power	= dhd_wlan_set_power,
+	.set_reset	= dhd_wlan_set_reset,
+	.set_carddetect	= dhd_wlan_set_carddetect,
+	.get_mac_addr	= dhd_wlan_get_mac_addr,
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	.mem_prealloc	= dhd_wlan_mem_prealloc,
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	.get_country_code = dhd_wlan_get_country_code,
+};
+
+int dhd_wlan_init_gpio(void)
+{
+	int err = 0;
+#ifdef CUSTOMER_OOB
+	int host_oob_irq = -1;
+	uint host_oob_irq_flags = 0;
+#endif
+
+	/* Please check your schematic and fill in the right GPIO numbers connected to
+	 * WL_REG_ON and WL_HOST_WAKE.
+	 */
+	gpio_wl_reg_on = -1;
+#ifdef CUSTOMER_OOB
+	gpio_wl_host_wake = -1;
+#endif
+
+	if (gpio_wl_reg_on >= 0) {
+		err = gpio_request(gpio_wl_reg_on, "WL_REG_ON");
+		if (err < 0) {
+			printf("%s: gpio_request(%d) for WL_REG_ON failed\n",
+				__FUNCTION__, gpio_wl_reg_on);
+			gpio_wl_reg_on = -1;
+		}
+	}
+
+#ifdef CUSTOMER_OOB
+	if (gpio_wl_host_wake >= 0) {
+		err = gpio_request(gpio_wl_host_wake, "bcmdhd");
+		if (err < 0) {
+			printf("%s: gpio_request(%d) for WL_HOST_WAKE failed\n",
+				__FUNCTION__, gpio_wl_host_wake);
+			return -1;
+		}
+		err = gpio_direction_input(gpio_wl_host_wake);
+		if (err < 0) {
+			printf("%s: gpio_direction_input(%d) for WL_HOST_WAKE failed\n",
+				__FUNCTION__, gpio_wl_host_wake);
+			gpio_free(gpio_wl_host_wake);
+			return -1;
+		}
+		host_oob_irq = gpio_to_irq(gpio_wl_host_wake);
+		if (host_oob_irq < 0) {
+			printf("%s: gpio_to_irq(%d) for WL_HOST_WAKE failed\n",
+				__FUNCTION__, gpio_wl_host_wake);
+			gpio_free(gpio_wl_host_wake);
+			return -1;
+		}
+	}
+
+#ifdef HW_OOB
+#ifdef HW_OOB_LOW_LEVEL
+	host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+	host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#endif
+#else
+	host_oob_irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_SHAREABLE;
+#endif
+
+	dhd_wlan_resources[0].start = dhd_wlan_resources[0].end = host_oob_irq;
+	dhd_wlan_resources[0].flags = host_oob_irq_flags;
+	printf("%s: WL_HOST_WAKE=%d, oob_irq=%d, oob_irq_flags=0x%x\n", __FUNCTION__,
+		gpio_wl_host_wake, host_oob_irq, host_oob_irq_flags);
+#endif /* CUSTOMER_OOB */
+	printf("%s: WL_REG_ON=%d\n", __FUNCTION__, gpio_wl_reg_on);
+
+	return 0;
+}
+
+static void dhd_wlan_deinit_gpio(void)
+{
+	if (gpio_wl_reg_on >= 0) {
+		printf("%s: gpio_free(WL_REG_ON %d)\n", __FUNCTION__, gpio_wl_reg_on);
+		gpio_free(gpio_wl_reg_on);
+		gpio_wl_reg_on = -1;
+	}
+#ifdef CUSTOMER_OOB
+	if (gpio_wl_host_wake >= 0) {
+		printf("%s: gpio_free(WL_HOST_WAKE %d)\n", __FUNCTION__, gpio_wl_host_wake);
+		gpio_free(gpio_wl_host_wake);
+		gpio_wl_host_wake = -1;
+	}
+#endif /* CUSTOMER_OOB */
+}
+
+int dhd_wlan_init_plat_data(void)
+{
+	int err = 0;
+
+	printf("======== %s ========\n", __FUNCTION__);
+	err = dhd_wlan_init_gpio();
+	return err;
+}
+
+void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter)
+{
+	printf("======== %s ========\n", __FUNCTION__);
+	dhd_wlan_deinit_gpio();
+}
+
diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.c b/drivers/net/wireless/bcmdhd/dhd_ip.c
new file mode 100644
index 0000000..47cfe8a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_ip.c
@@ -0,0 +1,1334 @@
+/*
+ * IP Packet Parser Module.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_ip.c 700317 2017-05-18 15:13:29Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <ethernet.h>
+#include <vlan.h>
+#include <802.3.h>
+#include <bcmip.h>
+#include <bcmendian.h>
+
+#include <dhd_dbg.h>
+
+#include <dhd_ip.h>
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <bcmtcp.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+/* special values */
+/* 802.3 llc/snap header */
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
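+/** Classifies a packet as a non-fragment, first, continuation or last IPv4 fragment */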
+pkt_frag_t pkt_frag_info(osl_t *osh, void *p)
+{
+	uint8 *frame;
+	int length;
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int ipl;			/* IP frame length */
+	uint16 iph_frag;
+
+	ASSERT(osh && p);
+
+	frame = PKTDATA(osh, p);
+	length = PKTLEN(osh, p);
+
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
+		return DHD_PKT_FRAG_NONE;
+	} else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+	} else {
+		DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	ethertype = ntoh16(*(uint16 *)pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if (pt + ETHER_TYPE_LEN > frame + length) {
+			DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
+			return DHD_PKT_FRAG_NONE;
+		}
+
+		ethertype = ntoh16(*(uint16 *)pt);
+	}
+
+	if (ethertype != ETHER_TYPE_IP) {
+		DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
+			__FUNCTION__, ethertype, length));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
+	ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
+
+	/* We support IPv4 only */
+	if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) {
+		DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph_frag = ntoh16(iph->frag);
+
+	if (iph_frag & IPV4_FRAG_DONT) {
+		return DHD_PKT_FRAG_NONE;
+	} else if ((iph_frag & IPV4_FRAG_MORE) == 0) {
+		return DHD_PKT_FRAG_LAST;
+	} else {
+		return (iph_frag & IPV4_FRAG_OFFSET_MASK)? DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST;
+	}
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+
+typedef struct {
+	void *pkt_in_q;		/* TCP ACK packet that is already in txq or DelayQ */
+	void *pkt_ether_hdr;	/* Ethernet header pointer of pkt_in_q */
+	int ifidx;
+	uint8 supp_cnt;
+	dhd_pub_t *dhdp;
+	struct timer_list timer;
+} tcpack_info_t;
+
+typedef struct _tdata_psh_info_t {
+	uint32 end_seq;			/* end seq# of a received TCP PSH DATA pkt */
+	struct _tdata_psh_info_t *next;	/* next pointer of the link chain */
+} tdata_psh_info_t;
+
+typedef struct {
+	struct {
+		uint8 src[IPV4_ADDR_LEN];	/* SRC ip addrs of this TCP stream */
+		uint8 dst[IPV4_ADDR_LEN];	/* DST ip addrs of this TCP stream */
+	} ip_addr;
+	struct {
+		uint8 src[TCP_PORT_LEN];	/* SRC tcp ports of this TCP stream */
+		uint8 dst[TCP_PORT_LEN];	/* DST tcp ports of this TCP stream */
+	} tcp_port;
+	tdata_psh_info_t *tdata_psh_info_head;	/* Head of received TCP PSH DATA chain */
+	tdata_psh_info_t *tdata_psh_info_tail;	/* Tail of received TCP PSH DATA chain */
+	uint32 last_used_time;	/* The last time this tcpdata_info was used(in ms) */
+} tcpdata_info_t;
+
+/* TCPACK SUPPRESS module */
+typedef struct {
+	int tcpack_info_cnt;
+	tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM];	/* Info of TCP ACK to send */
+	int tcpdata_info_cnt;
+	tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM];	/* Info of received TCP DATA */
+	tdata_psh_info_t *tdata_psh_info_pool;	/* Pointer to tdata_psh_info elements pool */
+	tdata_psh_info_t *tdata_psh_info_free;	/* free tdata_psh_info elements chain in pool */
+#ifdef DHDTCPACK_SUP_DBG
+	int psh_info_enq_num;	/* Number of free TCP PSH DATA info elements in pool */
+#endif /* DHDTCPACK_SUP_DBG */
+} tcpack_sup_module_t;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1};
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
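+/** Returns a tdata_psh_info element to the pool's free list */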
+static void
+_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod,
+	tdata_psh_info_t *tdata_psh_info)
+{
+	if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) {
+		DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod, tdata_psh_info));
+		return;
+	}
+
+	ASSERT(tdata_psh_info->next == NULL);
+	tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free;
+	tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info;
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num++;
+#endif
+}
+
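+/** Takes a tdata_psh_info element from the pool's free list; returns NULL if the pool is empty */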
+static tdata_psh_info_t*
+_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info = NULL;
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod));
+		return NULL;
+	}
+
+	tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free;
+	if (tdata_psh_info == NULL)
+		DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__));
+	else {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+#ifdef DHDTCPACK_SUP_DBG
+		tcpack_sup_mod->psh_info_enq_num--;
+#endif /* DHDTCPACK_SUP_DBG */
+	}
+
+	return tdata_psh_info;
+}
+
+static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info_pool = NULL;
+	uint i;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL)
+		return BCME_ERROR;
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL);
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL);
+
+	tdata_psh_info_pool =
+		MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+
+	if (tdata_psh_info_pool == NULL)
+		return BCME_NOMEM;
+	bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num = 0;
+#endif /* DHDTCPACK_SUP_DBG */
+
+	/* Enqueue newly allocated tcpdata psh info elements to the pool */
+	for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++)
+		_tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]);
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL);
+	tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool;
+
+	return BCME_OK;
+}
+
+static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	uint i;
+	tdata_psh_info_t *tdata_psh_info;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n",
+			__FUNCTION__, __LINE__));
+		return;
+	}
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		/* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */
+		while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+		}
+		tcpdata_info->tdata_psh_info_tail = NULL;
+	}
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	i = 0;
+	/* Be sure we recollected all tdata_psh_info elements */
+	while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+		i++;
+	}
+	ASSERT(i == TCPDATA_PSH_INFO_MAXNUM);
+	MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool,
+		sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+	tcpack_sup_mod->tdata_psh_info_pool = NULL;
+
+	return;
+}
+
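+/** Timer callback for TCPACK_SUP_HOLD mode: sends out the TCP ACK that was being held back */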
+static void dhd_tcpack_send(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	ulong data
+#endif
+)
+{
+	tcpack_sup_module_t *tcpack_sup_mod;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	tcpack_info_t *cur_tbl = from_timer(cur_tbl, t, timer);
+#else
+	tcpack_info_t *cur_tbl = (tcpack_info_t *)data;
+#endif
+	dhd_pub_t *dhdp;
+	int ifidx;
+	void* pkt;
+	unsigned long flags;
+
+	if (!cur_tbl) {
+		return;
+	}
+
+	dhdp = cur_tbl->dhdp;
+	if (!dhdp) {
+		return;
+	}
+
+	flags = dhd_os_tcpacklock(dhdp);
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+			__FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp, flags);
+		return;
+	}
+	pkt = cur_tbl->pkt_in_q;
+	ifidx = cur_tbl->ifidx;
+	if (!pkt) {
+		dhd_os_tcpackunlock(dhdp, flags);
+		return;
+	}
+	cur_tbl->pkt_in_q = NULL;
+	cur_tbl->pkt_ether_hdr = NULL;
+	cur_tbl->ifidx = 0;
+	cur_tbl->supp_cnt = 0;
+	if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+		DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+			__FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+	}
+
+	dhd_os_tcpackunlock(dhdp, flags);
+
+	dhd_sendpkt(dhdp, ifidx, pkt);
+}
+
+int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
+{
+	int ret = BCME_OK;
+	unsigned long flags;
+	tcpack_sup_module_t *tcpack_sup_module;
+	uint8 invalid_mode = FALSE;
+	int prev_mode;
+	int i = 0;
+
+	flags = dhd_os_tcpacklock(dhdp);
+	tcpack_sup_module = dhdp->tcpack_sup_module;
+	prev_mode = dhdp->tcpack_sup_mode;
+
+	/* Check a new mode */
+	if (prev_mode == mode) {
+		DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
+		goto exit;
+	}
+
+	invalid_mode |= (mode >= TCPACK_SUP_LAST_MODE);
+#ifdef BCMSDIO
+	invalid_mode |= (mode == TCPACK_SUP_HOLD);
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+	invalid_mode |= ((mode == TCPACK_SUP_REPLACE) || (mode == TCPACK_SUP_DELAYTX));
+#endif /* BCMPCIE */
+
+	if (invalid_mode) {
+		DHD_ERROR(("%s %d: Invalid TCP ACK Suppress mode %d\n",
+			__FUNCTION__, __LINE__, mode));
+		ret = BCME_BADARG;
+		goto exit;
+	}
+
+	printf("%s: TCP ACK Suppress mode %d -> mode %d\n",
+		__FUNCTION__, dhdp->tcpack_sup_mode, mode);
+
+	/* Pre-processing for the new mode, based on the previous mode */
+	switch (prev_mode) {
+		case TCPACK_SUP_OFF:
+			if (tcpack_sup_module == NULL) {
+				tcpack_sup_module = MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t));
+				if (tcpack_sup_module == NULL) {
+					DHD_ERROR(("%s[%d]: Failed to allocate the new memory for "
+						"tcpack_sup_module\n", __FUNCTION__, __LINE__));
+					dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+					ret = BCME_NOMEM;
+					goto exit;
+				}
+				dhdp->tcpack_sup_module = tcpack_sup_module;
+			}
+			bzero(tcpack_sup_module, sizeof(tcpack_sup_module_t));
+			break;
+		case TCPACK_SUP_DELAYTX:
+			if (tcpack_sup_module) {
+				/* We won't need tdata_psh_info pool and
+				 * tcpdata_info_tbl anymore
+				 */
+				_tdata_psh_info_pool_deinit(dhdp, tcpack_sup_module);
+				tcpack_sup_module->tcpdata_info_cnt = 0;
+				bzero(tcpack_sup_module->tcpdata_info_tbl,
+					sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM);
+			}
+
+			/* For half duplex bus interface, tx precedes rx by default */
+			if (dhdp->bus) {
+				dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+			}
+
+			if (tcpack_sup_module == NULL) {
+				DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n",
+					__FUNCTION__, __LINE__));
+				dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+				goto exit;
+			}
+			break;
+	}
+
+	/* Update to the new mode */
+	dhdp->tcpack_sup_mode = mode;
+
+	/* Post-processing for the new mode */
+	switch (mode) {
+		case TCPACK_SUP_OFF:
+			ASSERT(tcpack_sup_module != NULL);
+			/* Clean up timer/data structure for
+			 * any remaining/pending packet or timer.
+			 */
+			if (tcpack_sup_module) {
+				/* Check if previous mode is TCPACK_SUP_HOLD */
+				if (prev_mode == TCPACK_SUP_HOLD) {
+					for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+						tcpack_info_t *tcpack_info_tbl =
+							&tcpack_sup_module->tcpack_info_tbl[i];
+						del_timer(&tcpack_info_tbl->timer);
+						if (tcpack_info_tbl->pkt_in_q) {
+							PKTFREE(dhdp->osh,
+								tcpack_info_tbl->pkt_in_q, TRUE);
+							tcpack_info_tbl->pkt_in_q = NULL;
+						}
+					}
+				}
+				MFREE(dhdp->osh, tcpack_sup_module, sizeof(tcpack_sup_module_t));
+				dhdp->tcpack_sup_module = NULL;
+			} else {
+				DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n",
+					__FUNCTION__, __LINE__));
+			}
+			break;
+		case TCPACK_SUP_REPLACE:
+			/* There is nothing to configure for this mode */
+			break;
+		case TCPACK_SUP_DELAYTX:
+			ret = _tdata_psh_info_pool_init(dhdp, tcpack_sup_module);
+			if (ret != BCME_OK) {
+				DHD_ERROR(("%s %d: pool init fail with %d\n",
+					__FUNCTION__, __LINE__, ret));
+				break;
+			}
+			if (dhdp->bus) {
+				dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
+			}
+			break;
+		case TCPACK_SUP_HOLD:
+			dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO;
+			dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME;
+			for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+				tcpack_info_t *tcpack_info_tbl =
+					&tcpack_sup_module->tcpack_info_tbl[i];
+				tcpack_info_tbl->dhdp = dhdp;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+				timer_setup(&tcpack_info_tbl->timer, dhd_tcpack_send, 0);
+#else
+				init_timer(&tcpack_info_tbl->timer);
+				tcpack_info_tbl->timer.data = (ulong)tcpack_info_tbl;
+				tcpack_info_tbl->timer.function = dhd_tcpack_send;
+#endif
+			}
+			break;
+	}
+
+exit:
+	dhd_os_tcpackunlock(dhdp, flags);
+	return ret;
+}
+
+void
+dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
+{
+	tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+	int i;
+	unsigned long flags;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	flags = dhd_os_tcpacklock(dhdp);
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+			__FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+		for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+			if (tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q) {
+				PKTFREE(dhdp->osh, tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q,
+					TRUE);
+				tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q = NULL;
+				tcpack_sup_mod->tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+				tcpack_sup_mod->tcpack_info_tbl[i].ifidx = 0;
+				tcpack_sup_mod->tcpack_info_tbl[i].supp_cnt = 0;
+			}
+		}
+	} else {
+		tcpack_sup_mod->tcpack_info_cnt = 0;
+		bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+	}
+
+	dhd_os_tcpackunlock(dhdp, flags);
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
+		for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+			del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer);
+		}
+	}
+
+exit:
+	return;
+}
+
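+/** Called when a queued packet is handed to the bus for transmit; removes any
+ * tcpack_info entry that still refers to it.
+ */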
+inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 i;
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int tbl_cnt;
+	int ret = BCME_OK;
+	void *pdata;
+	uint32 pktlen;
+	unsigned long flags;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	pdata = PKTDATA(dhdp->osh, pkt);
+	pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata);
+
+	if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+			__FUNCTION__, __LINE__, pktlen));
+		goto exit;
+	}
+
+	flags = dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+	tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM);
+
+	for (i = 0; i < tbl_cnt; i++) {
+		if (tcpack_info_tbl[i].pkt_in_q == pkt) {
+			DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n",
+				__FUNCTION__, __LINE__, pkt, i, tbl_cnt));
+			/* This pkt is being transmitted, so remove its tcp_ack_info entry. */
+			if (i < tbl_cnt - 1) {
+				bcopy(&tcpack_info_tbl[tbl_cnt - 1],
+					&tcpack_info_tbl[i], sizeof(tcpack_info_t));
+			}
+			bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t));
+			if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+				DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+					__FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+				ret = BCME_ERROR;
+			}
+			break;
+		}
+	}
+	dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+	return ret;
+}
+
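+/** In TCPACK_SUP_DELAYTX mode, returns TRUE if the given TCP ACK acknowledges a
+ * previously recorded TCP PSH DATA segment.
+ */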
+static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr,
+	uint8 *tcp_hdr, uint32 tcp_ack_num)
+{
+	tcpack_sup_module_t *tcpack_sup_mod;
+	int i;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info = NULL;
+	bool ret = FALSE;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+		" TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_ack_num));
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)),
+			ntoh16_ua(tcpdata_info_tmp->tcp_port.src),
+			ntoh16_ua(tcpdata_info_tmp->tcp_port.dst)));
+
+		/* Match the reversed IP addresses and TCP ports; skip this entry if any differ. */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
+			tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
+			tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) {
+			tcpdata_info = tcpdata_info_tmp;
+			break;
+		}
+	}
+
+	if (tcpdata_info == NULL) {
+		DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	if (tcpdata_info->tdata_psh_info_head == NULL) {
+		DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__));
+	}
+
+	while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+		if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) {
+			DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n",
+				__FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq));
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+			ret = TRUE;
+		} else
+			break;
+	}
+	if (tdata_psh_info == NULL)
+		tcpdata_info->tdata_psh_info_tail = NULL;
+
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+exit:
+	return ret;
+}
+
+bool
+dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *new_ether_hdr;	/* Ethernet header of the new packet */
+	uint16 new_ether_type;	/* Ethernet type of the new packet */
+	uint8 *new_ip_hdr;		/* IP header of the new packet */
+	uint8 *new_tcp_hdr;		/* TCP header of the new packet */
+	uint32 new_ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint32 new_tcp_ack_num;		/* TCP acknowledge number of the new packet */
+	uint16 new_ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 new_tcp_hdr_len;		/* TCP header length of the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int i;
+	bool ret = FALSE;
+	bool set_dotxinrx = TRUE;
+	unsigned long flags;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Length %d too short or too long to be a TCP ACK\n",
+			__FUNCTION__, __LINE__, cur_framelen));
+		goto exit;
+	}
+
+	new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+	if (new_ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, new_ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+	new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+	if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 or not TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+		goto exit;
+	}
+
+	new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+	cur_framelen -= new_ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	/* is it an ack ? Allow only ACK flag, not to suppress others. */
+	if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+		DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+			__FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+		goto exit;
+	}
+
+	new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+	new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet has TCP data, so just send */
+	if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+	new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+	DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+	/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+	flags = dhd_os_tcpacklock(dhdp);
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	counter_printlog(&tack_tbl);
+	tack_tbl.cnt[0]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	/* Check the module pointer before dereferencing it for the info table */
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) {
+		/* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[5]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else
+		set_dotxinrx = FALSE;
+
+	for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) {
+		void *oldpkt;	/* TCPACK packet that is already in txq or DelayQ */
+		uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+		uint32 old_ip_hdr_len, old_tcp_hdr_len;
+		uint32 old_tcpack_num;	/* TCP ACK number of old TCPACK packet in Q */
+
+		if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+			DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+			DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+		old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+		old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+		old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+		old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+		DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either of IP address or TCP port number does not match, skip.
+		 * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+		 * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+		 */
+		if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+			&old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+			memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+			&old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2))
+			continue;
+
+		old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+		if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) {
+			/* New packet has higher TCP ACK number, so it replaces the old packet */
+			if (new_ip_hdr_len == old_ip_hdr_len &&
+				new_tcp_hdr_len == old_tcp_hdr_len) {
+				ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0);
+				bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len);
+				PKTFREE(dhdp->osh, pkt, FALSE);
+				DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n",
+					__FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num));
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+				tack_tbl.cnt[2]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				ret = TRUE;
+			} else {
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+				tack_tbl.cnt[6]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				DHD_TRACE(("%s %d: length mismatch %d != %d || %d != %d"
+					" ACK %u -> %u\n", __FUNCTION__, __LINE__,
+					new_ip_hdr_len, old_ip_hdr_len,
+					new_tcp_hdr_len, old_tcp_hdr_len,
+					old_tcpack_num, new_tcp_ack_num));
+			}
+		} else if (new_tcp_ack_num == old_tcpack_num) {
+			set_dotxinrx = TRUE;
+			/* TCPACK retransmission */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+			tack_tbl.cnt[3]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+		} else {
+			DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n",
+				__FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
+				new_tcp_ack_num, pkt));
+		}
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+
+	if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) {
+		/* No TCPACK packet with the same IP addr and TCP port is found
+		 * in tcp_ack_info_tbl. So add this packet to the table.
+		 */
+		DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+			__FUNCTION__, __LINE__, pkt, new_ether_hdr,
+			tcpack_sup_mod->tcpack_info_cnt));
+
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt;
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr;
+		tcpack_sup_mod->tcpack_info_cnt++;
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[1]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else {
+		ASSERT(i == tcpack_sup_mod->tcpack_info_cnt);
+		DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+			__FUNCTION__, __LINE__));
+	}
+	dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+	/* Unless in TCPACK_SUP_DELAYTX mode, dotxinrx is always TRUE, so no need to set it here */
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx)
+		dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+
+	return ret;
+}
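+
+/*
+ * Hedged usage sketch (an assumption, not taken from the original sources):
+ * the transmit path is expected to run every outgoing packet through
+ * dhd_tcpack_suppress() before handing it to the bus. A TRUE return means
+ * the pure ACK was folded into an older queued ACK and the new packet was
+ * freed here, so the caller must neither send nor free it again. Roughly:
+ *
+ *	if (dhd_tcpack_suppress(dhdp, pktbuf)) {
+ *		return BCME_OK;		(packet already consumed above)
+ *	}
+ *	dhd_bus_txdata(dhdp->bus, pktbuf);
+ *
+ * The exact call site and the bus TX entry point named here are assumptions;
+ * see the TX path in dhd_linux.c for the driver's actual wiring.
+ */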
+
+bool
+dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *ether_hdr;	/* Ethernet header of the new packet */
+	uint16 ether_type;	/* Ethernet type of the new packet */
+	uint8 *ip_hdr;		/* IP header of the new packet */
+	uint8 *tcp_hdr;		/* TCP header of the new packet */
+	uint32 ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint16 ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 tcp_hdr_len;		/* TCP header length of the new packet */
+	uint32 tcp_seq_num;		/* TCP sequence number of the new packet */
+	uint16 tcp_data_len;	/* TCP DATA length that excludes IP and TCP headers */
+	uint32 end_tcp_seq_num;	/* TCP seq number of the last byte in the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info;
+
+	int i;
+	bool ret = FALSE;
+	unsigned long flags;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+	if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type));
+
+	ip_hdr = ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	ip_hdr_len = IPV4_HLEN(ip_hdr);
+	if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 or not TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+		goto exit;
+	}
+
+	tcp_hdr = ip_hdr + ip_hdr_len;
+	cur_framelen -= ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]);
+	tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet is mere TCP ACK, so do nothing */
+	if (ip_total_len == ip_hdr_len + tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len);
+
+	if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) {
+		DHD_TRACE(("%s %d: TCP DATA packet without PSH, not of interest\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_hdr[TCP_FLAGS_OFFSET]));
+
+	flags = dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+
+	/* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */
+	i = 0;
+	while (i < tcpack_sup_mod->tcpdata_info_cnt) {
+		tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		uint32 now_in_ms = OSL_SYSUPTIME();
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)),
+			ntoh16_ua(tdata_info_tmp->tcp_port.src),
+			ntoh16_ua(tdata_info_tmp->tcp_port.dst)));
+
+		/* If both IP address and TCP port number match, we found it so break.
+		 * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+		 * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+		 */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			(void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			(void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) {
+			tcpdata_info = tdata_info_tmp;
+			tcpdata_info->last_used_time = now_in_ms;
+			break;
+		}
+
+		if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) {
+			tdata_psh_info_t *tdata_psh_info_tmp;
+			tcpdata_info_t *last_tdata_info;
+
+			while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) {
+				tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next;
+				tdata_psh_info_tmp->next = NULL;
+				DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n",
+					__FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq));
+				_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp);
+			}
+#ifdef DHDTCPACK_SUP_DBG
+			DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+				__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+			tcpack_sup_mod->tcpdata_info_cnt--;
+			ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0);
+
+			last_tdata_info =
+				&tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt];
+			if (i < tcpack_sup_mod->tcpdata_info_cnt) {
+				ASSERT(last_tdata_info != tdata_info_tmp);
+				bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
+			}
+			bzero(last_tdata_info, sizeof(tcpdata_info_t));
+			DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
+			/* Don't increase "i" here, so that the prev last tcpdata_info is checked */
+		} else
+			 i++;
+	}
+
+	tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]);
+	tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len;
+	end_tcp_seq_num = tcp_seq_num + tcp_data_len;
+
+	if (tcpdata_info == NULL) {
+		ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt);
+		if (i >= TCPDATA_INFO_MAXNUM) {
+			DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d"
+				" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt,
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+				ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+				ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+			dhd_os_tcpackunlock(dhdp, flags);
+			goto exit;
+		}
+		tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+
+		/* No TCP flow with the same IP addr and TCP port is found
+		 * in tcp_data_info_tbl. So add this flow to the table.
+		 */
+		DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n",
+			__FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+		/* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+		 * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+		 */
+		bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr,
+			IPV4_ADDR_LEN * 2);
+		bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port,
+			TCP_PORT_LEN * 2);
+
+		tcpdata_info->last_used_time = OSL_SYSUPTIME();
+		tcpack_sup_mod->tcpdata_info_cnt++;
+	}
+
+	ASSERT(tcpdata_info != NULL);
+
+	tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod);
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	if (tdata_psh_info == NULL) {
+		DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+	tdata_psh_info->end_seq = end_tcp_seq_num;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	tack_tbl.cnt[4]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n",
+		__FUNCTION__, __LINE__, tdata_psh_info->end_seq));
+
+	ASSERT(tdata_psh_info->next == NULL);
+
+	if (tcpdata_info->tdata_psh_info_head == NULL)
+		tcpdata_info->tdata_psh_info_head = tdata_psh_info;
+	else {
+		ASSERT(tcpdata_info->tdata_psh_info_tail);
+		tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info;
+	}
+	tcpdata_info->tdata_psh_info_tail = tdata_psh_info;
+
+	dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+	return ret;
+}
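+
+/*
+ * Illustrative note (an assumption, not from the original sources): the flow
+ * aging check above, "now_in_ms - last_used_time > TCPDATA_INFO_TIMEOUT",
+ * relies on unsigned 32-bit subtraction and therefore keeps working when
+ * OSL_SYSUPTIME() wraps around, e.g.
+ *
+ *	now_in_ms      = 0x00000100	(uptime counter has wrapped)
+ *	last_used_time = 0xFFFFFF00	(flow last used just before the wrap)
+ *	now_in_ms - last_used_time == 0x200 (512 ms), which is compared
+ *	against the 5000 ms TCPDATA_INFO_TIMEOUT as intended.
+ */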
+
+bool
+dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+	uint8 *new_ether_hdr;	/* Ethernet header of the new packet */
+	uint16 new_ether_type;	/* Ethernet type of the new packet */
+	uint8 *new_ip_hdr;		/* IP header of the new packet */
+	uint8 *new_tcp_hdr;		/* TCP header of the new packet */
+	uint32 new_ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint32 new_tcp_ack_num;		/* TCP acknowledge number of the new packet */
+	uint16 new_ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 new_tcp_hdr_len;		/* TCP header length of the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int i, free_slot = TCPACK_INFO_MAXNUM;
+	bool hold = FALSE;
+	unsigned long flags;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+		goto exit;
+	}
+
+	if (dhdp->tcpack_sup_ratio == 1) {
+		goto exit;
+	}
+
+	new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Length %d too short or too long to be a TCP ACK\n",
+			__FUNCTION__, __LINE__, cur_framelen));
+		goto exit;
+	}
+
+	new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+	if (new_ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, new_ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+	new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+	if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 or not TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+		goto exit;
+	}
+
+	new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+	cur_framelen -= new_ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	/* is it an ack ? Allow only ACK flag, not to suppress others. */
+	if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+		DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+			__FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+		goto exit;
+	}
+
+	new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+	new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet has TCP data, so just send */
+	if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+	new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+	DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+	/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+	flags = dhd_os_tcpacklock(dhdp);
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	/* Check the module pointer before dereferencing it for the info table */
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp, flags);
+		goto exit;
+	}
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	hold = TRUE;
+
+	for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
+		void *oldpkt;	/* TCPACK packet that is already in txq or DelayQ */
+		uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+		uint32 old_ip_hdr_len;
+		uint32 old_tcpack_num;	/* TCP ACK number of old TCPACK packet in Q */
+
+		if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+			if (free_slot == TCPACK_INFO_MAXNUM) {
+				free_slot = i;
+			}
+			continue;
+		}
+
+		if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+			DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n",
+				__FUNCTION__, __LINE__, i));
+			hold = FALSE;
+			dhd_os_tcpackunlock(dhdp, flags);
+			goto exit;
+		}
+
+		old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+		old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+		old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+		old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+
+		DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either of IP address or TCP port number does not match, skip. */
+		if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+			&old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+			memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+			&old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) {
+			continue;
+		}
+
+		old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+		if (IS_TCPSEQ_GE(new_tcp_ack_num, old_tcpack_num)) {
+			tcpack_info_tbl[i].supp_cnt++;
+			if (tcpack_info_tbl[i].supp_cnt >= dhdp->tcpack_sup_ratio) {
+				tcpack_info_tbl[i].pkt_in_q = NULL;
+				tcpack_info_tbl[i].pkt_ether_hdr = NULL;
+				tcpack_info_tbl[i].ifidx = 0;
+				tcpack_info_tbl[i].supp_cnt = 0;
+				hold = FALSE;
+			} else {
+				tcpack_info_tbl[i].pkt_in_q = pkt;
+				tcpack_info_tbl[i].pkt_ether_hdr = new_ether_hdr;
+				tcpack_info_tbl[i].ifidx = ifidx;
+			}
+			PKTFREE(dhdp->osh, oldpkt, TRUE);
+		} else {
+			PKTFREE(dhdp->osh, pkt, TRUE);
+		}
+		dhd_os_tcpackunlock(dhdp, flags);
+
+		if (!hold) {
+			del_timer_sync(&tcpack_info_tbl[i].timer);
+		}
+		goto exit;
+	}
+
+	if (free_slot < TCPACK_INFO_MAXNUM) {
+		/* No TCPACK packet with the same IP addr and TCP port is found
+		 * in tcp_ack_info_tbl. So add this packet to the table.
+		 */
+		DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+			__FUNCTION__, __LINE__, pkt, new_ether_hdr,
+			free_slot));
+
+		tcpack_info_tbl[free_slot].pkt_in_q = pkt;
+		tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr;
+		tcpack_info_tbl[free_slot].ifidx = ifidx;
+		tcpack_info_tbl[free_slot].supp_cnt = 1;
+		mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
+			jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay));
+		tcpack_sup_mod->tcpack_info_cnt++;
+	} else {
+		DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+			__FUNCTION__, __LINE__));
+	}
+	dhd_os_tcpackunlock(dhdp, flags);
+
+exit:
+	return hold;
+}
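+
+/*
+ * Hedged sketch (an assumption, not from the original sources): each
+ * tcpack_info_tbl[] slot armed with mod_timer() above presumably has a
+ * matching timer callback that flushes the held ACK if no newer ACK for the
+ * same flow arrives within tcpack_sup_delay ms, along the lines of:
+ *
+ *	static void dhd_tcpack_send(ulong data)
+ *	{
+ *		tcpack_info_t *cur_tbl = (tcpack_info_t *)data;
+ *		...take the tcpack lock, detach cur_tbl->pkt_in_q, clear the
+ *		   slot, release the lock, then hand the packet to the bus TX
+ *		   path using cur_tbl->ifidx...
+ *	}
+ *
+ * The callback name, its argument convention and the exact TX entry point
+ * are hypothetical here; the real handler is registered wherever the
+ * per-slot timers are initialized (presumably dhd_tcpack_suppress_set(),
+ * which is not part of this hunk).
+ */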
+#endif /* DHDTCPACK_SUPPRESS */
diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.h b/drivers/net/wireless/bcmdhd/dhd_ip.h
new file mode 100644
index 0000000..240d8521
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_ip.h
@@ -0,0 +1,85 @@
+/*
+ * Header file describing the common IP parser functions.
+ *
+ * Provides type definitions and function prototypes used to parse IP packets.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_ip.h 536854 2015-02-24 13:17:29Z $
+ */
+
+#ifndef _dhd_ip_h_
+#define _dhd_ip_h_
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+typedef enum pkt_frag
+{
+	DHD_PKT_FRAG_NONE = 0,
+	DHD_PKT_FRAG_FIRST,
+	DHD_PKT_FRAG_CONT,
+	DHD_PKT_FRAG_LAST
+} pkt_frag_t;
+
+extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+
+#ifdef DHDTCPACK_SUPPRESS
+#define	TCPACKSZMIN	(ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
+/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
+#define	TCPACKSZMAX	(TCPACKSZMIN + 100)
+
+/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */
+#define TCPACK_INFO_MAXNUM 4
+#define TCPDATA_INFO_MAXNUM 4
+#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM)
+
+#define TCPDATA_INFO_TIMEOUT 5000	/* Remove tcpdata_info if inactive for this time (in ms) */
+
+#define DEFAULT_TCPACK_SUPP_RATIO 3
+#ifndef CUSTOM_TCPACK_SUPP_RATIO
+#define CUSTOM_TCPACK_SUPP_RATIO DEFAULT_TCPACK_SUPP_RATIO
+#endif /* CUSTOM_TCPACK_SUPP_RATIO */
+
+#define DEFAULT_TCPACK_DELAY_TIME 10 /* ms */
+#ifndef CUSTOM_TCPACK_DELAY_TIME
+#define CUSTOM_TCPACK_DELAY_TIME DEFAULT_TCPACK_DELAY_TIME
+#endif /* CUSTOM_TCPACK_DELAY_TIME */
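+
+/* Illustrative note (an assumption, not from the original sources): the
+ * CUSTOM_* values above are meant to be overridden per platform at build
+ * time without editing this header, e.g. from the driver Makefile:
+ *
+ *	ccflags-y += -DCUSTOM_TCPACK_SUPP_RATIO=5
+ *	ccflags-y += -DCUSTOM_TCPACK_DELAY_TIME=20
+ *
+ * which lets TCPACK_SUP_HOLD suppress more consecutive ACKs per flow and
+ * hold each pending ACK for up to 20 ms before it is flushed.
+ */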
+
+extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
+extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
+extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx);
+/* #define DHDTCPACK_SUP_DBG */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+extern counter_tbl_t tack_tbl;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#endif /* _dhd_ip_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
new file mode 100644
index 0000000..0a47fa4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -0,0 +1,19759 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux.c 710862 2017-07-14 07:43:59Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <net/addrconf.h>
+#include <uapi/linux/sched/types.h>
+#ifdef ENABLE_ADAPTIVE_SCHED
+#include <linux/cpufreq.h>
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+
+#include <ethernet.h>
+#include <bcmevent.h>
+#include <vlan.h>
+#include <802.3.h>
+
+#include <dngl_stats.h>
+#include <dhd_linux_wq.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#ifdef DHD_WET
+#include <dhd_wet.h>
+#endif /* DHD_WET */
+#ifdef PCIE_FULL_DONGLE
+#include <dhd_flowring.h>
+#endif
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_config.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS8895)
+#include <linux/exynos-pci-ctrl.h>
+#endif /* CONFIG_SOC_EXYNOS8895 */
+
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+#ifdef DHD_L2_FILTER
+#include <bcmicmp.h>
+#include <bcm_l2_filter.h>
+#include <dhd_l2_filter.h>
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_PSTA
+#include <dhd_psta.h>
+#endif /* DHD_PSTA */
+
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+#include <dhd_daemon.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+#if defined(STAT_REPORT)
+#include <wl_statreport.h>
+#endif /* STAT_REPORT */
+#ifdef DHD_DEBUG_PAGEALLOC
+typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
+void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
+extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+
+#if defined(DHD_LB)
+#if !defined(PCIE_FULL_DONGLE)
+#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
+#endif /* !PCIE_FULL_DONGLE */
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
+	defined(DHD_LB_STATS)
+#if !defined(DHD_LB)
+#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
+#endif /* !DHD_LB */
+#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
+
+#if defined(DHD_LB)
+/* Dynamic CPU selection for load balancing */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#if !defined(DHD_LB_PRIMARY_CPUS)
+#define DHD_LB_PRIMARY_CPUS     0x0 /* Big CPU coreids mask */
+#endif
+#if !defined(DHD_LB_SECONDARY_CPUS)
+#define DHD_LB_SECONDARY_CPUS   0xFE /* Little CPU coreids mask */
+#endif
+
+#define HIST_BIN_SIZE	9
+
+static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
+
+#if defined(DHD_LB_TXP)
+static void dhd_lb_tx_handler(unsigned long data);
+static void dhd_tx_dispatcher_work(struct work_struct * work);
+static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
+static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
+
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_tx_lb_pkttag_fr {
+	struct net_device *net;
+	int ifidx;
+} dhd_tx_lb_pkttag_fr_t;
+
+#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp)	((tag)->net = netdevp)
+#define DHD_LB_TX_PKTTAG_NETDEV(tag)			((tag)->net)
+
+#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx)	((tag)->ifidx = ifidx)
+#define DHD_LB_TX_PKTTAG_IFIDX(tag)		((tag)->ifidx)
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+
+#ifdef HOFFLOAD_MODULES
+#include <linux/firmware.h>
+#endif
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200    /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
+#define TSMAX  1000        /* max no. of timing record kept   */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+	uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+#ifdef WL_MONITOR
+#include <bcmmsgbuf.h>
+#include <bcmwifi_monitor.h>
+#endif
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef STBLINUX
+#ifdef quote_str
+#undef quote_str
+#endif /* quote_str */
+#ifdef to_str
+#undef to_str
+#endif /* quote_str */
+#define to_str(s) #s
+#define quote_str(s) to_str(s)
+
+static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
+#endif /* STBLINUX */
+
+
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+extern void dhd_dump_eapol_4way_message(dhd_pub_t *dhd, char *ifname,
+	char *dump_data, bool direction);
+
+#ifdef FIX_CPU_MIN_CLOCK
+#include <linux/pm_qos.h>
+#endif /* FIX_CPU_MIN_CLOCK */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
+#define CONFIG_DHD_SET_RANDOM_MAC_VAL	0x001A11
+#endif
+static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+#define DEFAULT_CPUFREQ_THRESH		1000000	/* threshold frequency : 1000000 = 1GHz */
+#ifndef CUSTOM_CPUFREQ_THRESH
+#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
+#endif /* CUSTOM_CPUFREQ_THRESH */
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef BCM_FD_AGGR
+#include <bcm_rpc.h>
+#include <bcm_rpc_tp.h>
+#endif
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
+
+/* Maximum STA per radio */
+#define DHD_MAX_STA     32
+
+
+
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio)  wme_fifo2ac[prio2fifo[(prio)]]
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inetaddr_notifier = {
+	.notifier_call = dhd_inetaddr_notifier_call
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inetaddr_notifier_registered = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+int dhd_inet6addr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inet6addr_notifier = {
+	.notifier_call = dhd_inet6addr_notifier_call
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inet6addr_notifier_registered = FALSE;
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
+#endif 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL and additional rights");
+#endif /* LinuxVer */
+
+#if defined(MULTIPLE_SUPPLICANT)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+DEFINE_MUTEX(_dhd_mutex_lock_);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+#endif 
+
+#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
+#define MAX_CONSECUTIVE_HANG_COUNTS 5
+#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
+
+#include <dhd_bus.h>
+
+#ifdef DHD_ULP
+#include <dhd_ulp.h>
+#endif /* DHD_ULP */
+
+#ifdef BCM_FD_AGGR
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
+#else
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+#endif /* BCM_FD_AGGR */
+
+#ifdef PROP_TXSTATUS
+extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
+extern void dhd_wlfc_plat_init(void *dhd);
+extern void dhd_wlfc_plat_deinit(void *dhd);
+#endif /* PROP_TXSTATUS */
+extern uint sd_f2_blocksize;
+extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+	return "";
+}
+#endif	/* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t  g_wl_iw_params;
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef CONFIG_PARTIALSUSPEND_SLP
+#include <linux/partialsuspend_slp.h>
+#define CONFIG_HAS_EARLYSUSPEND
+#define DHD_USE_EARLYSUSPEND
+#define register_early_suspend		register_pre_suspend
+#define unregister_early_suspend	unregister_pre_suspend
+#define early_suspend				pre_suspend
+#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN		50
+#else
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+#endif /* CONFIG_PARTIALSUSPEND_SLP */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+#include <linux/nl80211.h>
+#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+
+#if defined(BCMPCIE)
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
+#else
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+#endif /* OEM_ANDROID && BCMPCIE */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
+	u8* program, uint32 program_len);
+static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
+	uint32 mode, uint32 enable);
+static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
+#endif /* PKT_FILTER_SUPPORT && APF */
+
+
+
+static INLINE int argos_register_notifier_init(struct net_device *net) { return 0;}
+static INLINE int argos_register_notifier_deinit(void) { return 0;}
+
+#if defined(BT_OVER_SDIO)
+extern void wl_android_set_wifi_on_flag(bool enable);
+#endif /* BT_OVER_SDIO */
+
+
+#if defined(TRAFFIC_MGMT_DWM)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf);
+#endif 
+
+#ifdef DHD_FW_COREDUMP
+static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
+#endif /* DHD_FW_COREDUMP */
+#ifdef DHD_LOG_DUMP
+#define DLD_BUFFER_NUM  2
+/* [0]: General, [1]: Special */
+struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
+static const int dld_buf_size[] = {
+	(1024 * 1024),	/* DHD_LOG_DUMP_BUFFER_SIZE */
+	(8 * 1024)	/* DHD_LOG_DUMP_BUFFER_EX_SIZE */
+};
+static void dhd_log_dump_init(dhd_pub_t *dhd);
+static void dhd_log_dump_deinit(dhd_pub_t *dhd);
+static void dhd_log_dump(void *handle, void *event_info, u8 event);
+void dhd_schedule_log_dump(dhd_pub_t *dhdp);
+static int do_dhd_log_dump(dhd_pub_t *dhdp);
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_DEBUG_UART
+#include <linux/kmod.h>
+#define DHD_DEBUG_UART_EXEC_PATH	"/system/bin/wldu"
+static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
+static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
+#endif	/* DHD_DEBUG_UART */
+
+static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
+static struct notifier_block dhd_reboot_notifier = {
+	.notifier_call = dhd_reboot_callback,
+	.priority = 1,
+};
+
+#ifdef BCMPCIE
+static int is_reboot = 0;
+#endif /* BCMPCIE */
+
+#if defined(BT_OVER_SDIO)
+#include "dhd_bt_interface.h"
+dhd_pub_t	*g_dhd_pub = NULL;
+#endif /* defined (BT_OVER_SDIO) */
+
+atomic_t exit_in_progress = ATOMIC_INIT(0);
+
+typedef struct dhd_if_event {
+	struct list_head	list;
+	wl_event_data_if_t	event;
+	char			name[IFNAMSIZ+1];
+	uint8			mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+	struct dhd_info *info;			/* back pointer to dhd_info */
+	/* OS/stack specifics */
+	struct net_device *net;
+	int				idx;			/* iface idx in dongle */
+	uint			subunit;		/* subunit */
+	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	bool			set_macaddress;
+	bool			set_multicast;
+	uint8			bssidx;			/* bsscfg index for the interface */
+	bool			attached;		/* Delayed attachment when unset */
+	bool			txflowcontrol;	/* Per interface flow control indicator */
+	char			name[IFNAMSIZ+1]; /* linux interface name */
+	char			dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
+	struct net_device_stats stats;
+#ifdef DHD_WMF
+	dhd_wmf_t		wmf;		/* per bsscfg wmf setting */
+	bool	wmf_psta_disable;		/* enable/disable MC pkt to each mac
+						 * of MC group behind PSTA
+						 */
+#endif /* DHD_WMF */
+#ifdef PCIE_FULL_DONGLE
+	struct list_head sta_list;		/* sll of associated stations */
+#if !defined(BCM_GMAC3)
+	spinlock_t	sta_list_lock;		/* lock for manipulating sll */
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+	uint32  ap_isolate;			/* ap-isolation settings */
+#ifdef DHD_L2_FILTER
+	bool parp_enable;
+	bool parp_discard;
+	bool parp_allnode;
+	arp_table_t *phnd_arp_table;
+	/* for Per BSS modification */
+	bool dhcp_unicast;
+	bool block_ping;
+	bool grat_arp;
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_MCAST_REGEN
+	bool mcast_regen_bss_enable;
+#endif
+	bool rx_pkt_chainable;		/* set all rx packet to chainable config by default */
+	cumm_ctr_t cumm_ctr;			/* cumulative queue length of child flowrings */
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+	uint32 low;
+	uint32 high;
+} tsf_t;
+
+typedef struct {
+	uint32 last_cycle;
+	uint32 last_sec;
+	uint32 last_tsf;
+	uint32 coef;     /* scaling factor */
+	uint32 coefdec1; /* first decimal  */
+	uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+	uint32 t1;
+	uint32 t2;
+	uint32 t3;
+	uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif  /* WLMEDIA_HTSF */
+
+struct ipv6_work_info_t {
+	uint8			if_idx;
+	char			ipv6_addr[IPV6_ADDR_LEN];
+	unsigned long		event;
+};
+static void dhd_process_daemon_msg(struct sk_buff *skb);
+static void dhd_destroy_to_notifier_skt(void);
+static int dhd_create_to_notifier_skt(void);
+static struct sock *nl_to_event_sk = NULL;
+int sender_pid = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+struct netlink_kernel_cfg g_cfg = {
+	.groups = 1,
+	.input = dhd_process_daemon_msg,
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
+
+typedef struct dhd_dump {
+	uint8 *buf;
+	int bufsize;
+} dhd_dump_t;
+
+
+/* When Perimeter locks are deployed, any blocking calls must be preceded
+ * with a PERIM UNLOCK and followed by a PERIM LOCK.
+ * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
+ * wait_event_timeout().
+ */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+	wl_iw_t		iw;		/* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+	dhd_pub_t pub;
+	dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
+
+	wifi_adapter_info_t *adapter;			/* adapter information, interrupt, fw path etc. */
+	char fw_path[PATH_MAX];		/* path to firmware image */
+	char nv_path[PATH_MAX];		/* path to nvram vars file */
+	char clm_path[PATH_MAX];		/* path to clm vars file */
+	char conf_path[PATH_MAX];	/* path to config vars file */
+#ifdef DHD_UCODE_DOWNLOAD
+	char uc_path[PATH_MAX];	/* path to ucode image */
+#endif /* DHD_UCODE_DOWNLOAD */
+
+	/* serialize dhd iovars */
+	struct mutex dhd_iovar_mutex;
+
+	struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+	spinlock_t	wlfc_spinlock;
+
+#ifdef BCMDBUS
+	ulong		wlfc_lock_flags;
+	ulong		wlfc_pub_lock_flags;
+#endif /* BCMDBUS */
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+	htsf_t  htsf;
+#endif
+	wait_queue_head_t ioctl_resp_wait;
+	wait_queue_head_t d3ack_wait;
+	wait_queue_head_t dhd_bus_busy_state_wait;
+	uint32	default_wd_interval;
+
+	struct timer_list timer;
+	bool wd_timer_valid;
+#ifdef DHD_PCIE_RUNTIMEPM
+	struct timer_list rpm_timer;
+	bool rpm_timer_valid;
+	tsk_ctl_t	  thr_rpm_ctl;
+#endif /* DHD_PCIE_RUNTIMEPM */
+	struct tasklet_struct tasklet;
+	spinlock_t	sdlock;
+	spinlock_t	txqlock;
+	spinlock_t	rxqlock;
+	spinlock_t	dhd_lock;
+#ifdef BCMDBUS
+	ulong		txqlock_flags;
+#else
+
+	struct semaphore sdsem;
+	tsk_ctl_t	thr_dpc_ctl;
+	tsk_ctl_t	thr_wdt_ctl;
+#endif /* BCMDBUS */
+
+	tsk_ctl_t	thr_rxf_ctl;
+	spinlock_t	rxf_lock;
+	bool		rxthread_enabled;
+
+	/* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	struct wake_lock wl_wifi;   /* Wifi wakelock */
+	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
+	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+	struct wake_lock wl_evtwake; /* Wifi event wakelock */
+	struct wake_lock wl_pmwake;   /* Wifi pm handler wakelock */
+	struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	struct wake_lock wl_intrwake; /* Host wakeup wakelock */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+	struct wake_lock wl_scanwake;  /* Wifi scan wakelock */
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	/* net_device interface lock, prevent race conditions among net_dev interface
+	 * calls and wifi_on or wifi_off
+	 */
+	struct mutex dhd_net_if_mutex;
+	struct mutex dhd_suspend_mutex;
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+	struct mutex dhd_apf_mutex;
+#endif /* PKT_FILTER_SUPPORT && APF */
+#endif 
+	spinlock_t wakelock_spinlock;
+	spinlock_t wakelock_evt_spinlock;
+	uint32 wakelock_counter;
+	int wakelock_wd_counter;
+	int wakelock_rx_timeout_enable;
+	int wakelock_ctrl_timeout_enable;
+	bool waive_wakelock;
+	uint32 wakelock_before_waive;
+
+	/* Thread to issue ioctl for multicast */
+	wait_queue_head_t ctrl_wait;
+	atomic_t pend_8021x_cnt;
+	dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+	dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef BCM_FD_AGGR
+	void *rpc_th;
+	void *rpc_osh;
+	struct timer_list rpcth_timer;
+	bool rpcth_timer_active;
+	uint8 fdaggr;
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+	spinlock_t	tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef FIX_CPU_MIN_CLOCK
+	bool cpufreq_fix_status;
+	struct mutex cpufreq_fix;
+	struct pm_qos_request dhd_cpu_qos;
+#ifdef FIX_BUS_MIN_CLOCK
+	struct pm_qos_request dhd_bus_qos;
+#endif /* FIX_BUS_MIN_CLOCK */
+#endif /* FIX_CPU_MIN_CLOCK */
+	void			*dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+	struct notifier_block freq_trans;
+	int __percpu *new_freq;
+#endif
+	unsigned int unit;
+	struct notifier_block pm_notifier;
+#ifdef DHD_PSTA
+	uint32	psta_mode;	/* PSTA or PSR */
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+	        uint32  wet_mode;
+#endif /* DHD_WET */
+#ifdef DHD_DEBUG
+	dhd_dump_t *dump;
+	struct timer_list join_timer;
+	u32 join_timeout_val;
+	bool join_timer_active;
+	uint scan_time_count;
+	struct timer_list scan_timer;
+	bool scan_timer_active;
+#endif
+#if defined(DHD_LB)
+	/* CPU Load Balance dynamic CPU selection */
+
+	/* Variable that tracks the current CPUs available for candidacy */
+	cpumask_var_t cpumask_curr_avail;
+
+	/* Primary and secondary CPU mask */
+	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
+	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
+
+	struct notifier_block cpu_notifier;
+
+	/* Tasklet to handle Tx Completion packet freeing */
+	struct tasklet_struct tx_compl_tasklet;
+	atomic_t                   tx_compl_cpu;
+
+	/* Tasklet to handle RxBuf Post during Rx completion */
+	struct tasklet_struct rx_compl_tasklet;
+	atomic_t                   rx_compl_cpu;
+
+	/* Napi struct for handling rx packet sendup. Packets are removed from
+	 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
+	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
+	 * to run on rx_napi_cpu.
+	 */
+	struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
+	struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
+	struct napi_struct    rx_napi_struct ____cacheline_aligned;
+	atomic_t                   rx_napi_cpu; /* cpu on which the napi is dispatched */
+	struct net_device    *rx_napi_netdev; /* netdev of primary interface */
+
+	struct work_struct    rx_napi_dispatcher_work;
+	struct work_struct	  tx_compl_dispatcher_work;
+	struct work_struct    tx_dispatcher_work;
+
+	/* Number of times DPC Tasklet ran */
+	uint32	dhd_dpc_cnt;
+	/* Number of times NAPI processing got scheduled */
+	uint32	napi_sched_cnt;
+	/* Number of times NAPI processing ran on each available core */
+	uint32	*napi_percpu_run_cnt;
+	/* Number of times RX Completions got scheduled */
+	uint32	rxc_sched_cnt;
+	/* Number of times RX Completion ran on each available core */
+	uint32	*rxc_percpu_run_cnt;
+	/* Number of times TX Completions got scheduled */
+	uint32	txc_sched_cnt;
+	/* Number of times TX Completions ran on each available core */
+	uint32	*txc_percpu_run_cnt;
+	/* CPU status */
+	/* Number of times each CPU came online */
+	uint32	*cpu_online_cnt;
+	/* Number of times each CPU went offline */
+	uint32	*cpu_offline_cnt;
+
+	/* Number of times TX processing run on each core */
+	uint32	*txp_percpu_run_cnt;
+	/* Number of times TX start run on each core */
+	uint32	*tx_start_percpu_run_cnt;
+
+	/* Tx load balancing */
+
+	/* TODO: Need to see if batch processing is really required for TX
+	 * processing. For RX the dongle can send a bunch of rx completions at
+	 * once, hence we took a 3-queue approach:
+	 * enqueue - adds the skbs to rx_pend_queue
+	 * dispatch - takes a lock and moves the list of skbs from the pend
+	 *            queue to the napi queue
+	 * napi processing - copies the pend_queue into a local queue and works
+	 * on it.
+	 * But TX is going to be 1 skb at a time, so for now we use only one
+	 * queue and rely on the lock-protected skb queue functions to add and
+	 * process it. If that turns out to be inefficient we'll revisit the
+	 * queue design.
+	 */
+
+	/* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
+	/* struct sk_buff_head		tx_pend_queue  ____cacheline_aligned;  */
+	/*
+	 * From the tasklet that actually sends out data, copy the list
+	 * tx_pend_queue into tx_active_queue. That way the spinlock only has
+	 * to cover the copy; the rest of the code, i.e. building tx_pend_queue
+	 * and processing tx_active_queue, can be lockless. The concept is
+	 * borrowed as-is from RX processing.
+	 */
+	/* struct sk_buff_head		tx_active_queue  ____cacheline_aligned; */
+
+	/* Control TXP in runtime, enable by default */
+	atomic_t                lb_txp_active;
+
+	/*
+	 * When NET_TX tries to send a TX packet, put it into tx_pend_queue.
+	 * For now, the processing tasklet will also directly operate on this
+	 * queue.
+	 */
+	struct sk_buff_head	tx_pend_queue  ____cacheline_aligned;
+
+	/* CPU on which the DHD Tx is happening */
+	atomic_t		tx_cpu;
+
+	/* CPU on which the Network stack is calling the DHD's xmit function */
+	atomic_t		net_tx_cpu;
+
+	/* Tasklet context from which the DHD's TX processing happens */
+	struct tasklet_struct tx_tasklet;
+
+	/*
+	 * Consumer Histogram - NAPI RX Packet processing
+	 * -----------------------------------------------
+	 * On each CPU, whenever the NAPI RX packet processing callback is
+	 * invoked, the number of packets processed is captured in this data
+	 * structure. It is difficult to capture the "exact" number of packets
+	 * processed, so treating the packet counter as a 32-bit value we keep
+	 * a bucket with 8 bins (2^1, 2^2 ... 2^8). The number of packets
+	 * processed is rounded up to the next power of 2 and the counter in
+	 * the appropriate "bin" is incremented.
+	 * For example, assume that on CPU 1 NAPI Rx runs 3 times and the
+	 * packet counts are as follows (all bin counters starting at 0):
+	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
+	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
+	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
+	 * (See the illustrative binning sketch after this struct definition.)
+	 */
+	uint32 *napi_rx_hist[HIST_BIN_SIZE];
+	uint32 *txc_hist[HIST_BIN_SIZE];
+	uint32 *rxc_hist[HIST_BIN_SIZE];
+#endif /* DHD_LB */
+
+#ifdef SHOW_LOGTRACE
+	struct work_struct	  event_log_dispatcher_work;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+	struct kobject dhd_kobj;
+#ifdef SHOW_LOGTRACE
+	struct sk_buff_head   evt_trace_queue     ____cacheline_aligned;
+#endif
+	struct timer_list timesync_timer;
+#if defined(BT_OVER_SDIO)
+	char btfw_path[PATH_MAX];
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef WL_MONITOR
+	struct net_device *monitor_dev; /* monitor pseudo device */
+	struct sk_buff *monitor_skb;
+	uint	monitor_len;
+	uint monitor_type;   /* monitor pseudo device */
+	monitor_info_t *monitor_info;
+#endif /* WL_MONITOR */
+	uint32 shub_enable;
+#if defined(BT_OVER_SDIO)
+	struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
+	int	bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
+#endif /* BT_OVER_SDIO */
+#ifdef DHD_DEBUG_UART
+	bool duart_execute;
+#endif
+#ifdef PCIE_INB_DW
+	wait_queue_head_t ds_exit_wait;
+#endif /* PCIE_INB_DW */
+} dhd_info_t;
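+
+/*
+ * Illustrative helper (a sketch, not the driver's own code) for the
+ * napi_rx_hist/txc_hist/rxc_hist consumer histograms described inside the
+ * struct above: a per-iteration packet count is mapped to a power-of-two
+ * bucket by rounding up, e.g.
+ *
+ *	static inline uint32 dhd_hist_bin(uint32 npkts)
+ *	{
+ *		uint32 bin = 0;
+ *		while (((uint32)1 << (bin + 1)) < npkts && bin < (HIST_BIN_SIZE - 1))
+ *			bin++;
+ *		return bin;
+ *	}
+ *
+ * The loop stops once (1 << (bin + 1)) >= npkts, so a count of 10 ends up in
+ * the 2^4 bucket and a count of 30 in the 2^5 bucket, matching the example
+ * in the comment above. The driver's actual accounting helpers live
+ * elsewhere in this file.
+ */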
+
+#ifdef WL_MONITOR
+#define MONPKT_EXTRA_LEN	48
+#endif
+
+#define DHDIF_FWDER(dhdif)      FALSE
+
+#if defined(BT_OVER_SDIO)
+/* Flag to indicate if driver is initialized */
+uint dhd_driver_init_done = TRUE;
+#else
+/* Flag to indicate if driver is initialized */
+uint dhd_driver_init_done = FALSE;
+#endif
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+char clm_path[MOD_PARAM_PATHLEN];
+char config_path[MOD_PARAM_PATHLEN];
+#ifdef DHD_UCODE_DOWNLOAD
+char ucode_path[MOD_PARAM_PATHLEN];
+#endif /* DHD_UCODE_DOWNLOAD */
+
+module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
+
+
+/* backup buffer for firmware and nvram path */
+char fw_bak_path[MOD_PARAM_PATHLEN];
+char nv_bak_path[MOD_PARAM_PATHLEN];
+
+/* information string to keep firmware, chip, and chip revision info visible in the log */
+char info_string[MOD_PARAM_INFOLEN];
+module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
+int op_mode = 0;
+int disable_proptx = 0;
+module_param(op_mode, int, 0644);
+extern int wl_control_wl_start(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS))
+struct semaphore dhd_registration_sem;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (BCMLXSDMMC || BCMDBUS) */
+
+/* deferred handlers */
+static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
+
+#ifdef DHD_UPDATE_INTF_MAC
+static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
+#endif /* DHD_UPDATE_INTF_MAC */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+#ifdef WL_CFG80211
+extern void dhd_netdev_free(struct net_device *ndev);
+#endif /* WL_CFG80211 */
+
+#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
+/* update rx_pkt_chainable state of dhd interface */
+static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
+#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
+
+#ifdef HOFFLOAD_MODULES
+char dhd_hmem_module_string[MOD_PARAM_SRLEN];
+module_param_string(dhd_hmem_module_string, dhd_hmem_module_string, MOD_PARAM_SRLEN, 0660);
+#endif
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+#if defined(WL_WIRELESS_EXT)
+module_param(iw_msg_level, int, 0);
+#endif
+#ifdef WL_CFG80211
+module_param(wl_dbg_level, int, 0);
+#endif
+module_param(android_msg_level, int, 0);
+module_param(config_msg_level, int, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+
+#ifdef ENABLE_ARP_SNOOP_MODE
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
+#else
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
+#endif /* ENABLE_ARP_SNOOP_MODE */
+
+module_param(dhd_arp_mode, uint, 0);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* Disable Prop tx */
+module_param(disable_proptx, int, 0644);
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
+#ifdef DHD_UCODE_DOWNLOAD
+module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
+#endif /* DHD_UCODE_DOWNLOAD */
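+/*
+ * Usage sketch (illustrative only; paths and module name depend on the build):
+ *   insmod dhd.ko firmware_path=/vendor/firmware/fw_bcmdhd.bin \
+ *                 nvram_path=/vendor/firmware/nvram.txt
+ * or, after the module is loaded, via sysfs:
+ *   echo /vendor/firmware/fw_bcmdhd.bin > /sys/module/<module>/parameters/firmware_path
+ */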
+
+/* Watchdog interval */
+
+/* extend watchdog expiration to 2 seconds when DPC is running */
+#define WATCHDOG_EXTEND_INTERVAL (2000)
+
+uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
+#endif /* DHD_PCIE_RUNTIMEPM */
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0644);
+#else
+uint dhd_console_ms = 0;
+#endif /* DHD_DEBUG */
+
+uint dhd_slpauto = TRUE;
+module_param(dhd_slpauto, uint, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+#endif
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+uint dhd_master_mode = FALSE;
+#else
+uint dhd_master_mode = FALSE;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+module_param(dhd_master_mode, uint, 0);
+
+int dhd_watchdog_prio = 0;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority */
+int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
+module_param(dhd_dpc_prio, int, 0);
+
+/* RX frame thread priority */
+int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
+module_param(dhd_rxf_prio, int, 0);
+
+#if !defined(BCMDBUS)
+extern int dhd_dongle_ramsize;
+module_param(dhd_dongle_ramsize, int, 0);
+#endif /* !BCMDBUS */
+
+#ifdef WL_CFG80211
+int passive_channel_skip = 0;
+module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
+#endif /* WL_CFG80211 */
+
+/* Keep track of number of instances */
+static int dhd_found = 0;
+static int instance_base = 0; /* Starting instance number */
+module_param(instance_base, int, 0644);
+
+#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+static int dhd_napi_weight = 32;
+module_param(dhd_napi_weight, int, 0644);
+#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
+
+#ifdef PCIE_FULL_DONGLE
+extern int h2d_max_txpost;
+module_param(h2d_max_txpost, int, 0644);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_DHCP_DUMP
+struct bootp_fmt {
+	struct iphdr ip_header;
+	struct udphdr udp_header;
+	uint8 op;
+	uint8 htype;
+	uint8 hlen;
+	uint8 hops;
+	uint32 transaction_id;
+	uint16 secs;
+	uint16 flags;
+	uint32 client_ip;
+	uint32 assigned_ip;
+	uint32 server_ip;
+	uint32 relay_ip;
+	uint8 hw_address[16];
+	uint8 server_name[64];
+	uint8 file_name[128];
+	uint8 options[312];
+};
+
+static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
+static const char dhcp_ops[][10] = {
+	"NA", "REQUEST", "REPLY"
+};
+static const char dhcp_types[][10] = {
+	"NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
+};
+static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
+#endif /* DHD_DHCP_DUMP */
+
+#ifdef DHD_ICMP_DUMP
+#include <net/icmp.h>
+static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
+#endif /* DHD_ICMP_DUMP */
+
+/* Functions to manage sysfs interface for dhd */
+static int dhd_sysfs_init(dhd_info_t *dhd);
+static void dhd_sysfs_exit(dhd_info_t *dhd);
+
+#ifdef SHOW_LOGTRACE
+#if defined(CUSTOMER_HW4_DEBUG)
+static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
+static char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
+static char *map_file_path = PLATFORM_PATH"rtecdc.map";
+static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
+static char *rom_map_file_path = PLATFORM_PATH"roml.map";
+#elif defined(CUSTOMER_HW2)
+static char *logstrs_path = "/data/misc/wifi/logstrs.bin";
+static char *st_str_file_path = "/data/misc/wifi/rtecdc.bin";
+static char *map_file_path = "/data/misc/wifi/rtecdc.map";
+static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin";
+static char *rom_map_file_path = "/data/misc/wifi/roml.map";
+#else
+static char *logstrs_path = "/installmedia/logstrs.bin";
+static char *st_str_file_path = "/installmedia/rtecdc.bin";
+static char *map_file_path = "/installmedia/rtecdc.map";
+static char *rom_st_str_file_path = "/installmedia/roml.bin";
+static char *rom_map_file_path = "/installmedia/roml.map";
+#endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 */
+static char *ram_file_str = "rtecdc";
+static char *rom_file_str = "roml";
+
+module_param(logstrs_path, charp, S_IRUGO);
+module_param(st_str_file_path, charp, S_IRUGO);
+module_param(map_file_path, charp, S_IRUGO);
+module_param(rom_st_str_file_path, charp, S_IRUGO);
+module_param(rom_map_file_path, charp, S_IRUGO);
+
+static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
+static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
+	uint32 *rodata_end);
+static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
+	char *map_file);
+#endif /* SHOW_LOGTRACE */
+
+#if defined(DHD_LB)
+
+static void
+dhd_lb_set_default_cpus(dhd_info_t *dhd)
+{
+	/* Default CPU allocation for the jobs */
+	atomic_set(&dhd->rx_napi_cpu, 1);
+	atomic_set(&dhd->rx_compl_cpu, 2);
+	atomic_set(&dhd->tx_compl_cpu, 2);
+	atomic_set(&dhd->tx_cpu, 2);
+	atomic_set(&dhd->net_tx_cpu, 0);
+}
+
+static void
+dhd_cpumasks_deinit(dhd_info_t *dhd)
+{
+	free_cpumask_var(dhd->cpumask_curr_avail);
+	free_cpumask_var(dhd->cpumask_primary);
+	free_cpumask_var(dhd->cpumask_primary_new);
+	free_cpumask_var(dhd->cpumask_secondary);
+	free_cpumask_var(dhd->cpumask_secondary_new);
+}
+
+static int
+dhd_cpumasks_init(dhd_info_t *dhd)
+{
+	int id;
+	uint32 cpus, num_cpus = num_possible_cpus();
+	int ret = 0;
+
+	DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
+		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
+
+	if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
+	    !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
+		DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
+	cpumask_clear(dhd->cpumask_primary);
+	cpumask_clear(dhd->cpumask_secondary);
+
+	if (num_cpus > 32) {
+		DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
+		ASSERT(0);
+	}
+
+	cpus = DHD_LB_PRIMARY_CPUS;
+	for (id = 0; id < num_cpus; id++) {
+		if (isset(&cpus, id))
+			cpumask_set_cpu(id, dhd->cpumask_primary);
+	}
+
+	cpus = DHD_LB_SECONDARY_CPUS;
+	for (id = 0; id < num_cpus; id++) {
+		if (isset(&cpus, id))
+			cpumask_set_cpu(id, dhd->cpumask_secondary);
+	}
+
+	return ret;
+fail:
+	dhd_cpumasks_deinit(dhd);
+	return ret;
+}
+
+/*
+ * The CPU Candidacy Algorithm
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The available CPUs for selection are divided into two groups
+ *  Primary Set - A CPU mask that carries the First Choice CPUs
+ *  Secondary Set - A CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of jobs that need to be assigned to the CPUs,
+ * from one of the above mentioned CPU groups. The jobs are
+ * 1) Rx Packet Processing - napi_cpu
+ * 2) Completion Processing (Tx, Rx) - compl_cpu
+ *
+ * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
+ * on-line/off-line, the CPU candidacy algorithm is triggered. The candidacy
+ * algorithm tries to pick the first available non-boot CPU (i.e. not CPU0)
+ * for napi_cpu. If there are more processors free, it assigns one to compl_cpu.
+ * It also tries to ensure, as far as possible, that napi_cpu and compl_cpu
+ * are not on the same CPU.
+ *
+ * By design, both Tx and Rx completion jobs run on the same CPU core, as this
+ * allows Tx completion skbs to be released into a local free pool from
+ * which the rx buffer posts can be serviced. It is important to note
+ * that a Tx packet may not have a large enough buffer for rx posting.
+ */
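+/*
+ * Worked example (hypothetical masks, for illustration only): with
+ * DHD_LB_PRIMARY_CPUS = 0xF0 (CPUs 4-7) and all of them online,
+ * dhd_select_cpu_candidacy() picks napi_cpu = 4, tx_cpu = 5 and
+ * compl_cpu = 6. If only CPU 4 of the primary group is online and
+ * DHD_LB_SECONDARY_CPUS = 0x0E (CPUs 1-3), napi_cpu stays on CPU 4
+ * while tx_cpu and compl_cpu are taken from the secondary group.
+ */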
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+	uint32 primary_available_cpus; /* count of primary available cpus */
+	uint32 secondary_available_cpus; /* count of secondary available cpus */
+	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
+	uint32 tx_cpu = 0; /* cpu selected for tx processing job */
+
+	cpumask_clear(dhd->cpumask_primary_new);
+	cpumask_clear(dhd->cpumask_secondary_new);
+
+	/*
+	 * Now select from the primary mask. Even if a Job is
+	 * already running on a CPU in secondary group, we still move
+	 * to primary CPU. So no conditional checks.
+	 */
+	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+		dhd->cpumask_curr_avail);
+
+	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+		dhd->cpumask_curr_avail);
+
+	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
+
+	if (primary_available_cpus > 0) {
+		napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+
+		/* If no further CPU is available,
+		 * cpumask_next returns >= nr_cpu_ids
+		 */
+		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+		if (tx_cpu >= nr_cpu_ids)
+			tx_cpu = 0;
+
+		/* In case there are no more CPUs, do completions & Tx in same CPU */
+		compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
+		if (compl_cpu >= nr_cpu_ids)
+			compl_cpu = tx_cpu;
+	}
+
+	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+
+	/* -- Now check for the CPUs from the secondary mask -- */
+	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
+
+	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));
+
+	if (secondary_available_cpus > 0) {
+		/* At this point if napi_cpu is unassigned it means no CPU
+		 * is online from Primary Group
+		 */
+		if (napi_cpu == 0) {
+			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+		} else if (tx_cpu == 0) {
+			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
+			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+		} else if (compl_cpu == 0) {
+			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
+		}
+
+		/* If no CPU was available for tx processing, choose CPU 0 */
+		if (tx_cpu >= nr_cpu_ids)
+			tx_cpu = 0;
+
+		/* If no CPU was available for completion, choose CPU 0 */
+		if (compl_cpu >= nr_cpu_ids)
+			compl_cpu = 0;
+	}
+	if ((primary_available_cpus == 0) &&
+		(secondary_available_cpus == 0)) {
+		/* No CPUs available from primary or secondary mask */
+		napi_cpu = 1;
+		compl_cpu = 0;
+		tx_cpu = 2;
+	}
+
+	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
+
+	ASSERT(napi_cpu < nr_cpu_ids);
+	ASSERT(compl_cpu < nr_cpu_ids);
+	ASSERT(tx_cpu < nr_cpu_ids);
+
+	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
+	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
+	atomic_set(&dhd->tx_cpu, tx_cpu);
+
+	return;
+}
+
+/*
+ * Function to handle CPU Hotplug notifications.
+ * One of the tasks it performs is to trigger the CPU Candidacy algorithm
+ * for load balancing.
+ */
+int
+dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+	unsigned long int cpu = (unsigned long int)hcpu;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
+		DHD_INFO(("%s(): LB data is not initialized yet.\n",
+			__FUNCTION__));
+		return NOTIFY_BAD;
+	}
+
+	switch (action)
+	{
+		case CPU_ONLINE:
+		case CPU_ONLINE_FROZEN:
+			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+			dhd_select_cpu_candidacy(dhd);
+			break;
+
+		case CPU_DOWN_PREPARE:
+		case CPU_DOWN_PREPARE_FROZEN:
+			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+			dhd_select_cpu_candidacy(dhd);
+			break;
+		default:
+			break;
+	}
+
+	return NOTIFY_OK;
+}
+
+#if defined(DHD_LB_STATS)
+void dhd_lb_stats_init(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int i, j, num_cpus = num_possible_cpus();
+	int alloc_size = sizeof(uint32) * num_cpus;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
+			__FUNCTION__));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+	DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+
+	dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->napi_percpu_run_cnt) {
+		DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+
+	DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+
+	dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->rxc_percpu_run_cnt) {
+		DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+
+	DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+	dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->txc_percpu_run_cnt) {
+		DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+
+	dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->cpu_online_cnt) {
+		DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
+
+	dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->cpu_offline_cnt) {
+		DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
+
+	dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->txp_percpu_run_cnt) {
+		DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+
+	dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+	if (!dhd->tx_start_percpu_run_cnt) {
+		DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
+			__FUNCTION__));
+		return;
+	}
+	for (i = 0; i < num_cpus; i++)
+		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+		if (!dhd->napi_rx_hist[j]) {
+			DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
+				__FUNCTION__, j));
+			return;
+		}
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+		}
+	}
+#ifdef DHD_LB_TXC
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+		if (!dhd->txc_hist[j]) {
+			DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
+			         __FUNCTION__, j));
+			return;
+		}
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
+		}
+	}
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+		if (!dhd->rxc_hist[j]) {
+			DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
+				__FUNCTION__, j));
+			return;
+		}
+		for (i = 0; i < num_cpus; i++) {
+			DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
+		}
+	}
+#endif /* DHD_LB_RXC */
+	return;
+}
+
+void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int j, num_cpus = num_possible_cpus();
+	int alloc_size = sizeof(uint32) * num_cpus;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
+			__FUNCTION__));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd->napi_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
+		dhd->napi_percpu_run_cnt = NULL;
+	}
+	if (dhd->rxc_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
+		dhd->rxc_percpu_run_cnt = NULL;
+	}
+	if (dhd->txc_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
+		dhd->txc_percpu_run_cnt = NULL;
+	}
+	if (dhd->cpu_online_cnt) {
+		MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
+		dhd->cpu_online_cnt = NULL;
+	}
+	if (dhd->cpu_offline_cnt) {
+		MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
+		dhd->cpu_offline_cnt = NULL;
+	}
+
+	if (dhd->txp_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
+		dhd->txp_percpu_run_cnt = NULL;
+	}
+	if (dhd->tx_start_percpu_run_cnt) {
+		MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
+		dhd->tx_start_percpu_run_cnt = NULL;
+	}
+
+	for (j = 0; j < HIST_BIN_SIZE; j++) {
+		if (dhd->napi_rx_hist[j]) {
+			MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
+			dhd->napi_rx_hist[j] = NULL;
+		}
+#ifdef DHD_LB_TXC
+		if (dhd->txc_hist[j]) {
+			MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
+			dhd->txc_hist[j] = NULL;
+		}
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+		if (dhd->rxc_hist[j]) {
+			MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
+			dhd->rxc_hist[j] = NULL;
+		}
+#endif /* DHD_LB_RXC */
+	}
+
+	return;
+}
+
+static void dhd_lb_stats_dump_histo(
+	struct bcmstrbuf *strbuf, uint32 **hist)
+{
+	int i, j;
+	uint32 *per_cpu_total;
+	uint32 total = 0;
+	uint32 num_cpus = num_possible_cpus();
+
+	per_cpu_total = (uint32 *)kmalloc(sizeof(uint32) * num_cpus, GFP_ATOMIC);
+	if (!per_cpu_total) {
+		DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
+		return;
+	}
+	bzero(per_cpu_total, sizeof(uint32) * num_cpus);
+
+	bcm_bprintf(strbuf, "CPU: \t\t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%d\t", i);
+	bcm_bprintf(strbuf, "\nBin\n");
+
+	for (i = 0; i < HIST_BIN_SIZE; i++) {
+		bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
+		for (j = 0; j < num_cpus; j++) {
+			bcm_bprintf(strbuf, "%d\t", hist[i][j]);
+		}
+		bcm_bprintf(strbuf, "\n");
+	}
+	bcm_bprintf(strbuf, "Per CPU Total \t");
+	total = 0;
+	for (i = 0; i < num_cpus; i++) {
+		for (j = 0; j < HIST_BIN_SIZE; j++) {
+			per_cpu_total[i] += (hist[j][i] * (1<<j));
+		}
+		bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+		total += per_cpu_total[i];
+	}
+	bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
+
+	kfree(per_cpu_total);
+	return;
+}
+
+static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+	int i, num_cpus = num_possible_cpus();
+
+	bcm_bprintf(strbuf, "CPU: \t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%d\t", i);
+	bcm_bprintf(strbuf, "\n");
+
+	bcm_bprintf(strbuf, "Val: \t");
+	for (i = 0; i < num_cpus; i++)
+		bcm_bprintf(strbuf, "%u\t", *(p+i));
+	bcm_bprintf(strbuf, "\n");
+	return;
+}
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_info_t *dhd;
+
+	if (dhdp == NULL || strbuf == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+			__FUNCTION__, dhdp, strbuf));
+		return;
+	}
+
+	dhd = dhdp->info;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+		return;
+	}
+
+	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+
+	bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+
+	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+		dhd->txc_sched_cnt);
+
+#ifdef DHD_LB_RXP
+	bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+	dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_RXC
+	bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
+	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
+	dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
+#endif /* DHD_LB_RXC */
+
+#ifdef DHD_LB_TXC
+	bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
+	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
+	dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
+#endif /* DHD_LB_TXC */
+
+#ifdef DHD_LB_TXP
+	bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
+
+	bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
+	dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
+#endif /* DHD_LB_TXP */
+
+	bcm_bprintf(strbuf, "\nCPU masks primary(big)=0x%x secondary(little)=0x%x\n",
+		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS);
+
+	bcm_bprintf(strbuf, "napi_cpu %x tx_cpu %x\n",
+		atomic_read(&dhd->rx_napi_cpu), atomic_read(&dhd->tx_cpu));
+
+}
+
+/* Given a number 'n', returns 'm', the smallest power of 2 that is >= n */
+static inline uint32 next_larger_power2(uint32 num)
+{
+	num--;
+	num |= (num >> 1);
+	num |= (num >> 2);
+	num |= (num >> 4);
+	num |= (num >> 8);
+	num |= (num >> 16);
+
+	return (num + 1);
+}
+
+static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
+{
+	uint32 bin_power;
+	uint32 *p;
+	bin_power = next_larger_power2(count);
+
+	switch (bin_power) {
+		case   1: p = bin[0] + cpu; break;
+		case   2: p = bin[1] + cpu; break;
+		case   4: p = bin[2] + cpu; break;
+		case   8: p = bin[3] + cpu; break;
+		case  16: p = bin[4] + cpu; break;
+		case  32: p = bin[5] + cpu; break;
+		case  64: p = bin[6] + cpu; break;
+		case 128: p = bin[7] + cpu; break;
+		default : p = bin[8] + cpu; break;
+	}
+
+	*p = *p + 1;
+	return;
+}
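+/*
+ * Example (illustrative): a NAPI run that processed 10 packets is rounded up
+ * to 16 by next_larger_power2(), so the 2^4 counter (bin[4]) of the current
+ * CPU is incremented; a run of exactly 32 packets lands in bin[5].
+ */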
+
+extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
+{
+	int cpu;
+	dhd_info_t *dhd = dhdp->info;
+
+	cpu = get_cpu();
+	put_cpu();
+	dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
+
+	return;
+}
+
+extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+	int cpu;
+	dhd_info_t *dhd = dhdp->info;
+
+	cpu = get_cpu();
+	put_cpu();
+	dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
+
+	return;
+}
+
+extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+	int cpu;
+	dhd_info_t *dhd = dhdp->info;
+
+	cpu = get_cpu();
+	put_cpu();
+	dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
+
+	return;
+}
+
+extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
+}
+
+extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
+}
+#endif /* DHD_LB_STATS */
+
+#endif /* DHD_LB */
+
+#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
+int g_frameburst = 1;
+#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
+
+static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
+
+/* DHD Perimeter lock, only used in routers with bypass forwarding. */
+#define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)
+
+#ifdef PCIE_FULL_DONGLE
+#if defined(BCM_GMAC3)
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp)      do { /* noop */ } while (0)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags)    ({ BCM_REFERENCE(flags); })
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags)  ({ BCM_REFERENCE(flags); })
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+
+#else /* ! BCM_GMAC3 */
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
+	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
+	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
+	struct list_head *snapshot_list);
+static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+#ifdef BCMDBGFS
+extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
+extern void dhd_dbgfs_remove(void);
+#endif
+
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* DS Exit response timeout */
+int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+#ifdef BCMSDIO
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#endif /* BCMSDIO */
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+
+
+#ifndef BCMDBUS
+/* Allow delayed firmware download for debug purpose */
+int allow_delay_fwdl = FALSE;
+module_param(allow_delay_fwdl, int, 0);
+#endif /* !BCMDBUS */
+
+extern char dhd_version[];
+extern char fw_version[];
+extern char clm_version[];
+
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifndef BCMDBUS
+static void dhd_dpc(ulong data);
+#endif /* !BCMDBUS */
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+		wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+	int ret = NOTIFY_DONE;
+	bool suspend = FALSE;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	BCM_REFERENCE(dhdinfo);
+	BCM_REFERENCE(suspend);
+
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		suspend = TRUE;
+		break;
+
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		suspend = FALSE;
+		break;
+	}
+
+#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
+	if (suspend) {
+		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
+		dhd_wlfc_suspend(&dhdinfo->pub);
+		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
+	} else {
+		dhd_wlfc_resume(&dhdinfo->pub);
+	}
+#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 39))
+	dhd_mmc_suspend = suspend;
+	smp_mb();
+#endif
+
+	return ret;
+}
+
+/* Make sure we do not register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself).
+ */
+static bool dhd_pm_notifier_registered = FALSE;
+
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* CONFIG_PM_SLEEP */
+
+/* Request scheduling of the bus rx frame */
+static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
+static void dhd_os_rxflock(dhd_pub_t *pub);
+static void dhd_os_rxfunlock(dhd_pub_t *pub);
+
+/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
+typedef struct dhd_dev_priv {
+	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
+	dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
+	int          ifidx; /* interface index */
+	void       * lkup;
+} dhd_dev_priv_t;
+
+#define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
+#define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
+#define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
+#define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
+#define DHD_DEV_LKUP(dev)		(((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
+
+#if defined(DHD_OF_SUPPORT)
+extern int dhd_wlan_init(void);
+#endif /* defined(DHD_OF_SUPPORT) */
+/** Clear the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_clear(struct net_device * dev)
+{
+	dhd_dev_priv_t * dev_priv;
+	ASSERT(dev != (struct net_device *)NULL);
+	dev_priv = DHD_DEV_PRIV(dev);
+	dev_priv->dhd = (dhd_info_t *)NULL;
+	dev_priv->ifp = (dhd_if_t *)NULL;
+	dev_priv->ifidx = DHD_BAD_IF;
+	dev_priv->lkup = (void *)NULL;
+}
+
+/** Setup the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
+                  int ifidx)
+{
+	dhd_dev_priv_t * dev_priv;
+	ASSERT(dev != (struct net_device *)NULL);
+	dev_priv = DHD_DEV_PRIV(dev);
+	dev_priv->dhd = dhd;
+	dev_priv->ifp = ifp;
+	dev_priv->ifidx = ifidx;
+}
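+/*
+ * Usage sketch (hypothetical, for illustration only): a net_device allocated
+ * with DHD_DEV_PRIV_SIZE bytes of private area is linked to driver state via
+ * dhd_dev_priv_save(dev, dhd, ifp, ifidx); callers later recover the cached
+ * pointers with DHD_DEV_INFO(dev), DHD_DEV_IFP(dev) and DHD_DEV_IFIDX(dev),
+ * and dhd_dev_priv_clear(dev) resets the link before the device is released.
+ */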
+
+#ifdef PCIE_FULL_DONGLE
+
+/** Dummy objects are defined with state representing bad|down.
+ * This gives performance gains by reducing branch conditionals and load
+ * shadows, and by improving instruction parallelism, dual issue, and the use
+ * of larger pipelines.
+ * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
+ * is accessed via the dhd_sta_t.
+ */
+
+/* Dummy dhd_info object */
+dhd_info_t dhd_info_null = {
+#if defined(BCM_GMAC3)
+	.fwdh = FWDER_NULL,
+#endif
+	.pub = {
+	         .info = &dhd_info_null,
+#ifdef DHDTCPACK_SUPPRESS
+	         .tcpack_sup_mode = TCPACK_SUP_REPLACE,
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(TRAFFIC_MGMT_DWM)
+	         .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE },
+#endif
+	         .up = FALSE,
+	         .busstate = DHD_BUS_DOWN
+	}
+};
+#define DHD_INFO_NULL (&dhd_info_null)
+#define DHD_PUB_NULL  (&dhd_info_null.pub)
+
+/* Dummy netdevice object */
+struct net_device dhd_net_dev_null = {
+	.reg_state = NETREG_UNREGISTERED
+};
+#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
+
+/* Dummy dhd_if object */
+dhd_if_t dhd_if_null = {
+#if defined(BCM_GMAC3)
+	.fwdh = FWDER_NULL,
+#endif
+#ifdef WMF
+	.wmf = { .wmf_enable = TRUE },
+#endif
+	.info = DHD_INFO_NULL,
+	.net = DHD_NET_DEV_NULL,
+	.idx = DHD_BAD_IF
+};
+#define DHD_IF_NULL  (&dhd_if_null)
+
+#define DHD_STA_NULL ((dhd_sta_t *)NULL)
+
+/** Interface STA list management. */
+
+/** Fetch the dhd_if object, given the interface index in the dhd. */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+
+/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
+static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
+static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
+
+/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
+static void dhd_if_del_sta_list(dhd_if_t * ifp);
+static void	dhd_if_flush_sta(dhd_if_t * ifp);
+
+/* Construct/Destruct a sta pool. */
+static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
+
+
+/* Return interface pointer */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+{
+	ASSERT(ifidx < DHD_MAX_IFS);
+
+	if (ifidx >= DHD_MAX_IFS)
+		return NULL;
+
+	return dhdp->info->iflist[ifidx];
+}
+
+/** Reset a dhd_sta object and free into the dhd pool. */
+static void
+dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
+{
+	int prio;
+
+	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+
+	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+	/*
+	 * Flush and free all packets in all of the sta's flowring queues.
+	 * Packets already in the flow ring will be flushed later.
+	 */
+	for (prio = 0; prio < (int)NUMPRIO; prio++) {
+		uint16 flowid = sta->flowid[prio];
+
+		if (flowid != FLOWID_INVALID) {
+			unsigned long flags;
+			flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
+			flow_ring_node_t * flow_ring_node;
+
+#ifdef DHDTCPACK_SUPPRESS
+			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+			 * when there is a newly coming packet from network stack.
+			 */
+			dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
+
+			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+			flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
+
+			if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
+				void * pkt;
+				while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
+					PKTFREE(dhdp->osh, pkt, TRUE);
+				}
+			}
+
+			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+			ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+		}
+
+		sta->flowid[prio] = FLOWID_INVALID;
+	}
+
+	id16_map_free(dhdp->staid_allocator, sta->idx);
+	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
+	sta->ifidx = DHD_BAD_IF;
+	bzero(sta->ea.octet, ETHER_ADDR_LEN);
+	INIT_LIST_HEAD(&sta->list);
+	sta->idx = ID16_INVALID; /* implying free */
+}
+
+/** Allocate a dhd_sta object from the dhd pool. */
+static dhd_sta_t *
+dhd_sta_alloc(dhd_pub_t * dhdp)
+{
+	uint16 idx;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+
+	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+	idx = id16_map_alloc(dhdp->staid_allocator);
+	if (idx == ID16_INVALID) {
+		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
+		return DHD_STA_NULL;
+	}
+
+	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
+	sta = &sta_pool[idx];
+
+	ASSERT((sta->idx == ID16_INVALID) &&
+	       (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+
+	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+
+	sta->idx = idx; /* implying allocated */
+
+	return sta;
+}
+
+/** Delete all STAs in an interface's STA list. */
+static void
+dhd_if_del_sta_list(dhd_if_t *ifp)
+{
+	dhd_sta_t *sta, *next;
+	unsigned long flags;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+#if defined(BCM_GMAC3)
+		if (ifp->fwdh) {
+			/* Remove sta from WOFA forwarder. */
+			fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (uintptr_t)sta);
+		}
+#endif /* BCM_GMAC3 */
+		list_del(&sta->list);
+		dhd_sta_free(&ifp->info->pub, sta);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return;
+}
+
+/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
+static void
+dhd_if_flush_sta(dhd_if_t * ifp)
+{
+#if defined(BCM_GMAC3)
+
+	if (ifp && (ifp->fwdh != FWDER_NULL)) {
+		dhd_sta_t *sta, *next;
+		unsigned long flags;
+
+		DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+		list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+			/* Remove any sta entry from WOFA forwarder. */
+			fwder_flush(ifp->fwdh, (uintptr_t)sta);
+		}
+
+		DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+	}
+#endif /* BCM_GMAC3 */
+}
+
+/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
+static int
+dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
+{
+	int idx, prio, sta_pool_memsz;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+	void * staid_allocator;
+
+	ASSERT(dhdp != (dhd_pub_t *)NULL);
+	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
+
+	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
+	if (staid_allocator == NULL) {
+		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Pre allocate a pool of dhd_sta objects (one extra). */
+	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
+	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
+	if (sta_pool == NULL) {
+		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
+		id16_map_fini(dhdp->osh, staid_allocator);
+		return BCME_ERROR;
+	}
+
+	dhdp->sta_pool = sta_pool;
+	dhdp->staid_allocator = staid_allocator;
+
+	/* Initialize all sta(s) for the pre-allocated free pool. */
+	bzero((uchar *)sta_pool, sta_pool_memsz);
+	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+		sta = &sta_pool[idx];
+		sta->idx = id16_map_alloc(staid_allocator);
+		ASSERT(sta->idx <= max_sta);
+	}
+	/* Now place them into the pre-allocated free pool. */
+	for (idx = 1; idx <= max_sta; idx++) {
+		sta = &sta_pool[idx];
+		for (prio = 0; prio < (int)NUMPRIO; prio++) {
+			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+		}
+		dhd_sta_free(dhdp, sta);
+	}
+
+	return BCME_OK;
+}
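+/*
+ * Example (illustrative): with max_sta = 4 the pool holds sta_pool[0..4];
+ * index 0 is never handed out because the id16 allocator starts at 1, so
+ * dhd_sta_alloc() returns ids 1..4 and dhd_sta_free() recycles them back
+ * into the allocator.
+ */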
+
+/** Destruct the pool of dhd_sta_t objects.
+ * Caller must ensure that no STA objects are currently associated with an if.
+ */
+static void
+dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
+{
+	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+
+	if (sta_pool) {
+		int idx;
+		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+		for (idx = 1; idx <= max_sta; idx++) {
+			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
+			ASSERT(sta_pool[idx].idx == ID16_INVALID);
+		}
+		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
+		dhdp->sta_pool = NULL;
+	}
+
+	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
+	dhdp->staid_allocator = NULL;
+}
+
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void
+dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
+{
+	int idx, prio, sta_pool_memsz;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+	void *staid_allocator;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+	staid_allocator = dhdp->staid_allocator;
+
+	if (!sta_pool) {
+		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (!staid_allocator) {
+		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	/* clear free pool */
+	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+	bzero((uchar *)sta_pool, sta_pool_memsz);
+
+	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+	id16_map_clear(staid_allocator, max_sta, 1);
+
+	/* Initialize all sta(s) for the pre-allocated free pool. */
+	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+		sta = &sta_pool[idx];
+		sta->idx = id16_map_alloc(staid_allocator);
+		ASSERT(sta->idx <= max_sta);
+	}
+	/* Now place them into the pre-allocated free pool. */
+	for (idx = 1; idx <= max_sta; idx++) {
+		sta = &sta_pool[idx];
+		for (prio = 0; prio < (int)NUMPRIO; prio++) {
+			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+		}
+		dhd_sta_free(dhdp, sta);
+	}
+}
+
+/** Find STA with MAC address ea in an interface's STA list. */
+dhd_sta_t *
+dhd_find_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+	if (ifp == NULL)
+		return DHD_STA_NULL;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(sta, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+			DHD_INFO(("%s: found STA " MACDBG "\n",
+				__FUNCTION__, MAC2STRDBG((char *)ea)));
+			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+			return sta;
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return DHD_STA_NULL;
+}
+
+/** Add STA into the interface's STA list. */
+dhd_sta_t *
+dhd_add_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+	if (ifp == NULL)
+		return DHD_STA_NULL;
+
+	sta = dhd_sta_alloc((dhd_pub_t *)pub);
+	if (sta == DHD_STA_NULL) {
+		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
+		return DHD_STA_NULL;
+	}
+
+	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
+
+	/* link the sta and the dhd interface */
+	sta->ifp = ifp;
+	sta->ifidx = ifidx;
+#ifdef DHD_WMF
+	sta->psta_prim = NULL;
+#endif
+	INIT_LIST_HEAD(&sta->list);
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_add_tail(&sta->list, &ifp->sta_list);
+
+#if defined(BCM_GMAC3)
+	if (ifp->fwdh) {
+		ASSERT(ISALIGNED(ea, 2));
+		/* Add sta to WOFA forwarder. */
+		fwder_reassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
+	}
+#endif /* BCM_GMAC3 */
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return sta;
+}
+
+/** Delete all STAs from the interface's STA list. */
+void
+dhd_del_all_sta(void *pub, int ifidx)
+{
+	dhd_sta_t *sta, *next;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+	if (ifp == NULL)
+		return;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+#if defined(BCM_GMAC3)
+		if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
+			ASSERT(ISALIGNED(sta->ea.octet, 2));
+			fwder_deassoc(ifp->fwdh, (uint16 *)sta->ea.octet, (uintptr_t)sta);
+		}
+#endif /* BCM_GMAC3 */
+
+		list_del(&sta->list);
+		dhd_sta_free(&ifp->info->pub, sta);
+#ifdef DHD_L2_FILTER
+		if (ifp->parp_enable) {
+			/* clear Proxy ARP cache of specific Ethernet Address */
+			bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
+					ifp->phnd_arp_table, FALSE,
+					sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+		}
+#endif /* DHD_L2_FILTER */
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return;
+}
+
+/** Delete STA from the interface's STA list. */
+void
+dhd_del_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta, *next;
+	dhd_if_t *ifp;
+	unsigned long flags;
+	char macstr[ETHER_ADDR_STR_LEN];
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+	if (ifp == NULL)
+		return;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+#if defined(BCM_GMAC3)
+			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
+				ASSERT(ISALIGNED(ea, 2));
+				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
+			}
+#endif /* BCM_GMAC3 */
+			DHD_MAC_TO_STR(((char *)ea), macstr);
+			DHD_ERROR(("%s: Deleting STA  %s\n", __FUNCTION__, macstr));
+			list_del(&sta->list);
+			dhd_sta_free(&ifp->info->pub, sta);
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+#ifdef DHD_L2_FILTER
+	if (ifp->parp_enable) {
+		/* clear Proxy ARP cache of specific Ethernet Address */
+		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
+			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+	}
+#endif /* DHD_L2_FILTER */
+	return;
+}
+
+/** Add STA if it doesn't exist. Not reentrant. */
+dhd_sta_t*
+dhd_findadd_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+
+	sta = dhd_find_sta(pub, ifidx, ea);
+
+	if (!sta) {
+		/* Add entry */
+		sta = dhd_add_sta(pub, ifidx, ea);
+	}
+
+	return sta;
+}
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+#if !defined(BCM_GMAC3)
+static struct list_head *
+dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
+{
+	unsigned long flags;
+	dhd_sta_t *sta, *snapshot;
+
+	INIT_LIST_HEAD(snapshot_list);
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_for_each_entry(sta, &ifp->sta_list, list) {
+		/* allocate one and add to snapshot */
+		snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
+		if (snapshot == NULL) {
+			DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
+			continue;
+		}
+
+		memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
+
+		INIT_LIST_HEAD(&snapshot->list);
+		list_add_tail(&snapshot->list, snapshot_list);
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return snapshot_list;
+}
+
+static void
+dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
+{
+	dhd_sta_t *sta, *next;
+
+	list_for_each_entry_safe(sta, next, snapshot_list, list) {
+		list_del(&sta->list);
+		MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
+	}
+}
+#endif /* !BCM_GMAC3 */
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+
+#else
+static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
+static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
+static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
+static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
+dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
+void dhd_del_sta(void *pub, int ifidx, void *ea) {}
+#endif /* PCIE_FULL_DONGLE */
+
+
+
+#if defined(DHD_LB)
+
+#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP)
+/**
+ * dhd_tasklet_schedule - Function that runs in IPI context of the destination
+ * CPU and schedules a tasklet.
+ * @tasklet: opaque pointer to the tasklet
+ */
+INLINE void
+dhd_tasklet_schedule(void *tasklet)
+{
+	tasklet_schedule((struct tasklet_struct *)tasklet);
+}
+/**
+ * dhd_tasklet_schedule_on - Schedules the passed tasklet on a given CPU
+ * @tasklet: tasklet to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, then an IPI is sent to this cpu via the
+ * smp_call_function_single with no wait and the tasklet_schedule function
+ * will be invoked to schedule the specified tasklet on the requested CPU.
+ */
+INLINE void
+dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
+{
+	const int wait = 0;
+	smp_call_function_single(on_cpu,
+		dhd_tasklet_schedule, (void *)tasklet, wait);
+}
+
+/**
+ * dhd_work_schedule_on - Schedules the passed work item on a given CPU
+ * @work: work to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, the work is queued on that cpu via
+ * schedule_work_on() and the work function will subsequently be invoked
+ * on the requested CPU.
+ */
+
+INLINE void
+dhd_work_schedule_on(struct work_struct *work, int on_cpu)
+{
+	schedule_work_on(on_cpu, work);
+}
+#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP */
+
+#if defined(DHD_LB_TXC)
+/**
+ * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
+ * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
+ * freeing the packets placed in the tx_compl workq
+ */
+void
+dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu, on_cpu;
+
+	if (dhd->rx_napi_netdev == NULL) {
+		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
+	/*
+	 * If the destination CPU is NOT online, or is the same as the current
+	 * CPU, there is no need to schedule the work.
+	 */
+	curr_cpu = get_cpu();
+	put_cpu();
+
+	on_cpu = atomic_read(&dhd->tx_compl_cpu);
+
+	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+	} else {
+		schedule_work(&dhd->tx_compl_dispatcher_work);
+	}
+}
+
+static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
+{
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, tx_compl_dispatcher_work);
+	int cpu;
+
+	get_online_cpus();
+	cpu = atomic_read(&dhd->tx_compl_cpu);
+	if (!cpu_online(cpu))
+		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+	else
+		dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
+	put_online_cpus();
+}
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+/**
+ * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
+ * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
+ * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
+ * placed in the rx_compl workq.
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu, on_cpu;
+
+	if (dhd->rx_napi_netdev == NULL) {
+		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
+	/*
+	 * If the destination CPU is NOT online, or is the same as the current
+	 * CPU, there is no need to schedule the work.
+	 */
+	curr_cpu = get_cpu();
+	put_cpu();
+	on_cpu = atomic_read(&dhd->rx_compl_cpu);
+
+	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+	} else {
+		dhd_rx_compl_dispatcher_fn(dhdp);
+	}
+}
+
+static void dhd_rx_compl_dispatcher_fn(dhd_pub_t *dhdp)
+{
+	struct dhd_info *dhd = dhdp->info;
+	int cpu;
+
+	preempt_disable();
+	cpu = atomic_read(&dhd->rx_compl_cpu);
+	if (!cpu_online(cpu))
+		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+	else {
+		dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
+	}
+	preempt_enable();
+}
+#endif /* DHD_LB_RXC */
+
+#if defined(DHD_LB_TXP)
+static void dhd_tx_dispatcher_work(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, tx_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	dhd_tasklet_schedule(&dhd->tx_tasklet);
+}
+
+static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
+{
+	int cpu;
+	int net_tx_cpu;
+	dhd_info_t *dhd = dhdp->info;
+
+	preempt_disable();
+	cpu = atomic_read(&dhd->tx_cpu);
+	net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
+
+	/*
+	 * If NET_TX has pushed the packet on the same CPU that is chosen
+	 * for Tx processing, separate them out, i.e. run the TX processing
+	 * tasklet on compl_cpu.
+	 */
+	if (net_tx_cpu == cpu)
+		cpu = atomic_read(&dhd->tx_compl_cpu);
+
+	if (!cpu_online(cpu)) {
+		/*
+		 * The chosen CPU is not online, so do the job
+		 * on the current CPU itself.
+		 */
+		dhd_tasklet_schedule(&dhd->tx_tasklet);
+	} else {
+		/*
+		 * Schedule tx_dispatcher_work on the chosen cpu, which
+		 * in turn will schedule tx_tasklet.
+		 */
+		dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
+	}
+	preempt_enable();
+}
+
+/**
+ * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
+ * on another cpu. The tx_tasklet will take care of actually putting
+ * the skbs into appropriate flow ring and ringing H2D interrupt
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+static void
+dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu;
+
+	curr_cpu = get_cpu();
+	put_cpu();
+
+	/* Record the CPU in which the TX request from Network stack came */
+	atomic_set(&dhd->net_tx_cpu, curr_cpu);
+
+	/* Schedule the work to dispatch ... */
+	dhd_tx_dispatcher_fn(dhdp);
+
+}
+#endif /* DHD_LB_TXP */
+
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_napi_poll - Load balance napi poll function to process received
+ * packets and send up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
+ * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
+ * packet tag and sendup.
+ */
+static int
+dhd_napi_poll(struct napi_struct *napi, int budget)
+{
+	int ifid;
+	const int pkt_count = 1;
+	const int chan = 0;
+	struct sk_buff * skb;
+	unsigned long flags;
+	struct dhd_info *dhd;
+	int processed = 0;
+	struct sk_buff_head rx_process_queue;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
+		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+	__skb_queue_head_init(&rx_process_queue);
+
+	/* extract the entire rx_napi_queue into local rx_process_queue */
+	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
+	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
+		OSL_PREFETCH(skb->data);
+
+		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+
+		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+			__FUNCTION__, skb, ifid));
+
+		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+		processed++;
+	}
+
+	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
+
+	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
+	napi_complete(napi);
+
+	return budget - 1;
+}
+
+/**
+ * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
+ * poll list. This function may be invoked via the smp_call_function_single
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+	dhd_info_t *dhd = (dhd_info_t *)info;
+
+	DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+		__FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
+
+	/* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+	if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+		__napi_schedule(&dhd->rx_napi_struct);
+		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
+	}
+
+	/*
+	 * If the rx_napi_struct was already running, then we let it complete
+	 * processing all its packets. The rx_napi_struct may only run on one
+	 * core at a time, to avoid out-of-order handling.
+	 */
+}
+
+/**
+ * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
+ * action after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
+{
+	int wait = 0; /* asynchronous IPI */
+	DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+		__FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
+
+	if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+		DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+			__FUNCTION__, on_cpu));
+	}
+
+	DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+	return 0;
+}
+
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
+ * Why is this needed?
+ * The candidacy algorithm runs from the callback function registered
+ * with the CPU hotplug notifier, and that callback runs from worker
+ * context. dhd_napi_schedule_on also runs from worker context.
+ * Note that both of these can run on two different CPUs at the same time,
+ * so there is a window in which a given CPUn is being brought down from
+ * CPUm while we try to run a function on CPUn.
+ * To prevent this, it is better to execute the whole SMP function call
+ * under get_online_cpus.
+ * This call ensures that the hotplug mechanism does not kick in until
+ * we are done dealing with online CPUs.
+ * If the hotplug worker is already running, that is fine, because the
+ * candidacy algorithm will then reflect the change in dhd->rx_napi_cpu.
+ *
+ * The code structure below is proposed in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ *    work specific to this cpu is in progress
+ *
+ * According to that documentation, calling get_online_cpus is NOT required
+ * when running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
+ * run from workqueue context, we have to call these functions.
+ */
+static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, rx_napi_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	int cpu;
+
+	get_online_cpus();
+	cpu = atomic_read(&dhd->rx_napi_cpu);
+
+	if (!cpu_online(cpu))
+		dhd_napi_schedule(dhd);
+	else
+		dhd_napi_schedule_on(dhd, cpu);
+
+	put_online_cpus();
+}
+
+/**
+ * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
+ * to run on another CPU. The rx_napi_struct's poll function will retrieve all
+ * the packets enqueued into the rx_napi_queue and sendup.
+ * The producer's rx packet queue is appended to the rx_napi_queue before
+ * dispatching the rx_napi_struct.
+ */
+void
+dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
+{
+	unsigned long flags;
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu;
+	int on_cpu;
+
+	if (dhd->rx_napi_netdev == NULL) {
+		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+		skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
+
+	/* append the producer's queue of packets to the napi's rx process queue */
+	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+	/*
+	 * If the destination CPU is not online, or is the same as the
+	 * current CPU, there is no need to schedule the work.
+	 */
+	curr_cpu = get_cpu();
+	put_cpu();
+
+	on_cpu = atomic_read(&dhd->rx_napi_cpu);
+	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+		dhd_napi_schedule(dhd);
+	} else {
+		schedule_work(&dhd->rx_napi_dispatcher_work);
+	}
+}
+
+/**
+ * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
+ */
+void
+dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+	dhd_info_t *dhd = dhdp->info;
+
+	DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+		pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
+	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
+	__skb_queue_tail(&dhd->rx_pend_queue, pkt);
+}
+#endif /* DHD_LB_RXP */
+
+#endif /* DHD_LB */
+
+
+/** Returns dhd iflist index corresponding to the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+	dhd_if_t *ifp;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	ASSERT(bssidx < DHD_MAX_IFS);
+	ASSERT(dhdp);
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ifp = dhd->iflist[i];
+		if (ifp && (ifp->bssidx == bssidx)) {
+			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+				ifp->name, bssidx, i));
+			break;
+		}
+	}
+	return i;
+}
+
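+/* The rxf queue below is a simple single-producer/single-consumer ring:
+ * dhdp->skbbuf[] holds the pending skbs, store_idx is the producer index
+ * and sent_idx the consumer index, both advanced with the (MAXSKBPEND - 1)
+ * mask. A non-NULL slot at store_idx means the ring is full.
+ */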
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+
+	if (!skb) {
+		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+		return BCME_ERROR;
+	}
+
+	dhd_os_rxflock(dhdp);
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	if (dhdp->skbbuf[store_idx] != NULL) {
+		/* Make sure the previous packets are processed */
+		dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
+		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
+		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		/* removed msleep here, should use wait_event_timeout if we
+		 * want to give rx frame thread a chance to run
+		 */
+#if defined(WAIT_DEQUEUE)
+		OSL_SLEEP(1);
+#endif
+		return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+	}
+	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+	dhdp->skbbuf[store_idx] = skb;
+	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+	dhd_os_rxfunlock(dhdp);
+
+	return BCME_OK;
+}
+
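+/* Pop the oldest pending skb from the rxf ring; returns NULL when the slot
+ * at sent_idx has not yet been filled by dhd_rxf_enqueue.
+ */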
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+	void *skb;
+
+	dhd_os_rxflock(dhdp);
+
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	skb = dhdp->skbbuf[sent_idx];
+
+	if (skb == NULL) {
+		dhd_os_rxfunlock(dhdp);
+		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+			store_idx, sent_idx));
+		return NULL;
+	}
+
+	dhdp->skbbuf[sent_idx] = NULL;
+	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
+
+	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+		skb, sent_idx));
+
+	dhd_os_rxfunlock(dhdp);
+
+	return skb;
+}
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+{
+	if (prepost) { /* pre process */
+		dhd_read_cis(dhdp);
+		dhd_check_module_cid(dhdp);
+		dhd_check_module_mac(dhdp);
+		dhd_set_macaddr_from_file(dhdp);
+	} else { /* post process */
+		dhd_write_macaddr(&dhdp->mac);
+		dhd_clear_cis(dhdp);
+	}
+
+	return 0;
+}
+
+// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
+#if defined(PKT_FILTER_SUPPORT)
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
+{
+	bool _apply = FALSE;
+	/* In case of IBSS mode, apply arp pkt filter */
+	if (op_mode_param & DHD_FLAG_IBSS_MODE) {
+		_apply = TRUE;
+		goto exit;
+	}
+	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+	if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
+		_apply = TRUE;
+		goto exit;
+	}
+
+exit:
+	return _apply;
+}
+#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+void
+dhd_set_packet_filter(dhd_pub_t *dhd)
+{
+	int i;
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+	if (dhd_pkt_filter_enable) {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+		}
+	}
+}
+
+void
+dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+{
+	int i;
+
+	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
+	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
+		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
+		return;
+	}
+	/* 1 - Enable packet filter, only allow unicast packets to be sent up */
+	/* 0 - Disable packet filter */
+	if (dhd_pkt_filter_enable && (!value ||
+	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
+	{
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+			if (value && (i == DHD_ARP_FILTER_NUM) &&
+				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
+				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
+					" val %d, cnt %d, op_mode 0x%x\n",
+					value, i, dhd->op_mode));
+				continue;
+			}
+#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+				value, dhd_master_mode);
+		}
+	}
+}
+
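+/* Each case below maps a well-known filter slot to a firmware packet-filter
+ * spec string; the leading number in the string is the filter id, which is
+ * also what dhd_pktfilter_offload_delete() is given when the filter is
+ * removed. The remaining fields are the firmware's offset/mask/pattern
+ * arguments (format assumed from the existing spec strings).
+ */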
+int
+dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
+{
+	char *filterp = NULL;
+	int filter_id = 0;
+
+	switch (num) {
+		case DHD_BROADCAST_FILTER_NUM:
+			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+			filter_id = 101;
+			break;
+		case DHD_MULTICAST4_FILTER_NUM:
+			filter_id = 102;
+			if (FW_SUPPORTED((dhdp), pf6)) {
+				if (dhdp->pktfilter[num] != NULL) {
+					dhd_pktfilter_offload_delete(dhdp, filter_id);
+					dhdp->pktfilter[num] = NULL;
+				}
+				if (!add_remove) {
+					filterp = DISCARD_IPV4_MCAST;
+					add_remove = 1;
+					break;
+				}
+			}
+			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+			break;
+		case DHD_MULTICAST6_FILTER_NUM:
+			filter_id = 103;
+			if (FW_SUPPORTED((dhdp), pf6)) {
+				if (dhdp->pktfilter[num] != NULL) {
+					dhd_pktfilter_offload_delete(dhdp, filter_id);
+					dhdp->pktfilter[num] = NULL;
+				}
+				if (!add_remove) {
+					filterp = DISCARD_IPV6_MCAST;
+					add_remove = 1;
+					break;
+				}
+			}
+			filterp = "103 0 0 0 0xFFFF 0x3333";
+			break;
+		case DHD_MDNS_FILTER_NUM:
+			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+			filter_id = 104;
+			break;
+		case DHD_ARP_FILTER_NUM:
+			filterp = "105 0 0 12 0xFFFF 0x0806";
+			filter_id = 105;
+			break;
+		case DHD_BROADCAST_ARP_FILTER_NUM:
+			filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
+				" 0xFFFFFFFFFFFF0000000000000806";
+			filter_id = 106;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	/* Add filter */
+	if (add_remove) {
+		dhdp->pktfilter[num] = filterp;
+		dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
+	} else { /* Delete filter */
+		if (dhdp->pktfilter[num]) {
+			dhd_pktfilter_offload_delete(dhdp, filter_id);
+			dhdp->pktfilter[num] = NULL;
+		}
+	}
+
+	return 0;
+}
+#endif /* PKT_FILTER_SUPPORT */
+
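+/* Apply (value != 0, with dhd->in_suspend set) or undo (value == 0) the
+ * extra power-save settings used across early-suspend/resume: PM mode,
+ * packet filters, DTIM skipping, roam tuning and the other iovars below.
+ */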
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+	int power_mode = PM_MAX;
+#ifdef SUPPORT_SENSORHUB
+	shub_control_t shub_ctl;
+#endif /* SUPPORT_SENSORHUB */
+	/* wl_pkt_filter_enable_t	enable_parm; */
+	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
+	int ret = 0;
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+	int bcn_timeout = 0;
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+	int roam_time_thresh = 0;   /* (ms) */
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+	uint roamvar = dhd->conf->roam_off_suspend;
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+	int bcn_li_bcn;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+	uint nd_ra_filter = 0;
+#endif /* DHD_USE_EARLYSUSPEND */
+#ifdef PASS_ALL_MCAST_PKTS
+	struct dhd_info *dhdinfo;
+	uint32 allmulti;
+	uint i;
+#endif /* PASS_ALL_MCAST_PKTS */
+#ifdef ENABLE_IPMCAST_FILTER
+	int ipmcast_l2filter;
+#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef DYNAMIC_SWOOB_DURATION
+#ifndef CUSTOM_INTR_WIDTH
+#define CUSTOM_INTR_WIDTH 100
+	int intr_width = 0;
+#endif /* CUSTOM_INTR_WIDTH */
+#endif /* DYNAMIC_SWOOB_DURATION */
+
+#if defined(BCMPCIE)
+	int lpas = 0;
+	int dtim_period = 0;
+	int bcn_interval = 0;
+	int bcn_to_dly = 0;
+#ifndef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+	int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
+#else
+	bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#endif /* BCMPCIE */
+
+	if (!dhd)
+		return -ENODEV;
+
+#ifdef PASS_ALL_MCAST_PKTS
+	dhdinfo = dhd->info;
+#endif /* PASS_ALL_MCAST_PKTS */
+
+	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+		__FUNCTION__, value, dhd->in_suspend));
+
+	dhd_suspend_lock(dhd);
+
+#ifdef CUSTOM_SET_CPUCORE
+	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
+	/* set specific cpucore */
+	dhd_set_cpucore(dhd, TRUE);
+#endif /* CUSTOM_SET_CPUCORE */
+
+	if (dhd->conf->pm >= 0)
+		power_mode = dhd->conf->pm;
+	else
+		power_mode = PM_FAST;
+
+	if (dhd->up) {
+		if (value && dhd->in_suspend) {
+#ifdef PKT_FILTER_SUPPORT
+			dhd->early_suspended = 1;
+#endif
+			/* Kernel suspended */
+			DHD_ERROR(("%s: force extra suspend setting\n", __FUNCTION__));
+
+			if (dhd->conf->pm_in_suspend >= 0)
+				power_mode = dhd->conf->pm_in_suspend;
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				sizeof(power_mode), TRUE, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+			/* Enable packet filter,
+			 * only allow unicast packets to be sent up
+			 */
+			dhd_enable_packet_filter(1, dhd);
+#ifdef APF
+			dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+
+#ifdef SUPPORT_SENSORHUB
+			shub_ctl.enable = 1;
+			shub_ctl.cmd = 0x000;
+			shub_ctl.op_mode = 1;
+			shub_ctl.interval = 0;
+			if (dhd->info->shub_enable == 1) {
+				ret = dhd_iovar(dhd, 0, "shub_msreq",
+					(char *)&shub_ctl, sizeof(shub_ctl), NULL, 0, TRUE);
+				if (ret < 0) {
+					DHD_ERROR(("%s SensorHub MS start: failed %d\n",
+						__FUNCTION__, ret));
+				}
+			}
+#endif /* SUPPORT_SENSORHUB */
+
+
+#ifdef PASS_ALL_MCAST_PKTS
+			allmulti = 0;
+			for (i = 0; i < DHD_MAX_IFS; i++) {
+				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
+					dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
+							sizeof(allmulti), NULL, 0, TRUE);
+
+			}
+#endif /* PASS_ALL_MCAST_PKTS */
+
+			/* If DTIM skip is set up as default, force it to wake
+			 * each third DTIM for better power savings.  Note that
+			 * one side effect is a chance to miss BC/MC packets.
+			 */
+#ifdef WLTDLS
+			/* Do not set bcn_li_dtim in WFD mode */
+			if (dhd->tdls_mode) {
+				bcn_li_dtim = 0;
+			} else
+#endif /* WLTDLS */
+#if defined(BCMPCIE)
+			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
+				&bcn_interval);
+			dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+					sizeof(bcn_li_dtim), NULL, 0, TRUE);
+
+			if ((bcn_li_dtim * dtim_period * bcn_interval) >=
+				MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
+				/*
+				 * Increase max roaming threshold from 2 secs to 8 secs
+				 * the real roam threshold is MIN(max_roam_threshold,
+				 * bcn_timeout/2)
+				 */
+				lpas = 1;
+				dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
+						0, TRUE);
+
+				bcn_to_dly = 1;
+				/*
+				 * if bcn_to_dly is 1, the real roam threshold is
+				 * MIN(max_roam_threshold, bcn_timeout -1);
+				 * notify link down event after roaming procedure complete
+				 * if we hit bcn_timeout while we are in roaming progress.
+				 */
+				dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
+						sizeof(bcn_to_dly), NULL, 0, TRUE);
+				/* Increase beacon timeout to 6 secs or use bigger one */
+				bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
+				dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+						sizeof(bcn_timeout), NULL, 0, TRUE);
+			}
+#else
+			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
+			if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+					sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
+				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
+#endif /* BCMPCIE */
+
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+			bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
+			dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+					sizeof(bcn_timeout), NULL, 0, TRUE);
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+			roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
+			dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
+					sizeof(roam_time_thresh), NULL, 0, TRUE);
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+			/* Disable firmware roaming during suspend */
+			dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
+					NULL, 0, TRUE);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+			bcn_li_bcn = 0;
+			dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
+					sizeof(bcn_li_bcn), NULL, 0, TRUE);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#ifdef NDO_CONFIG_SUPPORT
+			if (dhd->ndo_enable) {
+				if (!dhd->ndo_host_ip_overflow) {
+					/* enable ND offload on suspend */
+					ret = dhd_ndo_enable(dhd, 1);
+					if (ret < 0) {
+						DHD_ERROR(("%s: failed to enable NDO\n",
+							__FUNCTION__));
+					}
+				} else {
+					DHD_INFO(("%s: NDO disabled on suspend due to "
+							"HW capacity\n", __FUNCTION__));
+				}
+			}
+#endif /* NDO_CONFIG_SUPPORT */
+#ifndef APF
+			if (FW_SUPPORTED(dhd, ndoe))
+#else
+			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
+#endif /* APF */
+			{
+				/* enable IPv6 RA filter in firmware during suspend */
+				nd_ra_filter = 1;
+				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
+						(char *)&nd_ra_filter, sizeof(nd_ra_filter),
+						NULL, 0, TRUE);
+				if (ret < 0)
+					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+						ret));
+			}
+			dhd_os_suppress_logging(dhd, TRUE);
+#ifdef ENABLE_IPMCAST_FILTER
+			ipmcast_l2filter = 1;
+			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
+					(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
+					NULL, 0, TRUE);
+#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef DYNAMIC_SWOOB_DURATION
+			intr_width = CUSTOM_INTR_WIDTH;
+			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
+					sizeof(intr_width), NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+			}
+#endif /* DYNAMIC_SWOOB_DURATION */
+#endif /* DHD_USE_EARLYSUSPEND */
+			dhd_conf_set_suspend_resume(dhd, value);
+		} else {
+			dhd_conf_set_suspend_resume(dhd, value);
+#ifdef PKT_FILTER_SUPPORT
+			dhd->early_suspended = 0;
+#endif
+			/* Kernel resumed  */
+			DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+#ifdef SUPPORT_SENSORHUB
+			shub_ctl.enable = 1;
+			shub_ctl.cmd = 0x000;
+			shub_ctl.op_mode = 0;
+			shub_ctl.interval = 0;
+			if (dhd->info->shub_enable == 1) {
+				ret = dhd_iovar(dhd, 0, "shub_msreq",
+						(char *)&shub_ctl, sizeof(shub_ctl),
+						NULL, 0, TRUE);
+				if (ret < 0) {
+					DHD_ERROR(("%s SensorHub MS stop: failed %d\n",
+						__FUNCTION__, ret));
+				}
+			}
+#endif /* SUPPORT_SENSORHUB */
+
+#ifdef DYNAMIC_SWOOB_DURATION
+			intr_width = 0;
+			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
+					sizeof(intr_width), NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+			}
+#endif /* DYNAMIC_SWOOB_DURATION */
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				sizeof(power_mode), TRUE, 0);
+#ifdef PKT_FILTER_SUPPORT
+			/* disable pkt filter */
+			dhd_enable_packet_filter(0, dhd);
+#ifdef APF
+			dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef PASS_ALL_MCAST_PKTS
+			allmulti = 1;
+			for (i = 0; i < DHD_MAX_IFS; i++) {
+				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
+					dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
+							sizeof(allmulti), NULL, 0, TRUE);
+			}
+#endif /* PASS_ALL_MCAST_PKTS */
+#if defined(BCMPCIE)
+			/* restore pre-suspend setting */
+			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+					sizeof(bcn_li_dtim), NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s:bcn_li_dtim fail:%d\n", __FUNCTION__, ret));
+			}
+
+			dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, 0,
+					TRUE);
+
+			dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
+					sizeof(bcn_to_dly), NULL, 0, TRUE);
+
+			dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+					sizeof(bcn_timeout), NULL, 0, TRUE);
+#else
+			/* restore pre-suspend setting for dtim_skip */
+			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+					sizeof(bcn_li_dtim), NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s:bcn_li_dtim fail:%d\n", __FUNCTION__, ret));
+			}
+#endif /* BCMPCIE */
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+			bcn_timeout = CUSTOM_BCN_TIMEOUT;
+			dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+					sizeof(bcn_timeout), NULL, 0, TRUE);
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+			roam_time_thresh = 2000;
+			dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
+					sizeof(roam_time_thresh), NULL, 0, TRUE);
+
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+			roamvar = dhd_roam_disable;
+			dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
+					NULL, 0, TRUE);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+			bcn_li_bcn = 1;
+			dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
+					sizeof(bcn_li_bcn), NULL, 0, TRUE);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#ifdef NDO_CONFIG_SUPPORT
+			if (dhd->ndo_enable) {
+				/* Disable ND offload on resume */
+				ret = dhd_ndo_enable(dhd, 0);
+				if (ret < 0) {
+					DHD_ERROR(("%s: failed to disable NDO\n",
+						__FUNCTION__));
+				}
+			}
+#endif /* NDO_CONFIG_SUPPORT */
+#ifndef APF
+			if (FW_SUPPORTED(dhd, ndoe))
+#else
+			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
+#endif /* APF */
+			{
+				/* disable IPv6 RA filter in firmware on resume */
+				nd_ra_filter = 0;
+				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
+						(char *)&nd_ra_filter, sizeof(nd_ra_filter),
+						NULL, 0, TRUE);
+				if (ret < 0) {
+					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+						ret));
+				}
+			}
+			dhd_os_suppress_logging(dhd, FALSE);
+#ifdef ENABLE_IPMCAST_FILTER
+			ipmcast_l2filter = 0;
+			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
+					(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
+					NULL, 0, TRUE);
+#endif /* ENABLE_IPMCAST_FILTER */
+#endif /* DHD_USE_EARLYSUSPEND */
+
+			/* terence 2017029: Reject in early suspend */
+			if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {
+				dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+			}
+		}
+	}
+	dhd_suspend_unlock(dhd);
+
+	return 0;
+}
+
+static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
+{
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	DHD_PERIM_LOCK(dhdp);
+
+	/* Set flag when early suspend was called */
+	dhdp->in_suspend = val;
+	if ((force || !dhdp->suspend_disable_flag) &&
+		(dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp)))
+	{
+		ret = dhd_set_suspend(val, dhdp);
+	}
+
+	DHD_PERIM_UNLOCK(dhdp);
+	DHD_OS_WAKE_UNLOCK(dhdp);
+	return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+static void dhd_early_suspend(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 1, 0);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 0, 0);
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+/*
+ * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
+ *
+ *      dhd_timeout_start(&tmo, usec);
+ *      while (!dhd_timeout_expired(&tmo))
+ *              if (poll_something())
+ *                      break;
+ *      if (dhd_timeout_expired(&tmo))
+ *              fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+	tmo->limit = usec;
+	tmo->increment = 0;
+	tmo->elapsed = 0;
+	tmo->tick = jiffies_to_usecs(1);
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Does nothing on the first call */
+	if (tmo->increment == 0) {
+		tmo->increment = 1;
+		return 0;
+	}
+
+	if (tmo->elapsed >= tmo->limit)
+		return 1;
+
+	/* Add the delay that's about to take place */
+	tmo->elapsed += tmo->increment;
+
+	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
+		OSL_DELAY(tmo->increment);
+		tmo->increment *= 2;
+		if (tmo->increment > tmo->tick)
+			tmo->increment = tmo->tick;
+	} else {
+		wait_queue_head_t delay_wait;
+		DECLARE_WAITQUEUE(wait, current);
+		init_waitqueue_head(&delay_wait);
+		add_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		(void)schedule_timeout(1);
+		remove_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_RUNNING);
+	}
+
+	return 0;
+}
+
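+/* Map a net_device back to its slot in dhd->iflist; returns DHD_BAD_IF when
+ * the device is not one of ours.
+ */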
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+	int i = 0;
+
+	if (!dhd) {
+		DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
+		return DHD_BAD_IF;
+	}
+
+	while (i < DHD_MAX_IFS) {
+		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
+			return i;
+		i++;
+	}
+
+	return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
+	struct dhd_info *dhd_info;
+
+	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+		return NULL;
+	dhd_info = dhd_pub->info;
+	if (dhd_info && dhd_info->iflist[ifidx])
+		return dhd_info->iflist[ifidx]->net;
+	return NULL;
+}
+
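+/* Resolve a dongle interface name to its iflist index, scanning from the
+ * highest index down; index 0 (the primary interface) is the fallback when
+ * no match is found or the name is empty.
+ */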
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (name == NULL || *name == '\0')
+		return 0;
+
+	while (--i > 0)
+		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
+			break;
+
+	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+	return i;	/* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return "<if_bad>";
+	}
+
+	if (dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+		return "<if_null>";
+	}
+
+	if (dhd->iflist[ifidx]->net)
+		return dhd->iflist[ifidx]->net->name;
+
+	return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+	int i;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+			return dhd->iflist[i]->mac_addr;
+
+	return NULL;
+}
+
+
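+/* Build and send the "mcast_list" iovar for an interface. The buffer layout
+ * is the NUL-terminated string "mcast_list", a 32-bit little-endian address
+ * count, then cnt 6-byte MAC addresses; an allmulti/promisc update follows.
+ */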
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+	struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	struct netdev_hw_addr *ha;
+#else
+	struct dev_mc_list *mclist;
+#endif
+	uint32 allmulti, cnt;
+
+	wl_ioctl_t ioc;
+	char *buf, *bufp;
+	uint buflen;
+	int ret;
+
+	if (!dhd->iflist[ifidx]) {
+		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
+		return;
+	}
+	dev = dhd->iflist[ifidx]->net;
+	if (!dev)
+		return;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	cnt = netdev_mc_count(dev);
+#else
+	cnt = dev->mc_count;
+#endif /* LINUX >= 2.6.35 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
+
+	/* Determine initial value of allmulti flag */
+	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+#ifdef PASS_ALL_MCAST_PKTS
+#ifdef PKT_FILTER_SUPPORT
+	if (!dhd->pub.early_suspended)
+#endif /* PKT_FILTER_SUPPORT */
+		allmulti = TRUE;
+#endif /* PASS_ALL_MCAST_PKTS */
+
+	/* Send down the multicast list first. */
+
+
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+		           dhd_ifname(&dhd->pub, ifidx), cnt));
+		return;
+	}
+
+	strncpy(bufp, "mcast_list", buflen - 1);
+	bufp[buflen - 1] = '\0';
+	bufp += strlen("mcast_list") + 1;
+
+	cnt = htol32(cnt);
+	memcpy(bufp, &cnt, sizeof(cnt));
+	bufp += sizeof(cnt);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	netdev_for_each_mc_addr(ha, dev) {
+		if (!cnt)
+			break;
+		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+		cnt--;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#else /* LINUX < 2.6.35 */
+	for (mclist = dev->mc_list; (mclist && (cnt > 0));
+		cnt--, mclist = mclist->next) {
+		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+	}
+#endif /* LINUX >= 2.6.35 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		allmulti = cnt ? TRUE : allmulti;
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but might be modified above to be turned on if we
+	 * were trying to set some addresses and the dongle rejected it.
+	 */
+
+	allmulti = htol32(allmulti);
+	ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
+			sizeof(allmulti), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set allmulti %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+
+	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+
+	allmulti = htol32(allmulti);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_PROMISC;
+	ioc.buf = &allmulti;
+	ioc.len = sizeof(allmulti);
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set promisc %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+}
+
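+/* Push a new MAC address to the dongle via the "cur_etheraddr" iovar and,
+ * on success, mirror it into the net_device (and into dhd->pub.mac for the
+ * primary interface).
+ */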
+int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
+{
+	int ret;
+
+	ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
+			ETHER_ADDR_LEN, NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+	} else {
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+		if (ifidx == 0)
+			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+#ifdef DHD_WMF
+void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea,
+		void* event_data)
+{
+	struct wl_psta_primary_intf_event *psta_prim_event =
+			(struct wl_psta_primary_intf_event*)event_data;
+	dhd_sta_t *psta_interface =  NULL;
+	dhd_sta_t *sta = NULL;
+	uint8 ifindex;
+	ASSERT(ifname);
+	ASSERT(psta_prim_event);
+	ASSERT(ea);
+
+	ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname);
+	sta = dhd_find_sta(dhdp, ifindex, ea);
+	if (sta != NULL) {
+		psta_interface = dhd_find_sta(dhdp, ifindex,
+				(void *)(psta_prim_event->prim_ea.octet));
+		if (psta_interface != NULL) {
+			sta->psta_prim = psta_interface;
+		}
+	}
+}
+
+/* Get wmf_psta_disable configuration */
+int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+	return ifp->wmf_psta_disable;
+}
+
+/* Set wmf_psta_disable configuration */
+int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+	ifp->wmf_psta_disable = val;
+	return 0;
+}
+#endif /* DHD_WMF */
+
+#ifdef DHD_PSTA
+/* Get psta/psr configuration */
+int dhd_get_psta_mode(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	return (int)dhd->psta_mode;
+}
+/* Set psta/psr configuration */
+int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd->psta_mode = val;
+	return 0;
+}
+#endif /* DHD_PSTA */
+
+#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
+static void
+dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	if (
+#ifdef DHD_L2_FILTER
+		(ifp->block_ping) ||
+#endif
+#ifdef DHD_WET
+		(dhd->wet_mode) ||
+#endif
+#ifdef DHD_MCAST_REGEN
+		(ifp->mcast_regen_bss_enable) ||
+#endif
+		FALSE) {
+		ifp->rx_pkt_chainable = FALSE;
+	}
+}
+#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
+
+#ifdef DHD_WET
+/* Get wet configuration */
+int dhd_get_wet_mode(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	return (int)dhd->wet_mode;
+}
+
+/* Set wet configuration */
+int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd->wet_mode = val;
+	dhd_update_rx_pkt_chainable_state(dhdp, 0);
+	return 0;
+}
+#endif /* DHD_WET */
+
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
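+/* Translate a firmware interface role (WLC_E_IF_ROLE_*) into the matching
+ * nl80211 interface type used by cfg80211.
+ */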
+int32 dhd_role_to_nl80211_iftype(int32 role)
+{
+	switch (role) {
+	case WLC_E_IF_ROLE_STA:
+		return NL80211_IFTYPE_STATION;
+	case WLC_E_IF_ROLE_AP:
+		return NL80211_IFTYPE_AP;
+	case WLC_E_IF_ROLE_WDS:
+		return NL80211_IFTYPE_WDS;
+	case WLC_E_IF_ROLE_P2P_GO:
+		return NL80211_IFTYPE_P2P_GO;
+	case WLC_E_IF_ROLE_P2P_CLIENT:
+		return NL80211_IFTYPE_P2P_CLIENT;
+	case WLC_E_IF_ROLE_IBSS:
+	case WLC_E_IF_ROLE_NAN:
+		return NL80211_IFTYPE_ADHOC;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+static void
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_event_t *if_event = event_info;
+	struct net_device *ndev;
+	int ifidx, bssidx;
+	int ret;
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	struct wl_if_event_info info;
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+	if (event != DHD_WQ_WORK_IF_ADD) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	bssidx = if_event->event.bssidx;
+	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
+
+
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	if (if_event->event.ifidx > 0) {
+		bzero(&info, sizeof(info));
+		info.ifidx = if_event->event.ifidx;
+		info.bssidx = if_event->event.bssidx;
+		info.role = if_event->event.role;
+		strncpy(info.name, if_event->name, IFNAMSIZ);
+		if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
+			&info, if_event->mac, NULL, true) != NULL) {
+			/* Do the post interface create ops */
+			DHD_ERROR(("Post ifcreate ops done. Returning \n"));
+			goto done;
+		}
+	}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+	/* This path is for the non-Android case. */
+	/* The interface name on the host and in the event msg are the same; */
+	/* the name in the event msg is used to create the dongle if list on the host. */
+	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
+		if_event->mac, bssidx, TRUE, if_event->name);
+	if (!ndev) {
+		DHD_ERROR(("%s: net device alloc failed  \n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
+	DHD_PERIM_LOCK(&dhd->pub);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
+		dhd_remove_if(&dhd->pub, ifidx, TRUE);
+		goto done;
+	}
+#ifndef PCIE_FULL_DONGLE
+	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
+	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
+		uint32 var_int =  1;
+		ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
+				NULL, 0, TRUE);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
+			dhd_remove_if(&dhd->pub, ifidx, TRUE);
+		}
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+done:
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	int ifidx;
+	dhd_if_event_t *if_event = event_info;
+
+
+	if (event != DHD_WQ_WORK_IF_DEL) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	if (if_event->event.ifidx > 0) {
+		/* Do the post interface del ops */
+		if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net, true) == 0) {
+			DHD_TRACE(("Post ifdel ops done. Returning \n"));
+			DHD_PERIM_LOCK(&dhd->pub);
+			goto done;
+		}
+	}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+	dhd_remove_if(&dhd->pub, ifidx, TRUE);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+done:
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+#ifdef DHD_UPDATE_INTF_MAC
+static void
+dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	int ifidx;
+	dhd_if_event_t *if_event = event_info;
+
+	if (event != DHD_WQ_WORK_IF_UPDATE) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));
+
+	dhd_op_if_update(&dhd->pub, ifidx);
+
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
+{
+	dhd_info_t *    dhdinfo = NULL;
+	dhd_if_t   *    ifp = NULL;
+	int             ret = 0;
+	char            buf[128];
+
+	if ((dhdpub == NULL) || (dhdpub->info == NULL)) {
+		DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
+		return -1;
+	} else {
+		dhdinfo = (dhd_info_t *)dhdpub->info;
+		ifp = dhdinfo->iflist[ifidx];
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
+			return -2;
+		}
+	}
+
+	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+	// Get MAC address
+	strcpy(buf, "cur_etheraddr");
+	ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
+	if (ret < 0) {
+		DHD_ERROR(("Failed to update the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
+		// avoid collision
+		dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
+		// force a locally administered address
+		ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
+	} else {
+		DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
+		           ifp->name, ifp->idx,
+		           (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
+		           (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
+		memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
+		if (dhdinfo->iflist[ifp->idx]->net) {
+		    memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
+		}
+	}
+
+	return ret;
+}
+#endif /* DHD_UPDATE_INTF_MAC */
+
+static void
+dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_t *ifp = event_info;
+
+	if (event != DHD_WQ_WORK_SET_MAC) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	// terence 20160907: fix for not able to set mac when wlan0 is down
+	if (ifp == NULL || !ifp->set_macaddress) {
+		goto done;
+	}
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
+	ifp->set_macaddress = FALSE;
+	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
+		DHD_INFO(("%s: MACID is overwritten\n",	__FUNCTION__));
+	else
+		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+
+done:
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	int ifidx = (int)((long int)event_info);
+	dhd_if_t *ifp = NULL;
+
+	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ifp = dhd->iflist[ifidx];
+
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	ifidx = ifp->idx;
+
+
+	_dhd_set_multicast_list(dhd, ifidx);
+	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
+
+done:
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int ifidx;
+	dhd_if_t *dhdif;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return -1;
+
+	dhdif = dhd->iflist[ifidx];
+
+	dhd_net_if_lock_local(dhd);
+	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
+	dhdif->set_macaddress = TRUE;
+	dhd_net_if_unlock_local(dhd);
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
+		dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+	return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	dhd->iflist[ifidx]->set_multicast = TRUE;
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
+		DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
+
+	// terence 20160907: fix for not able to set mac when wlan0 is down
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
+		DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+}
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Get ucode path */
+char *
+dhd_get_ucode_path(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	return dhd->uc_path;
+}
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+	ASSERT(di != NULL);
+	/* terence 20161229: don't do spin lock if proptx not enabled */
+	if (disable_proptx)
+		return 1;
+#ifdef BCMDBUS
+	spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
+#else
+	spin_lock_bh(&di->wlfc_spinlock);
+#endif /* BCMDBUS */
+	return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+
+	ASSERT(di != NULL);
+	/* terence 20161229: don't do spin lock if proptx not enabled */
+	if (disable_proptx)
+		return 1;
+#ifdef BCMDBUS
+	spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
+#else
+	spin_unlock_bh(&di->wlfc_spinlock);
+#endif /* BCMDBUS */
+	return 1;
+}
+
+#endif /* PROP_TXSTATUS */
+
+#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
+typedef struct {
+	uint16 type;
+	const char *str;
+} PKTTYPE_INFO;
+
+static const PKTTYPE_INFO packet_type_info[] =
+{
+	{ ETHER_TYPE_IP, "IP" },
+	{ ETHER_TYPE_ARP, "ARP" },
+	{ ETHER_TYPE_BRCM, "BRCM" },
+	{ ETHER_TYPE_802_1X, "802.1X" },
+	{ ETHER_TYPE_WAI, "WAPI" },
+	{ 0, ""}
+};
+
+static const char *_get_packet_type_str(uint16 type)
+{
+	int i;
+	int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
+
+	for (i = 0; i < n; i++) {
+		if (packet_type_info[i].type == type)
+			return packet_type_info[i].str;
+	}
+
+	return packet_type_info[n].str;
+}
+
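+/* Log the Ethernet type of a Tx/Rx frame for debugging; Broadcom event
+ * frames (ETHER_TYPE_BRCM) are skipped, and the full payload is dumped only
+ * when the corresponding FULL_DUMP option is enabled.
+ */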
+void
+dhd_trx_dump(struct net_device *ndev, uint8 *dump_data, uint datalen, bool tx)
+{
+	uint16 protocol;
+	char *ifname;
+
+	protocol = (dump_data[12] << 8) | dump_data[13];
+	ifname = ndev ? ndev->name : "N/A";
+
+	if (protocol != ETHER_TYPE_BRCM) {
+		DHD_ERROR(("%s DUMP[%s] - %s\n", tx?"Tx":"Rx", ifname,
+			_get_packet_type_str(protocol)));
+#if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP)
+		prhex("Data", dump_data, datalen);
+#endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */
+	}
+}
+#endif /* DHD_TX_DUMP || DHD_RX_DUMP */
+
+/* This routine does not support the packet chain feature. It is currently
+ * tested only with the proxy ARP feature.
+ */
+int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
+{
+	struct sk_buff *skb;
+	void *skbhead = NULL;
+	void *skbprev = NULL;
+	dhd_if_t *ifp;
+	ASSERT(!PKTISCHAINED(p));
+	skb = PKTTONATIVE(dhdp->osh, p);
+
+	ifp = dhdp->info->iflist[ifidx];
+	skb->dev = ifp->net;
+#if defined(BCM_GMAC3)
+	/* Forwarder capable interfaces use WOFA based forwarding */
+	if (ifp->fwdh) {
+		struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
+		uint16 * da = (uint16 *)(eh->ether_dhost);
+		uintptr_t wofa_data;
+		ASSERT(ISALIGNED(da, 2));
+
+		wofa_data = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
+		if (wofa_data == WOFA_DATA_INVALID) { /* Unknown MAC address */
+			if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
+				return BCME_OK;
+			}
+		}
+		PKTFRMNATIVE(dhdp->osh, p);
+		PKTFREE(dhdp->osh, p, FALSE);
+		return BCME_OK;
+	}
+#endif /* BCM_GMAC3 */
+
+	skb->protocol = eth_type_trans(skb, skb->dev);
+
+	if (in_interrupt()) {
+		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+			__FUNCTION__, __LINE__);
+		netif_rx(skb);
+	} else {
+		if (dhdp->info->rxthread_enabled) {
+			if (!skbhead) {
+				skbhead = skb;
+			} else {
+				PKTSETNEXT(dhdp->osh, skbprev, skb);
+			}
+			skbprev = skb;
+		} else {
+			/* If the receive is not processed inside an ISR,
+			 * the softirqd must be woken explicitly to service
+			 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
+			 * by netif_rx_ni(), but in earlier kernels, we need
+			 * to do it manually.
+			 */
+			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+				__FUNCTION__, __LINE__);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+			netif_rx_ni(skb);
+#else
+			ulong flags;
+			netif_rx(skb);
+			local_irq_save(flags);
+			RAISE_RX_SOFTIRQ();
+			local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+		}
+	}
+
+	if (dhdp->info->rxthread_enabled && skbhead)
+		dhd_sched_rxf(dhdp, skbhead);
+
+	return BCME_OK;
+}
+
+int BCMFASTPATH
+__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh = NULL;
+#if defined(DHD_L2_FILTER)
+	dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
+#endif /* DHD_L2_FILTER */
+
+	/* Reject if down */
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		/* free the packet here since the caller won't */
+		PKTCFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+
+#ifdef PCIE_FULL_DONGLE
+	if (dhdp->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+		PKTCFREE(dhdp->osh, pktbuf, TRUE);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_L2_FILTER
+	/* if dhcp_unicast is enabled, we need to convert the */
+	/* broadcast DHCP ACK/REPLY packets to Unicast. */
+	if (ifp->dhcp_unicast) {
+	    uint8* mac_addr;
+	    uint8* ehptr = NULL;
+	    int ret;
+	    ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
+	    if (ret == BCME_OK) {
+		/* If the given mac address has a valid entry in the sta list,
+		 * copy it into the packet's destination address.
+		 */
+		if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
+		    ehptr = PKTDATA(dhdp->osh, pktbuf);
+		    bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+		}
+	    }
+	}
+
+	if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+	    if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+			PKTCFREE(dhdp->osh, pktbuf, TRUE);
+			return BCME_ERROR;
+	    }
+	}
+
+	if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+		ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
+
+		/* Drop the packet if the l2 filter has already processed it,
+		 * otherwise continue with the normal path
+		 */
+		if (ret == BCME_OK) {
+			PKTCFREE(dhdp->osh, pktbuf, TRUE);
+			return BCME_ERROR;
+		}
+	}
+#endif /* DHD_L2_FILTER */
+	/* Update multicast statistic */
+	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+		eh = (struct ether_header *)pktdata;
+
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			dhdp->tx_multicast++;
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
+#ifdef DHD_LOSSLESS_ROAMING
+			uint8 prio = (uint8)PKTPRIO(pktbuf);
+
+			/* back up 802.1x's priority */
+			dhdp->prio_8021x = prio;
+#endif /* DHD_LOSSLESS_ROAMING */
+			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
+			atomic_inc(&dhd->pend_8021x_cnt);
+			dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), pktdata, TRUE);
+		}
+
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+#ifdef DHD_DHCP_DUMP
+			dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
+#endif /* DHD_DHCP_DUMP */
+#ifdef DHD_ICMP_DUMP
+			dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
+#endif /* DHD_ICMP_DUMP */
+		}
+	} else {
+			PKTCFREE(dhdp->osh, pktbuf, TRUE);
+			return BCME_ERROR;
+	}
+
+	{
+		/* Look into the packet and update the packet priority */
+#ifndef PKTPRIO_OVERRIDE
+		if (PKTPRIO(pktbuf) == 0)
+#endif /* !PKTPRIO_OVERRIDE */
+		{
+#if defined(QOS_MAP_SET)
+			pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
+#else
+			pktsetprio(pktbuf, FALSE);
+#endif /* QOS_MAP_SET */
+		}
+	}
+
+
+#if defined(TRAFFIC_MGMT_DWM)
+	traffic_mgmt_pkt_set_prio(dhdp, pktbuf);
+
+#ifdef BCM_GMAC3
+	DHD_PKT_SET_DATAOFF(pktbuf, 0);
+#endif /* BCM_GMAC3 */
+#endif /* TRAFFIC_MGMT_DWM */
+
+#ifdef PCIE_FULL_DONGLE
+	/*
+	 * Look up the per-interface hash table for a matching flowring. If one is not
+	 * available, allocate a unique flowid and add a flowring entry.
+	 * The found or newly created flowid is placed into the pktbuf's tag.
+	 */
+	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
+	if (ret != BCME_OK) {
+		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
+		return ret;
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+#if defined(DHD_TX_DUMP)
+	dhd_trx_dump(dhd_idx2net(dhdp, ifidx), PKTDATA(dhdp->osh, pktbuf),
+		PKTLEN(dhdp->osh, pktbuf), TRUE);
+#endif
+	/* terence 20150901: Micky add to adjust the 802.1X priority */
+	/* Set the 802.1X packet with the highest priority 7 */
+	if (dhdp->conf->pktprio8021x >= 0)
+		pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
+
+#ifdef PROP_TXSTATUS
+	if (dhd_wlfc_is_supported(dhdp)) {
+		/* store the interface ID */
+		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+		/* store destination MAC in the tag as well */
+		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+		/* decide which FIFO this packet belongs to */
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+		else
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+	} else
+#endif /* PROP_TXSTATUS */
+	{
+		/* If the protocol uses a data header, apply it */
+		dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+	}
+
+	/* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+	dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+	{
+		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
+			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
+			/* non-proptxstatus way */
+#ifdef BCMPCIE
+			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+		}
+	}
+#else
+#ifdef BCMPCIE
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+#endif /* PROP_TXSTATUS */
+#ifdef BCMDBUS
+	if (ret)
+		PKTCFREE(dhdp->osh, pktbuf, TRUE);
+#endif /* BCMDBUS */
+
+	return ret;
+}
+
+int BCMFASTPATH
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	DHD_GENERAL_LOCK(dhdp, flags);
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+		DHD_ERROR(("%s: returning as busstate=%d\n",
+			__FUNCTION__, dhdp->busstate));
+		DHD_GENERAL_UNLOCK(dhdp, flags);
+		PKTCFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+	DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
+		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+		PKTCFREE(dhdp->osh, pktbuf, TRUE);
+		ret = -EBUSY;
+		goto exit;
+	}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	DHD_GENERAL_LOCK(dhdp, flags);
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+		DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
+		dhd_os_busbusy_wake(dhdp);
+		DHD_GENERAL_UNLOCK(dhdp, flags);
+		PKTCFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+
+	ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+exit:
+#endif
+	DHD_GENERAL_LOCK(dhdp, flags);
+	DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
+	dhd_os_busbusy_wake(dhdp);
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+	return ret;
+}
+
+#if defined(DHD_LB_TXP)
+
+int BCMFASTPATH
+dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
+	int ifidx, void *skb)
+{
+	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
+
+	/* If the feature is disabled run-time do TX from here */
+	if (atomic_read(&dhd->lb_txp_active) == 0) {
+		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+		return __dhd_sendpkt(&dhd->pub, ifidx, skb);
+	}
+
+	/* Store the address of net device and interface index in the Packet tag */
+	DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
+	DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
+
+	/* Enqueue the skb into tx_pend_queue */
+	skb_queue_tail(&dhd->tx_pend_queue, skb);
+
+	DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
+
+	/* Dispatch the Tx job to be processed by the tx_tasklet */
+	dhd_lb_tx_dispatch(&dhd->pub);
+
+	return NETDEV_TX_OK;
+}
+#endif /* DHD_LB_TXP */
+
+int BCMFASTPATH
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	uint datalen;
+	void *pktbuf;
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp = NULL;
+	int ifidx;
+	unsigned long flags;
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+	uint8 htsfdlystat_sz = 0;
+#endif /* WLMEDIA_HTSF */
+#ifdef DHD_WMF
+	struct ether_header *eh;
+	uint8 *iph;
+#endif /* DHD_WMF */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhd_query_bus_erros(&dhd->pub)) {
+		return -ENODEV;
+	}
+
+	/* terence 2017029: Reject in early suspend */
+	if ((dhd->pub.conf->insuspend & NO_TXDATA_IN_SUSPEND) && dhd->pub.early_suspended) {
+		dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
+		/* To avoid packet loss, return NETDEV_TX_BUSY until runtime resume is done,
+		 * and stop the network queue temporarily until then.
+		 */
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		if (!dhdpcie_is_resume_done(&dhd->pub)) {
+			dhd_bus_stop_queue(dhd->pub.bus);
+		}
+		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+		dhd_os_busbusy_wake(&dhd->pub);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+#ifdef BCMPCIE
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+			__FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
+		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+#ifdef PCIE_FULL_DONGLE
+		/* Stop tx queues if suspend is in progress */
+		if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+			dhd_bus_stop_queue(dhd->pub.bus);
+		}
+#endif /* PCIE_FULL_DONGLE */
+		dhd_os_busbusy_wake(&dhd->pub);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+#else
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+			__FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
+	}
+#endif
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+	if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
+		dhd->pub.busstate = DHD_BUS_DOWN;
+	}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+	/* Reject if down */
+	if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
+		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+		netif_stop_queue(net);
+		/* Send Event when bus down detected during data session */
+		if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
+			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+			dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
+			net_os_send_hang_message(net);
+		}
+		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+		dhd_os_busbusy_wake(&dhd->pub);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	ifp = DHD_DEV_IFP(net);
+	ifidx = DHD_DEV_IFIDX(net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+		netif_stop_queue(net);
+		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+		dhd_os_busbusy_wake(&dhd->pub);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+	ASSERT(ifidx == dhd_net2idx(dhd, net));
+	ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
+
+	bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
+
+	/* Re-align the socket buffer if "skb->data" is at an odd address */
+	if (((unsigned long)(skb->data)) & 0x1) {
+		unsigned char *data = skb->data;
+		uint32 length = skb->len;
+		PKTPUSH(dhd->pub.osh, skb, 1);
+		memmove(skb->data, data, length);
+		PKTSETLEN(dhd->pub.osh, skb, length);
+	}
+
+	datalen  = PKTLEN(dhd->pub.osh, skb);
+
+	/* Make sure there's enough room for any header */
+	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+		struct sk_buff *skb2;
+
+		DHD_INFO(("%s: insufficient headroom\n",
+		          dhd_ifname(&dhd->pub, ifidx)));
+		dhd->pub.tx_realloc++;
+
+		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
+		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+		dev_kfree_skb(skb);
+		if ((skb = skb2) == NULL) {
+			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+			           dhd_ifname(&dhd->pub, ifidx)));
+			ret = -ENOMEM;
+			goto done;
+		}
+		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
+	}
+
+	/* Convert to packet */
+	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+		           dhd_ifname(&dhd->pub, ifidx)));
+		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
+		dev_kfree_skb_any(skb);
+		ret = -ENOMEM;
+		goto done;
+	}
+
+#if defined(WLMEDIA_HTSF)
+	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+		struct ether_header *eh = (struct ether_header *)pktdata;
+
+		if (!ETHER_ISMULTI(eh->ether_dhost) &&
+			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+#ifdef DHD_WET
+	/* WET-related packet proto manipulation should be done in DHD
+	 * since the dongle doesn't have the complete payload
+	 */
+	if (WET_ENABLED(&dhd->pub) &&
+			(dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
+		DHD_INFO(("%s:%s: wet send proc failed\n",
+				__FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
+		PKTFREE(dhd->pub.osh, pktbuf, FALSE);
+		ret =  -EFAULT;
+		goto done;
+	}
+#endif /* DHD_WET */
+
+#ifdef DHD_WMF
+	eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
+	iph = (uint8 *)eh + ETHER_HDR_LEN;
+
+	/* WMF processing for multicast packets
+	 * Only IPv4 packets are handled
+	 */
+	if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
+		(IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
+		((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+		void *sdu_clone;
+		bool ucast_convert = FALSE;
+#ifdef DHD_UCAST_UPNP
+		uint32 dest_ip;
+
+		dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+		ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
+#endif /* DHD_UCAST_UPNP */
+#ifdef DHD_IGMP_UCQUERY
+		ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
+			(IPV4_PROT(iph) == IP_PROT_IGMP) &&
+			(*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
+#endif /* DHD_IGMP_UCQUERY */
+		if (ucast_convert) {
+			dhd_sta_t *sta;
+			unsigned long flags;
+			struct list_head snapshot_list;
+			struct list_head *wmf_ucforward_list;
+
+			ret = NETDEV_TX_OK;
+
+			/* On non-BCM_GMAC3 platforms we need a snapshot of the sta list to
+			 * avoid a deadlock from nested DHD_IF_STA_LIST_LOCK calls.
+			 */
+			wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
+
+			/* Convert upnp/igmp query to unicast for each assoc STA */
+			list_for_each_entry(sta, wmf_ucforward_list, list) {
+				/* Skip sending to proxy interfaces of proxySTA */
+				if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) {
+					continue;
+				}
+				if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
+					ret = WMF_NOP;
+					break;
+				}
+				dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
+			}
+			DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
+
+			DHD_GENERAL_LOCK(&dhd->pub, flags);
+			DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+			dhd_os_busbusy_wake(&dhd->pub);
+			DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+			if (ret == NETDEV_TX_OK)
+				PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+
+			return ret;
+		} else
+#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
+		{
+			/* There is no STA info if the packet comes from a LAN host,
+			 * so pass NULL.
+			 */
+			ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
+			switch (ret) {
+			case WMF_TAKEN:
+			case WMF_DROP:
+				/* Either taken by WMF or we should drop it.
+				 * Exiting send path
+				 */
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+				dhd_os_busbusy_wake(&dhd->pub);
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+				DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+				return NETDEV_TX_OK;
+			default:
+				/* Continue the transmit path */
+				break;
+			}
+		}
+	}
+#endif /* DHD_WMF */
+#ifdef DHD_PSTA
+	/* PSR related packet proto manipulation should be done in DHD
+	 * since dongle doesn't have complete payload
+	 */
+	if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
+		ifidx, &pktbuf, TRUE) < 0)) {
+			DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
+				dhd_ifname(&dhd->pub, ifidx)));
+	}
+#endif /* DHD_PSTA */
+
+#ifdef DHDTCPACK_SUPPRESS
+	if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
+		/* If this packet has been held or freed, just return */
+		if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
+			ret = 0;
+			goto done;
+		}
+	} else {
+		/* If this packet has replaced another packet and got freed, just return */
+		if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
+			ret = 0;
+			goto done;
+		}
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/*
+	 * If Load Balance is enabled queue the packet
+	 * else send directly from here.
+	 */
+#if defined(DHD_LB_TXP)
+	ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
+#else
+	ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+#endif
+
+done:
+	if (ret) {
+		ifp->stats.tx_dropped++;
+		dhd->pub.tx_dropped++;
+	} else {
+#ifdef PROP_TXSTATUS
+		/* tx_packets can be counted here only when wlfc is disabled */
+		if (!dhd_wlfc_is_supported(&dhd->pub))
+#endif
+		{
+			dhd->pub.tx_packets++;
+			ifp->stats.tx_packets++;
+			ifp->stats.tx_bytes += datalen;
+		}
+	}
+
+
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+	dhd_os_busbusy_wake(&dhd->pub);
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+	DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	/* Return ok: we always eat the packet */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+	return 0;
+#else
+	return NETDEV_TX_OK;
+#endif
+}
+
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+	struct net_device *net;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(dhd);
+
+#ifdef DHD_LOSSLESS_ROAMING
+	/* block flowcontrol during roaming */
+	if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
+		return;
+	}
+#endif
+
+	if (ifidx == ALL_INTERFACES) {
+		/* Flow control on all active interfaces */
+		dhdp->txoff = state;
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				net = dhd->iflist[i]->net;
+				if (state == ON)
+					netif_stop_queue(net);
+				else
+					netif_wake_queue(net);
+			}
+		}
+	} else {
+		if (dhd->iflist[ifidx]) {
+			net = dhd->iflist[ifidx]->net;
+			if (state == ON)
+				netif_stop_queue(net);
+			else
+				netif_wake_queue(net);
+		}
+	}
+}
+
+
+#ifdef DHD_WMF
+bool
+dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+
+	return dhd->rxthread_enabled;
+}
+#endif /* DHD_WMF */
+
+#ifdef DHD_MCAST_REGEN
+/*
+ * Description: This function is called to do the reverse translation
+ *
+ * Input    eh - pointer to the ethernet header
+ */
+int32
+dhd_mcast_reverse_translation(struct ether_header *eh)
+{
+	uint8 *iph;
+	uint32 dest_ip;
+
+	iph = (uint8 *)eh + ETHER_HDR_LEN;
+	dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+
+	/* Only IP packets are handled */
+	if (eh->ether_type != hton16(ETHER_TYPE_IP))
+		return BCME_ERROR;
+
+	/* Non-IPv4 multicast packets are not handled */
+	if (IP_VER(iph) != IP_VER_4)
+		return BCME_ERROR;
+
+	/*
+	 * The packet has a multicast IP and unicast MAC. That means
+	 * we have to do the reverse translation
+	 */
+	if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
+		ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+#endif /* DHD_MCAST_REGEN */
+
+#ifdef SHOW_LOGTRACE
+static int
+dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	int ret = BCME_OK;
+	uint datalen;
+	bcm_event_msg_u_t evu;
+	void *data = NULL;
+	void *pktdata = NULL;
+	bcm_event_t *pvt_data;
+	uint pktlen;
+
+	DHD_TRACE(("%s:Enter\n", __FUNCTION__));
+
+	/* In dhd_rx_frame, header is stripped using skb_pull
+	 * of size ETH_HLEN, so adjust pktlen accordingly
+	 */
+	pktlen = skb->len + ETH_HLEN;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+	pktdata = (void *)skb_mac_header(skb);
+#else
+	pktdata = (void *)skb->mac.raw;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+
+	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
+
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+			__FUNCTION__, ret));
+		goto exit;
+	}
+
+	datalen = ntoh32(evu.event.datalen);
+
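+	/* The event payload starts immediately after the bcm_event_t header */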
+	pvt_data = (bcm_event_t *)pktdata;
+	data = &pvt_data[1];
+
+	dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
+
+exit:
+	return ret;
+}
+
+static void
+dhd_event_logtrace_process(struct work_struct * work)
+{
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, event_log_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	dhd_pub_t *dhdp;
+	struct sk_buff *skb;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dhdp = &dhd->pub;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
+		return;
+	}
+
+	DHD_TRACE(("%s:Enter\n", __FUNCTION__));
+
+	/* Loop until all skbs are dequeued */
+	while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
+#ifdef PCIE_FULL_DONGLE
+		int ifid;
+		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+		if (ifid == DHD_EVENT_IF) {
+			dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
+			/* For sending skb to network layer, convert it to Native PKT
+			 * after that assign skb->dev with Primary interface n/w device
+			 * as for infobuf events, we are sending special DHD_EVENT_IF
+			 */
+#ifdef DHD_USE_STATIC_CTRLBUF
+			PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+			PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			continue;
+		}
+		else {
+			dhd_event_logtrace_pkt_process(dhdp, skb);
+		}
+#else
+		dhd_event_logtrace_pkt_process(dhdp, skb);
+#endif /* PCIE_FULL_DONGLE */
+
+		/* If the DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT macro is defined,
+		 * the Info Ring and WLC_E_TRACE event skbs are freed here in DHD;
+		 * otherwise they are always sent up to the network layers.
+		 */
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+#ifdef DHD_USE_STATIC_CTRLBUF
+		PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+		PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#else /* !DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+		/* Do not call netif_receive_skb() as this workqueue scheduler is not from NAPI.
+		 * Also, as we are not in interrupt context, do not call netif_rx(); instead call
+		 * netif_rx_ni() (for kernel >= 2.6), which does netif_rx(), disables irqs, raises
+		 * the NET_RX softirq and re-enables interrupts.
+		 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+		netif_rx_ni(skb);
+#else
+		{
+			ulong flags;
+			netif_rx(skb);
+			local_irq_save(flags);
+			RAISE_RX_SOFTIRQ();
+			local_irq_restore(flags);
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+	}
+}
+
+void
+dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+#ifdef PCIE_FULL_DONGLE
+	/* Add ifidx in the PKTTAG */
+	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
+#endif /* PCIE_FULL_DONGLE */
+	skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
+
+	schedule_work(&dhd->event_log_dispatcher_work);
+}
+
+void
+dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+		PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+		PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+	}
+}
+#endif /* SHOW_LOGTRACE */
+
+/** Called when a frame is received by the dongle on interface 'ifidx' */
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void *data, *pnext = NULL;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+	int tout_rx = 0;
+	int tout_ctrl = 0;
+	void *skbhead = NULL;
+	void *skbprev = NULL;
+	uint16 protocol;
+	unsigned char *dump_data;
+#ifdef DHD_MCAST_REGEN
+	uint8 interface_role;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+#endif
+#ifdef DHD_WAKE_STATUS
+	int pkt_wake = 0;
+	wake_counts_t *wcp = NULL;
+#endif /* DHD_WAKE_STATUS */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+		struct ether_header *eh;
+
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+		/* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
+		 * special ifidx of DHD_EVENT_IF.  This is just internal to dhd to get the data from
+		 * dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
+		 */
+		if (ifidx == DHD_EVENT_IF) {
+			/* Event msg printing is called from dhd_rx_frame, which runs in tasklet
+			 * context for PCIe FD and in DPC context for other buses. If we get a
+			 * bunch of events from the dongle, printing all of them from tasklet/DPC
+			 * context, in the data path, is costly. In the new dongle SW (4359, 4355
+			 * onwards) console prints also arrive as events of type WLC_E_TRACE.
+			 * We print these console logs from workqueue context by enqueuing the SKB
+			 * here; dequeuing is done in the workqueue, and the SKB is freed there only
+			 * if DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined.
+			 */
+#ifdef SHOW_LOGTRACE
+			dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+#else /* !SHOW_LOGTRACE */
+		/* If SHOW_LOGTRACE not defined and ifidx is DHD_EVENT_IF,
+		 * free the PKT here itself
+		 */
+#ifdef DHD_USE_STATIC_CTRLBUF
+		PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+		PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#endif /* SHOW_LOGTRACE */
+			continue;
+		}
+#ifdef DHD_WAKE_STATUS
+#ifdef BCMDBUS
+		wcp = NULL;
+#else
+		pkt_wake = dhd_bus_get_bus_wake(dhdp);
+		wcp = dhd_bus_get_wakecount(dhdp);
+#endif /* BCMDBUS */
+		if (wcp == NULL) {
+			/* If wakeinfo count buffer is null do not update wake count values */
+			pkt_wake = 0;
+		}
+#endif /* DHD_WAKE_STATUS */
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+				__FUNCTION__));
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+
+		/* Dropping only data packets before registering net device to avoid kernel panic */
+#ifndef PROP_TXSTATUS_VSDB
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#else
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#endif /* PROP_TXSTATUS_VSDB */
+		{
+			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+			__FUNCTION__));
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+#ifdef PROP_TXSTATUS
+		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
+			/* WLFC may send a header-only packet when
+			 * there is an urgent message but no packet to
+			 * piggy-back on.
+			 */
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+#endif
+#ifdef DHD_L2_FILTER
+		/* If block_ping is enabled drop the ping packet */
+		if (ifp->block_ping) {
+			if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+			}
+		}
+		if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
+		    if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+				PKTCFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+		    }
+		}
+		if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+			int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
+
+			/* Drop the packets if l2 filter has processed it already
+			 * otherwise continue with the normal path
+			 */
+			if (ret == BCME_OK) {
+				PKTCFREE(dhdp->osh, pktbuf, TRUE);
+				continue;
+			}
+		}
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_MCAST_REGEN
+		DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+		ASSERT(if_flow_lkup);
+
+		interface_role = if_flow_lkup[ifidx].role;
+		DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+		if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
+				!DHD_IF_ROLE_AP(dhdp, ifidx) &&
+				ETHER_ISUCAST(eh->ether_dhost)) {
+			if (dhd_mcast_reverse_translation(eh) ==  BCME_OK) {
+#ifdef DHD_PSTA
+				/* Change bsscfg to primary bsscfg for unicast-multicast packets */
+				if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
+						(dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
+					if (ifidx != 0) {
+						/* Let the primary in PSTA interface handle this
+						 * frame after unicast to Multicast conversion
+						 */
+						ifp = dhd_get_ifp(dhdp, 0);
+						ASSERT(ifp);
+					}
+				}
+			}
+#endif /* DHD_PSTA */
+		}
+#endif /* DHD_MCAST_REGEN */
+
+#ifdef DHD_WMF
+		/* WMF processing for multicast packets */
+		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
+			dhd_sta_t *sta;
+			int ret;
+
+			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
+			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
+			switch (ret) {
+				case WMF_TAKEN:
+					/* The packet is taken by WMF. Continue to next iteration */
+					continue;
+				case WMF_DROP:
+					/* Packet DROP decision by WMF. Toss it */
+					DHD_ERROR(("%s: WMF decides to drop packet\n",
+						__FUNCTION__));
+					PKTCFREE(dhdp->osh, pktbuf, FALSE);
+					continue;
+				default:
+					/* Continue the transmit path */
+					break;
+			}
+		}
+#endif /* DHD_WMF */
+
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+#ifdef DHD_WET
+		/* WET-related packet proto manipulation should be done in DHD
+		 * since the dongle doesn't have the complete payload
+		 */
+		if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
+				pktbuf) < 0)) {
+			DHD_INFO(("%s:%s: wet recv proc failed\n",
+				__FUNCTION__, dhd_ifname(dhdp, ifidx)));
+		}
+#endif /* DHD_WET */
+
+#ifdef DHD_PSTA
+		if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
+				DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
+					dhd_ifname(dhdp, ifidx)));
+		}
+#endif /* DHD_PSTA */
+
+#ifdef PCIE_FULL_DONGLE
+		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+			(!ifp->ap_isolate)) {
+			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+			if (ETHER_ISUCAST(eh->ether_dhost)) {
+				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+					dhd_sendpkt(dhdp, ifidx, pktbuf);
+					continue;
+				}
+			} else {
+				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
+				if (npktbuf)
+					dhd_sendpkt(dhdp, ifidx, npktbuf);
+			}
+		}
+#endif /* PCIE_FULL_DONGLE */
+
+		/* Get the protocol, preserving the skb around eth_type_trans().
+		 * The main reason for this hack is the limitation of Linux 2.4, where
+		 * 'eth_type_trans' performs its internal skb_pull using
+		 * 'net->hard_header_len' instead of ETH_HLEN. To avoid copying packets
+		 * coming from the network stack when adding the BDC and hardware headers,
+		 * we set 'net->hard_header_len' at network interface registration to
+		 * ETH_HLEN plus the extra space required for those headers, not just
+		 * ETH_HLEN.
+		 */
+		eth = skb->data;
+		len = skb->len;
+
+		dump_data = skb->data;
+
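+		/* Read the EtherType from frame bytes 12-13 (wire big-endian) into host order */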
+		protocol = (skb->data[12] << 8) | skb->data[13];
+		if (protocol == ETHER_TYPE_802_1X) {
+			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
+			dhd_dump_eapol_4way_message(dhdp, dhd_ifname(dhdp, ifidx), dump_data, FALSE);
+		}
+
+		if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
+#ifdef DHD_DHCP_DUMP
+			dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
+#endif /* DHD_DHCP_DUMP */
+#ifdef DHD_ICMP_DUMP
+			dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
+#endif /* DHD_ICMP_DUMP */
+		}
+#ifdef DHD_RX_DUMP
+		dhd_trx_dump(dhd_idx2net(dhdp, ifidx), dump_data, skb->len, FALSE);
+#endif /* DHD_RX_DUMP */
+#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
+		if (pkt_wake) {
+			prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
+		}
+#endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
+
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+			ifp->stats.multicast++;
+		}
+
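+		/* eth_type_trans() above already pulled the header; restore the saved data
+		 * pointer and length so the skb_pull(ETH_HLEN) below strips exactly ETH_HLEN.
+		 */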
+		skb->data = eth;
+		skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+		dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+#ifdef DBG_PKT_MON
+		DHD_DBG_PKT_MON_RX(dhdp, skb);
+#endif /* DBG_PKT_MON */
+#ifdef DHD_PKT_LOGGING
+		DHD_PKTLOG_RX(dhdp, skb);
+#endif /* DHD_PKT_LOGGING */
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		memset(&event, 0, sizeof(event));
+
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			bcm_event_msg_u_t evu;
+			int ret_event;
+			int event_type;
+
+			ret_event = wl_host_event_get_data(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb_mac_header(skb),
+#else
+			skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+			len, &evu);
+
+			if (ret_event != BCME_OK) {
+				DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+					__FUNCTION__, ret_event));
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+				continue;
+			}
+
+			memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
+			event_type = ntoh32_ua((void *)&event.event_type);
+#ifdef SHOW_LOGTRACE
+			/* Event msg printing is called from dhd_rx_frame, which runs in tasklet
+			 * context for PCIe FD and in DPC context for other buses. If we get a
+			 * bunch of events from the dongle, printing all of them from tasklet/DPC
+			 * context, in the data path, is costly. In the new dongle SW (4359, 4355
+			 * onwards) console prints also arrive as events of type WLC_E_TRACE.
+			 * We print these console logs from workqueue context by enqueuing the SKB
+			 * here; dequeuing is done in the workqueue, and the SKB is freed there only
+			 * if DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined.
+			 */
+			if (event_type == WLC_E_TRACE) {
+				DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
+				dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+				continue;
+			}
+#endif /* SHOW_LOGTRACE */
+
+			ret_event = dhd_wl_host_event(dhd, ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb_mac_header(skb),
+#else
+			skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+			len, &event, &data);
+
+			wl_event_to_host_order(&event);
+			if (!tout_ctrl)
+				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
+
+#if defined(PNO_SUPPORT)
+			if (event_type == WLC_E_PFN_NET_FOUND) {
+				/* Enforce a custom wake lock to guarantee the kernel is not suspended */
+				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+			}
+#endif /* PNO_SUPPORT */
+			if (numpkt != 1) {
+				DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
+				__FUNCTION__));
+			}
+
+#ifdef DHD_WAKE_STATUS
+			if (unlikely(pkt_wake)) {
+#ifdef DHD_WAKE_EVENT_STATUS
+				if (event.event_type < WLC_E_LAST) {
+					wcp->rc_event[event.event_type]++;
+					wcp->rcwake++;
+					pkt_wake = 0;
+				}
+#endif /* DHD_WAKE_EVENT_STATUS */
+			}
+#endif /* DHD_WAKE_STATUS */
+
+			/* For the delete virtual interface event, wl_host_event returns a positive
+			 * i/f index; do not proceed, just free the pkt.
+			 */
+			if ((event_type == WLC_E_IF) && (ret_event > 0)) {
+				DHD_ERROR(("%s: interface is deleted. Free event packet\n",
+				__FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+				continue;
+			}
+
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+#ifdef DHD_USE_STATIC_CTRLBUF
+			PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			continue;
+#else
+			/*
+			 * For the event packets, there is a possibility of
+			 * ifidx getting modified. Thus, update the ifp once again.
+			 */
+			ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+			ifp = dhd->iflist[ifidx];
+#ifndef PROP_TXSTATUS_VSDB
+			if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
+#else
+			if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
+				dhd->pub.up))
+#endif /* PROP_TXSTATUS_VSDB */
+			{
+				DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
+				__FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+				PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+				continue;
+			}
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+		} else {
+			tout_rx = DHD_PACKET_TIMEOUT_MS;
+
+#ifdef PROP_TXSTATUS
+			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
+#endif /* PROP_TXSTATUS */
+
+#ifdef DHD_WAKE_STATUS
+			if (unlikely(pkt_wake)) {
+				wcp->rxwake++;
+#ifdef DHD_WAKE_RX_STATUS
+#define ETHER_ICMP6_HEADER	20
+#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
+#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
+#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
+
+				if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
+					wcp->rx_arp++;
+				if (dump_data[0] == 0xFF) { /* Broadcast */
+					wcp->rx_bcast++;
+				} else if (dump_data[0] & 0x01) { /* Multicast */
+					wcp->rx_mcast++;
+					if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
+					    wcp->rx_multi_ipv6++;
+					    if ((skb->len > ETHER_ICMP6_HEADER) &&
+					        (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
+					        wcp->rx_icmpv6++;
+					        if (skb->len > ETHER_ICMPV6_TYPE) {
+					            switch (dump_data[ETHER_ICMPV6_TYPE]) {
+					            case NDISC_ROUTER_ADVERTISEMENT:
+					                wcp->rx_icmpv6_ra++;
+					                break;
+					            case NDISC_NEIGHBOUR_ADVERTISEMENT:
+					                wcp->rx_icmpv6_na++;
+					                break;
+					            case NDISC_NEIGHBOUR_SOLICITATION:
+					                wcp->rx_icmpv6_ns++;
+					                break;
+					            }
+					        }
+					    }
+					} else if (dump_data[2] == 0x5E) {
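+						/* Third octet 0x5E matches the 01:00:5E IPv4 multicast MAC prefix */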
+						wcp->rx_multi_ipv4++;
+					} else {
+						wcp->rx_multi_other++;
+					}
+				} else { /* Unicast */
+					wcp->rx_ucast++;
+				}
+#undef ETHER_ICMP6_HEADER
+#undef ETHER_IPV6_SADDR
+#undef ETHER_IPV6_DAADR
+#undef ETHER_ICMPV6_TYPE
+#endif /* DHD_WAKE_RX_STATUS */
+				pkt_wake = 0;
+			}
+#endif /* DHD_WAKE_STATUS */
+		}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+		if (ifp->net)
+			ifp->net->last_rx = jiffies;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
+
+		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+			dhdp->dstats.rx_bytes += skb->len;
+			dhdp->rx_packets++; /* Local count */
+			ifp->stats.rx_bytes += skb->len;
+			ifp->stats.rx_packets++;
+		}
+
+		if (in_interrupt()) {
+			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+				__FUNCTION__, __LINE__);
+			DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#if defined(DHD_LB_RXP)
+			netif_receive_skb(skb);
+#else /* !defined(DHD_LB_RXP) */
+			netif_rx(skb);
+#endif /* !defined(DHD_LB_RXP) */
+			DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+		} else {
+			if (dhd->rxthread_enabled) {
+				if (!skbhead)
+					skbhead = skb;
+				else
+					PKTSETNEXT(dhdp->osh, skbprev, skb);
+				skbprev = skb;
+			} else {
+
+				/* If the receive is not processed inside an ISR,
+				 * the softirqd must be woken explicitly to service
+				 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
+				 * by netif_rx_ni(), but in earlier kernels, we need
+				 * to do it manually.
+				 */
+				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+					__FUNCTION__, __LINE__);
+
+#if defined(DHD_LB_RXP)
+				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+				netif_receive_skb(skb);
+				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#else /* !defined(DHD_LB_RXP) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+				netif_rx_ni(skb);
+				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#else
+				ulong flags;
+				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+				netif_rx(skb);
+				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+#endif /* !defined(DHD_LB_RXP) */
+			}
+		}
+	}
+
+	if (dhd->rxthread_enabled && skbhead)
+		dhd_sched_rxf(dhdp, skbhead);
+
+	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
+	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+	return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+
+	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type  = ntoh16(eh->ether_type);
+
+	if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) {
+		atomic_dec(&dhd->pend_8021x_cnt);
+	}
+
+#ifdef PROP_TXSTATUS
+	if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
+		dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
+		uint datalen  = PKTLEN(dhd->pub.osh, txp);
+		if (ifp != NULL) {
+			if (success) {
+				dhd->pub.tx_packets++;
+				ifp->stats.tx_packets++;
+				ifp->stats.tx_bytes += datalen;
+			} else {
+				ifp->stats.tx_dropped++;
+			}
+		}
+	}
+#endif
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		goto error;
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+		goto error;
+	}
+
+	ifp = dhd->iflist[ifidx];
+
+	if (!ifp) {
+		ASSERT(ifp);
+		DHD_ERROR(("%s: ifp is NULL\n", __FUNCTION__));
+		goto error;
+	}
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+	return &ifp->stats;
+
+error:
+	memset(&net->stats, 0, sizeof(net->stats));
+	return &net->stats;
+}
+
+#ifndef BCMDBUS
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
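+		/* Clamp the priority to the highest valid SCHED_FIFO priority (MAX_RT_PRIO - 1) */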
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	while (1) {
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+				dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+				/* Call the timesync module watchdog */
+				dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+#ifdef DHD_L2_FILTER
+				dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid) {
+					mod_timer(&dhd->timer,
+					    jiffies +
+					    msecs_to_jiffies(dhd_watchdog_ms) -
+					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+				}
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			}
+			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_watchdog(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	ulong data
+#endif
+)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	dhd_info_t *dhd = from_timer(dhd, t, timer);
+#else
+	dhd_info_t *dhd = (dhd_info_t *)data;
+#endif
+	unsigned long flags;
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+
+	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+	/* Call the timesync module watchdog */
+	dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+#ifdef DHD_L2_FILTER
+	dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+static int
+dhd_rpm_state_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	while (1) {
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+				if (dhd->pub.up) {
+					dhd_runtimepm_state(&dhd->pub);
+				}
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the runtime PM timer */
+				if (dhd->rpm_timer_valid) {
+					mod_timer(&dhd->rpm_timer,
+						jiffies +
+						msecs_to_jiffies(dhd_runtimepm_ms) -
+						min(msecs_to_jiffies(dhd_runtimepm_ms),
+							time_lapse));
+				}
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			}
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_runtimepm(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	ulong data
+#endif
+)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	dhd_info_t *dhd = from_timer(dhd, t, rpm_timer);
+#else
+	dhd_info_t *dhd = (dhd_info_t *)data;
+#endif
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rpm_ctl.sema);
+		return;
+	}
+}
+
+void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
+{
+	dhd_os_runtimepm_timer(dhdp, 0);
+	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+	DHD_ERROR(("DHD Runtime PM Disabled \n"));
+}
+
+void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
+{
+	if (dhd_get_idletime(dhdp)) {
+		dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
+		DHD_ERROR(("DHD Runtime PM Enabled \n"));
+	}
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+static void
+dhd_sched_policy(int prio)
+{
+	struct sched_param param;
+	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+		param.sched_priority = 0;
+		setScheduler(current, SCHED_NORMAL, &param);
+	} else {
+		if (get_scheduler_policy(current) != SCHED_FIFO) {
+			param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+			setScheduler(current, SCHED_FIFO, &param);
+		}
+	}
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+	struct cpufreq_freqs *freq = data;
+	if (dhd) {
+		if (!dhd->new_freq)
+			goto exit;
+		if (val == CPUFREQ_POSTCHANGE) {
+			DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+				freq->new, freq->cpu));
+			*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+		}
+	}
+exit:
+	return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
+
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_DPC_CPUCORE
+	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+	/* Run until signal received */
+	while (1) {
+		if (dhd->pub.conf->dpc_cpucore >= 0) {
+			printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
+			dhd->pub.conf->dpc_cpucore = -1;
+		}
+		if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+				int resched_cnt = 0;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+				while (dhd_bus_dpc(dhd->pub.bus)) {
+					/* process all data */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+					resched_cnt++;
+					if (resched_cnt > MAX_RESCHED_CNT) {
+						DHD_INFO(("%s Calling msleep to"
+							"let other processes run. \n",
+							__FUNCTION__));
+						dhd->pub.dhd_bug_on = true;
+						resched_cnt = 0;
+						OSL_SLEEP(1);
+					}
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+				}
+				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		} else {
+			break;
+		}
+	}
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_rxf_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) /  */
+	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+	dhd_pub_t *pub = &dhd->pub;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_rxf_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+	/* Run until signal received */
+	while (1) {
+		if (dhd->pub.conf->rxf_cpucore >= 0) {
+			printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
+			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
+			dhd->pub.conf->rxf_cpucore = -1;
+		}
+		if (down_interruptible(&tsk->sema) == 0) {
+			void *skb;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+			ulong flags;
+#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+			SMP_RD_BARRIER_DEPENDS();
+
+			if (tsk->terminated) {
+				break;
+			}
+			skb = dhd_rxf_dequeue(pub);
+
+			if (skb == NULL) {
+				continue;
+			}
+			while (skb) {
+				void *skbnext = PKTNEXT(pub->osh, skb);
+				PKTSETNEXT(pub->osh, skb, NULL);
+				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+					__FUNCTION__, __LINE__);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+
+#endif
+				skb = skbnext;
+			}
+#if defined(WAIT_DEQUEUE)
+			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+				OSL_SLEEP(1);
+				watchdogTime = OSL_SYSUPTIME();
+			}
+#endif
+
+			DHD_OS_WAKE_UNLOCK(pub);
+		} else {
+			break;
+		}
+	}
+	complete_and_exit(&tsk->completed, 0);
+}
+
+#ifdef BCMPCIE
+void dhd_dpc_enable(dhd_pub_t *dhdp)
+{
+#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
+	dhd_info_t *dhd;
+
+	if (!dhdp || !dhdp->info)
+		return;
+	dhd = dhdp->info;
+#endif /* DHD_LB_RXP || DHD_LB_TXP */
+
+#ifdef DHD_LB_RXP
+	__skb_queue_head_init(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_TXP
+	skb_queue_head_init(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+}
+#endif /* BCMPCIE */
+
+#ifdef BCMPCIE
+void
+dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		return;
+	}
+
+	dhd = dhdp->info;
+
+	if (!dhd) {
+		return;
+	}
+
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		tasklet_kill(&dhd->tasklet);
+		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+	}
+
+#ifdef DHD_LB
+#ifdef DHD_LB_RXP
+	cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+	__skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+	cancel_work_sync(&dhd->tx_dispatcher_work);
+	skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+
+	/* Kill the Load Balancing Tasklets */
+#if defined(DHD_LB_TXC)
+	tasklet_kill(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+	tasklet_kill(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
+#if defined(DHD_LB_TXP)
+	tasklet_kill(&dhd->tx_tasklet);
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+}
+
+void
+dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		return;
+	}
+
+	dhd = dhdp->info;
+
+	if (!dhd) {
+		return;
+	}
+
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		tasklet_kill(&dhd->tasklet);
+	}
+}
+#endif /* BCMPCIE */
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* This tasklet is scheduled in dhd_sched_dpc() [dhd_linux.c] below,
+	 * where the wake lock is taken; the tasklet is initialized in dhd_attach().
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
+		DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
+#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
+		if (dhd_bus_dpc(dhd->pub.bus)) {
+			tasklet_schedule(&dhd->tasklet);
+		}
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+	}
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+		DHD_OS_WAKE_LOCK(dhdp);
+		/* If binary_sema_up() fails, the semaphore was not raised,
+		 * so the wake unlock must be done here.
+		 */
+		if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
+			DHD_OS_WAKE_UNLOCK(dhdp);
+		}
+		return;
+	} else {
+		tasklet_schedule(&dhd->tasklet);
+	}
+}
+#endif /* BCMDBUS */
+
+static void
+dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+	int ret = BCME_OK;
+	int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+
+	DHD_OS_WAKE_LOCK(dhdp);
+
+	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
+#ifdef RXF_DEQUEUE_ON_BUSY
+	do {
+		ret = dhd_rxf_enqueue(dhdp, skb);
+		if (ret == BCME_OK || ret == BCME_ERROR)
+			break;
+		else
+			OSL_SLEEP(50); /* waiting for dequeueing */
+	} while (retry-- > 0);
+
+	if (retry <= 0 && ret == BCME_BUSY) {
+		void *skbp = skb;
+
+		while (skbp) {
+			void *skbnext = PKTNEXT(dhdp->osh, skbp);
+			PKTSETNEXT(dhdp->osh, skbp, NULL);
+			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+				__FUNCTION__, __LINE__);
+			netif_rx_ni(skbp);
+			skbp = skbnext;
+		}
+		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
+	} else {
+		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+			up(&dhd->thr_rxf_ctl.sema);
+		}
+	}
+#else /* RXF_DEQUEUE_ON_BUSY */
+	do {
+		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
+			break;
+	} while (1);
+	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rxf_ctl.sema);
+	}
+	return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+}
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	char buf[32];
+	int ret;
+
+	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+
+	if (ret < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
+				ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	int toe, ret;
+
+	/* Set toe_ol as requested */
+	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+	toe = (toe_ol != 0);
+	ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
+void dhd_set_scb_probe(dhd_pub_t *dhd)
+{
+	wl_scb_probe_t scb_probe;
+	int ret;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		return;
+	}
+
+	ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0,
+			(char *)&scb_probe, sizeof(scb_probe), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
+	}
+
+	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
+
+	ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(scb_probe),
+			NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
+		return;
+	}
+}
+#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+	snprintf(info->driver, sizeof(info->driver), "wl");
+	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any request driver name */
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		strncpy(drvname, info.driver, sizeof(info.driver));
+		drvname[sizeof(info.driver)-1] = '\0';
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		if (strcmp(drvname, "?dhd") == 0) {
+			snprintf(info.driver, sizeof(info.driver), "dhd");
+			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
+			info.version[sizeof(info.version) - 1] = '\0';
+		}
+
+		/* otherwise, require dongle to be up */
+		else if (!dhd->pub.up) {
+			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+			return -ENODEV;
+		}
+
+		/* finally, report dongle driver type */
+		else if (dhd->pub.iswl)
+			snprintf(info.driver, sizeof(info.driver), "wl");
+		else
+			snprintf(info.driver, sizeof(info.driver), "xx");
+
+		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
+		if (copy_to_user(uaddr, &info, sizeof(info)))
+			return -EFAULT;
+		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+		         (int)sizeof(drvname), drvname, info.driver));
+		break;
+
+#ifdef TOE
+	/* Get toe offload components from dongle */
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GTXCSUM:
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		edata.cmd = cmd;
+		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+		if (copy_to_user(uaddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+
+	/* Set toe offload components in dongle */
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_STXCSUM:
+		if (copy_from_user(&edata, uaddr, sizeof(edata)))
+			return -EFAULT;
+
+		/* Read the current settings, update and write back */
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		if (edata.data != 0)
+			toe_cmpnt |= csum_dir;
+		else
+			toe_cmpnt &= ~csum_dir;
+
+		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+			return ret;
+
+		/* If setting TX checksum mode, tell Linux the new mode */
+		if (cmd == ETHTOOL_STXCSUM) {
+			if (edata.data)
+				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+			else
+				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+
+		break;
+#endif /* TOE */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
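+/* Check whether an ioctl error or the bus state indicates the dongle is hung;
+ * if so, record a hang reason and send a HANG event to the upper layer.
+ * Returns TRUE when a HANG message was sent.
+ */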
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	if (!dhdp->up)
+		return FALSE;
+
+#if !defined(BCMPCIE) && !defined(BCMDBUS)
+	if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
+		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
+		return FALSE;
+	}
+#endif /* !BCMPCIE && !BCMDBUS */
+
+	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
+		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
+#ifdef BCMPCIE
+		DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
+			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
+			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
+#else
+		DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+#endif /* BCMPCIE */
+		if (dhdp->hang_reason == 0) {
+			if (dhdp->dongle_trap_occured) {
+				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
+#ifdef BCMPCIE
+			} else if (dhdp->d3ackcnt_timeout) {
+				dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
+#endif /* BCMPCIE */
+			} else {
+				dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
+			}
+		}
+		printf("%s\n", info_string);
+		net_os_send_hang_message(net);
+		return TRUE;
+	}
+	return FALSE;
+}
+
+#ifdef WL_MONITOR
+bool
+dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
+{
+	return (dhd->info->monitor_type != 0);
+}
+
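+/* Deliver a received monitor-mode packet to the radiotap interface. Split
+ * A-MSDU sub-frames are accumulated into a single skb before it is handed
+ * to the network stack.
+ */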
+void
+dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef HOST_RADIOTAP_CONV
+	uint16 len = 0, offset = 0;
+	monitor_pkt_info_t pkt_info;
+	memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker));
+	memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t));
+
+	if (!dhd->monitor_skb) {
+		if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
+			return;
+	}
+
+	len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt),
+		PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset);
+
+	if (dhd->monitor_type && dhd->monitor_dev)
+		dhd->monitor_skb->dev = dhd->monitor_dev;
+	else {
+		PKTFREE(dhdp->osh, pkt, FALSE);
+		dev_kfree_skb(dhd->monitor_skb);
+		return;
+	}
+
+	PKTFREE(dhdp->osh, pkt, FALSE);
+
+	if (!len) {
+		return;
+	}
+
+	skb_put(dhd->monitor_skb, len);
+	skb_pull(dhd->monitor_skb, offset);
+
+	dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+#else
+	uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
+		BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
+	switch (amsdu_flag) {
+		case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
+		default:
+			if (!dhd->monitor_skb) {
+				if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL)
+					return;
+			}
+
+			if (dhd->monitor_type && dhd->monitor_dev)
+				dhd->monitor_skb->dev = dhd->monitor_dev;
+			else {
+				PKTFREE(dhdp->osh, pkt, FALSE);
+				dhd->monitor_skb = NULL;
+				return;
+			}
+
+			dhd->monitor_skb->protocol =
+				eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+			dhd->monitor_len = 0;
+			break;
+		case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
+			if (!dhd->monitor_skb) {
+				if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
+					return;
+				dhd->monitor_len = 0;
+			}
+			if (dhd->monitor_type && dhd->monitor_dev)
+				dhd->monitor_skb->dev = dhd->monitor_dev;
+			else {
+				PKTFREE(dhdp->osh, pkt, FALSE);
+				dev_kfree_skb(dhd->monitor_skb);
+				return;
+			}
+			memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
+				PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+
+			dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
+			PKTFREE(dhdp->osh, pkt, FALSE);
+			return;
+		case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
+			memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
+				PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+			dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
+
+			PKTFREE(dhdp->osh, pkt, FALSE);
+			return;
+		case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
+			memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
+				PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+			dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
+
+			PKTFREE(dhdp->osh, pkt, FALSE);
+			skb_put(dhd->monitor_skb, dhd->monitor_len);
+			dhd->monitor_skb->protocol =
+				eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+			dhd->monitor_len = 0;
+			break;
+	}
+
+#endif /* HOST_RADIOTAP_CONV */
+	if (in_interrupt()) {
+		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
+			__FUNCTION__, __LINE__);
+		DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+		netif_rx(dhd->monitor_skb);
+		DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+	} else {
+		/* If the receive is not processed inside an ISR,
+		 * the softirqd must be woken explicitly to service
+		 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
+		 * by netif_rx_ni(), but in earlier kernels, we need
+		 * to do it manually.
+		 */
+		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
+			__FUNCTION__, __LINE__);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+		DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+		netif_rx_ni(dhd->monitor_skb);
+		DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#else
+		ulong flags;
+		DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+		netif_rx(dhd->monitor_skb);
+		DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+		local_irq_save(flags);
+		RAISE_RX_SOFTIRQ();
+		local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+	}
+
+	dhd->monitor_skb = NULL;
+}
+
+typedef struct dhd_mon_dev_priv {
+	struct net_device_stats stats;
+} dhd_mon_dev_priv_t;
+
+#define DHD_MON_DEV_PRIV_SIZE		(sizeof(dhd_mon_dev_priv_t))
+#define DHD_MON_DEV_PRIV(dev)		((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_MON_DEV_STATS(dev)		(((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
+
+static int
+dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
+{
+	PKTFREE(NULL, skb, FALSE);
+	return 0;
+}
+
+static int
+dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	return 0;
+}
+
+static struct net_device_stats*
+dhd_monitor_get_stats(struct net_device *dev)
+{
+	return &DHD_MON_DEV_STATS(dev);
+}
+
+static const struct net_device_ops netdev_monitor_ops =
+{
+	.ndo_start_xmit = dhd_monitor_start,
+	.ndo_get_stats = dhd_monitor_get_stats,
+	.ndo_do_ioctl = dhd_monitor_ioctl
+};
+
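+/* Deferred work handler: allocate and register the "radiotap%u" monitor
+ * net_device used for delivering monitor-mode frames.
+ */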
+static void
+dhd_add_monitor_if(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	struct net_device *dev;
+	char *devname;
+
+	if (event != DHD_WQ_WORK_IF_ADD) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
+	if (!dev) {
+		DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
+		return;
+	}
+
+	devname = "radiotap";
+
+	snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
+
+#ifndef ARPHRD_IEEE80211_PRISM  /* From Linux 2.4.18 */
+#define ARPHRD_IEEE80211_PRISM 802
+#endif
+
+#ifndef ARPHRD_IEEE80211_RADIOTAP
+#define ARPHRD_IEEE80211_RADIOTAP	803 /* IEEE 802.11 + radiotap header */
+#endif /* ARPHRD_IEEE80211_RADIOTAP */
+
+	dev->type = ARPHRD_IEEE80211_RADIOTAP;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	dev->hard_start_xmit = dhd_monitor_start;
+	dev->do_ioctl = dhd_monitor_ioctl;
+	dev->get_stats = dhd_monitor_get_stats;
+#else
+	dev->netdev_ops = &netdev_monitor_ops;
+#endif
+
+	if (register_netdev(dev)) {
+		DHD_ERROR(("%s, register_netdev failed for %s\n",
+			__FUNCTION__, dev->name));
+		free_netdev(dev);
+	}
+
+	bcmwifi_monitor_create(&dhd->monitor_info);
+	dhd->monitor_dev = dev;
+}
+
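+/* Deferred work handler: unregister and free the monitor net_device and
+ * release the associated monitor state.
+ */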
+static void
+dhd_del_monitor_if(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+
+	if (event != DHD_WQ_WORK_IF_DEL) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd->monitor_dev) {
+		unregister_netdev(dhd->monitor_dev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+		MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
+		MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
+#else
+		free_netdev(dhd->monitor_dev);
+#endif /* 2.6.24 */
+
+		dhd->monitor_dev = NULL;
+	}
+
+	if (dhd->monitor_info) {
+		bcmwifi_monitor_delete(dhd->monitor_info);
+		dhd->monitor_info = NULL;
+	}
+}
+
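+/* Enable or disable monitor mode: schedules deferred work to add or delete
+ * the monitor interface depending on val.
+ */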
+static void
+dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val)
+{
+	dhd_info_t *info = dhd->info;
+
+	DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
+	if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) {
+		DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__));
+		return;
+	}
+
+	/* Delete monitor */
+	if (!val) {
+		info->monitor_type = val;
+		dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL,
+			dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
+		return;
+	}
+
+	/* Add monitor */
+	info->monitor_type = val;
+	dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD,
+		dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
+}
+#endif /* WL_MONITOR */
+
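+/* Process an ioctl for the given interface: DHD-private ioctls are handled
+ * locally, WL ioctls are forwarded to the dongle (starting the bus first if
+ * needed), and a hang check is performed on completion.
+ */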
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
+{
+	int bcmerror = BCME_OK;
+	int buflen = 0;
+	struct net_device *net;
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	if (ioc->cmd == WLC_SET_WPA_AUTH) {
+		int wpa_auth;
+
+		wpa_auth = *((int *)ioc->buf);
+		DHD_INFO(("wpa_auth:%d\n", wpa_auth));
+		if (wpa_auth != WPA_AUTH_DISABLED) {
+			/* If AP is with security then enable WLC_E_PSK_SUP event checking */
+			dhd_set_join_error(pub, WLC_WPA_MASK);
+		} else {
+			/* If AP is with open then disable WLC_E_PSK_SUP event checking */
+			dhd_clear_join_error(pub, WLC_WPA_MASK);
+		}
+	}
+
+	if (ioc->cmd == WLC_SET_AUTH) {
+		int auth;
+		auth = *((int *)ioc->buf);
+		DHD_INFO(("Auth:%d\n", auth));
+
+		if (auth != WL_AUTH_OPEN_SYSTEM) {
+			/* If AP is with security then enable WLC_E_PSK_SUP event checking */
+			dhd_set_join_error(pub, WLC_WPA_MASK);
+		} else {
+			/* If AP is with open then disable WLC_E_PSK_SUP event checking */
+			dhd_clear_join_error(pub, WLC_WPA_MASK);
+		}
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+	net = dhd_idx2net(pub, ifidx);
+	if (!net) {
+		bcmerror = BCME_BADARG;
+		goto done;
+	}
+
+	/* check for local dhd ioctl and handle it */
+	if (ioc->driver == DHD_IOCTL_MAGIC) {
+		/* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
+		if (data_buf)
+			buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
+		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
+		if (bcmerror)
+			pub->bcmerror = bcmerror;
+		goto done;
+	}
+
+	/* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
+	if (data_buf)
+		buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
+
+#ifndef BCMDBUS
+	/* send to dongle (must be up, and wl). */
+	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
+		if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
+			int ret;
+			if (atomic_read(&exit_in_progress)) {
+				DHD_ERROR(("%s module exit in progress\n", __func__));
+				bcmerror = BCME_DONGLE_DOWN;
+				goto done;
+			}
+			ret = dhd_bus_start(pub);
+			if (ret != 0) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				bcmerror = BCME_DONGLE_DOWN;
+				goto done;
+			}
+		} else {
+			bcmerror = BCME_DONGLE_DOWN;
+			goto done;
+		}
+	}
+
+	if (!pub->iswl) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+#endif /* !BCMDBUS */
+
+	/*
+	 * Flush the TX queue if required for proper message serialization:
+	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+	 * prevent M4 encryption and
+	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+	 * prevent disassoc frame being sent before WPS-DONE frame.
+	 */
+	if (ioc->cmd == WLC_SET_KEY ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("wsec_key", data_buf, 9) == 0) ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
+	    ioc->cmd == WLC_DISASSOC)
+		dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+	if (data_buf) {
+		/*  short cut wl ioctl calls here  */
+		if (strcmp("htsf", data_buf) == 0) {
+			dhd_ioctl_htsf_get(dhd, 0);
+			return BCME_OK;
+		}
+
+		if (strcmp("htsflate", data_buf) == 0) {
+			if (ioc->set) {
+				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+				memset(&maxdelayts, 0, sizeof(tstamp_t));
+				maxdelay = 0;
+				tspktcnt = 0;
+				maxdelaypktno = 0;
+				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			} else {
+				dhd_dump_latency();
+			}
+			return BCME_OK;
+		}
+		if (strcmp("htsfclear", data_buf) == 0) {
+			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			htsf_seqnum = 0;
+			return BCME_OK;
+		}
+		if (strcmp("htsfhis", data_buf) == 0) {
+			dhd_dump_htsfhisto(&vi_d1, "H to D");
+			dhd_dump_htsfhisto(&vi_d2, "D to D");
+			dhd_dump_htsfhisto(&vi_d3, "D to H");
+			dhd_dump_htsfhisto(&vi_d4, "H to H");
+			return BCME_OK;
+		}
+		if (strcmp("tsport", data_buf) == 0) {
+			if (ioc->set) {
+				memcpy(&tsport, data_buf + 7, 4);
+			} else {
+				DHD_ERROR(("current timestamp port: %d \n", tsport));
+			}
+			return BCME_OK;
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+
+	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
+		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
+#ifdef BCM_FD_AGGR
+		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+#else
+		bcmerror = BCME_UNSUPPORTED;
+#endif
+		goto done;
+	}
+	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+
+#ifdef WL_MONITOR
+	/* Intercept monitor ioctl here, add/del monitor if */
+	if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
+		dhd_set_monitor(pub, ifidx, *(int32*)data_buf);
+	}
+#endif
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	if (ioc->cmd == WLC_SCAN && bcmerror == 0) {
+		dhd_start_scan_timer(pub);
+	}
+	if (ioc->cmd == WLC_SET_SSID && bcmerror == 0) {
+		dhd_start_join_timer(pub);
+	}
+#endif  /* REPORT_FATAL_TIMEOUTS */
+
+done:
+	dhd_check_hang(net, pub, bcmerror);
+
+	return bcmerror;
+}
+
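+/* net_device ioctl entry point (ndo_do_ioctl): dispatches wireless-extension,
+ * ethtool and Android private commands, otherwise copies the wl_ioctl_t
+ * request from user space into a kernel buffer and passes it to
+ * dhd_ioctl_process(), copying any result back to the caller.
+ */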
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_ioctl_t ioc;
+	int bcmerror = 0;
+	int ifidx;
+	int ret;
+	void *local_buf = NULL;
+	void __user *ioc_buf_user = NULL;
+	u16 buflen = 0;
+
+	if (atomic_read(&exit_in_progress)) {
+		DHD_ERROR(("%s module exit in progress\n", __func__));
+		bcmerror = BCME_DONGLE_DOWN;
+		return OSL_ERROR(bcmerror);
+	}
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	/* Interface up check for built-in type */
+	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
+		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return OSL_ERROR(BCME_NOTUP);
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -1;
+	}
+
+#if defined(WL_WIRELESS_EXT)
+	/* linux wireless extensions */
+	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+		/* may recurse, do NOT lock */
+		ret = wl_iw_ioctl(net, ifr, cmd);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+	if (cmd == SIOCETHTOOL) {
+		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(net, ifr, cmd);
+		dhd_check_hang(net, &dhd->pub, ret);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+
+	if (cmd != SIOCDEVPRIVATE) {
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -EOPNOTSUPP;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+	if (in_compat_syscall())
+#else
+	if (is_compat_task())
+#endif
+	{
+		compat_wl_ioctl_t compat_ioc;
+		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		ioc.cmd = compat_ioc.cmd;
+		if (ioc.cmd & WLC_SPEC_FLAG) {
+			memset(&ioc, 0, sizeof(ioc));
+			/* Copy the ioc control structure part of ioctl request */
+			if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+				bcmerror = BCME_BADADDR;
+				goto done;
+			}
+			ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
+
+			/* To differentiate between wl and dhd read 4 more bytes */
+			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+				sizeof(uint)) != 0)) {
+				bcmerror = BCME_BADADDR;
+				goto done;
+			}
+
+		} else { /* ioc.cmd & WLC_SPEC_FLAG */
+			ioc.buf = compat_ptr(compat_ioc.buf);
+			ioc.len = compat_ioc.len;
+			ioc.set = compat_ioc.set;
+			ioc.used = compat_ioc.used;
+			ioc.needed = compat_ioc.needed;
+			/* To differentiate between wl and dhd read 4 more bytes */
+			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
+				sizeof(uint)) != 0)) {
+				bcmerror = BCME_BADADDR;
+				goto done;
+			}
+		} /* ioc.cmd & WLC_SPEC_FLAG */
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		/* Copy the ioc control structure part of ioctl request */
+		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+#ifdef CONFIG_COMPAT
+		ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure the flag is clear when it isn't a compat task */
+#endif
+
+		/* To differentiate between wl and dhd read 4 more bytes */
+		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+			sizeof(uint)) != 0)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+	}
+
+	if (!capable(CAP_NET_ADMIN)) {
+		bcmerror = BCME_EPERM;
+		goto done;
+	}
+
+	/* Take backup of ioc.buf and restore later */
+	ioc_buf_user = ioc.buf;
+
+	if (ioc.len > 0) {
+		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		if (copy_from_user(local_buf, ioc.buf, buflen)) {
+			DHD_PERIM_LOCK(&dhd->pub);
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		DHD_PERIM_LOCK(&dhd->pub);
+
+		*((char *)local_buf + buflen) = '\0';
+
+		/* For some platforms accessing userspace memory
+		 * of ioc.buf is causing kernel panic, so to avoid that
+		 * make ioc.buf pointing to kernel space memory local_buf
+		 */
+		ioc.buf = local_buf;
+	}
+
+	/* Skip all the non DHD iovars (wl iovars) after f/w hang */
+	if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
+		DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+
+	/* Restore back userspace pointer to ioc.buf */
+	ioc.buf = ioc_buf_user;
+
+	if (!bcmerror && buflen && local_buf && ioc.buf) {
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		if (copy_to_user(ioc.buf, local_buf, buflen))
+			bcmerror = -EFAULT;
+		DHD_PERIM_LOCK(&dhd->pub);
+	}
+
+done:
+	if (local_buf)
+		MFREE(dhd->pub.osh, local_buf, buflen+1);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return OSL_ERROR(bcmerror);
+}
+
+
+#ifdef FIX_CPU_MIN_CLOCK
+static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
+{
+	if (dhd) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+		mutex_init(&dhd->cpufreq_fix);
+#endif
+		dhd->cpufreq_fix_status = FALSE;
+	}
+	return 0;
+}
+
+static void dhd_fix_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhd->cpufreq_fix);
+#endif
+	if (dhd && !dhd->cpufreq_fix_status) {
+		pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
+#ifdef FIX_BUS_MIN_CLOCK
+		pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
+#endif /* FIX_BUS_MIN_CLOCK */
+		DHD_ERROR(("pm_qos_add_requests called\n"));
+
+		dhd->cpufreq_fix_status = TRUE;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&dhd->cpufreq_fix);
+#endif
+}
+
+static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhd->cpufreq_fix);
+#endif
+	if (dhd && dhd->cpufreq_fix_status != TRUE) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+		mutex_unlock(&dhd->cpufreq_fix);
+#endif
+		return;
+	}
+
+	pm_qos_remove_request(&dhd->dhd_cpu_qos);
+#ifdef FIX_BUS_MIN_CLOCK
+	pm_qos_remove_request(&dhd->dhd_bus_qos);
+#endif /* FIX_BUS_MIN_CLOCK */
+	DHD_ERROR(("pm_qos_remove_requests called\n"));
+
+	dhd->cpufreq_fix_status = FALSE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&dhd->cpufreq_fix);
+#endif
+}
+#endif /* FIX_CPU_MIN_CLOCK */
+
+#if defined(BT_OVER_SDIO)
+
+void
+dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
+{
+	dhdp->info->bus_user_count++;
+}
+
+void
+dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
+{
+	dhdp->info->bus_user_count--;
+}
+
+/* Return values:
+ * Success: Returns 0
+ * Failure: Returns -1 or errno code
+ */
+int
+dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	int ret = 0;
+
+	mutex_lock(&dhd->bus_user_lock);
+	++dhd->bus_user_count;
+	if (dhd->bus_user_count < 0) {
+		DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (dhd->bus_user_count == 1) {
+
+		dhd->pub.hang_was_sent = 0;
+
+		/* First user, turn on WL_REG, start the bus */
+		DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__));
+
+		if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
+			/* Enable F1 */
+			ret = dhd_bus_resume(dhdp, 0);
+			if (ret) {
+				DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
+					__FUNCTION__, ret));
+				goto exit;
+			}
+		}
+
+		dhd_update_fw_nv_path(dhd);
+		/* update firmware and nvram path to sdio bus */
+		dhd_bus_update_fw_nv_path(dhd->pub.bus,
+			dhd->fw_path, dhd->nv_path);
+		/* download the firmware, Enable F2 */
+		/* TODO: Should be done only in case of FW switch */
+		ret = dhd_bus_devreset(dhdp, FALSE);
+		dhd_bus_resume(dhdp, 1);
+		if (!ret) {
+			if (dhd_sync_with_dongle(&dhd->pub) < 0) {
+				DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
+				ret = -EFAULT;
+			}
+		} else {
+			DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
+		}
+	} else {
+		DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
+			__FUNCTION__, dhd->bus_user_count));
+	}
+exit:
+	mutex_unlock(&dhd->bus_user_lock);
+	return ret;
+}
+EXPORT_SYMBOL(dhd_bus_get);
+
+/* Return values:
+ * Success: Returns 0
+ * Failure: Returns -1 or errno code
+ */
+int
+dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	int ret = 0;
+	BCM_REFERENCE(owner);
+
+	mutex_lock(&dhd->bus_user_lock);
+	--dhd->bus_user_count;
+	if (dhd->bus_user_count < 0) {
+		DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
+		dhd->bus_user_count = 0;
+		ret = -1;
+		goto exit;
+	}
+
+	if (dhd->bus_user_count == 0) {
+		/* Last user, stop the bus and turn Off WL_REG */
+		DHD_ERROR(("%s(): There are no owners left, turn off WL_REG & stop the bus \r\n",
+			__FUNCTION__));
+#ifdef PROP_TXSTATUS
+		if (dhd->pub.wlfc_enabled) {
+			dhd_wlfc_deinit(&dhd->pub);
+		}
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+		if (dhd->pub.pno_state) {
+			dhd_pno_deinit(&dhd->pub);
+		}
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+		if (dhd->pub.rtt_state) {
+			dhd_rtt_deinit(&dhd->pub);
+		}
+#endif /* RTT_SUPPORT */
+		ret = dhd_bus_devreset(dhdp, TRUE);
+		if (!ret) {
+			dhd_bus_suspend(dhdp);
+			wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
+		}
+	} else {
+		DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
+			__FUNCTION__, dhd->bus_user_count));
+	}
+exit:
+	mutex_unlock(&dhd->bus_user_lock);
+	return ret;
+}
+EXPORT_SYMBOL(dhd_bus_put);
+
+int
+dhd_net_bus_get(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_get(&dhd->pub, WLAN_MODULE);
+}
+
+int
+dhd_net_bus_put(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_put(&dhd->pub, WLAN_MODULE);
+}
+
+/*
+ * Function to enable the Bus Clock
+ * Returns BCME_OK on success and BCME_xxx on failure
+ *
+ * This function must be called from a sleepable context.
+ */
+int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+	int ret;
+
+	dhd_os_sdlock(dhdp);
+	/*
+	 * The second argument is TRUE, that means, we expect
+	 * the function to "wait" until the clocks are really
+	 * available
+	 */
+	ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
+	dhd_os_sdunlock(dhdp);
+
+	return ret;
+}
+EXPORT_SYMBOL(dhd_bus_clk_enable);
+
+/*
+ * Function to disable the Bus Clock
+ * Returns BCME_OK on success and BCME_xxx on failure
+ *
+ * This function must be called from a sleepable context.
+ */
+int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+	int ret;
+
+	dhd_os_sdlock(dhdp);
+	/*
+	 * The second argument is TRUE, that means, we expect
+	 * the function to "wait" until the clocks are really
+	 * disabled
+	 */
+	ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
+	dhd_os_sdunlock(dhdp);
+
+	return ret;
+}
+EXPORT_SYMBOL(dhd_bus_clk_disable);
+
+/*
+ * Function to reset bt_use_count counter to zero.
+ *
+ * This function must be called from a sleepable context.
+ */
+void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+	/* take the lock and reset bt use count */
+	dhd_os_sdlock(dhdp);
+	dhdsdio_reset_bt_use_count(dhdp->bus);
+	dhd_os_sdunlock(dhdp);
+}
+EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
+
+#endif /* BT_OVER_SDIO */
+
+#define MAX_TRY_CNT             5 /* Number of tries to disable deepsleep */
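+/* Enable (flag = 1) or disable (flag = 0) firmware deepsleep. Disabling is
+ * retried up to MAX_TRY_CNT times; MPC is toggled to the opposite state.
+ */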
+int dhd_deepsleep(dhd_info_t *dhd, int flag)
+{
+	char iovbuf[20];
+	uint powervar = 0;
+	dhd_pub_t *dhdp;
+	int cnt = 0;
+	int ret = 0;
+
+	dhdp = &dhd->pub;
+
+	switch (flag) {
+		case 1 :  /* Deepsleep on */
+			DHD_ERROR(("dhd_deepsleep: ON\n"));
+			/* give some time to sysioc_work before deepsleep */
+			OSL_SLEEP(200);
+#ifdef PKT_FILTER_SUPPORT
+			/* disable pkt filter */
+			dhd_enable_packet_filter(0, dhdp);
+#endif /* PKT_FILTER_SUPPORT */
+			/* Disable MPC */
+			powervar = 0;
+			memset(iovbuf, 0, sizeof(iovbuf));
+			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+			/* Enable Deepsleep */
+			powervar = 1;
+			memset(iovbuf, 0, sizeof(iovbuf));
+			bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			break;
+
+		case 0: /* Deepsleep Off */
+			DHD_ERROR(("dhd_deepsleep: OFF\n"));
+
+			/* Disable Deepsleep */
+			for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
+				powervar = 0;
+				memset(iovbuf, 0, sizeof(iovbuf));
+				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
+					iovbuf, sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
+					sizeof(iovbuf), TRUE, 0);
+
+				memset(iovbuf, 0, sizeof(iovbuf));
+				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
+					iovbuf, sizeof(iovbuf));
+				if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
+					sizeof(iovbuf),	FALSE, 0)) < 0) {
+					DHD_ERROR(("failed to get dhd deepsleep status,"
+						" ret: %d\n", ret));
+				} else {
+					if (!(*(int *)iovbuf)) {
+						DHD_ERROR(("deepsleep mode is 0,"
+							" count: %d\n", cnt));
+						break;
+					}
+				}
+			}
+
+			/* Enable MPC */
+			powervar = 1;
+			memset(iovbuf, 0, sizeof(iovbuf));
+			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			break;
+	}
+
+	return 0;
+}
+
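+/* ndo_stop handler: stop the TX queue, bring down cfg80211/WEXT state, clean
+ * up virtual interfaces and offload/log-trace resources, stop the protocol
+ * module and, for the dynamic-load case, power the WLAN chip off.
+ */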
+static int
+dhd_stop(struct net_device *net)
+{
+	int ifidx = 0;
+#ifdef WL_CFG80211
+	unsigned long flags = 0;
+#endif /* WL_CFG80211 */
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+	printf("%s: Enter %p\n", __FUNCTION__, net);
+	dhd->pub.rxcnt_timeout = 0;
+	dhd->pub.txcnt_timeout = 0;
+
+#ifdef BCMPCIE
+	dhd->pub.d3ackcnt_timeout = 0;
+#endif /* BCMPCIE */
+
+	if (dhd->pub.up == 0) {
+		goto exit;
+	}
+#if defined(DHD_HANG_SEND_UP_TEST)
+	if (dhd->pub.req_hang_type) {
+		DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+			__FUNCTION__, dhd->pub.req_hang_type));
+		dhd->pub.req_hang_type = 0;
+	}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+	dhd_if_flush_sta(DHD_DEV_IFP(net));
+
+	/* Disable Runtime PM before interface down */
+	DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+
+#ifdef FIX_CPU_MIN_CLOCK
+	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
+		dhd_rollback_cpu_freq(dhd);
+#endif /* FIX_CPU_MIN_CLOCK */
+
+	ifidx = dhd_net2idx(dhd, net);
+	BCM_REFERENCE(ifidx);
+
+	/* Set state and stop OS transmissions */
+	netif_stop_queue(net);
+#ifdef WL_CFG80211
+	spin_lock_irqsave(&dhd->pub.up_lock, flags);
+	dhd->pub.up = 0;
+	spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
+#else
+	dhd->pub.up = 0;
+#endif /* WL_CFG80211 */
+
+#ifdef WL_CFG80211
+	if (ifidx == 0) {
+		dhd_if_t *ifp;
+		wl_cfg80211_down(net);
+
+		ifp = dhd->iflist[0];
+		ASSERT(ifp && ifp->net);
+		/*
+		 * For CFG80211: Clean up all the left over virtual interfaces
+		 * when the primary Interface is brought down. [ifconfig wlan0 down]
+		 */
+		if (!dhd_download_fw_on_driverload) {
+			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+				int i;
+#ifdef WL_CFG80211_P2P_DEV_IF
+				wl_cfg80211_del_p2p_wdev(net);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+				dhd_net_if_lock_local(dhd);
+				for (i = 1; i < DHD_MAX_IFS; i++)
+					dhd_remove_if(&dhd->pub, i, FALSE);
+
+				if (ifp && ifp->net) {
+					dhd_if_del_sta_list(ifp);
+				}
+#ifdef ARP_OFFLOAD_SUPPORT
+				if (dhd_inetaddr_notifier_registered) {
+					dhd_inetaddr_notifier_registered = FALSE;
+					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+				}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+				if (dhd_inet6addr_notifier_registered) {
+					dhd_inet6addr_notifier_registered = FALSE;
+					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+				}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+				dhd_net_if_unlock_local(dhd);
+			}
+#if 0
+			// terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
+			cancel_work_sync(dhd->dhd_deferred_wq);
+#endif
+
+#ifdef SHOW_LOGTRACE
+			/* Wait till event_log_dispatcher_work finishes */
+			cancel_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* SHOW_LOGTRACE */
+
+#if defined(DHD_LB_RXP)
+			__skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+			skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+		}
+
+		argos_register_notifier_deinit();
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB_RXP)
+		if (ifp->net == dhd->rx_napi_netdev) {
+			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
+				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+			skb_queue_purge(&dhd->rx_napi_queue);
+			napi_disable(&dhd->rx_napi_struct);
+			netif_napi_del(&dhd->rx_napi_struct);
+			dhd->rx_napi_netdev = NULL;
+		}
+#endif /* DHD_LB_RXP */
+	}
+#endif /* WL_CFG80211 */
+
+	DHD_SSSR_DUMP_DEINIT(&dhd->pub);
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
+#endif
+#ifdef SHOW_LOGTRACE
+	if (!dhd_download_fw_on_driverload) {
+		/* Release the skbs from queue for WLC_E_TRACE event */
+		dhd_event_logtrace_flush_queue(&dhd->pub);
+		if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
+			if (dhd->event_data.fmts) {
+				MFREE(dhd->pub.osh, dhd->event_data.fmts,
+					dhd->event_data.fmts_size);
+				dhd->event_data.fmts = NULL;
+			}
+			if (dhd->event_data.raw_fmts) {
+				MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
+					dhd->event_data.raw_fmts_size);
+				dhd->event_data.raw_fmts = NULL;
+			}
+			if (dhd->event_data.raw_sstr) {
+				MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
+					dhd->event_data.raw_sstr_size);
+				dhd->event_data.raw_sstr = NULL;
+			}
+			if (dhd->event_data.rom_raw_sstr) {
+				MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
+					dhd->event_data.rom_raw_sstr_size);
+				dhd->event_data.rom_raw_sstr = NULL;
+			}
+			dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
+		}
+	}
+#endif /* SHOW_LOGTRACE */
+#ifdef APF
+	dhd_dev_apf_delete_filter(net);
+#endif /* APF */
+
+	/* Stop the protocol module */
+	dhd_prot_stop(&dhd->pub);
+
+	OLD_MOD_DEC_USE_COUNT;
+exit:
+#if defined(WL_WIRELESS_EXT)
+	if (ifidx == 0) {
+#ifdef WL_ESCAN
+		wl_escan_down(&dhd->pub);
+#else
+		wl_iw_down(&dhd->pub);
+#endif /* WL_ESCAN */
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+	if (ifidx == 0 && !dhd_download_fw_on_driverload) {
+#if defined(BT_OVER_SDIO)
+		dhd_bus_put(&dhd->pub, WLAN_MODULE);
+		wl_android_set_wifi_on_flag(FALSE);
+#else
+		wl_android_wifi_off(net, TRUE);
+#ifdef WL_EXT_IAPSTA
+		wl_ext_iapsta_dettach_netdev(net, ifidx);
+#endif
+	} else {
+		if (dhd->pub.conf->deepsleep)
+			dhd_deepsleep(dhd, 1);
+#endif /* BT_OVER_SDIO */
+	}
+	dhd->pub.hang_was_sent = 0;
+
+	/* Clear country spec for built-in type driver */
+	if (!dhd_download_fw_on_driverload) {
+		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
+		dhd->pub.dhd_cspec.rev = 0;
+		dhd->pub.dhd_cspec.ccode[0] = 0x00;
+	}
+
+#ifdef BCMDBGFS
+	dhd_dbgfs_remove();
+#endif
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	/* Destroy wakelock */
+	if (!dhd_download_fw_on_driverload &&
+		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		DHD_OS_WAKE_LOCK_DESTROY(dhd);
+		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
+	}
+	printf("%s: Exit\n", __FUNCTION__);
+
+	return 0;
+}
+
+#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
+extern bool g_first_broadcast_scan;
+#endif 
+
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+	uint32 enable = true;
+	int ret = BCME_OK;
+
+	ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
+#endif /* WL11U */
+
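+/* ndo_open handler: power on WLAN if needed, bring up the bus and download
+ * firmware, set the MAC address on the net_device, initialize NAPI/cfg80211/
+ * WEXT state and finally allow transmit by starting the TX queue.
+ */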
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+#ifdef TOE
+	uint32 toe_ol;
+#endif
+#ifdef BCM_FD_AGGR
+	char iovbuf[WLC_IOCTL_SMLEN];
+	dbus_config_t config;
+	uint32 agglimit = 0;
+	uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
+#endif /* BCM_FD_AGGR */
+	int ifidx;
+	int32 ret = 0;
+#if defined(OOB_INTR_ONLY)
+	uint32 bus_type = -1;
+	uint32 bus_num = -1;
+	uint32 slot_num = -1;
+	wifi_adapter_info_t *adapter = NULL;
+#endif
+#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
+	int bytes_written = 0;
+	struct dhd_conf *conf;
+#endif
+
+	if (!dhd_download_fw_on_driverload) {
+		if (!dhd_driver_init_done) {
+			DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	printf("%s: Enter %p\n", __FUNCTION__, net);
+	DHD_MUTEX_LOCK();
+	/* Init wakelock */
+	if (!dhd_download_fw_on_driverload) {
+		if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+			DHD_OS_WAKE_LOCK_INIT(dhd);
+			dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+		}
+#ifdef SHOW_LOGTRACE
+		skb_queue_head_init(&dhd->evt_trace_queue);
+
+		if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
+			ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
+			if (ret == BCME_OK) {
+				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
+					st_str_file_path, map_file_path);
+				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
+					rom_st_str_file_path, rom_map_file_path);
+				dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
+			}
+		}
+#endif /* SHOW_LOGTRACE */
+	}
+
+#if defined(PREVENT_REOPEN_DURING_HANG)
+	/* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
+	if (dhd->pub.hang_was_sent == 1) {
+		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+		/* Force to bring down WLAN interface in case dhd_stop() is not called
+		 * from the upper layer when HANG event is triggered.
+		 */
+		if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
+			DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
+			dhd_stop(net);
+		} else {
+			return -1;
+		}
+	}
+#endif /* PREVENT_REOPEN_DURING_HANG */
+
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+	dhd->pub.dongle_trap_occured = 0;
+	dhd->pub.hang_was_sent = 0;
+	dhd->pub.hang_reason = 0;
+	dhd->pub.iovar_timeout_occured = 0;
+#ifdef PCIE_FULL_DONGLE
+	dhd->pub.d3ack_timeout_occured = 0;
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_LOSSLESS_ROAMING
+	dhd->pub.dequeue_prec_map = ALLPRIO;
+#endif
+#if 0
+	/*
+	 * Force start if ifconfig_up gets called before START command
+	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
+	 *  This should be removed in the future
+	 */
+	ret = wl_control_wl_start(net);
+	if (ret != 0) {
+		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+		ret = -1;
+		goto exit;
+	}
+#endif
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if (ifidx < 0) {
+		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (!dhd->iflist[ifidx]) {
+		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (ifidx == 0) {
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+		if (!dhd_download_fw_on_driverload) {
+			DHD_ERROR(("\n%s\n", dhd_version));
+#ifdef WL_EXT_IAPSTA
+			wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
+#endif
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+			g_first_broadcast_scan = TRUE;
+#endif 
+#if defined(BT_OVER_SDIO)
+			ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
+			wl_android_set_wifi_on_flag(TRUE);
+#else
+			ret = wl_android_wifi_on(net);
+#endif /* BT_OVER_SDIO */
+			if (ret != 0) {
+				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
+					__FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+		}
+#ifdef FIX_CPU_MIN_CLOCK
+		if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
+			dhd_init_cpufreq_fix(dhd);
+			dhd_fix_cpu_freq(dhd);
+		}
+#endif /* FIX_CPU_MIN_CLOCK */
+#if defined(OOB_INTR_ONLY)
+		if (dhd->pub.conf->dpc_cpucore >= 0) {
+			dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
+			adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+			if (adapter) {
+				printf("%s: set irq affinity hint %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+				irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
+			}
+		}
+#endif
+
+		if (dhd->pub.busstate != DHD_BUS_DATA) {
+#ifdef BCMDBUS
+			dhd_set_path(&dhd->pub);
+			DHD_MUTEX_UNLOCK();
+			wait_event_interruptible_timeout(dhd->adapter->status_event,
+				wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
+				msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
+			DHD_MUTEX_LOCK();
+			if ((ret = dbus_up(dhd->pub.bus)) != 0) {
+				DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
+				goto exit;
+			} else {
+				dhd->pub.busstate = DHD_BUS_DATA;
+			}
+			if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				goto exit;
+			}
+#else
+			/* try to bring up bus */
+			DHD_PERIM_UNLOCK(&dhd->pub);
+			ret = dhd_bus_start(&dhd->pub);
+			DHD_PERIM_LOCK(&dhd->pub);
+			if (ret) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+#endif /* !BCMDBUS */
+
+		}
+#ifdef WL_EXT_IAPSTA
+		wl_ext_iapsta_attach_name(net, ifidx);
+#endif
+		if (dhd_download_fw_on_driverload) {
+			if (dhd->pub.conf->deepsleep)
+				dhd_deepsleep(dhd, 0);
+		}
+
+#ifdef BCM_FD_AGGR
+		config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
+
+
+		memset(iovbuf, 0, sizeof(iovbuf));
+		bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
+			iovbuf, sizeof(iovbuf));
+
+		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
+			agglimit = *(uint32 *)iovbuf;
+			config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
+			config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
+			DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
+				agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
+			if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
+				DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
+			}
+		} else {
+			DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
+			rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
+		}
+
+		/* Set aggregation for TX */
+		bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
+			rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
+
+		/* Set aggregation for RX */
+		memset(iovbuf, 0, sizeof(iovbuf));
+		bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
+		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
+			dhd->pub.info->fdaggr = 0;
+			if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
+				dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
+			if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
+				dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
+		} else {
+			DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
+		}
+#endif /* BCM_FD_AGGR */
+
+#ifdef BT_OVER_SDIO
+		if (dhd->pub.is_bt_recovery_required) {
+			DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
+			bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
+		}
+		dhd->pub.is_bt_recovery_required = FALSE;
+#endif
+
+		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+		/* Get current TOE mode from dongle */
+		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
+			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+		} else {
+			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+#endif /* TOE */
+
+#if defined(DHD_LB_RXP)
+		__skb_queue_head_init(&dhd->rx_pend_queue);
+		if (dhd->rx_napi_netdev == NULL) {
+			dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
+			memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
+			netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
+				dhd_napi_poll, dhd_napi_weight);
+			DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
+				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+			napi_enable(&dhd->rx_napi_struct);
+			DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
+			skb_queue_head_init(&dhd->rx_napi_queue);
+		} /* rx_napi_netdev == NULL */
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+		/* Use the variant that uses locks */
+		skb_queue_head_init(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+
+#if defined(WL_CFG80211)
+		if (unlikely(wl_cfg80211_up(net))) {
+			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+			ret = -1;
+			goto exit;
+		}
+		if (!dhd_download_fw_on_driverload) {
+#ifdef ARP_OFFLOAD_SUPPORT
+			dhd->pend_ipaddr = 0;
+			if (!dhd_inetaddr_notifier_registered) {
+				dhd_inetaddr_notifier_registered = TRUE;
+				register_inetaddr_notifier(&dhd_inetaddr_notifier);
+			}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+			if (!dhd_inet6addr_notifier_registered) {
+				dhd_inet6addr_notifier_registered = TRUE;
+				register_inet6addr_notifier(&dhd_inet6addr_notifier);
+			}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+		}
+
+		argos_register_notifier_init(net);
+#if defined(NUM_SCB_MAX_PROBE)
+		dhd_set_scb_probe(&dhd->pub);
+#endif /* NUM_SCB_MAX_PROBE */
+#endif /* WL_CFG80211 */
+#if defined(WL_WIRELESS_EXT)
+#ifdef WL_ESCAN
+		if (unlikely(wl_escan_up(net, &dhd->pub))) {
+			DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
+			ret = -1;
+			goto exit;
+		}
+#endif
+#endif
+#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
+		if (!dhd_download_fw_on_driverload) {
+			conf = dhd_get_conf(net);
+			if (conf) {
+				wl_android_ext_priv_cmd(net, conf->isam_init, 0, &bytes_written);
+				wl_android_ext_priv_cmd(net, conf->isam_config, 0, &bytes_written);
+				wl_android_ext_priv_cmd(net, conf->isam_enable, 0, &bytes_written);
+			}
+		}
+#endif
+	}
+
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	dhd->pub.up = 1;
+
+	OLD_MOD_INC_USE_COUNT;
+
+#ifdef BCMDBGFS
+	dhd_dbgfs_init(&dhd->pub);
+#endif
+
+exit:
+	if (ret) {
+		dhd_stop(net);
+	}
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	DHD_MUTEX_UNLOCK();
+
+	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+	return ret;
+}
+
+int dhd_do_driver_init(struct net_device *net)
+{
+	dhd_info_t *dhd = NULL;
+
+	if (!net) {
+		DHD_ERROR(("Primary Interface not initialized \n"));
+		return -EINVAL;
+	}
+
+	DHD_MUTEX_IS_LOCK_RETURN();
+
+	dhd = DHD_DEV_INFO(net);
+
+	/* If driver is already initialized, do nothing
+	 */
+	if (dhd->pub.busstate == DHD_BUS_DATA) {
+		DHD_TRACE(("Driver already initialized. Nothing to do"));
+		return 0;
+	}
+
+	if (dhd_open(net) < 0) {
+		DHD_ERROR(("Driver Init Failed \n"));
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+
+#ifdef WL_CFG80211
+		if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+			ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else. This has to be done asynchronously otherwise
+	 * DPC will be blocked (and iovars will timeout as DPC has no chance
+	 * to read the response back)
+	 */
+	if (ifevent->ifidx > 0) {
+		dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+		if (if_event == NULL) {
+			DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
+				MALLOCED(dhdinfo->pub.osh)));
+			return BCME_NOMEM;
+		}
+
+		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+		memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+		strncpy(if_event->name, name, IFNAMSIZ);
+		if_event->name[IFNAMSIZ - 1] = '\0';
+		dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
+			DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+	}
+
+	return BCME_OK;
+}
+
+int
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+	dhd_if_event_t *if_event;
+
+#ifdef WL_CFG80211
+		if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+			ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif /* WL_CFG80211 */
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else
+	 */
+	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+	if (if_event == NULL) {
+		DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
+			MALLOCED(dhdinfo->pub.osh)));
+		return BCME_NOMEM;
+	}
+	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+	strncpy(if_event->name, name, IFNAMSIZ);
+	if_event->name[IFNAMSIZ - 1] = '\0';
+	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
+		dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+
+	return BCME_OK;
+}
+
+int
+dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+#ifdef DHD_UPDATE_INTF_MAC
+	dhd_if_event_t *if_event;
+#endif /* DHD_UPDATE_INTF_MAC */
+
+#ifdef WL_CFG80211
+	wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+		ifevent->ifidx, name, mac, ifevent->bssidx);
+#endif /* WL_CFG80211 */
+
+#ifdef DHD_UPDATE_INTF_MAC
+	/* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
+	 * anything else
+	 */
+	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+	if (if_event == NULL) {
+		DHD_ERROR(("dhd_event_ifchange: malloc failed for if_event, malloced %d bytes",
+			MALLOCED(dhdinfo->pub.osh)));
+		return BCME_NOMEM;
+	}
+	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+	// construct a change event
+	if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
+	if_event->event.opcode = WLC_E_IF_CHANGE;
+	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+	strncpy(if_event->name, name, IFNAMSIZ);
+	if_event->name[IFNAMSIZ - 1] = '\0';
+	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
+		dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+#endif /* DHD_UPDATE_INTF_MAC */
+
+	return BCME_OK;
+}
+
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. the slot is reused. this function does NOT register the
+ * new interface to linux kernel. dhd_register_if does the job
+ */
+struct net_device*
+dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+
+	ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
+	ifp = dhdinfo->iflist[ifidx];
+
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
+				__FUNCTION__, ifp->net->name, ifidx));
+
+			if (ifidx == 0) {
+				/* For primary ifidx (0), there shouldn't be
+				 * any netdev present already.
+				 */
+				DHD_ERROR(("Primary ifidx populated already\n"));
+				ASSERT(0);
+				return NULL;
+			}
+
+			dhd_dev_priv_clear(ifp->net); /* clear net_device private */
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_stop_queue(ifp->net);
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+			}
+			ifp->net = NULL;
+		}
+	} else {
+		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
+			return NULL;
+		}
+	}
+
+	memset(ifp, 0, sizeof(dhd_if_t));
+	ifp->info = dhdinfo;
+	ifp->idx = ifidx;
+	ifp->bssidx = bssidx;
+#ifdef DHD_MCAST_REGEN
+	ifp->mcast_regen_bss_enable = FALSE;
+#endif
+	/* set to TRUE rx_pkt_chainable at alloc time */
+	ifp->rx_pkt_chainable = TRUE;
+
+	if (mac != NULL)
+		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
+
+	/* Allocate etherdev, including space for private structure */
+	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
+	if (ifp->net == NULL) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, (size_t)DHD_DEV_PRIV_SIZE));
+		goto fail;
+	}
+
+	/* Setup the dhd interface's netdevice private structure. */
+	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
+
+	if (name && name[0]) {
+		strncpy(ifp->net->name, name, IFNAMSIZ);
+		ifp->net->name[IFNAMSIZ - 1] = '\0';
+	}
+
+#ifdef WL_CFG80211
+	if (ifidx == 0) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+		ifp->net->destructor = free_netdev;
+#else
+		ifp->net->needs_free_netdev = true;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+	} else {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+		ifp->net->destructor = dhd_netdev_free;
+#else
+		ifp->net->needs_free_netdev = true;
+		ifp->net->priv_destructor = dhd_netdev_free;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+	}
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	ifp->net->destructor = free_netdev;
+#else
+	ifp->net->needs_free_netdev = true;
+#endif
+#endif /* WL_CFG80211 */
+	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
+	ifp->name[IFNAMSIZ - 1] = '\0';
+	dhdinfo->iflist[ifidx] = ifp;
+
+/* initialize the dongle provided if name */
+	if (dngl_name)
+		strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
+	else if (name)
+		strncpy(ifp->dngl_name, name, IFNAMSIZ);
+
+#ifdef PCIE_FULL_DONGLE
+	/* Initialize STA info list */
+	INIT_LIST_HEAD(&ifp->sta_list);
+	DHD_IF_STA_LIST_LOCK_INIT(ifp);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_L2_FILTER
+	ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
+	ifp->parp_allnode = TRUE;
+#endif /* DHD_L2_FILTER */
+
+
+	DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+
+	return ifp->net;
+
+fail:
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+			if (ifp->net == dhdinfo->rx_napi_netdev) {
+				napi_disable(&dhdinfo->rx_napi_struct);
+				netif_napi_del(&dhdinfo->rx_napi_struct);
+				skb_queue_purge(&dhdinfo->rx_napi_queue);
+				dhdinfo->rx_napi_netdev = NULL;
+			}
+#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
+			dhd_dev_priv_clear(ifp->net);
+			free_netdev(ifp->net);
+			ifp->net = NULL;
+		}
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+		ifp = NULL;
+	}
+	dhdinfo->iflist[ifidx] = NULL;
+	return NULL;
+}
+
+/* unregister and free the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+#ifdef PCIE_FULL_DONGLE
+	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdpub->if_flow_lkup;
+#endif /* PCIE_FULL_DONGLE */
+
+	ifp = dhdinfo->iflist[ifidx];
+
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+			dhdinfo->iflist[ifidx] = NULL;
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_tx_disable(ifp->net);
+
+
+
+#if defined(SET_RPS_CPUS)
+				custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+#ifdef WL_EXT_IAPSTA
+				wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
+#endif
+			}
+			ifp->net = NULL;
+		}
+#ifdef DHD_WMF
+		dhd_wmf_cleanup(dhdpub, ifidx);
+#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+		bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
+			NULL, FALSE, dhdpub->tickcnt);
+		deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
+		ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
+
+
+		dhd_if_del_sta_list(ifp);
+#ifdef PCIE_FULL_DONGLE
+		/* Delete flowrings of WDS interface */
+		if (if_flow_lkup[ifidx].role == WLC_E_IF_ROLE_WDS) {
+			dhd_flow_rings_delete(dhdpub, ifidx);
+		}
+#endif /* PCIE_FULL_DONGLE */
+		DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+		ifp = NULL;
+	}
+
+	return BCME_OK;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+	.ndo_open = dhd_open,
+	.ndo_stop = dhd_stop,
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+
+static struct net_device_ops dhd_ops_virt = {
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+#ifdef DEBUGGER
+extern void debugger_init(void *bus_handle);
+#endif
+
+
+#ifdef SHOW_LOGTRACE
+int
+dhd_os_read_file(void *file, char *buf, uint32 size)
+{
+	struct file *filep = (struct file *)file;
+
+	if (!file || !buf)
+		return -1;
+
+	return vfs_read(filep, buf, size, &filep->f_pos);
+}
+
+int
+dhd_os_seek_file(void *file, int64 offset)
+{
+	struct file *filep = (struct file *)file;
+	if (!file)
+		return -1;
+
+	/* offset can be -ve */
+	filep->f_pos = filep->f_pos + offset;
+
+	return 0;
+}
+
+static int
+dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
+{
+	struct file *filep = NULL;
+	struct kstat stat;
+	mm_segment_t fs;
+	char *raw_fmts =  NULL;
+	int logstrs_size = 0;
+	int error = 0;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	filep = filp_open(logstrs_path, O_RDONLY, 0);
+
+	if (IS_ERR(filep)) {
+		DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
+		goto fail;
+	}
+	error = vfs_stat(logstrs_path, &stat);
+	if (error) {
+		DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
+		goto fail;
+	}
+	logstrs_size = (int) stat.size;
+
+	if (logstrs_size == 0) {
+		DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
+		goto fail1;
+	}
+
+	raw_fmts = MALLOC(osh, logstrs_size);
+	if (raw_fmts == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
+		goto fail;
+	}
+	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
+		DHD_ERROR(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
+		goto fail;
+	}
+
+	if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
+				== BCME_OK) {
+		filp_close(filep, NULL);
+		set_fs(fs);
+		return BCME_OK;
+	}
+
+fail:
+	if (raw_fmts) {
+		MFREE(osh, raw_fmts, logstrs_size);
+		raw_fmts = NULL;
+	}
+
+fail1:
+	if (!IS_ERR(filep))
+		filp_close(filep, NULL);
+
+	set_fs(fs);
+	temp->fmts = NULL;
+	return BCME_ERROR;
+}
+
+static int
+dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
+		uint32 *rodata_end)
+{
+	struct file *filep = NULL;
+	mm_segment_t fs;
+	int err = BCME_ERROR;
+
+	if (fname == NULL) {
+		DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	filep = filp_open(fname, O_RDONLY, 0);
+	if (IS_ERR(filep)) {
+		DHD_ERROR(("%s: Failed to open %s \n",  __FUNCTION__, fname));
+		goto fail;
+	}
+
+	if ((err = dhd_parse_map_file(osh, filep, ramstart,
+			rodata_start, rodata_end)) < 0)
+		goto fail;
+
+fail:
+	if (!IS_ERR(filep))
+		filp_close(filep, NULL);
+
+	set_fs(fs);
+
+	return err;
+}
+
+static int
+dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
+{
+	struct file *filep = NULL;
+	mm_segment_t fs;
+	char *raw_fmts =  NULL;
+	uint32 logstrs_size = 0;
+
+	int error = 0;
+	uint32 ramstart = 0;
+	uint32 rodata_start = 0;
+	uint32 rodata_end = 0;
+	uint32 logfilebase = 0;
+
+	error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
+	if (error != BCME_OK) {
+		DHD_ERROR(("readmap Error!! \n"));
+		/* skip event log parsing when the map file can't be read */
+		if (strstr(str_file, ram_file_str) != NULL) {
+			temp->raw_sstr = NULL;
+		} else if (strstr(str_file, rom_file_str) != NULL) {
+			temp->rom_raw_sstr = NULL;
+		}
+		return error;
+	}
+	DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
+		ramstart, rodata_start, rodata_end));
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	filep = filp_open(str_file, O_RDONLY, 0);
+	if (IS_ERR(filep)) {
+		DHD_ERROR(("%s: Failed to open the file %s \n",  __FUNCTION__, str_file));
+		goto fail;
+	}
+
+	/* Full file size is huge. Just read required part */
+	logstrs_size = rodata_end - rodata_start;
+
+	if (logstrs_size == 0) {
+		DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
+		goto fail1;
+	}
+
+	raw_fmts = MALLOC(osh, logstrs_size);
+	if (raw_fmts == NULL) {
+		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+		goto fail;
+	}
+
+	logfilebase = rodata_start - ramstart;
+
+	error = generic_file_llseek(filep, logfilebase, SEEK_SET);
+	if (error < 0) {
+		DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
+		goto fail;
+	}
+
+	error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
+	if (error != logstrs_size) {
+		DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
+		goto fail;
+	}
+
+	if (strstr(str_file, ram_file_str) != NULL) {
+		temp->raw_sstr = raw_fmts;
+		temp->raw_sstr_size = logstrs_size;
+		temp->ramstart = ramstart;
+		temp->rodata_start = rodata_start;
+		temp->rodata_end = rodata_end;
+	} else if (strstr(str_file, rom_file_str) != NULL) {
+		temp->rom_raw_sstr = raw_fmts;
+		temp->rom_raw_sstr_size = logstrs_size;
+		temp->rom_ramstart = ramstart;
+		temp->rom_rodata_start = rodata_start;
+		temp->rom_rodata_end = rodata_end;
+	}
+
+	filp_close(filep, NULL);
+	set_fs(fs);
+
+	return BCME_OK;
+
+fail:
+	if (raw_fmts) {
+		MFREE(osh, raw_fmts, logstrs_size);
+		raw_fmts = NULL;
+	}
+
+fail1:
+	if (!IS_ERR(filep))
+		filp_close(filep, NULL);
+
+	set_fs(fs);
+
+	if (strstr(str_file, ram_file_str) != NULL) {
+		temp->raw_sstr = NULL;
+	} else if (strstr(str_file, rom_file_str) != NULL) {
+		temp->rom_raw_sstr = NULL;
+	}
+
+	return error;
+}
+
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMDBUS
+uint
+dhd_get_rxsz(dhd_pub_t *pub)
+{
+	struct net_device *net = NULL;
+	dhd_info_t *dhd = NULL;
+	uint rxsz;
+
+	/* Assign rxsz for dbus_attach */
+	dhd = pub->info;
+	net = dhd->iflist[0]->net;
+	net->hard_header_len = ETH_HLEN + pub->hdrlen;
+	rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+	return rxsz;
+}
+
+void
+dhd_set_path(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = NULL;
+
+	dhd = pub->info;
+
+	/* update the firmware and nvram paths used by the bus */
+	if	(dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
+		DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
+			__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+		dhd_bus_update_fw_nv_path(dhd->pub.bus,
+				dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+	}
+}
+#endif
+
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
+#ifdef BCMDBUS
+	, void *data
+#endif
+)
+{
+	dhd_info_t *dhd = NULL;
+	struct net_device *net = NULL;
+	char if_name[IFNAMSIZ] = {'\0'};
+#ifdef SHOW_LOGTRACE
+	int ret;
+#endif /* SHOW_LOGTRACE */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	uint32 bus_type = -1;
+	uint32 bus_num = -1;
+	uint32 slot_num = -1;
+	wifi_adapter_info_t *adapter = NULL;
+#elif defined(BCMDBUS)
+	wifi_adapter_info_t *adapter = data;
+#endif
+#ifdef GET_CUSTOM_MAC_ENABLE
+	char hw_ether[62];
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef STBLINUX
+	DHD_ERROR(("%s\n", driver_target));
+#endif /* STBLINUX */
+	/* will implement get_ids for DBUS later */
+#if defined(BCMSDIO)
+	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+#endif 
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+#endif
+
+	/* Allocate primary dhd_info */
+	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+	if (dhd == NULL) {
+		dhd = MALLOC(osh, sizeof(dhd_info_t));
+		if (dhd == NULL) {
+			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+			goto dhd_null_flag;
+		}
+	}
+	memset(dhd, 0, sizeof(dhd_info_t));
+	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
+
+	dhd->pub.osh = osh;
+#ifdef DUMP_IOCTL_IOV_LIST
+	dll_init(&(dhd->pub.dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+	dhd->adapter = adapter;
+	dhd->pub.adapter = (void *)adapter;
+#ifdef DHD_DEBUG
+	dll_init(&(dhd->pub.mw_list_head));
+#endif /* DHD_DEBUG */
+#ifdef BT_OVER_SDIO
+	dhd->pub.is_bt_recovery_required = FALSE;
+	mutex_init(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	wifi_platform_get_mac_addr(dhd->adapter, hw_ether);
+	bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr));
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+	dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+	dhd->pub.force_country_change = TRUE;
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef CUSTOM_COUNTRY_CODE
+	get_customized_country_code(dhd->adapter,
+		dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
+		dhd->pub.dhd_cflags);
+#endif /* CUSTOM_COUNTRY_CODE */
+#ifndef BCMDBUS
+	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#ifdef DHD_WET
+	dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
+#endif /* DHD_WET */
+	/* Initialize thread based operation and lock */
+	sema_init(&dhd->sdsem, 1);
+#endif /* !BCMDBUS */
+
+	/* Link to info module */
+	dhd->pub.info = dhd;
+
+
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+
+	/* dhd_conf must be attached after linking dhd to dhd->pub.info,
+	 * because dhd_detach will check whether .info is NULL.
+	 */
+	if (dhd_conf_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_conf_attach failed\n"));
+		goto fail;
+	}
+#ifndef BCMDBUS
+	dhd_conf_reset(&dhd->pub);
+	dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
+	dhd_conf_preinit(&dhd->pub);
+#endif /* !BCMDBUS */
+
+	/* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware
+	 * name. This is indeed a hack, but we have to make it work properly until we have a
+	 * better solution.
+	 */
+	dhd_update_fw_nv_path(dhd);
+
+	/* Set network interface name if it was provided as module parameter */
+	if (iface_name[0]) {
+		int len;
+		char ch;
+		strncpy(if_name, iface_name, IFNAMSIZ);
+		if_name[IFNAMSIZ - 1] = 0;
+		len = strlen(if_name);
+		ch = if_name[len - 1];
+		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+			strncat(if_name, "%d", 2);
+	}
+
+	/* Pass NULL for dngl_name so if_name is also used for the dngl_name member */
+	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
+	if (net == NULL) {
+		goto fail;
+	}
+
+
+	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+#ifdef DHD_L2_FILTER
+	/* initialize the l2_filter_cnt */
+	dhd->pub.l2_filter_cnt = 0;
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+
+	mutex_init(&dhd->dhd_iovar_mutex);
+	sema_init(&dhd->proto_sem, 1);
+#ifdef DHD_ULP
+	if (!(dhd_ulp_init(osh, &dhd->pub)))
+		goto fail;
+#endif /* DHD_ULP */
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+	dhd->pub.req_hang_type = 0;
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#ifdef PROP_TXSTATUS
+	spin_lock_init(&dhd->wlfc_spinlock);
+
+	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
+	dhd->pub.plat_init = dhd_wlfc_plat_init;
+	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+
+#ifdef DHD_WLFC_THREAD
+	init_waitqueue_head(&dhd->pub.wlfc_wqhead);
+	dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
+	if (IS_ERR(dhd->pub.wlfc_thread)) {
+		DHD_ERROR(("create wlfc thread failed\n"));
+		goto fail;
+	} else {
+		wake_up_process(dhd->pub.wlfc_thread);
+	}
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+
+	/* Initialize other structure content */
+	init_waitqueue_head(&dhd->ioctl_resp_wait);
+	init_waitqueue_head(&dhd->d3ack_wait);
+#ifdef PCIE_INB_DW
+	init_waitqueue_head(&dhd->ds_exit_wait);
+#endif /* PCIE_INB_DW */
+	init_waitqueue_head(&dhd->ctrl_wait);
+	init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
+	dhd->pub.dhd_bus_busy_state = 0;
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&dhd->sdlock);
+	spin_lock_init(&dhd->txqlock);
+	spin_lock_init(&dhd->rxqlock);
+	spin_lock_init(&dhd->dhd_lock);
+	spin_lock_init(&dhd->rxf_lock);
+#ifdef WLTDLS
+	spin_lock_init(&dhd->pub.tdls_lock);
+#endif /* WLTDLS */
+#if defined(RXFRAME_THREAD)
+	dhd->rxthread_enabled = TRUE;
+#endif /* defined(RXFRAME_THREAD) */
+
+#ifdef DHDTCPACK_SUPPRESS
+	spin_lock_init(&dhd->tcpack_lock);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Initialize Wakelock stuff */
+	spin_lock_init(&dhd->wakelock_spinlock);
+	spin_lock_init(&dhd->wakelock_evt_spinlock);
+	DHD_OS_WAKE_LOCK_INIT(dhd);
+	dhd->wakelock_counter = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+	// terence 20161023: do not destroy wl_wifi while wlan is down, otherwise a NULL pointer dereference happens in dhd_ioctl_entry
+	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
+#endif /* CONFIG_HAS_WAKELOCK */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhd->dhd_net_if_mutex);
+	mutex_init(&dhd->dhd_suspend_mutex);
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+	mutex_init(&dhd->dhd_apf_mutex);
+#endif /* PKT_FILTER_SUPPORT && APF */
+#endif 
+	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+	/* Attach and link in the protocol */
+	if (dhd_prot_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_prot_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef DHD_TIMESYNC
+	/* attach the timesync module */
+	if (dhd_timesync_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_timesync_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE;
+#endif /* DHD_TIMESYNC */
+
+#ifdef WL_CFG80211
+	spin_lock_init(&dhd->pub.up_lock);
+	/* Attach and link in the cfg80211 */
+	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+		DHD_ERROR(("wl_cfg80211_attach failed\n"));
+		goto fail;
+	}
+
+	dhd_monitor_init(&dhd->pub);
+	dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#ifdef DHD_LOG_DUMP
+	dhd_log_dump_init(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
+#if defined(WL_WIRELESS_EXT)
+#ifdef WL_ESCAN
+	if (wl_escan_attach(net, &dhd->pub) != 0) {
+		DHD_ERROR(("wl_escan_attach failed\n"));
+		goto fail;
+	}
+#else
+	/* Attach and link in the iw */
+	if (wl_iw_attach(net, &dhd->pub) != 0) {
+		DHD_ERROR(("wl_iw_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+#endif /* WL_ESCAN */
+#endif /* defined(WL_WIRELESS_EXT) */
+#ifdef WL_EXT_IAPSTA
+	if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
+		goto fail;
+	}
+#endif
+
+#ifdef SHOW_LOGTRACE
+	ret = dhd_init_logstrs_array(osh, &dhd->event_data);
+	if (ret == BCME_OK) {
+		dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
+		dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
+			rom_map_file_path);
+		dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
+	}
+#endif /* SHOW_LOGTRACE */
+
+#ifdef DEBUGABILITY
+	/* attach debug if support */
+	if (dhd_os_dbg_attach(&dhd->pub)) {
+		DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef DBG_PKT_MON
+	dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
+#ifdef DBG_PKT_MON_INIT_DEFAULT
+	dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
+#endif /* DBG_PKT_MON_INIT_DEFAULT */
+#endif /* DBG_PKT_MON */
+#endif /* DEBUGABILITY */
+#ifdef DHD_PKT_LOGGING
+	dhd_os_attach_pktlog(&dhd->pub);
+#endif /* DHD_PKT_LOGGING */
+
+	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
+		DHD_ERROR(("%s: Failed to initialize %u STAs\n", __FUNCTION__, DHD_MAX_STA));
+		goto fail;
+	}
+
+
+
+#ifndef BCMDBUS
+	/* Set up the watchdog timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	timer_setup(&dhd->timer, dhd_watchdog, 0);
+#else
+	init_timer(&dhd->timer);
+	dhd->timer.data = (ulong)dhd;
+	dhd->timer.function = dhd_watchdog;
+#endif
+	dhd->default_wd_interval = dhd_watchdog_ms;
+
+	if (dhd_watchdog_prio >= 0) {
+		/* Initialize watchdog thread */
+		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
+		if (dhd->thr_wdt_ctl.thr_pid < 0) {
+			goto fail;
+		}
+
+	} else {
+		dhd->thr_wdt_ctl.thr_pid = -1;
+	}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	/* Set up the runtime PM idle-count timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	timer_setup(&dhd->rpm_timer, dhd_runtimepm, 0);
+#else
+	init_timer(&dhd->rpm_timer);
+	dhd->rpm_timer.data = (ulong)dhd;
+	dhd->rpm_timer.function = dhd_runtimepm;
+#endif
+	dhd->rpm_timer_valid = FALSE;
+
+	dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
+	PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
+	if (dhd->thr_rpm_ctl.thr_pid < 0) {
+		goto fail;
+	}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef DEBUGGER
+	debugger_init((void *) bus);
+#endif
+
+	/* Set up the bottom half handler */
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize DPC thread */
+		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
+		if (dhd->thr_dpc_ctl.thr_pid < 0) {
+			goto fail;
+		}
+	} else {
+		/*  use tasklet for dpc */
+		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+		dhd->thr_dpc_ctl.thr_pid = -1;
+	}
+
+	if (dhd->rxthread_enabled) {
+		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
+		/* Initialize RXF thread */
+		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
+		if (dhd->thr_rxf_ctl.thr_pid < 0) {
+			goto fail;
+		}
+	}
+#endif /* !BCMDBUS */
+#ifdef SHOW_LOGTRACE
+	skb_queue_head_init(&dhd->evt_trace_queue);
+#endif /* SHOW_LOGTRACE */
+
+	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+#if defined(CONFIG_PM_SLEEP)
+	if (!dhd_pm_notifier_registered) {
+		dhd_pm_notifier_registered = TRUE;
+		dhd->pm_notifier.notifier_call = dhd_pm_callback;
+		dhd->pm_notifier.priority = 10;
+		register_pm_notifier(&dhd->pm_notifier);
+	}
+
+#endif /* CONFIG_PM_SLEEP */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	dhd->pend_ipaddr = 0;
+	if (!dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = TRUE;
+		register_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+	if (!dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = TRUE;
+		register_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#ifdef DEBUG_CPU_FREQ
+	dhd->new_freq = alloc_percpu(int);
+	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DEFAULT);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+
+#ifdef DHD_DEBUG_PAGEALLOC
+	register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+#if defined(DHD_LB)
+
+	dhd_lb_set_default_cpus(dhd);
+
+	/* Initialize the CPU Masks */
+	if (dhd_cpumasks_init(dhd) == 0) {
+		/* Now we have the current CPU maps, run through candidacy */
+		dhd_select_cpu_candidacy(dhd);
+		/*
+		 * If we are able to initialize the CPU masks, register with the
+		 * CPU hotplug framework so the CPU for each job can be changed
+		 * dynamically using the candidacy algorithm.
+		 */
+		dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
+		register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
+	} else {
+		/*
+		 * We are unable to initialize the CPU masks, so the candidacy algorithm
+		 * won't run; load balancing will still be honoured based on the CPUs
+		 * statically allocated to each job during init.
+		 */
+		dhd->cpu_notifier.notifier_call = NULL;
+		DHD_ERROR(("%s(): dhd_cpumasks_init failed; CPUs for each job will be static\n",
+			__FUNCTION__));
+	}
+
+#ifdef DHD_LB_TXP
+#ifdef DHD_LB_TXP_DEFAULT_ENAB
+	/* Turn ON the feature by default */
+	atomic_set(&dhd->lb_txp_active, 1);
+#else
+	/* Turn OFF the feature by default */
+	atomic_set(&dhd->lb_txp_active, 0);
+#endif /* DHD_LB_TXP_DEFAULT_ENAB */
+#endif /* DHD_LB_TXP */
+
+	DHD_LB_STATS_INIT(&dhd->pub);
+
+	/* Initialize the Load Balancing Tasklets and Napi object */
+#if defined(DHD_LB_TXC)
+	tasklet_init(&dhd->tx_compl_tasklet,
+		dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
+	INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
+	DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+	tasklet_init(&dhd->rx_compl_tasklet,
+		dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
+	DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
+#endif /* DHD_LB_RXC */
+
+#if defined(DHD_LB_RXP)
+	__skb_queue_head_init(&dhd->rx_pend_queue);
+	skb_queue_head_init(&dhd->rx_napi_queue);
+	/* Initialize the work that dispatches NAPI job to a given core */
+	INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
+	DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+	/* Initialize the work that dispatches the TX job to a given core */
+	INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
+	skb_queue_head_init(&dhd->tx_pend_queue);
+	tasklet_init(&dhd->tx_tasklet,
+		dhd_lb_tx_handler, (ulong)(dhd));
+	DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
+#endif /* DHD_LB_TXP */
+
+	dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
+#endif /* DHD_LB */
+
+#ifdef SHOW_LOGTRACE
+	INIT_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
+#endif /* SHOW_LOGTRACE */
+
+	DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	init_dhd_timeouts(&dhd->pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
+#ifdef BCMPCIE
+	dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+	if (dhd->pub.extended_trap_data == NULL) {
+		DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
+	}
+#endif /* BCMPCIE */
+
+	(void)dhd_sysfs_init(dhd);
+
+	dhd_state |= DHD_ATTACH_STATE_DONE;
+	dhd->dhd_state = dhd_state;
+
+	dhd_found++;
+
+	return &dhd->pub;
+
+fail:
+	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
+		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+			__FUNCTION__, dhd_state, &dhd->pub));
+		dhd->dhd_state = dhd_state;
+		dhd_detach(&dhd->pub);
+		dhd_free(&dhd->pub);
+	}
+dhd_null_flag:
+	return NULL;
+}
+
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+{
+	if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+		return DHD_FLAG_HOSTAP_MODE;
+	if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+		return DHD_FLAG_P2P_MODE;
+	if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+		return DHD_FLAG_IBSS_MODE;
+	if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+		return DHD_FLAG_MFG_MODE;
+
+	return DHD_FLAG_STA_MODE;
+}
+
+int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
+{
+	return dhd_get_fw_mode(dhdp->info);
+}
+
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
+{
+	int fw_len;
+	int nv_len;
+	int clm_len;
+	int conf_len;
+	const char *fw = NULL;
+	const char *nv = NULL;
+	const char *clm = NULL;
+	const char *conf = NULL;
+#ifdef DHD_UCODE_DOWNLOAD
+	int uc_len;
+	const char *uc = NULL;
+#endif /* DHD_UCODE_DOWNLOAD */
+	wifi_adapter_info_t *adapter = dhdinfo->adapter;
+	int fw_path_len = sizeof(dhdinfo->fw_path);
+	int nv_path_len = sizeof(dhdinfo->nv_path);
+
+
+	/* Update firmware and nvram path. The path may be from adapter info or module parameter
+	 * The path from adapter info is used for initialization only (as it won't change).
+	 *
+	 * The firmware_path/nvram_path module parameter may be changed by the system at run
+	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
+	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
+	 * module parameter after it is copied. We won't update the path until the module parameter
+	 * is changed again (first character is not '\0')
+	 */
+
+	/* set default firmware and nvram path for built-in type driver */
+//	if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_FW_PATH
+		fw = CONFIG_BCMDHD_FW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+		nv = CONFIG_BCMDHD_NVRAM_PATH;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+//	}
+
+	/* check if we need to initialize the path */
+	if (dhdinfo->fw_path[0] == '\0') {
+		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+			fw = adapter->fw_path;
+
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+			nv = adapter->nv_path;
+	}
+	if (dhdinfo->clm_path[0] == '\0') {
+		if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
+			clm = adapter->clm_path;
+	}
+	if (dhdinfo->conf_path[0] == '\0') {
+		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
+			conf = adapter->conf_path;
+	}
+
+	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
+	 *
+	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+	 */
+	if (firmware_path[0] != '\0')
+		fw = firmware_path;
+	if (nvram_path[0] != '\0')
+		nv = nvram_path;
+	if (clm_path[0] != '\0')
+		clm = clm_path;
+	if (config_path[0] != '\0')
+		conf = config_path;
+#ifdef DHD_UCODE_DOWNLOAD
+	if (ucode_path[0] != '\0')
+		uc = ucode_path;
+#endif /* DHD_UCODE_DOWNLOAD */
+
+	if (fw && fw[0] != '\0') {
+		fw_len = strlen(fw);
+		if (fw_len >= fw_path_len) {
+			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->fw_path, fw, fw_path_len);
+		if (dhdinfo->fw_path[fw_len-1] == '\n')
+		       dhdinfo->fw_path[fw_len-1] = '\0';
+	}
+	if (nv && nv[0] != '\0') {
+		nv_len = strlen(nv);
+		if (nv_len >= nv_path_len) {
+			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+			return FALSE;
+		}
+		memset(dhdinfo->nv_path, 0, nv_path_len);
+		strncpy(dhdinfo->nv_path, nv, nv_path_len);
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+		/* Remove "_net" or "_mfg" tag from current nvram path */
+		{
+			char *nvram_tag = "nvram_";
+			char *ext_tag = ".txt";
+			char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
+			bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
+				strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
+			if (valid_buf) {
+				char *sp = sp_nvram + strlen(nvram_tag) - 1;
+				uint32 padding_size = (uint32)(dhdinfo->nv_path +
+					nv_path_len - sp);
+				memset(sp, 0, padding_size);
+				strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
+				nv_len = strlen(dhdinfo->nv_path);
+				DHD_INFO(("%s: new nvram path = %s\n",
+					__FUNCTION__, dhdinfo->nv_path));
+			} else if (sp_nvram) {
+				DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
+					__FUNCTION__));
+				return FALSE;
+			} else {
+				DHD_ERROR(("%s: Couldn't find the nvram tag. current"
+					" nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
+			}
+		}
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+		if (dhdinfo->nv_path[nv_len-1] == '\n')
+		       dhdinfo->nv_path[nv_len-1] = '\0';
+	}
+	if (clm && clm[0] != '\0') {
+		clm_len = strlen(clm);
+		if (clm_len >= sizeof(dhdinfo->clm_path)) {
+			DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
+		if (dhdinfo->clm_path[clm_len-1] == '\n')
+		       dhdinfo->clm_path[clm_len-1] = '\0';
+	}
+	if (conf && conf[0] != '\0') {
+		conf_len = strlen(conf);
+		if (conf_len >= sizeof(dhdinfo->conf_path)) {
+			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
+		if (dhdinfo->conf_path[conf_len-1] == '\n')
+		       dhdinfo->conf_path[conf_len-1] = '\0';
+	}
+#ifdef DHD_UCODE_DOWNLOAD
+	if (uc && uc[0] != '\0') {
+		uc_len = strlen(uc);
+		if (uc_len >= sizeof(dhdinfo->uc_path)) {
+			DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
+		if (dhdinfo->uc_path[uc_len-1] == '\n')
+		       dhdinfo->uc_path[uc_len-1] = '\0';
+	}
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#if 0
+	/* clear the path in module parameter */
+	if (dhd_download_fw_on_driverload) {
+		firmware_path[0] = '\0';
+		nvram_path[0] = '\0';
+		clm_path[0] = '\0';
+		config_path[0] = '\0';
+	}
+#endif
+#ifdef DHD_UCODE_DOWNLOAD
+	ucode_path[0] = '\0';
+	DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#ifndef BCMEMBEDIMAGE
+	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
+	if (dhdinfo->fw_path[0] == '\0') {
+		DHD_ERROR(("firmware path not found\n"));
+		return FALSE;
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		DHD_ERROR(("nvram path not found\n"));
+		return FALSE;
+	}
+#endif /* BCMEMBEDIMAGE */
+
+	return TRUE;
+}
+
+#if defined(BT_OVER_SDIO)
+extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
+{
+	int fw_len;
+	const char *fw = NULL;
+	wifi_adapter_info_t *adapter = dhdinfo->adapter;
+
+
+	/* Update bt firmware path. The path may be from adapter info or module parameter
+	 * The path from adapter info is used for initialization only (as it won't change).
+	 *
+	 * The btfw_path module parameter may be changed by the system at run
+	 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
+	 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
+	 * module parameter after it is copied. We won't update the path until the module parameter
+	 * is changed again (first character is not '\0')
+	 */
+
+	/* set default firmware and nvram path for built-in type driver */
+	if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_BTFW_PATH
+		fw = CONFIG_BCMDHD_BTFW_PATH;
+#endif /* CONFIG_BCMDHD_BTFW_PATH */
+	}
+
+	/* check if we need to initialize the path */
+	if (dhdinfo->btfw_path[0] == '\0') {
+		if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
+			fw = adapter->btfw_path;
+	}
+
+	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
+	 */
+	if (btfw_path[0] != '\0')
+		fw = btfw_path;
+
+	if (fw && fw[0] != '\0') {
+		fw_len = strlen(fw);
+		if (fw_len >= sizeof(dhdinfo->btfw_path)) {
+			DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
+		if (dhdinfo->btfw_path[fw_len-1] == '\n')
+		       dhdinfo->btfw_path[fw_len-1] = '\0';
+	}
+
+	/* clear the path in module parameter */
+	btfw_path[0] = '\0';
+
+	if (dhdinfo->btfw_path[0] == '\0') {
+		DHD_ERROR(("bt firmware path not found\n"));
+		return FALSE;
+	}
+
+	return TRUE;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+
+#ifdef CUSTOMER_HW4_DEBUG
+bool dhd_validate_chipid(dhd_pub_t *dhdp)
+{
+	uint chipid = dhd_bus_chip_id(dhdp);
+	uint config_chipid;
+
+#ifdef BCM4361_CHIP
+	config_chipid = BCM4361_CHIP_ID;
+#elif defined(BCM4359_CHIP)
+	config_chipid = BCM4359_CHIP_ID;
+#elif defined(BCM4358_CHIP)
+	config_chipid = BCM4358_CHIP_ID;
+#elif defined(BCM4354_CHIP)
+	config_chipid = BCM4354_CHIP_ID;
+#elif defined(BCM4339_CHIP)
+	config_chipid = BCM4339_CHIP_ID;
+#elif defined(BCM43349_CHIP)
+	config_chipid = BCM43349_CHIP_ID;
+#elif defined(BCM4335_CHIP)
+	config_chipid = BCM4335_CHIP_ID;
+#elif defined(BCM43241_CHIP)
+	config_chipid = BCM4324_CHIP_ID;
+#elif defined(BCM4330_CHIP)
+	config_chipid = BCM4330_CHIP_ID;
+#elif defined(BCM43430_CHIP)
+	config_chipid = BCM43430_CHIP_ID;
+#elif defined(BCM43018_CHIP)
+	config_chipid = BCM43018_CHIP_ID;
+#elif defined(BCM43455_CHIP)
+	config_chipid = BCM4345_CHIP_ID;
+#elif defined(BCM4334W_CHIP)
+	config_chipid = BCM43342_CHIP_ID;
+#elif defined(BCM43454_CHIP)
+	config_chipid = BCM43454_CHIP_ID;
+#elif defined(BCM43012_CHIP_)
+	config_chipid = BCM43012_CHIP_ID;
+#else
+	DHD_ERROR(("%s: Unknown chip id. If you are using a new chipset,"
+		" please add CONFIG_BCMXXXX to the kernel config and a"
+		" BCMXXXX_CHIP definition to the DHD driver\n",
+		__FUNCTION__));
+	config_chipid = 0;
+
+	return FALSE;
+#endif /* BCM4361_CHIP */
+
+#ifdef SUPPORT_MULTIPLE_CHIP_4345X
+	if (config_chipid == BCM43454_CHIP_ID || config_chipid == BCM4345_CHIP_ID) {
+		return TRUE;
+	}
+#endif /* SUPPORT_MULTIPLE_CHIP_4345X */
+#if defined(BCM4359_CHIP)
+	if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
+		return TRUE;
+	}
+#endif /* BCM4359_CHIP */
+#if defined(BCM4361_CHIP)
+	if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
+		return TRUE;
+	}
+#endif /* BCM4361_CHIP */
+
+	return config_chipid == chipid;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#if defined(BT_OVER_SDIO)
+wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
+{
+	DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
+	/* assuming that dhd_pub_t type pointer is available from a global variable */
+	return (wlan_bt_handle_t) g_dhd_pub;
+} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
+
+int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
+{
+	int ret = -1;
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+
+
+	/* Download BT firmware image to the dongle */
+	if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
+		DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
+		ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
+		if (ret < 0) {
+			DHD_ERROR(("%s: failed to download btfw from: %s\n",
+				__FUNCTION__, dhd->btfw_path));
+			return ret;
+		}
+	}
+	return ret;
+} EXPORT_SYMBOL(dhd_download_btfw);
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifndef BCMDBUS
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+	unsigned long flags;
+
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+	int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
+#endif /* DHD_DEBUG && BCMSDIO */
+	ASSERT(dhd);
+
+	DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
+	DHD_PERIM_LOCK(dhdp);
+#ifdef HOFFLOAD_MODULES
+	dhd_linux_get_modfw_address(dhdp);
+#endif
+	/* try to download image and nvram to the dongle */
+	if  (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+		/* Indicate FW download has not been done yet */
+		dhd->pub.fw_download_done = FALSE;
+		DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
+			__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+		fw_download_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+			dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+		fw_download_end = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+		if (ret < 0) {
+			DHD_ERROR(("%s: failed to download firmware %s\n",
+				__FUNCTION__, dhd->fw_path));
+			DHD_PERIM_UNLOCK(dhdp);
+			return ret;
+		}
+		/* Indicate FW Download has succeeded */
+		dhd->pub.fw_download_done = TRUE;
+	}
+	if (dhd->pub.busstate != DHD_BUS_LOAD) {
+		DHD_PERIM_UNLOCK(dhdp);
+		return -ENETDOWN;
+	}
+
+#ifdef BCMSDIO
+	dhd_os_sdlock(dhdp);
+#endif /* BCMSDIO */
+
+	/* Start the watchdog timer */
+	dhd->pub.tickcnt = 0;
+	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+	/* Bring up the bus */
+	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+#ifdef BCMSDIO
+		dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+		DHD_PERIM_UNLOCK(dhdp);
+		return ret;
+	}
+
+	DHD_ENABLE_RUNTIME_PM(&dhd->pub);
+
+#ifdef DHD_ULP
+	dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
+#endif /* DHD_ULP */
+#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
+	/* Host registration for OOB interrupt */
+	if (dhd_bus_oob_intr_register(dhdp)) {
+		/* deactivate timer and wait for the handler to finish */
+#if !defined(BCMPCIE_OOB_HOST_WAKE)
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+		DHD_PERIM_UNLOCK(dhdp);
+		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+	dhd_bus_oob_intr_set(dhdp, TRUE);
+#else
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#elif defined(FORCE_WOWLAN)
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif 
+#ifdef PCIE_FULL_DONGLE
+	{
+		/* max_h2d_rings includes H2D common rings */
+		uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
+
+		DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
+			max_h2d_rings));
+		if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
+#ifdef BCMSDIO
+			dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+			DHD_PERIM_UNLOCK(dhdp);
+			return ret;
+		}
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+	/* Do protocol initialization necessary for IOCTL/IOVAR */
+	ret = dhd_prot_init(&dhd->pub);
+	if (unlikely(ret != BCME_OK)) {
+		DHD_PERIM_UNLOCK(dhdp);
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+
+	/* If bus is not ready, can't come up */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+#ifdef BCMSDIO
+		dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+		DHD_PERIM_UNLOCK(dhdp);
+		return -ENODEV;
+	}
+
+#ifdef BCMSDIO
+	dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+
+	/* Bus is ready, query any dongle information */
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+	f2_sync_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		DHD_PERIM_UNLOCK(dhdp);
+		return ret;
+	}
+#if defined(CONFIG_SOC_EXYNOS8895)
+	DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+	exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_EXYNOS8895 */
+
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+	f2_sync_end = OSL_SYSUPTIME();
+	DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
+			(fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
+#endif /* DHD_DEBUG && BCMSDIO */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+		dhd->pend_ipaddr = 0;
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(TRAFFIC_MGMT_DWM)
+	bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
+#endif 
+	DHD_PERIM_UNLOCK(dhdp);
+	return 0;
+}
+#endif /* !BCMDBUS */
+
+#ifdef WLTDLS
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	uint32 tdls = tdls_on;
+	int ret = 0;
+	uint32 tdls_auto_op = 0;
+	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
+	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+	BCM_REFERENCE(mac);
+	if (!FW_SUPPORTED(dhd, tdls))
+		return BCME_ERROR;
+
+	if (dhd->tdls_enable == tdls_on)
+		goto auto_mode;
+	ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
+		goto exit;
+	}
+	dhd->tdls_enable = tdls_on;
+auto_mode:
+
+	tdls_auto_op = auto_on;
+	ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
+			0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	if (tdls_auto_op) {
+		ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
+				sizeof(tdls_idle_time), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
+				sizeof(tdls_rssi_high), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
+				sizeof(tdls_rssi_low), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+	}
+
+exit:
+	return ret;
+}
+
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+	if (dhd)
+		ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+	else
+		ret = BCME_ERROR;
+	return ret;
+}
+
+int
+dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
+{
+	int ret = 0;
+	bool auto_on = false;
+	uint32 mode =  wfd_mode;
+
+#ifdef ENABLE_TDLS_AUTO_MODE
+	if (wfd_mode) {
+		auto_on = false;
+	} else {
+		auto_on = true;
+	}
+#else
+	auto_on = false;
+#endif /* ENABLE_TDLS_AUTO_MODE */
+	ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
+	if (ret < 0) {
+		DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
+		return ret;
+	}
+
+	ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
+	if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
+		DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+
+	ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
+	if (ret < 0) {
+		DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
+		return ret;
+	}
+
+	dhd->tdls_mode = mode;
+	return ret;
+}
+#ifdef PCIE_FULL_DONGLE
+int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
+{
+	dhd_pub_t *dhd_pub = dhdp;
+	tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
+	tdls_peer_node_t *new = NULL, *prev = NULL;
+	int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
+	uint8 *da = (uint8 *)&event->addr.octet[0];
+	bool connect = FALSE;
+	uint32 reason = ntoh32(event->reason);
+	unsigned long flags;
+
+	if (reason == WLC_E_TDLS_PEER_CONNECTED)
+		connect = TRUE;
+	else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
+		connect = FALSE;
+	else
+	{
+		DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	if (ifindex == DHD_BAD_IF)
+		return BCME_ERROR;
+
+	if (connect) {
+		while (cur != NULL) {
+			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
+					__FUNCTION__, __LINE__));
+				return BCME_ERROR;
+			}
+			cur = cur->next;
+		}
+
+		new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
+		if (new == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		memcpy(new->addr, da, ETHER_ADDR_LEN);
+		DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+		new->next = dhd_pub->peer_tbl.node;
+		dhd_pub->peer_tbl.node = new;
+		dhd_pub->peer_tbl.tdls_peer_count++;
+		DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+
+	} else {
+		while (cur != NULL) {
+			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+				dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
+				DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+				if (prev)
+					prev->next = cur->next;
+				else
+					dhd_pub->peer_tbl.node = cur->next;
+				MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
+				dhd_pub->peer_tbl.tdls_peer_count--;
+				DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+				return BCME_OK;
+			}
+			prev = cur;
+			cur = cur->next;
+		}
+		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
+	}
+	return BCME_OK;
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
+
+bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
+{
+	if (!dhd)
+		return FALSE;
+
+	if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+		return TRUE;
+	else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+		DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+		return TRUE;
+	else
+		return FALSE;
+}
+#if !defined(AP) && defined(WLP2P)
+/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the firmware
+ * name is fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
+ * firmware and, if so, enable concurrent mode (apply P2P settings). SoftAP firmware
+ * is still named fw_bcmdhd_apsta.
+ */
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+{
+	int32 ret = 0;
+	char buf[WLC_IOCTL_SMLEN];
+	bool mchan_supported = FALSE;
+	/* If dhd->op_mode is already set to HOSTAP or Manufacturing
+	 * test mode, use that mode as-is.
+	 */
+	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+		return 0;
+	if (FW_SUPPORTED(dhd, vsdb)) {
+		mchan_supported = TRUE;
+	}
+	if (!FW_SUPPORTED(dhd, p2p)) {
+		DHD_TRACE(("Chip does not support p2p\n"));
+		return 0;
+	} else {
+		/* The chip advertises P2P support, but verify that P2P is actually implemented in the firmware */
+		memset(buf, 0, sizeof(buf));
+		ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
+				sizeof(buf), FALSE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+			return 0;
+		} else {
+			if (buf[0] == 1) {
+				/* By default the chip supports single-channel concurrency;
+				 * now let's check for mchan
+				 */
+				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+				if (mchan_supported)
+					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+				if (FW_SUPPORTED(dhd, rsdb)) {
+					ret |= DHD_FLAG_RSDB_MODE;
+				}
+#ifdef WL_SUPPORT_MULTIP2P
+				if (FW_SUPPORTED(dhd, mp2p)) {
+					ret |= DHD_FLAG_MP2P_MODE;
+				}
+#endif /* WL_SUPPORT_MULTIP2P */
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+				return ret;
+#else
+				return 0;
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+	}
+	return 0;
+}
+#endif 
+
+#ifdef SUPPORT_AP_POWERSAVE
+#define RXCHAIN_PWRSAVE_PPS			10
+#define RXCHAIN_PWRSAVE_QUIET_TIME		10
+#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK	0
+int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
+{
+	int32 pps = RXCHAIN_PWRSAVE_PPS;
+	int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
+	int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
+	int ret;
+
+	if (enable) {
+		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
+				NULL, 0, TRUE);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to enable AP power save\n"));
+		}
+		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0,
+				TRUE);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to set pps\n"));
+		}
+		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time,
+				sizeof(quiet_time), NULL, 0, TRUE);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to set quiet time\n"));
+		}
+		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check",
+				(char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to set stas assoc check\n"));
+		}
+	} else {
+		ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
+				NULL, 0, TRUE);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("Failed to disable AP power save\n"));
+		}
+	}
+
+	return 0;
+}
+#endif /* SUPPORT_AP_POWERSAVE */
+
+
+
+
+#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
+int
+dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
+{
+	int i;
+	int len;
+	int ret = BCME_OK;
+
+	bcm_iov_buf_t *iov_buf = NULL;
+	wl_adps_params_v1_t *data = NULL;
+	char buf[WL_EVENTING_MASK_LEN + 12];	/* buffer sized like the event-mask iovar buffer; used here for the "adps" iovar */
+
+	len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+	iov_buf = kmalloc(len, GFP_KERNEL);
+	if (iov_buf == NULL) {
+		DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
+		ret = BCME_NOMEM;
+		goto exit;
+	}
+
+	iov_buf->version = WL_ADPS_IOV_VER;
+	iov_buf->len = sizeof(*data);
+	iov_buf->id = WL_ADPS_IOV_MODE;
+
+	data = (wl_adps_params_v1_t *)iov_buf->data;
+	data->version = ADPS_SUB_IOV_VERSION_1;
+	data->length = sizeof(*data);
+	data->mode = on;
+
+	for (i = 1; i <= MAX_BANDS; i++) {
+		data->band = i;
+		bcm_mkiovar("adps", (char *)iov_buf, len, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0)) < 0) {
+			if (ret == BCME_UNSUPPORTED) {
+				DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
+				ret = BCME_OK;
+				goto exit;
+			}
+			else {
+				DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
+					__FUNCTION__, on ? "On" : "Off", i, ret));
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	if (iov_buf) {
+		kfree(iov_buf);
+	}
+	return ret;
+}
+#endif /* WLADPS || WLADPS_PRIVATE_CMD */
+
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+	uint32 buf_key_b4_m4 = 1;
+	uint8 msglen;
+	eventmsgs_ext_t *eventmask_msg = NULL;
+	char* iov_buf = NULL;
+	int ret2 = 0;
+	uint32 wnm_cap = 0;
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+	uint32 ampdu_ba_wsize = 0;
+#endif 
+#if defined(CUSTOM_AMPDU_MPDU)
+	int32 ampdu_mpdu = 0;
+#endif
+#if defined(CUSTOM_AMPDU_RELEASE)
+	int32 ampdu_release = 0;
+#endif
+#if defined(CUSTOM_AMSDU_AGGSF)
+	int32 amsdu_aggsf = 0;
+#endif
+	shub_control_t shub_ctl;
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+	int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+	uint32 hostreorder = 1;
+	uint wl_down = 1;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMDBUS */
+#ifndef PCIE_FULL_DONGLE
+	uint32 wl_ap_isolate;
+#endif /* PCIE_FULL_DONGLE */
+	uint32 frameburst = CUSTOM_FRAMEBURST_SET;
+	uint wnm_bsstrans_resp = 0;
+#ifdef SUPPORT_SET_CAC
+	uint32 cac = 1;
+#endif /* SUPPORT_SET_CAC */
+#ifdef DHD_ENABLE_LPC
+	uint32 lpc = 1;
+#endif /* DHD_ENABLE_LPC */
+	uint power_mode = PM_FAST;
+#if defined(BCMSDIO)
+	uint32 dongle_align = DHD_SDALIGN;
+	uint32 glom = CUSTOM_GLOM_SETTING;
+#endif /* defined(BCMSDIO) */
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	uint32 credall = 1;
+#endif
+	uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
+	uint scancache_enab = TRUE;
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+	uint32 bcn_li_bcn = 1;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+	uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
+#if defined(ARP_OFFLOAD_SUPPORT)
+	int arpoe = 1;
+#endif
+	int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+	int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
+	int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+	char buf[WLC_IOCTL_SMLEN];
+	char *ptr;
+	uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+	wl_el_tag_params_t *el_tag = NULL;
+#endif /* DHD_8021X_DUMP */
+#ifdef ROAM_ENABLE
+	uint roamvar = 0;
+	int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
+	int roam_scan_period[2] = {10, WLC_BAND_ALL};
+	int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
+	int roam_fullscan_period = 60;
+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+	int roam_fullscan_period = 120;
+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+#ifdef DISABLE_BCNLOSS_ROAM
+	uint roam_bcnloss_off = 1;
+#endif /* DISABLE_BCNLOSS_ROAM */
+#else
+#ifdef DISABLE_BUILTIN_ROAM
+	uint roamvar = 1;
+#endif /* DISABLE_BUILTIN_ROAM */
+#endif /* ROAM_ENABLE */
+
+#if defined(SOFTAP)
+	uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+	struct ether_addr p2p_ea;
+#endif
+#ifdef SOFTAP_UAPSD_OFF
+	uint32 wme_apsd = 0;
+#endif /* SOFTAP_UAPSD_OFF */
+#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
+	uint32 apsta = 1; /* Enable APSTA mode */
+#elif defined(SOFTAP_AND_GC)
+	uint32 apsta = 0;
+	int ap_mode = 1;
+#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+	char hw_ether[62];
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef DISABLE_11N
+	uint32 nmode = 0;
+#endif /* DISABLE_11N */
+
+#ifdef USE_WL_TXBF
+	uint32 txbf = 1;
+#endif /* USE_WL_TXBF */
+#ifdef DISABLE_TXBFR
+	uint32 txbf_bfr_cap = 0;
+#endif /* DISABLE_TXBFR */
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+	uint32 proptx = 0;
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
+#if defined(SUPPORT_5G_1024QAM_VHT)
+	uint32 vht_features = 0; /* init to 0; set later based on each supported feature */
+#endif 
+#ifdef DISABLE_11N_PROPRIETARY_RATES
+	uint32 ht_features = 0;
+#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#ifdef CUSTOM_PSPRETEND_THR
+	uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
+#endif
+#ifdef CUSTOM_EVENT_PM_WAKE
+	uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
+#endif	/* CUSTOM_EVENT_PM_WAKE */
+	uint32 rsdb_mode = 0;
+#ifdef ENABLE_TEMP_THROTTLING
+	wl_temp_control_t temp_control;
+#endif /* ENABLE_TEMP_THROTTLING */
+#ifdef DISABLE_PRUNED_SCAN
+	uint32 scan_features = 0;
+#endif /* DISABLE_PRUNED_SCAN */
+#ifdef PKT_FILTER_SUPPORT
+	dhd_pkt_filter_enable = TRUE;
+#ifdef APF
+	dhd->apf_set = FALSE;
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef WLTDLS
+	dhd->tdls_enable = FALSE;
+	dhd_tdls_set_mode(dhd, false);
+#endif /* WLTDLS */
+	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
+	dhd->max_dtim_enable = TRUE;
+#else
+	dhd->max_dtim_enable = FALSE;
+#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
+#ifdef CUSTOM_SET_OCLOFF
+	dhd->ocl_off = FALSE;
+#endif /* CUSTOM_SET_OCLOFF */
+	DHD_TRACE(("Enter %s\n", __FUNCTION__));
+
+#ifdef DHDTCPACK_SUPPRESS
+	dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
+#endif
+	dhd->op_mode = 0;
+
+#if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
+	/* clear AP flags */
+	dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
+
+#ifdef CUSTOMER_HW4_DEBUG
+	if (!dhd_validate_chipid(dhd)) {
+		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) do not match\n",
+			__FUNCTION__, dhd_bus_chip_id(dhd)));
+#ifndef SUPPORT_MULTIPLE_CHIPS
+		ret = BCME_BADARG;
+		goto done;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+	}
+#endif /* CUSTOMER_HW4_DEBUG */
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+		dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef DHD_PCIE_RUNTIMEPM
+		/* Disable RuntimePM in mfg mode */
+		DHD_DISABLE_RUNTIME_PM(dhd);
+		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
+		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
+		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+			__FUNCTION__));
+	} else {
+		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+	}
+#ifdef GET_CUSTOM_MAC_ENABLE
+	ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether);
+	if (!ret) {
+		memset(buf, 0, sizeof(buf));
+		bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			memset(buf, 0, sizeof(buf));
+			bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+			if (ret) {
+				int i;
+				DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
+					__FUNCTION__, MAC2STRDBG(hw_ether), ret));
+				for (i=0; i<sizeof(hw_ether)-ETHER_ADDR_LEN; i++) {
+					printf("0x%02x,", hw_ether[i+ETHER_ADDR_LEN]);
+					if ((i+1)%8 == 0)
+						printf("\n");
+				}
+				ret = BCME_NOTUP;
+				goto done;
+			}
+		}
+	} else {
+		DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
+		ret = BCME_NOTUP;
+		goto done;
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+	/* Get the default device MAC address directly from firmware */
+	memset(buf, 0, sizeof(buf));
+	bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+		FALSE, 0)) < 0) {
+		DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
+		ret = BCME_NOTUP;
+		goto done;
+	}
+	/* Update public MAC address after reading from Firmware */
+	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+	if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
+		DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
+		goto done;
+	}
+
+	/* get capabilities from firmware */
+	{
+		uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
+		memset(dhd->fw_capabilities, 0, cap_buf_size);
+		ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
+				FALSE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+				__FUNCTION__, ret));
+			return 0;
+		}
+
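+		/*
+		 * Wrap the capability string in leading/trailing spaces so that
+		 * each capability name can be matched as a space-delimited token
+		 * (as relied upon by the FW_SUPPORTED() checks below).
+		 */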
+		memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
+		dhd->fw_capabilities[0] = ' ';
+		dhd->fw_capabilities[cap_buf_size - 2] = ' ';
+		dhd->fw_capabilities[cap_buf_size - 1] = '\0';
+	}
+
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+		(op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+		uint rand_mac;
+#endif /* SET_RANDOM_MAC_SOFTAP */
+		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#if defined(ARP_OFFLOAD_SUPPORT)
+		arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
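+		/*
+		 * Build a random, locally administered MAC for SoftAP: keep the
+		 * vendor OUI in the first three octets (with the local-admin bit
+		 * set) and fill the remaining octets from a PRNG seeded with jiffies.
+		 */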
+		SRANDOM32((uint)jiffies);
+		rand_mac = RANDOM32();
+		iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02;	/* local admin bit */
+		iovbuf[1] = (unsigned char)(vendor_oui >> 8);
+		iovbuf[2] = (unsigned char)vendor_oui;
+		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+		iovbuf[4] = (unsigned char)(rand_mac >> 8);
+		iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+		ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
+				TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+#endif /* SET_RANDOM_MAC_SOFTAP */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+		dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+#ifdef SUPPORT_AP_POWERSAVE
+		dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE */
+#ifdef SOFTAP_UAPSD_OFF
+		ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
+				TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
+				__FUNCTION__, ret));
+		}
+#endif /* SOFTAP_UAPSD_OFF */
+#if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
+		/* set AP flag for specific country code of SOFTAP */
+		dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
+	} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+		arpoe = 0;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+		dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+		dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+		if (FW_SUPPORTED(dhd, rsdb)) {
+			rsdb_mode = 0;
+			ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
+					NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s Disable rsdb_mode failed, ret=%d\n",
+					__FUNCTION__, ret));
+			}
+		}
+	} else {
+		uint32 concurrent_mode = 0;
+		if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
+			(op_mode == DHD_FLAG_P2P_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+			dhd_pkt_filter_enable = FALSE;
+#endif
+			dhd->op_mode = DHD_FLAG_P2P_MODE;
+		} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+			(op_mode == DHD_FLAG_IBSS_MODE)) {
+			dhd->op_mode = DHD_FLAG_IBSS_MODE;
+		} else
+			dhd->op_mode = DHD_FLAG_STA_MODE;
+#if !defined(AP) && defined(WLP2P)
+		if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
+			(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 1;
+#endif
+			dhd->op_mode |= concurrent_mode;
+		}
+
+		/* Check if we are enabling p2p */
+		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+			ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
+					TRUE);
+			if (ret < 0)
+				DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
+
+#if defined(SOFTAP_AND_GC)
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+				(char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
+			}
+#endif
+			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+			ETHER_SET_LOCALADDR(&p2p_ea);
+			ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
+					NULL, 0, TRUE);
+			if (ret < 0)
+				DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
+			else
+				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
+		}
+#else
+	(void)concurrent_mode;
+#endif /* !defined(AP) && defined(WLP2P) */
+	}
+#ifdef BCMSDIO
+	if (dhd->conf->sd_f2_blocksize)
+		dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize);
+#endif
+
+#if defined(RSDB_MODE_FROM_FILE)
+	(void)dhd_rsdb_mode_from_file(dhd);
+#endif /* RSDB_MODE_FROM_FILE */
+
+#ifdef DISABLE_PRUNED_SCAN
+	if (FW_SUPPORTED(dhd, rsdb)) {
+		ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+				sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
+		if (ret < 0) {
+			DHD_ERROR(("%s get scan_features failed, ret=%d\n",
+				__FUNCTION__, ret));
+		} else {
+			memcpy(&scan_features, iovbuf, 4);
+			scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
+			ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+					sizeof(scan_features), NULL, 0, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s set scan_features failed, ret=%d\n",
+					__FUNCTION__, ret));
+			}
+		}
+	}
+#endif /* DISABLE_PRUNED_SCAN */
+
+	DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+		dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+#ifdef CUSTOMER_HW2
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+	if (!dhd->pub.is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+	{
+		/* get a ccode and revision for the country code */
+#if defined(CUSTOM_COUNTRY_CODE)
+		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+			&dhd->dhd_cspec, dhd->dhd_cflags);
+#else
+		get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+			&dhd->dhd_cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+	}
+#endif /* CUSTOMER_HW2 */
+
+#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
+	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
+		dhd->info->rxthread_enabled = FALSE;
+	else
+		dhd->info->rxthread_enabled = TRUE;
+#endif
+	/* Set Country code  */
+	if (dhd->dhd_cspec.ccode[0] != 0) {
+		ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
+				NULL, 0, TRUE);
+		if (ret < 0)
+			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+	}
+
+
+	/* Set Listen Interval */
+	ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
+			NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+#ifdef USE_WFA_CERT_CONF
+	if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
+		DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
+	}
+#endif /* USE_WFA_CERT_CONF */
+	/* Disable built-in roaming to allow the external supplicant to take care of roaming */
+	dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#if defined(ROAM_ENABLE)
+#ifdef DISABLE_BCNLOSS_ROAM
+	dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off, sizeof(roam_bcnloss_off),
+			NULL, 0, TRUE);
+#endif /* DISABLE_BCNLOSS_ROAM */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
+		sizeof(roam_trigger), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
+		sizeof(roam_scan_period), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
+		sizeof(roam_delta), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
+	ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
+			sizeof(roam_fullscan_period), NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
+#endif /* ROAM_ENABLE */
+
+#ifdef CUSTOM_EVENT_PM_WAKE
+	ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
+			sizeof(pm_awake_thresh), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
+	}
+#endif	/* CUSTOM_EVENT_PM_WAKE */
+#ifdef WLTDLS
+#ifdef ENABLE_TDLS_AUTO_MODE
+	/* by default TDLS on and auto mode on */
+	_dhd_tdls_enable(dhd, true, true, NULL);
+#else
+	/* by default TDLS on and auto mode off */
+	_dhd_tdls_enable(dhd, true, false, NULL);
+#endif /* ENABLE_TDLS_AUTO_MODE */
+#endif /* WLTDLS */
+
+#ifdef DHD_ENABLE_LPC
+	/* Set lpc 1 */
+	ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
+
+		if (ret == BCME_NOTDOWN) {
+			uint wl_down = 1;
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+				(char *)&wl_down, sizeof(wl_down), TRUE, 0);
+			DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
+
+			ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
+			DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
+		}
+	}
+#endif /* DHD_ENABLE_LPC */
+
+#ifdef WLADPS
+#ifdef WLADPS_SEAK_AP_WAR
+	dhd->disabled_adps = FALSE;
+#endif /* WLADPS_SEAK_AP_WAR */
+	if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+#ifdef ADPS_MODE_FROM_FILE
+		dhd_adps_mode_from_file(dhd);
+#else
+		if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
+			DHD_ERROR(("%s dhd_enable_adps failed %d\n",
+					__FUNCTION__, ret));
+		}
+#endif /* ADPS_MODE_FROM_FILE */
+	}
+#endif /* WLADPS */
+
+	/* Set PowerSave mode */
+	(void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+#if defined(BCMSDIO)
+	/* Match Host and Dongle rx alignment */
+	dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
+			NULL, 0, TRUE);
+
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	/* enable credall to reduce the chance of running out of bus credits */
+	dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
+#endif
+
+#ifdef USE_WFA_CERT_CONF
+	if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
+		DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
+	}
+#endif /* USE_WFA_CERT_CONF */
+	if (glom != DEFAULT_GLOM_VALUE) {
+		DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
+		dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
+	}
+#endif /* defined(BCMSDIO) */
+
+	/* Setup timeout if Beacons are lost and roam is off to report link down */
+	dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0, TRUE);
+
+	/* Setup assoc_retry_max count to reconnect target AP in dongle */
+	dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0, TRUE);
+
+#if defined(AP) && !defined(WLP2P)
+	dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
+
+#endif /* defined(AP) && !defined(WLP2P) */
+
+#ifdef MIMO_ANT_SETTING
+	dhd_sel_ant_from_file(dhd);
+#endif /* MIMO_ANT_SETTING */
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == TRUE) {
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+	}
+#endif /* SOFTAP */
+
+#if defined(KEEP_ALIVE)
+	{
+	/* Set Keep Alive : be sure to use FW with -keepalive */
+	int res;
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == FALSE)
+#endif /* SOFTAP */
+		if (!(dhd->op_mode &
+			(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+			if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+				DHD_ERROR(("%s set keepalive failed %d\n",
+				__FUNCTION__, res));
+		}
+	}
+#endif /* defined(KEEP_ALIVE) */
+
+#ifdef USE_WL_TXBF
+	ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s Set txbf failed  %d\n", __FUNCTION__, ret));
+
+#endif /* USE_WL_TXBF */
+
+	ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
+			0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
+	}
+
+#ifdef DISABLE_TXBFR
+	ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
+			0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Clear txbf_bfr_cap failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* DISABLE_TXBFR */
+
+#ifdef USE_WFA_CERT_CONF
+#ifdef USE_WL_FRAMEBURST
+	 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
+		DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
+	 }
+#endif /* USE_WL_FRAMEBURST */
+#ifdef DISABLE_FRAMEBURST_VSDB
+	 g_frameburst = frameburst;
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#endif /* USE_WFA_CERT_CONF */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+	/* Disable frameburst for SoftAP */
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		frameburst = 0;
+	}
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+	/* Set frameburst to value */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+		sizeof(frameburst), TRUE, 0)) < 0) {
+		DHD_INFO(("%s frameburst not supported  %d\n", __FUNCTION__, ret));
+	}
+
+	iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+	if (iov_buf == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+		ret = BCME_NOMEM;
+		goto done;
+	}
+
+
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+	/* Set ampdu ba wsize to 64 or 16 */
+#ifdef CUSTOM_AMPDU_BA_WSIZE
+	ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+#endif
+	if (ampdu_ba_wsize != 0) {
+		ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
+				sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
+				__FUNCTION__, ampdu_ba_wsize, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_BA_WSIZE */
+
+#ifdef ENABLE_TEMP_THROTTLING
+	if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+		memset(&temp_control, 0, sizeof(temp_control));
+		temp_control.enable = 1;
+		temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
+		ret = dhd_iovar(dhd, 0, "temp_throttle_control", (char *)&temp_control,
+				sizeof(temp_control), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set temp_throttle_control failed %d\n",
+				__FUNCTION__, ret));
+		}
+	}
+#endif /* ENABLE_TEMP_THROTTLING */
+
+#if defined(CUSTOM_AMPDU_MPDU)
+	ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+	if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+		ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
+				NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set ampdu_mpdu to %d failed  %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_MPDU */
+
+#if defined(CUSTOM_AMPDU_RELEASE)
+	ampdu_release = CUSTOM_AMPDU_RELEASE;
+	if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
+		ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
+				sizeof(ampdu_release), NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set ampdu_release to %d failed  %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_RELEASE */
+
+#if defined(CUSTOM_AMSDU_AGGSF)
+	amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
+	if (amsdu_aggsf != 0) {
+		ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
+				NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
+				__FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
+		}
+	}
+#endif /* CUSTOM_AMSDU_AGGSF */
+
+#if defined(SUPPORT_5G_1024QAM_VHT)
+#ifdef SUPPORT_5G_1024QAM_VHT
+	if (dhd_get_chipid(dhd) == BCM4361_CHIP_ID) {
+		vht_features |= 0x6; /* 5G 1024 QAM support */
+	}
+#endif /* SUPPORT_5G_1024QAM_VHT */
+	if (vht_features) {
+		ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
+				NULL, 0, TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
+
+			if (ret == BCME_NOTDOWN) {
+				uint wl_down = 1;
+				ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+					(char *)&wl_down, sizeof(wl_down), TRUE, 0);
+				DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
+					" vht_features = 0x%x\n",
+					__FUNCTION__, ret, vht_features));
+
+				ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
+					sizeof(vht_features), NULL, 0, TRUE);
+				DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
+			}
+		}
+	}
+#endif /* SUPPORT_5G_1024QAM_VHT */
+#ifdef DISABLE_11N_PROPRIETARY_RATES
+	ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
+			TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#ifdef CUSTOM_PSPRETEND_THR
+	/* Set the pspretend_threshold used in AP (HostAPD) mode */
+	ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
+			sizeof(pspretend_thr), NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s pspretend_threshold for HostAPD failed  %d\n",
+			__FUNCTION__, ret));
+	}
+#endif
+
+	ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
+			NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
+	}
+#ifdef SUPPORT_SET_CAC
+	bcm_mkiovar("cac", (char *)&cac, sizeof(cac), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
+	}
+#endif /* SUPPORT_SET_CAC */
+#ifdef DHD_ULP
+	/* Get the required details from dongle during preinit ioctl */
+	dhd_ulp_preinit(dhd);
+#endif /* DHD_ULP */
+
+	/* Read event_msgs mask */
+	ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+			sizeof(iovbuf), FALSE);
+	if (ret < 0) {
+		DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+	/* Setup event_msgs */
+	setbit(eventmask, WLC_E_SET_SSID);
+	setbit(eventmask, WLC_E_PRUNE);
+	setbit(eventmask, WLC_E_AUTH);
+	setbit(eventmask, WLC_E_AUTH_IND);
+	setbit(eventmask, WLC_E_ASSOC);
+	setbit(eventmask, WLC_E_REASSOC);
+	setbit(eventmask, WLC_E_REASSOC_IND);
+	if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
+		setbit(eventmask, WLC_E_DEAUTH);
+	setbit(eventmask, WLC_E_DEAUTH_IND);
+	setbit(eventmask, WLC_E_DISASSOC_IND);
+	setbit(eventmask, WLC_E_DISASSOC);
+	setbit(eventmask, WLC_E_JOIN);
+	setbit(eventmask, WLC_E_BSSID);
+	setbit(eventmask, WLC_E_START);
+	setbit(eventmask, WLC_E_ASSOC_IND);
+	setbit(eventmask, WLC_E_PSK_SUP);
+	setbit(eventmask, WLC_E_LINK);
+	setbit(eventmask, WLC_E_MIC_ERROR);
+	setbit(eventmask, WLC_E_ASSOC_REQ_IE);
+	setbit(eventmask, WLC_E_ASSOC_RESP_IE);
+#ifdef LIMIT_BORROW
+	setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
+#endif
+#ifndef WL_CFG80211
+	setbit(eventmask, WLC_E_PMKID_CACHE);
+//	setbit(eventmask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event
+#endif
+	setbit(eventmask, WLC_E_JOIN_START);
+//	setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
+#ifdef DHD_DEBUG
+	setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
+#endif
+#ifdef WLMEDIA_HTSF
+	setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+	setbit(eventmask, WLC_E_PFN_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
+#endif /* PNO_SUPPORT */
+	/* enable dongle roaming event */
+	setbit(eventmask, WLC_E_ROAM);
+#ifdef WLTDLS
+	setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
+#endif /* WLTDLS */
+#ifdef WL_ESCAN
+	setbit(eventmask, WLC_E_ESCAN_RESULT);
+#endif /* WL_ESCAN */
+#ifdef RTT_SUPPORT
+	setbit(eventmask, WLC_E_PROXD);
+#endif /* RTT_SUPPORT */
+#ifdef WL_CFG80211
+	setbit(eventmask, WLC_E_ESCAN_RESULT);
+	setbit(eventmask, WLC_E_AP_STARTED);
+	setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+	if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+		setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	}
+#endif /* WL_CFG80211 */
+
+#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
+	if (dhd_logtrace_from_file(dhd)) {
+		setbit(eventmask, WLC_E_TRACE);
+	} else {
+		clrbit(eventmask, WLC_E_TRACE);
+	}
+#elif defined(SHOW_LOGTRACE)
+	setbit(eventmask, WLC_E_TRACE);
+#else
+	clrbit(eventmask, WLC_E_TRACE);
+#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
+
+	setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
+#ifdef DHD_WMF
+	setbit(eventmask, WLC_E_PSTA_PRIMARY_INTF_IND);
+#endif
+#ifdef CUSTOM_EVENT_PM_WAKE
+	setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
+#endif	/* CUSTOM_EVENT_PM_WAKE */
+#ifdef DHD_LOSSLESS_ROAMING
+	setbit(eventmask, WLC_E_ROAM_PREP);
+#endif
+#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
+	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
+
+#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
+	dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
+
+	/* Write updated Event mask */
+	ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
+	if (ret < 0) {
+		DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	/* Build the event_msgs_ext iovar for events numbered above 128 */
+	msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
+	eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
+	if (eventmask_msg == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
+		ret = BCME_NOMEM;
+		goto done;
+	}
+	bzero(eventmask_msg, msglen);
+	eventmask_msg->ver = EVENTMSGS_VER;
+	eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+
+	/* Read event_msgs_ext mask */
+	ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
+			WLC_IOCTL_SMLEN, FALSE);
+
+	if (ret2 == 0) { /* event_msgs_ext must be supported */
+		bcopy(iov_buf, eventmask_msg, msglen);
+#ifdef RSSI_MONITOR_SUPPORT
+		setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef GSCAN_SUPPORT
+		setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
+		setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
+		setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
+		setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
+#endif /* GSCAN_SUPPORT */
+		setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
+#ifdef BT_WIFI_HANDOVER
+		setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef DBG_PKT_MON
+		setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
+#endif /* DBG_PKT_MON */
+#ifdef DHD_ULP
+		setbit(eventmask_msg->mask, WLC_E_ULP);
+#endif
+#ifdef ENABLE_TEMP_THROTTLING
+		setbit(eventmask_msg->mask, WLC_E_TEMP_THROTTLE);
+#endif /* ENABLE_TEMP_THROTTLING */
+
+		/* Write updated Event mask */
+		eventmask_msg->ver = EVENTMSGS_VER;
+		eventmask_msg->command = EVENTMSGS_SET_MASK;
+		eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+		ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
+				TRUE);
+		if (ret < 0) {
+			DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
+			goto done;
+		}
+	} else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
+		/* Skip for BCME_UNSUPPORTED or BCME_VERSION */
+		DHD_ERROR(("%s event_msgs_ext not supported or version mismatch %d\n",
+			__FUNCTION__, ret2));
+	} else {
+		DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
+		ret = ret2;
+		goto done;
+	}
+
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+	/* Enabling event log trace for EAP events */
+	el_tag = (wl_el_tag_params_t *)kmalloc(sizeof(wl_el_tag_params_t), GFP_KERNEL);
+	if (el_tag == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
+				(int)sizeof(wl_el_tag_params_t)));
+		ret = BCME_NOMEM;
+		goto done;
+	}
+	el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
+	el_tag->set = 1;
+	el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
+	bcm_mkiovar("event_log_tag_control", (char *)el_tag,
+			sizeof(*el_tag), iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* DHD_8021X_DUMP */
+
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+		sizeof(scan_assoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+		sizeof(scan_unassoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+		sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* Set and enable ARP offload feature for STA only  */
+#if defined(SOFTAP)
+	if (arpoe && !ap_fw_loaded)
+#else
+	if (arpoe)
+#endif
+	{
+		dhd_arp_offload_enable(dhd, TRUE);
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+	} else {
+		dhd_arp_offload_enable(dhd, FALSE);
+		dhd_arp_offload_set(dhd, 0);
+	}
+	dhd_arp_enable = arpoe;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	/* Set up default definitions for pktfilter, enabled in suspend */
+	if (dhd_master_mode) {
+		dhd->pktfilter_count = 6;
+		dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+		if (!FW_SUPPORTED(dhd, pf6)) {
+			dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+			dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+		} else {
+			/* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
+			dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
+			dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
+		}
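+		/*
+		 * The filter strings below use the
+		 * "<id> <polarity> <type> <offset> <bitmask> <pattern>" format
+		 * parsed by the packet-filter iovar helpers; e.g. the ARP filter
+		 * matches ethertype 0x0806 at offset 12.
+		 */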
+		/* apply APP pktfilter */
+		dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
+
+		/* Setup filter to allow only unicast */
+		dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+
+		/* Add filter to pass multicast DNS packets and NOT filter them out as broadcast */
+		dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+
+		dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM] = NULL;
+		if (FW_SUPPORTED(dhd, pf6)) {
+			/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
+			dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] =
+				"107 1 6 IP4_H:16 0xf0 !0xe0 IP4_H:19 0xff 0xff";
+			dhd->pktfilter_count = 8;
+		}
+
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+		dhd->pktfilter_count = 4;
+		/* Setup filter to block broadcast and NAT Keepalive packets */
+		/* discard all broadcast packets */
+		dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
+		/* discard NAT Keepalive packets */
+		dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
+		/* discard NAT Keepalive packets */
+		dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
+		dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+	} else
+		dhd_conf_discard_pkt_filter(dhd);
+	dhd_conf_add_pkt_filter(dhd);
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded) {
+		dhd_enable_packet_filter(0, dhd);
+	}
+#endif /* defined(SOFTAP) */
+	dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_11N
+	ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
+	if (ret < 0)
+		DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11N */
+
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+	dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, TRUE);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+	/* query for 'clmver' to get clm version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
+	if (ret < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		char *clmver_temp_buf = NULL;
+
+		if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+			DHD_ERROR(("Couldn't find \"Data:\"\n"));
+		} else {
+			ptr = (clmver_temp_buf + strlen("Data:"));
+			if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+				DHD_ERROR(("Couldn't find New line character\n"));
+			} else {
+				memset(clm_version, 0, CLM_VER_STR_LEN);
+				strncpy(clm_version, clmver_temp_buf,
+					MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
+			}
+		}
+	}
+
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+	if (ret < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		bcmstrtok(&ptr, "\n", 0);
+		strncpy(fw_version, buf, FW_VER_STR_LEN);
+		fw_version[FW_VER_STR_LEN-1] = '\0';
+		dhd_set_version_info(dhd, buf);
+#ifdef WRITE_WLANINFO
+		sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
+	}
+#ifdef GEN_SOFTAP_INFO_FILE
+	sec_save_softap_info();
+#endif /* GEN_SOFTAP_INFO_FILE */
+
+#if defined(BCMSDIO)
+	dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
+#endif /* defined(BCMSDIO) */
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+	if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+		/* enable WLFC only if the firmware is VSDB when it is in STA mode */
+		(dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+		 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+		FALSE) {
+		wlfc_enable = FALSE;
+	}
+	ret = dhd_conf_get_disable_proptx(dhd);
+	if (ret == 0){
+		disable_proptx = 0;
+		wlfc_enable = TRUE;
+	} else if (ret >= 1) {
+		disable_proptx = 1;
+		wlfc_enable = FALSE;
+		/* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
+		hostreorder = 0;
+	}
+
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+	if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
+		DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
+		wlfc_enable = proptx;
+	}
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
+
+#ifndef DISABLE_11N
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+	ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
+			NULL, 0, TRUE);
+	if (ret2 < 0) {
+		DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+		if (ret2 != BCME_UNSUPPORTED)
+			ret = ret2;
+
+		if (ret == BCME_NOTDOWN) {
+			uint wl_down = 1;
+			ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
+				sizeof(wl_down), TRUE, 0);
+			DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
+				__FUNCTION__, ret2, hostreorder));
+
+			ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
+					sizeof(hostreorder), NULL, 0, TRUE);
+			DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
+			if (ret2 != BCME_UNSUPPORTED)
+					ret = ret2;
+		}
+		if (ret2 != BCME_OK)
+			hostreorder = 0;
+	}
+#endif /* DISABLE_11N */
+
+
+	if (wlfc_enable) {
+		dhd_wlfc_init(dhd);
+		/* terence 20161229: enable ampdu_hostreorder if tlv enabled */
+		dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
+	}
+#ifndef DISABLE_11N
+	else if (hostreorder)
+		dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+#else
+	/* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
+	printf("%s: PROP_TXSTATUS not defined\n", __FUNCTION__);
+	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMDBUS */
+#ifndef PCIE_FULL_DONGLE
+	/* For FD we need all the packets at DHD to handle intra-BSS forwarding */
+	if (FW_SUPPORTED(dhd, ap)) {
+		wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
+		ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
+				NULL, 0, TRUE);
+		if (ret < 0)
+			DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* PCIE_FULL_DONGLE */
+#ifdef PNO_SUPPORT
+	if (!dhd->pno_state) {
+		dhd_pno_init(dhd);
+	}
+#endif
+#ifdef RTT_SUPPORT
+	if (!dhd->rtt_state) {
+		ret = dhd_rtt_init(dhd);
+		if (ret < 0) {
+			DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
+		}
+	}
+#endif
+#ifdef WL11U
+	dhd_interworking_enable(dhd);
+#endif /* WL11U */
+
+#ifdef SUPPORT_SENSORHUB
+	DHD_ERROR(("%s: SensorHub enabled %d\n",
+			__FUNCTION__, dhd->info->shub_enable));
+	ret2 = dhd_iovar(dhd, 0, "shub", NULL, 0,
+			(char *)&shub_ctl, sizeof(shub_ctl), FALSE);
+	if (ret2 < 0) {
+		DHD_ERROR(("%s failed to get shub enable information %d\n",
+			__FUNCTION__, ret2));
+		dhd->info->shub_enable = 0;
+	} else {
+		dhd->info->shub_enable = shub_ctl.enable;
+		DHD_ERROR(("%s: checking sensorhub enable %d\n",
+			__FUNCTION__, dhd->info->shub_enable));
+	}
+#else
+	DHD_ERROR(("%s: SensorHub disabled %d\n",
+			__FUNCTION__, dhd->info->shub_enable));
+	dhd->info->shub_enable = FALSE;
+	shub_ctl.enable = FALSE;
+	ret2 = dhd_iovar(dhd, 0, "shub", (char *)&shub_ctl, sizeof(shub_ctl),
+			NULL, 0, TRUE);
+	if (ret2 < 0) {
+		DHD_ERROR(("%s failed to set shub disable\n",
+			__FUNCTION__));
+	}
+#endif /* SUPPORT_SENSORHUB */
+
+
+#ifdef NDO_CONFIG_SUPPORT
+	dhd->ndo_enable = FALSE;
+	dhd->ndo_host_ip_overflow = FALSE;
+	dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
+#endif /* NDO_CONFIG_SUPPORT */
+
+	/* ND offload version supported */
+	dhd->ndo_version = dhd_ndo_get_version(dhd);
+	if (dhd->ndo_version > 0) {
+		DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
+
+#ifdef NDO_CONFIG_SUPPORT
+		/* enable Unsolicited NA filter */
+		ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
+		if (ret < 0) {
+			DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
+		}
+#endif /* NDO_CONFIG_SUPPORT */
+	}
+
+	/* check dongle supports wbtext or not */
+	dhd->wbtext_support = FALSE;
+	if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
+			WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+		DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
+	}
+	if (wnm_bsstrans_resp == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
+		dhd->wbtext_support = TRUE;
+	}
+#ifndef WBTEXT
+	/* driver can turn off wbtext feature through makefile */
+	if (dhd->wbtext_support) {
+		if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
+				WL_BSSTRANS_POLICY_ROAM_ALWAYS,
+				WLC_SET_VAR, FALSE, 0) != BCME_OK) {
+			DHD_ERROR(("failed to disable WBTEXT\n"));
+		}
+	}
+#endif /* !WBTEXT */
+
+	/* WNM capabilities */
+	wnm_cap = 0
+#ifdef WL11U
+		| WL_WNM_BSSTRANS | WL_WNM_NOTIF
+#endif
+#ifdef WBTEXT
+		| WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
+#endif
+		;
+	if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
+		DHD_ERROR(("failed to set WNM capabilities\n"));
+	}
+
+	dhd_conf_postinit_ioctls(dhd);
+done:
+
+	if (eventmask_msg)
+		kfree(eventmask_msg);
+	if (iov_buf)
+		kfree(iov_buf);
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+	if (el_tag)
+		kfree(el_tag);
+#endif /* DHD_8021X_DUMP */
+	return ret;
+}
+
+
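+/*
+ * dhd_iovar() - encode "name" plus optional parameters via bcm_mkiovar() and
+ * issue the result through dhd_wl_ioctl().
+ * set != 0: WLC_SET_VAR; param_buf/param_len carry the value and res_buf must
+ * be NULL. set == 0: WLC_GET_VAR; the response is copied into res_buf
+ * (res_len bytes), using a bounce buffer when res_len is smaller than the
+ * encoded request.
+ */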
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
+		uint res_len, int set)
+{
+	char *buf = NULL;
+	int input_len;
+	wl_ioctl_t ioc;
+	int ret;
+
+	if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
+		return BCME_BADARG;
+
+	input_len = strlen(name) + 1 + param_len;
+	if (input_len > WLC_IOCTL_MAXLEN)
+		return BCME_BADARG;
+
+	buf = NULL;
+	if (set) {
+		if (res_buf || res_len != 0) {
+			DHD_ERROR(("%s: SET wrong argument\n", __FUNCTION__));
+			ret = BCME_BADARG;
+			goto exit;
+		}
+		buf = kzalloc(input_len, GFP_KERNEL);
+		if (!buf) {
+			DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+		ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+		if (!ret) {
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+
+		ioc.cmd = WLC_SET_VAR;
+		ioc.buf = buf;
+		ioc.len = input_len;
+		ioc.set = set;
+
+		ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	} else {
+		if (!res_buf || !res_len) {
+			DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
+			ret = BCME_BADARG;
+			goto exit;
+		}
+
+		if (res_len < input_len) {
+			DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
+					res_len, input_len));
+			buf = kzalloc(input_len, GFP_KERNEL);
+			if (!buf) {
+				DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+				ret = BCME_NOMEM;
+				goto exit;
+			}
+			ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+			if (!ret) {
+				ret = BCME_NOMEM;
+				goto exit;
+			}
+
+			ioc.cmd = WLC_GET_VAR;
+			ioc.buf = buf;
+			ioc.len = input_len;
+			ioc.set = set;
+
+			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+
+			if (ret == BCME_OK) {
+				memcpy(res_buf, buf, res_len);
+			}
+		} else {
+			memset(res_buf, 0, res_len);
+			ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
+			if (!ret) {
+				ret = BCME_NOMEM;
+				goto exit;
+			}
+
+			ioc.cmd = WLC_GET_VAR;
+			ioc.buf = res_buf;
+			ioc.len = res_len;
+			ioc.set = set;
+
+			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+		}
+	}
+exit:
+	kfree(buf);
+	return ret;
+}
+
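+/*
+ * dhd_getiovar() - GET-only helper: encode "name"/cmd_buf into the
+ * caller-provided buffer (*resptr) and return the WLC_GET_VAR response
+ * in place.
+ */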
+int
+dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
+	uint cmd_len, char **resptr, uint resp_len)
+{
+	int len = resp_len;
+	int ret;
+	char *buf = *resptr;
+	wl_ioctl_t ioc;
+	if (resp_len > WLC_IOCTL_MAXLEN)
+		return BCME_BADARG;
+
+	memset(buf, 0, resp_len);
+
+	ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+	if (ret == 0) {
+		return BCME_BUFTOOSHORT;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = 0;
+
+	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+
+	return ret;
+}
+
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+	struct dhd_info *dhd = dhdp->info;
+	struct net_device *dev = NULL;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	ASSERT(dev);
+
+	if (netif_running(dev)) {
+		DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
+		return BCME_NOTDOWN;
+	}
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+		return BCME_BADARG;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface)  */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
+{
+	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+	int i;
+	int ret;
+
+	bzero(ipv4_buf, sizeof(ipv4_buf));
+
+	/* display what we've got */
+	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+	/* now that the host_ip table is saved, clear it in the dongle AOE */
+	dhd_aoe_hostip_clr(dhd_pub, idx);
+
+	if (ret) {
+		DHD_ERROR(("%s failed\n", __FUNCTION__));
+		return;
+	}
+
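+	/*
+	 * Walk the cached table: place the new address in the first free slot
+	 * (add) or zero a matching entry (remove), then push every remaining
+	 * non-zero entry back to the dongle host_ip table.
+	 */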
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (add && (ipv4_buf[i] == 0)) {
+				ipv4_buf[i] = ipa;
+				add = FALSE; /* added ipa to local table  */
+				DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+				__FUNCTION__, i));
+		} else if (ipv4_buf[i] == ipa) {
+			ipv4_buf[i]	= 0;
+			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+				__FUNCTION__, ipa, i));
+		}
+
+		if (ipv4_buf[i] != 0) {
+			/* add back host_ip entries from our local cache */
+			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
+			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+				__FUNCTION__, ipv4_buf[i], i));
+		}
+	}
+#ifdef AOE_DBG
+	/* see the resulting hostip table */
+	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+/*
+ * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
+ * whenever there is an event related to an IP address.
+ * ptr : kernel provided pointer to IP address that has changed
+ */
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+	int idx;
+
+	if (!dhd_arp_enable)
+		return NOTIFY_DONE;
+	if (!ifa || !(ifa->ifa_dev->dev))
+		return NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter notifications meant for non Broadcom devices */
+	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
+	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
+#if defined(WL_ENABLE_P2P_IF)
+		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
+#endif /* WL_ENABLE_P2P_IF */
+			return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
+	if (!dhd)
+		return NOTIFY_DONE;
+
+	dhd_pub = &dhd->pub;
+
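+	/*
+	 * Resolve which dhd interface owns this net_device; firmware reporting
+	 * arp_version 1 does not support per-interface host_ip tables, so
+	 * index 0 is used unconditionally in that case.
+	 */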
+	if (dhd_pub->arp_version == 1) {
+		idx = 0;
+	} else {
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+				break;
+		}
+		if (idx < DHD_MAX_IFS)
+			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+		else {
+			DHD_ERROR(("Cannot find ifidx for (%s), set to 0\n", ifa->ifa_label));
+			idx = 0;
+		}
+	}
+
+	switch (event) {
+		case NETDEV_UP:
+			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+			if (dhd->pub.busstate != DHD_BUS_DATA) {
+				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+				if (dhd->pend_ipaddr) {
+					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+						__FUNCTION__, dhd->pend_ipaddr));
+				}
+				dhd->pend_ipaddr = ifa->ifa_address;
+				break;
+			}
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+				__FUNCTION__));
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			break;
+
+		case NETDEV_DOWN:
+			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+			dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+				__FUNCTION__));
+			if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
+				(ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
+				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+			} else
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			{
+				dhd_aoe_hostip_clr(&dhd->pub, idx);
+				dhd_aoe_arp_clr(&dhd->pub, idx);
+			}
+			break;
+
+		default:
+			DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
+				__func__, ifa->ifa_label, event));
+			break;
+	}
+	return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
+{
+	struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+	dhd_info_t *dhd = (dhd_info_t *)dhd_info;
+	dhd_pub_t *dhdp;
+	int ret;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
+		goto done;
+	}
+	dhdp = &dhd->pub;
+
+	if (event != DHD_WQ_WORK_IPV6_NDO) {
+		DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
+		goto done;
+	}
+
+	if (!ndo_work) {
+		DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
+		return;
+	}
+
+	switch (ndo_work->event) {
+		case NETDEV_UP:
+#ifndef NDO_CONFIG_SUPPORT
+			DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
+			ret = dhd_ndo_enable(dhdp, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+			}
+#endif /* !NDO_CONFIG_SUPPORT */
+			DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
+			if (dhdp->ndo_version > 0) {
+				/* inet6 addr notifier called only for unicast address */
+				ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
+					WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
+			} else {
+				ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
+						ndo_work->if_idx);
+			}
+			if (ret < 0) {
+				DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+			}
+			break;
+		case NETDEV_DOWN:
+			if (dhdp->ndo_version > 0) {
+				DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
+				ret = dhd_ndo_remove_ip_by_addr(dhdp,
+					&ndo_work->ipv6_addr[0], ndo_work->if_idx);
+			} else {
+				DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
+				ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
+			}
+			if (ret < 0) {
+				DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+				goto done;
+			}
+#ifdef NDO_CONFIG_SUPPORT
+			if (dhdp->ndo_host_ip_overflow) {
+				ret = dhd_dev_ndo_update_inet6addr(
+					dhd_idx2net(dhdp, ndo_work->if_idx));
+				if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+					DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+			}
+#else /* !NDO_CONFIG_SUPPORT */
+			DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
+			ret = dhd_ndo_enable(dhdp, FALSE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+				goto done;
+			}
+#endif /* NDO_CONFIG_SUPPORT */
+			break;
+
+		default:
+			DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
+			break;
+	}
+done:
+	/* free ndo_work. allocated while scheduling the work */
+	if (ndo_work) {
+		kfree(ndo_work);
+	}
+
+	return;
+}
+
+/*
+ * Neighbor Discovery Offload: Called when an interface
+ * is assigned with ipv6 address.
+ * Handles only primary interface
+ */
+int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	dhd_info_t *dhd;
+	dhd_pub_t *dhdp;
+	struct inet6_ifaddr *inet6_ifa = ptr;
+	struct ipv6_work_info_t *ndo_info;
+	int idx;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter notifications meant for non Broadcom devices */
+	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+		return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
+	if (!dhd) {
+		return NOTIFY_DONE;
+	}
+	dhdp = &dhd->pub;
+
+	/* Supports only primary interface */
+	idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
+	if (idx != 0) {
+		return NOTIFY_DONE;
+	}
+
+	/* FW capability */
+	if (!FW_SUPPORTED(dhdp, ndoe)) {
+		return NOTIFY_DONE;
+	}
+
+	ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+	if (!ndo_info) {
+		DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+		return NOTIFY_DONE;
+	}
+
+	/* fill up ndo_info */
+	ndo_info->event = event;
+	ndo_info->if_idx = idx;
+	memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
+
+	/* defer the work to thread as it may block kernel */
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+		dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
+	return NOTIFY_DONE;
+}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+
+int
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	dhd_if_t *ifp;
+	struct net_device *net = NULL;
+	int err = 0;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	ifp = dhd->iflist[ifidx];
+	net = ifp->net;
+	ASSERT(net && (ifp->idx == ifidx));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->get_stats = dhd_get_stats;
+	net->do_ioctl = dhd_ioctl_entry;
+	net->hard_start_xmit = dhd_start_xmit;
+	net->set_mac_address = dhd_set_mac_address;
+	net->set_multicast_list = dhd_set_multicast_list;
+	net->open = net->stop = NULL;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &dhd_ops_virt;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	/* Ok, link into the network layer... */
+	if (ifidx == 0) {
+		/*
+		 * device functions for the primary interface only
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		net->open = dhd_open;
+		net->stop = dhd_stop;
+#else
+		net->netdev_ops = &dhd_ops_pri;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
+			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+	} else {
+		/*
+		 * We have to use the primary MAC for virtual interfaces
+		 */
+		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
+		/*
+		 * Android sets the locally administered bit to indicate that this is a
+		 * portable hotspot.  This will not work in simultaneous AP/STA mode,
+		 * nor with P2P.  Need to set the Dongle's MAC address, and then use that.
+		 */
+		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+			ETHER_ADDR_LEN)) {
+			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+			__func__, net->name));
+			temp_addr[0] |= 0x02;
+		}
+	}
+
+	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+	net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+	net->wireless_handlers = &wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+#ifdef WLMESH
+	if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
+		temp_addr[4] ^= 0x80;
+		temp_addr[4] += ifidx;
+		temp_addr[5] += ifidx;
+	}
+#endif
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	if (ifidx == 0)
+		printf("%s\n", dhd_version);
+#ifdef WL_EXT_IAPSTA
+	else
+		wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
+#endif
+	if (ifidx != 0) {
+		if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0)
+			DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
+		else
+			DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+	}
+
+	if (need_rtnl_lock)
+		err = register_netdev(net);
+	else
+		err = register_netdevice(net);
+
+	if (err != 0) {
+		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
+		goto fail;
+	}
+#ifdef WL_EXT_IAPSTA
+	if (ifidx == 0)
+		wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
+	wl_ext_iapsta_attach_name(net, ifidx);
+#endif
+
+
+
+	printf("Register interface [%s]  MAC: "MACDBG"\n\n", net->name,
+#if defined(CUSTOMER_HW4_DEBUG)
+		MAC2STRDBG(dhd->pub.mac.octet));
+#else
+		MAC2STRDBG(net->dev_addr));
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
+//		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
+	KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS))
+	if (ifidx == 0) {
+#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+		up(&dhd_registration_sem);
+#endif /* BCMLXSDMMC */
+		if (!dhd_download_fw_on_driverload) {
+#ifdef WL_CFG80211
+			wl_terminate_event_handler(net);
+#endif /* WL_CFG80211 */
+#if defined(DHD_LB_RXP)
+			__skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXP)
+			skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+
+#ifdef SHOW_LOGTRACE
+			/* Release the skbs from queue for WLC_E_TRACE event */
+			dhd_event_logtrace_flush_queue(dhdp);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef DHDTCPACK_SUPPRESS
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+			dhd_net_bus_devreset(net, TRUE);
+#ifdef BCMLXSDMMC
+			dhd_net_bus_suspend(net);
+#endif /* BCMLXSDMMC */
+			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
+#if defined(BT_OVER_SDIO)
+			dhd->bus_user_count--;
+#endif /* BT_OVER_SDIO */
+		}
+#if defined(WL_WIRELESS_EXT)
+#ifdef WL_ESCAN
+		wl_escan_down(&dhd->pub);
+#endif /* WL_ESCAN */
+#endif /* defined(WL_WIRELESS_EXT) */
+	}
+#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
+	return 0;
+
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+	return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+
+			/*
+			 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
+			 *  calling stop again will cause SD read/write errors.
+			 */
+			if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
+				/* Stop the protocol module */
+				dhd_prot_stop(&dhd->pub);
+
+				/* Stop the bus module */
+#ifdef BCMDBUS
+				/* Force Dongle terminated */
+				if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
+					DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
+						__FUNCTION__));
+				dbus_stop(dhd->pub.bus);
+				dhd->pub.busstate = DHD_BUS_DOWN;
+#else
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+#endif /* BCMDBUS */
+			}
+
+#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
+			dhd_bus_oob_intr_unregister(dhdp);
+#endif /* OOB_INTR_ONLY || BCMPCIE_OOB_HOST_WAKE */
+		}
+	}
+}
+
+
+void dhd_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	unsigned long flags;
+	int timer_valid = FALSE;
+	struct net_device *dev;
+#ifdef WL_CFG80211
+	struct bcm_cfg80211 *cfg = NULL;
+#endif
+#ifdef HOFFLOAD_MODULES
+	struct module_metadata *hmem = NULL;
+#endif
+	if (!dhdp)
+		return;
+
+	dhd = (dhd_info_t *)dhdp->info;
+	if (!dhd)
+		return;
+
+	dev = dhd->iflist[0]->net;
+
+	if (dev) {
+		rtnl_lock();
+		if (dev->flags & IFF_UP) {
+			/* If IFF_UP is still up, it indicates that
+			 * "ifconfig wlan0 down" hasn't been called.
+			 * So invoke dev_close explicitly here to
+			 * bring down the interface.
+			 */
+			DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
+			dev_close(dev);
+		}
+		rtnl_unlock();
+	}
+
+	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+	dhd->pub.up = 0;
+	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+		/* Give sufficient time for threads to start running in case
+		 * dhd_attach() has failed
+		 */
+		OSL_SLEEP(100);
+	}
+#ifdef DHD_WET
+	dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
+#endif /* DHD_WET */
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef PROP_TXSTATUS
+#ifdef DHD_WLFC_THREAD
+	if (dhd->pub.wlfc_thread) {
+		kthread_stop(dhd->pub.wlfc_thread);
+		dhdp->wlfc_thread_go = TRUE;
+		wake_up_interruptible(&dhdp->wlfc_wqhead);
+	}
+	dhd->pub.wlfc_thread = NULL;
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+
+#ifdef DHD_TIMESYNC
+	if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
+		dhd_timesync_detach(dhdp);
+	}
+#endif /* DHD_TIMESYNC */
+#ifdef WL_CFG80211
+	if (dev) {
+		wl_cfg80211_down(dev);
+	}
+#endif /* WL_CFG80211 */
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+		dhd_bus_detach(dhdp);
+#ifdef BCMPCIE
+		if (is_reboot == SYS_RESTART) {
+			extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
+			if (dhd_wifi_platdata && !dhdp->dongle_reset) {
+				dhdpcie_bus_clock_stop(dhdp->bus);
+				wifi_platform_set_power(dhd_wifi_platdata->adapters,
+					FALSE, WIFI_TURNOFF_DELAY);
+			}
+		}
+#endif /* BCMPCIE */
+#ifndef PCIE_FULL_DONGLE
+		if (dhdp->prot)
+			dhd_prot_detach(dhdp);
+#endif /* !PCIE_FULL_DONGLE */
+	}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = FALSE;
+		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+	if (dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = FALSE;
+		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+		if (dhd->early_suspend.suspend)
+			unregister_early_suspend(&dhd->early_suspend);
+	}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#if defined(WL_WIRELESS_EXT)
+#ifdef WL_ESCAN
+	wl_escan_detach(dhdp);
+#else
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detach and unlink in the iw */
+		wl_iw_detach(dhdp);
+	}
+#endif /* WL_ESCAN */
+#endif /* defined(WL_WIRELESS_EXT) */
+#ifdef WL_EXT_IAPSTA
+	wl_ext_iapsta_dettach(dhdp);
+#endif
+
+#ifdef DHD_ULP
+	dhd_ulp_deinit(dhd->pub.osh, dhdp);
+#endif /* DHD_ULP */
+
+	/* delete all interfaces, start with virtual  */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+		int i = 1;
+		dhd_if_t *ifp;
+
+		/* Cleanup virtual interfaces */
+		dhd_net_if_lock_local(dhd);
+		for (i = 1; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				dhd_remove_if(&dhd->pub, i, TRUE);
+			}
+		}
+		dhd_net_if_unlock_local(dhd);
+
+		/*  delete primary interface 0 */
+		ifp = dhd->iflist[0];
+		ASSERT(ifp);
+		ASSERT(ifp->net);
+		if (ifp && ifp->net) {
+#ifdef WL_CFG80211
+			cfg = wl_get_cfg(ifp->net);
+#endif
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				argos_register_notifier_deinit();
+#ifdef SET_RPS_CPUS
+				custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
+				netif_tx_disable(ifp->net);
+				unregister_netdev(ifp->net);
+			}
+#ifdef PCIE_FULL_DONGLE
+			ifp->net = DHD_NET_DEV_NULL;
+#else
+			ifp->net = NULL;
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_WMF
+			dhd_wmf_cleanup(dhdp, 0);
+#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+			bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+				NULL, FALSE, dhdp->tickcnt);
+			deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+			ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
+
+
+			dhd_if_del_sta_list(ifp);
+
+			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+			dhd->iflist[0] = NULL;
+		}
+	}
+
+	/* Clear the watchdog timer */
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	timer_valid = dhd->wd_timer_valid;
+	dhd->wd_timer_valid = FALSE;
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+	if (timer_valid)
+		del_timer_sync(&dhd->timer);
+	DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+
+#ifdef BCMDBUS
+	tasklet_kill(&dhd->tasklet);
+#else
+	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHD_PCIE_RUNTIMEPM
+		if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_rpm_ctl);
+		}
+#endif /* DHD_PCIE_RUNTIMEPM */
+		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_wdt_ctl);
+		}
+
+		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_rxf_ctl);
+		}
+
+		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_dpc_ctl);
+		} else
+		{
+			tasklet_kill(&dhd->tasklet);
+		}
+	}
+#endif /* BCMDBUS */
+
+#ifdef DHD_LB
+	if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
+		/* Clear the flag first to avoid calling the cpu notifier */
+		dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
+
+		/* Kill the Load Balancing Tasklets */
+#ifdef DHD_LB_RXP
+		cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+		__skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+		cancel_work_sync(&dhd->tx_dispatcher_work);
+		tasklet_kill(&dhd->tx_tasklet);
+		__skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+#ifdef DHD_LB_TXC
+		cancel_work_sync(&dhd->tx_compl_dispatcher_work);
+		tasklet_kill(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+		tasklet_kill(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
+
+		if (dhd->cpu_notifier.notifier_call != NULL) {
+			unregister_cpu_notifier(&dhd->cpu_notifier);
+		}
+		dhd_cpumasks_deinit(dhd);
+		DHD_LB_STATS_DEINIT(&dhd->pub);
+	}
+#endif /* DHD_LB */
+
+	DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
+
+#ifdef DHD_LOG_DUMP
+	dhd_log_dump_deinit(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
+#ifdef WL_CFG80211
+	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+		if (!cfg) {
+			DHD_ERROR(("cfg NULL!\n"));
+			ASSERT(0);
+		} else {
+			wl_cfg80211_detach(cfg);
+			dhd_monitor_uninit();
+		}
+	}
+#endif
+
+#ifdef DEBUGABILITY
+	if (dhdp->dbg) {
+#ifdef DBG_PKT_MON
+		dhd_os_dbg_detach_pkt_monitor(dhdp);
+		dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
+#endif /* DBG_PKT_MON */
+		dhd_os_dbg_detach(dhdp);
+	}
+#endif /* DEBUGABILITY */
+#ifdef SHOW_LOGTRACE
+#ifdef DHD_PKT_LOGGING
+	dhd_os_detach_pktlog(dhdp);
+#endif /* DHD_PKT_LOGGING */
+	/* Release the skbs from queue for WLC_E_TRACE event */
+	dhd_event_logtrace_flush_queue(dhdp);
+
+	if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
+		if (dhd->event_data.fmts) {
+			MFREE(dhd->pub.osh, dhd->event_data.fmts,
+					dhd->event_data.fmts_size);
+			dhd->event_data.fmts = NULL;
+		}
+		if (dhd->event_data.raw_fmts) {
+			MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
+					dhd->event_data.raw_fmts_size);
+			dhd->event_data.raw_fmts = NULL;
+		}
+		if (dhd->event_data.raw_sstr) {
+			MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
+					dhd->event_data.raw_sstr_size);
+			dhd->event_data.raw_sstr = NULL;
+		}
+		if (dhd->event_data.rom_raw_sstr) {
+			MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
+					dhd->event_data.rom_raw_sstr_size);
+			dhd->event_data.rom_raw_sstr = NULL;
+		}
+		dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
+	}
+#endif /* SHOW_LOGTRACE */
+#ifdef BCMPCIE
+	if (dhdp->extended_trap_data)
+	{
+		MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+		dhdp->extended_trap_data = NULL;
+	}
+#endif /* BCMPCIE */
+#ifdef PNO_SUPPORT
+	if (dhdp->pno_state)
+		dhd_pno_deinit(dhdp);
+#endif
+#ifdef RTT_SUPPORT
+	if (dhdp->rtt_state) {
+		dhd_rtt_deinit(dhdp);
+	}
+#endif
+#if defined(CONFIG_PM_SLEEP)
+	if (dhd_pm_notifier_registered) {
+		unregister_pm_notifier(&dhd->pm_notifier);
+		dhd_pm_notifier_registered = FALSE;
+	}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef DEBUG_CPU_FREQ
+		if (dhd->new_freq)
+			free_percpu(dhd->new_freq);
+		dhd->new_freq = NULL;
+		cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+	DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd->wakelock_wd_counter = 0;
+	wake_lock_destroy(&dhd->wl_wdwake);
+	// terence 20161023: cannot destroy wl_wifi while WLAN is down, otherwise a NULL pointer dereference happens in dhd_ioctl_entry
+	wake_lock_destroy(&dhd->wl_wifi);
+#endif /* CONFIG_HAS_WAKELOCK */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+		DHD_OS_WAKE_LOCK_DESTROY(dhd);
+	}
+
+
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* This will free all MEM allocated for TCPACK SUPPRESS */
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef PCIE_FULL_DONGLE
+	dhd_flow_rings_deinit(dhdp);
+	if (dhdp->prot)
+		dhd_prot_detach(dhdp);
+#endif
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+		dhd_free_tdls_peer_list(dhdp);
+#endif
+
+#ifdef HOFFLOAD_MODULES
+	hmem = &dhdp->hmem;
+	dhd_free_module_memory(dhdp->bus, hmem);
+#endif /* HOFFLOAD_MODULES */
+#if defined(BT_OVER_SDIO)
+	mutex_destroy(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
+#ifdef DUMP_IOCTL_IOV_LIST
+	dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef DHD_DEBUG
+	/* memory waste feature list cleanup */
+	dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
+#endif /* DHD_DEBUG */
+#ifdef WL_MONITOR
+	dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL);
+#endif /* WL_MONITOR */
+
+	/* Prefer adding de-init code above this comment unless necessary.
+	 * The idea is to cancel work queue, sysfs and flags at the end.
+	 */
+	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+	dhd->dhd_deferred_wq = NULL;
+
+#ifdef SHOW_LOGTRACE
+	/* Wait till event_log_dispatcher_work finishes */
+	cancel_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* SHOW_LOGTRACE */
+
+	dhd_sysfs_exit(dhd);
+	dhd->pub.fw_download_done = FALSE;
+	dhd_conf_detach(dhdp);
+}
+
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		int i;
+		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+			if (dhdp->reorder_bufs[i]) {
+				reorder_info_t *ptr;
+				uint32 buf_size = sizeof(struct reorder_info);
+
+				ptr = dhdp->reorder_bufs[i];
+
+				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+					i, ptr->max_idx, buf_size));
+
+				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+				dhdp->reorder_bufs[i] = NULL;
+			}
+		}
+
+		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
+
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+			dhdp->soc_ram = NULL;
+		}
+#ifdef CACHE_FW_IMAGES
+		if (dhdp->cached_fw) {
+			MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
+			dhdp->cached_fw = NULL;
+		}
+
+		if (dhdp->cached_nvram) {
+			MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
+			dhdp->cached_nvram = NULL;
+		}
+#endif
+		if (dhd) {
+#ifdef REPORT_FATAL_TIMEOUTS
+			deinit_dhd_timeouts(&dhd->pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+			/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
+			if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
+					DHD_PREALLOC_DHD_INFO, 0, FALSE))
+				MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+			dhd = NULL;
+		}
+	}
+}
+
+void
+dhd_clear(dhd_pub_t *dhdp)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		int i;
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean up timer/data structure for any remaining/pending packet or timer. */
+		dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
+		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+			if (dhdp->reorder_bufs[i]) {
+				reorder_info_t *ptr;
+				uint32 buf_size = sizeof(struct reorder_info);
+
+				ptr = dhdp->reorder_bufs[i];
+
+				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+					i, ptr->max_idx, buf_size));
+
+				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+				dhdp->reorder_bufs[i] = NULL;
+			}
+		}
+
+		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
+
+		if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+			dhdp->soc_ram = NULL;
+		}
+	}
+}
+
+static void
+dhd_module_cleanup(void)
+{
+	printf("%s: Enter\n", __FUNCTION__);
+
+	dhd_bus_unregister();
+
+	wl_android_exit();
+
+	dhd_wifi_platform_unregister_drv();
+	printf("%s: Exit\n", __FUNCTION__);
+}
+
+static void __exit
+dhd_module_exit(void)
+{
+	atomic_set(&exit_in_progress, 1);
+	dhd_module_cleanup();
+	unregister_reboot_notifier(&dhd_reboot_notifier);
+	dhd_destroy_to_notifier_skt();
+}
+
+static int __init
+dhd_module_init(void)
+{
+	int err;
+	int retry = POWERUP_MAX_RETRY;
+
+	printf("%s: in %s\n", __FUNCTION__, dhd_version);
+
+	DHD_PERIM_RADIO_INIT();
+
+
+	if (firmware_path[0] != '\0') {
+		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
+		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+	}
+
+	if (nvram_path[0] != '\0') {
+		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
+		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+	}
+
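+	/* Register the platform driver, retrying on failure up to POWERUP_MAX_RETRY times;
+	 * the saved firmware/nvram paths are restored before each retry.
+	 */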
+	do {
+		err = dhd_wifi_platform_register_drv();
+		if (!err) {
+			register_reboot_notifier(&dhd_reboot_notifier);
+			break;
+		} else {
+			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
+				__FUNCTION__, retry));
+			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
+			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
+			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
+			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
+		}
+	} while (retry--);
+
+	dhd_create_to_notifier_skt();
+
+	if (err) {
+		DHD_ERROR(("%s: Failed to load the driver, max retry reached\n", __FUNCTION__));
+	} else {
+		if (!dhd_download_fw_on_driverload) {
+			dhd_driver_init_done = TRUE;
+		}
+	}
+
+	printf("%s: Exit err=%d\n", __FUNCTION__, err);
+	return err;
+}
+
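+/* Reboot notifier: record a pending system restart so dhd_detach() can power the PCIe adapter down cleanly */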
+static int
+dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
+{
+	DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
+	if (code == SYS_RESTART) {
+#ifdef BCMPCIE
+		is_reboot = code;
+#endif /* BCMPCIE */
+	}
+	return NOTIFY_DONE;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
+	defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8895) || \
+	defined(CONFIG_ARCH_MSM8998)
+deferred_module_init_sync(dhd_module_init);
+#else
+deferred_module_init(dhd_module_init);
+#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
+	* CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8895 || CONFIG_ARCH_MSM8998
+	*/
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
+#else
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+#else
+module_init(dhd_module_init);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+
+module_exit(dhd_module_exit);
+
+/*
+ * OS-specific functions required to implement the DHD driver in an OS-independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		DHD_PERIM_UNLOCK(pub);
+
+		down(&dhd->proto_sem);
+
+		DHD_PERIM_LOCK(pub);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		up(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+void
+dhd_os_dhdiovar_lock(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		mutex_lock(&dhd->dhd_iovar_mutex);
+	}
+}
+
+void
+dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		mutex_unlock(&dhd->dhd_iovar_mutex);
+	}
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+	return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+	dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool resched)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout, timeout_tmp = dhd_ioctl_timeout_msec;
+
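+	/* Optionally override the global IOCTL timeout with the per-driver config value;
+	 * the original value is restored after the wait.
+	 */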
+	if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
+		timeout_tmp = dhd_ioctl_timeout_msec;
+		dhd_ioctl_timeout_msec = pub->conf->dhd_ioctl_timeout_msec;
+	}
+
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+	DHD_PERIM_UNLOCK(pub);
+
+	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+
+	if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
+		dhd_ioctl_timeout_msec = timeout_tmp;
+	}
+
+	DHD_PERIM_LOCK(pub);
+
+	return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	wake_up(&dhd->ioctl_resp_wait);
+	return 0;
+}
+
+int
+dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+	DHD_PERIM_UNLOCK(pub);
+
+	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
+
+	DHD_PERIM_LOCK(pub);
+
+	return timeout;
+}
+
+#ifdef PCIE_INB_DW
+int
+dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(ds_exit_timeout_msec);
+#else
+	timeout = ds_exit_timeout_msec * HZ / 1000;
+#endif
+
+	DHD_PERIM_UNLOCK(pub);
+
+	timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);
+
+	DHD_PERIM_LOCK(pub);
+
+	return timeout;
+}
+
+int
+dhd_os_ds_exit_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	wake_up(&dhd->ds_exit_wait);
+	return 0;
+}
+
+#endif /* PCIE_INB_DW */
+
+int
+dhd_os_d3ack_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	wake_up(&dhd->d3ack_wait);
+	return 0;
+}
+
+int
+dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Wait for bus usage contexts to gracefully exit within some timeout value.
+	 * Set the timeout a little higher than dhd_ioctl_timeout_msec
+	 * so that the IOCTL timeout is not affected.
+	 */
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+#else
+	timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
+#endif
+
+	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
+
+	return timeout;
+}
+
+/*
+ * Wait until the condition *var == condition is met.
+ * Returns 0 if the condition is still false after the timeout elapsed.
+ * Returns the remaining time (nonzero) if the condition evaluated to true.
+ */
+int
+dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+#else
+	timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
+#endif
+
+	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
+
+	return timeout;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
+/* Fix compilation error for FC11 */
+INLINE
+#endif
+int
+dhd_os_busbusy_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	/* Call wmb() to make sure the event value is updated before waking up the waiter */
+	OSL_SMP_WMB();
+	wake_up(&dhd->dhd_bus_busy_state_wait);
+	return 0;
+}
+
+void
+dhd_os_wd_timer_extend(void *bus, bool extend)
+{
+#ifndef BCMDBUS
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+	if (extend)
+		dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+	else
+		dhd_os_wd_timer(bus, dhd->default_wd_interval);
+#endif /* !BCMDBUS */
+}
+
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+#ifndef BCMDBUS
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_GENERAL_LOCK(pub, flags);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate == DHD_BUS_DOWN) {
+		DHD_GENERAL_UNLOCK(pub, flags);
+		return;
+	}
+
+	/* Totally stop the timer */
+	if (!wdtick && dhd->wd_timer_valid == TRUE) {
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(pub, flags);
+		del_timer_sync(&dhd->timer);
+		return;
+	}
+
+	if (wdtick) {
+		dhd_watchdog_ms = (uint)wdtick;
+		/* Re-arm the timer with the latest watchdog period */
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+		dhd->wd_timer_valid = TRUE;
+	}
+	DHD_GENERAL_UNLOCK(pub, flags);
+#endif /* !BCMDBUS */
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+void
+dhd_os_runtimepm_timer(void *bus, uint tick)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_GENERAL_LOCK(pub, flags);
+
+	/* don't start the RPM until fw is loaded */
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
+		DHD_GENERAL_UNLOCK(pub, flags);
+		return;
+	}
+
+	/* If tick is non-zero, the request is to start the timer */
+	if (tick) {
+		/* Start the timer only if it's not already running */
+		if (dhd->rpm_timer_valid == FALSE) {
+			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
+			dhd->rpm_timer_valid = TRUE;
+		}
+	} else {
+		/* tick is zero, we have to stop the timer */
+		/* Stop the timer only if it's running, otherwise we don't have to do anything */
+		if (dhd->rpm_timer_valid == TRUE) {
+			dhd->rpm_timer_valid = FALSE;
+			DHD_GENERAL_UNLOCK(pub, flags);
+			del_timer_sync(&dhd->rpm_timer);
+			/* we have already released the lock, so just go to exit */
+			goto exit;
+		}
+	}
+
+	DHD_GENERAL_UNLOCK(pub, flags);
+exit:
+	return;
+
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+void *
+dhd_os_open_image(char *filename)
+{
+	struct file *fp;
+	int size;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp)) {
+		 fp = NULL;
+		 goto err;
+	 }
+
+	 if (!S_ISREG(file_inode(fp)->i_mode)) {
+		 DHD_ERROR(("%s: %s is not a regular file\n", __FUNCTION__, filename));
+		 fp = NULL;
+		 goto err;
+	 }
+
+	 size = i_size_read(file_inode(fp));
+	 if (size <= 0) {
+		 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
+		 fp = NULL;
+		 goto err;
+	 }
+
+	 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
+
+err:
+	 return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+	int size;
+
+	if (!image) {
+		return 0;
+	}
+
+	size = i_size_read(file_inode(fp));
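+	/* kernel_read() changed its signature in Linux 4.14: it now takes (file, buf, count, pos)
+	 * and advances the file position itself, so only the pre-4.14 path updates f_pos below.
+	 */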
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	rdlen = kernel_read(fp, buf, MIN(len, size), &fp->f_pos);
+#else
+	rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
+#endif
+
+	if (len >= size && size != rdlen) {
+		return -EIO;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+	if (rdlen > 0) {
+		fp->f_pos += rdlen;
+	}
+#endif
+
+	return rdlen;
+}
+
+int
+dhd_os_get_image_size(void *image)
+{
+	struct file *fp = (struct file *)image;
+	int size;
+	if (!image) {
+		return 0;
+	}
+
+	size = i_size_read(file_inode(fp));
+
+	return size;
+}
+
+#if defined(BT_OVER_SDIO)
+int
+dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rd_len;
+	uint str_len = 0;
+	char *str_end = NULL;
+
+	if (!image)
+		return 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	rd_len = kernel_read(fp, str, len, &fp->f_pos);
+#else
+	rd_len = kernel_read(fp, fp->f_pos, str, len);
+#endif
+	str_end = strnchr(str, len, '\n');
+	if (str_end == NULL) {
+		goto err;
+	}
+	str_len = (uint)(str_end - str);
+
+	/* Advance file pointer past the string length */
+	fp->f_pos += str_len + 1;
+	bzero(str_end, rd_len - str_len);
+
+err:
+	return str_len;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+
+void
+dhd_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
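+	/* Use a semaphore when the DPC runs as a thread (dhd_dpc_prio >= 0, may sleep); otherwise a BH spinlock */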
+#ifdef BCMDBUS
+	spin_lock_bh(&dhd->sdlock);
+#else
+	if (dhd_dpc_prio >= 0)
+		down(&dhd->sdsem);
+	else
+		spin_lock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+#ifdef BCMDBUS
+	spin_unlock_bh(&dhd->sdlock);
+#else
+	if (dhd_dpc_prio >= 0)
+		up(&dhd->sdsem);
+	else
+		spin_unlock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+	spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
+#else
+	spin_lock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+	spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
+#else
+	spin_unlock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
+}
+
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+#if 0
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->rxqlock);
+#endif
+}
+
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+#if 0
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->rxqlock);
+#endif
+}
+
+static void
+dhd_os_rxflock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->rxf_lock);
+
+}
+
+static void
+dhd_os_rxfunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->rxf_lock);
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+unsigned long
+dhd_os_tcpacklock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+	unsigned long flags = 0;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+#ifdef BCMSDIO
+		spin_lock_bh(&dhd->tcpack_lock);
+#else
+		spin_lock_irqsave(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
+	}
+
+	return flags;
+}
+
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd;
+
+#ifdef BCMSDIO
+	BCM_REFERENCE(flags);
+#endif /* BCMSDIO */
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+#ifdef BCMSDIO
+		spin_unlock_bh(&dhd->tcpack_lock);
+#else
+		spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
+	}
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
+{
+	uint8* buf;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+
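+	/* Try the platform's preallocated pool first; optionally fall back to kmalloc if the caller allows it */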
+	buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+	if (buf == NULL && kmalloc_if_fail)
+		buf = kmalloc(size, flags);
+
+	return buf;
+}
+
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+{
+}
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+	int res = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (!dhd->pub.up) {
+		return NULL;
+	}
+
+	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+	if (res == 0)
+		return &dhd->iw.wstats;
+	else
+		return NULL;
+}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+	wl_event_msg_t *event, void **data)
+{
+	int bcmerror = 0;
+#ifdef WL_CFG80211
+	unsigned long flags = 0;
+#endif /* WL_CFG80211 */
+	ASSERT(dhd != NULL);
+
+#ifdef SHOW_LOGTRACE
+	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+		&dhd->event_data);
+#else
+	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+		NULL);
+#endif /* SHOW_LOGTRACE */
+
+	if (bcmerror != BCME_OK)
+		return (bcmerror);
+
+#if defined(WL_EXT_IAPSTA)
+	wl_ext_iapsta_event(dhd->iflist[ifidx]->net, event, *data);
+#endif /* defined(WL_EXT_IAPSTA)  */
+#if defined(WL_WIRELESS_EXT)
+	if (event->bsscfgidx == 0) {
+		/*
+		 * Wireless ext is on primary interface only
+		 */
+
+		ASSERT(dhd->iflist[ifidx] != NULL);
+		ASSERT(dhd->iflist[ifidx]->net != NULL);
+
+		if (dhd->iflist[ifidx]->net) {
+			wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
+		}
+	}
+#endif /* defined(WL_WIRELESS_EXT)  */
+
+#ifdef WL_CFG80211
+	ASSERT(dhd->iflist[ifidx] != NULL);
+	ASSERT(dhd->iflist[ifidx]->net != NULL);
+	if (dhd->iflist[ifidx]->net) {
+		spin_lock_irqsave(&dhd->pub.up_lock, flags);
+		if (dhd->pub.up) {
+			wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
+		}
+		spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
+	}
+#endif /* defined(WL_CFG80211) */
+
+	return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	/* Just return from here */
+	return;
+}
+
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+	struct sk_buff *p, *skb;
+	uint32 pktlen;
+	int len;
+	dhd_if_t *ifp;
+	dhd_info_t *dhd;
+	uchar *skb_data;
+	int ifidx = 0;
+	struct ether_header eth;
+
+	pktlen = sizeof(eth) + data_len;
+	dhd = dhdp->info;
+
+	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
+		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
+		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
+		eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
+		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+		skb = PKTTONATIVE(dhdp->osh, p);
+		skb_data = skb->data;
+		len = skb->len;
+
+		ifidx = dhd_ifname2idx(dhd, "wlan0");
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			 ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		skb->data = skb_data;
+		skb->len = len;
+
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+			__FUNCTION__, __LINE__);
+		/* Send the packet */
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			netif_rx_ni(skb);
+		}
+	} else {
+		/* Could not allocate an sk_buff */
+		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
+	}
+}
+#endif /* LOG_INTO_TCPDUMP */
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
+#else
+	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
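+	/* Release the SDIO lock while waiting for the condition, then re-acquire it */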
+	dhd_os_sdunlock(dhd);
+	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+	dhd_os_sdlock(dhd);
+#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+	return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	if (waitqueue_active(&dhdinfo->ctrl_wait))
+		wake_up(&dhdinfo->ctrl_wait);
+#endif
+	return;
+}
+
+#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
+{
+	int ret;
+
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
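+	/* flag TRUE: bring the dongle down (wl down, de-init offload subsystems);
+	 * flag FALSE: refresh the firmware/nvram paths before bringing it back up.
+	 */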
+	if (flag == TRUE) {
+		/* Issue wl down command before resetting the chip */
+		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
+		}
+#ifdef PROP_TXSTATUS
+		if (dhd->pub.wlfc_enabled) {
+			dhd_wlfc_deinit(&dhd->pub);
+		}
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+		if (dhd->pub.pno_state) {
+			dhd_pno_deinit(&dhd->pub);
+		}
+#endif
+#ifdef RTT_SUPPORT
+		if (dhd->pub.rtt_state) {
+			dhd_rtt_deinit(&dhd->pub);
+		}
+#endif /* RTT_SUPPORT */
+
+#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
+		dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
+#endif /* DBG_PKT_MON */
+	}
+
+#ifdef BCMSDIO
+	if (!flag) {
+		dhd_update_fw_nv_path(dhd);
+		/* update firmware and nvram path to sdio bus */
+		dhd_bus_update_fw_nv_path(dhd->pub.bus,
+			dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+	}
+#endif /* BCMSDIO */
+
+	ret = dhd_bus_devreset(&dhd->pub, flag);
+	if (ret) {
+		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+
+	return ret;
+}
+
+#ifdef BCMSDIO
+int
+dhd_net_bus_suspend(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_suspend(&dhd->pub);
+}
+
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_resume(&dhd->pub, stage);
+}
+
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd) {
+		ret = dhd->pub.suspend_disable_flag;
+		dhd->pub.suspend_disable_flag = val;
+	}
+	return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val, int force)
+{
+	int ret = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd) {
+#ifdef CONFIG_MACH_UNIVERSAL7420
+#endif /* CONFIG_MACH_UNIVERSAL7420 */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+		ret = dhd_set_suspend(val, &dhd->pub);
+#else
+		ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif
+#ifdef WL_CFG80211
+		wl_cfg80211_update_power_mode(dev);
+#endif
+	}
+	return ret;
+}
+
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd)
+		dhd->pub.suspend_bcn_li_dtim = val;
+
+	return 0;
+}
+
+int net_os_set_max_dtim_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd) {
+		DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
+			__FUNCTION__, (val ? "Enable" : "Disable")));
+		if (val) {
+			dhd->pub.max_dtim_enable = TRUE;
+		} else {
+			dhd->pub.max_dtim_enable = FALSE;
+		}
+	} else {
+		return -1;
+	}
+
+	return 0;
+}
+
+#ifdef PKT_FILTER_SUPPORT
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+	int ret = 0;
+
+#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (!dhd_master_mode)
+		add_remove = !add_remove;
+	DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
+	if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
+		return 0;
+	}
+
+
+	if (num >= dhd->pub.pktfilter_count) {
+		return -EINVAL;
+	}
+
+	ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+	return ret;
+}
+
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+
+{
+	int ret = 0;
+
+	/* Packet filtering is set only if we are still in early-suspend and
+	 * we need either to turn it ON or turn it OFF.
+	 * We can always turn it OFF in case of early-suspend, but we turn it
+	 * back ON only if suspend_disable_flag was not set.
+	 */
+	if (dhdp && dhdp->up) {
+		if (dhdp->in_suspend) {
+			if (!val || (val && !dhdp->suspend_disable_flag))
+				dhd_enable_packet_filter(val, dhdp);
+		}
+	}
+	return ret;
+}
+
+/* function to enable/disable packet filtering for a network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
+	return dhd_os_enable_packet_filter(&dhd->pub, val);
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+int
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret;
+
+	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
+		goto done;
+
+done:
+	return ret;
+}
+
+int
+dhd_dev_get_feature_set(struct net_device *dev)
+{
+	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhd = (&ptr->pub);
+	int feature_set = 0;
+
+	if (FW_SUPPORTED(dhd, sta))
+		feature_set |= WIFI_FEATURE_INFRA;
+	if (FW_SUPPORTED(dhd, dualband))
+		feature_set |= WIFI_FEATURE_INFRA_5G;
+	if (FW_SUPPORTED(dhd, p2p))
+		feature_set |= WIFI_FEATURE_P2P;
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+		feature_set |= WIFI_FEATURE_SOFT_AP;
+	if (FW_SUPPORTED(dhd, tdls))
+		feature_set |= WIFI_FEATURE_TDLS;
+	if (FW_SUPPORTED(dhd, vsdb))
+		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
+	if (FW_SUPPORTED(dhd, nan)) {
+		feature_set |= WIFI_FEATURE_NAN;
+		/* NAN is essential for d2d RTT */
+		if (FW_SUPPORTED(dhd, rttd2d))
+			feature_set |= WIFI_FEATURE_D2D_RTT;
+	}
+#ifdef RTT_SUPPORT
+	if (dhd->rtt_supported) {
+		feature_set |= WIFI_FEATURE_D2D_RTT;
+		feature_set |= WIFI_FEATURE_D2AP_RTT;
+	}
+#endif /* RTT_SUPPORT */
+#ifdef LINKSTAT_SUPPORT
+	feature_set |= WIFI_FEATURE_LINKSTAT;
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef PNO_SUPPORT
+	if (dhd_is_pno_supported(dhd)) {
+		feature_set |= WIFI_FEATURE_PNO;
+#ifdef GSCAN_SUPPORT
+		/* terence 20171115: remove to get GTS PASS
+		 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
+		 */
+//		feature_set |= WIFI_FEATURE_GSCAN;
+//		feature_set |= WIFI_FEATURE_HAL_EPNO;
+#endif /* GSCAN_SUPPORT */
+	}
+#endif /* PNO_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+	if (FW_SUPPORTED(dhd, rssi_mon)) {
+		feature_set |= WIFI_FEATURE_RSSI_MONITOR;
+	}
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef WL11U
+	feature_set |= WIFI_FEATURE_HOTSPOT;
+#endif /* WL11U */
+#ifdef NDO_CONFIG_SUPPORT
+	feature_set |= WIFI_FEATURE_CONFIG_NDO;
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef KEEP_ALIVE
+	feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
+#endif /* KEEP_ALIVE */
+
+	return feature_set;
+}
+
+int
+dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
+{
+	int feature_set_full;
+	int ret = 0;
+
+	feature_set_full = dhd_dev_get_feature_set(dev);
+
+	/* Common feature set for all interfaces */
+	ret = (feature_set_full & WIFI_FEATURE_INFRA) |
+		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
+		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
+		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+		(feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
+		(feature_set_full & WIFI_FEATURE_EPR);
+
+	/* Specific feature group for each interface */
+	switch (num) {
+	case 0:
+		ret |= (feature_set_full & WIFI_FEATURE_P2P) |
+			/* Not supported yet */
+			/* (feature_set_full & WIFI_FEATURE_NAN) | */
+			(feature_set_full & WIFI_FEATURE_TDLS) |
+			(feature_set_full & WIFI_FEATURE_PNO) |
+			(feature_set_full & WIFI_FEATURE_HAL_EPNO) |
+			(feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
+			(feature_set_full & WIFI_FEATURE_GSCAN) |
+			(feature_set_full & WIFI_FEATURE_HOTSPOT) |
+			(feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
+		break;
+
+	case 1:
+		ret |= (feature_set_full & WIFI_FEATURE_P2P);
+		/* Not yet verified NAN with P2P */
+		/* (feature_set_full & WIFI_FEATURE_NAN) | */
+		break;
+
+	case 2:
+		ret |= (feature_set_full & WIFI_FEATURE_NAN) |
+			(feature_set_full & WIFI_FEATURE_TDLS) |
+			(feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
+		break;
+
+	default:
+		ret = WIFI_FEATURE_INVALID;
+		DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+int
+dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (nodfs)
+		dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+	else
+		dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
+	dhd->pub.force_country_change = TRUE;
+	return 0;
+}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+
+#ifdef NDO_CONFIG_SUPPORT
+int
+dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	if (enable) {
+		/* enable ND offload feature (will be enabled in FW on suspend) */
+		dhdp->ndo_enable = TRUE;
+
+		/* Update changes to anycast addresses & DAD-failed addresses */
+		ret = dhd_dev_ndo_update_inet6addr(dev);
+		if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+			DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
+			return ret;
+		}
+	} else {
+		/* disable ND offload feature */
+		dhdp->ndo_enable = FALSE;
+
+		/* disable ND offload in FW */
+		ret = dhd_ndo_enable(dhdp, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
+		}
+	}
+	return ret;
+}
+
+/* #pragma used as a WAR to fix a build failure:
+ * ignore dropping of the 'const' qualifier in the 'list_entry' macro.
+ * This pragma disables the warning only for the following function.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static int
+dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
+{
+	struct inet6_ifaddr *ifa;
+	struct ifacaddr6 *acaddr = NULL;
+	int addr_count = 0;
+
+	/* lock */
+	read_lock_bh(&inet6->lock);
+
+	/* Count valid unicast address */
+	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+		if ((ifa->flags & IFA_F_DADFAILED) == 0) {
+			addr_count++;
+		}
+	}
+
+	/* Count anycast address */
+	acaddr = inet6->ac_list;
+	while (acaddr) {
+		addr_count++;
+		acaddr = acaddr->aca_next;
+	}
+
+	/* unlock */
+	read_unlock_bh(&inet6->lock);
+
+	return addr_count;
+}
+
+int
+dhd_dev_ndo_update_inet6addr(struct net_device *dev)
+{
+	dhd_info_t *dhd;
+	dhd_pub_t *dhdp;
+	struct inet6_dev *inet6;
+	struct inet6_ifaddr *ifa;
+	struct ifacaddr6 *acaddr = NULL;
+	struct in6_addr *ipv6_addr = NULL;
+	int cnt, i;
+	int ret = BCME_OK;
+
+	/*
+	 * This function evaluates the host IP addresses in struct inet6_dev:
+	 * unicast addresses in inet6_dev->addr_list and
+	 * anycast addresses in inet6_dev->ac_list.
+	 * While evaluating inet6_dev, read_lock_bh() is required to prevent
+	 * access to a NULL (freed) pointer.
+	 */
+
+	if (dev) {
+		inet6 = dev->ip6_ptr;
+		if (!inet6) {
+			DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+
+		dhd = DHD_DEV_INFO(dev);
+		if (!dhd) {
+			DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		dhdp = &dhd->pub;
+
+		if (dhd_net2idx(dhd, dev) != 0) {
+			DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+	} else {
+		DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Check host IP overflow */
+	cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
+	if (cnt > dhdp->ndo_max_host_ip) {
+		if (!dhdp->ndo_host_ip_overflow) {
+			dhdp->ndo_host_ip_overflow = TRUE;
+			/* Disable ND offload in FW */
+			DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
+			ret = dhd_ndo_enable(dhdp, 0);
+		}
+
+		return ret;
+	}
+
+	/*
+	 * Allocate ipv6 addr buffer to store addresses to be added/removed.
+	 * The driver needs to lock inet6_dev while accessing the structure, but
+	 * it cannot issue an ioctl while inet6_dev is locked since that requires scheduling;
+	 * hence, copy the addresses to the buffer and do the ioctl after unlocking.
+	 */
+	ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
+		sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+	if (!ipv6_addr) {
+		DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+
+	/* Find DAD failed unicast address to be removed */
+	cnt = 0;
+	read_lock_bh(&inet6->lock);
+	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+		/* DAD failed unicast address */
+		if ((ifa->flags & IFA_F_DADFAILED) &&
+			(cnt < dhdp->ndo_max_host_ip)) {
+				memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
+				cnt++;
+		}
+	}
+	read_unlock_bh(&inet6->lock);
+
+	/* Remove DAD failed unicast address */
+	for (i = 0; i < cnt; i++) {
+		DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
+		ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
+		if (ret < 0) {
+			goto done;
+		}
+	}
+
+	/* Remove all anycast address */
+	ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+	if (ret < 0) {
+		goto done;
+	}
+
+	/*
+	 * If ND offload was disabled due to host IP overflow,
+	 * attempt to add the valid unicast addresses back.
+	 */
+	if (dhdp->ndo_host_ip_overflow) {
+		/* Find valid unicast address */
+		cnt = 0;
+		read_lock_bh(&inet6->lock);
+		list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+			/* valid unicast address */
+			if (!(ifa->flags & IFA_F_DADFAILED) &&
+				(cnt < dhdp->ndo_max_host_ip)) {
+					memcpy(&ipv6_addr[cnt], &ifa->addr,
+						sizeof(struct in6_addr));
+					cnt++;
+			}
+		}
+		read_unlock_bh(&inet6->lock);
+
+		/* Add valid unicast address */
+		for (i = 0; i < cnt; i++) {
+			ret = dhd_ndo_add_ip_with_type(dhdp,
+				(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
+			if (ret < 0) {
+				goto done;
+			}
+		}
+	}
+
+	/* Find anycast address */
+	cnt = 0;
+	read_lock_bh(&inet6->lock);
+	acaddr = inet6->ac_list;
+	while (acaddr) {
+		if (cnt < dhdp->ndo_max_host_ip) {
+			memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
+			cnt++;
+		}
+		acaddr = acaddr->aca_next;
+	}
+	read_unlock_bh(&inet6->lock);
+
+	/* Add anycast address */
+	for (i = 0; i < cnt; i++) {
+		ret = dhd_ndo_add_ip_with_type(dhdp,
+			(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+		if (ret < 0) {
+			goto done;
+		}
+	}
+
+	/* Now all host IP addresses have been added successfully */
+	if (dhdp->ndo_host_ip_overflow) {
+		dhdp->ndo_host_ip_overflow = FALSE;
+		if (dhdp->in_suspend) {
+			/* driver is in (early) suspend state, need to enable ND offload in FW */
+			DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
+			ret = dhd_ndo_enable(dhdp, 1);
+		}
+	}
+
+done:
+	if (ipv6_addr) {
+		MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+	}
+
+	return ret;
+}
+#pragma GCC diagnostic pop
+
+#endif /* NDO_CONFIG_SUPPORT */
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_stop_for_ssid(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+		pno_repeat, pno_freq_expo_max, channel_list, nchan));
+}
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int enable)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_enable(&dhd->pub, enable));
+}
+
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
+int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_stop_for_batch(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
+
+/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+#endif /* PNO_SUPPORT */
+
+#if defined(PNO_SUPPORT)
+#ifdef GSCAN_SUPPORT
+bool
+dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_is_legacy_pno_enabled(&dhd->pub));
+}
+
+int
+dhd_dev_set_epno(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	if (!dhd) {
+		return BCME_ERROR;
+	}
+	return dhd_pno_set_epno(&dhd->pub);
+}
+int
+dhd_dev_flush_fw_epno(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	if (!dhd) {
+		return BCME_ERROR;
+	}
+	return dhd_pno_flush_fw_epno(&dhd->pub);
+}
+
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, bool flush)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
+}
+
+/* Linux wrapper to call common dhd_wait_batch_results_complete */
+int
+dhd_dev_wait_batch_results_complete(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_wait_batch_results_complete(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_lock_batch_results */
+int
+dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_lock_batch_results(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_unlock_batch_results */
+void
+dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_unlock_batch_results(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
+int
+dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
+}
+
+/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
+int
+dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
+}
+
+/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
+void *
+dhd_dev_hotlist_scan_event(struct net_device *dev,
+      const void  *data, int *send_evt_bytes, hotlist_type_t type)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
+}
+
+/* Linux wrapper to call common dhd_process_full_gscan_result */
+void *
+dhd_dev_process_full_gscan_result(struct net_device *dev,
+const void  *data, uint32 len, int *send_evt_bytes)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
+}
+
+void
+dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
+
+	return;
+}
+
+int
+dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_retreive_batch_scan_results */
+int
+dhd_dev_retrieve_batch_scan(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_retreive_batch_scan_results(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_process_epno_result */
+void * dhd_dev_process_epno_result(struct net_device *dev,
+	const void  *data, uint32 event, int *send_evt_bytes)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
+}
+
+int
+dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
+             wlc_roam_exp_params_t *roam_param)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	wl_roam_exp_cfg_t roam_exp_cfg;
+	int err;
+
+	if (!roam_param) {
+		return BCME_BADARG;
+	}
+
+	DHD_ERROR(("a_band_boost_thr %d a_band_penalty_thr %d\n",
+	      roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
+	DHD_ERROR(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
+	      roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
+	      roam_param->cur_bssid_boost));
+	DHD_ERROR(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
+	      roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
+
+	memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
+	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+	roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
+	if (dhd->pub.lazy_roam_enable) {
+		roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
+	}
+	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+			(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+			TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+
+int
+dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
+{
+	int err;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	wl_roam_exp_cfg_t roam_exp_cfg;
+
+	memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
+	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+	if (enable) {
+		roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
+	}
+
+	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+			(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+			TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+	} else {
+		dhd->pub.lazy_roam_enable = (enable != 0);
+	}
+	return err;
+}
+
+int
+dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
+       wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
+{
+	int err;
+	int len;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	bssid_pref->version = BSSID_PREF_LIST_VERSION;
+	/* By default programming bssid pref flushes out old values */
+	bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
+	len = sizeof(wl_bssid_pref_cfg_t);
+	len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
+	err = dhd_iovar(&(dhd->pub), 0, "roam_exp_bssid_pref", (char *)bssid_pref,
+		len, NULL, 0, TRUE);
+	if (err != BCME_OK) {
+		DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+
+int
+dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
+    uint32 len, uint32 flush)
+{
+	int err;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int macmode;
+
+	if (blacklist) {
+		err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
+				len, TRUE, 0);
+		if (err != BCME_OK) {
+			DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
+			return err;
+		}
+	}
+	/* By default programming blacklist flushes out old values */
+	macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
+	err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
+	              sizeof(macmode), TRUE, 0);
+	if (err != BCME_OK) {
+		DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+
+int
+dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
+    uint32 len, uint32 flush)
+{
+	int err;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	wl_ssid_whitelist_t whitelist_ssid_flush;
+
+	if (!ssid_whitelist) {
+		if (flush) {
+			ssid_whitelist = &whitelist_ssid_flush;
+			ssid_whitelist->ssid_count = 0;
+		} else {
+			DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
+			return BCME_BADARG;
+		}
+	}
+	ssid_whitelist->version = SSID_WHITELIST_VERSION;
+	ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
+	err = dhd_iovar(&(dhd->pub), 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist,
+			len, NULL, 0, TRUE);
+	if (err != BCME_OK) {
+		DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+/* Linux wrapper to call common dhd_pno_get_gscan */
+void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+	void *info, uint32 *len)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
+}
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#endif
+
+#ifdef  RSSI_MONITOR_SUPPORT
+int
+dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
+             int8 max_rssi, int8 min_rssi)
+{
+	int err;
+	wl_rssi_monitor_cfg_t rssi_monitor;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	rssi_monitor.version = RSSI_MONITOR_VERSION;
+	rssi_monitor.max_rssi = max_rssi;
+	rssi_monitor.min_rssi = min_rssi;
+	rssi_monitor.flags = start ? 0 : RSSI_MONITOR_STOP;
+	err = dhd_iovar(&(dhd->pub), 0, "rssi_monitor", (char *)&rssi_monitor,
+		sizeof(rssi_monitor), NULL, 0, TRUE);
+	if (err < 0 && err != BCME_UNSUPPORTED) {
+		DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+#endif /* RSSI_MONITOR_SUPPORT */
+
+#ifdef DHDTCPACK_SUPPRESS
+int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
+{
+	int err;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	err = dhd_tcpack_suppress_set(&(dhd->pub), enable);
+	if (err != BCME_OK) {
+		DHD_ERROR(("%s : Failed to set tcpack_suppress mode %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+int
+dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+
+	if (!dhdp || !oui) {
+		DHD_ERROR(("NULL POINTER : %s\n",
+			__FUNCTION__));
+		return BCME_ERROR;
+	}
+	if (ETHER_ISMULTI(oui)) {
+		DHD_ERROR(("Expected unicast OUI\n"));
+		return BCME_ERROR;
+	} else {
+		uint8 *rand_mac_oui = dhdp->rand_mac_oui;
+		memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
+		DHD_ERROR(("Random MAC OUI to be used - %02x:%02x:%02x\n", rand_mac_oui[0],
+		    rand_mac_oui[1], rand_mac_oui[2]));
+	}
+	return BCME_OK;
+}
+
+int
+dhd_set_rand_mac_oui(dhd_pub_t *dhd)
+{
+	int err;
+	wl_pfn_macaddr_cfg_t wl_cfg;
+	uint8 *rand_mac_oui = dhd->rand_mac_oui;
+
+	memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
+	memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
+	wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
+	if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
+		wl_cfg.flags = 0;
+	} else {
+		wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
+	}
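+	/*
+	 * Intended behaviour, inferred from the flag names above (not stated in this
+	 * patch): with WL_PFN_MAC_OUI_ONLY_MASK set, the firmware is expected to keep
+	 * the 3-byte OUI programmed here and randomize only the lower 3 bytes of the
+	 * MAC used for unassociated PNO scans.
+	 */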
+
+	DHD_ERROR(("Setting rand mac oui to FW - %02x:%02x:%02x\n", rand_mac_oui[0],
+		rand_mac_oui[1], rand_mac_oui[2]));
+
+	err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+
+#ifdef RTT_SUPPORT
+#ifdef WL_CFG80211
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_set_cfg(&dhd->pub, buf));
+}
+
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
+}
+
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
+}
+
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
+}
+
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_capability(&dhd->pub, capa));
+}
+
+int
+dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
+}
+
+int
+dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
+}
+
+int dhd_dev_rtt_cancel_responder(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	return (dhd_rtt_cancel_responder(&dhd->pub));
+}
+#endif /* WL_CFG80211 */
+#endif /* RTT_SUPPORT */
+
+#ifdef KEEP_ALIVE
+#define KA_TEMP_BUF_SIZE 512
+#define KA_FRAME_SIZE 300
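+/*
+ * Sizing note (an assumption, not stated explicitly in this patch): KA_FRAME_SIZE
+ * leaves room for the largest supported IP packet plus the Ethernet header built
+ * below, i.e. 256 + 14 <= 300.
+ */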
+
+int
+dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
+	uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
+{
+	const int		ETHERTYPE_LEN = 2;
+	char			*pbuf = NULL;
+	const char		*str;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt;
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp = NULL;
+	int			buf_len = 0;
+	int			str_len = 0;
+	int			res = BCME_ERROR;
+	int			len_bytes = 0;
+	int			i = 0;
+
+	/* ether frame to have both max IP pkt (256 bytes) and ether header */
+	char			*pmac_frame = NULL;
+	char			*pmac_frame_begin = NULL;
+
+	/*
+	 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
+	 * dongle shall reject a mkeep_alive request.
+	 */
+	if (!dhd_support_sta_mode(dhd_pub))
+		return res;
+
+	DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+	if ((pbuf = kzalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
+		DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+		res = BCME_NOMEM;
+		return res;
+	}
+
+	if ((pmac_frame = kzalloc(KA_FRAME_SIZE, GFP_KERNEL)) == NULL) {
+		DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
+		res = BCME_NOMEM;
+		goto exit;
+	}
+	pmac_frame_begin = pmac_frame;
+
+	/*
+	 * Get current mkeep-alive status.
+	 */
+	res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
+			KA_TEMP_BUF_SIZE, FALSE);
+	if (res < 0) {
+		DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+		goto exit;
+	} else {
+		/* Check available ID whether it is occupied */
+		mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+		if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
+			DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
+				__FUNCTION__, mkeep_alive_id));
+
+			/* Current occupied ID info */
+			DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
+			DHD_ERROR(("   Id    : %d\n"
+				"   Period: %d msec\n"
+				"   Length: %d\n"
+				"   Packet: 0x",
+				mkeep_alive_pktp->keep_alive_id,
+				dtoh32(mkeep_alive_pktp->period_msec),
+				dtoh16(mkeep_alive_pktp->len_bytes)));
+
+			for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+				DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
+			}
+			DHD_ERROR(("\n"));
+
+			res = BCME_NOTFOUND;
+			goto exit;
+		}
+	}
+
+	/* Request the specified ID */
+	memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+	memset(pbuf, 0, KA_TEMP_BUF_SIZE);
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(pbuf, str, str_len);
+	pbuf[str_len] = '\0';
+
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
+	mkeep_alive_pkt.period_msec = htod32(period_msec);
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+	/* ID assigned */
+	mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+
+	/*
+	 * Build up Ethernet Frame
+	 */
+
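+	/*
+	 * Illustrative layout of the frame assembled below (offsets assume
+	 * ETHER_ADDR_LEN == 6):
+	 *   [ dst mac (6) ][ src mac (6) ][ ethertype 0x0800 (2) ][ IP packet (ip_pkt_len) ]
+	 * which is exactly what the len_bytes computation further down adds up.
+	 */
+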
+	/* Mapping dest mac addr */
+	memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
+	pmac_frame += ETHER_ADDR_LEN;
+
+	/* Mapping src mac addr */
+	memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
+	pmac_frame += ETHER_ADDR_LEN;
+
+	/* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
+	*(pmac_frame++) = 0x08;
+	*(pmac_frame++) = 0x00;
+
+	/* Mapping IP pkt */
+	memcpy(pmac_frame, ip_pkt, ip_pkt_len);
+	pmac_frame += ip_pkt_len;
+
+	/*
+	 * Length of the ether frame (raw bytes)
+	 *     = src mac + dst mac + ether type + ip pkt len
+	 */
+	len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
+	memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
+	buf_len += len_bytes;
+	mkeep_alive_pkt.len_bytes = htod16(len_bytes);
+
+	/*
+	 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
+exit:
+	kfree(pmac_frame_begin);
+	kfree(pbuf);
+	return res;
+}
+
+int
+dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
+{
+	char			*pbuf;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt;
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
+	int			res = BCME_ERROR;
+	int			i;
+
+	/*
+	 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
+	 * dongle shall reject a mkeep_alive request.
+	 */
+	if (!dhd_support_sta_mode(dhd_pub))
+		return res;
+
+	DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+	/*
+	 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
+	 */
+	if ((pbuf = kmalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
+		DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+		return res;
+	}
+
+	res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
+			sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
+	if (res < 0) {
+		DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+		goto exit;
+	} else {
+		/* Check occupied ID */
+		mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+		DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
+		DHD_INFO(("   Id    : %d\n"
+			"   Period: %d msec\n"
+			"   Length: %d\n"
+			"   Packet: 0x",
+			mkeep_alive_pktp->keep_alive_id,
+			dtoh32(mkeep_alive_pktp->period_msec),
+			dtoh16(mkeep_alive_pktp->len_bytes)));
+
+		for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+			DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
+		}
+		DHD_INFO(("\n"));
+	}
+
+	/* Make it stop if available */
+	if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
+		DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
+		memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+
+		mkeep_alive_pkt.period_msec = 0;
+		mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+		mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+		mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+
+		res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
+				(char *)&mkeep_alive_pkt,
+				WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
+	} else {
+		DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
+		res = BCME_NOTFOUND;
+	}
+exit:
+	kfree(pbuf);
+	return res;
+}
+#endif /* KEEP_ALIVE */
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+static void _dhd_apf_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd) {
+		mutex_lock(&dhd->dhd_apf_mutex);
+	}
+#endif
+}
+
+static void _dhd_apf_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd) {
+		mutex_unlock(&dhd->dhd_apf_mutex);
+	}
+#endif
+}
+
+static int
+__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
+	u8* program, uint32 program_len)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	wl_pkt_filter_t * pkt_filterp;
+	wl_apf_program_t *apf_program;
+	char *buf;
+	u32 cmd_len, buf_len;
+	int ifidx, ret;
+	gfp_t kflags;
+	char cmd[] = "pkt_filter_add";
+
+	ifidx = dhd_net2idx(dhd, ndev);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	cmd_len = sizeof(cmd);
+
+	/* Reject the request if program_len exceeds the maximum supported
+	 * size or if program is NULL.
+	 */
+	if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
+		DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
+				__FUNCTION__, program_len, program));
+		return -EINVAL;
+	}
+	buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
+		WL_APF_PROGRAM_FIXED_LEN + program_len;
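+	/*
+	 * Rough layout of the iovar buffer assembled below (a sketch based on the
+	 * lengths above):
+	 *   [ "pkt_filter_add\0" ][ wl_pkt_filter_t fixed part ][ wl_apf_program_t fixed part ][ APF program bytes ]
+	 */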
+
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	buf = kzalloc(buf_len, kflags);
+	if (unlikely(!buf)) {
+		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+		return -ENOMEM;
+	}
+
+	memcpy(buf, cmd, cmd_len);
+
+	pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
+	pkt_filterp->id = htod32(filter_id);
+	pkt_filterp->negate_match = htod32(FALSE);
+	pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
+
+	apf_program = &pkt_filterp->u.apf_program;
+	apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
+	apf_program->instr_len = htod16(program_len);
+	memcpy(apf_program->instrs, program, program_len);
+
+	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+	if (unlikely(ret)) {
+		DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
+			__FUNCTION__, filter_id, ret));
+	}
+
+	if (buf) {
+		kfree(buf);
+	}
+	return ret;
+}
+
+static int
+__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
+	uint32 mode, uint32 enable)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	wl_pkt_filter_enable_t * pkt_filterp;
+	char *buf;
+	u32 cmd_len, buf_len;
+	int ifidx, ret;
+	gfp_t kflags;
+	char cmd[] = "pkt_filter_enable";
+
+	ifidx = dhd_net2idx(dhd, ndev);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	cmd_len = sizeof(cmd);
+	buf_len = cmd_len + sizeof(*pkt_filterp);
+
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	buf = kzalloc(buf_len, kflags);
+	if (unlikely(!buf)) {
+		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+		return -ENOMEM;
+	}
+
+	memcpy(buf, cmd, cmd_len);
+
+	pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
+	pkt_filterp->id = htod32(filter_id);
+	pkt_filterp->enable = htod32(enable);
+
+	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+	if (unlikely(ret)) {
+		DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
+			__FUNCTION__, filter_id, ret));
+		goto exit;
+	}
+
+	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
+		WLC_SET_VAR, TRUE, ifidx);
+	if (unlikely(ret)) {
+		DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
+			__FUNCTION__, filter_id, ret));
+	}
+
+exit:
+	if (buf) {
+		kfree(buf);
+	}
+	return ret;
+}
+
+static int
+__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ifidx, ret;
+
+	ifidx = dhd_net2idx(dhd, ndev);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
+		htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
+	if (unlikely(ret)) {
+		DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
+			__FUNCTION__, filter_id, ret));
+	}
+
+	return ret;
+}
+
+void dhd_apf_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	_dhd_apf_lock_local(dhd);
+}
+
+void dhd_apf_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	_dhd_apf_unlock_local(dhd);
+}
+
+int
+dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ifidx, ret;
+
+	if (!FW_SUPPORTED(dhdp, apf)) {
+		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+
+		/*
+		 * Notify Android framework that APF is not supported by setting
+		 * version as zero.
+		 */
+		*version = 0;
+		return BCME_OK;
+	}
+
+	ifidx = dhd_net2idx(dhd, ndev);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
+		WLC_GET_VAR, FALSE, ifidx);
+	if (unlikely(ret)) {
+		DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
+			__FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
+int
+dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ifidx, ret;
+
+	if (!FW_SUPPORTED(dhdp, apf)) {
+		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+		*max_len = 0;
+		return BCME_OK;
+	}
+
+	ifidx = dhd_net2idx(dhd, ndev);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
+		WLC_GET_VAR, FALSE, ifidx);
+	if (unlikely(ret)) {
+		DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
+			__FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
+int
+dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
+	uint32 program_len)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret;
+
+	DHD_APF_LOCK(ndev);
+
+	/* delete, if filter already exists */
+	if (dhdp->apf_set) {
+		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+		if (unlikely(ret)) {
+			goto exit;
+		}
+		dhdp->apf_set = FALSE;
+	}
+
+	ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
+	if (ret) {
+		goto exit;
+	}
+	dhdp->apf_set = TRUE;
+
+	if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		/* Driver is still in (early) suspend state, enable APF filter back */
+		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
+	}
+exit:
+	DHD_APF_UNLOCK(ndev);
+
+	return ret;
+}
+
+int
+dhd_dev_apf_enable_filter(struct net_device *ndev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	DHD_APF_LOCK(ndev);
+
+	if (dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
+	}
+
+	DHD_APF_UNLOCK(ndev);
+
+	return ret;
+}
+
+int
+dhd_dev_apf_disable_filter(struct net_device *ndev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	DHD_APF_LOCK(ndev);
+
+	if (dhdp->apf_set) {
+		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+			PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
+	}
+
+	DHD_APF_UNLOCK(ndev);
+
+	return ret;
+}
+
+int
+dhd_dev_apf_delete_filter(struct net_device *ndev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	DHD_APF_LOCK(ndev);
+
+	if (dhdp->apf_set) {
+		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+		if (!ret) {
+			dhdp->apf_set = FALSE;
+		}
+	}
+
+	DHD_APF_UNLOCK(ndev);
+
+	return ret;
+}
+#endif /* PKT_FILTER_SUPPORT && APF */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
+{
+	dhd_info_t *dhd;
+	struct net_device *dev;
+
+	dhd = (dhd_info_t *)dhd_info;
+	if (!dhd || !dhd->iflist[0])
+		return;
+	dev = dhd->iflist[0]->net;
+
+	if (dev) {
+		/*
+		 * For HW2, dev_close needs to be called so the upper layer
+		 * can recover after a hang. For Interposer, skip dev_close so
+		 * that dhd iovars can still be used to take a socramdump after
+		 * the crash; also skip for HW4, where hang event handling is
+		 * different.
+		 */
+#if !defined(CUSTOMER_HW2_INTERPOSER)
+		rtnl_lock();
+		dev_close(dev);
+		rtnl_unlock();
+#endif /* !CUSTOMER_HW2_INTERPOSER */
+#if defined(WL_WIRELESS_EXT)
+		wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+	}
+}
+
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+extern dhd_pub_t *link_recovery;
+void dhd_host_recover_link(void)
+{
+	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
+	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+	dhd_bus_set_linkdown(link_recovery, TRUE);
+	dhd_os_send_hang_message(link_recovery);
+}
+EXPORT_SYMBOL(dhd_host_recover_link);
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+{
+	int ret = 0;
+	if (dhdp) {
+#if defined(DHD_HANG_SEND_UP_TEST)
+		if (dhdp->req_hang_type) {
+			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+				__FUNCTION__, dhdp->req_hang_type));
+			dhdp->req_hang_type = 0;
+		}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+		if (!dhdp->hang_was_sent) {
+#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
+			dhdp->hang_counts++;
+			if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
+				DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
+					__func__, dhdp->hang_counts));
+				BUG_ON(1);
+			}
+#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
+#ifdef DHD_DEBUG_UART
+			/* If the PCIe link is broken, execute the debug UART application
+			 * to gather ramdump data from the dongle via UART.
+			 */
+			if (!dhdp->info->duart_execute) {
+				dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+					(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
+					dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
+			}
+#endif	/* DHD_DEBUG_UART */
+			dhdp->hang_was_sent = 1;
+#ifdef BT_OVER_SDIO
+			dhdp->is_bt_recovery_required = TRUE;
+#endif
+			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
+				DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH);
+			DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d s=%d\n", __FUNCTION__,
+				dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
+		}
+	}
+	return ret;
+}
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd) {
+		/* Report FW problem when enabled */
+		if (dhd->pub.hang_report) {
+#ifdef BT_OVER_SDIO
+			if (netif_running(dev)) {
+#endif /* BT_OVER_SDIO */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				ret = dhd_os_send_hang_message(&dhd->pub);
+#else
+				ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+#ifdef BT_OVER_SDIO
+			}
+			DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
+			bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
+#endif /* BT_OVER_SDIO */
+		} else {
+			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
+				__FUNCTION__));
+		}
+	}
+	return ret;
+}
+
+int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
+{
+	dhd_info_t *dhd = NULL;
+	dhd_pub_t *dhdp = NULL;
+	int reason;
+
+	dhd = DHD_DEV_INFO(dev);
+	if (dhd) {
+		dhdp = &dhd->pub;
+	}
+
+	if (!dhd || !dhdp) {
+		return 0;
+	}
+
+	reason = bcm_strtoul(string_num, NULL, 0);
+	DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
+
+	if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+		reason = 0;
+	}
+
+	dhdp->hang_reason = reason;
+
+	return net_os_send_hang_message(dev);
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+
+
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
+
+bool dhd_force_country_change(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd && dhd->pub.up)
+		return dhd->pub.force_country_change;
+	return FALSE;
+}
+
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+	if (!dhd->pub.is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+	{
+#if defined(CUSTOM_COUNTRY_CODE)
+		get_customized_country_code(dhd->adapter, country_iso_code, cspec,
+			dhd->pub.dhd_cflags);
+#else
+		get_customized_country_code(dhd->adapter, country_iso_code, cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
+	}
+
+	BCM_REFERENCE(dhd);
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif
+
+	if (dhd && dhd->pub.up) {
+		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+#ifdef WL_CFG80211
+		wl_update_wiphybands(cfg, notify);
+#endif
+	}
+}
+
+void dhd_bus_band_set(struct net_device *dev, uint band)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif
+	if (dhd && dhd->pub.up) {
+#ifdef WL_CFG80211
+		wl_update_wiphybands(cfg, true);
+#endif
+	}
+}
+
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (!fw || fw[0] == '\0')
+		return -EINVAL;
+
+	strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
+	dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
+
+#if defined(SOFTAP)
+	if (strstr(fw, "apsta") != NULL) {
+		DHD_INFO(("GOT APSTA FIRMWARE\n"));
+		ap_fw_loaded = TRUE;
+	} else {
+		DHD_INFO(("GOT STA FIRMWARE\n"));
+		ap_fw_loaded = FALSE;
+	}
+#endif /* SOFTAP */
+	return 0;
+}
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_suspend_lock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_lock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+static void dhd_suspend_unlock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_unlock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags = 0;
+
+	if (dhd)
+		spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+	return flags;
+}
+
+void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+/* Linux specific multipurpose spinlock API */
+void *
+dhd_os_spin_lock_init(osl_t *osh)
+{
+	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+	/* and this results in kernel asserts in internal builds */
+	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+	if (lock)
+		spin_lock_init(lock);
+	return ((void *)lock);
+}
+void
+dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
+{
+	if (lock)
+		MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+unsigned long
+dhd_os_spin_lock(void *lock)
+{
+	unsigned long flags = 0;
+
+	if (lock)
+		spin_lock_irqsave((spinlock_t *)lock, flags);
+
+	return flags;
+}
+void
+dhd_os_spin_unlock(void *lock, unsigned long flags)
+{
+	if (lock)
+		spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
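+
+/*
+ * Hypothetical usage sketch for the multipurpose spinlock API above (illustrative
+ * only, not taken from this patch):
+ *
+ *	void *lk = dhd_os_spin_lock_init(osh);
+ *	unsigned long flags = dhd_os_spin_lock(lk);
+ *	... critical section ...
+ *	dhd_os_spin_unlock(lk, flags);
+ *	dhd_os_spin_lock_deinit(osh, lk);
+ */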
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+	return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX	100
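+/*
+ * Each loop iteration below sleeps for msecs_to_jiffies(10), so the worst-case
+ * wait is roughly MAX_WAIT_FOR_8021X_TX * 10 ms = 100 * 10 ms = 1 second.
+ */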
+
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int timeout = msecs_to_jiffies(10);
+	int ntimes = MAX_WAIT_FOR_8021X_TX;
+	int pend = dhd_get_pend_8021x_cnt(dhd);
+
+	while (ntimes && pend) {
+		if (pend) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			DHD_PERIM_UNLOCK(&dhd->pub);
+			schedule_timeout(timeout);
+			DHD_PERIM_LOCK(&dhd->pub);
+			set_current_state(TASK_RUNNING);
+			ntimes--;
+		}
+		pend = dhd_get_pend_8021x_cnt(dhd);
+	}
+	if (ntimes == 0)
+	{
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+		DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
+	}
+	return pend;
+}
+
+#if defined(DHD_DEBUG)
+int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
+{
+	int ret = 0;
+	struct file *fp = NULL;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* open file to write */
+	fp = filp_open(file_name, flags, 0664);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Write buf to file */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	ret = kernel_write(fp, buf, size, &pos);
+#else
+	ret = vfs_write(fp, buf, size, &pos);
+#endif
+	if (ret < 0) {
+		DHD_ERROR(("write file error, err = %d\n", ret));
+		goto exit;
+	}
+
+	/* Sync file from filesystem to physical media */
+	ret = vfs_fsync(fp, 0);
+	if (ret < 0) {
+		DHD_ERROR(("sync file error, error = %d\n", ret));
+		goto exit;
+	}
+	ret = BCME_OK;
+
+exit:
+	/* close file before return */
+	if (!IS_ERR(fp))
+		filp_close(fp, current->files);
+
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_convert_memdump_type_to_str(uint32 type, char *buf)
+{
+	char *type_str = NULL;
+
+	switch (type) {
+		case DUMP_TYPE_RESUMED_ON_TIMEOUT:
+			type_str = "resumed_on_timeout";
+			break;
+		case DUMP_TYPE_D3_ACK_TIMEOUT:
+			type_str = "D3_ACK_timeout";
+			break;
+		case DUMP_TYPE_DONGLE_TRAP:
+			type_str = "Dongle_Trap";
+			break;
+		case DUMP_TYPE_MEMORY_CORRUPTION:
+			type_str = "Memory_Corruption";
+			break;
+		case DUMP_TYPE_PKTID_AUDIT_FAILURE:
+			type_str = "PKTID_AUDIT_Fail";
+			break;
+		case DUMP_TYPE_PKTID_INVALID:
+			type_str = "PKTID_INVALID";
+			break;
+		case DUMP_TYPE_SCAN_TIMEOUT:
+			type_str = "SCAN_timeout";
+			break;
+		case DUMP_TYPE_JOIN_TIMEOUT:
+			type_str = "JOIN_timeout";
+			break;
+		case DUMP_TYPE_SCAN_BUSY:
+			type_str = "SCAN_Busy";
+			break;
+		case DUMP_TYPE_BY_SYSDUMP:
+			type_str = "BY_SYSDUMP";
+			break;
+		case DUMP_TYPE_BY_LIVELOCK:
+			type_str = "BY_LIVELOCK";
+			break;
+		case DUMP_TYPE_AP_LINKUP_FAILURE:
+			type_str = "BY_AP_LINK_FAILURE";
+			break;
+		case DUMP_TYPE_AP_ABNORMAL_ACCESS:
+			type_str = "INVALID_ACCESS";
+			break;
+		case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
+			type_str = "CFG_VENDOR_TRIGGERED";
+			break;
+		case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
+			type_str = "ERROR_RX_TIMED_OUT";
+			break;
+		case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
+			type_str = "ERROR_TX_TIMED_OUT";
+			break;
+		case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
+			type_str = "BY_INVALID_RING_RDWR";
+			break;
+		case DUMP_TYPE_DONGLE_HOST_EVENT:
+			type_str = "BY_DONGLE_HOST_EVENT";
+			break;
+		case DUMP_TYPE_TRANS_ID_MISMATCH:
+			type_str = "BY_TRANS_ID_MISMATCH";
+			break;
+		case DUMP_TYPE_HANG_ON_IFACE_OP_FAIL:
+			type_str = "HANG_IFACE_OP_FAIL";
+			break;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+		case DUMP_TYPE_READ_SHM_FAIL:
+			type_str = "READ_SHM_FAIL";
+			break;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		default:
+			type_str = "Unknown_type";
+			break;
+	}
+
+	strncpy(buf, type_str, strlen(type_str));
+	buf[strlen(type_str)] = 0;
+}
+
+int
+write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
+{
+	int ret = 0;
+	char memdump_path[128];
+	char memdump_type[32];
+	struct timespec64 curtime;
+	uint32 file_mode;
+
+	/* Init file name */
+	memset(memdump_path, 0, sizeof(memdump_path));
+	memset(memdump_type, 0, sizeof(memdump_type));
+	ktime_get_real_ts64(&curtime);
+	dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
+#ifdef CUSTOMER_HW4_DEBUG
+	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+		DHD_COMMON_DUMP_PATH, fname, memdump_type,
+		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec / NSEC_PER_USEC);
+	file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#elif defined(CUSTOMER_HW2)
+	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+		"/data/misc/wifi/", fname, memdump_type,
+		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec / NSEC_PER_USEC);
+	file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
+	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+		"/data/misc/wifi/", fname, memdump_type,
+		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec / NSEC_PER_USEC);
+	file_mode = O_CREAT | O_WRONLY;
+#else
+	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+		"/installmedia/", fname, memdump_type,
+		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec / NSEC_PER_USEC);
+	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
+	 * calling BUG_ON immediately after collecting the socram dump.
+	 * So the file write operation should directly write the contents into the
+	 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
+	 * instead of appending.
+	 */
+	file_mode = O_CREAT | O_WRONLY | O_SYNC;
+	{
+		struct file *fp = filp_open(memdump_path, file_mode, 0664);
+		/* Check if it is live Brix image having /installmedia, else use /data */
+		if (IS_ERR(fp)) {
+			DHD_ERROR(("open file %s, try /data/\n", memdump_path));
+			snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+				"/data/", fname, memdump_type,
+				(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec / NSEC_PER_USEC);
+		} else {
+			filp_close(fp, NULL);
+		}
+	}
+#endif /* CUSTOMER_HW4_DEBUG */
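+	/*
+	 * Illustrative result (assuming the /data/misc/wifi/ branch and a hypothetical
+	 * fname of "mem_dump"; the actual prefix and dump type depend on the build
+	 * flags above):
+	 *   /data/misc/wifi/mem_dump_Dongle_Trap_1583859600.123456
+	 */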
+
+	/* print SOCRAM dump file path */
+	DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
+
+	/* Write file */
+	ret = write_file(memdump_path, file_mode, buf, size);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
+			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (dhd->wakelock_rx_timeout_enable)
+			wake_lock_timeout(&dhd->wl_rxwake,
+				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
+		if (dhd->wakelock_ctrl_timeout_enable)
+			wake_lock_timeout(&dhd->wl_ctrlwake,
+				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
+#endif
+		dhd->wakelock_rx_timeout_enable = 0;
+		dhd->wakelock_ctrl_timeout_enable = 0;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_rx_timeout_enable)
+			dhd->wakelock_rx_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_ctrl_timeout_enable)
+			dhd->wakelock_ctrl_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (wake_lock_active(&dhd->wl_ctrlwake))
+			wake_unlock(&dhd->wl_ctrlwake);
+#endif
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#include <linux/hashtable.h>
+#else
+#include <linux/hash.h>
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+/* Define 2^5 = 32 bucket size hash table */
+DEFINE_HASHTABLE(wklock_history, 5);
+#else
+/* Define 2^5 = 32 bucket size hash table */
+struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+int trace_wklock_onoff = 1;
+typedef enum dhd_wklock_type {
+	DHD_WAKE_LOCK,
+	DHD_WAKE_UNLOCK,
+	DHD_WAIVE_LOCK,
+	DHD_RESTORE_LOCK
+} dhd_wklock_t;
+
+struct wk_trace_record {
+	unsigned long addr;	            /* Address of the instruction */
+	dhd_wklock_t lock_type;         /* lock_type */
+	unsigned long long counter;		/* counter information */
+	struct hlist_node wklock_node;  /* hash node */
+};
+
+static struct wk_trace_record *find_wklock_entry(unsigned long addr)
+{
+	struct wk_trace_record *wklock_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
+#else
+	struct hlist_node *entry;
+	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
+	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+	{
+		if (wklock_info->addr == addr) {
+			return wklock_info;
+		}
+	}
+	return NULL;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define HASH_ADD(hashtable, node, key) \
+	do { \
+		hash_add(hashtable, node, key); \
+	} while (0);
+#else
+#define HASH_ADD(hashtable, node, key) \
+	do { \
+		int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
+		hlist_add_head(node, &hashtable[index]); \
+	} while (0);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
+
+#define STORE_WKLOCK_RECORD(wklock_type) \
+	do { \
+		struct wk_trace_record *wklock_info = NULL; \
+		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
+		wklock_info = find_wklock_entry(func_addr); \
+		if (wklock_info) { \
+			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
+				wklock_info->counter = dhd->wakelock_counter; \
+			} else { \
+				wklock_info->counter++; \
+			} \
+		} else { \
+			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
+			if (!wklock_info) {\
+				printk("Can't allocate wk_trace_record \n"); \
+			} else { \
+				wklock_info->addr = func_addr; \
+				wklock_info->lock_type = wklock_type; \
+				if (wklock_type == DHD_WAIVE_LOCK || \
+						wklock_type == DHD_RESTORE_LOCK) { \
+					wklock_info->counter = dhd->wakelock_counter; \
+				} else { \
+					wklock_info->counter++; \
+				} \
+				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
+			} \
+		} \
+	} while (0);
+
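+/*
+ * How STORE_WKLOCK_RECORD keys its records: each entry is keyed by
+ * __builtin_return_address(0), i.e. the caller of the wake-lock function that
+ * expands the macro, so the dump below groups lock/unlock counts per call site.
+ */
+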
+static inline void dhd_wk_lock_rec_dump(void)
+{
+	int bkt;
+	struct wk_trace_record *wklock_info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#else
+	struct hlist_node *entry = NULL;
+	int max_index = ARRAY_SIZE(wklock_history);
+	for (bkt = 0; bkt < max_index; bkt++)
+		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+		{
+			switch (wklock_info->lock_type) {
+				case DHD_WAKE_LOCK:
+					printk("wakelock lock : %pS  lock_counter : %llu \n",
+						(void *)wklock_info->addr, wklock_info->counter);
+					break;
+				case DHD_WAKE_UNLOCK:
+					printk("wakelock unlock : %pS, unlock_counter : %llu \n",
+						(void *)wklock_info->addr, wklock_info->counter);
+					break;
+				case DHD_WAIVE_LOCK:
+					printk("wakelock waive : %pS  before_waive : %llu \n",
+						(void *)wklock_info->addr, wklock_info->counter);
+					break;
+				case DHD_RESTORE_LOCK:
+					printk("wakelock restore : %pS, after_waive : %llu \n",
+						(void *)wklock_info->addr, wklock_info->counter);
+					break;
+			}
+		}
+}
+
+static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
+{
+	unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+	int i;
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	hash_init(wklock_history);
+#else
+	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
+		INIT_HLIST_HEAD(&wklock_history[i]);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+}
+
+static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
+{
+	int bkt;
+	struct wk_trace_record *wklock_info;
+	struct hlist_node *tmp;
+	unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+	struct hlist_node *entry = NULL;
+	int max_index = ARRAY_SIZE(wklock_history);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
+#else
+	for (bkt = 0; bkt < max_index; bkt++)
+		hlist_for_each_entry_safe(wklock_info, entry, tmp,
+			&wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
+		{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+			hash_del(&wklock_info->wklock_node);
+#else
+			hlist_del_init(&wklock_info->wklock_node);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
+			kfree(wklock_info);
+		}
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+}
+
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	unsigned long flags;
+
+	printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n");
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+	dhd_wk_lock_rec_dump();
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+
+}
+#else
+#define STORE_WKLOCK_RECORD(wklock_type)
+#endif /* ! DHD_TRACE_WAKE_LOCK */
+
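+/*
+ * The wake-lock helpers below reference-count the driver wakelock: the first
+ * dhd_os_wake_lock() call takes the underlying OS wakelock (or calls
+ * dhd_bus_dev_pm_stay_awake() on newer SDIO builds) and the matching final
+ * dhd_os_wake_unlock() releases it, as the wakelock_counter checks show.
+ */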
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+			dhd_bus_dev_pm_stay_awake(pub);
+#endif
+		}
+#ifdef DHD_TRACE_WAKE_LOCK
+		if (trace_wklock_onoff) {
+			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
+		}
+#endif /* DHD_TRACE_WAKE_LOCK */
+		dhd->wakelock_counter++;
+		ret = dhd->wakelock_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+
+	return ret;
+}
+
+void dhd_event_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_stay_awake(pub);
+#endif
+	}
+}
+
+void
+dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void
+dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	dhd_os_wake_lock_timeout(pub);
+	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
+		if (dhd->wakelock_counter > 0) {
+			dhd->wakelock_counter--;
+#ifdef DHD_TRACE_WAKE_LOCK
+			if (trace_wklock_onoff) {
+				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
+			}
+#endif /* DHD_TRACE_WAKE_LOCK */
+			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+				wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+				dhd_bus_dev_pm_relax(pub);
+#endif
+			}
+			ret = dhd->wakelock_counter;
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+void dhd_event_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_unlock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_relax(pub);
+#endif
+	}
+}
+
+void dhd_pm_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		/* if wl_pmwake is active, unlock it */
+		if (wake_lock_active(&dhd->wl_pmwake)) {
+			wake_unlock(&dhd->wl_pmwake);
+		}
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void dhd_txfl_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		/* if wl_txflwake is active, unlock it */
+		if (wake_lock_active(&dhd->wl_txflwake)) {
+			wake_unlock(&dhd->wl_txflwake);
+		}
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+int dhd_os_check_wakelock(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+	KERNEL_VERSION(2, 6, 36)))
+	dhd_info_t *dhd;
+
+	if (!pub)
+		return 0;
+	dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+		(wake_lock_active(&dhd->wl_wdwake))))
+		return 1;
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+		return 1;
+#endif
+	return 0;
+}
+
+int
+dhd_os_check_wakelock_all(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+	KERNEL_VERSION(2, 6, 36)))
+#if defined(CONFIG_HAS_WAKELOCK)
+	int l1, l2, l3, l4, l7, l8, l9;
+	int l5 = 0, l6 = 0;
+	int c, lock_active;
+#endif /* CONFIG_HAS_WAKELOCK */
+	dhd_info_t *dhd;
+
+	if (!pub) {
+		return 0;
+	}
+	dhd = (dhd_info_t *)(pub->info);
+	if (!dhd) {
+		return 0;
+	}
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+	c = dhd->wakelock_counter;
+	l1 = wake_lock_active(&dhd->wl_wifi);
+	l2 = wake_lock_active(&dhd->wl_wdwake);
+	l3 = wake_lock_active(&dhd->wl_rxwake);
+	l4 = wake_lock_active(&dhd->wl_ctrlwake);
+	l7 = wake_lock_active(&dhd->wl_evtwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	l5 = wake_lock_active(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+	l6 = wake_lock_active(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+	l8 = wake_lock_active(&dhd->wl_pmwake);
+	l9 = wake_lock_active(&dhd->wl_txflwake);
+	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
+
+	/* Indicate to the Host to avoid going to suspend if internal locks are up */
+	if (lock_active) {
+		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
+			"ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
+			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
+		return 1;
+	}
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
+		return 1;
+	}
+#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+	return 0;
+}
+
+int net_os_wake_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_unlock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+		/* if wakelock_wd_counter was never used : lock it at once */
+		if (!dhd->wakelock_wd_counter)
+			wake_lock(&dhd->wl_wdwake);
+#endif
+		dhd->wakelock_wd_counter++;
+		ret = dhd->wakelock_wd_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_wd_counter) {
+			dhd->wakelock_wd_counter = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_unlock(&dhd->wl_wdwake);
+#endif
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+void
+dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void
+dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		/* if wl_intrwake is active, unlock it */
+		if (wake_lock_active(&dhd->wl_intrwake)) {
+			wake_unlock(&dhd->wl_intrwake);
+		}
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DHD_USE_SCAN_WAKELOCK
+void
+dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void
+dhd_os_scan_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		/* if wl_scanwake is active, unlock it */
+		if (wake_lock_active(&dhd->wl_scanwake)) {
+			wake_unlock(&dhd->wl_scanwake);
+		}
+	}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
+/* Waive wakelocks for operations such as IOVARs in the suspend path; must be closed
+ * by a paired call to dhd_os_wake_lock_restore(). Returns the current wakelock counter.
+ */
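+/*
+ * Hypothetical pairing sketch (illustrative only, not taken from this patch):
+ *
+ *	dhd_os_wake_lock_waive(&dhd->pub);
+ *	... issue IOVARs from the suspend path ...
+ *	dhd_os_wake_lock_restore(&dhd->pub);
+ */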
+int dhd_os_wake_lock_waive(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
+		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+		if (dhd->waive_wakelock == FALSE) {
+#ifdef DHD_TRACE_WAKE_LOCK
+			if (trace_wklock_onoff) {
+				STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
+			}
+#endif /* DHD_TRACE_WAKE_LOCK */
+			/* record current lock status */
+			dhd->wakelock_before_waive = dhd->wakelock_counter;
+			dhd->waive_wakelock = TRUE;
+		}
+		ret = dhd->wakelock_wd_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_wake_lock_restore(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!dhd)
+		return 0;
+	if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
+		return 0;
+
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
+	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+	if (!dhd->waive_wakelock)
+		goto exit;
+
+	dhd->waive_wakelock = FALSE;
+	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
+	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
+	 * the lock in between, do the same by calling wake_unlock or pm_relax
+	 */
+#ifdef DHD_TRACE_WAKE_LOCK
+	if (trace_wklock_onoff) {
+		STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
+	}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_stay_awake(&dhd->pub);
+#endif
+	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_relax(&dhd->pub);
+#endif
+	}
+	dhd->wakelock_before_waive = 0;
+exit:
+	ret = dhd->wakelock_wd_counter;
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	return ret;
+}
+
+void dhd_os_wake_lock_init(struct dhd_info *dhd)
+{
+	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
+	dhd->wakelock_counter = 0;
+	dhd->wakelock_rx_timeout_enable = 0;
+	dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+	/* terence 20161023: do not destroy wl_wifi on wlan down, otherwise a NULL pointer dereference can occur in dhd_ioctl_entry */
+	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+	wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
+	wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
+	wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+	wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+	dhd_wk_lock_trace_init(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+}
+
+void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
+{
+	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd->wakelock_counter = 0;
+	dhd->wakelock_rx_timeout_enable = 0;
+	dhd->wakelock_ctrl_timeout_enable = 0;
+	/* terence 20161023: do not destroy wl_wifi on wlan down, otherwise a NULL pointer dereference can occur in dhd_ioctl_entry */
+	wake_lock_destroy(&dhd->wl_rxwake);
+	wake_lock_destroy(&dhd->wl_ctrlwake);
+	wake_lock_destroy(&dhd->wl_evtwake);
+	wake_lock_destroy(&dhd->wl_pmwake);
+	wake_lock_destroy(&dhd->wl_txflwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	wake_lock_destroy(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+	wake_lock_destroy(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+	dhd_wk_lock_trace_deinit(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+	if (!pub)
+		return FALSE;
+	return pub->up;
+}
+
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
+{
+	int i;
+
+	i = snprintf(info_string, sizeof(info_string),
+		"  Driver: %s\n  Firmware: %s\n  CLM: %s ", EPI_VERSION_STR, fw, clm_version);
+	printf("%s\n", info_string);
+
+	if (!dhdp)
+		return;
+
+	i = snprintf(&info_string[i], sizeof(info_string) - i,
+		"\n  Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
+		dhd_conf_get_chiprev(dhdp));
+}
+
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+	int ifidx;
+	int ret = 0;
+	dhd_info_t *dhd = NULL;
+
+	if (!net || !DEV_PRIV(net)) {
+		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd = DHD_DEV_INFO(net);
+	if (!dhd)
+		return -EINVAL;
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+	dhd_check_hang(net, &dhd->pub, ret);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+	struct net_device *net;
+
+	net = dhd_idx2net(dhdp, ifidx);
+	if (!net) {
+		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+		return -EINVAL;
+	}
+
+	return dhd_check_hang(net, dhdp, ret);
+}
+
+/* Return instance */
+int dhd_get_instance(dhd_pub_t *dhdp)
+{
+	return dhdp->info->unit;
+}
+
+
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+	return;
+}
+
+void dhd_wlfc_plat_deinit(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+	return;
+}
+
+bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
+{
+#ifdef SKIP_WLFC_ON_CONCURRENT
+
+#ifdef WL_CFG80211
+	struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
+	if (net) {
+		/* enable flow control in vsdb mode */
+		return !(wl_cfg80211_is_concurrent_mode(net));
+	}
+#else
+	return TRUE; /* skip flow control */
+#endif /* WL_CFG80211 */
+
+#else
+	return FALSE;
+#endif /* SKIP_WLFC_ON_CONCURRENT */
+	return FALSE;
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+#include <linux/debugfs.h>
+
+typedef struct dhd_dbgfs {
+	struct dentry	*debugfs_dir;
+	struct dentry	*debugfs_mem;
+	dhd_pub_t	*dhdp;
+	uint32		size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+                       size_t count, loff_t *ppos)
+{
+	ssize_t rval;
+	uint32 tmp;
+	loff_t pos = *ppos;
+	size_t ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+	ret = copy_to_user(ubuf, &tmp, 4);
+	if (ret == count)
+		return -EFAULT;
+
+	count -= ret;
+	*ppos = pos + count;
+	rval = count;
+
+	return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	size_t ret;
+	uint32 buf;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+	if (ret == count)
+		return -EFAULT;
+
+	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+	return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	loff_t pos = -1;
+
+	switch (whence) {
+		case 0:
+			pos = off;
+			break;
+		case 1:
+			pos = file->f_pos + off;
+			break;
+		case 2:
+			pos = g_dbgfs.size - off;
+	}
+	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+	.read   = dhd_dbg_state_read,
+	.write	= dhd_debugfs_write,
+	.open   = dhd_dbg_state_open,
+	.llseek	= dhd_debugfs_lseek
+};
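+
+/*
+ * Note (added commentary, not part of the original change): with debugfs
+ * mounted at its usual location this exposes /sys/kernel/debug/dhd/mem.
+ * Reads and writes go through dhd_readregl()/dhd_writeregl() and are forced
+ * to 4-byte aligned offsets, so user space is expected to access the node in
+ * 4-byte units.
+ */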
+
+static void dhd_dbgfs_create(void)
+{
+	if (g_dbgfs.debugfs_dir) {
+		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+			NULL, &dhd_dbg_state_ops);
+	}
+}
+
+void dhd_dbgfs_init(dhd_pub_t *dhdp)
+{
+	g_dbgfs.dhdp = dhdp;
+	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+	if (IS_ERR(g_dbgfs.debugfs_dir)) {
+		g_dbgfs.debugfs_dir = NULL;
+		return;
+	}
+
+	dhd_dbgfs_create();
+
+	return;
+}
+
+void dhd_dbgfs_remove(void)
+{
+	debugfs_remove(g_dbgfs.debugfs_mem);
+	debugfs_remove(g_dbgfs.debugfs_dir);
+
+	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+}
+#endif /* BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct sk_buff *skb;
+	uint32 htsf = 0;
+	uint16 dport = 0, oldmagic = 0xACAC;
+	char *p1;
+	htsfts_t ts;
+
+	/*  timestamp packet  */
+
+	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/*		memcpy(&proto, p1+26, 4);  	*/
+		memcpy(&dport, p1+40, 2);
+/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
+		dport = ntoh16(dport);
+	}
+
+	/* timestamp only if icmp or udp iperf with port 5555 */
+/*	if (proto == 17 && dport == tsport) { */
+	if (dport >= tsport && dport <= tsport + 20) {
+
+		skb = (struct sk_buff *) pktbuf;
+
+		htsf = dhd_get_htsf(dhd, 0);
+		memset(skb->data + 44, 0, 2); /* clear checksum */
+		memcpy(skb->data+82, &oldmagic, 2);
+		memcpy(skb->data+84, &htsf, 4);
+
+		memset(&ts, 0, sizeof(htsfts_t));
+		ts.magic  = HTSFMAGIC;
+		ts.prio   = PKTPRIO(pktbuf);
+		ts.seqnum = htsf_seqnum++;
+		ts.c10    = get_cycles();
+		ts.t10    = htsf;
+		ts.endmagic = HTSFENDMAGIC;
+
+		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+	}
+}
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+	int pktcnt = 0, curval = 0, i;
+	for (i = 0; i < (NUMBIN-2); i++) {
+		curval += 500;
+		printf("%d ",  his->bin[i]);
+		pktcnt += his->bin[i];
+	}
+	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+		his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+	int i, binval = 0;
+
+	if (value < 0) {
+		histo->bin[NUMBIN-1]++;
+		return;
+	}
+	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
+		histo->bin[NUMBIN-2] = value;
+
+	for (i = 0; i < (NUMBIN-2); i++) {
+		binval += 500; /* 500 ms bins */
+		if (value <= binval) {
+			histo->bin[i]++;
+			return;
+		}
+	}
+	histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	char *p1;
+	uint16 old_magic;
+	int d1, d2, d3, end2end;
+	htsfts_t *htsf_ts;
+	uint32 htsf;
+
+	skb = PKTTONATIVE(dhdp->osh, pktbuf);
+	p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+		memcpy(&old_magic, p1+78, 2);
+		htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+	} else {
+		return;
+	}
+
+	if (htsf_ts->magic == HTSFMAGIC) {
+		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+		htsf_ts->cE0 = get_cycles();
+	}
+
+	if (old_magic == 0xACAC) {
+
+		tspktcnt++;
+		htsf = dhd_get_htsf(dhd, 0);
+		memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+		memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+		d1 = ts[tsidx].t2 - ts[tsidx].t1;
+		d2 = ts[tsidx].t3 - ts[tsidx].t2;
+		d3 = ts[tsidx].t4 - ts[tsidx].t3;
+		end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+		sorttobin(d1, &vi_d1);
+		sorttobin(d2, &vi_d2);
+		sorttobin(d3, &vi_d3);
+		sorttobin(end2end, &vi_d4);
+
+		if (end2end > 0 && end2end >  maxdelay) {
+			maxdelay = end2end;
+			maxdelaypktno = tspktcnt;
+			memcpy(&maxdelayts, &ts[tsidx], 16);
+		}
+		if (++tsidx >= TSMAX)
+			tsidx = 0;
+	}
+}
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+	uint32 htsf = 0, cur_cycle, delta, delta_us;
+	uint32    factor, baseval, baseval2;
+	cycles_t t;
+
+	t = get_cycles();
+	cur_cycle = t;
+
+	if (cur_cycle >  dhd->htsf.last_cycle)
+		delta = cur_cycle -  dhd->htsf.last_cycle;
+	else {
+		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
+	}
+
+	delta = delta >> 4;
+
+	if (dhd->htsf.coef) {
+		/* times ten to get the first digit */
+		factor = (dhd->htsf.coef * 10 + dhd->htsf.coefdec1);
+		baseval  = (delta*10)/factor;
+		baseval2 = (delta*10)/(factor+1);
+		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+	} else {
+		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+	}
+
+	return htsf;
+}
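+
+/*
+ * Note (added commentary): htsf_update() further below derives the ratio of
+ * host cycles to TSF microseconds and stores it as a fixed-point value split
+ * across coef (integer part) and coefdec1/coefdec2 (first and second decimal
+ * digits); dhd_get_htsf() above interpolates the current TSF from the last
+ * snapshot using that ratio.
+ */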
+
+static void dhd_dump_latency(void)
+{
+	int i, max = 0;
+	int d1, d2, d3, d4, d5;
+
+	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
+	for (i = 0; i < TSMAX; i++) {
+		d1 = ts[i].t2 - ts[i].t1;
+		d2 = ts[i].t3 - ts[i].t2;
+		d3 = ts[i].t4 - ts[i].t3;
+		d4 = ts[i].t4 - ts[i].t1;
+		d5 = ts[max].t4-ts[max].t1;
+		if (d4 > d5 && d4 > 0)  {
+			max = i;
+		}
+		printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
+			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+			d1, d2, d3, d4, i);
+	}
+
+	printf("current idx = %d \n", tsidx);
+
+	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+	printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
+	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+	maxdelayts.t2 - maxdelayts.t1,
+	maxdelayts.t3 - maxdelayts.t2,
+	maxdelayts.t4 - maxdelayts.t3,
+	maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+	char buf[32];
+	int ret;
+	uint32 s1, s2;
+
+	struct tsf {
+		uint32 low;
+		uint32 high;
+	} tsf_buf;
+
+	memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+	s1 = dhd_get_htsf(dhd, 0);
+	ret = dhd_iovar(&dhd->pub, ifidx, "tsf", NULL, 0, buf, sizeof(buf), FALSE);
+	if (ret < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: tsf is not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+		return ret;
+	}
+	s2 = dhd_get_htsf(dhd, 0);
+
+	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+		dhd->htsf.coefdec2, s2-tsf_buf.low);
+	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+	return 0;
+}
+
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+	static ulong  cur_cycle = 0, prev_cycle = 0;
+	uint32 htsf, tsf_delta = 0;
+	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+	ulong b, a;
+	cycles_t t;
+
+	/* cycles_t in include/mips/timex.h */
+
+	t = get_cycles();
+
+	prev_cycle = cur_cycle;
+	cur_cycle = t;
+
+	if (cur_cycle > prev_cycle)
+		cyc_delta = cur_cycle - prev_cycle;
+	else {
+		b = cur_cycle;
+		a = prev_cycle;
+		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+	}
+
+	if (data == NULL)
+		printf(" tsf update data pointer is null\n");
+
+	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+	memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+	if (cur_tsf.low == 0) {
+		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+		return;
+	}
+
+	if (cur_tsf.low > prev_tsf.low)
+		tsf_delta = (cur_tsf.low - prev_tsf.low);
+	else {
+		DHD_INFO((" ---- tsf low is smaller: cur_tsf=%08X, prev_tsf=%08X\n",
+		 cur_tsf.low, prev_tsf.low));
+		if (cur_tsf.high > prev_tsf.high) {
+			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+			DHD_INFO((" ---- Wraparound of tsf counter, adjusted TSF=%08X\n", tsf_delta));
+		} else {
+			return; /* do not update */
+		}
+	}
+
+	if (tsf_delta)  {
+		hfactor = cyc_delta / tsf_delta;
+		tmp  = 	(cyc_delta - (hfactor * tsf_delta))*10;
+		dec1 =  tmp/tsf_delta;
+		dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+		tmp  = 	(tmp   - (dec1*tsf_delta))*10;
+		dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+		if (dec3 > 4) {
+			if (dec2 == 9) {
+				dec2 = 0;
+				if (dec1 == 9) {
+					dec1 = 0;
+					hfactor++;
+				} else {
+					dec1++;
+				}
+			} else {
+				dec2++;
+			}
+		}
+	}
+
+	if (hfactor) {
+		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
+		dhd->htsf.coef = hfactor;
+		dhd->htsf.last_cycle = cur_cycle;
+		dhd->htsf.last_tsf = cur_tsf.low;
+		dhd->htsf.coefdec1 = dec1;
+		dhd->htsf.coefdec2 = dec2;
+	} else {
+		htsf = prev_tsf.low;
+	}
+}
+
+#endif /* WLMEDIA_HTSF */
+
+#ifdef CUSTOM_SET_CPUCORE
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+	int e_dpc = 0, e_rxf = 0, retry_set = 0;
+
+	if (!(dhd->chan_isvht80)) {
+		DHD_ERROR(("%s: chan_isvht80(%d), not changing cpucore\n", __FUNCTION__, dhd->chan_isvht80));
+		return;
+	}
+
+	if (DPC_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(DPC_CPUCORE));
+			} else {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+				return;
+			}
+			if (e_dpc < 0)
+				OSL_SLEEP(1);
+		} while (e_dpc < 0);
+	}
+	if (RXF_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(RXF_CPUCORE));
+			} else {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+				return;
+			}
+			if (e_rxf < 0)
+				OSL_SLEEP(1);
+		} while (e_rxf < 0);
+	}
+#ifdef DHD_OF_SUPPORT
+	interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
+#endif /* DHD_OF_SUPPORT */
+	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+	return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
+
+#ifdef DHD_MCAST_REGEN
+/* Get interface specific ap_isolate configuration */
+int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	return ifp->mcast_regen_bss_enable;
+}
+
+/* Set interface specific mcast_regen configuration */
+int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ifp->mcast_regen_bss_enable = val;
+
+	/* Disable rx_pkt_chain feature for interface, if mcast_regen feature
+	 * is enabled
+	 */
+	dhd_update_rx_pkt_chainable_state(dhdp, idx);
+	return BCME_OK;
+}
+#endif	/* DHD_MCAST_REGEN */
+
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	return ifp->ap_isolate;
+}
+
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	if (ifp)
+		ifp->ap_isolate = val;
+
+	return 0;
+}
+
+#ifdef DHD_FW_COREDUMP
+#if defined(CONFIG_X86)
+#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
+#define MEMDUMPINFO_INST "/data/.memdump.info"
+#endif /* CONFIG_X86 */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define MEMDUMPINFO PLATFORM_PATH".memdump.info"
+#elif defined(CUSTOMER_HW2)
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#else
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#endif /* CUSTOMER_HW4_DEBUG */
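+
+/*
+ * Note (added commentary): the .memdump.info file is expected to contain a
+ * single ASCII digit; dhd_get_memdump_info() reads up to 4 bytes, converts
+ * them with bcm_atoi() and uses the result as the memdump mode, falling back
+ * to a platform default when the file is absent or unreadable.
+ */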
+
+void dhd_get_memdump_info(dhd_pub_t *dhd)
+{
+	struct file *fp = NULL;
+	uint32 mem_val = DUMP_MEMFILE_MAX;
+	int ret = 0;
+	char *filepath = MEMDUMPINFO;
+
+	/* Read memdump info from the file */
+	fp = filp_open(filepath, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86)
+		/* Check if it is Live Brix Image */
+		if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
+			goto done;
+		}
+		/* Try if it is Installed Brix Image */
+		filepath = MEMDUMPINFO_INST;
+		DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+		fp = filp_open(filepath, O_RDONLY, 0);
+		if (IS_ERR(fp)) {
+			DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+			goto done;
+		}
+#else /* Non Brix Android platform */
+		goto done;
+#endif /* CONFIG_X86 */
+	}
+
+	/* Handle success case */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
+#else
+	ret = kernel_read(fp, 0, (char *)&mem_val, 4);
+#endif
+	if (ret < 0) {
+		DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+		filp_close(fp, NULL);
+		goto done;
+	}
+
+	mem_val = bcm_atoi((char *)&mem_val);
+
+	filp_close(fp, NULL);
+
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+	if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
+		mem_val = DUMP_MEMFILE_BUGON;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
+
+done:
+#ifdef CUSTOMER_HW4_DEBUG
+	dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
+#else
+	dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE;
+#endif /* CUSTOMER_HW4_DEBUG */
+
+	DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
+}
+
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
+{
+	dhd_dump_t *dump = NULL;
+	dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
+	if (dump == NULL) {
+		DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
+		return;
+	}
+	dump->buf = buf;
+	dump->bufsize = size;
+
+#if defined(CONFIG_ARM64)
+	DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
+		(uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
+#elif defined(__ARM_ARCH_7A__)
+	DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
+		(uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
+#endif /* __ARM_ARCH_7A__ */
+	if (dhdp->memdump_enabled == DUMP_MEMONLY) {
+		BUG_ON(1);
+	}
+
+#ifdef DHD_LOG_DUMP
+	if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
+		dhd_schedule_log_dump(dhdp);
+	}
+#endif /* DHD_LOG_DUMP */
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
+		DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
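+
+/*
+ * Note (added commentary): dhd_mem_dump() below runs from the deferred work
+ * queue; it writes the SoC RAM image via write_dump_to_file() and, when
+ * memdump_enabled is DUMP_MEMFILE_BUGON, deliberately triggers BUG_ON()
+ * afterwards, presumably so that a host-side ramdump captures the state too.
+ */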
+
+static void
+dhd_mem_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_dump_t *dump = event_info;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (!dump) {
+		DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
+		DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
+		dhd->pub.memdump_success = FALSE;
+	}
+
+	if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
+#ifdef DHD_LOG_DUMP
+		dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
+#endif /* DHD_LOG_DUMP */
+#ifdef DHD_DEBUG_UART
+		dhd->pub.memdump_success == TRUE &&
+#endif	/* DHD_DEBUG_UART */
+		dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
+
+#ifdef SHOW_LOGTRACE
+		/* Wait till event_log_dispatcher_work finishes */
+		cancel_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* SHOW_LOGTRACE */
+
+		BUG_ON(1);
+	}
+	MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
+}
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef DHD_SSSR_DUMP
+
+static void
+dhd_sssr_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_pub_t *dhdp;
+	int i;
+	char before_sr_dump[128];
+	char after_sr_dump[128];
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdp = &dhd->pub;
+
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		/* Init file name */
+		memset(before_sr_dump, 0, sizeof(before_sr_dump));
+		memset(after_sr_dump, 0, sizeof(after_sr_dump));
+
+		snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
+			"sssr_core", i, "before_SR");
+		snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
+			"sssr_core", i, "after_SR");
+
+		if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) {
+			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
+				dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
+				DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
+					__FUNCTION__));
+			}
+		}
+		if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
+			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
+				dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
+				DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
+					__FUNCTION__));
+			}
+		}
+	}
+
+	if (dhdp->sssr_vasip_buf_before) {
+		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_before,
+			dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_before_SR")) {
+			DHD_ERROR(("%s: writing SSSR VASIP dump before to the file failed\n",
+				__FUNCTION__));
+		}
+	}
+
+	if (dhdp->sssr_vasip_buf_after) {
+		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_after,
+			dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_after_SR")) {
+			DHD_ERROR(("%s: writing SSSR VASIP dump after to the file failed\n",
+				__FUNCTION__));
+		}
+	}
+
+}
+
+void
+dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
+{
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
+		DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_LOG_DUMP
+static void
+dhd_log_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (do_dhd_log_dump(&dhd->pub)) {
+		DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
+		return;
+	}
+}
+
+void dhd_schedule_log_dump(dhd_pub_t *dhdp)
+{
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+		(void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
+		dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
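+
+/*
+ * Note (added commentary): do_dhd_log_dump() below concatenates the g_dld_buf
+ * ring buffers into a single file named "debug_dump_<sec>.<usec>" under
+ * DHD_COMMON_DUMP_PATH and then re-initializes each buffer for reuse.
+ */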
+
+static int
+do_dhd_log_dump(dhd_pub_t *dhdp)
+{
+	int ret = 0, i = 0;
+	struct file *fp = NULL;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+	unsigned int wr_size = 0;
+	char dump_path[128];
+	struct timespec64 curtime;
+	uint32 file_mode;
+	unsigned long flags = 0;
+	struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
+
+	const char *pre_strs =
+		"-------------------- General log ---------------------------\n";
+
+	const char *post_strs =
+		"-------------------- Specific log --------------------------\n";
+
+	if (!dhdp) {
+		return -1;
+	}
+
+	DHD_ERROR(("DHD version: %s\n", dhd_version));
+	DHD_ERROR(("F/W version: %s\n", fw_version));
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* Init file name */
+	memset(dump_path, 0, sizeof(dump_path));
+	ktime_get_real_ts64(&curtime);
+	snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
+		DHD_COMMON_DUMP_PATH "debug_dump",
+		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_nsec / NSEC_PER_USEC);
+	file_mode = O_CREAT | O_WRONLY | O_SYNC;
+
+	DHD_ERROR(("debug_dump_path = %s\n", dump_path));
+	fp = filp_open(dump_path, file_mode, 0664);
+	if (IS_ERR(fp)) {
+		ret = PTR_ERR(fp);
+		DHD_ERROR(("open file error, err = %d\n", ret));
+		goto exit;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	ret = kernel_write(fp, pre_strs, strlen(pre_strs), &pos);
+#else
+	ret = vfs_write(fp, pre_strs, strlen(pre_strs), &pos);
+#endif
+	if (ret < 0) {
+		DHD_ERROR(("write file error, err = %d\n", ret));
+		goto exit;
+	}
+
+	do {
+		unsigned int buf_size = (unsigned int)(dld_buf->max -
+			(unsigned long)dld_buf->buffer);
+		if (dld_buf->wraparound) {
+			wr_size = buf_size;
+		} else {
+			if (!dld_buf->buffer[0]) { /* print log if buf is empty. */
+				DHD_ERROR_EX(("Buffer is empty. No event/log.\n"));
+			}
+			wr_size = (unsigned int)(dld_buf->present - dld_buf->front);
+		}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+		ret = kernel_write(fp, dld_buf->buffer, wr_size, &pos);
+#else
+		ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
+#endif
+		if (ret < 0) {
+			DHD_ERROR(("write file error, err = %d\n", ret));
+			goto exit;
+		}
+
+		/* re-init dhd_log_dump_buf structure */
+		spin_lock_irqsave(&dld_buf->lock, flags);
+		dld_buf->wraparound = 0;
+		dld_buf->present = dld_buf->front;
+		dld_buf->remain = buf_size;
+		bzero(dld_buf->buffer, buf_size);
+		spin_unlock_irqrestore(&dld_buf->lock, flags);
+		ret = BCME_OK;
+
+		if (++i < DLD_BUFFER_NUM) {
+			dld_buf = &g_dld_buf[i];
+		} else {
+			break;
+		}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+		ret = kernel_write(fp, post_strs, strlen(post_strs), &pos);
+#else
+		ret = vfs_write(fp, post_strs, strlen(post_strs), &pos);
+#endif
+		if (ret < 0) {
+			DHD_ERROR(("write file error, err = %d\n", ret));
+			goto exit;
+		}
+	} while (1);
+
+exit:
+#if defined(STAT_REPORT)
+	if (!IS_ERR(fp) && ret >= 0) {
+		wl_stat_report_file_save(dhdp, fp);
+	}
+#endif /* STAT_REPORT */
+
+	if (!IS_ERR(fp)) {
+		filp_close(fp, NULL);
+	}
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_LOG_DUMP */
+
+
+#ifdef BCMASSERT_LOG
+#ifdef CUSTOMER_HW4_DEBUG
+#define ASSERTINFO PLATFORM_PATH".assert.info"
+#elif defined(CUSTOMER_HW2)
+#define ASSERTINFO "/data/misc/wifi/.assert.info"
+#else
+#define ASSERTINFO "/installmedia/.assert.info"
+#endif /* CUSTOMER_HW4_DEBUG */
+void dhd_get_assert_info(dhd_pub_t *dhd)
+{
+	struct file *fp = NULL;
+	char *filepath = ASSERTINFO;
+	int mem_val = -1;
+
+	/*
+	 * Read assert info from the file
+	 * 0: Trigger Kernel crash by panic()
+	 * 1: Print out the logs and don't trigger Kernel panic. (default)
+	 * 2: Trigger Kernel crash by BUG()
+	 * File doesn't exist: Keep default value (1).
+	 */
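+	/*
+	 * Example (assumption, not part of the original change): writing "2"
+	 * to the .assert.info file before the driver loads, e.g.
+	 * "echo 2 > /data/misc/wifi/.assert.info" on a CUSTOMER_HW2 build,
+	 * would make a firmware assert trigger BUG().
+	 */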
+	fp = filp_open(filepath, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+	} else {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+		ssize_t ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
+#else
+		int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
+#endif
+		if (ret < 0) {
+			DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+		} else {
+			mem_val = bcm_atoi((char *)&mem_val);
+			DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+		}
+		filp_close(fp, NULL);
+	}
+#ifdef CUSTOMER_HW4_DEBUG
+	/* By default, set to 1: no kernel panic */
+	g_assert_type = (mem_val >= 0) ? mem_val : 1;
+#else
+	/* By default, set to 0: kernel panic */
+	g_assert_type = (mem_val >= 0) ? mem_val : 0;
+#endif
+}
+#endif /* BCMASSERT_LOG */
+
+/*
+ * This call returns the memdump size so that
+ * halutil can allocate a buffer of that size in user space.
+ */
+int
+dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+
+	if (dhdp->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+		return BCME_ERROR;
+	}
+
+	ret = dhd_common_socram_dump(dhdp);
+	if (ret == BCME_OK) {
+		*dump_size = dhdp->soc_ram_length;
+	}
+	return ret;
+}
+
+/*
+ * This returns the actual memdump after the memdump size has been queried.
+ */
+int
+dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
+{
+	int ret = BCME_OK;
+	int orig_len = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhdp = &dhd->pub;
+	if (buf == NULL)
+		return BCME_ERROR;
+	orig_len = *size;
+	if (dhdp->soc_ram) {
+		if (orig_len >= dhdp->soc_ram_length) {
+			memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
+			/* reset the storage of dump */
+			memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
+			*size = dhdp->soc_ram_length;
+		} else {
+			ret = BCME_BUFTOOSHORT;
+			DHD_ERROR(("The length of the buffer is too short"
+				" to save the memory dump with %d\n", dhdp->soc_ram_length));
+		}
+	} else {
+		DHD_ERROR(("socram_dump is not ready to get\n"));
+		ret = BCME_NOTREADY;
+	}
+	return ret;
+}
+
+int
+dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
+{
+	char *fw_str;
+
+	if (size == 0)
+		return BCME_BADARG;
+
+	fw_str = strstr(info_string, "Firmware: ");
+	if (fw_str == NULL) {
+		return BCME_ERROR;
+	}
+
+	memset(*buf, 0, size);
+	if (dhd_ver) {
+		strncpy(*buf, dhd_version, size - 1);
+	} else {
+		strncpy(*buf, fw_str, size - 1);
+	}
+	return BCME_OK;
+}
+
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+	return &ifp->wmf;
+}
+#endif /* DHD_WMF */
+
+#if defined(TRAFFIC_MGMT_DWM)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata, *ip_body;
+	uint8  dwm_filter;
+	uint8 tos_tc = 0;
+	uint8 dscp   = 0;
+	pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+	eh = (struct ether_header *) pktdata;
+	ip_body = NULL;
+
+	if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) {
+		if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+			evh = (struct ethervlan_header *)eh;
+			if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+				(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+				ip_body = pktdata + sizeof(struct ethervlan_header);
+			}
+		} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+			(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+			ip_body = pktdata + sizeof(struct ether_header);
+		}
+		if (ip_body) {
+			tos_tc = IP_TOS46(ip_body);
+			dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+		}
+
+		if (dscp < DHD_DWM_TBL_SIZE) {
+			dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp];
+			if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) {
+				PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter));
+			}
+		}
+	}
+}
+#endif /* TRAFFIC_MGMT_DWM */
+
+bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
+{
+	return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
+}
+
+#ifdef DHD_L2_FILTER
+arp_table_t*
+dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(bssidx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[bssidx];
+	return ifp->phnd_arp_table;
+}
+
+int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	if (ifp)
+		return ifp->parp_enable;
+	else
+		return FALSE;
+}
+
+/* Set interface specific proxy arp configuration */
+int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+
+	if (!ifp)
+	    return BCME_ERROR;
+
+	/* At present all 3 variables are being
+	 * handled at once
+	 */
+	ifp->parp_enable = val;
+	ifp->parp_discard = val;
+	ifp->parp_allnode = val;
+
+	/* Flush ARP entries when disabled */
+	if (val == FALSE) {
+		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
+			FALSE, dhdp->tickcnt);
+	}
+	return BCME_OK;
+}
+
+bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+	return ifp->parp_discard;
+}
+
+bool
+dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+
+	return ifp->parp_allnode;
+}
+
+int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+
+	return ifp->dhcp_unicast;
+}
+
+int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+
+	ifp->dhcp_unicast = val;
+	return BCME_OK;
+}
+
+int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+
+	return ifp->block_ping;
+}
+
+int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+
+	ifp->block_ping = val;
+	/* Disable rx_pkt_chain feature for interface if block_ping option is
+	 * enabled
+	 */
+	dhd_update_rx_pkt_chainable_state(dhdp, idx);
+	return BCME_OK;
+}
+
+int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+
+	return ifp->grat_arp;
+}
+
+int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+	ASSERT(idx < DHD_MAX_IFS);
+	ifp = dhd->iflist[idx];
+
+	ASSERT(ifp);
+
+	ifp->grat_arp = val;
+
+	return BCME_OK;
+}
+#endif /* DHD_L2_FILTER */
+
+
+#if defined(SET_RPS_CPUS)
+int dhd_rps_cpus_enable(struct net_device *net, int enable)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp;
+	int ifidx;
+	char * RPS_CPU_SETBUF;
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	if (ifidx == PRIMARY_INF) {
+		if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
+			DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
+			RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
+		} else {
+			DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
+			RPS_CPU_SETBUF = RPS_CPUS_MASK;
+		}
+	} else if (ifidx == VIRTUAL_INF) {
+		DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
+		RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
+	} else {
+		DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
+		return -EINVAL;
+	}
+
+	ifp = dhd->iflist[ifidx];
+	if (ifp) {
+		if (enable) {
+			DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
+			custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
+		} else {
+			custom_rps_map_clear(ifp->net->_rx);
+		}
+	} else {
+		DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
+		return -ENODEV;
+	}
+	return BCME_OK;
+}
+
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
+{
+	struct rps_map *old_map, *map;
+	cpumask_var_t mask;
+	int err, cpu, i;
+	static DEFINE_SPINLOCK(rps_map_lock);
+
+	DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+	if (err) {
+		free_cpumask_var(mask);
+		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
+		return err;
+	}
+
+	map = kzalloc(max_t(unsigned int,
+		RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+		GFP_KERNEL);
+	if (!map) {
+		free_cpumask_var(mask);
+		DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	i = 0;
+	for_each_cpu(cpu, mask) {
+		map->cpus[i++] = cpu;
+	}
+
+	if (i) {
+		map->len = i;
+	} else {
+		kfree(map);
+		map = NULL;
+		free_cpumask_var(mask);
+		DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
+		return -1;
+	}
+
+	spin_lock(&rps_map_lock);
+	old_map = rcu_dereference_protected(queue->rps_map,
+		lockdep_is_held(&rps_map_lock));
+	rcu_assign_pointer(queue->rps_map, map);
+	spin_unlock(&rps_map_lock);
+
+	if (map) {
+		static_key_slow_inc(&rps_needed);
+	}
+	if (old_map) {
+		kfree_rcu(old_map, rcu);
+		static_key_slow_dec(&rps_needed);
+	}
+	free_cpumask_var(mask);
+
+	DHD_INFO(("%s : Done. mapped cpu number : %d\n", __FUNCTION__, map->len));
+	return map->len;
+}
+
+void custom_rps_map_clear(struct netdev_rx_queue *queue)
+{
+	struct rps_map *map;
+
+	DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+	map = rcu_dereference_protected(queue->rps_map, 1);
+	if (map) {
+		RCU_INIT_POINTER(queue->rps_map, NULL);
+		kfree_rcu(map, rcu);
+		DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
+	}
+}
+#endif /* SET_RPS_CPUS */
+
+#ifdef DHD_DEBUG_PAGEALLOC
+
+void
+dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+	DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
+		__FUNCTION__, addr_corrupt, (uint32)len));
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	prhex("Page Corruption:", addr_corrupt, len);
+	dhd_dump_to_kernelog(dhdp);
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+	/* Load the dongle side dump to host memory and then BUG_ON() */
+	dhdp->memdump_enabled = DUMP_MEMONLY;
+	dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
+	dhd_bus_mem_dump(dhdp);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+	DHD_OS_WAKE_UNLOCK(dhdp);
+}
+EXPORT_SYMBOL(dhd_page_corrupt_cb);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
+void
+dhd_pktid_error_handler(dhd_pub_t *dhdp)
+{
+	DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(dhdp);
+	dhd_dump_to_kernelog(dhdp);
+#ifdef DHD_FW_COREDUMP
+	/* Load the dongle side dump to host memory */
+	if (dhdp->memdump_enabled == DUMP_DISABLED) {
+		dhdp->memdump_enabled = DUMP_MEMFILE;
+	}
+	dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
+	dhd_bus_mem_dump(dhdp);
+#endif /* DHD_FW_COREDUMP */
+	dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
+	dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
+	DHD_OS_WAKE_UNLOCK(dhdp);
+}
+#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
+
+struct net_device *
+dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+
+	if (dhd->iflist[0] && dhd->iflist[0]->net)
+		return dhd->iflist[0]->net;
+	else
+		return NULL;
+}
+
+#ifdef DHD_DHCP_DUMP
+static void
+dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
+{
+	struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
+	struct iphdr *h = &b->ip_header;
+	uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
+	int dhcp_type = 0, len, opt_len;
+
+	/* check IP header */
+	if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
+		return;
+	}
+
+	/* check UDP port for bootp (67, 68) */
+	if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
+			b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
+		return;
+	}
+
+	/* check header length */
+	if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
+		return;
+	}
+
+	len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
+	opt_len = len
+		- (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));
+
+	/* parse bootp options */
+	if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
+		ptr = &b->options[4];
+		while (ptr < end && *ptr != 0xff) {
+			opt = ptr++;
+			if (*opt == 0) {
+				continue;
+			}
+			ptr += *ptr + 1;
+			if (ptr >= end) {
+				break;
+			}
+			/* 53 is dhcp type */
+			if (*opt == 53) {
+				if (opt[1]) {
+					dhcp_type = opt[2];
+					DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
+						ifname, dhcp_types[dhcp_type],
+						tx ? "TX" : "RX", dhcp_ops[b->op]));
+					break;
+				}
+			}
+		}
+	}
+}
+#endif /* DHD_DHCP_DUMP */
+
+#ifdef DHD_ICMP_DUMP
+static void
+dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
+{
+	uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+	struct iphdr *iph = (struct iphdr *)pkt;
+	struct icmphdr *icmph;
+
+	/* check IP header */
+	if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
+		return;
+	}
+
+	icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
+	if (icmph->type == ICMP_ECHO) {
+		DHD_ERROR(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
+			ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
+	} else if (icmph->type == ICMP_ECHOREPLY) {
+		DHD_ERROR(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
+			ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
+	} else {
+		DHD_ERROR(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
+			ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
+	}
+}
+#endif /* DHD_ICMP_DUMP */
+
+#ifdef SHOW_LOGTRACE
+void
+dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
+{
+	dhd_dbg_ring_status_t ring_status;
+	uint32 rlen;
+
+	rlen = dhd_dbg_ring_pull_single(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
+		TRACE_LOG_BUF_MAX_SIZE, TRUE);
+	trace_buf_info->size = rlen;
+	trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
+	if (rlen == 0) {
+		trace_buf_info->availability = BUF_NOT_AVAILABLE;
+		return;
+	}
+	dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
+	if (ring_status.written_bytes != ring_status.read_bytes) {
+		trace_buf_info->availability = NEXT_BUF_AVAIL;
+	}
+}
+#endif /* SHOW_LOGTRACE */
+
+bool
+dhd_fw_download_status(dhd_pub_t * dhd_pub)
+{
+	return dhd_pub->fw_download_done;
+}
+
+int
+dhd_create_to_notifier_skt(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	/* Kernel 3.7 onwards this API accepts only 3 arguments. */
+	/* Kernel version 3.6 is a special case which accepts 4 arguments */
+	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &g_cfg);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	/* Kernel version 3.5 and below use this old API format */
+	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
+			dhd_process_daemon_msg, NULL, THIS_MODULE);
+#else
+	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE, &g_cfg);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
+	if (!nl_to_event_sk)
+	{
+		printf("Error creating socket.\n");
+		return -1;
+	}
+	DHD_INFO(("nl_to socket created successfully...\n"));
+	return 0;
+}
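+
+/*
+ * Note (added commentary): a user-space daemon is expected to open a netlink
+ * socket of protocol BCM_NL_USER and send a REASON_DAEMON_STARTED message;
+ * dhd_recv_msg_from_daemon() below records the sender pid so that later
+ * dhd_send_msg_to_daemon() calls can unicast events back to that daemon.
+ */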
+
+void
+dhd_destroy_to_notifier_skt(void)
+{
+	DHD_INFO(("Destroying nl_to socket\n"));
+	if (nl_to_event_sk) {
+		netlink_kernel_release(nl_to_event_sk);
+	}
+}
+
+static void
+dhd_recv_msg_from_daemon(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlh;
+	bcm_to_info_t *cmd;
+
+	nlh = (struct nlmsghdr *)skb->data;
+	cmd = (bcm_to_info_t *)nlmsg_data(nlh);
+	if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
+		sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
+		DHD_INFO(("DHD Daemon Started\n"));
+	}
+}
+
+int
+dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
+{
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb_out;
+
+	if (!nl_to_event_sk) {
+		DHD_INFO(("No socket available\n"));
+		return -1;
+	}
+
+	BCM_REFERENCE(skb);
+	if (sender_pid == 0) {
+		DHD_INFO(("Invalid PID 0\n"));
+		return -1;
+	}
+
+	if ((skb_out = nlmsg_new(size, 0)) == NULL) {
+		DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
+		return -1;
+	}
+	nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
+	NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
+	memcpy(nlmsg_data(nlh), (char *)data, size);
+
+	if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
+		DHD_INFO(("Error sending message\n"));
+	}
+	return 0;
+}
+
+
+static void
+dhd_process_daemon_msg(struct sk_buff *skb)
+{
+	bcm_to_info_t to_info;
+
+	to_info.magic = BCM_TO_MAGIC;
+	to_info.reason = REASON_DAEMON_STARTED;
+	to_info.trap = NO_TRAP;
+
+	dhd_recv_msg_from_daemon(skb);
+	dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
+}
+
+#ifdef REPORT_FATAL_TIMEOUTS
+static void
+dhd_send_trap_to_fw(dhd_pub_t * pub, int reason, int trap)
+{
+	bcm_to_info_t to_info;
+
+	to_info.magic = BCM_TO_MAGIC;
+	to_info.reason = reason;
+	to_info.trap = trap;
+
+	DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap));
+	dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
+}
+
+void
+dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason)
+{
+	int to_reason;
+	int trap = NO_TRAP;
+	switch (reason) {
+	        case DHD_REASON_COMMAND_TO:
+	                to_reason = REASON_COMMAND_TO;
+	                trap = DO_TRAP;
+	                break;
+	        case DHD_REASON_JOIN_TO:
+	                to_reason = REASON_JOIN_TO;
+	                break;
+	        case DHD_REASON_SCAN_TO:
+	                to_reason = REASON_SCAN_TO;
+	                break;
+	        case DHD_REASON_OQS_TO:
+	                to_reason = REASON_OQS_TO;
+	                trap = DO_TRAP;
+	                break;
+	        default:
+	                to_reason = REASON_UNKOWN;
+	}
+	dhd_send_trap_to_fw(pub, to_reason, trap);
+}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef DHD_LOG_DUMP
+void
+dhd_log_dump_init(dhd_pub_t *dhd)
+{
+	struct dhd_log_dump_buf *dld_buf;
+	int i = 0;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+	int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+
+	for (i = 0; i < DLD_BUFFER_NUM; i++) {
+		dld_buf = &g_dld_buf[i];
+		spin_lock_init(&dld_buf->lock);
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+		dld_buf->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++, dld_buf_size[i]);
+#else
+		dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+
+		if (!dld_buf->buffer) {
+			dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
+			DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
+
+			if (!dld_buf->buffer) {
+				DHD_ERROR(("Failed to allocate memory for dld_buf[%d].\n", i));
+				goto fail;
+			}
+		}
+
+		dld_buf->wraparound = 0;
+		dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
+		dld_buf->present = dld_buf->front = dld_buf->buffer;
+		dld_buf->remain = dld_buf_size[i];
+		dld_buf->enable = 1;
+	}
+	return;
+
+fail:
+	for (i = 0; i < DLD_BUFFER_NUM; i++) {
+		if (g_dld_buf[i].buffer) {
+			kfree(g_dld_buf[i].buffer);
+		}
+	}
+}
+
+void
+dhd_log_dump_deinit(dhd_pub_t *dhd)
+{
+	struct dhd_log_dump_buf *dld_buf;
+	int i = 0;
+
+	for (i = 0; i < DLD_BUFFER_NUM; i++) {
+		dld_buf = &g_dld_buf[i];
+		dld_buf->enable = 0;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+		DHD_OS_PREFREE(dhd, dld_buf->buffer, dld_buf_size[i]);
+#else
+		kfree(dld_buf->buffer);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+	}
+}
+
+void
+dhd_log_dump_write(int type, const char *fmt, ...)
+{
+	int len = 0;
+	char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
+	va_list args;
+	unsigned long flags = 0;
+	struct dhd_log_dump_buf *dld_buf = NULL;
+
+	switch (type)
+	{
+		case DLD_BUF_TYPE_GENERAL:
+			dld_buf = &g_dld_buf[type];
+			break;
+		case DLD_BUF_TYPE_SPECIAL:
+			dld_buf = &g_dld_buf[type];
+			break;
+		default:
+			DHD_ERROR(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
+				__FUNCTION__, type));
+			return;
+	}
+
+	if (dld_buf->enable != 1) {
+		return;
+	}
+
+	va_start(args, fmt);
+
+	len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
+	/* A non-C99-compliant vsnprintf returns -1 on error, while a
+	 * C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
+	 * on truncation.
+	 */
+	if (len < 0) {
+		return;
+	}
+
+	if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
+		len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
+		tmp_buf[len] = '\0';
+	}
+
+	/* make a critical section to eliminate race conditions */
+	spin_lock_irqsave(&dld_buf->lock, flags);
+	if (dld_buf->remain < len) {
+		dld_buf->wraparound = 1;
+		dld_buf->present = dld_buf->front;
+		dld_buf->remain = dld_buf_size[type];
+	}
+
+	strncpy(dld_buf->present, tmp_buf, len);
+	dld_buf->remain -= len;
+	dld_buf->present += len;
+	spin_unlock_irqrestore(&dld_buf->lock, flags);
+
+	/* double check invalid memory operation */
+	ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
+	va_end(args);
+}
+
+char*
+dhd_log_dump_get_timestamp(void)
+{
+	static char buf[16];
+	u64 ts_nsec;
+	unsigned long rem_nsec;
+
+	ts_nsec = local_clock();
+	rem_nsec = do_div(ts_nsec, 1000000000);
+	snprintf(buf, sizeof(buf), "%5lu.%06lu",
+		(unsigned long)ts_nsec, rem_nsec / 1000);
+
+	return buf;
+}
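+
+/*
+ * Note (added commentary): dhd_log_dump_get_timestamp() returns a
+ * printk-style "seconds.microseconds" string derived from local_clock().
+ */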
+#endif /* DHD_LOG_DUMP */
+
+int
+dhd_write_file(const char *filepath, char *buf, int buf_len)
+{
+	struct file *fp = NULL;
+	mm_segment_t old_fs;
+	int ret = 0;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* File is always created. */
+	fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
+			__FUNCTION__, filepath, PTR_ERR(fp)));
+		ret = BCME_ERROR;
+	} else {
+		if (fp->f_mode & FMODE_WRITE) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+			ret = kernel_write(fp, buf, buf_len, &fp->f_pos);
+#else
+			ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
+#endif
+			if (ret < 0) {
+				DHD_ERROR(("%s: Couldn't write file '%s'\n",
+					__FUNCTION__, filepath));
+				ret = BCME_ERROR;
+			} else {
+				ret = BCME_OK;
+			}
+		}
+		filp_close(fp, NULL);
+	}
+
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+
+int
+dhd_read_file(const char *filepath, char *buf, int buf_len)
+{
+	struct file *fp = NULL;
+	mm_segment_t old_fs;
+	int ret;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	fp = filp_open(filepath, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		set_fs(old_fs);
+		DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
+		return BCME_ERROR;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	ret = kernel_read(fp, buf, buf_len, NULL);
+#else
+	ret = kernel_read(fp, 0, buf, buf_len);
+#endif
+	filp_close(fp, NULL);
+
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	/* Return the number of bytes read */
+	if (ret > 0) {
+		/* Success to read */
+		ret = 0;
+	} else {
+		DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
+			__FUNCTION__, filepath, ret));
+		ret = BCME_ERROR;
+	}
+
+	return ret;
+}
+
+int
+dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
+{
+	int ret;
+
+	ret = dhd_write_file(filepath, buf, buf_len);
+	if (ret < 0) {
+		return ret;
+	}
+
+	/* Read the file again and check if the file size is not zero */
+	memset(buf, 0, buf_len);
+	ret = dhd_read_file(filepath, buf, buf_len);
+
+	return ret;
+}
+
+#ifdef DHD_LB_TXP
+#define DHD_LB_TXBOUND	64
+/*
+ * Function that performs the TX processing on a given CPU
+ */
+bool
+dhd_lb_tx_process(dhd_info_t *dhd)
+{
+	struct sk_buff *skb;
+	int cnt = 0;
+	struct net_device *net;
+	int ifidx;
+	bool resched = FALSE;
+
+	DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
+	if (dhd == NULL) {
+		DHD_ERROR((" Null pointer DHD \r\n"));
+		return resched;
+	}
+
+	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+
+	/* Base Loop to perform the actual Tx */
+	do {
+		skb = skb_dequeue(&dhd->tx_pend_queue);
+		if (skb == NULL) {
+			DHD_TRACE(("Dequeued a Null Packet \r\n"));
+			break;
+		}
+		cnt++;
+
+		net =  DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+		ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+
+		BCM_REFERENCE(net);
+		DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
+			net, ifidx));
+
+		__dhd_sendpkt(&dhd->pub, ifidx, skb);
+
+		if (cnt >= DHD_LB_TXBOUND) {
+			resched = TRUE;
+			break;
+		}
+
+	} while (1);
+
+	DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
+
+	return resched;
+}
+
+void
+dhd_lb_tx_handler(unsigned long data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+
+	if (dhd_lb_tx_process(dhd)) {
+		dhd_tasklet_schedule(&dhd->tx_tasklet);
+	}
+}
+
+#endif /* DHD_LB_TXP */
+
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why sysfs interface?
+ * This is the Linux standard way of changing/configuring Run Time parameters
+ * for a driver. We can use this interface to control "linux" specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
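+
+/*
+ * Example usage from user space (assumption, not guaranteed by this change):
+ *   cat /sys/bcm-dhd/wklock_trace    - dump the wake lock history
+ *                                      (DHD_TRACE_WAKE_LOCK builds)
+ *   echo 1 > /sys/bcm-dhd/lbtxp      - enable TX packet load balancing
+ *                                      (DHD_LB_TXP builds)
+ */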
+
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	buf[ret] = '\n';
+	buf[ret+1] = 0;
+
+	dhd_wk_lock_stats_dump(&dhd->pub);
+	return ret+1;
+}
+
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	unsigned long flags;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+	trace_wklock_onoff = onoff;
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	if (trace_wklock_onoff) {
+		printk("ENABLE WAKELOCK TRACE\n");
+	} else {
+		printk("DISABLE WAKELOCK TRACE\n");
+	}
+
+	return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+#if defined(DHD_LB_TXP)
+static ssize_t
+show_lbtxp(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	onoff = atomic_read(&dhd->lb_txp_active);
+	ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+		onoff);
+	return ret;
+}
+
+static ssize_t
+lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+	int i;
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+
+	sscanf(buf, "%lu", &onoff);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+	atomic_set(&dhd->lb_txp_active, onoff);
+
+	/* Since the scheme is changed clear the counters */
+	for (i = 0; i < NR_CPUS; i++) {
+		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+	}
+
+	return count;
+}
+
+#endif /* DHD_LB_TXP */
+/*
+ * Generic attribute structure for DHD.
+ * To add a new sysfs entry under /sys/bcm-dhd/, instantiate an object of
+ * type dhd_attr, populate it with the required show/store functions
+ * (e.g. dhd_attr_cpumask_primary), and add the object to the default_attrs[]
+ * array, which gets registered with the kobject of dhd (named bcm-dhd).
+ */
+
+struct dhd_attr {
+	struct attribute attr;
+	ssize_t(*show)(struct dhd_info *, char *);
+	ssize_t(*store)(struct dhd_info *, const char *, size_t count);
+};
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
+
+#if defined(DHD_LB_TXP)
+static struct dhd_attr dhd_attr_lbtxp =
+	__ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
+#endif /* DHD_LB_TXP */
+
+/* Attribute object that gets registered with "bcm-dhd" kobject tree */
+static struct attribute *default_attrs[] = {
+#if defined(DHD_TRACE_WAKE_LOCK)
+	&dhd_attr_wklock.attr,
+#endif /* DHD_TRACE_WAKE_LOCK */
+#if defined(DHD_LB_TXP)
+	&dhd_attr_lbtxp.attr,
+#endif /* DHD_LB_TXP */
+	NULL
+};
+
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+/*
+ * bcm-dhd kobject show function; the "attr" argument specifies for which
+ * node under "bcm-dhd" the show function is called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	int ret;
+
+	if (d_attr->show)
+		ret = d_attr->show(dhd, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+/*
+ * bcm-dhd kobject store function; the "attr" argument specifies for which
+ * node under "bcm-dhd" the store function is called.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+	const char *buf, size_t count)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	int ret;
+
+	if (d_attr->store)
+		ret = d_attr->store(dhd, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+
+}
+
+static struct sysfs_ops dhd_sysfs_ops = {
+	.show = dhd_show,
+	.store = dhd_store,
+};
+
+static struct kobj_type dhd_ktype = {
+	.sysfs_ops = &dhd_sysfs_ops,
+	.default_attrs = default_attrs,
+};
+
+/* Create a kobject and attach to sysfs interface */
+static int dhd_sysfs_init(dhd_info_t *dhd)
+{
+	int ret = -1;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/* Initialize the kobject */
+	ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
+	if (ret) {
+		kobject_put(&dhd->dhd_kobj);
+		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/*
+	 * We are always responsible for sending the uevent that the kobject
+	 * was added to the system.
+	 */
+	kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
+
+	return ret;
+}
+
+/* Done with the kobject and detach the sysfs interface */
+static void dhd_sysfs_exit(dhd_info_t *dhd)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return;
+	}
+
+	/* Release the kobject */
+	if (dhd->dhd_kobj.state_initialized)
+		kobject_put(&dhd->dhd_kobj);
+}
+
+#ifdef DHD_DEBUG_UART
+bool
+dhd_debug_uart_is_running(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd->duart_execute) {
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+static void
+dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
+{
+	dhd_pub_t *dhdp = handle;
+	dhd_debug_uart_exec(dhdp, "rd");
+}
+
+static void
+dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
+{
+	int ret;
+
+	char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
+	char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
+
+#ifdef DHD_FW_COREDUMP
+	if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
+#endif
+	{
+		if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
+#ifdef DHD_FW_COREDUMP
+			dhdp->memdump_success == FALSE ||
+#endif
+			FALSE) {
+			dhdp->info->duart_execute = TRUE;
+			DHD_ERROR(("DHD: %s - execute %s %s\n",
+				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
+			ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+			DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
+				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
+			dhdp->info->duart_execute = FALSE;
+
+#ifdef DHD_LOG_DUMP
+			if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
+#endif
+			{
+				BUG_ON(1);
+			}
+		}
+	}
+}
+#endif	/* DHD_DEBUG_UART */
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+void
+dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
+{
+	struct file *fp;
+	char *filepath = CONFIG_BCMDHD_CLM_PATH;
+
+	fp = filp_open(filepath, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		DHD_ERROR(("%s: ----- blob file doesn't exist -----\n", __FUNCTION__));
+		dhdp->is_blob = FALSE;
+	} else {
+		DHD_ERROR(("%s: ----- blob file exist -----\n", __FUNCTION__));
+		dhdp->is_blob = TRUE;
+#if defined(CONCATE_BLOB)
+		strncat(fw_path, "_blob", strlen("_blob"));
+#else
+		BCM_REFERENCE(fw_path);
+#endif /* CONCATE_BLOB */
+		filp_close(fp, NULL);
+	}
+}
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+#if defined(PCIE_FULL_DONGLE)
+/** test / loopback */
+void
+dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
+{
+	dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
+	dhd_info_t *dhd_info = (dhd_info_t *)handle;
+	dhd_pub_t *dhdp = &dhd_info->pub;
+
+	if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if ((dhd_info == NULL) || (dhdp == NULL)) {
+		DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
+		return;
+	}
+
+	if (dmmap == NULL) {
+		DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
+		return;
+	}
+	dmaxfer_free_prev_dmaaddr(dhdp, dmmap);
+}
+
+
+void
+dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
+{
+	dhd_info_t *dhd_info = dhdp->info;
+
+	dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
+		DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+}
+#endif /* PCIE_FULL_DONGLE */
+/* ---------------------------- End of sysfs implementation ------------------------------------- */
+#ifdef HOFFLOAD_MODULES
+void
+dhd_linux_get_modfw_address(dhd_pub_t *dhd)
+{
+	const char* module_name = NULL;
+	const struct firmware *module_fw;
+	struct module_metadata *hmem = &dhd->hmem;
+
+	if (dhd_hmem_module_string[0] != '\0') {
+		module_name = dhd_hmem_module_string;
+	} else {
+		DHD_ERROR(("%s No module image name specified\n", __FUNCTION__));
+		return;
+	}
+	if (request_firmware(&module_fw, module_name, dhd_bus_to_dev(dhd->bus))) {
+		DHD_ERROR(("modules.img not available\n"));
+		return;
+	}
+	if (!dhd_alloc_module_memory(dhd->bus, module_fw->size, hmem)) {
+		release_firmware(module_fw);
+		return;
+	}
+	memcpy(hmem->data, module_fw->data, module_fw->size);
+	release_firmware(module_fw);
+}
+#endif /* HOFFLOAD_MODULES */
+
+#ifdef SET_PCIE_IRQ_CPU_CORE
+void
+dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set)
+{
+	unsigned int irq;
+	if (!dhdp) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhdp->bus) {
+		DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) {
+		return;
+	}
+
+	set_irq_cpucore(irq, set);
+}
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+void
+dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
+{
+	dhd_info_t *dhd = NULL;
+	dhd_pub_t *dhdp = NULL;
+	uint reason = HANG_REASON_MAX;
+	char buf[WLC_IOCTL_SMLEN] = {0, };
+	uint32 fw_test_code = 0;
+	dhd = DHD_DEV_INFO(dev);
+
+	if (dhd) {
+		dhdp = &dhd->pub;
+	}
+
+	if (!dhd || !dhdp) {
+		return;
+	}
+
+	reason = (uint) bcm_strtoul(string_num, NULL, 0);
+	DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__,  reason));
+
+	if (reason == 0) {
+		if (dhdp->req_hang_type) {
+			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+				__FUNCTION__, dhdp->req_hang_type));
+			dhdp->req_hang_type = 0;
+			return;
+		} else {
+			DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
+			return;
+		}
+	} else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+		DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
+		return;
+	}
+
+	if (dhdp->req_hang_type != 0) {
+		DHD_ERROR(("Already HANG requested for test\n"));
+		return;
+	}
+
+	switch (reason) {
+		case HANG_REASON_IOCTL_RESP_TIMEOUT:
+			DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
+			dhdp->req_hang_type = reason;
+			fw_test_code = 102; /* resumed on timeout */
+			bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
+			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+			break;
+		case HANG_REASON_DONGLE_TRAP:
+			DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
+			dhdp->req_hang_type = reason;
+			fw_test_code = 99; /* dongle trap */
+			bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
+			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+			break;
+		case HANG_REASON_D3_ACK_TIMEOUT:
+			DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
+			dhdp->req_hang_type = reason;
+			break;
+		case HANG_REASON_BUS_DOWN:
+			DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
+			dhdp->req_hang_type = reason;
+			break;
+		case HANG_REASON_PCIE_LINK_DOWN:
+		case HANG_REASON_MSGBUF_LIVELOCK:
+			dhdp->req_hang_type = 0;
+			DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
+			break;
+		case HANG_REASON_IFACE_OP_FAILURE:
+			DHD_ERROR(("Make HANG!!!: P2P interface delete failure(0x%x)\n", reason));
+			dhdp->req_hang_type = reason;
+			break;
+		case HANG_REASON_HT_AVAIL_ERROR:
+			dhdp->req_hang_type = 0;
+			DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
+			break;
+		case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
+			DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
+			dhdp->req_hang_type = reason;
+			break;
+		default:
+			dhdp->req_hang_type = 0;
+			DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
+			break;
+	}
+}
+#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_get_wakecount(dhd_pub_t *dhdp)
+{
+#ifdef BCMDBUS
+	return NULL;
+#else
+	return dhd_bus_get_wakecount(dhdp);
+#endif /* BCMDBUS */
+}
+#endif /* DHD_WAKE_STATUS */
+
+#ifdef BCM_ASLR_HEAP
+uint32
+dhd_get_random_number(void)
+{
+	uint32 rand = 0;
+	get_random_bytes_arch(&rand, sizeof(rand));
+	return rand;
+}
+#endif /* BCM_ASLR_HEAP */
+
+#ifdef DHD_PKT_LOGGING
+void
+dhd_pktlog_dump(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd_pktlog_write_file(&dhd->pub)) {
+		DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__));
+		return;
+	}
+}
+
+void
+dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
+{
+	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+			(void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
+			dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DHD_PKT_LOGGING */
+
+void *dhd_get_pub(struct net_device *dev)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	if (dhdinfo)
+		return (void *)&dhdinfo->pub;
+	else {
+		printf("%s: null dhdinfo\n", __FUNCTION__);
+		return NULL;
+	}
+}
+
+void *dhd_get_conf(struct net_device *dev)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	if (dhdinfo)
+		return (void *)dhdinfo->pub.conf;
+	else {
+		printf("%s: null dhdinfo\n", __FUNCTION__);
+		return NULL;
+	}
+}
+
+bool dhd_os_wd_timer_enabled(void *bus)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+		return FALSE;
+	}
+	return dhd->wd_timer_valid;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.h b/drivers/net/wireless/bcmdhd/dhd_linux.h
new file mode 100644
index 0000000..b7b3981
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.h
@@ -0,0 +1,187 @@
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux.h 699532 2017-05-15 11:00:39Z $
+ */
+
+/* wifi platform functions for power, interrupt and pre-alloc, either
+ * from Android-like platform device data, or Broadcom wifi platform
+ * device data.
+ *
+ */
+#ifndef __DHD_LINUX_H__
+#define __DHD_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+/* dongle status */
+enum wifi_adapter_status {
+	WIFI_STATUS_POWER_ON = 0,
+	WIFI_STATUS_ATTACH,
+	WIFI_STATUS_FW_READY,
+	WIFI_STATUS_DETTACH
+};
+#define wifi_chk_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status))
+#define wifi_get_adapter_status(adapter, stat) (test_bit(stat, &(adapter)->status))
+#define wifi_set_adapter_status(adapter, stat) (set_bit(stat, &(adapter)->status))
+#define wifi_clr_adapter_status(adapter, stat) (clear_bit(stat, &(adapter)->status))
+#define wifi_chg_adapter_status(adapter, stat) (change_bit(stat, &(adapter)->status))
+
+#define DHD_REGISTRATION_TIMEOUT  1000  /* msec : allowed time to finish dhd registration */
+#define DHD_FW_READY_TIMEOUT  5000  /* msec : allowed time to finish fw download */
+
+typedef struct wifi_adapter_info {
+	const char	*name;
+	uint		irq_num;
+	uint		intr_flags;
+	const char	*fw_path;
+	const char	*nv_path;
+	const char	*clm_path;
+	const char	*conf_path;
+	void		*wifi_plat_data;	/* wifi ctrl func, for backward compatibility */
+	uint		bus_type;
+	uint		bus_num;
+	uint		slot_num;
+	wait_queue_head_t status_event;
+	unsigned long status;
+#if defined(BT_OVER_SDIO)
+	const char	*btfw_path;
+#endif /* defined (BT_OVER_SDIO) */
+#ifdef BUS_POWER_RESTORE
+#if defined(BCMSDIO)
+	struct sdio_func *sdio_func;
+#endif /* BCMSDIO */
+#if defined(BCMPCIE)
+	struct pci_dev *pci_dev;
+	struct pci_saved_state *pci_saved_state;
+#endif /* BCMPCIE */
+#endif
+} wifi_adapter_info_t;
+
+#define WLAN_PLAT_NODFS_FLAG	0x01
+#define WLAN_PLAT_AP_FLAG	0x02
+struct wifi_platform_data {
+#ifdef BUS_POWER_RESTORE
+	int (*set_power)(int val, wifi_adapter_info_t *adapter);
+#else
+	int (*set_power)(int val);
+#endif
+	int (*set_reset)(int val);
+	int (*set_carddetect)(int val);
+	void *(*mem_prealloc)(int section, unsigned long size);
+	int (*get_mac_addr)(unsigned char *buf);
+#if defined(CUSTOM_COUNTRY_CODE)
+	void *(*get_country_code)(char *ccode, u32 flags);
+#else /* defined (CUSTOM_COUNTRY_CODE) */
+	void *(*get_country_code)(char *ccode);
+#endif
+};
+
+typedef struct bcmdhd_wifi_platdata {
+	uint				num_adapters;
+	wifi_adapter_info_t	*adapters;
+} bcmdhd_wifi_platdata_t;
+
+/** Per STA params. A list of dhd_sta objects are managed in dhd_if */
+typedef struct dhd_sta {
+	cumm_ctr_t cumm_ctr;    /* cumulative queue length of child flowrings */
+	uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */
+	void * ifp;             /* associated dhd_if */
+	struct ether_addr ea;   /* station's ethernet MAC address */
+	struct list_head list;  /* link into dhd_if::sta_list */
+	int idx;                /* index of self in dhd_pub::sta_pool[] */
+	int ifidx;              /* index of interface in dhd */
+#ifdef DHD_WMF
+	struct dhd_sta *psta_prim; /* primary index of psta interface */
+#endif /* DHD_WMF */
+} dhd_sta_t;
+typedef dhd_sta_t dhd_sta_pool_t;
+
+int dhd_wifi_platform_register_drv(void);
+void dhd_wifi_platform_unregister_drv(void);
+wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type,
+	uint32 bus_num, uint32 slot_num, unsigned long status);
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
+	uint32 slot_num);
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
+#ifdef CUSTOM_COUNTRY_CODE
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode,
+	u32 flags);
+#else
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode);
+#endif /* CUSTOM_COUNTRY_CODE */
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
+
+int dhd_get_fw_mode(struct dhd_info *dhdinfo);
+bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+
+#ifdef DHD_WMF
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx);
+int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx);
+int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val);
+void dhd_update_psta_interface_for_sta(dhd_pub_t *dhdp, char* ifname,
+		void* mac_addr, void* event_data);
+#endif /* DHD_WMF */
+#if defined(BT_OVER_SDIO)
+int dhd_net_bus_get(struct net_device *dev);
+int dhd_net_bus_put(struct net_device *dev);
+#endif /* BT_OVER_SDIO */
+#ifdef HOFFLOAD_MODULES
+extern void dhd_free_module_memory(struct dhd_bus *bus, struct module_metadata *hmem);
+extern void* dhd_alloc_module_memory(struct dhd_bus *bus, uint32_t size,
+	struct module_metadata *hmem);
+#endif /* HOFFLOAD_MODULES */
+#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
+#define ADPS_ENABLE	1
+#define ADPS_DISABLE	0
+typedef struct bcm_iov_buf {
+	uint16 version;
+	uint16 len;
+	uint16 id;
+	uint16 data[1];
+} bcm_iov_buf_t;
+
+int dhd_enable_adps(dhd_pub_t *dhd, uint8 on);
+#endif /* WLADPS || WLADPS_PRIVATE_CMD */
+#endif /* __DHD_LINUX_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
new file mode 100644
index 0000000..6510178
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
@@ -0,0 +1,991 @@
+/*
+ * Linux platform device for DHD WLAN adapter
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_platdev.c 662397 2016-09-29 10:15:08Z $
+ */
+#include <typedefs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_linux.h>
+#include <wl_android.h>
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/wlan_plat.h>
+#endif
+#ifdef CONFIG_DTS
+#include<linux/regulator/consumer.h>
+#include<linux/of_gpio.h>
+#endif /* CONFIG_DTS */
+
+#if defined(CUSTOMER_HW)
+extern int dhd_wlan_init_plat_data(void);
+extern void dhd_wlan_deinit_plat_data(wifi_adapter_info_t *adapter);
+#endif /* CUSTOMER_HW */
+
+#define WIFI_PLAT_NAME		"bcmdhd_wlan"
+#define WIFI_PLAT_NAME2		"bcm4329_wlan"
+#define WIFI_PLAT_EXT		"bcmdhd_wifi_platform"
+
+#ifdef CONFIG_DTS
+struct regulator *wifi_regulator = NULL;
+extern struct wifi_platform_data dhd_wlan_control;
+#endif /* CONFIG_DTS */
+
+bool cfg_multichip = FALSE;
+bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL;
+static int wifi_plat_dev_probe_ret = 0;
+static bool is_power_on = FALSE;
+#if !defined(CONFIG_DTS)
+#if defined(DHD_OF_SUPPORT)
+static bool dts_enabled = TRUE;
+extern struct resource dhd_wlan_resources;
+extern struct wifi_platform_data dhd_wlan_control;
+#else
+static bool dts_enabled = FALSE;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+struct resource dhd_wlan_resources = {0};
+struct wifi_platform_data dhd_wlan_control = {0};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#endif /* defined(DHD_OF_SUPPORT) */
+#endif /* !defined(CONFIG_DTS) */
+
+static int dhd_wifi_platform_load(void);
+
+extern void* wl_cfg80211_get_dhdp(struct net_device *dev);
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type,
+	uint32 bus_num, uint32 slot_num, unsigned long status)
+{
+	int i;
+
+	if (dhd_wifi_platdata == NULL)
+		return NULL;
+
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+		if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+			(adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+			(adapter->slot_num == -1 || adapter->slot_num == slot_num)
+#if defined(ENABLE_INSMOD_NO_FW_LOAD)
+			&& (wifi_chk_adapter_status(adapter, status))
+#endif
+		) {
+			DHD_ERROR(("attach adapter info '%s'\n", adapter->name));
+			return adapter;
+		}
+	}
+	return NULL;
+}
+
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num)
+{
+	int i;
+
+	if (dhd_wifi_platdata == NULL)
+		return NULL;
+
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+		if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+			(adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+			(adapter->slot_num == -1 || adapter->slot_num == slot_num)) {
+			DHD_TRACE(("found adapter info '%s'\n", adapter->name));
+			return adapter;
+		}
+	}
+	return NULL;
+}
+
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size)
+{
+	void *alloc_ptr = NULL;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->mem_prealloc) {
+		alloc_ptr = plat_data->mem_prealloc(section, size);
+		if (alloc_ptr) {
+			DHD_INFO(("success alloc section %d\n", section));
+			if (size != 0L)
+				bzero(alloc_ptr, size);
+			return alloc_ptr;
+		}
+	} else
+		return NULL;
+
+	DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section));
+	return NULL;
+}
+
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter)
+{
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	return plat_data->mem_prealloc;
+}
+
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr)
+{
+	if (adapter == NULL)
+		return -1;
+	if (irq_flags_ptr)
+		*irq_flags_ptr = adapter->intr_flags;
+	return adapter->irq_num;
+}
+
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec)
+{
+	int err = 0;
+#ifndef CONFIG_DTS
+	struct wifi_platform_data *plat_data;
+#endif
+#ifdef BT_OVER_SDIO
+	if (is_power_on == on) {
+		return -EINVAL;
+	}
+#endif /* BT_OVER_SDIO */
+	if (on) {
+		wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+	} else {
+		wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+	}
+#ifdef CONFIG_DTS
+	if (on) {
+		printf("======== PULL WL_REG_ON HIGH! ========\n");
+		err = regulator_enable(wifi_regulator);
+		is_power_on = TRUE;
+	}
+	else {
+		printf("======== PULL WL_REG_ON LOW! ========\n");
+		err = regulator_disable(wifi_regulator);
+		is_power_on = FALSE;
+	}
+	if (err < 0) {
+		DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__));
+		goto fail;
+	}
+#else
+	if (!adapter || !adapter->wifi_plat_data) {
+		err = -EINVAL;
+		goto fail;
+	}
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (plat_data->set_power) {
+#ifdef ENABLE_4335BT_WAR
+		if (on) {
+			printk("WiFi: trying to acquire BT lock\n");
+			if (bcm_bt_lock(lock_cookie_wifi) != 0)
+				printk("** WiFi: timeout in acquiring bt lock**\n");
+			printk("%s: btlock acquired\n", __FUNCTION__);
+		}
+		else {
+			/* For an exceptional case, release btlock */
+			bcm_bt_unlock(lock_cookie_wifi);
+		}
+#endif /* ENABLE_4335BT_WAR */
+
+#ifdef BUS_POWER_RESTORE
+		err = plat_data->set_power(on, adapter);
+#else
+		err = plat_data->set_power(on);
+#endif
+	}
+
+	if (msec && !err)
+		OSL_SLEEP(msec);
+
+	if (on && !err)
+		is_power_on = TRUE;
+	else
+		is_power_on = FALSE;
+
+#endif /* CONFIG_DTS */
+
+	return err;
+fail:
+	if (on) {
+		wifi_clr_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+	} else {
+		wifi_set_adapter_status(adapter, WIFI_STATUS_POWER_ON);
+	}
+	return err;
+}
+
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present)
+{
+	int err = 0;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present));
+	if (plat_data->set_carddetect) {
+		err = plat_data->set_carddetect(device_present);
+	}
+	return err;
+
+}
+
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
+{
+	struct wifi_platform_data *plat_data;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+	if (!buf || !adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->get_mac_addr) {
+		return plat_data->get_mac_addr(buf);
+	}
+	return -EOPNOTSUPP;
+}
+
+void *
+#ifdef CUSTOM_COUNTRY_CODE
+wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags)
+#else
+wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+#endif /* CUSTOM_COUNTRY_CODE */
+{
+	/* get_country_code was added after 2.6.39 */
+#if	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	struct wifi_platform_data *plat_data;
+
+	if (!ccode || !adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+	if (plat_data->get_country_code) {
+#ifdef CUSTOM_COUNTRY_CODE
+		return plat_data->get_country_code(ccode, flags);
+#else
+		return plat_data->get_country_code(ccode);
+#endif /* CUSTOM_COUNTRY_CODE */
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+	return NULL;
+}
+
+#ifndef CUSTOMER_HW
+static int wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	wifi_adapter_info_t *adapter;
+#if defined(CONFIG_DTS) && defined(CUSTOMER_OOB)
+	int irq, gpio;
+#endif /* CONFIG_DTS */
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+//	adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+	if (resource == NULL)
+		resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq");
+	if (resource) {
+		adapter->irq_num = resource->start;
+		adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+#ifdef DHD_ISR_NO_SUSPEND
+		adapter->intr_flags |= IRQF_NO_SUSPEND;
+#endif
+	}
+
+#ifdef CONFIG_DTS
+	wifi_regulator = regulator_get(&pdev->dev, "wlreg_on");
+	if (wifi_regulator == NULL) {
+		DHD_ERROR(("%s regulator is null\n", __FUNCTION__));
+		return -1;
+	}
+
+#if defined(CUSTOMER_OOB)
+	/* This is to get the irq for the OOB */
+	gpio = of_get_gpio(pdev->dev.of_node, 0);
+
+	if (gpio < 0) {
+		DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__));
+		return -1;
+	}
+	irq = gpio_to_irq(gpio);
+	if (irq < 0) {
+		DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__));
+		return -1;
+	}
+	adapter->irq_num = irq;
+
+	/* need to change the flags according to our requirement */
+	adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
+		IORESOURCE_IRQ_SHAREABLE;
+#endif
+#endif /* CONFIG_DTS */
+
+	wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	return wifi_plat_dev_probe_ret;
+}
+
+static int wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	wifi_adapter_info_t *adapter;
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	if (is_power_on) {
+#ifdef BCMPCIE
+		wifi_platform_bus_enumerate(adapter, FALSE);
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+#else
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+#endif /* BCMPCIE */
+	}
+
+#ifdef CONFIG_DTS
+	regulator_put(wifi_regulator);
+#endif /* CONFIG_DTS */
+	return 0;
+}
+
+static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+	defined(BCMSDIO)
+	bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static int wifi_plat_dev_drv_resume(struct platform_device *pdev)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+	defined(BCMSDIO)
+	if (dhd_os_check_if_up(wl_cfg80211_get_dhdp()))
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+#ifdef CONFIG_DTS
+static const struct of_device_id wifi_device_dt_match[] = {
+	{ .compatible = "android,bcmdhd_wlan", },
+	{},
+};
+#endif /* CONFIG_DTS */
+
+static struct platform_driver wifi_platform_dev_driver = {
+	.probe          = wifi_plat_dev_drv_probe,
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+	.name   = WIFI_PLAT_NAME,
+#ifdef CONFIG_DTS
+	.of_match_table = wifi_device_dt_match,
+#endif /* CONFIG_DTS */
+	}
+};
+
+static struct platform_driver wifi_platform_dev_driver_legacy = {
+	.probe          = wifi_plat_dev_drv_probe,
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+	.name	= WIFI_PLAT_NAME2,
+	}
+};
+
+static int wifi_platdev_match(struct device *dev, void *data)
+{
+	char *name = (char*)data;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	const struct platform_device *pdev = to_platform_device(dev);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	if (strcmp(pdev->name, name) == 0) {
+		DHD_ERROR(("found wifi platform device %s\n", name));
+		return TRUE;
+	}
+
+	return FALSE;
+}
+#endif
+
+static int wifi_ctrlfunc_register_drv(void)
+{
+	wifi_adapter_info_t *adapter;
+
+#ifndef CUSTOMER_HW
+	int err = 0;
+	struct device *dev1, *dev2;
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+#endif
+
+#if !defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
+	if (!dts_enabled) {
+		if (dev1 == NULL && dev2 == NULL) {
+			DHD_ERROR(("no wifi platform data, skip\n"));
+			return -ENXIO;
+		}
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+	/* multi-chip support not enabled, build one adapter information for
+	 * DHD (either SDIO, USB or PCIe)
+	 */
+	adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+	if (adapter == NULL) {
+		DHD_ERROR(("%s: adapter alloc failed\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+	adapter->name = "DHD generic adapter";
+	adapter->bus_type = -1;
+	adapter->bus_num = -1;
+	adapter->slot_num = -1;
+	adapter->irq_num = -1;
+	is_power_on = FALSE;
+	wifi_plat_dev_probe_ret = 0;
+	dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL);
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("%s: platdata alloc failed\n", __FUNCTION__));
+		kfree(adapter);
+		return -ENOMEM;
+	}
+	dhd_wifi_platdata->num_adapters = 1;
+	dhd_wifi_platdata->adapters = adapter;
+	init_waitqueue_head(&adapter->status_event);
+
+#ifndef CUSTOMER_HW
+	if (dev1) {
+		err = platform_driver_register(&wifi_platform_dev_driver);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+	if (dev2) {
+		err = platform_driver_register(&wifi_platform_dev_driver_legacy);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+#endif
+
+#if !defined(CONFIG_DTS)
+	if (dts_enabled) {
+		struct resource *resource;
+		adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+		resource = &dhd_wlan_resources;
+#ifdef CUSTOMER_HW
+		wifi_plat_dev_probe_ret = dhd_wlan_init_plat_data();
+		if (wifi_plat_dev_probe_ret)
+			return wifi_plat_dev_probe_ret;
+#endif
+		adapter->irq_num = resource->start;
+		adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+#ifdef DHD_ISR_NO_SUSPEND
+		adapter->intr_flags |= IRQF_NO_SUSPEND;
+#endif
+		wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+
+#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
+	wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver);
+#endif /* CONFIG_DTS */
+
+	/* return probe function's return value if registration succeeded */
+	return wifi_plat_dev_probe_ret;
+}
+
+void wifi_ctrlfunc_unregister_drv(void)
+{
+#ifndef CONFIG_DTS
+	wifi_adapter_info_t *adapter = NULL;
+#endif
+
+#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+	platform_driver_unregister(&wifi_platform_dev_driver);
+#else
+#ifndef CUSTOMER_HW
+	struct device *dev1, *dev2;
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+	if (!dts_enabled)
+		if (dev1 == NULL && dev2 == NULL)
+			return;
+#endif
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+#ifndef CUSTOMER_HW
+	if (dev1)
+		platform_driver_unregister(&wifi_platform_dev_driver);
+	if (dev2)
+		platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+#endif
+	if (dts_enabled) {
+		adapter = &dhd_wifi_platdata->adapters[0];
+		if (is_power_on) {
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		}
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+#if defined(CUSTOMER_HW)
+	dhd_wlan_deinit_plat_data(adapter);
+#endif
+
+	kfree(dhd_wifi_platdata->adapters);
+	dhd_wifi_platdata->adapters = NULL;
+	dhd_wifi_platdata->num_adapters = 0;
+	kfree(dhd_wifi_platdata);
+	dhd_wifi_platdata = NULL;
+}
+
+#ifndef CUSTOMER_HW
+static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
+
+	return dhd_wifi_platform_load();
+}
+
+static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	int i;
+	wifi_adapter_info_t *adapter;
+	ASSERT(dhd_wifi_platdata != NULL);
+
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+	return 0;
+}
+
+static struct platform_driver dhd_wifi_platform_dev_driver = {
+	.probe          = bcmdhd_wifi_plat_dev_drv_probe,
+	.remove         = bcmdhd_wifi_plat_dev_drv_remove,
+	.driver         = {
+	.name   = WIFI_PLAT_EXT,
+	}
+};
+#endif
+
+int dhd_wifi_platform_register_drv(void)
+{
+	int err = 0;
+#ifndef CUSTOMER_HW
+	struct device *dev;
+
+	/* register Broadcom wifi platform data driver if multi-chip is enabled,
+	 * otherwise use Android style wifi platform data (aka wifi control function)
+	 * if it exists
+	 *
+	 * to support multi-chip DHD, Broadcom wifi platform data device must
+	 * be added in kernel early boot (e.g. board config file); a sketch of
+	 * such a registration follows this function.
+	 */
+	if (cfg_multichip) {
+		dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match);
+		if (dev == NULL) {
+			DHD_ERROR(("bcmdhd wifi platform data device not found!!\n"));
+			return -ENXIO;
+		}
+		err = platform_driver_register(&dhd_wifi_platform_dev_driver);
+	} else
+#endif
+	{
+		err = wifi_ctrlfunc_register_drv();
+
+		/* no wifi ctrl func either, load bus directly and ignore this error */
+		if (err) {
+			if (err == -ENXIO) {
+				/* wifi ctrl function does not exist */
+				err = dhd_wifi_platform_load();
+			} else {
+				/* unregister driver due to initialization failure */
+				wifi_ctrlfunc_unregister_drv();
+			}
+		}
+	}
+
+	return err;
+}
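+
+/*
+ * Illustrative sketch (not part of this driver): for the multi-chip path
+ * above, a board file would register a platform device named
+ * WIFI_PLAT_EXT ("bcmdhd_wifi_platform") whose platform_data points to a
+ * bcmdhd_wifi_platdata_t, roughly along these lines (board_wifi_platdata
+ * is a made-up name):
+ *
+ *	static struct platform_device bcmdhd_wifi_platform_dev = {
+ *		.name = "bcmdhd_wifi_platform",
+ *		.id   = -1,
+ *		.dev  = { .platform_data = &board_wifi_platdata },
+ *	};
+ *	platform_device_register(&bcmdhd_wifi_platform_dev);
+ *
+ * bcmdhd_wifi_plat_dev_drv_probe() above then picks up that platform_data.
+ */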
+
+#ifdef BCMPCIE
+static int dhd_wifi_platform_load_pcie(void)
+{
+	int err = 0;
+	int i;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+
+	if (dhd_wifi_platdata == NULL) {
+		err = dhd_bus_register();
+	} else {
+		if (dhd_download_fw_on_driverload) {
+			/* power up all adapters */
+			for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+				int retry = POWERUP_MAX_RETRY;
+				adapter = &dhd_wifi_platdata->adapters[i];
+
+				DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+				DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+					adapter->irq_num, adapter->intr_flags, adapter->fw_path,
+					adapter->nv_path));
+				DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+					adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+				do {
+					err = wifi_platform_set_power(adapter,
+						TRUE, WIFI_TURNON_DELAY);
+					if (err) {
+						DHD_ERROR(("failed to power up %s,"
+							" %d retry left\n",
+							adapter->name, retry));
+						/* WL_REG_ON state unknown, power off forcibly */
+						wifi_platform_set_power(adapter,
+							FALSE, WIFI_TURNOFF_DELAY);
+						continue;
+					} else {
+						err = wifi_platform_bus_enumerate(adapter, TRUE);
+						if (err) {
+							DHD_ERROR(("failed to enumerate bus %s, "
+								"%d retry left\n",
+								adapter->name, retry));
+							wifi_platform_set_power(adapter, FALSE,
+								WIFI_TURNOFF_DELAY);
+						} else {
+							break;
+						}
+					}
+				} while (retry--);
+
+				if (retry < 0) {
+					DHD_ERROR(("failed to power up %s, max retry reached**\n",
+						adapter->name));
+					return -ENODEV;
+				}
+			}
+		}
+
+		err = dhd_bus_register();
+
+		if (err) {
+			DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__));
+			if (dhd_download_fw_on_driverload) {
+				/* power down all adapters */
+				for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+					adapter = &dhd_wifi_platdata->adapters[i];
+					wifi_platform_bus_enumerate(adapter, FALSE);
+					wifi_platform_set_power(adapter,
+						FALSE, WIFI_TURNOFF_DELAY);
+				}
+			}
+		}
+	}
+
+	return err;
+}
+#else
+static int dhd_wifi_platform_load_pcie(void)
+{
+	return 0;
+}
+#endif /* BCMPCIE  */
+
+
+void dhd_wifi_platform_unregister_drv(void)
+{
+#ifndef CUSTOMER_HW
+	if (cfg_multichip)
+		platform_driver_unregister(&dhd_wifi_platform_dev_driver);
+	else
+#endif
+		wifi_ctrlfunc_unregister_drv();
+}
+
+extern int dhd_watchdog_prio;
+extern int dhd_dpc_prio;
+extern uint dhd_deferred_tx;
+#if defined(BCMLXSDMMC) || defined(BCMDBUS)
+extern struct semaphore dhd_registration_sem;
+#endif 
+
+#ifdef BCMSDIO
+static int dhd_wifi_platform_load_sdio(void)
+{
+	int i;
+	int err = 0;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+	/* Sanity check on the module parameters
+	 * - Both watchdog and DPC as tasklets are ok
+	 * - If both watchdog and DPC are threads, TX must be deferred
+	 */
+	if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) &&
+		!(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx))
+		return -EINVAL;
+
+#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+	sema_init(&dhd_registration_sem, 0);
+#endif
+
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
+		DHD_ERROR(("DHD registering bus directly\n"));
+		/* x86 bring-up PC needs no power-up operations */
+		err = dhd_bus_register();
+		return err;
+	}
+
+#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+	/* power up all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		bool chip_up = FALSE;
+		int retry = POWERUP_MAX_RETRY;
+		struct semaphore dhd_chipup_sem;
+
+		adapter = &dhd_wifi_platdata->adapters[i];
+
+		DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+		DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+			adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+		DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+			adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+		do {
+			sema_init(&dhd_chipup_sem, 0);
+			err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+			if (err) {
+				DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+					__FUNCTION__, err));
+				return err;
+			}
+			err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+			if (err) {
+				dhd_bus_unreg_sdio_notify();
+				/* WL_REG_ON state unknown, power off forcibly */
+				wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+				continue;
+			} else {
+				wifi_platform_bus_enumerate(adapter, TRUE);
+			}
+
+			if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+				dhd_bus_unreg_sdio_notify();
+				chip_up = TRUE;
+				break;
+			}
+
+			DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry));
+			dhd_bus_unreg_sdio_notify();
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		} while (retry--);
+
+		if (!chip_up) {
+			DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name));
+			return -ENODEV;
+		}
+
+	}
+
+	err = dhd_bus_register();
+
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/*
+	 * Wait until the MMC sdio_register_driver callback has been called and the
+	 * driver attach has completed. This is needed to synchronize the exit of
+	 * dhd insmod with the kernel MMC sdio device callback registration.
+	 */
+	err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
+		dhd_bus_unregister();
+		goto fail;
+	}
+
+	return err;
+
+fail:
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+#endif 
+
+	return err;
+}
+#else /* BCMSDIO */
+static int dhd_wifi_platform_load_sdio(void)
+{
+	return 0;
+}
+#endif /* BCMSDIO */
+
+#ifdef BCMDBUS
+static int dhd_wifi_platform_load_usb(void)
+{
+	wifi_adapter_info_t *adapter;
+	s32 timeout = -1;
+	int i;
+	int err = 0;
+	enum wifi_adapter_status wait_status;
+
+	err = dhd_bus_register();
+	if (err) {
+		DHD_ERROR(("%s: usb_register failed\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/* power up all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+		DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+			adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+		DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+			adapter->bus_type, adapter->bus_num, adapter->slot_num));
+		err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+		if (err) {
+			DHD_ERROR(("failed to wifi_platform_set_power on %s\n", adapter->name));
+			goto fail;
+		}
+		if (dhd_download_fw_on_driverload)
+			wait_status = WIFI_STATUS_ATTACH;
+		else
+			wait_status = WIFI_STATUS_DETTACH;
+		timeout = wait_event_interruptible_timeout(adapter->status_event,
+			wifi_get_adapter_status(adapter, wait_status),
+			msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+		if (timeout <= 0) {
+			err = -1;
+			DHD_ERROR(("%s: usb_register_driver timeout\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+
+exit:
+	return err;
+
+fail:
+	dhd_bus_unregister();
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+	}
+
+	return err;
+}
+#else /* BCMDBUS */
+static int dhd_wifi_platform_load_usb(void)
+{
+	return 0;
+}
+#endif /* BCMDBUS */
+
+static int dhd_wifi_platform_load(void)
+{
+	int err = 0;
+	printf("%s: Enter\n", __FUNCTION__);
+
+	wl_android_init();
+
+	if ((err = dhd_wifi_platform_load_usb()))
+		goto end;
+	else if ((err = dhd_wifi_platform_load_sdio()))
+		goto end;
+	else
+		err = dhd_wifi_platform_load_pcie();
+
+end:
+	if (err)
+		wl_android_exit();
+#if !defined(MULTIPLE_SUPPLICANT)
+	else
+		wl_android_post_init();
+#endif
+
+	return err;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
new file mode 100644
index 0000000..88c0cce
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -0,0 +1,51 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+	int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+	return rc;
+}
+
+int get_scheduler_policy(struct task_struct *p)
+{
+	int rc = SCHED_NORMAL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = p->policy;
+#endif /* LinuxVer */
+	return rc;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.c b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
new file mode 100644
index 0000000..1cba8d1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
@@ -0,0 +1,379 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_wq.c 641330 2016-06-02 06:55:00Z $
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/kfifo.h>
+
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_wq.h>
+
+typedef struct dhd_deferred_event {
+	u8 event;		/* holds the event */
+	void *event_data;	/* holds event specific data */
+	event_handler_t event_handler;
+	unsigned long pad;	/* for memory alignment to power of 2 */
+} dhd_deferred_event_t;
+
+#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))
+
+/*
+ * Work events may occur simultaneously.
+ * Can hold up to 64 low priority events and 16 high priority events.
+ */
+#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
+#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)
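+/*
+ * Note: both sizes are rounded up to a power of two in
+ * dhd_deferred_work_init() before the kfifos are created.
+ */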
+
+#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
+	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
+#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
+	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))
+
+struct dhd_deferred_wq {
+	struct work_struct deferred_work; /* should be the first member */
+
+	struct kfifo *prio_fifo;
+	struct kfifo *work_fifo;
+	u8 *prio_fifo_buf;
+	u8 *work_fifo_buf;
+	spinlock_t work_lock;
+	void *dhd_info; /* review: is this really required? */
+};
+
+static inline struct kfifo*
+dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
+{
+	struct kfifo *fifo;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+	fifo = kfifo_init(buf, size, flags, lock);
+#else
+	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
+	if (!fifo) {
+		return NULL;
+	}
+	kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+	return fifo;
+}
+
+static inline void
+dhd_kfifo_free(struct kfifo *fifo)
+{
+	kfifo_free(fifo);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+	/* FC11 releases the fifo memory */
+	kfree(fifo);
+#endif
+}
+
+/* deferred work functions */
+static void dhd_deferred_work_handler(struct work_struct *data);
+
+void*
+dhd_deferred_work_init(void *dhd_info)
+{
+	struct dhd_deferred_wq *work = NULL;
+	u8* buf;
+	unsigned long fifo_size = 0;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+
+	if (!dhd_info) {
+		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
+		flags);
+	if (!work) {
+		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
+
+	/* initialize event fifo */
+	spin_lock_init(&work->work_lock);
+
+	/* allocate buffer to hold prio events */
+	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
+			roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: prio work fifo allocation failed\n",
+			__FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize prio event fifo */
+	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->prio_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	/* allocate buffer to hold work events */
+	fifo_size = DHD_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
+			roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize event fifo */
+	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->work_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	work->dhd_info = dhd_info;
+	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
+	return work;
+
+return_null:
+	if (work) {
+		dhd_deferred_work_deinit(work);
+	}
+
+	return NULL;
+}
+
+void
+dhd_deferred_work_deinit(void *work)
+{
+	struct dhd_deferred_wq *deferred_work = work;
+
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: deferred work has been freed already\n",
+			__FUNCTION__));
+		return;
+	}
+
+	/* cancel the deferred work handling */
+	cancel_work_sync((struct work_struct *)deferred_work);
+
+	/*
+	 * free work event fifo.
+	 * kfifo_free frees locally allocated fifo buffer
+	 */
+	if (deferred_work->prio_fifo) {
+		dhd_kfifo_free(deferred_work->prio_fifo);
+	}
+
+	if (deferred_work->work_fifo) {
+		dhd_kfifo_free(deferred_work->work_fifo);
+	}
+
+	kfree(deferred_work);
+}
+
+/* select kfifo according to priority */
+static inline struct kfifo *
+dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
+	u8 priority)
+{
+	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
+		return deferred_wq->prio_fifo;
+	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
+		return deferred_wq->work_fifo;
+	} else {
+		return NULL;
+	}
+}
+
+/*
+ *	Prepares event to be queued
+ *	Schedules the event
+ */
+int
+dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t event_handler, u8 priority)
+{
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
+	struct kfifo *fifo;
+	dhd_deferred_event_t deferred_event;
+	int bytes_copied = 0;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		ASSERT(0);
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
+			event));
+		return DHD_WQ_STS_UNKNOWN_EVENT;
+	}
+
+	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
+		DHD_ERROR(("%s: unknown priority, priority=%d\n",
+			__FUNCTION__, priority));
+		return DHD_WQ_STS_UNKNOWN_PRIORITY;
+	}
+
+	/*
+	 * default element size is 1, which can be changed
+	 * using kfifo_esize(). Older kernels (FC11) don't support
+	 * changing the element size. For compatibility, changing the
+	 * element size is not preferred.
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	deferred_event.event = event;
+	deferred_event.event_data = event_data;
+	deferred_event.event_handler = event_handler;
+
+	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
+	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
+		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+	if (bytes_copied != DEFRD_EVT_SIZE) {
+		DHD_ERROR(("%s: failed to schedule deferred work, "
+			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
+			priority, bytes_copied));
+		return DHD_WQ_STS_SCHED_FAILED;
+	}
+	schedule_work((struct work_struct *)deferred_wq);
+	return DHD_WQ_STS_OK;
+}
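+
+/*
+ * Typical caller usage (see e.g. dhd_schedule_pktlog_dump() in dhd_linux.c):
+ *
+ *	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)NULL,
+ *		DHD_WQ_WORK_PKTLOG_DUMP, dhd_pktlog_dump,
+ *		DHD_WQ_WORK_PRIORITY_HIGH);
+ *
+ * The queued handler later runs from dhd_deferred_work_handler() in
+ * process context.
+ */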
+
+static bool
+dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
+	dhd_deferred_event_t *event)
+{
+	int bytes_copied = 0;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	/*
+	 * default element size is 1 byte, which can be changed
+	 * using kfifo_esize(). Older kernels (FC11) don't support
+	 * changing the element size. For compatibility, changing the
+	 * element size is not preferred.
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	/* handle priority work */
+	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
+		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
+			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	/* handle normal work if priority work doesn't have enough data */
+	if ((bytes_copied != DEFRD_EVT_SIZE) &&
+		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
+		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
+			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	return (bytes_copied == DEFRD_EVT_SIZE);
+}
+
+static inline void
+dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
+{
+	if (!work_event) {
+		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
+		work_event->event));
+	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
+		work_event->event_data));
+	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
+		work_event->event_handler));
+}
+
+/*
+ *	Called when work is scheduled
+ */
+static void
+dhd_deferred_work_handler(struct work_struct *work)
+{
+	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
+	dhd_deferred_event_t work_event;
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		return;
+	}
+
+	do {
+		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
+			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
+			break;
+		}
+
+		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
+			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
+			dhd_deferred_dump_work_event(&work_event);
+			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
+			continue;
+		}
+
+
+		if (work_event.event_handler) {
+			work_event.event_handler(deferred_work->dhd_info,
+				work_event.event_data, work_event.event);
+		} else {
+			DHD_ERROR(("%s: event handler is null\n",
+				__FUNCTION__));
+			dhd_deferred_dump_work_event(&work_event);
+			ASSERT(work_event.event_handler != NULL);
+		}
+	} while (1);
+
+	return;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.h b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
new file mode 100644
index 0000000..9c51d06
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
@@ -0,0 +1,81 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_wq.h 704361 2017-06-13 08:50:38Z $
+ */
+#ifndef _dhd_linux_wq_h_
+#define _dhd_linux_wq_h_
+/*
+ *	Work event definitions
+ */
+enum _wq_event {
+	DHD_WQ_WORK_IF_ADD = 1,
+	DHD_WQ_WORK_IF_DEL,
+	DHD_WQ_WORK_SET_MAC,
+	DHD_WQ_WORK_SET_MCAST_LIST,
+	DHD_WQ_WORK_IPV6_NDO,
+	DHD_WQ_WORK_HANG_MSG,
+	DHD_WQ_WORK_SOC_RAM_DUMP,
+	DHD_WQ_WORK_DHD_LOG_DUMP,
+	DHD_WQ_WORK_INFORM_DHD_MON,
+	DHD_WQ_WORK_EVENT_LOGTRACE,
+	DHD_WQ_WORK_DMA_LB_MEM_REL,
+	DHD_WQ_WORK_DEBUG_UART_DUMP,
+	DHD_WQ_WORK_SSSR_DUMP,
+	DHD_WQ_WORK_PKTLOG_DUMP,
+#ifdef DHD_UPDATE_INTF_MAC
+	DHD_WQ_WORK_IF_UPDATE,
+#endif /* DHD_UPDATE_INTF_MAC */
+	DHD_MAX_WQ_EVENTS
+};
+
+/*
+ *	Work event priority
+ */
+enum wq_priority {
+	DHD_WQ_WORK_PRIORITY_LOW = 1,
+	DHD_WQ_WORK_PRIORITY_HIGH,
+	DHD_WQ_MAX_PRIORITY
+};
+
+/*
+ *	Error definitions
+ */
+#define DHD_WQ_STS_OK			 0
+#define DHD_WQ_STS_FAILED		-1	/* General failure */
+#define DHD_WQ_STS_UNINITIALIZED	-2
+#define DHD_WQ_STS_SCHED_FAILED		-3
+#define DHD_WQ_STS_UNKNOWN_EVENT	-4
+#define DHD_WQ_STS_UNKNOWN_PRIORITY	-5
+
+typedef void (*event_handler_t)(void *handle, void *event_data, u8 event);
+
+void *dhd_deferred_work_init(void *dhd);
+void dhd_deferred_work_deinit(void *workq);
+int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t evt_handler, u8 priority);
+#endif /* _dhd_linux_wq_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_mschdbg.c b/drivers/net/wireless/bcmdhd/dhd_mschdbg.c
new file mode 100644
index 0000000..c1032f2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_mschdbg.c
@@ -0,0 +1,747 @@
+/*
+ * DHD debugability support
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_mschdbg.c 639872 2016-05-25 05:39:30Z $
+ */
+#ifdef SHOW_LOGTRACE
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_debug.h>
+#include <dhd_mschdbg.h>
+
+#include <event_log.h>
+#include <event_trace.h>
+#include <msgtrace.h>
+
+static const char *head_log = "";
+#define MSCH_EVENT_HEAD(space) \
+	do { \
+		MSCH_EVENT(("%s_E:  ", head_log)); \
+		if (space > 0) { \
+			int ii; \
+			for (ii = 0; ii < space; ii += 4) MSCH_EVENT(("    ")); \
+		} \
+	} while (0)
+#define MSCH_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
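+
+/*
+ * Example (illustrative): with head_log set to "MSCH" (see
+ * wl_mschdbg_event_handler() below), MSCH_EVENT_HEAD(8) prints the prefix
+ * "MSCH_E:  " followed by 8 spaces of indentation (one 4-space group per
+ * 4 units of 'space').
+ */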
+
+static uint64 solt_start_time[4], req_start_time[4], profiler_start_time[4];
+static uint32 solt_chanspec[4] = {0, }, req_start[4] = {0, };
+static bool lastMessages = FALSE;
+
+#define US_PRE_SEC		1000000
+
+static void dhd_mschdbg_us_to_sec(uint32 time_h, uint32 time_l, uint32 *sec, uint32 *remain)
+{
+	uint64 cur_time = ((uint64)(ntoh32(time_h)) << 32) | ntoh32(time_l);
+	uint64 r, u = 0;
+
+	r = cur_time;
+	while (time_h != 0) {
+		u += (uint64)((0xffffffff / US_PRE_SEC)) * time_h;
+		r = cur_time - u * US_PRE_SEC;
+		time_h = (uint32)(r >> 32);
+	}
+
+	*sec = (uint32)(u + ((uint32)(r) / US_PRE_SEC));
+	*remain = (uint32)(r) % US_PRE_SEC;
+}
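+
+/*
+ * Example (illustrative): a 64-bit timestamp of 5000123 microseconds
+ * (time_h = 0, time_l = 5000123) yields sec = 5 and remain = 123, which
+ * dhd_mschdbg_display_time() below renders as "5.000123".
+ */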
+
+static char *dhd_mschdbg_display_time(uint32 time_h, uint32 time_l)
+{
+	static char display_time[32];
+	uint32 s, ss;
+
+	if (time_h == 0xffffffff && time_l == 0xffffffff) {
+		snprintf(display_time, 31, "-1");
+	} else {
+		dhd_mschdbg_us_to_sec(time_h, time_l, &s, &ss);
+		snprintf(display_time, 31, "%d.%06d", s, ss);
+	}
+	return display_time;
+}
+
+static void
+dhd_mschdbg_chanspec_list(int sp, char *data, uint16 ptr, uint16 chanspec_cnt)
+{
+	int i, cnt = (int)ntoh16(chanspec_cnt);
+	uint16 *chanspec_list = (uint16 *)(data + ntoh16(ptr));
+	char buf[CHANSPEC_STR_LEN];
+	chanspec_t c;
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("<chanspec_list>:"));
+	for (i = 0; i < cnt; i++) {
+		c = (chanspec_t)ntoh16(chanspec_list[i]);
+		MSCH_EVENT((" %s", wf_chspec_ntoa(c, buf)));
+	}
+	MSCH_EVENT(("\n"));
+}
+
+static void
+dhd_mschdbg_elem_list(int sp, char *title, char *data, uint16 ptr, uint16 list_cnt)
+{
+	int i, cnt = (int)ntoh16(list_cnt);
+	uint32 *list = (uint32 *)(data + ntoh16(ptr));
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("%s_list: ", title));
+	for (i = 0; i < cnt; i++) {
+		MSCH_EVENT(("0x%08x->", ntoh32(list[i])));
+	}
+	MSCH_EVENT(("null\n"));
+}
+
+static void
+dhd_mschdbg_req_param_profiler_event_data(int sp, int ver, char *data, uint16 ptr)
+{
+	int sn = sp + 4;
+	msch_req_param_profiler_event_data_t *p =
+		(msch_req_param_profiler_event_data_t *)(data + ntoh16(ptr));
+	uint32 type, flags;
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("<request parameters>\n"));
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("req_type: "));
+
+	type = p->req_type;
+	if (type < 4) {
+		char *req_type[] = {"fixed", "start-flexible", "duration-flexible",
+			"both-flexible"};
+		MSCH_EVENT(("%s", req_type[type]));
+	}
+	else
+		MSCH_EVENT(("unknown(%d)", type));
+
+	flags = ntoh16(p->flags);
+	if (flags & WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS)
+		MSCH_EVENT((", CHAN_CONTIGUOUS"));
+	if (flags & WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS)
+		MSCH_EVENT((", MERGE_CONT_SLOTS"));
+	if (flags & WL_MSCH_REQ_FLAGS_PREMTABLE)
+		MSCH_EVENT((", PREMTABLE"));
+	if (flags & WL_MSCH_REQ_FLAGS_PREMT_CURTS)
+		MSCH_EVENT((", PREMT_CURTS"));
+	if (flags & WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE)
+		MSCH_EVENT((", PREMT_IMMEDIATE"));
+	MSCH_EVENT((", priority: %d\n", p->priority));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("start-time: %s, duration: %d(us), interval: %d(us)\n",
+		dhd_mschdbg_display_time(p->start_time_h, p->start_time_l),
+		ntoh32(p->duration), ntoh32(p->interval)));
+
+	if (type == WL_MSCH_RT_DUR_FLEX) {
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("dur_flex: %d(us)\n", ntoh32(p->flex.dur_flex)));
+	} else if (type == WL_MSCH_RT_BOTH_FLEX) {
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("min_dur: %d(us), max_away_dur: %d(us)\n",
+			ntoh32(p->flex.bf.min_dur), ntoh32(p->flex.bf.max_away_dur)));
+
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("hi_prio_time: %s, hi_prio_interval: %d(us)\n",
+			dhd_mschdbg_display_time(p->flex.bf.hi_prio_time_h,
+			p->flex.bf.hi_prio_time_l),
+			ntoh32(p->flex.bf.hi_prio_interval)));
+	}
+}
+
+static void
+dhd_mschdbg_timeslot_profiler_event_data(int sp, int ver, char *title, char *data,
+	uint16 ptr, bool empty)
+{
+	int s, sn = sp + 4;
+	msch_timeslot_profiler_event_data_t *p =
+		(msch_timeslot_profiler_event_data_t *)(data + ntoh16(ptr));
+	char *state[] = {"NONE", "CHN_SW", "ONCHAN_FIRE", "OFF_CHN_PREP",
+		"OFF_CHN_DONE", "TS_COMPLETE"};
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("<%s timeslot>: ", title));
+	if (empty) {
+		MSCH_EVENT((" null\n"));
+		return;
+	}
+	else
+		MSCH_EVENT(("0x%08x\n", ntoh32(p->p_timeslot)));
+
+	s = (int)(ntoh32(p->state));
+	if (s > 5) s = 0;
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("id: %d, state[%d]: %s, chan_ctxt: [0x%08x]\n",
+		ntoh32(p->timeslot_id), ntoh32(p->state), state[s], ntoh32(p->p_chan_ctxt)));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("fire_time: %s",
+		dhd_mschdbg_display_time(p->fire_time_h, p->fire_time_l)));
+
+	MSCH_EVENT((", pre_start_time: %s",
+		dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l)));
+
+	MSCH_EVENT((", end_time: %s",
+		dhd_mschdbg_display_time(p->end_time_h, p->end_time_l)));
+
+	MSCH_EVENT((", sch_dur: %s\n",
+		dhd_mschdbg_display_time(p->sch_dur_h, p->sch_dur_l)));
+}
+
+static void
+dhd_mschdbg_req_timing_profiler_event_data(int sp, int ver, char *title, char *data,
+	uint16 ptr, bool empty)
+{
+	int sn = sp + 4;
+	msch_req_timing_profiler_event_data_t *p =
+		(msch_req_timing_profiler_event_data_t *)(data + ntoh16(ptr));
+	uint32 type;
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("<%s req_timing>: ", title));
+	if (empty) {
+		MSCH_EVENT((" null\n"));
+		return;
+	}
+	else
+		MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+			ntoh32(p->p_req_timing), ntoh32(p->p_prev), ntoh32(p->p_next)));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("flags:"));
+	type = ntoh16(p->flags);
+	if ((type & 0x7f) == 0)
+		MSCH_EVENT((" NONE"));
+	else {
+		if (type & WL_MSCH_RC_FLAGS_ONCHAN_FIRE)
+			MSCH_EVENT((" ONCHAN_FIRE"));
+		if (type & WL_MSCH_RC_FLAGS_START_FIRE_DONE)
+			MSCH_EVENT((" START_FIRE"));
+		if (type & WL_MSCH_RC_FLAGS_END_FIRE_DONE)
+			MSCH_EVENT((" END_FIRE"));
+		if (type & WL_MSCH_RC_FLAGS_ONFIRE_DONE)
+			MSCH_EVENT((" ONFIRE_DONE"));
+		if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_START)
+			MSCH_EVENT((" SPLIT_SLOT_START"));
+		if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_END)
+			MSCH_EVENT((" SPLIT_SLOT_END"));
+		if (type & WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE)
+			MSCH_EVENT((" PRE_ONFIRE_DONE"));
+	}
+	MSCH_EVENT(("\n"));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("pre_start_time: %s",
+		dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l)));
+
+	MSCH_EVENT((", start_time: %s",
+		dhd_mschdbg_display_time(p->start_time_h, p->start_time_l)));
+
+	MSCH_EVENT((", end_time: %s\n",
+		dhd_mschdbg_display_time(p->end_time_h, p->end_time_l)));
+
+	if (p->p_timeslot && (p->timeslot_ptr == 0)) {
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("<%s timeslot>: 0x%08x\n", title, ntoh32(p->p_timeslot)));
+	} else
+		dhd_mschdbg_timeslot_profiler_event_data(sn, ver, title, data, p->timeslot_ptr,
+			(p->timeslot_ptr == 0));
+}
+
+static void
+dhd_mschdbg_chan_ctxt_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty)
+{
+	int sn = sp + 4;
+	msch_chan_ctxt_profiler_event_data_t *p =
+		(msch_chan_ctxt_profiler_event_data_t *)(data + ntoh16(ptr));
+	chanspec_t c;
+	char buf[CHANSPEC_STR_LEN];
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("<chan_ctxt>: "));
+	if (empty) {
+		MSCH_EVENT((" null\n"));
+		return;
+	}
+	else
+		MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+			ntoh32(p->p_chan_ctxt), ntoh32(p->p_prev), ntoh32(p->p_next)));
+
+	c = (chanspec_t)ntoh16(p->chanspec);
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("channel: %s, bf_sch_pending: %s, bf_skipped: %d\n",
+		wf_chspec_ntoa(c, buf), p->bf_sch_pending? "TRUE" : "FALSE",
+		ntoh32(p->bf_skipped_count)));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("bf_link: prev 0x%08x, next 0x%08x\n",
+		ntoh32(p->bf_link_prev), ntoh32(p->bf_link_next)));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("onchan_time: %s",
+		dhd_mschdbg_display_time(p->onchan_time_h, p->onchan_time_l)));
+	MSCH_EVENT((", actual_onchan_dur: %s",
+		dhd_mschdbg_display_time(p->actual_onchan_dur_h, p->actual_onchan_dur_l)));
+	MSCH_EVENT((", pend_onchan_dur: %s\n",
+		dhd_mschdbg_display_time(p->pend_onchan_dur_h, p->pend_onchan_dur_l)));
+
+	dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr,
+		p->req_entity_list_cnt);
+	dhd_mschdbg_elem_list(sn, "bf_entity", data, p->bf_entity_list_ptr,
+		p->bf_entity_list_cnt);
+}
+
+static void
+dhd_mschdbg_req_entity_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty)
+{
+	int sn = sp + 4;
+	msch_req_entity_profiler_event_data_t *p =
+		(msch_req_entity_profiler_event_data_t *)(data + ntoh16(ptr));
+	char buf[CHANSPEC_STR_LEN];
+	chanspec_t c;
+	uint32 flags;
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("<req_entity>: "));
+	if (empty) {
+		MSCH_EVENT((" null\n"));
+		return;
+	}
+	else
+		MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+			ntoh32(p->p_req_entity), ntoh32(p->req_hdl_link_prev),
+			ntoh32(p->req_hdl_link_next)));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("req_hdl: [0x%08x]\n", ntoh32(p->p_req_hdl)));
+
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("chan_ctxt_link: prev 0x%08x, next 0x%08x\n",
+		ntoh32(p->chan_ctxt_link_prev), ntoh32(p->chan_ctxt_link_next)));
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("rt_specific_link: prev 0x%08x, next 0x%08x\n",
+		ntoh32(p->rt_specific_link_prev), ntoh32(p->rt_specific_link_next)));
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("start_fixed_link: prev 0x%08x, next 0x%08x\n",
+		ntoh32(p->start_fixed_link_prev), ntoh32(p->start_fixed_link_next)));
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("both_flex_list: prev 0x%08x, next 0x%08x\n",
+		ntoh32(p->both_flex_list_prev), ntoh32(p->both_flex_list_next)));
+
+	c = (chanspec_t)ntoh16(p->chanspec);
+	MSCH_EVENT_HEAD(sn);
+	if (ver >= 2) {
+		MSCH_EVENT(("channel: %s, onchan Id %d, current chan Id %d, priority %d",
+			wf_chspec_ntoa(c, buf), ntoh16(p->onchan_chn_idx), ntoh16(p->cur_chn_idx),
+			ntoh16(p->priority)));
+		flags = ntoh32(p->flags);
+		if (flags & WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE)
+			MSCH_EVENT((" : MULTI_INSTANCE\n"));
+		else
+			MSCH_EVENT(("\n"));
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("actual_start_time: %s, ",
+			dhd_mschdbg_display_time(p->actual_start_time_h, p->actual_start_time_l)));
+		MSCH_EVENT(("curts_fire_time: %s, ",
+			dhd_mschdbg_display_time(p->curts_fire_time_h, p->curts_fire_time_l)));
+	} else {
+		MSCH_EVENT(("channel: %s, priority %d, ", wf_chspec_ntoa(c, buf),
+			ntoh16(p->priority)));
+	}
+	MSCH_EVENT(("bf_last_serv_time: %s\n",
+		dhd_mschdbg_display_time(p->bf_last_serv_time_h, p->bf_last_serv_time_l)));
+
+	dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "current", data, p->cur_slot_ptr,
+		(p->cur_slot_ptr == 0));
+	dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "pending", data, p->pend_slot_ptr,
+		(p->pend_slot_ptr == 0));
+
+	if (p->p_chan_ctxt && (p->chan_ctxt_ptr == 0)) {
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("<chan_ctxt>: 0x%08x\n", ntoh32(p->p_chan_ctxt)));
+	}
+	else
+		dhd_mschdbg_chan_ctxt_profiler_event_data(sn, ver, data, p->chan_ctxt_ptr,
+			(p->chan_ctxt_ptr == 0));
+}
+
+static void
+dhd_mschdbg_req_handle_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty)
+{
+	int sn = sp + 4;
+	msch_req_handle_profiler_event_data_t *p =
+		(msch_req_handle_profiler_event_data_t *)(data + ntoh16(ptr));
+	uint32 flags;
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("<req_handle>: "));
+	if (empty) {
+		MSCH_EVENT((" null\n"));
+		return;
+	}
+	else
+		MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n",
+			ntoh32(p->p_req_handle), ntoh32(p->p_prev), ntoh32(p->p_next)));
+
+	dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr,
+		p->req_entity_list_cnt);
+	MSCH_EVENT_HEAD(sn);
+	MSCH_EVENT(("cb_func: [0x%08x], cb_ctxt: [0x%08x]",
+		ntoh32(p->cb_func), ntoh32(p->cb_ctxt)));
+	if (ver < 2) {
+		MSCH_EVENT((", chan_cnt: %d", ntoh16(p->chan_cnt)));
+	}
+	flags = ntoh32(p->flags);
+	if (flags & WL_MSCH_REQ_HDL_FLAGS_NEW_REQ)
+		MSCH_EVENT((", NEW_REQ"));
+	MSCH_EVENT(("\n"));
+
+	dhd_mschdbg_req_param_profiler_event_data(sn, ver, data, p->req_param_ptr);
+
+	if (ver >= 2) {
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("req_time: %s\n",
+			dhd_mschdbg_display_time(p->req_time_h, p->req_time_l)));
+		MSCH_EVENT_HEAD(sn);
+		MSCH_EVENT(("chan_cnt: %d, chan idx %d, last chan idx %d\n",
+			ntoh16(p->chan_cnt), ntoh16(p->chan_idx), ntoh16(p->last_chan_idx)));
+		if (p->chanspec_list && p->chanspec_cnt) {
+			dhd_mschdbg_chanspec_list(sn, data, p->chanspec_list, p->chanspec_cnt);
+		}
+	}
+}
+
+static void
+dhd_mschdbg_profiler_profiler_event_data(int sp, int ver, char *data, uint16 ptr)
+{
+	msch_profiler_profiler_event_data_t *p =
+		(msch_profiler_profiler_event_data_t *)(data + ntoh16(ptr));
+	uint32 flags;
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("free list: req_hdl 0x%08x, req_entity 0x%08x,"
+		" chan_ctxt 0x%08x, chanspec 0x%08x\n",
+		ntoh32(p->free_req_hdl_list), ntoh32(p->free_req_entity_list),
+		ntoh32(p->free_chan_ctxt_list), ntoh32(p->free_chanspec_list)));
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("alloc count: chanspec %d, req_entity %d, req_hdl %d, "
+		"chan_ctxt %d, timeslot %d\n",
+		ntoh16(p->msch_chanspec_alloc_cnt), ntoh16(p->msch_req_entity_alloc_cnt),
+		ntoh16(p->msch_req_hdl_alloc_cnt), ntoh16(p->msch_chan_ctxt_alloc_cnt),
+		ntoh16(p->msch_timeslot_alloc_cnt)));
+
+	dhd_mschdbg_elem_list(sp, "req_hdl", data, p->msch_req_hdl_list_ptr,
+		p->msch_req_hdl_list_cnt);
+	dhd_mschdbg_elem_list(sp, "chan_ctxt", data, p->msch_chan_ctxt_list_ptr,
+		p->msch_chan_ctxt_list_cnt);
+	dhd_mschdbg_elem_list(sp, "req_timing", data, p->msch_req_timing_list_ptr,
+		p->msch_req_timing_list_cnt);
+	dhd_mschdbg_elem_list(sp, "start_fixed", data, p->msch_start_fixed_list_ptr,
+		p->msch_start_fixed_list_cnt);
+	dhd_mschdbg_elem_list(sp, "both_flex_req_entity", data,
+		p->msch_both_flex_req_entity_list_ptr,
+		p->msch_both_flex_req_entity_list_cnt);
+	dhd_mschdbg_elem_list(sp, "start_flex", data, p->msch_start_flex_list_ptr,
+		p->msch_start_flex_list_cnt);
+	dhd_mschdbg_elem_list(sp, "both_flex", data, p->msch_both_flex_list_ptr,
+		p->msch_both_flex_list_cnt);
+
+	if (p->p_cur_msch_timeslot && (p->cur_msch_timeslot_ptr == 0)) {
+		MSCH_EVENT_HEAD(sp);
+		MSCH_EVENT(("<cur_msch timeslot>: 0x%08x\n",
+			ntoh32(p->p_cur_msch_timeslot)));
+	} else
+		dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "cur_msch", data,
+			p->cur_msch_timeslot_ptr, (p->cur_msch_timeslot_ptr == 0));
+
+	if (p->p_next_timeslot && (p->next_timeslot_ptr == 0)) {
+		MSCH_EVENT_HEAD(sp);
+		MSCH_EVENT(("<next timeslot>: 0x%08x\n",
+			ntoh32(p->p_next_timeslot)));
+	} else
+		dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "next", data,
+			p->next_timeslot_ptr, (p->next_timeslot_ptr == 0));
+
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("ts_id: %d, ", ntoh32(p->ts_id)));
+	flags = ntoh32(p->flags);
+	if (flags & WL_MSCH_STATE_IN_TIEMR_CTXT)
+		MSCH_EVENT(("IN_TIEMR_CTXT, "));
+	if (flags & WL_MSCH_STATE_SCHD_PENDING)
+		MSCH_EVENT(("SCHD_PENDING, "));
+	MSCH_EVENT(("slotskip_flags: %d, cur_armed_timeslot: 0x%08x\n",
+		(ver >= 2)? ntoh32(p->slotskip_flag) : 0, ntoh32(p->cur_armed_timeslot)));
+	MSCH_EVENT_HEAD(sp);
+	MSCH_EVENT(("flex_list_cnt: %d, service_interval: %d, "
+		"max_lo_prio_interval: %d\n",
+		ntoh16(p->flex_list_cnt), ntoh32(p->service_interval),
+		ntoh32(p->max_lo_prio_interval)));
+}
+
+static void dhd_mschdbg_dump_data(dhd_pub_t *dhdp, void *raw_event_ptr, int type,
+	char *data, int len)
+{
+	uint64 t = 0, tt = 0;
+	uint32 s = 0, ss = 0;
+	int wlc_index, ver;
+
+	ver = (type & WL_MSCH_PROFILER_VER_MASK) >> WL_MSCH_PROFILER_VER_SHIFT;
+	wlc_index = (type & WL_MSCH_PROFILER_WLINDEX_MASK) >> WL_MSCH_PROFILER_WLINDEX_SHIFT;
+	if (wlc_index >= 4)
+		return;
+
+	type &= WL_MSCH_PROFILER_TYPE_MASK;
+	if (type <= WL_MSCH_PROFILER_PROFILE_END) {
+		msch_profiler_event_data_t *pevent = (msch_profiler_event_data_t *)data;
+		tt = ((uint64)(ntoh32(pevent->time_hi)) << 32) | ntoh32(pevent->time_lo);
+		dhd_mschdbg_us_to_sec(pevent->time_hi, pevent->time_lo, &s, &ss);
+	}
+
+	if (lastMessages && (type != WL_MSCH_PROFILER_MESSAGE) &&
+		(type != WL_MSCH_PROFILER_EVENT_LOG)) {
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("\n"));
+		lastMessages = FALSE;
+	}
+
+	switch (type) {
+	case WL_MSCH_PROFILER_START:
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d START\n", s, ss));
+		break;
+
+	case WL_MSCH_PROFILER_EXIT:
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d EXIT\n", s, ss));
+		break;
+
+	case WL_MSCH_PROFILER_REQ:
+	{
+		msch_req_profiler_event_data_t *p = (msch_req_profiler_event_data_t *)data;
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("\n"));
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("===============================\n"));
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d [wl%d] REGISTER:\n", s, ss, wlc_index));
+		dhd_mschdbg_req_param_profiler_event_data(4, ver, data, p->req_param_ptr);
+		dhd_mschdbg_chanspec_list(4, data, p->chanspec_ptr, p->chanspec_cnt);
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("===============================\n"));
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("\n"));
+	}
+		break;
+
+	case WL_MSCH_PROFILER_CALLBACK:
+	{
+		msch_callback_profiler_event_data_t *p =
+			(msch_callback_profiler_event_data_t *)data;
+		char buf[CHANSPEC_STR_LEN];
+		chanspec_t chanspec;
+		uint16 cbtype;
+
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d [wl%d] CALLBACK: ", s, ss, wlc_index));
+		chanspec = (chanspec_t)ntoh16(p->chanspec);
+		MSCH_EVENT(("req_hdl[0x%08x], channel %s --",
+			ntoh32(p->p_req_hdl), wf_chspec_ntoa(chanspec, buf)));
+
+		cbtype = ntoh16(p->type);
+		if (cbtype & WL_MSCH_CT_ON_CHAN)
+			MSCH_EVENT((" ON_CHAN"));
+		if (cbtype & WL_MSCH_CT_OFF_CHAN)
+			MSCH_EVENT((" OFF_CHAN"));
+		if (cbtype & WL_MSCH_CT_REQ_START)
+			MSCH_EVENT((" REQ_START"));
+		if (cbtype & WL_MSCH_CT_REQ_END)
+			MSCH_EVENT((" REQ_END"));
+		if (cbtype & WL_MSCH_CT_SLOT_START)
+			MSCH_EVENT((" SLOT_START"));
+		if (cbtype & WL_MSCH_CT_SLOT_SKIP)
+			MSCH_EVENT((" SLOT_SKIP"));
+		if (cbtype & WL_MSCH_CT_SLOT_END)
+			MSCH_EVENT((" SLOT_END"));
+		if (cbtype & WL_MSCH_CT_OFF_CHAN_DONE)
+			MSCH_EVENT((" OFF_CHAN_DONE"));
+		if (cbtype & WL_MSCH_CT_PARTIAL)
+			MSCH_EVENT((" PARTIAL"));
+		if (cbtype & WL_MSCH_CT_PRE_ONCHAN)
+			MSCH_EVENT((" PRE_ONCHAN"));
+		if (cbtype & WL_MSCH_CT_PRE_REQ_START)
+			MSCH_EVENT((" PRE_REQ_START"));
+
+		if (cbtype & WL_MSCH_CT_REQ_START) {
+			req_start[wlc_index] = 1;
+			req_start_time[wlc_index] = tt;
+		} else if (cbtype & WL_MSCH_CT_REQ_END) {
+			if (req_start[wlc_index]) {
+				MSCH_EVENT((" : REQ duration %d",
+					(uint32)(tt - req_start_time[wlc_index])));
+				req_start[wlc_index] = 0;
+			}
+		}
+
+		if (cbtype & WL_MSCH_CT_SLOT_START) {
+			solt_chanspec[wlc_index] = p->chanspec;
+			solt_start_time[wlc_index] = tt;
+		} else if (cbtype & WL_MSCH_CT_SLOT_END) {
+			if (p->chanspec == solt_chanspec[wlc_index]) {
+				MSCH_EVENT((" : SLOT duration %d",
+					(uint32)(tt - solt_start_time[wlc_index])));
+				solt_chanspec[wlc_index] = 0;
+			}
+		}
+		MSCH_EVENT(("\n"));
+
+		if (cbtype & (WL_MSCH_CT_ON_CHAN | WL_MSCH_CT_SLOT_SKIP)) {
+			MSCH_EVENT_HEAD(4);
+			if (cbtype & WL_MSCH_CT_ON_CHAN) {
+				MSCH_EVENT(("ID %d onchan idx %d cur_chan_seq_start %s ",
+					ntoh32(p->timeslot_id), ntoh32(p->onchan_idx),
+					dhd_mschdbg_display_time(p->cur_chan_seq_start_time_h,
+					p->cur_chan_seq_start_time_l)));
+			}
+			t = ((uint64)(ntoh32(p->start_time_h)) << 32) |
+				ntoh32(p->start_time_l);
+			MSCH_EVENT(("start %s ",
+				dhd_mschdbg_display_time(p->start_time_h,
+				p->start_time_l)));
+			tt = ((uint64)(ntoh32(p->end_time_h)) << 32) | ntoh32(p->end_time_l);
+			MSCH_EVENT(("end %s duration %d\n",
+				dhd_mschdbg_display_time(p->end_time_h, p->end_time_l),
+				(p->end_time_h == 0xffffffff && p->end_time_l == 0xffffffff)?
+				-1 : (int)(tt - t)));
+		}
+
+	}
+		break;
+
+	case WL_MSCH_PROFILER_EVENT_LOG:
+	{
+		while (len > 0) {
+			msch_event_log_profiler_event_data_t *p =
+				(msch_event_log_profiler_event_data_t *)data;
+			int size = WL_MSCH_EVENT_LOG_HEAD_SIZE + p->hdr.count * sizeof(uint32);
+			data += size;
+			len -= size;
+			dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
+			MSCH_EVENT_HEAD(0);
+			MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+			p->hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
+			p->hdr.fmt_num = ntoh16(p->hdr.fmt_num);
+			dhd_dbg_verboselog_printf(dhdp, &p->hdr, raw_event_ptr, p->data);
+		}
+		lastMessages = TRUE;
+		break;
+	}
+
+	case WL_MSCH_PROFILER_MESSAGE:
+	{
+		msch_message_profiler_event_data_t *p = (msch_message_profiler_event_data_t *)data;
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d [wl%d]: %s", s, ss, wlc_index, p->message));
+		lastMessages = TRUE;
+		break;
+	}
+
+	case WL_MSCH_PROFILER_PROFILE_START:
+		profiler_start_time[wlc_index] = tt;
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("-------------------------------\n"));
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d [wl%d] PROFILE DATA:\n", s, ss, wlc_index));
+		dhd_mschdbg_profiler_profiler_event_data(4, ver, data, 0);
+		break;
+
+	case WL_MSCH_PROFILER_PROFILE_END:
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d [wl%d] PROFILE END: take time %d\n", s, ss,
+			wlc_index, (uint32)(tt - profiler_start_time[wlc_index])));
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("-------------------------------\n"));
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("\n"));
+		break;
+
+	case WL_MSCH_PROFILER_REQ_HANDLE:
+		dhd_mschdbg_req_handle_profiler_event_data(4, ver, data, 0, FALSE);
+		break;
+
+	case WL_MSCH_PROFILER_REQ_ENTITY:
+		dhd_mschdbg_req_entity_profiler_event_data(4, ver, data, 0, FALSE);
+		break;
+
+	case WL_MSCH_PROFILER_CHAN_CTXT:
+		dhd_mschdbg_chan_ctxt_profiler_event_data(4, ver, data, 0, FALSE);
+		break;
+
+	case WL_MSCH_PROFILER_REQ_TIMING:
+		dhd_mschdbg_req_timing_profiler_event_data(4, ver, "msch", data, 0, FALSE);
+		break;
+
+	default:
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("[wl%d] ERROR: unsupported EVENT reason code:%d; ",
+			wlc_index, type));
+		break;
+	}
+}
+
+void
+wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, void *data, int len)
+{
+	head_log = "MSCH";
+	dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, (char *)data, len);
+}
+
+void
+wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int tag, uint32 *log_ptr)
+{
+	head_log = "CONSOLE";
+	if (tag == EVENT_LOG_TAG_MSCHPROFILE) {
+		msch_event_log_profiler_event_data_t *p =
+			(msch_event_log_profiler_event_data_t *)log_ptr;
+		uint32 s, ss;
+		dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
+		MSCH_EVENT_HEAD(0);
+		MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
+		p->hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
+		p->hdr.fmt_num = ntoh16(p->hdr.fmt_num);
+		dhd_dbg_verboselog_printf(dhdp, &p->hdr, raw_event_ptr, p->data);
+	} else {
+		msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr;
+		int type = ntoh16(p->type);
+		int len = ntoh16(p->size);
+		dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, p->value, len);
+	}
+}
+#endif /* SHOW_LOGTRACE */
diff --git a/drivers/net/wireless/bcmdhd/dhd_mschdbg.h b/drivers/net/wireless/bcmdhd/dhd_mschdbg.h
new file mode 100644
index 0000000..749e111
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_mschdbg.h
@@ -0,0 +1,39 @@
+/*
+ * DHD debugability header file
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_mschdbg.h 571265 2015-07-14 20:50:18Z $
+ */
+
+#ifndef _dhd_mschdbg_h_
+#define _dhd_mschdbg_h_
+
+#ifdef SHOW_LOGTRACE
+extern void wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type,
+	void *data, int len);
+extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int tag,
+	uint32 *log_ptr);
+#endif /* SHOW_LOGTRACE */
+
+#endif /* _dhd_mschdbg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_msgbuf.c b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
new file mode 100644
index 0000000..41ae6e0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
@@ -0,0 +1,8951 @@
+/**
+ * @file definition of host message ring functionality
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_msgbuf.c 704361 2017-06-13 08:50:38Z $
+ */
+
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmmsgbuf.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+
+#include <dhd_bus.h>
+
+#include <dhd_dbg.h>
+#include <siutils.h>
+#include <dhd_debug.h>
+
+#include <dhd_flowring.h>
+
+#include <pcie_core.h>
+#include <bcmpcie.h>
+#include <dhd_pcie.h>
+#include <dhd_config.h>
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
+
+#if defined(DHD_LB)
+#include <linux/cpu.h>
+#include <bcm_ring.h>
+#define DHD_LB_WORKQ_SZ			    (8192)
+#define DHD_LB_WORKQ_SYNC           (16)
+#define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
+#endif /* DHD_LB */
+
+#include <hnd_debug.h>
+#include <hnd_armtrap.h>
+
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+
+extern char dhd_version[];
+extern char fw_version[];
+
+/**
+ * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
+ * address where a value must be written. Host may also request interrupt
+ * coalescing on this soft doorbell.
+ * Use Case: Hosts with network processors may register with the dongle the
+ * network processor's thread wakeup register and a value corresponding to the
+ * core/thread context. Dongle will issue a write transaction <address,value>
+ * to the PCIE RC which will need to be routed to the mapped register space by
+ * the host.
+ */
+/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
+
+/* Dependency Check */
+#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
+#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
+#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+
+#define DEFAULT_RX_BUFFERS_TO_POST	256
+#define RXBUFPOST_THRESHOLD			32
+#define RX_BUF_BURST				32 /* Rx buffers for MSDU Data */
+
+#define DHD_STOP_QUEUE_THRESHOLD	200
+#define DHD_START_QUEUE_THRESHOLD	100
+
+#define RX_DMA_OFFSET		8 /* Mem2mem DMA inserts an extra 8 bytes */
+#define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
+
+/* flags for ioctl pending status */
+#define MSGBUF_IOCTL_ACK_PENDING	(1<<0)
+#define MSGBUF_IOCTL_RESP_PENDING	(1<<1)
+
+#define DMA_ALIGN_LEN		4
+
+#define DMA_D2H_SCRATCH_BUF_LEN	8
+#define DMA_XFER_LEN_LIMIT	0x400000
+
+#ifdef BCM_HOST_BUF
+#ifndef DMA_HOST_BUFFER_LEN
+#define DMA_HOST_BUFFER_LEN	0x200000
+#endif
+#endif /* BCM_HOST_BUF */
+
+#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192
+
+#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
+#define DHD_FLOWRING_MAX_EVENTBUF_POST			32
+#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
+#define DHD_H2D_INFORING_MAX_BUF_POST			32
+#define DHD_MAX_TSBUF_POST			8
+
+#define DHD_PROT_FUNCS	41
+
+/* Length of buffer in host for bus throughput measurement */
+#define DHD_BUS_TPUT_BUF_LEN 2048
+
+#define TXP_FLUSH_NITEMS
+
+/* optimization to write "n" tx items at a time to ring */
+#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48
+
+#define RING_NAME_MAX_LENGTH		24
+#define CTRLSUB_HOSTTS_MEESAGE_SIZE		1024
+/* Giving room before ioctl_trans_id rollsover. */
+#define BUFFER_BEFORE_ROLLOVER 300
+
+struct msgbuf_ring; /* ring context for common and flow rings */
+
+/**
+ * PCIE D2H DMA Complete Sync Modes
+ *
+ * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
+ * host system memory. A WAR using one of the following approaches is needed:
+ * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
+ * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
+ *    and writes it in the last word of each work item. Each work item carries
+ *    a seqnum = sequence number % 253.
+ *
+ * 3. Read Barrier: Dongle does a host memory read access prior to posting an
+ *    interrupt, ensuring that D2H data transfer indeed completed.
+ * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
+ *    ring contents before the indices.
+ *
+ * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
+ * callback (see dhd_prot_d2h_sync_none) may be bound.
+ *
+ * Dongle advertises host-side sync mechanism requirements.
+ */
+
+#define PCIE_D2H_SYNC_WAIT_TRIES    (512UL)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS  (5UL)
+#define PCIE_D2H_SYNC_DELAY         (100UL)	/* in terms of usecs */
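+
+/*
+ * Sketch of the SEQNUM sync mode (illustrative; the actual handlers are the
+ * dhd_prot_d2h_sync_* callbacks declared later in this file): the host
+ * re-reads the work item and compares the dongle-written modulo-253 seqnum in
+ * its last word against the ring's expected value, waiting PCIE_D2H_SYNC_DELAY
+ * usecs between attempts for up to PCIE_D2H_SYNC_WAIT_TRIES tries before
+ * reporting a livelock via dhd_prot_d2h_sync_livelock().
+ */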
+
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+                                volatile cmn_msg_hdr_t *msg, int msglen);
+
+/*
+ * +----------------------------------------------------------------------------
+ *
+ * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
+ * flowids do not.
+ *
+ * Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
+ * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
+ *
+ * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
+ *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
+ *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. including the 3 D2H common rings.
+ *
+ *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
+ *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
+ *
+ *  D2H Control  Complete RingId = 2
+ *  D2H Transmit Complete RingId = 3
+ *  D2H Receive  Complete RingId = 4
+ *
+ *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
+ *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
+ *  H2D TxPost   FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
+ *
+ * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
+ * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
+ *
+ * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
+ * FlowId values would be in the range [2..133] and the corresponding
+ * RingId values would be in the range [5..136].
+ *
+ * The flowId allocator may choose to allocate FlowIds:
+ *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
+ *   X# of uc flowids in consecutive ranges (per station Id), where X is the
+ *   packet's access category (e.g. 4 uc flowids per station).
+ *
+ * CAUTION:
+ * When DMA indices array feature is used, RingId=5, corresponding to the 0th
+ * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
+ * since the FlowId truly represents the index in the H2D DMA indices array.
+ *
+ * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
+ * will represent the index in the D2H DMA indices array.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* First TxPost Flowring Id */
+#define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS
+
+/* Determine whether a ringid belongs to a TxPost flowring */
+#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
+	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
+	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
+
+/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
+#define DHD_FLOWID_TO_RINGID(flowid) \
+	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
+
+/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
+#define DHD_RINGID_TO_FLOWID(ringid) \
+	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
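+
+/*
+ * Worked example (illustrative, using the sample mapping described above with
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5):
+ *	DHD_FLOWID_TO_RINGID(2) = 5 + (2 - 2) = 5	(1st flowring)
+ *	DHD_FLOWID_TO_RINGID(3) = 5 + (3 - 2) = 6	(2nd flowring)
+ *	DHD_RINGID_TO_FLOWID(6) = 2 + (6 - 5) = 3
+ */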
+
+/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
+ * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
+ * any array of H2D rings.
+ */
+#define DHD_H2D_RING_OFFSET(ringid) \
+	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
+
+/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
+ * This may be used for IFRM.
+ */
+#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
+	((ringid) - BCMPCIE_COMMON_MSGRINGS)
+
+/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
+ * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
+ * any array of D2H rings.
+ * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
+ * max_h2d_rings: total number of h2d rings
+ */
+#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
+	((ringid) > (max_h2d_rings) ? \
+		((ringid) - max_h2d_rings) : \
+		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
+
+/* Convert a D2H DMA Indices Offset to a RingId */
+#define DHD_D2H_RINGID(offset) \
+	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
+
+
+#define DHD_DMAH_NULL      ((void*)NULL)
+
+/*
+ * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
+ * buffer does not occupy the entire cacheline, and another object is placed
+ * following the DMA-able buffer, data corruption may occur if the DMA-able
+ * buffer is DMAed into (e.g. the D2H direction) when HW cache coherency
+ * is not available.
+ */
+#if defined(L1_CACHE_BYTES)
+#define DHD_DMA_PAD        (L1_CACHE_BYTES)
+#else
+#define DHD_DMA_PAD        (128)
+#endif
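+
+/*
+ * Illustrative use (an assumption, not a statement about every call site):
+ * a caller sizing a D2H DMA-able buffer could pad the request, e.g.
+ *	dhd_dma_buf_alloc(dhd, dma_buf, buf_len + DHD_DMA_PAD);
+ * so that no unrelated object shares the cacheline at the end of the DMA
+ * target.
+ */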
+
+/* Used in loopback tests */
+typedef struct dhd_dmaxfer {
+	dhd_dma_buf_t srcmem;
+	dhd_dma_buf_t dstmem;
+	uint32        srcdelay;
+	uint32        destdelay;
+	uint32        len;
+	bool          in_progress;
+	uint64        start_usec;
+	uint32	      d11_lpbk;
+} dhd_dmaxfer_t;
+
+/**
+ * msgbuf_ring : This object manages the host side ring that includes a DMA-able
+ * buffer, the WR and RD indices, ring parameters such as the max number of
+ * items and the length of each item, and other miscellaneous runtime state.
+ * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
+ * H2D TxPost ring as specified in the PCIE FullDongle Spec.
+ * Ring parameters are conveyed to the dongle, which maintains its own peer end
+ * ring state. Depending on whether the DMA Indices feature is supported, the
+ * host will update the WR/RD index in the DMA indices array in host memory or
+ * directly in dongle memory.
+ */
+typedef struct msgbuf_ring {
+	bool           inited;
+	uint16         idx;       /* ring id */
+	uint16         rd;        /* read index */
+	uint16         curr_rd;   /* read index for debug */
+	uint16         wr;        /* write index */
+	uint16         max_items; /* maximum number of items in ring */
+	uint16         item_len;  /* length of each item in the ring */
+	sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
+	dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
+	uint32         seqnum;    /* next expected item's sequence number */
+#ifdef TXP_FLUSH_NITEMS
+	void           *start_addr;
+	/* # of messages on ring not yet announced to dongle */
+	uint16         pend_items_count;
+#endif /* TXP_FLUSH_NITEMS */
+
+	uint8   ring_type;
+	uint8   n_completion_ids;
+	bool    create_pending;
+	uint16  create_req_id;
+	uint8   current_phase;
+	uint16	compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
+	uchar		name[RING_NAME_MAX_LENGTH];
+	uint32		ring_mem_allocated;
+} msgbuf_ring_t;
+
+#define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
+#define DHD_RING_END_VA(ring) \
+	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
+	 (((ring)->max_items - 1) * (ring)->item_len))
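+
+/*
+ * Example (illustrative): for a ring with max_items = 256 and item_len = 32,
+ * DHD_RING_END_VA() evaluates to DHD_RING_BGN_VA() + 255 * 32 bytes, i.e. the
+ * start of the last item rather than one byte past the end of the buffer.
+ */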
+
+
+
+/* This can be overwritten by module parameter defined in dhd_linux.c
+ * or by dhd iovar h2d_max_txpost.
+ */
+int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
+
+/** DHD protocol handle. Is an opaque type to other DHD software layers. */
+typedef struct dhd_prot {
+	osl_t *osh;		/* OSL handle */
+	uint16 rxbufpost;
+	uint16 max_rxbufpost;
+	uint16 max_eventbufpost;
+	uint16 max_ioctlrespbufpost;
+	uint16 max_tsbufpost;
+	uint16 max_infobufpost;
+	uint16 infobufpost;
+	uint16 cur_event_bufs_posted;
+	uint16 cur_ioctlresp_bufs_posted;
+	uint16 cur_ts_bufs_posted;
+
+	/* Flow control mechanism based on active transmits pending */
+	uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
+	uint16 h2d_max_txpost;
+	uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */
+
+	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
+	msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
+	msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
+	msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
+	msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
+	msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
+	msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
+	msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
+
+	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
+	dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
+	uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */
+
+	uint32		rx_dataoffset;
+
+	dhd_mb_ring_t	mb_ring_fn;	/* called when dongle needs to be notified of new msg */
+	dhd_mb_ring_2_t	mb_2_ring_fn;	/* called when dongle needs to be notified of new msg */
+
+	/* ioctl related resources */
+	uint8 ioctl_state;
+	int16 ioctl_status;		/* status returned from dongle */
+	uint16 ioctl_resplen;
+	dhd_ioctl_recieved_status_t ioctl_received;
+	uint curr_ioctl_cmd;
+	dhd_dma_buf_t	retbuf;		/* For holding ioctl response */
+	dhd_dma_buf_t	ioctbuf;	/* For holding ioctl request */
+
+	dhd_dma_buf_t	d2h_dma_scratch_buf;	/* For holding d2h scratch */
+
+	/* DMA-able arrays for holding WR and RD indices */
+	uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
+	dhd_dma_buf_t   h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
+	dhd_dma_buf_t	h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
+	dhd_dma_buf_t	d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
+	dhd_dma_buf_t	d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
+	dhd_dma_buf_t h2d_ifrm_indx_wr_buf;	/* Array of H2D WR indices for ifrm */
+
+	dhd_dma_buf_t	host_bus_throughput_buf; /* bus throughput measure buffer */
+
+	dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
+	uint32			flowring_num;
+
+	d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
+	ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
+	ulong d2h_sync_wait_tot; /* total wait loops */
+
+	dhd_dmaxfer_t	dmaxfer; /* for test/DMA loopback */
+
+	uint16		ioctl_seq_no;
+	uint16		data_seq_no;
+	uint16		ioctl_trans_id;
+	void		*pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
+	void		*pktid_rx_map;	/* pktid map for rx path */
+	void		*pktid_tx_map;	/* pktid map for tx path */
+	void		*rx_lock;	/* rx pktid map and rings access protection */
+	bool		metadata_dbg;
+	void		*pktid_map_handle_ioctl;
+
+	/* Applications/utilities can read tx and rx metadata using IOVARs */
+	uint16		rx_metadata_offset;
+	uint16		tx_metadata_offset;
+
+
+#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
+	/* Host's soft doorbell configuration */
+	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
+#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
+
+	/* Work Queues to be used by the producer and the consumer, and threshold
+	 * when the WRITE index must be synced to consumer's workq
+	 */
+#if defined(DHD_LB_TXC)
+	uint32 tx_compl_prod_sync ____cacheline_aligned;
+	bcm_workq_t tx_compl_prod, tx_compl_cons;
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+	uint32 rx_compl_prod_sync ____cacheline_aligned;
+	bcm_workq_t rx_compl_prod, rx_compl_cons;
+#endif /* DHD_LB_RXC */
+
+	dhd_dma_buf_t	fw_trap_buf; /* firmware trap buffer */
+
+	uint32  host_ipc_version; /* Host supported IPC rev */
+	uint32  device_ipc_version; /* FW supported IPC rev */
+	uint32  active_ipc_version; /* Host advertised IPC rev */
+	dhd_dma_buf_t   hostts_req_buf; /* For holding host timestamp request buf */
+	bool    hostts_req_buf_inuse;
+	bool    rx_ts_log_enabled;
+	bool    tx_ts_log_enabled;
+} dhd_prot_t;
+
+extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
+
+static atomic_t dhd_msgbuf_rxbuf_post_event_bufs_running = ATOMIC_INIT(0);
+
+/* Convert a dmaaddr_t to a base_addr with htol operations */
+static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
+
+/* APIs for managing a DMA-able buffer */
+static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+static int  dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
+static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+
+/* msgbuf ring management */
+static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
+static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
+
+/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
+static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
+static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
+static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
+
+/* Fetch and Release a flowring msgbuf_ring from flowring  pool */
+static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
+	uint16 flowid);
+/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
+
+/* Producer: Allocate space in a msgbuf ring */
+static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	uint16 nitems, uint16 *alloced, bool exactly_nitems);
+static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
+	uint16 *alloced, bool exactly_nitems);
+
+/* Consumer: Determine the location where the next message may be consumed */
+static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	uint32 *available_len);
+
+/* Producer (WR index update) or Consumer (RD index update) indication */
+static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	void *p, uint16 len);
+static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+
+static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+	dhd_dma_buf_t *dma_buf, uint32 bufsz);
+
+/* Set/Get a RD or WR index in the array of indices */
+/* See also: dhd_prot_dma_indx_init() */
+void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
+	uint16 ringid);
+static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
+
+/* Locate a packet given a pktid */
+static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
+	bool free_pktid);
+/* Locate a packet given a PktId and free it. */
+static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
+
+static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+	void *buf, uint len, uint8 action);
+static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+	void *buf, uint len, uint8 action);
+static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
+static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
+	void *buf, int ifidx);
+
+/* Post buffers for Rx, control ioctl response and events */
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
+static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
+static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
+static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
+static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
+static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
+
+static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
+
+
+/* D2H Message handling */
+static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
+
+/* D2H Message handlers */
+static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
+
+/* Loopback test with dongle */
+static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
+static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
+	uint destdelay, dhd_dmaxfer_t *dma);
+static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
+
+/* Flowring management communication with dongle */
+static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
+static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
+
+/* Monitor Mode */
+#ifdef WL_MONITOR
+extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
+extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
+#endif /* WL_MONITOR */
+
+/* Configure a soft doorbell per D2H ring */
+static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
+static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
+static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
+static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
+static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
+
+typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
+
+/** callback functions for messages generated by the dongle */
+#define MSG_TYPE_INVALID 0
+
+static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
+	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
+	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
+	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
+	NULL,
+	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
+	NULL,
+	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
+	NULL,
+	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
+	NULL,
+	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
+	NULL,
+	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
+	NULL,
+	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
+	NULL,
+	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
+	NULL,
+	NULL,	/* MSG_TYPE_RX_CMPLT use dedicated handler */
+	NULL,
+	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
+	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
+	dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
+	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
+	dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
+	NULL, /* MSG_TYPE_INFO_BUF_POST */
+	dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
+	NULL, /* MSG_TYPE_H2D_RING_CREATE */
+	NULL, /* MSG_TYPE_D2H_RING_CREATE */
+	dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
+	dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
+	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
+	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
+	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
+	dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
+	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
+	dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
+	NULL,	/* MSG_TYPE_TIMSTAMP_BUFPOST */
+	NULL,	/* MSG_TYPE_HOSTTIMSTAMP */
+	dhd_prot_process_d2h_host_ts_complete,	/* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
+	dhd_prot_process_fw_timestamp,	/* MSG_TYPE_FIRMWARE_TIMESTAMP */
+};
+
+
+#ifdef DHD_RX_CHAINING
+
+#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
+	(dhd_wet_chainable(dhd) && \
+	dhd_rx_pkt_chainable((dhd), (ifidx)) && \
+	!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
+	!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
+	!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
+	!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
+	((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
+	((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
+	(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
+
+static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
+static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
+static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
+
+#define DHD_PKT_CTF_MAX_CHAIN_LEN	64
+
+#endif /* DHD_RX_CHAINING */
+
+static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
+
+/**
+ * D2H DMA to completion callback handlers. Based on the mode advertised by the
+ * dongle through the PCIE shared region, the appropriate callback will be
+ * registered in the proto layer to be invoked prior to processing any message
+ * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
+ * does not require host participation, then a noop callback handler will be
+ * bound that simply returns the msg_type.
+ */
+static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
+                                       uint32 tries, volatile uchar *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+                                      volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+                                       volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+                                    volatile cmn_msg_hdr_t *msg, int msglen);
+static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
+static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
+static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
+static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
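+/*
+ * Editorial usage sketch (not exercised by this patch): once dhd_prot_d2h_sync_init()
+ * has bound prot->d2h_sync_cb, per-item D2H processing is gated by that callback
+ * before dispatching through table_lookup[]. The local name "item" and the explicit
+ * bounds check are illustrative placeholders; the driver's real per-message loop is
+ * dhd_prot_process_msgtype().
+ *
+ *	uint8 msg_type;
+ *
+ *	msg_type = prot->d2h_sync_cb(dhd, ring,
+ *		(volatile cmn_msg_hdr_t *)item, ring->item_len);
+ *	if (msg_type < DHD_PROT_FUNCS && table_lookup[msg_type] != NULL)
+ *		table_lookup[msg_type](dhd, (void *)item); // MSG_TYPE_INVALID (0) is dhd_prot_noop
+ */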
+
+bool
+dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
+{
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
+	uint16 rd, wr;
+	bool ret;
+
+	if (dhd->dma_d2h_ring_upd_support) {
+		wr = flow_ring->wr;
+	} else {
+		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+	}
+	if (dhd->dma_h2d_ring_upd_support) {
+		rd = flow_ring->rd;
+	} else {
+		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+	}
+	ret = (wr == rd) ? TRUE : FALSE;
+	return ret;
+}
+uint16
+dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
+{
+	return (uint16)h2d_max_txpost;
+}
+void
+dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
+{
+	h2d_max_txpost = max_txpost;
+}
+/**
+ * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
+ * not completed, a livelock condition occurs. The host averts this livelock by
+ * dropping the message and moving on to the next one. The dropped message can
+ * lead to a packet leak, or to something disastrous if it happens to be a
+ * control response.
+ * Here we log this condition; one may choose to reboot the dongle.
+ *
+ */
+static void
+dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
+                           volatile uchar *msg, int msglen)
+{
+	uint32 ring_seqnum = ring->seqnum;
+	DHD_ERROR((
+		"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
+		" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
+		dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
+		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
+		ring->dma_buf.va, msg, ring->curr_rd));
+	prhex("D2H MsgBuf Failure", (volatile uchar *)msg, msglen);
+
+	dhd_bus_dump_console_buffer(dhd->bus);
+	dhd_prot_debug_info_print(dhd);
+
+#ifdef DHD_FW_COREDUMP
+	if (dhd->memdump_enabled) {
+		/* collect core dump */
+		dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
+		dhd_bus_mem_dump(dhd);
+	}
+#endif /* DHD_FW_COREDUMP */
+
+	dhd_schedule_reset(dhd);
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+	dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+	dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
+	dhd_os_send_hang_message(dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+}
+
+/**
+ * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
+ * mode. Sequence number is always in the last word of a message.
+ */
+static uint8 BCMFASTPATH
+dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+                         volatile cmn_msg_hdr_t *msg, int msglen)
+{
+	uint32 tries;
+	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+	volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
+	dhd_prot_t *prot = dhd->prot;
+	uint32 msg_seqnum;
+	uint32 step = 0;
+	uint32 delay = PCIE_D2H_SYNC_DELAY;
+	uint32 total_tries = 0;
+
+	ASSERT(msglen == ring->item_len);
+
+	BCM_REFERENCE(delay);
+	/*
+	 * For retries we use a stepper algorithm.
+	 * We see that every time the dongle comes out of the D3
+	 * cold state, the first D2H mem2mem DMA takes more time to
+	 * complete, leading to livelock issues.
+	 *
+	 * Case 1 - Apart from the host CPU, some other bus master is
+	 * accessing the DDR port, probably a page close to the ring,
+	 * so PCIe does not get a chance to update the memory.
+	 * Solution - Increase the number of tries.
+	 *
+	 * Case 2 - The 50usec delay given by the host CPU is not
+	 * sufficient for the PCIe RC to start its work.
+	 * In this case the breathing time of 50usec given by
+	 * the host CPU is not sufficient.
+	 * Solution - Increase the delay in a stepper fashion.
+	 * This is done to ensure that no unwanted extra delay is
+	 * introduced in normal conditions.
+	 */
+	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+			msg_seqnum = *marker;
+			if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
+				ring->seqnum++; /* next expected sequence number */
+				goto dma_completed;
+			}
+
+			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+			if (total_tries > prot->d2h_sync_wait_max)
+				prot->d2h_sync_wait_max = total_tries;
+
+			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
+			OSL_DELAY(delay * step); /* Add stepper delay */
+
+		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
+	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+	dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
+		(volatile uchar *) msg, msglen);
+
+	ring->seqnum++; /* skip this message ... leak of a pktid */
+	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
+
+dma_completed:
+
+	prot->d2h_sync_wait_tot += tries;
+	return msg->msg_type;
+}
+
+/**
+ * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
+ * mode. The xorcsum is placed in the last word of a message. Dongle will also
+ * place a seqnum in the epoch field of the cmn_msg_hdr.
+ */
+static uint8 BCMFASTPATH
+dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+                          volatile cmn_msg_hdr_t *msg, int msglen)
+{
+	uint32 tries;
+	uint32 prot_checksum = 0; /* computed checksum */
+	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
+	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
+	dhd_prot_t *prot = dhd->prot;
+	uint32 step = 0;
+	uint32 delay = PCIE_D2H_SYNC_DELAY;
+	uint32 total_tries = 0;
+
+	ASSERT(msglen == ring->item_len);
+
+	BCM_REFERENCE(delay);
+	/*
+	 * For retries we use a stepper algorithm.
+	 * We see that every time the dongle comes out of the D3
+	 * cold state, the first D2H mem2mem DMA takes more time to
+	 * complete, leading to livelock issues.
+	 *
+	 * Case 1 - Apart from the host CPU, some other bus master is
+	 * accessing the DDR port, probably a page close to the ring,
+	 * so PCIe does not get a chance to update the memory.
+	 * Solution - Increase the number of tries.
+	 *
+	 * Case 2 - The 50usec delay given by the host CPU is not
+	 * sufficient for the PCIe RC to start its work.
+	 * In this case the breathing time of 50usec given by
+	 * the host CPU is not sufficient.
+	 * Solution - Increase the delay in a stepper fashion.
+	 * This is done to ensure that no unwanted extra delay is
+	 * introduced in normal conditions.
+	 */
+	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+			prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
+			if (prot_checksum == 0U) { /* checksum is OK */
+				if (msg->epoch == ring_seqnum) {
+					ring->seqnum++; /* next expected sequence number */
+					goto dma_completed;
+				}
+			}
+
+			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+			if (total_tries > prot->d2h_sync_wait_max)
+				prot->d2h_sync_wait_max = total_tries;
+
+			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
+			OSL_DELAY(delay * step); /* Add stepper delay */
+
+		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
+	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
+
+	DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
+	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
+		(volatile uchar *) msg, msglen);
+
+	ring->seqnum++; /* skip this message ... leak of a pktid */
+	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
+
+dma_completed:
+
+	prot->d2h_sync_wait_tot += tries;
+	return msg->msg_type;
+}
+
+/**
+ * dhd_prot_d2h_sync_none - The dongle ensures that the DMA will complete, so the
+ * host need not try to sync. This noop sync handler will be bound when the dongle
+ * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
+ */
+static uint8 BCMFASTPATH
+dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+                       volatile cmn_msg_hdr_t *msg, int msglen)
+{
+	return msg->msg_type;
+}
+
+INLINE void
+dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
+{
+	/* To synchronize with the previous memory operations call wmb() */
+	OSL_SMP_WMB();
+	dhd->prot->ioctl_received = reason;
+	/* Call another wmb() to make sure the event value is updated before waking up the waiter */
+	OSL_SMP_WMB();
+	dhd_os_ioctl_resp_wake(dhd);
+}
+
+/**
+ * dhd_prot_d2h_sync_init - Set up the host side DMA sync mode based on what the
+ * dongle advertises.
+ */
+static void
+dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	prot->d2h_sync_wait_max = 0UL;
+	prot->d2h_sync_wait_tot = 0UL;
+
+	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+
+	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
+		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
+		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
+	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
+		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
+		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
+	} else {
+		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
+	}
+}
+
+/**
+ * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
+ */
+static void
+dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
+	prot->h2dring_rxp_subn.current_phase = 0;
+
+	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
+	prot->h2dring_ctrl_subn.current_phase = 0;
+}
+
+/* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
+
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
+ * virtual and physical address, the buffer length and the DMA handle.
+ * A secdma handle is also included in the dhd_dma_buf object.
+ * +---------------------------------------------------------------------------+
+ */
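+/*
+ * Editorial usage sketch (not exercised by this patch): the typical lifecycle of a
+ * dhd_dma_buf_t using the helpers below. The 4096-byte length and the local variable
+ * names are illustrative placeholders.
+ *
+ *	dhd_dma_buf_t buf;
+ *	sh_addr_t base_addr;
+ *
+ *	memset(&buf, 0, sizeof(buf));
+ *	if (dhd_dma_buf_alloc(dhd, &buf, 4096) != BCME_OK)
+ *		return BCME_NOMEM; // alloc also audits, zeroes and cache-flushes the buffer
+ *	dhd_base_addr_htolpa(&base_addr, buf.pa); // little-endian address for the dongle
+ *	// ... hand base_addr to the dongle and use the buffer ...
+ *	dhd_dma_buf_free(dhd, &buf); // unmaps and clears the descriptor
+ */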
+
+static INLINE void
+dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
+{
+	base_addr->low_addr = htol32(PHYSADDRLO(pa));
+	base_addr->high_addr = htol32(PHYSADDRHI(pa));
+}
+
+
+/**
+ * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
+ */
+static int
+dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
+	ASSERT(dma_buf);
+	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
+	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
+	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
+	ASSERT(dma_buf->len != 0);
+
+	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
+	end = (pa_lowaddr + dma_buf->len); /* end address */
+
+	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
+		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
+			__FUNCTION__, pa_lowaddr, dma_buf->len));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+/**
+ * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
+{
+	uint32 dma_pad = 0;
+	osl_t *osh = dhd->osh;
+	uint16 dma_align = DMA_ALIGN_LEN;
+
+
+	ASSERT(dma_buf != NULL);
+	ASSERT(dma_buf->va == NULL);
+	ASSERT(dma_buf->len == 0);
+
+	/* Pad the buffer length by one extra cacheline size.
+	 * Required for D2H direction.
+	 */
+	dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
+	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
+		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
+
+	if (dma_buf->va == NULL) {
+		DHD_ERROR(("%s: buf_len %d, no memory available\n",
+			__FUNCTION__, buf_len));
+		return BCME_NOMEM;
+	}
+
+	dma_buf->len = buf_len; /* not including padded len */
+
+	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
+		dhd_dma_buf_free(dhd, dma_buf);
+		return BCME_ERROR;
+	}
+
+	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
+
+	return BCME_OK;
+}
+
+/**
+ * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
+ */
+static void
+dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+	if ((dma_buf == NULL) || (dma_buf->va == NULL))
+		return;
+
+	(void)dhd_dma_buf_audit(dhd, dma_buf);
+
+	/* Zero out the entire buffer and cache flush */
+	memset((void*)dma_buf->va, 0, dma_buf->len);
+	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
+}
+
+/**
+ * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
+ * dhd_dma_buf_alloc().
+ */
+static void
+dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+	osl_t *osh = dhd->osh;
+
+	ASSERT(dma_buf);
+
+	if (dma_buf->va == NULL)
+		return; /* Allow for free invocation, when alloc failed */
+
+	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
+	(void)dhd_dma_buf_audit(dhd, dma_buf);
+
+	/* dma buffer may have been padded at allocation */
+	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
+		dma_buf->pa, dma_buf->dmah);
+
+	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
+}
+
+/**
+ * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
+ * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
+ */
+void
+dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
+	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
+{
+	dhd_dma_buf_t *dma_buf;
+	ASSERT(dhd_dma_buf);
+	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
+	dma_buf->va = va;
+	dma_buf->len = len;
+	dma_buf->pa = pa;
+	dma_buf->dmah = dmah;
+	dma_buf->secdma = secdma;
+
+	/* Audit user defined configuration */
+	(void)dhd_dma_buf_audit(dhd, dma_buf);
+}
+
+/* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * Its main purpose is to save memory on the dongle; it has other purposes as well.
+ * The packet id map also includes storage for some packet parameters that
+ * may be saved. A native packet pointer along with the parameters may be saved
+ * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
+ * and the metadata may be retrieved using the previously allocated packet id.
+ * +---------------------------------------------------------------------------+
+ */
+#define DHD_PCIE_PKTID
+#define MAX_CTRL_PKTID		(1024) /* Maximum number of pktids supported */
+#define MAX_RX_PKTID		(1024)
+#define MAX_TX_PKTID		(3072 * 2)
+
+/* On Router, the pktptr serves as a pktid. */
+
+
+#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
+#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
+#endif
+
+/* Enum for marking the buffer color based on usage */
+typedef enum dhd_pkttype {
+	PKTTYPE_DATA_TX = 0,
+	PKTTYPE_DATA_RX,
+	PKTTYPE_IOCTL_RX,
+	PKTTYPE_EVENT_RX,
+	PKTTYPE_INFO_RX,
+	/* no pkttype check in dhd_prot_pkt_free, e.g. pktid reserved but no space available */
+	PKTTYPE_NO_CHECK,
+	PKTTYPE_TSBUF_RX
+} dhd_pkttype_t;
+
+#define DHD_PKTID_INVALID               (0U)
+#define DHD_IOCTL_REQ_PKTID             (0xFFFE)
+#define DHD_FAKE_PKTID                  (0xFACE)
+#define DHD_H2D_DBGRING_REQ_PKTID	0xFFFD
+#define DHD_D2H_DBGRING_REQ_PKTID	0xFFFC
+#define DHD_H2D_HOSTTS_REQ_PKTID	0xFFFB
+
+#define IS_FLOWRING(ring) \
+	((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
+
+typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
+
+/* Construct a packet id mapping table, returning an opaque map handle */
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
+
+/* Destroy a packet id mapping table, freeing all packets active in the table */
+static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
+
+#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
+#define DHD_NATIVE_TO_PKTID_RESET(dhd, map)  dhd_pktid_map_reset((dhd), (map))
+#define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
+#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)  dhd_pktid_map_fini_ioctl((osh), (map))
+
+#ifdef MACOSX_DHD
+#undef DHD_PCIE_PKTID
+#define DHD_PCIE_PKTID 1
+#endif /* MACOSX_DHD */
+
+#if defined(DHD_PCIE_PKTID)
+#if defined(MACOSX_DHD) || defined(DHD_EFI)
+#define IOCTLRESP_USE_CONSTMEM
+static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
+static int  alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
+#endif /* MACOSX_DHD || DHD_EFI */
+
+/* Determine number of pktids that are available */
+static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
+
+/* Allocate a unique pktid against which a pkt and some metadata is saved */
+static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+	void *pkt, dhd_pkttype_t pkttype);
+static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
+	void *dmah, void *secdma, dhd_pkttype_t pkttype);
+static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
+	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
+	void *dmah, void *secdma, dhd_pkttype_t pkttype);
+
+/* Return an allocated pktid, retrieving previously saved pkt and metadata */
+static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
+	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
+	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
+
+/*
+ * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
+ *
+ * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
+ * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
+ *
+ * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
+ *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
+ */
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+#define USE_DHD_PKTID_AUDIT_LOCK 1
+/* Audit the pktidmap allocator */
+/* #define DHD_PKTID_AUDIT_MAP */
+
+/* Audit the pktid during production/consumption of workitems */
+#define DHD_PKTID_AUDIT_RING
+
+#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
+#error "Only one of DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be enabled at a time."
+#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
+
+#define DHD_DUPLICATE_ALLOC     1
+#define DHD_DUPLICATE_FREE      2
+#define DHD_TEST_IS_ALLOC       3
+#define DHD_TEST_IS_FREE        4
+
+#ifdef USE_DHD_PKTID_AUDIT_LOCK
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh)          dhd_os_spin_lock_init(osh)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  dhd_os_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_AUDIT_LOCK(lock)              dhd_os_spin_lock(lock)
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     dhd_os_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { /* noop */ } while (0)
+#define DHD_PKTID_AUDIT_LOCK(lock)              0
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { /* noop */ } while (0)
+#endif /* !USE_DHD_PKTID_AUDIT_LOCK */
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+/* #define USE_DHD_PKTID_LOCK   1 */
+
+#ifdef USE_DHD_PKTID_LOCK
+#define DHD_PKTID_LOCK_INIT(osh)                dhd_os_spin_lock_init(osh)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock)        dhd_os_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_LOCK(lock)                    dhd_os_spin_lock(lock)
+#define DHD_PKTID_UNLOCK(lock, flags)           dhd_os_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock)	\
+	do { \
+		BCM_REFERENCE(osh); \
+		BCM_REFERENCE(lock); \
+	} while (0)
+#define DHD_PKTID_LOCK(lock)                    0
+#define DHD_PKTID_UNLOCK(lock, flags)           \
+	do { \
+		BCM_REFERENCE(lock); \
+		BCM_REFERENCE(flags); \
+	} while (0)
+#endif /* !USE_DHD_PKTID_LOCK */
+
+typedef enum dhd_locker_state {
+	LOCKER_IS_FREE,
+	LOCKER_IS_BUSY,
+	LOCKER_IS_RSVD
+} dhd_locker_state_t;
+
+/* Packet metadata saved in packet id mapper */
+
+typedef struct dhd_pktid_item {
+	dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
+	uint8       dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
+	dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
+	uint16      len;      /* length of mapped packet's buffer */
+	void        *pkt;     /* opaque native pointer to a packet */
+	dmaaddr_t   pa;       /* physical address of mapped packet's buffer */
+	void        *dmah;    /* handle to OS specific DMA map */
+	void		*secdma;
+} dhd_pktid_item_t;
+
+typedef uint32 dhd_pktid_key_t;
+
+typedef struct dhd_pktid_map {
+	uint32      items;    /* total items in map */
+	uint32      avail;    /* total available items */
+	int         failures; /* lockers unavailable count */
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+	void		*pktid_audit_lock;
+	struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+	dhd_pktid_key_t	*keys; /* map_items +1 unique pkt ids */
+	dhd_pktid_item_t lockers[0];           /* metadata storage */
+} dhd_pktid_map_t;
+
+/*
+ * PktId (Locker) #0 is never allocated and is considered invalid.
+ *
+ * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
+ * depleted pktid pool and must not be used by the caller.
+ *
+ * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
+ */
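+/*
+ * Editorial sketch (not part of this patch): a caller requesting a pktid must treat
+ * DHD_PKTID_INVALID as a depleted pool. The variable names are placeholders.
+ *
+ *	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, PKTTYPE_DATA_TX);
+ *	if (pktid == DHD_PKTID_INVALID) {
+ *		// pool depleted: do not post the work item; free or requeue pkt
+ *	}
+ */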
+
+#define DHD_PKTID_FREE_LOCKER           (FALSE)
+#define DHD_PKTID_RSV_LOCKER            (TRUE)
+
+#define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
+#define DHD_PKIDMAP_ITEMS(items)        (items)
+#define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
+	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+#define DHD_PKTIDMAP_KEYS_SZ(items)     (sizeof(dhd_pktid_key_t) * ((items) + 1))
+
+#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)  dhd_pktid_map_reset_ioctl((dhd), (map))
+
+/* Convert a packet to a pktid, and save pkt pointer in busy locker */
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)    \
+	dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
+/* Reuse a previously reserved locker to save packet params */
+#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
+	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
+		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
+		(dhd_pkttype_t)(pkttype))
+/* Convert a packet to a pktid, and save packet params in locker */
+#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
+	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
+		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
+		(dhd_pkttype_t)(pkttype))
+
+/* Convert pktid to a packet, and free the locker */
+#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
+		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
+
+/* Convert the pktid to a packet, empty locker, but keep it reserved */
+#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
+	                   (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+	                   (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
+
+#define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+/**
+* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
+*/
+static int
+dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+	const int test_for, const char *errmsg)
+{
+#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
+	struct bcm_mwbmap *handle;
+	uint32	flags;
+	bool ignore_audit;
+
+	if (pktid_map == (dhd_pktid_map_t *)NULL) {
+		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
+		return BCME_OK;
+	}
+
+	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
+
+	handle = pktid_map->pktid_audit;
+	if (handle == (struct bcm_mwbmap *)NULL) {
+		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
+		DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+		return BCME_OK;
+	}
+
+	/* Exclude special pktids from audit */
+	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID);
+	if (ignore_audit) {
+		DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+		return BCME_OK;
+	}
+
+	if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
+		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
+		/* lock is released in "error" */
+		goto error;
+	}
+
+	/* Perform audit */
+	switch (test_for) {
+		case DHD_DUPLICATE_ALLOC:
+			if (!bcm_mwbmap_isfree(handle, pktid)) {
+				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
+				           errmsg, pktid));
+				goto error;
+			}
+			bcm_mwbmap_force(handle, pktid);
+			break;
+
+		case DHD_DUPLICATE_FREE:
+			if (bcm_mwbmap_isfree(handle, pktid)) {
+				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
+				           errmsg, pktid));
+				goto error;
+			}
+			bcm_mwbmap_free(handle, pktid);
+			break;
+
+		case DHD_TEST_IS_ALLOC:
+			if (bcm_mwbmap_isfree(handle, pktid)) {
+				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
+				           errmsg, pktid));
+				goto error;
+			}
+			break;
+
+		case DHD_TEST_IS_FREE:
+			if (!bcm_mwbmap_isfree(handle, pktid)) {
+				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
+				           errmsg, pktid));
+				goto error;
+			}
+			break;
+
+		default:
+			goto error;
+	}
+
+	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+	return BCME_OK;
+
+error:
+
+	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+	/* May insert any trap mechanism here ! */
+	dhd_pktid_error_handler(dhd);
+
+	return BCME_ERROR;
+}
+
+#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
+	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
+
+static int
+dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
+	const int test_for, void *msg, uint32 msg_len, const char * func)
+{
+	int ret = 0;
+	ret = DHD_PKTID_AUDIT(dhdp, map, pktid, test_for);
+	if (ret == BCME_ERROR) {
+		prhex(func, (uchar *)msg, msg_len);
+	}
+	return ret;
+}
+#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
+	dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
+		(pktid), (test_for), msg, msg_len, __FUNCTION__)
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+
+/**
+ * +---------------------------------------------------------------------------+
+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
+ *
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
+ *
+ * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
+ * packet id is returned. This unique packet id may be used to retrieve the
+ * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
+ * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
+ * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
+ *
+ * Implementation Note:
+ * Convert this into a <key,locker> abstraction and place into bcmutils !
+ * Locker abstraction should treat contents as opaque storage, and a
+ * callback should be registered to handle busy lockers on destructor.
+ *
+ * +---------------------------------------------------------------------------+
+ */
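+/*
+ * Editorial usage sketch (not part of this patch): a save/retrieve round trip through
+ * the mapper macros defined above. pa_in, len_in and dmah_in stand for DMA parameters
+ * produced by a prior DMA_MAP of pkt; pa, len, dmah and secdma are locals that receive
+ * the saved values back.
+ *
+ *	pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa_in, len_in,
+ *		DMA_TX, dmah_in, NULL, PKTTYPE_DATA_TX);
+ *	// DHD_PKTID_INVALID here means the pool is depleted (see the locker #0 note above)
+ *
+ *	// ... pktid travels to the dongle inside a work item; on completion ...
+ *
+ *	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len,
+ *		dmah, secdma, PKTTYPE_DATA_TX); // frees the locker for reuse
+ */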
+
+/** Allocate and initialize a mapper of num_items <numbered_key, locker> */
+
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
+{
+	void* osh;
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+	uint32 map_items;
+	uint32 map_keys_sz;
+	osh = dhd->osh;
+
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
+
+	map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
+	if (map == NULL) {
+		DHD_ERROR(("%s:%d: VMALLOC failed for size %d\n",
+			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
+		return (dhd_pktid_map_handle_t *)NULL;
+	}
+
+	/* Initialize the lock that protects this structure */
+	map->items = num_items;
+	map->avail = num_items;
+
+	map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+	map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
+	if (map->keys == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
+			__FUNCTION__, __LINE__, map_keys_sz));
+		goto error;
+	}
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+	/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
+	map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
+	if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
+		DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
+		goto error;
+	} else {
+		DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
+			__FUNCTION__, __LINE__, map_items + 1));
+	}
+	map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
+		map->keys[nkey] = nkey; /* populate with unique keys */
+		map->lockers[nkey].state = LOCKER_IS_FREE;
+		map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
+		map->lockers[nkey].len   = 0;
+	}
+
+	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
+	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
+	map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
+	map->lockers[DHD_PKTID_INVALID].len   = 0;
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
+	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
+
+error:
+	if (map) {
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+		if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+			bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+			map->pktid_audit = (struct bcm_mwbmap *)NULL;
+			if (map->pktid_audit_lock)
+				DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+		}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+		if (map->keys) {
+			MFREE(osh, map->keys, map_keys_sz);
+		}
+		VMFREE(osh, map, dhd_pktid_map_sz);
+	}
+	return (dhd_pktid_map_handle_t *)NULL;
+}
+
+/**
+ * Retrieve all allocated keys and free all <numbered_key, locker>.
+ * Freeing implies: unmapping the buffers and freeing the native packet
+ * This could have been a callback registered with the pktid mapper.
+ */
+static void
+dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+	void *osh;
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+	uint32 map_items;
+	uint32 flags;
+	bool data_tx = FALSE;
+
+	map = (dhd_pktid_map_t *)handle;
+	DHD_GENERAL_LOCK(dhd, flags);
+	osh = dhd->osh;
+
+	map_items = DHD_PKIDMAP_ITEMS(map->items);
+	/* skip reserved KEY #0, and start from 1 */
+
+	for (nkey = 1; nkey <= map_items; nkey++) {
+		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
+			locker = &map->lockers[nkey];
+			locker->state = LOCKER_IS_FREE;
+			data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
+			if (data_tx) {
+				dhd->prot->active_tx_count--;
+			}
+
+#ifdef DHD_PKTID_AUDIT_RING
+			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_RING */
+
+			{
+				if (SECURE_DMA_ENAB(dhd->osh))
+					SECURE_DMA_UNMAP(osh, locker->pa,
+						locker->len, locker->dir, 0,
+						locker->dmah, locker->secdma, 0);
+				else
+					DMA_UNMAP(osh, locker->pa, locker->len,
+						locker->dir, 0, locker->dmah);
+			}
+			dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
+				locker->pkttype, data_tx);
+		}
+		else {
+#ifdef DHD_PKTID_AUDIT_RING
+			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
+		}
+		map->keys[nkey] = nkey; /* populate with unique keys */
+	}
+
+	map->avail = map_items;
+	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+}
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+/** Called in detach scenario. Releasing IOCTL buffers. */
+static void
+dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+	uint32 map_items;
+	uint32 flags;
+
+	map = (dhd_pktid_map_t *)handle;
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	map_items = DHD_PKIDMAP_ITEMS(map->items);
+	/* skip reserved KEY #0, and start from 1 */
+	for (nkey = 1; nkey <= map_items; nkey++) {
+		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
+			dhd_dma_buf_t retbuf;
+
+#ifdef DHD_PKTID_AUDIT_RING
+			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_RING */
+
+			locker = &map->lockers[nkey];
+			retbuf.va = locker->pkt;
+			retbuf.len = locker->len;
+			retbuf.pa = locker->pa;
+			retbuf.dmah = locker->dmah;
+			retbuf.secdma = locker->secdma;
+
+			/* This could be a callback registered with dhd_pktid_map */
+			DHD_GENERAL_UNLOCK(dhd, flags);
+			free_ioctl_return_buffer(dhd, &retbuf);
+			DHD_GENERAL_LOCK(dhd, flags);
+		}
+		else {
+#ifdef DHD_PKTID_AUDIT_RING
+			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
+		}
+		map->keys[nkey] = nkey; /* populate with unique keys */
+	}
+
+	map->avail = map_items;
+	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+
+/**
+ * Free the pktid map.
+ */
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+	uint32 map_keys_sz;
+
+	/* Free any pending packets */
+	dhd_pktid_map_reset(dhd, handle);
+
+	map = (dhd_pktid_map_t *)handle;
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
+		map->pktid_audit = (struct bcm_mwbmap *)NULL;
+		if (map->pktid_audit_lock) {
+			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
+		}
+	}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+	MFREE(dhd->osh, map->keys, map_keys_sz);
+	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
+}
+#ifdef IOCTLRESP_USE_CONSTMEM
+static void
+dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+	uint32 map_keys_sz;
+
+	/* Free any pending packets */
+	dhd_pktid_map_reset_ioctl(dhd, handle);
+
+	map = (dhd_pktid_map_t *)handle;
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
+		map->pktid_audit = (struct bcm_mwbmap *)NULL;
+		if (map->pktid_audit_lock) {
+			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
+		}
+	}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+	MFREE(dhd->osh, map->keys, map_keys_sz);
+	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+/** Get the pktid free count */
+static INLINE uint32 BCMFASTPATH
+dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
+{
+	dhd_pktid_map_t *map;
+	uint32	avail;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	avail = map->avail;
+
+	return avail;
+}
+
+/**
+ * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
+ * yet populated. Invoke the pktid save api to populate the packet parameters
+ * into the locker. This function is not reentrant; serialization is the
+ * caller's responsibility. The caller must treat a returned value of
+ * DHD_PKTID_INVALID as a failure case, implying a depleted pool of pktids.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+	void *pkt, dhd_pkttype_t pkttype)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
+		map->failures++;
+		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+		return DHD_PKTID_INVALID; /* failed alloc request */
+	}
+
+	ASSERT(map->avail <= map->items);
+	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+
+	if ((map->avail > map->items) || (nkey > map->items)) {
+		map->failures++;
+		DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
+			" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
+			__FUNCTION__, __LINE__, map->avail, nkey,
+			pkttype));
+		return DHD_PKTID_INVALID; /* failed alloc request */
+	}
+
+	locker = &map->lockers[nkey]; /* save packet metadata in locker */
+	map->avail--;
+	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
+	locker->len = 0;
+	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
+
+	ASSERT(nkey != DHD_PKTID_INVALID);
+	return nkey; /* return locker's numbered key */
+}
+
+/*
+ * dhd_pktid_map_save - Save a packet's parameters into a locker
+ * corresponding to a previously reserved unique numbered key.
+ */
+static INLINE void
+dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
+			__FUNCTION__, __LINE__, nkey, pkttype));
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+			dhd_bus_mem_dump(dhd);
+		}
+#else
+		ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+		return;
+	}
+
+	locker = &map->lockers[nkey];
+
+	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
+		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
+
+	/* store contents in locker */
+	locker->dir = dir;
+	locker->pa = pa;
+	locker->len = (uint16)len; /* 16bit len */
+	locker->dmah = dmah; /* DMA map handle */
+	locker->secdma = secdma;
+	locker->pkttype = pkttype;
+	locker->pkt = pkt;
+	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
+}
+
+/**
+ * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
+ * contents into the corresponding locker. Return the numbered key.
+ */
+static uint32 BCMFASTPATH
+dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype)
+{
+	uint32 nkey;
+
+	nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
+	if (nkey != DHD_PKTID_INVALID) {
+		dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
+			len, dir, dmah, secdma, pkttype);
+	}
+
+	return nkey;
+}
+
+/**
+ * dhd_pktid_map_free - Given a numbered key, return the locker contents.
+ * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
+ * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
+ * value. Only a previously allocated pktid may be freed.
+ */
+static void * BCMFASTPATH
+dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
+	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
+	bool rsv_locker)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+	void * pkt;
+	unsigned long long locker_addr;
+
+	ASSERT(handle != NULL);
+
+	map = (dhd_pktid_map_t *)handle;
+
+	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
+		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
+		           __FUNCTION__, __LINE__, nkey, pkttype));
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+			dhd_bus_mem_dump(dhd);
+		}
+#else
+		ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+		return NULL;
+	}
+
+	locker = &map->lockers[nkey];
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+	/* Debug check for cloned numbered key */
+	if (locker->state == LOCKER_IS_FREE) {
+		DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
+		           __FUNCTION__, __LINE__, nkey));
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+			dhd_bus_mem_dump(dhd);
+		}
+#else
+		ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+		return NULL;
+	}
+
+	/* Check the colour of the buffer, i.e. a buffer posted for TX
+	 * should be freed on TX completion. Similarly, a buffer posted for
+	 * IOCTL should be freed on IOCTL completion, etc.
+	 */
+	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
+
+		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
+			__FUNCTION__, __LINE__, nkey));
+#ifdef BCMDMA64OSL
+		PHYSADDRTOULONG(locker->pa, locker_addr);
+#else
+		locker_addr = PHYSADDRLO(locker->pa);
+#endif /* BCMDMA64OSL */
+		DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
+			"pkttype <%d> locker->pa <0x%llx> \n",
+			__FUNCTION__, __LINE__, locker->state, locker->pkttype,
+			pkttype, locker_addr));
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
+			dhd_bus_mem_dump(dhd);
+		}
+#else
+		ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+		return NULL;
+	}
+
+	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
+		map->avail++;
+		map->keys[map->avail] = nkey; /* make this numbered key available */
+		locker->state = LOCKER_IS_FREE; /* open and free Locker */
+	} else {
+		/* pktid will be reused, but the locker does not have a valid pkt */
+		locker->state = LOCKER_IS_RSVD;
+	}
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+	*pa = locker->pa; /* return contents of locker */
+	*len = (uint32)locker->len;
+	*dmah = locker->dmah;
+	*secdma = locker->secdma;
+
+	pkt = locker->pkt;
+	locker->pkt = NULL; /* Clear pkt */
+	locker->len = 0;
+
+	return pkt;
+}
+
+#else /* ! DHD_PCIE_PKTID */
+
+
+typedef struct pktlist {
+	PKT_LIST *tx_pkt_list;		/* list for tx packets */
+	PKT_LIST *rx_pkt_list;		/* list for rx packets */
+	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
+} pktlists_t;
+
+/*
+ * Given that each workitem only carries a 32bit pktid, only 32bit hosts can use
+ * a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
+ *
+ * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
+ * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
+ *   a lock.
+ * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
+ */
+#define DHD_PKTID32(pktptr32)	((uint32)(pktptr32))
+#define DHD_PKTPTR32(pktid32)	((void *)(pktid32))
+
+
+static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype);
+static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+	dhd_pkttype_t pkttype);
+
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
+{
+	osl_t *osh = dhd->osh;
+	pktlists_t *handle = NULL;
+
+	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
+		           __FUNCTION__, __LINE__, sizeof(pktlists_t)));
+		goto error_done;
+	}
+
+	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+		goto error;
+	}
+
+	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+		goto error;
+	}
+
+	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+		goto error;
+	}
+
+	PKTLIST_INIT(handle->tx_pkt_list);
+	PKTLIST_INIT(handle->rx_pkt_list);
+	PKTLIST_INIT(handle->ctrl_pkt_list);
+
+	return (dhd_pktid_map_handle_t *) handle;
+
+error:
+	if (handle->ctrl_pkt_list) {
+		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->rx_pkt_list) {
+		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->tx_pkt_list) {
+		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle) {
+		MFREE(osh, handle, sizeof(pktlists_t));
+	}
+
+error_done:
+	return (dhd_pktid_map_handle_t *)NULL;
+}
+
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
+{
+	osl_t *osh = dhd->osh;
+	pktlists_t *handle = (pktlists_t *) map;
+
+	ASSERT(handle != NULL);
+	if (handle == (pktlists_t *)NULL)
+		return;
+
+	if (handle->ctrl_pkt_list) {
+		PKTLIST_FINI(handle->ctrl_pkt_list);
+		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->rx_pkt_list) {
+		PKTLIST_FINI(handle->rx_pkt_list);
+		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->tx_pkt_list) {
+		PKTLIST_FINI(handle->tx_pkt_list);
+		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle) {
+		MFREE(osh, handle, sizeof(pktlists_t));
+	}
+}
+
+/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
+static INLINE uint32
+dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype)
+{
+	pktlists_t *handle = (pktlists_t *) map;
+	ASSERT(pktptr32 != NULL);
+	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
+	DHD_PKT_SET_DMAH(pktptr32, dmah);
+	DHD_PKT_SET_PA(pktptr32, pa);
+	DHD_PKT_SET_SECDMA(pktptr32, secdma);
+
+	if (pkttype == PKTTYPE_DATA_TX) {
+		PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
+	} else if (pkttype == PKTTYPE_DATA_RX) {
+		PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
+	} else {
+		PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
+	}
+
+	return DHD_PKTID32(pktptr32);
+}
+
+/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
+static INLINE void *
+dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+	dhd_pkttype_t pkttype)
+{
+	pktlists_t *handle = (pktlists_t *) map;
+	void *pktptr32;
+
+	ASSERT(pktid32 != 0U);
+	pktptr32 = DHD_PKTPTR32(pktid32);
+	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
+	*dmah = DHD_PKT_GET_DMAH(pktptr32);
+	*pa = DHD_PKT_GET_PA(pktptr32);
+	*secdma = DHD_PKT_GET_SECDMA(pktptr32);
+
+	if (pkttype == PKTTYPE_DATA_TX) {
+		PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
+	} else if (pkttype == PKTTYPE_DATA_RX) {
+		PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
+	} else {
+		PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
+	}
+
+	return pktptr32;
+}
+
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)  DHD_PKTID32(pkt)
+
+#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
+	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
+	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+	})
+
+#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
+	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
+	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+	})
+
+#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);	\
+		dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
+				(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+				(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
+	})
+
+#define DHD_PKTID_AVAIL(map)  (~0)
+
+#endif /* ! DHD_PCIE_PKTID */
+
+/* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
+
+
+/**
+ * The PCIE FD protocol layer is constructed in two phases:
+ *    Phase 1. dhd_prot_attach()
+ *    Phase 2. dhd_prot_init()
+ *
+ * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
+ * All common rings are also attached (msgbuf_ring_t objects are allocated
+ * with DMA-able buffers).
+ * All dhd_dma_buf_t objects are also allocated here.
+ *
+ * As dhd_prot_attach is invoked before the pcie_shared object is read, any
+ * initialization of objects that requires information advertised by the dongle
+ * may not be performed here.
+ * E.g. the number of TxPost flowrings is not known at this point, nor do
+ * we know which form of D2H DMA sync mechanism is advertised by the dongle, or
+ * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
+ * rings (common + flow).
+ *
+ * dhd_prot_init() is invoked after the bus layer has fetched the information
+ * advertised by the dongle in the pcie_shared_t.
+ */
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	osl_t *osh = dhd->osh;
+	dhd_prot_t *prot;
+
+	/* Allocate prot structure */
+	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
+		sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(prot, 0, sizeof(*prot));
+
+	prot->osh = osh;
+	dhd->prot = prot;
+
+	/* Is DMAing of ring WR/RD updates supported? FALSE by default */
+	dhd->dma_d2h_ring_upd_support = FALSE;
+	dhd->dma_h2d_ring_upd_support = FALSE;
+	dhd->dma_ring_upd_overwrite = FALSE;
+
+	dhd->idma_inited = 0;
+	dhd->ifrm_inited = 0;
+
+	/* Common Ring Allocations */
+
+	/* Ring  0: H2D Control Submission */
+	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
+	        H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
+	        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
+		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
+			__FUNCTION__));
+		goto fail;
+	}
+
+	/* Ring  1: H2D Receive Buffer Post */
+	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
+	        H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
+	        BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
+		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
+			__FUNCTION__));
+		goto fail;
+	}
+
+	/* Ring  2: D2H Control Completion */
+	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
+	        D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
+	        BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
+		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
+			__FUNCTION__));
+		goto fail;
+	}
+
+	/* Ring  3: D2H Transmit Complete */
+	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
+	        D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
+	        BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
+		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/* Ring  4: D2H Receive Complete */
+	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
+	        D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
+	        BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
+		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/*
+	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
+	 * buffers for flowrings will be instantiated in dhd_prot_init().
+	 * See dhd_prot_flowrings_pool_attach()
+	 */
+	/* ioctl response buffer */
+	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
+		goto fail;
+	}
+
+	/* IOCTL request buffer */
+	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
+		goto fail;
+	}
+
+	/* Host TS request buffer: one buffer for now */
+	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
+		goto fail;
+	}
+	prot->hostts_req_buf_inuse = FALSE;
+
+	/* Scratch buffer for dma rx offset */
+#ifdef BCM_HOST_BUF
+	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
+		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
+#else
+	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
+#endif /* BCM_HOST_BUF */
+	{
+		goto fail;
+	}
+
+	/* scratch buffer bus throughput measurement */
+	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
+		goto fail;
+	}
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+	prot->rx_lock = dhd_os_spin_lock_init(dhd->osh);
+
+	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
+	if (prot->pktid_ctrl_map == NULL) {
+		goto fail;
+	}
+
+	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
+	if (prot->pktid_rx_map == NULL)
+		goto fail;
+
+	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
+	if (prot->pktid_tx_map == NULL)
+		goto fail;
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
+		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
+	if (prot->pktid_map_handle_ioctl == NULL) {
+		goto fail;
+	}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+	/* Initialize the work queues to be used by the Load Balancing logic */
+#if defined(DHD_LB_TXC)
+	{
+		void *buffer;
+		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
+		bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
+			buffer, DHD_LB_WORKQ_SZ);
+		prot->tx_compl_prod_sync = 0;
+		DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
+			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
+	}
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+	{
+		void *buffer;
+		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
+		bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
+			buffer, DHD_LB_WORKQ_SZ);
+		prot->rx_compl_prod_sync = 0;
+		DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
+			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
+	}
+#endif /* DHD_LB_RXC */
+	/* Initialize trap buffer */
+	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, BCMPCIE_EXT_TRAP_DATA_MAXLEN)) {
+		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	return BCME_OK;
+
+fail:
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+	if (prot != NULL) {
+		dhd_prot_detach(dhd);
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	return BCME_NOMEM;
+} /* dhd_prot_attach */
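+
+/*
+ * Illustrative sketch only (kept under #if 0, not compiled): the same two-phase
+ * attach/init split in miniature. example_obj, example_attach and example_init
+ * are hypothetical names; only the ordering rule they demonstrate is taken from
+ * the code above - attach() may allocate fixed-size state, while anything that
+ * depends on values advertized by the dongle must wait for init().
+ */
+#if 0 /* example sketch, not part of the driver */
+struct example_obj {
+	char fixed_buf[4096];	/* size known at attach time */
+	unsigned int n_rings;	/* advertized by the peer, valid only after init */
+};
+
+static int example_attach(struct example_obj *obj)
+{
+	/* Phase 1: touch only state whose size is known up front */
+	obj->fixed_buf[0] = 0;
+	obj->n_rings = 0;
+	return 0;
+}
+
+static int example_init(struct example_obj *obj, unsigned int advertized_rings)
+{
+	/* Phase 2: peer-advertized values are now known and may be consumed */
+	obj->n_rings = advertized_rings;
+	return 0;
+}
+#endif /* example sketch */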
+
+void
+dhd_set_host_cap(dhd_pub_t *dhd)
+{
+	uint32 data = 0;
+	dhd_prot_t *prot = dhd->prot;
+
+	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+		if (dhd->h2d_phase_supported) {
+
+			data |= HOSTCAP_H2D_VALID_PHASE;
+
+			if (dhd->force_dongletrap_on_bad_h2d_phase) {
+				data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
+			}
+		}
+		if (prot->host_ipc_version > prot->device_ipc_version) {
+			prot->active_ipc_version = prot->device_ipc_version;
+		} else {
+			prot->active_ipc_version = prot->host_ipc_version;
+		}
+
+		data |= prot->active_ipc_version;
+
+		if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
+
+			DHD_INFO(("Advertise Hostready Capability\n"));
+
+			data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
+		}
+#ifdef PCIE_INB_DW
+		if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
+			DHD_INFO(("Advertise Inband-DW Capability\n"));
+			data |= HOSTCAP_DS_INBAND_DW;
+			data |= HOSTCAP_DS_NO_OOB_DW;
+			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
+		} else
+#endif /* PCIE_INB_DW */
+#ifdef PCIE_OOB
+		if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
+			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
+		} else
+#endif /* PCIE_OOB */
+		{
+			/* Disable DS altogether */
+			data |= HOSTCAP_DS_NO_OOB_DW;
+			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
+		}
+
+		if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
+
+			DHD_ERROR(("IDMA inited\n"));
+			data |= HOSTCAP_H2D_IDMA;
+			dhd->idma_inited = TRUE;
+		}
+
+		if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
+			DHD_ERROR(("IFRM Inited\n"));
+			data |= HOSTCAP_H2D_IFRM;
+			dhd->ifrm_inited = TRUE;
+			dhd->dma_h2d_ring_upd_support = FALSE;
+			dhd_prot_dma_indx_free(dhd);
+		}
+
+		/* Indicate support for TX status metadata */
+		data |= HOSTCAP_TXSTATUS_METADATA;
+
+		/* Indicate support for extended trap data */
+		data |= HOSTCAP_EXTENDED_TRAP_DATA;
+
+		DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
+			__FUNCTION__,
+			prot->active_ipc_version, prot->host_ipc_version,
+			prot->device_ipc_version));
+
+		dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
+			sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
+	}
+#ifdef HOFFLOAD_MODULES
+		dhd_bus_cmn_writeshared(dhd->bus, &dhd->hmem.data_addr,
+			sizeof(dhd->hmem.data_addr), WRT_HOST_MODULE_ADDR, 0);
+#endif
+
+#ifdef DHD_TIMESYNC
+	dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
+#endif /* DHD_TIMESYNC */
+}
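+
+/*
+ * Illustrative sketch only: the negotiation above amounts to taking the lower
+ * of the two advertized IPC revisions and OR-ing capability flags into one
+ * 32-bit word. EX_CAP_FLAG_A/B are made-up values for the example; only the
+ * min()/bitmask pattern mirrors dhd_set_host_cap().
+ */
+#if 0 /* example sketch, not part of the driver */
+#define EX_CAP_FLAG_A	0x0100u
+#define EX_CAP_FLAG_B	0x0200u
+
+static unsigned int
+example_host_cap(unsigned int host_ver, unsigned int dev_ver, int want_a, int want_b)
+{
+	unsigned int data = (host_ver > dev_ver) ? dev_ver : host_ver; /* active version */
+
+	if (want_a)
+		data |= EX_CAP_FLAG_A;
+	if (want_b)
+		data |= EX_CAP_FLAG_B;
+	return data;	/* written to shared memory, like HOST_API_VERSION above */
+}
+#endif /* example sketch */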
+
+/**
+ * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
+ * completed its initialization of the pcie_shared structure, we may now fetch
+ * the dongle advertized features and adjust the protocol layer accordingly.
+ *
+ * dhd_prot_init() may be invoked again after a dhd_prot_reset().
+ */
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+	sh_addr_t base_addr;
+	dhd_prot_t *prot = dhd->prot;
+	int ret = 0;
+
+	/**
+	 * A user defined value can be assigned to global variable h2d_max_txpost via
+	 * 1. DHD IOVAR h2d_max_txpost, before firmware download
+	 * 2. module parameter h2d_max_txpost
+	 * prot->h2d_max_txpost is assigned with H2DRING_TXPOST_MAX_ITEM,
+	 * if user has not defined any buffers by one of the above methods.
+	 */
+	prot->h2d_max_txpost = (uint16)h2d_max_txpost;
+
+	DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
+
+	/* Read max rx packets supported by dongle */
+	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
+	if (prot->max_rxbufpost == 0) {
+		/* This would happen if the dongle firmware is not */
+		/* using the latest shared structure template */
+		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
+	}
+	DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+
+	/* Initialize fields individually; bzero() would blow away the dma pointers. */
+	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
+	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+	prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
+	prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
+
+	prot->cur_ioctlresp_bufs_posted = 0;
+	prot->active_tx_count = 0;
+	prot->data_seq_no = 0;
+	prot->ioctl_seq_no = 0;
+	prot->rxbufpost = 0;
+	prot->cur_event_bufs_posted = 0;
+	prot->ioctl_state = 0;
+	prot->curr_ioctl_cmd = 0;
+	prot->cur_ts_bufs_posted = 0;
+	prot->infobufpost = 0;
+
+	prot->dmaxfer.srcmem.va = NULL;
+	prot->dmaxfer.dstmem.va = NULL;
+	prot->dmaxfer.in_progress = FALSE;
+
+	prot->metadata_dbg = FALSE;
+	prot->rx_metadata_offset = 0;
+	prot->tx_metadata_offset = 0;
+	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
+
+	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
+	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
+	prot->ioctl_state = 0;
+	prot->ioctl_status = 0;
+	prot->ioctl_resplen = 0;
+	prot->ioctl_received = IOCTL_WAIT;
+
+	/* Register the interrupt function upfront */
+	/* remove corerev checks in data path */
+	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+
+	prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
+
+	/* Initialize Common MsgBuf Rings */
+
+	prot->device_ipc_version = dhd->bus->api.fw_rev;
+	prot->host_ipc_version = PCIE_SHARED_VERSION;
+
+	/* Init the host API version */
+	dhd_set_host_cap(dhd);
+
+	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
+	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
+	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
+
+	/* Make it compatible with pre-rev7 firmware */
+	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
+		prot->d2hring_tx_cpln.item_len =
+			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
+		prot->d2hring_rx_cpln.item_len =
+			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
+	}
+	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
+	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
+
+	dhd_prot_d2h_sync_init(dhd);
+
+	dhd_prot_h2d_sync_init(dhd);
+
+#ifdef PCIE_INB_DW
+	/* Set the initial DS state */
+	if (INBAND_DW_ENAB(dhd->bus)) {
+		dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
+			DW_DEVICE_DS_ACTIVE);
+	}
+#endif /* PCIE_INB_DW */
+
+	/* init the scratch buffer */
+	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
+	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+		D2H_DMA_SCRATCH_BUF, 0);
+	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
+		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
+
+	/* If supported by the host, indicate the memory block
+	 * for completion writes / submission reads to shared space
+	 */
+	if (dhd->dma_d2h_ring_upd_support) {
+		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			D2H_DMA_INDX_WR_BUF, 0);
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_DMA_INDX_RD_BUF, 0);
+	}
+
+	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_DMA_INDX_WR_BUF, 0);
+		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			D2H_DMA_INDX_RD_BUF, 0);
+
+	}
+
+	/* Signal to the dongle that common ring init is complete */
+	dhd_bus_hostready(dhd->bus);
+
+	/*
+	 * If the DMA-able buffers for flowring needs to come from a specific
+	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
+	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
+	 * this contiguous memory region, for each of the flowrings.
+	 */
+
+	/* Pre-allocate pool of msgbuf_ring for flowrings */
+	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
+		return BCME_ERROR;
+	}
+
+	/* If IFRM is enabled, wait for FW to setup the DMA channel */
+	if (IFRM_ENAB(dhd)) {
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_IFRM_INDX_WR_BUF, 0);
+	}
+
+	/* See if info rings could be created */
+	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
+			/* For now log and proceed, further clean up action maybe necessary
+			 * when we have more clarity.
+			 */
+			DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
+				__FUNCTION__, ret));
+		}
+	}
+
+	/* Host should configure soft doorbells if needed ... here */
+
+	/* Post to dongle host configured soft doorbells */
+	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
+
+	/* Post buffers for packet reception and ioctl/event responses */
+	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+	/* Fix re-entry problem without general lock */
+	atomic_set(&dhd_msgbuf_rxbuf_post_event_bufs_running, 0);
+	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+	return BCME_OK;
+} /* dhd_prot_init */
+
+
+/**
+ * dhd_prot_detach - PCIE FD protocol layer destructor.
+ * Unlink, frees allocated protocol memory (including dhd_prot)
+ */
+void dhd_prot_detach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Stop the protocol module */
+	if (prot) {
+
+		/* free up all DMA-able buffers allocated during prot attach/init */
+
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
+		dhd_dma_buf_free(dhd, &prot->retbuf);
+		dhd_dma_buf_free(dhd, &prot->ioctbuf);
+		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
+		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
+		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
+
+		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+
+		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
+
+		/* Common MsgBuf Rings */
+		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
+		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
+
+		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
+		dhd_prot_flowrings_pool_detach(dhd);
+
+		/* detach info rings */
+		dhd_prot_detach_info_rings(dhd);
+
+		/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
+		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
+		 * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
+		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKGET.
+		 * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
+		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKFREE.
+		 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
+		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
+		 */
+		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
+		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
+		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
+#ifdef IOCTLRESP_USE_CONSTMEM
+		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
+#endif
+
+		dhd_os_spin_lock_deinit(dhd->osh, prot->rx_lock);
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+#if defined(DHD_LB_TXC)
+		if (prot->tx_compl_prod.buffer)
+			MFREE(dhd->osh, prot->tx_compl_prod.buffer,
+			      sizeof(void*) * DHD_LB_WORKQ_SZ);
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+		if (prot->rx_compl_prod.buffer)
+			MFREE(dhd->osh, prot->rx_compl_prod.buffer,
+			      sizeof(void*) * DHD_LB_WORKQ_SZ);
+#endif /* DHD_LB_RXC */
+
+		dhd->prot = NULL;
+	}
+} /* dhd_prot_detach */
+
+
+/**
+ * dhd_prot_reset - Reset the protocol layer without freeing any objects.
+ * This may be invoked to soft reboot the dongle, without having to
+ * detach and attach the entire protocol layer.
+ *
+ * After dhd_prot_reset(), dhd_prot_init() may be invoked
+ * without going through a dhd_prot_attach() phase.
+ */
+void
+dhd_prot_reset(dhd_pub_t *dhd)
+{
+	struct dhd_prot *prot = dhd->prot;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	if (prot == NULL) {
+		return;
+	}
+
+	dhd_prot_flowrings_pool_reset(dhd);
+
+	/* Reset Common MsgBuf Rings */
+	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
+	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
+	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
+	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
+	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
+
+	/* Reset info rings */
+	if (prot->h2dring_info_subn) {
+		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
+	}
+
+	if (prot->d2hring_info_cpln) {
+		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
+	}
+
+	/* Reset all DMA-able buffers allocated during prot attach */
+	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
+	dhd_dma_buf_reset(dhd, &prot->retbuf);
+	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
+	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
+	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
+	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
+
+	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
+
+	/* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
+	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
+	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
+	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
+
+
+	prot->rx_metadata_offset = 0;
+	prot->tx_metadata_offset = 0;
+
+	prot->rxbufpost = 0;
+	prot->cur_event_bufs_posted = 0;
+	prot->cur_ioctlresp_bufs_posted = 0;
+
+	prot->active_tx_count = 0;
+	prot->data_seq_no = 0;
+	prot->ioctl_seq_no = 0;
+	prot->ioctl_state = 0;
+	prot->curr_ioctl_cmd = 0;
+	prot->ioctl_received = IOCTL_WAIT;
+	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
+	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
+
+	/* dhd_flow_rings_init is called from dhd_bus_start,
+	 * so flowrings are deleted here when the bus is stopped
+	 */
+	if (dhd->flow_rings_inited) {
+		dhd_flow_rings_deinit(dhd);
+	}
+
+	/* Reset PKTID map */
+	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
+	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
+	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
+#ifdef IOCTLRESP_USE_CONSTMEM
+	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+#ifdef DMAMAP_STATS
+	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
+	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
+#ifndef IOCTLRESP_USE_CONSTMEM
+	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
+#endif /* IOCTLRESP_USE_CONSTMEM */
+	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
+	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
+	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
+#endif /* DMAMAP_STATS */
+} /* dhd_prot_reset */
+
+#if defined(DHD_LB_RXP)
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
+#else /* !DHD_LB_RXP */
+#define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
+#endif /* !DHD_LB_RXP */
+
+#if defined(DHD_LB_RXC)
+#define DHD_LB_DISPATCH_RX_COMPL(dhdp)	dhd_lb_dispatch_rx_compl(dhdp)
+#else /* !DHD_LB_RXC */
+#define DHD_LB_DISPATCH_RX_COMPL(dhdp)	do { /* noop */ } while (0)
+#endif /* !DHD_LB_RXC */
+
+#if defined(DHD_LB_TXC)
+#define DHD_LB_DISPATCH_TX_COMPL(dhdp)	dhd_lb_dispatch_tx_compl(dhdp)
+#else /* !DHD_LB_TXC */
+#define DHD_LB_DISPATCH_TX_COMPL(dhdp)	do { /* noop */ } while (0)
+#endif /* !DHD_LB_TXC */
+
+
+#if defined(DHD_LB)
+/* DHD load balancing: deferral of work to another online CPU */
+/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
+extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
+ * to other CPU cores
+ */
+static INLINE void
+dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
+{
+	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
+}
+#endif /* DHD_LB_RXP */
+
+#if defined(DHD_LB_TXC)
+/**
+ * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
+ * to other CPU cores
+ */
+static INLINE void
+dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
+{
+	bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
+	dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
+}
+
+/**
+ * DHD load-balanced tx completion tasklet handler that frees packets on the
+ * selected CPU. Packet pointers are delivered to
+ * this tasklet via the tx complete workq.
+ */
+void
+dhd_lb_tx_compl_handler(unsigned long data)
+{
+	int elem_ix;
+	void *pkt, **elem;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	dhd_pub_t *dhd = (dhd_pub_t *)data;
+	dhd_prot_t *prot = dhd->prot;
+	bcm_workq_t *workq = &prot->tx_compl_cons;
+	uint32 count = 0;
+
+	int curr_cpu;
+	curr_cpu = get_cpu();
+	put_cpu();
+
+	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
+
+	while (1) {
+		elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+
+		if (elem_ix == BCM_RING_EMPTY) {
+			break;
+		}
+
+		elem = WORKQ_ELEMENT(void *, workq, elem_ix);
+		pkt = *elem;
+
+		DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
+
+		OSL_PREFETCH(PKTTAG(pkt));
+		OSL_PREFETCH(pkt);
+
+		pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
+		pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
+
+		DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
+#if defined(BCMPCIE)
+		dhd_txcomplete(dhd, pkt, true);
+#endif /* BCMPCIE */
+
+		PKTFREE(dhd->osh, pkt, TRUE);
+		count++;
+	}
+
+	/* smp_wmb(); */
+	bcm_workq_cons_sync(workq);
+	DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
+}
+#endif /* DHD_LB_TXC */
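+
+/*
+ * Illustrative sketch only: the load-balancing workq above is a bounded ring of
+ * packet pointers with separate producer and consumer indices. The drain loop
+ * below restates the bcm_ring_cons()/bcm_workq_cons_sync() pattern in
+ * miniature; the types and names are hypothetical, not the bcm_workq API.
+ */
+#if 0 /* example sketch, not part of the driver */
+#define EX_WORKQ_SZ	64
+
+struct example_workq {
+	void *elem[EX_WORKQ_SZ];
+	volatile unsigned int write;	/* advanced by the producer */
+	unsigned int read;		/* advanced by the consumer (this CPU) */
+};
+
+static unsigned int
+example_workq_drain(struct example_workq *q, void (*free_fn)(void *pkt))
+{
+	unsigned int count = 0;
+
+	while (q->read != q->write) {		/* ring is empty when indices meet */
+		void *pkt = q->elem[q->read % EX_WORKQ_SZ];
+		free_fn(pkt);			/* e.g. DMA unmap + PKTFREE above */
+		q->read++;
+		count++;
+	}
+	/* a real implementation publishes q->read back to the producer here */
+	return count;
+}
+#endif /* example sketch */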
+
+#if defined(DHD_LB_RXC)
+
+/**
+ * dhd_lb_dispatch_rx_compl - load balance by dispatching rx completion work
+ * to other CPU cores
+ */
+static INLINE void
+dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
+{
+	dhd_prot_t *prot = dhdp->prot;
+	/* Schedule the tasklet only if we have to */
+	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+		/* flush WR index */
+		bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
+		dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
+	}
+}
+
+void
+dhd_lb_rx_compl_handler(unsigned long data)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)data;
+	bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
+
+	DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
+
+	dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
+	bcm_workq_cons_sync(workq);
+}
+#endif /* DHD_LB_RXC */
+#endif /* DHD_LB */
+
+void
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
+{
+	dhd_prot_t *prot = dhd->prot;
+	prot->rx_dataoffset = rx_offset;
+}
+
+static int
+dhd_check_create_info_rings(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = BCME_ERROR;
+	uint16 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
+
+	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+	}
+
+	if (prot->h2dring_info_subn == NULL) {
+		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+		if (prot->h2dring_info_subn == NULL) {
+			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
+				__FUNCTION__));
+			return BCME_NOMEM;
+		}
+
+		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
+		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
+			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
+			ringid);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
+				__FUNCTION__));
+			goto err;
+		}
+	}
+
+	if (prot->d2hring_info_cpln == NULL) {
+		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
+
+		if (prot->d2hring_info_cpln == NULL) {
+			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
+				__FUNCTION__));
+			return BCME_NOMEM;
+		}
+
+		/* create the debug info completion ring next to debug info submit ring
+		 * ringid = id next to debug info submit ring
+		 */
+		ringid = ringid + 1;
+
+		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
+		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
+			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
+			ringid);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
+				__FUNCTION__));
+			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
+			goto err;
+		}
+	}
+
+	return ret;
+err:
+	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
+	prot->h2dring_info_subn = NULL;
+
+	if (prot->d2hring_info_cpln) {
+		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
+		prot->d2hring_info_cpln = NULL;
+	}
+	return ret;
+} /* dhd_check_create_info_rings */
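+
+/*
+ * Illustrative sketch only: the dynamic info rings take the ids immediately
+ * after the common rings and the TX flowrings, as computed above. Assuming
+ * (hypothetically) 5 common rings and 16 TX flowrings, the submit ring would
+ * get id 21 and the completion ring id 22; the helper below just restates that
+ * arithmetic.
+ */
+#if 0 /* example sketch, not part of the driver */
+static unsigned short
+example_info_ring_ids(unsigned short max_tx_flowrings, unsigned short num_common_rings,
+	unsigned short *cpl_ringid)
+{
+	unsigned short submit_ringid = (unsigned short)(max_tx_flowrings + num_common_rings);
+
+	*cpl_ringid = (unsigned short)(submit_ringid + 1);	/* completion follows submit */
+	return submit_ringid;
+}
+#endif /* example sketch */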
+
+int
+dhd_prot_init_info_rings(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = BCME_OK;
+
+	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
+		DHD_ERROR(("%s: info rings aren't created! \n",
+			__FUNCTION__));
+		return ret;
+	}
+
+	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
+		DHD_INFO(("Info completion ring was created!\n"));
+		return ret;
+	}
+
+	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
+	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln);
+	if (ret != BCME_OK)
+		return ret;
+
+	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+
+	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
+	prot->h2dring_info_subn->n_completion_ids = 1;
+	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
+
+	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn);
+
+	/* Note that there is no way to delete a d2h or h2d ring in case either creation fails,
+	 * so we cannot clean up if one ring was created while the other failed
+	 */
+	return ret;
+} /* dhd_prot_init_info_rings */
+
+static void
+dhd_prot_detach_info_rings(dhd_pub_t *dhd)
+{
+	if (dhd->prot->h2dring_info_subn) {
+		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
+		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
+		dhd->prot->h2dring_info_subn = NULL;
+	}
+	if (dhd->prot->d2hring_info_cpln) {
+		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
+		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
+		dhd->prot->d2hring_info_cpln = NULL;
+	}
+}
+
+/**
+ * Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+int dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+
+	/* Post ts buffer after shim layer is attached */
+	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+
+
+#ifdef DHD_FW_COREDUMP
+	/* Check the memdump capability */
+	dhd_get_memdump_info(dhd);
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+	dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0) {
+		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
+		goto done;
+	}
+	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
+
+	DHD_SSSR_DUMP_INIT(dhd);
+
+	dhd_process_cid_mac(dhd, TRUE);
+	ret = dhd_preinit_ioctls(dhd);
+	dhd_process_cid_mac(dhd, FALSE);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+done:
+	return ret;
+} /* dhd_sync_with_dongle */
+
+
+#define DHD_DBG_SHOW_METADATA	0
+
+#if DHD_DBG_SHOW_METADATA
+static void BCMFASTPATH
+dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
+{
+	uint8 tlv_t;
+	uint8 tlv_l;
+	uint8 *tlv_v = (uint8 *)ptr;
+
+	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
+		return;
+
+	len -= BCMPCIE_D2H_METADATA_HDRLEN;
+	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
+
+	while (len > TLV_HDR_LEN) {
+		tlv_t = tlv_v[TLV_TAG_OFF];
+		tlv_l = tlv_v[TLV_LEN_OFF];
+
+		len -= TLV_HDR_LEN;
+		tlv_v += TLV_HDR_LEN;
+		if (len < tlv_l)
+			break;
+		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
+			break;
+
+		switch (tlv_t) {
+		case WLFC_CTL_TYPE_TXSTATUS: {
+			uint32 txs;
+			memcpy(&txs, tlv_v, sizeof(uint32));
+			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
+				printf("METADATA TX_STATUS: %08x\n", txs);
+			} else {
+				wl_txstatus_additional_info_t tx_add_info;
+				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
+					sizeof(wl_txstatus_additional_info_t));
+				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
+					" rate = %08x tries = %d - %d\n", txs,
+					tx_add_info.seq, tx_add_info.entry_ts,
+					tx_add_info.enq_ts, tx_add_info.last_ts,
+					tx_add_info.rspec, tx_add_info.rts_cnt,
+					tx_add_info.tx_cnt);
+			}
+			} break;
+
+		case WLFC_CTL_TYPE_RSSI: {
+			if (tlv_l == 1)
+				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
+			else
+				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
+					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
+					(int8)(*tlv_v), *(tlv_v + 1));
+			} break;
+
+		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
+			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
+			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_RX_STAMP: {
+			struct {
+				uint32 rspec;
+				uint32 bus_time;
+				uint32 wlan_time;
+			} rx_tmstamp;
+			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
+			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
+				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
+			} break;
+
+		case WLFC_CTL_TYPE_TRANS_ID:
+			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_COMP_TXSTATUS:
+			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
+			break;
+
+		default:
+			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
+			break;
+		}
+
+		len -= tlv_l;
+		tlv_v += tlv_l;
+	}
+}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+static INLINE void BCMFASTPATH
+dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
+{
+	if (pkt) {
+		if (pkttype == PKTTYPE_IOCTL_RX ||
+			pkttype == PKTTYPE_EVENT_RX ||
+			pkttype == PKTTYPE_INFO_RX ||
+			pkttype == PKTTYPE_TSBUF_RX) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+			PKTFREE_STATIC(dhd->osh, pkt, send);
+#else
+			PKTFREE(dhd->osh, pkt, send);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+		} else {
+			PKTFREE(dhd->osh, pkt, send);
+		}
+	}
+}
+
+/* dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle */
+static INLINE void * BCMFASTPATH
+dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
+{
+	void *PKTBUF;
+	dmaaddr_t pa;
+	uint32 len;
+	void *dmah;
+	void *secdma;
+
+#ifdef DHD_PCIE_PKTID
+	if (free_pktid) {
+		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
+			pktid, pa, len, dmah, secdma, pkttype);
+	} else {
+		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
+			pktid, pa, len, dmah, secdma, pkttype);
+	}
+#else
+	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
+		len, dmah, secdma, pkttype);
+#endif /* DHD_PCIE_PKTID */
+	if (PKTBUF) {
+		{
+			if (SECURE_DMA_ENAB(dhd->osh))
+				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
+					secdma, 0);
+			else
+				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+#ifdef DMAMAP_STATS
+			switch (pkttype) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+				case PKTTYPE_IOCTL_RX:
+					dhd->dma_stats.ioctl_rx--;
+					dhd->dma_stats.ioctl_rx_sz -= len;
+					break;
+#endif /* IOCTLRESP_USE_CONSTMEM */
+				case PKTTYPE_EVENT_RX:
+					dhd->dma_stats.event_rx--;
+					dhd->dma_stats.event_rx_sz -= len;
+					break;
+				case PKTTYPE_INFO_RX:
+					dhd->dma_stats.info_rx--;
+					dhd->dma_stats.info_rx_sz -= len;
+					break;
+				case PKTTYPE_TSBUF_RX:
+					dhd->dma_stats.tsbuf_rx--;
+					dhd->dma_stats.tsbuf_rx_sz -= len;
+					break;
+			}
+#endif /* DMAMAP_STATS */
+		}
+	}
+
+	return PKTBUF;
+}
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+static INLINE void BCMFASTPATH
+dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
+{
+	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
+	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
+		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
+
+	return;
+}
+#endif
+
+#ifdef PCIE_INB_DW
+static int
+dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
+{
+	unsigned long flags = 0;
+
+	if (INBAND_DW_ENAB(bus)) {
+		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+		bus->host_active_cnt++;
+		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+		if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
+			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+			bus->host_active_cnt--;
+			dhd_bus_inb_ack_pending_ds_req(bus);
+			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+			return BCME_ERROR;
+		}
+	}
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
+{
+	unsigned long flags = 0;
+	if (INBAND_DW_ENAB(bus)) {
+		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+		bus->host_active_cnt--;
+		dhd_bus_inb_ack_pending_ds_req(bus);
+		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+	}
+}
+#endif /* PCIE_INB_DW */
+
+static void BCMFASTPATH
+dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int16 fillbufs;
+	uint16 cnt = 256;
+	int retcount = 0;
+
+	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+	while (fillbufs >= RX_BUF_BURST) {
+		cnt--;
+		if (cnt == 0) {
+			/* find a better way to reschedule rx buf post if space not available */
+			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
+			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
+			break;
+		}
+
+		/* Post in bursts of at most RX_BUF_BURST buffers at a time */
+		fillbufs = MIN(fillbufs, RX_BUF_BURST);
+
+		/* Post buffers */
+		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
+
+		if (retcount >= 0) {
+			prot->rxbufpost += (uint16)retcount;
+#ifdef DHD_LB_RXC
+			/* dhd_prot_rxbuf_post returns the number of buffers posted */
+			DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
+#endif /* DHD_LB_RXC */
+			/* how many more to post */
+			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+		} else {
+			/* Make sure we don't run loop any further */
+			fillbufs = 0;
+		}
+	}
+}
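+
+/*
+ * Illustrative sketch only: the refill loop above tops the ring up in bursts
+ * until the shortfall drops below one burst. With max_rxbufpost = 256,
+ * rxbufpost = 10 and RX_BUF_BURST = 32, the shortfall is 246 and seven bursts
+ * of 32 (224 buffers) are posted before the loop stops. The helper below is a
+ * hypothetical stand-in that only reproduces that arithmetic.
+ */
+#if 0 /* example sketch, not part of the driver */
+static unsigned int
+example_refill(unsigned int max_post, unsigned int posted, unsigned int burst)
+{
+	unsigned int total = 0;
+	int fill = (int)max_post - (int)posted;
+
+	while (fill >= (int)burst) {
+		posted += burst;	/* stands in for dhd_prot_rxbuf_post()'s return */
+		total += burst;
+		fill = (int)max_post - (int)posted;
+	}
+	return total;			/* 224 for the 256/10/32 example */
+}
+#endif /* example sketch */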
+
+/** Post 'count' rx buffers to the dongle */
+static int BCMFASTPATH
+dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
+{
+	void *p, **pktbuf;
+	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+	uint8 *rxbuf_post_tmp;
+	host_rxbuf_post_t *rxbuf_post;
+	void *msg_start;
+	dmaaddr_t pa, *pktbuf_pa;
+	uint32 *pktlen;
+	uint16 i = 0, alloced = 0;
+	unsigned long flags;
+	uint32 pktid;
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
+	void *lcl_buf;
+	uint16 lcl_buf_size;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	/* allocate a local buffer to store pkt buffer va, pa and length */
+	lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
+		RX_BUF_BURST;
+	lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
+	if (!lcl_buf) {
+		DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return 0;
+	}
+	pktbuf = lcl_buf;
+	pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
+	pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
+
+	for (i = 0; i < count; i++) {
+		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
+			dhd->rx_pktgetfail++;
+			break;
+		}
+
+		pktlen[i] = PKTLEN(dhd->osh, p);
+		if (SECURE_DMA_ENAB(dhd->osh)) {
+			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
+				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
+		}
+#ifndef BCM_SECURE_DMA
+		else
+			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
+#endif /* #ifndef BCM_SECURE_DMA */
+
+		if (PHYSADDRISZERO(pa)) {
+			PKTFREE(dhd->osh, p, FALSE);
+			DHD_ERROR(("Invalid phyaddr 0\n"));
+			ASSERT(0);
+			break;
+		}
+#ifdef DMAMAP_STATS
+		dhd->dma_stats.rxdata++;
+		dhd->dma_stats.rxdata_sz += pktlen[i];
+#endif /* DMAMAP_STATS */
+
+		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
+		pktlen[i] = PKTLEN(dhd->osh, p);
+		pktbuf[i] = p;
+		pktbuf_pa[i] = pa;
+	}
+
+	/* only post what we have */
+	count = i;
+
+	/* grab the rx lock to allocate pktid and post on ring */
+	DHD_SPIN_LOCK(prot->rx_lock, flags);
+
+	/* Claim space for exactly 'count' messages, for mitigation purposes */
+	msg_start = (void *)
+		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
+	if (msg_start == NULL) {
+		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+		goto cleanup;
+	}
+	/* if msg_start != NULL, we should have alloced space for at least 1 item */
+	ASSERT(alloced > 0);
+
+	rxbuf_post_tmp = (uint8*)msg_start;
+
+	for (i = 0; i < alloced; i++) {
+		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
+		p = pktbuf[i];
+		pa = pktbuf_pa[i];
+
+#if defined(DHD_LB_RXC)
+		if (use_rsv_pktid == TRUE) {
+			bcm_workq_t *workq = &prot->rx_compl_cons;
+			int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+
+			if (elem_ix == BCM_RING_EMPTY) {
+				DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
+				pktid = DHD_PKTID_INVALID;
+				goto alloc_pkt_id;
+			} else {
+				uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
+				pktid = *elem;
+			}
+
+			rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+
+			/* Now populate the previous locker with valid information */
+			if (pktid != DHD_PKTID_INVALID) {
+				DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
+					p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
+					PKTTYPE_DATA_RX);
+			}
+		} else
+#endif /* ! DHD_LB_RXC */
+		{
+#if defined(DHD_LB_RXC)
+alloc_pkt_id:
+#endif /* DHD_LB_RXC */
+		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
+			pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
+#if defined(DHD_PCIE_PKTID)
+		if (pktid == DHD_PKTID_INVALID) {
+			break;
+		}
+#endif /* DHD_PCIE_PKTID */
+		}
+
+		/* Common msg header */
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
+		rxbuf_post->cmn_hdr.if_id = 0;
+		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+		rxbuf_post->cmn_hdr.flags = ring->current_phase;
+		ring->seqnum++;
+		rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
+		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+		rxbuf_post->data_buf_addr.low_addr =
+			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
+
+		if (prot->rx_metadata_offset) {
+			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
+			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
+		} else {
+			rxbuf_post->metadata_buf_len = 0;
+			rxbuf_post->metadata_buf_addr.high_addr = 0;
+			rxbuf_post->metadata_buf_addr.low_addr  = 0;
+		}
+
+#ifdef DHD_PKTID_AUDIT_RING
+		DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+		rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+
+		/* Move rxbuf_post_tmp to next item */
+		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
+	}
+
+	if (i < alloced) {
+		if (ring->wr < (alloced - i))
+			ring->wr = ring->max_items - (alloced - i);
+		else
+			ring->wr -= (alloced - i);
+
+		if (ring->wr == 0) {
+			DHD_INFO(("%s: flipping the phase now\n", ring->name));
+			ring->current_phase = ring->current_phase ?
+				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+		}
+
+		alloced = i;
+	}
+
+	/* update ring's WR index and ring doorbell to dongle */
+	if (alloced > 0) {
+		unsigned long flags1;
+		DHD_GENERAL_LOCK(dhd, flags1);
+		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+		DHD_GENERAL_UNLOCK(dhd, flags1);
+	}
+
+	DHD_SPIN_UNLOCK(prot->rx_lock, flags);
+
+cleanup:
+	for (i = alloced; i < count; i++) {
+		p = pktbuf[i];
+		pa = pktbuf_pa[i];
+
+		if (SECURE_DMA_ENAB(dhd->osh))
+			SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
+				DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
+		else
+			DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
+		PKTFREE(dhd->osh, p, FALSE);
+	}
+
+	MFREE(dhd->osh, lcl_buf, lcl_buf_size);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+	return alloced;
+} /* dhd_prot_rxbuf_post */
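+
+/*
+ * Illustrative sketch only: rolling the write index back when fewer work items
+ * were filled than claimed, as done at the end of the posting loop above.
+ * 'unused' plays the role of (alloced - i); when the rollback lands the index
+ * on 0 the phase bit is flipped because the index has effectively wrapped.
+ * Names are hypothetical.
+ */
+#if 0 /* example sketch, not part of the driver */
+static unsigned short
+example_ring_rollback(unsigned short wr, unsigned short max_items, unsigned short unused,
+	unsigned char *phase)
+{
+	if (wr < unused)
+		wr = (unsigned short)(max_items - (unused - wr));	/* wrap backwards */
+	else
+		wr = (unsigned short)(wr - unused);
+
+	if (wr == 0)
+		*phase = (unsigned char)(*phase ? 0 : 1);		/* flip phase bit */
+	return wr;
+}
+#endif /* example sketch */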
+
+static int
+dhd_prot_infobufpost(dhd_pub_t *dhd)
+{
+	unsigned long flags;
+	uint32 pktid;
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring = prot->h2dring_info_subn;
+	uint16 alloced = 0;
+	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+	uint32 pktlen;
+	info_buf_post_msg_t *infobuf_post;
+	uint8 *infobuf_post_tmp;
+	void *p;
+	void* msg_start;
+	uint8 i = 0;
+	dmaaddr_t pa;
+	int16 count;
+
+	if (ring == NULL)
+		return 0;
+
+	if (ring->inited != TRUE)
+		return 0;
+	if (prot->max_infobufpost == 0)
+		return 0;
+
+	count = prot->max_infobufpost - prot->infobufpost;
+
+	if (count <= 0) {
+		DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	/* Claim space for exactly 'count' messages, for mitigation purposes */
+	msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (msg_start == NULL) {
+		DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return -1;
+	}
+
+	/* if msg_start != NULL, we should have alloced space for at least 1 item */
+	ASSERT(alloced > 0);
+
+	infobuf_post_tmp = (uint8*) msg_start;
+
+	/* loop through each allocated message in the host ring */
+	for (i = 0; i < alloced; i++) {
+		infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
+		/* Create a rx buffer */
+#ifdef DHD_USE_STATIC_CTRLBUF
+		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
+#else
+		p = PKTGET(dhd->osh, pktsz, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+		if (p == NULL) {
+			DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
+			dhd->rx_pktgetfail++;
+			break;
+		}
+		pktlen = PKTLEN(dhd->osh, p);
+		if (SECURE_DMA_ENAB(dhd->osh)) {
+			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
+		}
+#ifndef BCM_SECURE_DMA
+		else
+			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+#endif /* #ifndef BCM_SECURE_DMA */
+		if (PHYSADDRISZERO(pa)) {
+			if (SECURE_DMA_ENAB(dhd->osh)) {
+				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+					ring->dma_buf.secdma, 0);
+			}
+			else
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+#ifdef DHD_USE_STATIC_CTRLBUF
+			PKTFREE_STATIC(dhd->osh, p, FALSE);
+#else
+			PKTFREE(dhd->osh, p, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			DHD_ERROR(("Invalid phyaddr 0\n"));
+			ASSERT(0);
+			break;
+		}
+#ifdef DMAMAP_STATS
+		dhd->dma_stats.info_rx++;
+		dhd->dma_stats.info_rx_sz += pktlen;
+#endif /* DMAMAP_STATS */
+		pktlen = PKTLEN(dhd->osh, p);
+
+		/* Common msg header */
+		infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
+		infobuf_post->cmn_hdr.if_id = 0;
+		infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+		infobuf_post->cmn_hdr.flags = ring->current_phase;
+		ring->seqnum++;
+
+#if defined(DHD_PCIE_PKTID)
+		/* get the lock before calling DHD_NATIVE_TO_PKTID */
+		DHD_GENERAL_LOCK(dhd, flags);
+#endif /* DHD_PCIE_PKTID */
+
+		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
+			pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
+
+
+#if defined(DHD_PCIE_PKTID)
+		/* free lock */
+		DHD_GENERAL_UNLOCK(dhd, flags);
+
+		if (pktid == DHD_PKTID_INVALID) {
+			if (SECURE_DMA_ENAB(dhd->osh)) {
+				DHD_GENERAL_LOCK(dhd, flags);
+				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
+					ring->dma_buf.secdma, 0);
+				DHD_GENERAL_UNLOCK(dhd, flags);
+			} else
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+			PKTFREE_STATIC(dhd->osh, p, FALSE);
+#else
+			PKTFREE(dhd->osh, p, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
+			break;
+		}
+#endif /* DHD_PCIE_PKTID */
+
+		infobuf_post->host_buf_len = htol16((uint16)pktlen);
+		infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+		infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+
+#ifdef DHD_PKTID_AUDIT_RING
+		DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+		DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
+			infobuf_post->cmn_hdr.request_id,  infobuf_post->host_buf_addr.low_addr,
+			infobuf_post->host_buf_addr.high_addr));
+
+		infobuf_post->cmn_hdr.request_id = htol32(pktid);
+		/* Move rxbuf_post_tmp to next item */
+		infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
+	}
+
+	if (i < alloced) {
+		if (ring->wr < (alloced - i))
+			ring->wr = ring->max_items - (alloced - i);
+		else
+			ring->wr -= (alloced - i);
+
+		alloced = i;
+		if (alloced && ring->wr == 0) {
+			DHD_INFO(("%s: flipping the phase now\n", ring->name));
+			ring->current_phase = ring->current_phase ?
+				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+		}
+	}
+
+	/* Update the write pointer in TCM & ring bell */
+	if (alloced > 0) {
+		prot->infobufpost += alloced;
+		DHD_INFO(("allocated %d buffers for info ring\n",  alloced));
+		DHD_GENERAL_LOCK(dhd, flags);
+		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+		DHD_GENERAL_UNLOCK(dhd, flags);
+	}
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+	return alloced;
+} /* dhd_prot_infobufpost */
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+static int
+alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
+{
+	int err;
+	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
+
+	if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
+		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+
+	return BCME_OK;
+}
+
+static void
+free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
+{
+	/* retbuf (declared on stack) is not fully populated; rebuild len/_alloced before freeing */
+	if (retbuf->va) {
+		uint32 dma_pad;
+		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
+		retbuf->len = IOCT_RETBUF_SIZE;
+		retbuf->_alloced = retbuf->len + dma_pad;
+	}
+
+	dhd_dma_buf_free(dhd, retbuf);
+	return;
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
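+
+/*
+ * Illustrative sketch only: free_ioctl_return_buffer() above reconstructs the
+ * allocated length as the requested size plus the DMA pad that
+ * dhd_dma_buf_alloc() would have added. The value of DHD_DMA_PAD is not shown
+ * in this file; assuming (hypothetically) a pad of 128, a 2000-byte request
+ * yields 2128, while a request already a multiple of 128 stays unpadded.
+ */
+#if 0 /* example sketch, not part of the driver */
+static unsigned int
+example_alloced_len(unsigned int req_len, unsigned int dma_pad)
+{
+	unsigned int pad = (req_len % dma_pad) ? dma_pad : 0;
+
+	return req_len + pad;
+}
+#endif /* example sketch */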
+
+static int
+dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
+{
+	void *p;
+	uint16 pktsz;
+	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
+	dmaaddr_t pa;
+	uint32 pktlen;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 alloced = 0;
+	unsigned long flags;
+	dhd_dma_buf_t retbuf;
+	void *dmah = NULL;
+	uint32 pktid;
+	void *map_handle;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+	bool non_ioctl_resp_buf = 0;
+	dhd_pkttype_t buf_type;
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+		return -1;
+	}
+	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
+
+	if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
+		buf_type = PKTTYPE_IOCTL_RX;
+	else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
+		buf_type = PKTTYPE_EVENT_RX;
+	else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
+		buf_type = PKTTYPE_TSBUF_RX;
+	else {
+		DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
+		return -1;
+	}
+
+
+	if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
+		non_ioctl_resp_buf = TRUE;
+	else
+		non_ioctl_resp_buf = FALSE;
+
+	if (non_ioctl_resp_buf) {
+		/* Allocate packet for non-ioctl-resp buffer post */
+		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+	} else {
+		/* Allocate packet for ctrl/ioctl buffer post */
+		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
+	}
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+	if (!non_ioctl_resp_buf) {
+		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
+			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
+			return -1;
+		}
+		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
+		p = retbuf.va;
+		pktlen = retbuf.len;
+		pa = retbuf.pa;
+		dmah = retbuf.dmah;
+	} else
+#endif /* IOCTLRESP_USE_CONSTMEM */
+	{
+#ifdef DHD_USE_STATIC_CTRLBUF
+		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
+#else
+		p = PKTGET(dhd->osh, pktsz, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+		if (p == NULL) {
+			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
+				__FUNCTION__, __LINE__, non_ioctl_resp_buf ?
+				"EVENT" : "IOCTL RESP"));
+			dhd->rx_pktgetfail++;
+			return -1;
+		}
+
+		pktlen = PKTLEN(dhd->osh, p);
+
+		if (SECURE_DMA_ENAB(dhd->osh)) {
+			DHD_GENERAL_LOCK(dhd, flags);
+			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+		}
+#ifndef BCM_SECURE_DMA
+		else
+			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+#endif /* #ifndef BCM_SECURE_DMA */
+
+		if (PHYSADDRISZERO(pa)) {
+			DHD_ERROR(("Invalid physaddr 0\n"));
+			ASSERT(0);
+			goto free_pkt_return;
+		}
+
+#ifdef DMAMAP_STATS
+		switch (buf_type) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+			case PKTTYPE_IOCTL_RX:
+				dhd->dma_stats.ioctl_rx++;
+				dhd->dma_stats.ioctl_rx_sz += pktlen;
+				break;
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+			case PKTTYPE_EVENT_RX:
+				dhd->dma_stats.event_rx++;
+				dhd->dma_stats.event_rx_sz += pktlen;
+				break;
+			case PKTTYPE_TSBUF_RX:
+				dhd->dma_stats.tsbuf_rx++;
+				dhd->dma_stats.tsbuf_rx_sz += pktlen;
+				break;
+			default:
+				break;
+		}
+#endif /* DMAMAP_STATS */
+
+	}
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (rxbuf_post == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
+			__FUNCTION__, __LINE__));
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+		if (non_ioctl_resp_buf)
+#endif /* IOCTLRESP_USE_CONSTMEM */
+		{
+			if (SECURE_DMA_ENAB(dhd->osh)) {
+				DHD_GENERAL_LOCK(dhd, flags);
+				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+					ring->dma_buf.secdma, 0);
+				DHD_GENERAL_UNLOCK(dhd, flags);
+			} else {
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+			}
+		}
+		goto free_pkt_return;
+	}
+
+	/* CMN msg header */
+	rxbuf_post->cmn_hdr.msg_type = msg_type;
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+	if (!non_ioctl_resp_buf) {
+		map_handle = dhd->prot->pktid_map_handle_ioctl;
+		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
+			ring->dma_buf.secdma, buf_type);
+	} else
+#endif /* IOCTLRESP_USE_CONSTMEM */
+	{
+		map_handle = dhd->prot->pktid_ctrl_map;
+		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
+			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
+			buf_type);
+	}
+
+	if (pktid == DHD_PKTID_INVALID) {
+		if (ring->wr == 0) {
+			ring->wr = ring->max_items - 1;
+		} else {
+			ring->wr--;
+			if (ring->wr == 0) {
+				ring->current_phase = ring->current_phase ? 0 :
+					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+			}
+		}
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+		DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
+		goto free_pkt_return;
+	}
+
+#ifdef DHD_PKTID_AUDIT_RING
+	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+	rxbuf_post->cmn_hdr.if_id = 0;
+	rxbuf_post->cmn_hdr.epoch =  ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+	rxbuf_post->cmn_hdr.flags = ring->current_phase;
+
+#if defined(DHD_PCIE_PKTID)
+	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+		if (ring->wr == 0) {
+			ring->wr = ring->max_items - 1;
+		} else {
+			ring->wr--;
+			if (ring->wr == 0) {
+				ring->current_phase = ring->current_phase ? 0 :
+					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+			}
+		}
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef IOCTLRESP_USE_CONSTMEM
+		if (non_ioctl_resp_buf)
+#endif /* IOCTLRESP_USE_CONSTMEM */
+		{
+			if (SECURE_DMA_ENAB(dhd->osh)) {
+				DHD_GENERAL_LOCK(dhd, flags);
+				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+					ring->dma_buf.secdma, 0);
+				DHD_GENERAL_UNLOCK(dhd, flags);
+			} else
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+		}
+		goto free_pkt_return;
+	}
+#endif /* DHD_PCIE_PKTID */
+
+#ifndef IOCTLRESP_USE_CONSTMEM
+	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
+#else
+	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return 1;
+
+free_pkt_return:
+#ifdef IOCTLRESP_USE_CONSTMEM
+	if (!non_ioctl_resp_buf) {
+		free_ioctl_return_buffer(dhd, &retbuf);
+	} else
+#endif
+	{
+		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
+	}
+
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return -1;
+} /* dhd_prot_rxbufpost_ctrl */
+
+static uint16
+dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
+{
+	uint32 i = 0;
+	int32 ret_val;
+
+	DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
+		return 0;
+	}
+
+	while (i < max_to_post) {
+		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
+		if (ret_val < 0)
+			break;
+		i++;
+	}
+	DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
+	return (uint16)i;
+}
+
+static void
+dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int max_to_post;
+
+	DHD_INFO(("ioctl resp buf post\n"));
+	max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
+	if (max_to_post <= 0) {
+		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
+			__FUNCTION__));
+		return;
+	}
+	prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+		MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
+}
+
+static void
+dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int max_to_post;
+
+	/* Use atomic variable to avoid re-entry */
+	if (atomic_read(&dhd_msgbuf_rxbuf_post_event_bufs_running) > 0) {
+		return;
+	}
+	atomic_inc(&dhd_msgbuf_rxbuf_post_event_bufs_running);
+
+	max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
+	if (max_to_post <= 0) {
+		DHD_ERROR(("%s: Cannot post more than max event buffers\n",
+			__FUNCTION__));
+		return;
+	}
+	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+		MSG_TYPE_EVENT_BUF_POST, max_to_post);
+
+	atomic_dec(&dhd_msgbuf_rxbuf_post_event_bufs_running);
+}
+
+static int
+dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
+{
+#ifdef DHD_TIMESYNC
+	dhd_prot_t *prot = dhd->prot;
+	int max_to_post;
+
+	if (prot->active_ipc_version < 7) {
+		DHD_ERROR(("not posting ts buffers, device ipc rev is %d, needs to be at least 7\n",
+			prot->active_ipc_version));
+		return 0;
+	}
+
+	max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
+	if (max_to_post <= 0) {
+		DHD_INFO(("%s: Cannot post more than max ts buffers\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+		MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
+#endif /* DHD_TIMESYNC */
+	return 0;
+}
+
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
+{
+	dhd_prot_t *prot = dhd->prot;
+	bool more = TRUE;
+	uint n = 0;
+	msgbuf_ring_t *ring = prot->d2hring_info_cpln;
+
+	if (ring == NULL)
+		return FALSE;
+	if (ring->inited != TRUE)
+		return FALSE;
+
+	/* Process all the messages - DTOH direction */
+	while (!dhd_is_device_removed(dhd)) {
+		uint8 *msg_addr;
+		uint32 msg_len;
+
+		if (dhd->hang_was_sent) {
+			more = FALSE;
+			break;
+		}
+
+		/* Get the message from ring */
+		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+		if (msg_addr == NULL) {
+			more = FALSE;
+			break;
+		}
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(msg_addr);
+
+		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+			DHD_ERROR(("%s: Error processing info cpl msgbuf of len %d\n",
+				__FUNCTION__, msg_len));
+		}
+
+		/* Update read pointer */
+		dhd_prot_upd_read_idx(dhd, ring);
+
+		/* After batch processing, check RX bound */
+		n += msg_len / ring->item_len;
+		if (n >= bound) {
+			break;
+		}
+	}
+
+	return more;
+}
+
+/** called when DHD needs to check for 'receive complete' messages from the dongle */
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
+{
+	bool more = FALSE;
+	uint n = 0;
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
+	uint16 item_len = ring->item_len;
+	host_rxbuf_cmpl_t *msg = NULL;
+	uint8 *msg_addr;
+	uint32 msg_len;
+	uint16 pkt_cnt, pkt_cnt_newidx;
+	unsigned long flags;
+	dmaaddr_t pa;
+	uint32 len;
+	void *dmah;
+	void *secdma;
+	int ifidx = 0, if_newidx = 0;
+	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
+	uint32 pktid;
+	int i;
+	uint8 sync;
+
+	while (1) {
+		if (dhd_is_device_removed(dhd))
+			break;
+
+		if (dhd->hang_was_sent)
+			break;
+
+		pkt_cnt = 0;
+		pktqhead = pkt_newidx = NULL;
+		pkt_cnt_newidx = 0;
+
+		DHD_SPIN_LOCK(prot->rx_lock, flags);
+
+		/* Get the address of the next message to be read from ring */
+		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+		if (msg_addr == NULL) {
+			DHD_SPIN_UNLOCK(prot->rx_lock, flags);
+			break;
+		}
+
+		while (msg_len > 0) {
+			msg = (host_rxbuf_cmpl_t *)msg_addr;
+
+			/* Wait until DMA completes, then fetch msg_type */
+			sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
+			/*
+			 * Update the curr_rd to the current index in the ring, from where
+			 * the work item is fetched. This way if the fetched work item
+			 * fails in LIVELOCK, we can print the exact read index in the ring
+			 * that shows up the corrupted work item.
+			 */
+			if ((ring->curr_rd + 1) >= ring->max_items) {
+				ring->curr_rd = 0;
+			} else {
+				ring->curr_rd += 1;
+			}
+
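+			/* A zero return from the d2h sync callback means this work item
+			 * could not be validated (e.g. its DMA never completed); skip it.
+			 */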
+			if (!sync) {
+				msg_len -= item_len;
+				msg_addr += item_len;
+				continue;
+			}
+
+			pktid = ltoh32(msg->cmn_hdr.request_id);
+
+#ifdef DHD_PKTID_AUDIT_RING
+			DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
+				DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+			pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
+			        len, dmah, secdma, PKTTYPE_DATA_RX);
+			if (!pkt) {
+				msg_len -= item_len;
+				msg_addr += item_len;
+				continue;
+			}
+
+			if (SECURE_DMA_ENAB(dhd->osh))
+				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
+				    dmah, secdma, 0);
+			else
+				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+
+#ifdef DMAMAP_STATS
+			dhd->dma_stats.rxdata--;
+			dhd->dma_stats.rxdata_sz -= len;
+#endif /* DMAMAP_STATS */
+			DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
+				"pktdata %p, metalen %d\n",
+				ltoh32(msg->cmn_hdr.request_id),
+				ltoh16(msg->data_offset),
+				ltoh16(msg->data_len), msg->cmn_hdr.if_id,
+				msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
+				ltoh16(msg->metadata_len)));
+
+			pkt_cnt++;
+			msg_len -= item_len;
+			msg_addr += item_len;
+
+#if DHD_DBG_SHOW_METADATA
+			if (prot->metadata_dbg && prot->rx_metadata_offset &&
+			        msg->metadata_len) {
+				uchar *ptr;
+				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
+				/* header followed by data */
+				bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
+				dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
+			}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+			/* data_offset from buf start */
+			if (ltoh16(msg->data_offset)) {
+				/* data offset given from dongle after split rx */
+				PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
+			}
+			else if (prot->rx_dataoffset) {
+				/* DMA RX offset updated through shared area */
+				PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
+			}
+			/* Actual length of the packet */
+			PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
+#if defined(WL_MONITOR)
+			if (dhd_monitor_enabled(dhd, ifidx) &&
+				(msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
+				dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
+				continue;
+			}
+#endif /* WL_MONITOR */
+
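+			/* Chain packets that belong to the same interface; a packet for a
+			 * different ifidx ends this batch and is dispatched separately via
+			 * pkt_newidx/if_newidx below.
+			 */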
+			if (!pktqhead) {
+				pktqhead = prevpkt = pkt;
+				ifidx = msg->cmn_hdr.if_id;
+			} else {
+				if (ifidx != msg->cmn_hdr.if_id) {
+					pkt_newidx = pkt;
+					if_newidx = msg->cmn_hdr.if_id;
+					pkt_cnt--;
+					pkt_cnt_newidx = 1;
+					break;
+				} else {
+					PKTSETNEXT(dhd->osh, prevpkt, pkt);
+					prevpkt = pkt;
+				}
+			}
+
+#ifdef DHD_TIMESYNC
+			if (dhd->prot->rx_ts_log_enabled) {
+				ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
+				dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, ts->low, ts->high);
+			}
+#endif /* DHD_TIMESYNC */
+		}
+
+		/* roll back read pointer for unprocessed message */
+		if (msg_len > 0) {
+			if (ring->rd < msg_len / item_len)
+				ring->rd = ring->max_items - msg_len / item_len;
+			else
+				ring->rd -= msg_len / item_len;
+		}
+
+		/* Update read pointer */
+		dhd_prot_upd_read_idx(dhd, ring);
+
+		DHD_SPIN_UNLOCK(prot->rx_lock, flags);
+
+		pkt = pktqhead;
+		for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
+			nextpkt = PKTNEXT(dhd->osh, pkt);
+			PKTSETNEXT(dhd->osh, pkt, NULL);
+#ifdef DHD_LB_RXP
+			dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
+#elif defined(DHD_RX_CHAINING)
+			dhd_rxchain_frame(dhd, pkt, ifidx);
+#else
+			dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+#endif /* DHD_LB_RXP */
+		}
+
+		if (pkt_newidx) {
+#ifdef DHD_LB_RXP
+			dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
+#elif defined(DHD_RX_CHAINING)
+			dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
+#else
+			dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
+#endif /* DHD_LB_RXP */
+		}
+
+		pkt_cnt += pkt_cnt_newidx;
+
+		/* Post another set of rxbufs to the device */
+		dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
+
+		/* After batch processing, check RX bound */
+		n += pkt_cnt;
+		if (n >= bound) {
+			more = TRUE;
+			break;
+		}
+	}
+
+	/* Call lb_dispatch only if packets are queued */
+	if (n) {
+		DHD_LB_DISPATCH_RX_COMPL(dhd);
+		DHD_LB_DISPATCH_RX_PROCESS(dhd);
+	}
+
+	return more;
+}
+
+/**
+ * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
+ */
+void
+dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
+{
+	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
+
+	if (ring == NULL) {
+		DHD_ERROR(("%s: NULL txflowring. exiting...\n",  __FUNCTION__));
+		return;
+	}
+	/* Update read pointer */
+	if (dhd->dma_d2h_ring_upd_support) {
+		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+	}
+
+	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
+		ring->idx, flowid, ring->wr, ring->rd));
+
+	/* Need more logic here, but for now use it directly */
+	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
+}
+
+/** called when DHD needs to check for 'transmit complete' messages from the dongle */
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
+{
+	bool more = TRUE;
+	uint n = 0;
+	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
+
+	/* Process all the messages - DTOH direction */
+	while (!dhd_is_device_removed(dhd)) {
+		uint8 *msg_addr;
+		uint32 msg_len;
+
+		if (dhd->hang_was_sent) {
+			more = FALSE;
+			break;
+		}
+
+		/* Get the address of the next message to be read from ring */
+		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+		if (msg_addr == NULL) {
+			more = FALSE;
+			break;
+		}
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(msg_addr);
+
+		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+				__FUNCTION__, ring->name, msg_addr, msg_len));
+		}
+
+		/* Write to dngl rd ptr */
+		dhd_prot_upd_read_idx(dhd, ring);
+
+		/* After batch processing, check bound */
+		n += msg_len / ring->item_len;
+		if (n >= bound) {
+			break;
+		}
+	}
+
+	DHD_LB_DISPATCH_TX_COMPL(dhd);
+
+	return more;
+}
+
+int BCMFASTPATH
+dhd_prot_process_trapbuf(dhd_pub_t *dhd)
+{
+	uint32 data;
+	dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
+
+	/* Interrupts can come in before this struct
+	 *  has been initialized.
+	 */
+	if (trap_addr->va == NULL) {
+		DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
+		return 0;
+	}
+
+	OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
+	data = *(uint32 *)(trap_addr->va);
+
+	if (data & D2H_DEV_FWHALT) {
+		DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
+		if (data & D2H_DEV_EXT_TRAP_DATA)
+		{
+			if (dhd->extended_trap_data) {
+				OSL_CACHE_INV((void *)trap_addr->va,
+				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+				memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
+				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+			}
+			DHD_ERROR(("Extended trap data available\n"));
+		}
+		return data;
+	}
+	return 0;
+}
+
+/** called when DHD needs to check for 'ioctl complete' messages from the dongle */
+int BCMFASTPATH
+dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
+
+	/* Process all the messages - DTOH direction */
+	while (!dhd_is_device_removed(dhd)) {
+		uint8 *msg_addr;
+		uint32 msg_len;
+
+		if (dhd->hang_was_sent) {
+			break;
+		}
+
+		/* Get the address of the next message to be read from ring */
+		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+		if (msg_addr == NULL) {
+			break;
+		}
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(msg_addr);
+		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+				__FUNCTION__, ring->name, msg_addr, msg_len));
+		}
+
+		/* Write to dngl rd ptr */
+		dhd_prot_upd_read_idx(dhd, ring);
+	}
+
+	return 0;
+}
+
+/**
+ * Consume messages out of the D2H ring. Ensure that the message's DMA to host
+ * memory has completed, before invoking the message handler via a table lookup
+ * of the cmn_msg_hdr::msg_type.
+ */
+static int BCMFASTPATH
+dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
+{
+	uint32 buf_len = len;
+	uint16 item_len;
+	uint8 msg_type;
+	cmn_msg_hdr_t *msg = NULL;
+	int ret = BCME_OK;
+
+	ASSERT(ring);
+	item_len = ring->item_len;
+	if (item_len == 0) {
+		DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
+			__FUNCTION__, ring->idx, item_len, buf_len));
+		return BCME_ERROR;
+	}
+
+	while (buf_len > 0) {
+		if (dhd->hang_was_sent) {
+			ret = BCME_ERROR;
+			goto done;
+		}
+
+		msg = (cmn_msg_hdr_t *)buf;
+
+		/* Wait until DMA completes, then fetch msg_type */
+		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
+
+		/*
+		 * Update the curr_rd to the current index in the ring, from where
+		 * the work item is fetched. This way if the fetched work item
+		 * fails in LIVELOCK, we can print the exact read index in the ring
+		 * that shows up the corrupted work item.
+		 */
+		if ((ring->curr_rd + 1) >= ring->max_items) {
+			ring->curr_rd = 0;
+		} else {
+			ring->curr_rd += 1;
+		}
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(buf + item_len);
+
+		DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
+			msg_type, item_len, buf_len));
+
+		if (msg_type == MSG_TYPE_LOOPBACK) {
+			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
+			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
+		}
+
+		ASSERT(msg_type < DHD_PROT_FUNCS);
+		if (msg_type >= DHD_PROT_FUNCS) {
+			DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
+				__FUNCTION__, msg_type, item_len, buf_len));
+			ret = BCME_ERROR;
+			goto done;
+		}
+
+		if (table_lookup[msg_type]) {
+			table_lookup[msg_type](dhd, buf);
+		}
+
+		if (buf_len < item_len) {
+			ret = BCME_ERROR;
+			goto done;
+		}
+		buf_len = buf_len - item_len;
+		buf = buf + item_len;
+	}
+
+done:
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_commit(dhd);
+#endif
+
+	return ret;
+} /* dhd_prot_process_msgtype */
+
+static void
+dhd_prot_noop(dhd_pub_t *dhd, void *msg)
+{
+	return;
+}
+
+/** called on MSG_TYPE_RING_STATUS message received from dongle */
+static void
+dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
+{
+	pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
+	uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
+	uint16 status = ltoh16(ring_status->compl_hdr.status);
+	uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
+
+	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
+		request_id, status, ring_id, ltoh16(ring_status->write_idx)));
+
+	if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
+		return;
+	if (status == BCMPCIE_BAD_PHASE) {
+		/* bad phase reported by the dongle */
+		DHD_ERROR(("Bad phase\n"));
+	}
+	if (status != BCMPCIE_BADOPTION)
+		return;
+
+	if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
+		if (dhd->prot->h2dring_info_subn != NULL) {
+			if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
+				DHD_ERROR(("H2D ring create failed for info ring\n"));
+				dhd->prot->h2dring_info_subn->create_pending = FALSE;
+			}
+			else
+				DHD_ERROR(("ring create response for info submit ring, but create not pending\n"));
+		} else {
+			DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
+		}
+	}
+	else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
+		if (dhd->prot->d2hring_info_cpln != NULL) {
+			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
+				DHD_ERROR(("D2H ring create failed for info ring\n"));
+				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+			}
+			else
+				DHD_ERROR(("ring create response for info cpl ring, but create not pending\n"));
+		} else {
+			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
+		}
+	}
+	else {
+		DHD_ERROR(("don't know how to pair this with the original request\n"));
+	}
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
+static void
+dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
+{
+	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
+	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
+		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
+		gen_status->compl_hdr.flow_ring_id));
+
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+/**
+ * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
+ * dongle received the ioctl message in dongle memory.
+ */
+static void
+dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
+{
+	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
+	unsigned long flags;
+#ifdef DHD_PKTID_AUDIT_RING
+	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
+
+	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
+	if (pktid != DHD_IOCTL_REQ_PKTID) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#else
+		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
+			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+	}
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
+		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
+		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
+	} else {
+		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
+			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+		prhex("dhd_prot_ioctack_process:",
+			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+	}
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
+		ioct_ack->compl_hdr.flow_ring_id));
+	if (ioct_ack->compl_hdr.status != 0) {
+		DHD_ERROR(("got an error status for the ioctl request; this still needs to be handled\n"));
+	}
+#ifdef REPORT_FATAL_TIMEOUTS
+	else {
+		dhd_stop_bus_timer(dhd);
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+}
+
+/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
+static void
+dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint32 pkt_id, xt_id;
+	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
+	void *pkt;
+	unsigned long flags;
+	dhd_dma_buf_t retbuf;
+#ifdef REPORT_FATAL_TIMEOUTS
+	uint16	dhd_xt_id;
+#endif
+
+	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
+
+	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
+
+#ifdef DHD_PKTID_AUDIT_RING
+#ifndef IOCTLRESP_USE_CONSTMEM
+	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
+		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#else
+	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
+		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
+		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
+		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
+			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+		prhex("dhd_prot_ioctcmplt_process:",
+			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return;
+	}
+
+	/* Clear Response pending bit */
+	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
+
+#ifndef IOCTLRESP_USE_CONSTMEM
+	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
+#else
+	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
+	pkt = retbuf.va;
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+	if (!pkt) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
+		prhex("dhd_prot_ioctcmplt_process:",
+			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+		return;
+	}
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
+	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
+	xt_id = ltoh16(ioct_resp->trans_id);
+
+	if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
+		DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
+			__FUNCTION__, xt_id, prot->ioctl_trans_id,
+			prot->curr_ioctl_cmd, ioct_resp->cmd));
+#ifdef REPORT_FATAL_TIMEOUTS
+		dhd_stop_cmd_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+		dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
+		dhd_prot_debug_info_print(dhd);
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
+			dhd_bus_mem_dump(dhd);
+		}
+#else
+		ASSERT(0);
+#endif /* DHD_FW_COREDUMP */
+		dhd_schedule_reset(dhd);
+		goto exit;
+	}
+#ifdef REPORT_FATAL_TIMEOUTS
+	dhd_xt_id = dhd_get_request_id(dhd);
+	if (xt_id == dhd_xt_id) {
+		dhd_stop_cmd_timer(dhd);
+	} else {
+		DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d",
+			__FUNCTION__, xt_id, dhd_xt_id));
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
+		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
+
+	if (prot->ioctl_resplen > 0) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
+#else
+		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+	}
+
+	/* wake up any dhd_os_ioctl_resp_wait() */
+	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
+
+exit:
+#ifndef IOCTLRESP_USE_CONSTMEM
+	dhd_prot_packet_free(dhd, pkt,
+		PKTTYPE_IOCTL_RX, FALSE);
+#else
+	free_ioctl_return_buffer(dhd, &retbuf);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+
+	/* Post another ioctl buf to the device */
+	if (prot->cur_ioctlresp_bufs_posted > 0) {
+		prot->cur_ioctlresp_bufs_posted--;
+	}
+
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+}
+
+/** called on MSG_TYPE_TX_STATUS message received from dongle */
+static void BCMFASTPATH
+dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
+{
+	dhd_prot_t *prot = dhd->prot;
+	host_txbuf_cmpl_t * txstatus;
+	unsigned long flags;
+	uint32 pktid;
+	void *pkt;
+	dmaaddr_t pa;
+	uint32 len;
+	void *dmah;
+	void *secdma;
+	bool pkt_fate;
+#ifdef DEVICE_TX_STUCK_DETECT
+	flow_ring_node_t *flow_ring_node;
+	uint16 flowid;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+
+	txstatus = (host_txbuf_cmpl_t *)msg;
+#ifdef DEVICE_TX_STUCK_DETECT
+	flowid = txstatus->compl_hdr.flow_ring_id;
+	flow_ring_node = DHD_FLOW_RING(dhd, flowid);
+	/**
+	 * Since we got a completion message on this flowid,
+	 * update tx_cmpl time stamp
+	 */
+	flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+	/* locks required to protect circular buffer accesses */
+	DHD_GENERAL_LOCK(dhd, flags);
+	pktid = ltoh32(txstatus->cmn_hdr.request_id);
+	pkt_fate = TRUE;
+
+#ifdef DHD_PKTID_AUDIT_RING
+	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
+			DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
+	if (prot->active_tx_count) {
+		prot->active_tx_count--;
+
+		/* Release the Lock when no more tx packets are pending */
+		if (prot->active_tx_count == 0)
+			 DHD_TXFL_WAKE_UNLOCK(dhd);
+	} else {
+		DHD_ERROR(("Extra packets are freed\n"));
+	}
+
+	ASSERT(pktid != 0);
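+	/* With tx-completion load balancing enabled, hand the packet to the tx_compl
+	 * workq so the completion tasklet can unmap and free it; otherwise (or if the
+	 * workq is full) fall through and free the packet inline below.
+	 */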
+#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
+	{
+		int elem_ix;
+		void **elem;
+		bcm_workq_t *workq;
+		dmaaddr_t pa;
+		uint32 pa_len;
+
+		pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map,
+			pktid, pa, pa_len, dmah, secdma, PKTTYPE_DATA_TX);
+
+		workq = &prot->tx_compl_prod;
+		/*
+		 * Produce the packet into the tx_compl workq for the tx compl tasklet
+		 * to consume.
+		 */
+		OSL_PREFETCH(PKTTAG(pkt));
+
+		/* fetch next available slot in workq */
+		elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+
+		DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
+		DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), pa_len);
+
+		if (elem_ix == BCM_RING_FULL) {
+			DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
+			goto workq_ring_full;
+		}
+
+		elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
+		*elem = pkt;
+
+		smp_wmb();
+
+		/* Sync WR index to consumer if the SYNC threshold has been reached */
+		if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
+			bcm_workq_prod_sync(workq);
+			prot->tx_compl_prod_sync = 0;
+		}
+
+		DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
+			__FUNCTION__, pkt, prot->tx_compl_prod_sync));
+
+		DHD_GENERAL_UNLOCK(dhd, flags);
+
+		return;
+	}
+
+workq_ring_full:
+
+#endif /* DHD_LB_TXC && !BCM_SECURE_DMA */
+
+	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+
+	if (pkt) {
+		if (SECURE_DMA_ENAB(dhd->osh)) {
+			int offset = 0;
+			BCM_REFERENCE(offset);
+
+			if (dhd->prot->tx_metadata_offset)
+				offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
+			SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
+				(uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
+				secdma, offset);
+		} else
+			DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+#ifdef DMAMAP_STATS
+		dhd->dma_stats.txdata--;
+		dhd->dma_stats.txdata_sz -= len;
+#endif /* DMAMAP_STATS */
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+		if (dhd->d11_tx_status) {
+			uint16 tx_status;
+
+			tx_status = ltoh16(txstatus->compl_hdr.status) &
+				WLFC_CTL_PKTFLAG_MASK;
+			pkt_fate = (tx_status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE;
+
+			DHD_DBG_PKT_MON_TX_STATUS(dhd, pkt, pktid, tx_status);
+#ifdef DHD_PKT_LOGGING
+			DHD_PKTLOG_TXS(dhd, pkt, pktid, tx_status);
+#endif /* DHD_PKT_LOGGING */
+		}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+#if defined(BCMPCIE)
+		dhd_txcomplete(dhd, pkt, pkt_fate);
+#endif /* BCMPCIE */
+
+#if DHD_DBG_SHOW_METADATA
+		if (dhd->prot->metadata_dbg &&
+		    dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+			uchar *ptr;
+			/* The Ethernet header of TX frame was copied and removed.
+			 * Here, move the data pointer forward by Ethernet header size.
+			 */
+			PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
+			ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
+			bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
+			dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
+		}
+#endif /* DHD_DBG_SHOW_METADATA */
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		PKTFREE(dhd->osh, pkt, TRUE);
+		DHD_GENERAL_LOCK(dhd, flags);
+		DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
+		txstatus->tx_status);
+
+#ifdef DHD_TIMESYNC
+		if (dhd->prot->tx_ts_log_enabled) {
+			ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
+			dhd_timesync_log_tx_timestamp(dhd->ts,
+				txstatus->compl_hdr.flow_ring_id,
+				txstatus->cmn_hdr.if_id,
+				ts->low, ts->high);
+		}
+#endif /* DHD_TIMESYNC */
+	}
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return;
+} /* dhd_prot_txstatus_process */
+
+/** called on MSG_TYPE_WL_EVENT message received from dongle */
+static void
+dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
+{
+	wlevent_req_msg_t *evnt;
+	uint32 bufid;
+	uint16 buflen;
+	int ifidx = 0;
+	void* pkt;
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Event complete header */
+	evnt = (wlevent_req_msg_t *)msg;
+	bufid = ltoh32(evnt->cmn_hdr.request_id);
+
+#ifdef DHD_PKTID_AUDIT_RING
+	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
+			DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	buflen = ltoh16(evnt->event_data_len);
+
+	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
+
+	/* Post another rxbuf to the device */
+	if (prot->cur_event_bufs_posted)
+		prot->cur_event_bufs_posted--;
+	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+	/* locks required to protect pktid_map */
+	DHD_GENERAL_LOCK(dhd, flags);
+	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (!pkt) {
+		DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
+		return;
+	}
+
+	/* DMA RX offset updated through shared area */
+	if (dhd->prot->rx_dataoffset)
+		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+
+	PKTSETLEN(dhd->osh, pkt, buflen);
+
+	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+}
+
+/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
+static void BCMFASTPATH
+dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
+{
+	info_buf_resp_t *resp;
+	uint32 pktid;
+	uint16 buflen;
+	void * pkt;
+	unsigned long flags;
+
+	resp = (info_buf_resp_t *)buf;
+	pktid = ltoh32(resp->cmn_hdr.request_id);
+	buflen = ltoh16(resp->info_data_len);
+
+#ifdef DHD_PKTID_AUDIT_RING
+	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
+			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
+		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
+		dhd->prot->rx_dataoffset));
+
+	if (!dhd->prot->infobufpost) {
+		DHD_ERROR(("no infobufs posted, but received a completion\n"));
+		return;
+	}
+
+	dhd->prot->infobufpost--;
+	dhd_prot_infobufpost(dhd);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (!pkt)
+		return;
+
+	/* DMA RX offset updated through shared area */
+	if (dhd->prot->rx_dataoffset)
+		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+
+	PKTSETLEN(dhd->osh, pkt, buflen);
+
+	/* Info ring "debug" data, which is not an 802.3 frame, is sent with a
+	 * special ifidx of -1.  This is purely internal to dhd, to get the data from
+	 * here (dhd_prot_process_infobuf_complete) to dhd_linux.c:dhd_rx_frame().
+	 */
+	dhd_bus_rx_frame(dhd->bus, pkt, DHD_EVENT_IF /* ifidx HACK */, 1);
+}
+
+/** Stop protocol: sync w/dongle state. */
+void dhd_prot_stop(dhd_pub_t *dhd)
+{
+	ASSERT(dhd);
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+}
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+void BCMFASTPATH
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+	return;
+}
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+	return 0;
+}
+
+
+#define PKTBUF pktbuf
+
+/**
+ * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
+ * the corresponding flow ring.
+ */
+int BCMFASTPATH
+dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
+{
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	host_txbuf_post_t *txdesc = NULL;
+	dmaaddr_t pa, meta_pa;
+	uint8 *pktdata;
+	uint32 pktlen;
+	uint32 pktid;
+	uint8	prio;
+	uint16 flowid = 0;
+	uint16 alloced = 0;
+	uint16	headroom;
+	msgbuf_ring_t *ring;
+	flow_ring_table_t *flow_ring_table;
+	flow_ring_node_t *flow_ring_node;
+
+	if (dhd->flow_ring_table == NULL) {
+		return BCME_NORESOURCE;
+	}
+
+	flowid = DHD_PKT_GET_FLOWID(PKTBUF);
+	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+
+	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Create a unique 32-bit packet id */
+	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
+		PKTBUF, PKTTYPE_DATA_TX);
+#if defined(DHD_PCIE_PKTID)
+	if (pktid == DHD_PKTID_INVALID) {
+		DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
+		/*
+		 * If we return an error here, the caller will requeue the packet.
+		 * The original SKB has not been freed, so the caller can safely
+		 * resubmit the same packet later.
+		 */
+		goto err_no_res_pktfree;
+	}
+#endif /* DHD_PCIE_PKTID */
+
+	/* Reserve space in the circular buffer */
+	txdesc = (host_txbuf_post_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+	if (txdesc == NULL) {
+		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
+			__FUNCTION__, __LINE__, prot->active_tx_count));
+		goto err_free_pktid;
+	}
+
+#ifdef DBG_PKT_MON
+	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
+#endif /* DBG_PKT_MON */
+#ifdef DHD_PKT_LOGGING
+	DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
+#endif /* DHD_PKT_LOGGING */
+
+
+	/* Extract the data pointer and length information */
+	pktdata = PKTDATA(dhd->osh, PKTBUF);
+	pktlen  = PKTLEN(dhd->osh, PKTBUF);
+
+	/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
+	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
+
+	/* Extract the ethernet header and adjust the data pointer and length */
+	pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+	pktlen -= ETHER_HDR_LEN;
+
+	/* Map the data pointer to a DMA-able address */
+	if (SECURE_DMA_ENAB(dhd->osh)) {
+		int offset = 0;
+		BCM_REFERENCE(offset);
+
+		if (prot->tx_metadata_offset)
+			offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
+
+		pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
+			DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
+	}
+#ifndef BCM_SECURE_DMA
+	else
+		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+#endif /* #ifndef BCM_SECURE_DMA */
+
+	if (PHYSADDRISZERO(pa)) {
+		DHD_ERROR(("%s: Something really bad, unless 0 is "
+			"a valid phyaddr for pa\n", __FUNCTION__));
+		ASSERT(0);
+		goto err_rollback_idx;
+	}
+
+#ifdef DMAMAP_STATS
+	dhd->dma_stats.txdata++;
+	dhd->dma_stats.txdata_sz += pktlen;
+#endif /* DMAMAP_STATS */
+	/* No need to lock. Save the rest of the packet's metadata */
+	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
+	    pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
+
+#ifdef TXP_FLUSH_NITEMS
+	if (ring->pend_items_count == 0)
+		ring->start_addr = (void *)txdesc;
+	ring->pend_items_count++;
+#endif
+
+	/* Form the Tx descriptor message buffer */
+
+	/* Common message hdr */
+	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
+	txdesc->cmn_hdr.if_id = ifidx;
+	txdesc->cmn_hdr.flags = ring->current_phase;
+
+	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
+	prio = (uint8)PKTPRIO(PKTBUF);
+
+
+	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
+	txdesc->seg_cnt = 1;
+
+	txdesc->data_len = htol16((uint16) pktlen);
+	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
+
+	/* Move data pointer to keep ether header in local PKTBUF for later reference */
+	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+
+	/* Handle Tx metadata */
+	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
+	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
+		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
+		prot->tx_metadata_offset, headroom));
+
+	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
+		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
+
+		/* Adjust the data pointer to account for meta data in DMA_MAP */
+		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+		if (SECURE_DMA_ENAB(dhd->osh)) {
+			meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+				prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
+				0, ring->dma_buf.secdma);
+		}
+#ifndef BCM_SECURE_DMA
+		else
+			meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+				prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+#endif /* #ifndef BCM_SECURE_DMA */
+
+		if (PHYSADDRISZERO(meta_pa)) {
+			/* Unmap the data pointer to a DMA-able address */
+			if (SECURE_DMA_ENAB(dhd->osh)) {
+
+				int offset = 0;
+				BCM_REFERENCE(offset);
+
+				if (prot->tx_metadata_offset) {
+					offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
+				}
+
+				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
+					DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
+			}
+#ifndef BCM_SECURE_DMA
+			else {
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
+			}
+#endif /* #ifndef BCM_SECURE_DMA */
+#ifdef TXP_FLUSH_NITEMS
+			/* update pend_items_count */
+			ring->pend_items_count--;
+#endif /* TXP_FLUSH_NITEMS */
+
+			DHD_ERROR(("%s: Something really bad, unless 0 is "
+				"a valid phyaddr for meta_pa\n", __FUNCTION__));
+			ASSERT(0);
+			goto err_rollback_idx;
+		}
+
+		/* Adjust the data pointer back to original value */
+		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+		txdesc->metadata_buf_len = prot->tx_metadata_offset;
+		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
+		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
+	} else {
+		txdesc->metadata_buf_len = htol16(0);
+		txdesc->metadata_buf_addr.high_addr = 0;
+		txdesc->metadata_buf_addr.low_addr = 0;
+	}
+
+#ifdef DHD_PKTID_AUDIT_RING
+	DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	txdesc->cmn_hdr.request_id = htol32(pktid);
+
+	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
+		txdesc->cmn_hdr.request_id));
+
+	/* Update the write pointer in TCM & ring bell */
+#ifdef TXP_FLUSH_NITEMS
+	/* Flush if we have either hit the txp_threshold or if this msg is */
+	/* occupying the last slot in the flow_ring - before wrap around.  */
+	if ((ring->pend_items_count == prot->txp_threshold) ||
+		((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
+		dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
+	}
+#else
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
+#endif
+
+	prot->active_tx_count++;
+
+	/*
+	 * Take a wake lock; do not sleep if we have at least one packet
+	 * to finish.
+	 */
+	if (prot->active_tx_count >= 1)
+		DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return BCME_OK;
+
+err_rollback_idx:
+	/* roll back write pointer for unprocessed message */
+	if (ring->wr == 0) {
+		ring->wr = ring->max_items - 1;
+	} else {
+		ring->wr--;
+		if (ring->wr == 0) {
+			DHD_INFO(("%s: flipping the phase now\n", ring->name));
+			ring->current_phase = ring->current_phase ?
+				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+		}
+	}
+
+err_free_pktid:
+#if defined(DHD_PCIE_PKTID)
+	{
+		void *dmah;
+		void *secdma;
+		/* Free up the PKTID. physaddr and pktlen will be garbage. */
+		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
+	}
+
+err_no_res_pktfree:
+#endif /* DHD_PCIE_PKTID */
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+	return BCME_NORESOURCE;
+} /* dhd_prot_txdata */
+
+/* called with a lock */
+/** optimization to write "n" tx items at a time to ring */
+void BCMFASTPATH
+dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
+{
+#ifdef TXP_FLUSH_NITEMS
+	unsigned long flags = 0;
+	flow_ring_table_t *flow_ring_table;
+	flow_ring_node_t *flow_ring_node;
+	msgbuf_ring_t *ring;
+
+	if (dhd->flow_ring_table == NULL) {
+		return;
+	}
+
+	if (!in_lock) {
+		DHD_GENERAL_LOCK(dhd, flags);
+	}
+
+	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+	if (ring->pend_items_count) {
+		/* update ring's WR index and ring doorbell to dongle */
+		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
+			ring->pend_items_count);
+		ring->pend_items_count = 0;
+		ring->start_addr = NULL;
+	}
+
+	if (!in_lock) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+	}
+#endif /* TXP_FLUSH_NITEMS */
+}
+
+#undef PKTBUF	/* Only defined in the above routine */
+
+int BCMFASTPATH
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
+{
+	return 0;
+}
+
+/** post a set of receive buffers to the dongle */
+static void BCMFASTPATH
+dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
+{
+	dhd_prot_t *prot = dhd->prot;
+#if defined(DHD_LB_RXC)
+	int elem_ix;
+	uint32 *elem;
+	bcm_workq_t *workq;
+
+	workq = &prot->rx_compl_prod;
+
+	/* Produce the work item */
+	elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+	if (elem_ix == BCM_RING_FULL) {
+		DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
+		ASSERT(0);
+		return;
+	}
+
+	elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
+	*elem = pktid;
+
+	smp_wmb();
+
+	/* Sync WR index to consumer if the SYNC threshold has been reached */
+	if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
+		bcm_workq_prod_sync(workq);
+		prot->rx_compl_prod_sync = 0;
+	}
+
+	DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
+		__FUNCTION__, pktid, prot->rx_compl_prod_sync));
+
+#endif /* DHD_LB_RXC */
+
+	if (prot->rxbufpost >= rxcnt) {
+		prot->rxbufpost -= (uint16)rxcnt;
+	} else {
+		/* ASSERT(0); */
+		prot->rxbufpost = 0;
+	}
+
+#if !defined(DHD_LB_RXC)
+	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
+		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+#endif /* !DHD_LB_RXC */
+	return;
+}
+
+/* called before an ioctl is sent to the dongle */
+static void
+dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
+{
+	dhd_prot_t *prot = dhd->prot;
+
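+	/* Intercept the "pcie_bus_tput" iovar and patch the host-side DMA buffer
+	 * address and length into its parameter block before it reaches the dongle.
+	 */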
+	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
+		int slen = 0;
+		pcie_bus_tput_params_t *tput_params;
+
+		slen = strlen("pcie_bus_tput") + 1;
+		tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
+		bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
+			sizeof(tput_params->host_buf_addr));
+		tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
+	}
+}
+
+
+/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
+int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	int ret = -1;
+	uint8 action;
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down, nothing to do - busstate: %d, hang_was_sent: %d\n",
+				__FUNCTION__, dhd->busstate, dhd->hang_was_sent));
+		goto done;
+	}
+
+	if (dhd->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (ioc->cmd == WLC_SET_PM) {
+		DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
+	}
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	action = ioc->set;
+
+	dhd_prot_wlioctl_intercept(dhd, ioc, buf);
+
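+	/* Set actions are submitted via dhd_msgbuf_set_ioctl(); queries go through
+	 * dhd_msgbuf_query_ioctl(), which also reports the returned length in ioc->used.
+	 */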
+	if (action & WL_IOCTL_ACTION_SET) {
+		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	} else {
+		ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret;
+	}
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0) {
+		ret = 0;
+	} else {
+		DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
+		dhd->dongle_error = ret;
+	}
+
+	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
+		/* Intercept the wme_dp ioctl here */
+		if (!strcmp(buf, "wme_dp")) {
+			int slen, val = 0;
+
+			slen = strlen("wme_dp") + 1;
+			if (len >= (int)(slen + sizeof(int)))
+				bcopy(((char *)buf + slen), &val, sizeof(int));
+			dhd->wme_dp = (uint8) ltoh32(val);
+		}
+
+	}
+
+done:
+	return ret;
+
+} /* dhd_prot_ioctl */
+
+/** test / loopback */
+
+int
+dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
+{
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 alloced = 0;
+
+	ioct_reqst_hdr_t *ioct_rqst;
+
+	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
+	uint16 msglen = len + hdrlen;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
+	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	ioct_rqst = (ioct_reqst_hdr_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (ioct_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return 0;
+	}
+
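+	/* Fill the request body with an incrementing byte pattern so the loopback
+	 * response (printed when MSG_TYPE_LOOPBACK completes) can be inspected against it.
+	 */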
+	{
+		uint8 *ptr;
+		uint16 i;
+
+		ptr = (uint8 *)ioct_rqst;
+		for (i = 0; i < msglen; i++) {
+			ptr[i] = i % 256;
+		}
+	}
+
+	/* Common msg buf hdr */
+	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
+	ioct_rqst->msg.if_id = 0;
+	ioct_rqst->msg.flags = ring->current_phase;
+
+	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return 0;
+}
+
+/** test / loopback */
+void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
+{
+	if (dmaxfer == NULL)
+		return;
+
+	dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
+	dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
+}
+
+/** test / loopback */
+int
+dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
+{
+	dhd_prot_t *prot = dhdp->prot;
+	dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
+	dmaxref_mem_map_t *dmap = NULL;
+
+	dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
+	if (!dmap) {
+		DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
+		goto mem_alloc_fail;
+	}
+	dmap->srcmem = &(dmaxfer->srcmem);
+	dmap->dstmem = &(dmaxfer->dstmem);
+
+	DMAXFER_FREE(dhdp, dmap);
+	return BCME_OK;
+
+mem_alloc_fail:
+	if (dmap) {
+		MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
+		dmap = NULL;
+	}
+	return BCME_NOMEM;
+} /* dhd_prepare_schedule_dmaxfer_free */
+
+
+/** test / loopback */
+void
+dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
+{
+
+	dhd_dma_buf_free(dhdp, dmmap->srcmem);
+	dhd_dma_buf_free(dhdp, dmmap->dstmem);
+
+	MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
+	dmmap = NULL;
+
+} /* dmaxfer_free_prev_dmaaddr */
+
+
+/** test / loopback */
+int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
+	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
+{
+	uint i;
+	if (!dmaxfer)
+		return BCME_ERROR;
+
+	/* First free up existing buffers */
+	dmaxfer_free_dmaaddr(dhd, dmaxfer);
+
+	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
+		return BCME_NOMEM;
+	}
+
+	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
+		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
+		return BCME_NOMEM;
+	}
+
+	dmaxfer->len = len;
+
+	/* Populate source with a pattern */
+	for (i = 0; i < dmaxfer->len; i++) {
+		((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
+	}
+	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
+
+	dmaxfer->srcdelay = srcdelay;
+	dmaxfer->destdelay = destdelay;
+
+	return BCME_OK;
+} /* dmaxfer_prepare_dmaaddr */
+
+static void
+dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint64 end_usec;
+	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
+
+	BCM_REFERENCE(cmplt);
+	DHD_INFO(("DMA status: %d\n", cmplt->compl_hdr.status));
+	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
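+	/* Compare the source and destination buffers to verify that the DMA loopback
+	 * transferred the data intact; dump both buffers on a mismatch.
+	 */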
+	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
+		if (memcmp(prot->dmaxfer.srcmem.va,
+				prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
+			prhex("XFER SRC: ",
+				prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+			prhex("XFER DST: ",
+				prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+			DHD_ERROR(("DMA failed\n"));
+		}
+		else {
+			if (prot->dmaxfer.d11_lpbk) {
+				DHD_ERROR(("DMA successful with d11 loopback\n"));
+			} else {
+				DHD_ERROR(("DMA successful without d11 loopback\n"));
+			}
+		}
+	}
+	end_usec = OSL_SYSUPTIME_US();
+	dhd_prepare_schedule_dmaxfer_free(dhd);
+	end_usec -= prot->dmaxfer.start_usec;
+	DHD_ERROR(("DMA loopback %d bytes in %llu usec, %u kBps\n",
+		prot->dmaxfer.len, end_usec,
+		(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
+	dhd->prot->dmaxfer.in_progress = FALSE;
+}
+
+/** Test functionality.
+ * Transfers bytes from host to dongle and to host again using DMA
+ * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
+ * by a spinlock.
+ */
+int
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, uint d11_lpbk)
+{
+	unsigned long flags;
+	int ret = BCME_OK;
+	dhd_prot_t *prot = dhd->prot;
+	pcie_dma_xfer_params_t *dmap;
+	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+	if (prot->dmaxfer.in_progress) {
+		DHD_ERROR(("DMA is in progress...\n"));
+		return ret;
+	}
+
+	prot->dmaxfer.in_progress = TRUE;
+	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
+	        &prot->dmaxfer)) != BCME_OK) {
+		prot->dmaxfer.in_progress = FALSE;
+		return ret;
+	}
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
+		/* Undo the setup above so a later request is not blocked forever */
+		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+		prot->dmaxfer.in_progress = FALSE;
+		return BCME_ERROR;
+	}
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	dmap = (pcie_dma_xfer_params_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (dmap == NULL) {
+		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+		prot->dmaxfer.in_progress = FALSE;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
+	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
+	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	dmap->cmn_hdr.flags = ring->current_phase;
+	ring->seqnum++;
+
+	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
+	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
+	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
+	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
+	dmap->xfer_len = htol32(prot->dmaxfer.len);
+	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
+	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
+	prot->dmaxfer.d11_lpbk = d11_lpbk ? 1 : 0;
+	dmap->flags = (prot->dmaxfer.d11_lpbk << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT)
+			& PCIE_DMA_XFER_FLG_D11_LPBK_MASK;
+
+	/* update ring's WR index and ring doorbell to dongle */
+	prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
+	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	DHD_INFO(("DMA Started...\n"));
+
+	return BCME_OK;
+} /* dhdmsgbuf_dmaxfer_req */
+
+/** Called in the process of submitting an ioctl to the dongle */
+static int
+dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	int ret = 0;
+	uint copylen = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!len || !*(uint8 *)buf) {
+			DHD_ERROR(("%s(): zero-length or empty iovar name, bailing\n", __FUNCTION__));
+			ret = BCME_BADARG;
+			goto done;
+		}
+
+		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+		copylen = MIN(len, BCME_STRLEN);
+
+		if ((len >= strlen("bcmerrorstr")) &&
+			(!strcmp((char *)buf, "bcmerrorstr"))) {
+
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
+			*(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
+
+			goto done;
+		} else if ((len >= strlen("bcmerror")) &&
+			!strcmp((char *)buf, "bcmerror")) {
+
+			*(uint32 *)buf = dhd->dongle_error;
+
+			goto done;
+		}
+	}
+
+
+	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
+	    action, ifidx, cmd, len));
+#ifdef REPORT_FATAL_TIMEOUTS
+	/*
+	 * These timers "should" be started before sending H2D interrupt.
+	 * Think of the scenario where H2D interrupt is fired and the Dongle
+	 * responds back immediately. From the DPC we would stop the cmd, bus
+	 * timers. But the process context could have switched out leading to
+	 * a situation where the timers are Not started yet, but are actually stopped.
+	 *
+	 * Disable preemption from the time we start the timer until we are done
+	 * with sending the H2D interrupts.
+	 */
+	OSL_DISABLE_PREEMPTION(dhd->osh);
+	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+	dhd_start_cmd_timer(dhd);
+	dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/* For some reason if we fail to ring door bell, stop the timers */
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		dhd_stop_cmd_timer(dhd);
+		dhd_stop_bus_timer(dhd);
+		OSL_ENABLE_PREEMPTION(dhd->osh);
+		goto done;
+	}
+	OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		goto done;
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	/* wait for IOCTL completion message from dongle and get first fragment */
+	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+done:
+	return ret;
+}
+
+/**
+ * Waits for IOCTL completion message from the dongle, copies this into caller
+ * provided parameter 'buf'.
+ */
+static int
+dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int timeleft;
+	unsigned long flags;
+	int ret = 0;
+	static uint cnt = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhd_query_bus_erros(dhd)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false);
+
+#ifdef DHD_RECOVER_TIMEOUT
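+	/* If the wait timed out but a mailbox interrupt is pending while the mask is
+	 * cleared, the interrupt was probably missed: drain the control completion ring
+	 * once more and re-enable the bus interrupts before declaring a timeout.
+	 */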
+	if (prot->ioctl_received == 0) {
+		uint32 intstatus = 0;
+		uint32 intmask = 0;
+		intstatus = si_corereg(dhd->bus->sih,
+			dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+		intmask = si_corereg(dhd->bus->sih,
+			dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+		if ((intstatus) && (!intmask) && (timeleft == 0) && (!dhd_query_bus_erros(dhd)))
+		{
+			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x intmask=%x\n",
+				__FUNCTION__, intstatus, intmask));
+			DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
+			DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
+				"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
+				"dpc_return_busdown_count=%lu\n",
+				dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
+				dhd->bus->isr_intr_disable_count,
+				dhd->bus->suspend_intr_disable_count,
+				dhd->bus->dpc_return_busdown_count));
+
+			dhd_prot_process_ctrlbuf(dhd);
+
+			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false);
+			/* Enable Back Interrupts using IntMask */
+			dhdpcie_bus_intr_enable(dhd->bus);
+		}
+	}
+#endif /* DHD_RECOVER_TIMEOUT */
+
+	if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+		cnt++;
+		if (cnt <= dhd->conf->ctrl_resched) {
+			uint32 intstatus = 0, intmask = 0;
+			intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+			intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+			if (intstatus) {
+				DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
+					__FUNCTION__, cnt, intstatus, intmask));
+				dhd->bus->intstatus = intstatus;
+				dhd->bus->ipend = TRUE;
+				dhd->bus->dpc_sched = TRUE;
+				dhd_sched_dpc(dhd);
+				timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received, true);
+			}
+		}
+	} else {
+		cnt = 0;
+	}
+
+	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
+		uint32 intstatus;
+
+		dhd->rxcnt_timeout++;
+		dhd->rx_ctlerrs++;
+		dhd->iovar_timeout_occured = TRUE;
+		DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
+			"trans_id %d state %d busstate=%d ioctl_received=%d\n",
+			__FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
+			prot->ioctl_trans_id, prot->ioctl_state,
+			dhd->busstate, prot->ioctl_received));
+		if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
+			prot->curr_ioctl_cmd == WLC_GET_VAR) {
+			char iovbuf[32];
+			int i;
+			int dump_size = 128;
+			uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
+			memset(iovbuf, 0, sizeof(iovbuf));
+			strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
+			iovbuf[sizeof(iovbuf) - 1] = '\0';
+			DHD_ERROR(("Current IOVAR (%s): %s\n",
+				prot->curr_ioctl_cmd == WLC_SET_VAR ?
+				"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
+			DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
+			for (i = 0; i < dump_size; i++) {
+				DHD_ERROR(("%02X ", ioctl_buf[i]));
+				if ((i % 32) == 31) {
+					DHD_ERROR(("\n"));
+				}
+			}
+			DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
+		}
+
+		/* Check the PCIe link status by reading intstatus register */
+		intstatus = si_corereg(dhd->bus->sih,
+			dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+		if (intstatus == (uint32)-1) {
+			DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+			dhd->bus->is_linkdown = TRUE;
+		}
+
+		dhd_bus_dump_console_buffer(dhd->bus);
+		dhd_prot_debug_info_print(dhd);
+
+#ifdef DHD_FW_COREDUMP
+		/* Collect socram dump */
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
+			dhd_bus_mem_dump(dhd);
+		}
+#endif /* DHD_FW_COREDUMP */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		ret = -ETIMEDOUT;
+		goto out;
+	} else {
+		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
+			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
+				__FUNCTION__, prot->ioctl_received));
+			ret = -EINVAL;
+			goto out;
+		}
+		dhd->rxcnt_timeout = 0;
+		dhd->rx_ctlpkts++;
+		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
+			__FUNCTION__, prot->ioctl_resplen));
+	}
+
+	if (dhd->prot->ioctl_resplen > len)
+		dhd->prot->ioctl_resplen = (uint16)len;
+	if (buf)
+		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
+
+	ret = (int)(dhd->prot->ioctl_status);
+
+out:
+	DHD_GENERAL_LOCK(dhd, flags);
+	dhd->prot->ioctl_state = 0;
+	dhd->prot->ioctl_resplen = 0;
+	dhd->prot->ioctl_received = IOCTL_WAIT;
+	dhd->prot->curr_ioctl_cmd = 0;
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return ret;
+} /* dhd_msgbuf_wait_ioctl_cmplt */
+
+static int
+dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/*
+	 * These timers should be started before raising the H2D interrupt.
+	 * Consider the scenario where the H2D interrupt fires and the dongle
+	 * responds immediately: the DPC would stop the cmd and bus timers, but
+	 * if this context had been switched out in between, the timers would be
+	 * stopped before they were ever started.
+	 *
+	 * Disable preemption from the time we start the timers until we are done
+	 * sending H2D interrupts.
+	 */
+	OSL_DISABLE_PREEMPTION(dhd->osh);
+	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+	dhd_start_cmd_timer(dhd);
+	dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	/* Fill up msgbuf for ioctl req */
+	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+#ifdef REPORT_FATAL_TIMEOUTS
+	/* If we failed to fill the request (and hence never rang the doorbell), stop the timers */
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		dhd_stop_cmd_timer(dhd);
+		dhd_stop_bus_timer(dhd);
+		OSL_ENABLE_PREEMPTION(dhd->osh);
+		goto done;
+	}
+
+	OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
+	if (ret < 0) {
+		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+		goto done;
+	}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+done:
+	return ret;
+}
+
+/** Called by upper DHD layer. Handles a protocol control response asynchronously. */
+int dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+	return 0;
+}
+
+/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
+int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
+                             void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+/** Add prot dump output to a buffer */
+void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+
+	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
+		bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
+	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
+		bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
+	else
+		bcm_bprintf(b, "\nd2h_sync: NONE:");
+	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
+		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
+
+	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
+		dhd->dma_h2d_ring_upd_support,
+		dhd->dma_d2h_ring_upd_support,
+		dhd->prot->rw_index_sz);
+	bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
+		h2d_max_txpost, dhd->prot->h2d_max_txpost);
+}
+
+/* Update local copy of dongle statistics */
+void dhd_prot_dstats(dhd_pub_t *dhd)
+{
+	return;
+}
+
+/** Called by upper DHD layer */
+int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
+{
+	return 0;
+}
+
+/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
+int
+dhd_post_dummy_msg(dhd_pub_t *dhd)
+{
+	unsigned long flags;
+	hostevent_hdr_t *hevent = NULL;
+	uint16 alloced = 0;
+
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	hevent = (hostevent_hdr_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (hevent == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return -1;
+	}
+
+	/* CMN msg header */
+	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
+	hevent->msg.if_id = 0;
+	hevent->msg.flags = ring->current_phase;
+
+	/* Event payload */
+	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
+
+	/* Since we are filling the data directly into the bufptr obtained
+	 * from the msgbuf, we can call write_complete directly.
+	 */
+	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return 0;
+}
+
+/**
+ * If exactly_nitems is true, this function will allocate space for nitems or fail
+ * If exactly_nitems is false, this function will allocate space for nitems or less
+ */
+static void * BCMFASTPATH
+dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	uint16 nitems, uint16 * alloced, bool exactly_nitems)
+{
+	void * ret_buf;
+
+	/* Alloc space for nitems in the ring */
+	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
+
+	if (ret_buf == NULL) {
+		/* if alloc failed, refresh the cached read pointer */
+		if (dhd->dma_d2h_ring_upd_support) {
+			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+		} else {
+			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+			/* Check if ring->rd is valid */
+			if (ring->rd >= ring->max_items) {
+				dhd->bus->read_shm_fail = TRUE;
+				DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
+				return NULL;
+			}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		}
+
+		/* Try allocating once more */
+		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
+
+		if (ret_buf == NULL) {
+			DHD_INFO(("%s: Ring space not available  \n", ring->name));
+			return NULL;
+		}
+	}
+
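+	/* Allocation starts at the ring base (initially or after a wrap): toggle
+	 * current_phase so new work items carry the flipped phase value.
+	 */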
+	if (ret_buf == HOST_RING_BASE(ring)) {
+		DHD_INFO(("%s: setting the phase now\n", ring->name));
+		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
+	}
+
+	/* Return alloced space */
+	return ret_buf;
+}
+
+/**
+ * Non-inline ioctl request.
+ * First forms an ioctl request in the circular buffer, laid out per the ioctptr_reqst_hdr_t header.
+ * Then forms a separate request buffer with a 4-byte common header prepended; the buf contents
+ * from the caller are copied into the remaining section of that buffer.
+ */
+static int
+dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	ioctl_req_msg_t *ioct_rqst;
+	void * ioct_buf;	/* For ioctl payload */
+	uint16  rqstlen, resplen;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+	if (dhd_query_bus_erros(dhd)) {
+		return -EIO;
+	}
+
+	rqstlen = len;
+	resplen = len;
+
+	/* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including headers:
+	 * an 8K dongle buffer allocation fails, and dhd doesn't provide separate
+	 * input and output buffer lengths, so assume the input can never exceed 1.5K.
+	 */
+	rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	if (prot->ioctl_state) {
+		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_BUSY;
+	} else {
+		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
+	}
+
+	/* Request for cbuf space */
+	ioct_rqst = (ioctl_req_msg_t*)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+	if (ioct_rqst == NULL) {
+		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+		prot->ioctl_state = 0;
+		prot->curr_ioctl_cmd = 0;
+		prot->ioctl_received = IOCTL_WAIT;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return -1;
+	}
+
+	/* Common msg buf hdr */
+	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
+	ioct_rqst->cmn_hdr.flags = ring->current_phase;
+	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
+	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	ioct_rqst->cmd = htol32(cmd);
+	prot->curr_ioctl_cmd = cmd;
+	ioct_rqst->output_buf_len = htol16(resplen);
+	prot->ioctl_trans_id++;
+	ioct_rqst->trans_id = prot->ioctl_trans_id;
+
+	/* populate ioctl buffer info */
+	ioct_rqst->input_buf_len = htol16(rqstlen);
+	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
+	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
+	/* copy ioct payload */
+	ioct_buf = (void *) prot->ioctbuf.va;
+
+	if (buf)
+		memcpy(ioct_buf, buf, len);
+
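+	/* Flush the payload from the CPU cache so the dongle's DMA read of ioctbuf sees the latest contents */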
+	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
+
+	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
+		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+
+	DHD_CTL(("submitted IOCTL request: request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
+		ioct_rqst->trans_id));
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return 0;
+} /* dhd_fillup_ioct_reqst */
+
+
+/**
+ * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
+ * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
+ * information is posted to the dongle.
+ *
+ * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
+ * each flowring in pool of flowrings.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
+	uint16 max_items, uint16 item_len, uint16 ringid)
+{
+	int dma_buf_alloced = BCME_NOMEM;
+	uint32 dma_buf_len = max_items * item_len;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+
+	ASSERT(ring);
+	ASSERT(name);
+	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
+
+	/* Init name */
+	strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
+	ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
+
+	ring->idx = ringid;
+
+	ring->max_items = max_items;
+	ring->item_len = item_len;
+
+	/* A contiguous space may be reserved for all flowrings */
+	if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
+		/* Carve out from the contiguous DMA-able flowring buffer */
+		uint16 flowid;
+		uint32 base_offset;
+
+		dhd_dma_buf_t *dma_buf = &ring->dma_buf;
+		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
+
+		flowid = DHD_RINGID_TO_FLOWID(ringid);
+		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
+
+		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
+
+		dma_buf->len = dma_buf_len;
+		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
+		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
+		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
+
+		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
+		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
+
+		dma_buf->dmah   = rsv_buf->dmah;
+		dma_buf->secdma = rsv_buf->secdma;
+
+		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+	} else {
+		/* Allocate a dhd_dma_buf */
+		dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
+		if (dma_buf_alloced != BCME_OK) {
+			return BCME_NOMEM;
+		}
+	}
+
+	/* CAUTION: Save ring::base_addr in little endian format! */
+	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
+
+#ifdef BCM_SECURE_DMA
+	if (SECURE_DMA_ENAB(prot->osh)) {
+		ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
+		if (ring->dma_buf.secdma == NULL) {
+			goto free_dma_buf;
+		}
+	}
+#endif /* BCM_SECURE_DMA */
+
+	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
+		"ring start %p buf phys addr  %x:%x \n",
+		ring->name, ring->max_items, ring->item_len,
+		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+		ltoh32(ring->base_addr.low_addr)));
+
+	return BCME_OK;
+
+#ifdef BCM_SECURE_DMA
+free_dma_buf:
+	if (dma_buf_alloced == BCME_OK) {
+		dhd_dma_buf_free(dhd, &ring->dma_buf);
+	}
+#endif /* BCM_SECURE_DMA */
+
+	return BCME_NOMEM;
+
+} /* dhd_prot_ring_attach */
+
+
+/**
+ * dhd_prot_ring_init - Post the common ring information to dongle.
+ *
+ * Used only for common rings.
+ *
+ * The flowrings information is passed via the create flowring control message
+ * (tx_flowring_create_request_t) sent over the H2D control submission common
+ * ring.
+ */
+static void
+dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+	ring->wr = 0;
+	ring->rd = 0;
+	ring->curr_rd = 0;
+
+	/* CAUTION: ring::base_addr already in Little Endian */
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
+		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
+		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
+		sizeof(uint16), RING_ITEM_LEN, ring->idx);
+
+	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+		sizeof(uint16), RING_WR_UPD, ring->idx);
+	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+		sizeof(uint16), RING_RD_UPD, ring->idx);
+
+	/* ring inited */
+	ring->inited = TRUE;
+
+} /* dhd_prot_ring_init */
+
+
+/**
+ * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
+ * Reset WR and RD indices to 0.
+ */
+static void
+dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+	DHD_TRACE(("%s\n", __FUNCTION__));
+
+	dhd_dma_buf_reset(dhd, &ring->dma_buf);
+
+	ring->rd = ring->wr = 0;
+	ring->curr_rd = 0;
+	ring->inited = FALSE;
+	ring->create_pending = FALSE;
+}
+
+
+/**
+ * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
+ * hanging off the msgbuf_ring.
+ */
+static void
+dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+	ASSERT(ring);
+
+	ring->inited = FALSE;
+	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
+
+#ifdef BCM_SECURE_DMA
+	if (SECURE_DMA_ENAB(prot->osh)) {
+		if (ring->dma_buf.secdma) {
+			SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
+			MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
+			ring->dma_buf.secdma = NULL;
+		}
+	}
+#endif /* BCM_SECURE_DMA */
+
+	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
+	 * memory, then simply stop using it.
+	 */
+	if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
+		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
+	} else {
+		dhd_dma_buf_free(dhd, &ring->dma_buf);
+	}
+
+} /* dhd_prot_ring_detach */
+
+
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle-advertised number of h2d rings is used to
+ * determine the number of flowrings required; a pool of msgbuf_rings is
+ * allocated and a DMA-able buffer (carved or allocated) is attached to each.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
+ * state as-if upon an attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
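+/* Example (illustrative): if BCMPCIE_H2D_COMMON_MSGRINGS is 2 (ctrl and rxpost
+ * submissions), flowid 2 maps to pool index 0, flowid 3 to index 1, and so on.
+ */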
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
+	    DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
+	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+		 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
+		 (ring)++, (flowid)++)
+
+/* Fetch number of H2D flowrings given the total number of h2d rings */
+static uint16
+dhd_get_max_flow_rings(dhd_pub_t *dhd)
+{
+	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
+		return dhd->bus->max_tx_flowrings;
+	else
+		return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
+}
+
+/**
+ * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
+ *
+ * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
+ * Dongle includes common rings when it advertises the number of H2D rings.
+ * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
+ * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
+ *
+ * dhd_prot_ring_attach is invoked to perform the actual initialization and
+ * attaching the DMA-able buffer.
+ *
+ * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
+ * initialized msgbuf_ring_t object.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
+{
+	uint16 flowid;
+	msgbuf_ring_t *ring;
+	uint16 h2d_flowrings_total; /* exclude H2D common rings */
+	dhd_prot_t *prot = dhd->prot;
+	char ring_name[RING_NAME_MAX_LENGTH];
+
+	if (prot->h2d_flowrings_pool != NULL)
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+
+	ASSERT(prot->h2d_rings_total == 0);
+
+	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
+	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
+
+	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
+		DHD_ERROR(("%s: h2d_rings_total advertised as %u\n",
+			__FUNCTION__, prot->h2d_rings_total));
+		return BCME_ERROR;
+	}
+
+	/* Subtract number of H2D common rings, to determine number of flowrings */
+	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+
+	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
+
+	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
+	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
+		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
+			__FUNCTION__, h2d_flowrings_total));
+		goto fail;
+	}
+
+	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
+		if (dhd_prot_ring_attach(dhd, ring, ring_name,
+		        prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
+		        DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
+			goto attach_fail;
+		}
+	}
+
+	return BCME_OK;
+
+attach_fail:
+	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
+
+fail:
+	prot->h2d_rings_total = 0;
+	return BCME_NOMEM;
+
+} /* dhd_prot_flowrings_pool_attach */
+
+
+/**
+ * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
+ * Invokes dhd_prot_ring_reset to perform the actual reset.
+ *
+ * The DMA-able buffer is not freed during reset and neither is the flowring
+ * pool freed.
+ *
+ * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
+ * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
+ * from a previous flowring pool instantiation will be reused.
+ *
+ * This will avoid a fragmented DMA-able memory condition, if multiple
+ * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
+ * cycle.
+ */
+static void
+dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
+{
+	uint16 flowid, h2d_flowrings_total;
+	msgbuf_ring_t *ring;
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		ASSERT(prot->h2d_rings_total == 0);
+		return;
+	}
+	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+	/* Reset each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+		dhd_prot_ring_reset(dhd, ring);
+		ring->inited = FALSE;
+	}
+
+	/* Flowring pool state is now as if dhd_prot_flowrings_pool_attach had just run */
+}
+
+
+/**
+ * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
+ * DMA-able buffers for flowrings.
+ * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
+ * de-initialization of each msgbuf_ring_t.
+ */
+static void
+dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
+{
+	int flowid;
+	msgbuf_ring_t *ring;
+	uint16 h2d_flowrings_total; /* exclude H2D common rings */
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		ASSERT(prot->h2d_rings_total == 0);
+		return;
+	}
+
+	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
+	/* Detach the DMA-able buffer for each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
+		dhd_prot_ring_detach(dhd, ring);
+	}
+
+
+	MFREE(prot->osh, prot->h2d_flowrings_pool,
+		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
+
+	prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
+	prot->h2d_rings_total = 0;
+
+} /* dhd_prot_flowrings_pool_detach */
+
+
+/**
+ * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
+ * msgbuf_ring from the flowring pool, and assign it.
+ *
+ * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
+ * ring information to the dongle, a flowring's information is passed via a
+ * flowring create control message.
+ *
+ * Only the ring state (WR, RD) index are initialized.
+ */
+static msgbuf_ring_t *
+dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
+{
+	msgbuf_ring_t *ring;
+	dhd_prot_t *prot = dhd->prot;
+
+	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
+	ASSERT(flowid < prot->h2d_rings_total);
+	ASSERT(prot->h2d_flowrings_pool != NULL);
+
+	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+
+	/* ASSERT flow_ring->inited == FALSE */
+
+	ring->wr = 0;
+	ring->rd = 0;
+	ring->curr_rd = 0;
+	ring->inited = TRUE;
+	/**
+	 * Every time a flowring starts dynamically, initialize current_phase with 0
+	 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
+	 */
+	ring->current_phase = 0;
+	return ring;
+}
+
+
+/**
+ * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
+ * msgbuf_ring back to the flow_ring pool.
+ */
+void
+dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
+{
+	msgbuf_ring_t *ring;
+	dhd_prot_t *prot = dhd->prot;
+
+	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
+	ASSERT(flowid < prot->h2d_rings_total);
+	ASSERT(prot->h2d_flowrings_pool != NULL);
+
+	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+
+	ASSERT(ring == (msgbuf_ring_t*)flow_ring);
+	/* ASSERT flow_ring->inited == TRUE */
+
+	(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+
+	ring->wr = 0;
+	ring->rd = 0;
+	ring->inited = FALSE;
+
+	ring->curr_rd = 0;
+}
+
+
+/*
+ * Assumes only one index is updated at a time.
+ * If exactly_nitems is true, this function allocates space for nitems or fails
+ * (exception: around a wrap, the last nitems of the ring buffer, to prevent a hangup).
+ * If exactly_nitems is false, this function allocates space for nitems or less.
+ */
+static void *BCMFASTPATH
+dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
+	bool exactly_nitems)
+{
+	void *ret_ptr = NULL;
+	uint16 ring_avail_cnt;
+
+	ASSERT(nitems <= ring->max_items);
+
+	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
+
+	if ((ring_avail_cnt == 0) ||
+	       (exactly_nitems && (ring_avail_cnt < nitems) &&
+	       ((ring->max_items - ring->wr) >= nitems))) {
+		DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
+			ring->name, nitems, ring->wr, ring->rd));
+		return NULL;
+	}
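+	/* Grant as many items as are available, up to nitems; the actual count is returned via *alloced */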
+	*alloced = MIN(nitems, ring_avail_cnt);
+
+	/* Return next available space */
+	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
+
+	/* Update write index */
+	if ((ring->wr + *alloced) == ring->max_items)
+		ring->wr = 0;
+	else if ((ring->wr + *alloced) < ring->max_items)
+		ring->wr += *alloced;
+	else {
+		/* Should never hit this */
+		ASSERT(0);
+		return NULL;
+	}
+
+	return ret_ptr;
+} /* dhd_prot_get_ring_space */
+
+
+/**
+ * dhd_prot_ring_write_complete - Host updates the new WR index on producing
+ * new messages in a H2D ring. The messages are flushed from cache prior to
+ * posting the new WR index. The new WR index will be updated in the DMA index
+ * array or directly in the dongle's ring state memory.
+ * A PCIE doorbell will be generated to wake up the dongle.
+ * This is a non-atomic function, make sure the callers
+ * always hold appropriate locks.
+ */
+static void BCMFASTPATH
+dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+	uint16 nitems)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint8 db_index;
+	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+
+	/* cache flush */
+	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
+
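+	/* Publish the new WR index: via shared ring state, the host DMA index array,
+	 * or the IFRM index array, depending on which update mechanism is active.
+	 */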
+	if (IDMA_DS_ACTIVE(dhd) && IDMA_ACTIVE(dhd)) {
+		dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+			sizeof(uint16), RING_WR_UPD, ring->idx);
+	} else if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+			dhd_prot_dma_indx_set(dhd, ring->wr,
+			                      H2D_DMA_INDX_WR_UPD, ring->idx);
+	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
+			dhd_prot_dma_indx_set(dhd, ring->wr,
+			H2D_IFRM_INDX_WR_UPD, ring->idx);
+	} else {
+			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+				sizeof(uint16), RING_WR_UPD, ring->idx);
+	}
+
+	/* raise h2d interrupt */
+	if (IDMA_ACTIVE(dhd) ||
+		(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
+		if (IDMA_DS_ACTIVE(dhd)) {
+			prot->mb_ring_fn(dhd->bus, ring->wr);
+		} else {
+			db_index = IDMA_IDX0;
+			prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
+		}
+	} else {
+		prot->mb_ring_fn(dhd->bus, ring->wr);
+	}
+}
+
+/**
+ * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
+ * from a D2H ring. The new RD index will be updated in the DMA Index array or
+ * directly in dongle's ring state memory.
+ */
+static void
+dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint8 db_index;
+
+	/* Update the read index: if DMA'ing of ring indices is supported,
+	 * update the RD index in host memory, otherwise write it to the
+	 * dongle's TCM (ring state memory).
+	 */
+	if (IDMA_ACTIVE(dhd)) {
+		dhd_prot_dma_indx_set(dhd, ring->rd,
+		                      D2H_DMA_INDX_RD_UPD, ring->idx);
+		if (IDMA_DS_ACTIVE(dhd)) {
+			dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+				sizeof(uint16), RING_RD_UPD, ring->idx);
+		} else {
+			db_index = IDMA_IDX1;
+			prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
+		}
+	} else if (dhd->dma_h2d_ring_upd_support) {
+		dhd_prot_dma_indx_set(dhd, ring->rd,
+		                      D2H_DMA_INDX_RD_UPD, ring->idx);
+	} else {
+		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+			sizeof(uint16), RING_RD_UPD, ring->idx);
+	}
+}
+
+static int
+dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
+{
+	unsigned long flags;
+	d2h_ring_create_req_t  *d2h_ring;
+	uint16 alloced = 0;
+	int ret = BCME_OK;
+	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
+
+	if (ring_to_create == NULL) {
+		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
+		ret = BCME_ERROR;
+		goto err;
+	}
+
+	/* Request for ring buffer space */
+	d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
+		&dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+		&alloced, FALSE);
+
+	if (d2h_ring == NULL) {
+		DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
+			__FUNCTION__));
+		ret = BCME_NOMEM;
+		goto err;
+	}
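+	/* Tag the request with the fixed debug-ring packet id and mark the ring as
+	 * create-pending until the completion message arrives.
+	 */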
+	ring_to_create->create_req_id = DHD_D2H_DBGRING_REQ_PKTID;
+	ring_to_create->create_pending = TRUE;
+
+	/* Common msg buf hdr */
+	d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
+	d2h_ring->msg.if_id = 0;
+	d2h_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
+	d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
+	d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
+	d2h_ring->ring_type = BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL;
+	d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM);
+	d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE);
+	d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
+	d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
+
+	d2h_ring->flags = 0;
+	d2h_ring->msg.epoch =
+		dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+	dhd->prot->h2dring_ctrl_subn.seqnum++;
+
+	/* Update the flow_ring's WRITE index */
+	dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, d2h_ring,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+err:
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+	return ret;
+}
+
+static int
+dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
+{
+	unsigned long flags;
+	h2d_ring_create_req_t  *h2d_ring;
+	uint16 alloced = 0;
+	uint8 i = 0;
+	int ret = BCME_OK;
+
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
+
+	if (ring_to_create == NULL) {
+		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
+		ret = BCME_ERROR;
+		goto err;
+	}
+
+	/* Request for ring buffer space */
+	h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
+		&dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+		&alloced, FALSE);
+
+	if (h2d_ring == NULL) {
+		DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
+			__FUNCTION__));
+		ret = BCME_NOMEM;
+		goto err;
+	}
+	ring_to_create->create_req_id = DHD_H2D_DBGRING_REQ_PKTID;
+	ring_to_create->create_pending = TRUE;
+
+	/* Common msg buf hdr */
+	h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
+	h2d_ring->msg.if_id = 0;
+	h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
+	h2d_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
+	h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
+	h2d_ring->ring_type = BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT;
+	h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
+	h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
+	h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
+	h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
+	h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
+
+	for (i = 0; i < ring_to_create->n_completion_ids; i++) {
+		h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
+	}
+
+	h2d_ring->flags = 0;
+	h2d_ring->msg.epoch =
+		dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+	dhd->prot->h2dring_ctrl_subn.seqnum++;
+
+	/* Update the flow_ring's WRITE index */
+	dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_ring,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+err:
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+	return ret;
+}
+
+/**
+ * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
+ * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
+ * See dhd_prot_dma_indx_init()
+ */
+void
+dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
+{
+	uint8 *ptr;
+	uint16 offset;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
+
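+	/* Select the host-resident index array and the per-ring offset for this index type */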
+	switch (type) {
+		case H2D_DMA_INDX_WR_UPD:
+			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
+			offset = DHD_H2D_RING_OFFSET(ringid);
+			break;
+
+		case D2H_DMA_INDX_RD_UPD:
+			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
+			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
+			break;
+
+		case H2D_IFRM_INDX_WR_UPD:
+			ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
+			offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
+			break;
+
+		default:
+			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+				__FUNCTION__));
+			return;
+	}
+
+	ASSERT(prot->rw_index_sz != 0);
+	ptr += offset * prot->rw_index_sz;
+
+	*(uint16*)ptr = htol16(new_index);
+
+	OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
+
+	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
+		__FUNCTION__, new_index, type, ringid, ptr, offset));
+
+} /* dhd_prot_dma_indx_set */
+
+
+/**
+ * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
+ * array.
+ * Dongle DMAes an entire array to host memory (if the feature is enabled).
+ * See dhd_prot_dma_indx_init()
+ */
+static uint16
+dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
+{
+	uint8 *ptr;
+	uint16 data;
+	uint16 offset;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
+
+	switch (type) {
+		case H2D_DMA_INDX_WR_UPD:
+			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
+			offset = DHD_H2D_RING_OFFSET(ringid);
+			break;
+
+		case H2D_DMA_INDX_RD_UPD:
+			ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
+			offset = DHD_H2D_RING_OFFSET(ringid);
+			break;
+
+		case D2H_DMA_INDX_WR_UPD:
+			ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
+			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
+			break;
+
+		case D2H_DMA_INDX_RD_UPD:
+			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
+			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
+			break;
+
+		default:
+			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+				__FUNCTION__));
+			return 0;
+	}
+
+	ASSERT(prot->rw_index_sz != 0);
+	ptr += offset * prot->rw_index_sz;
+
+	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
+
+	data = LTOH16(*((uint16*)ptr));
+
+	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
+		__FUNCTION__, data, type, ringid, ptr, offset));
+
+	return (data);
+
+} /* dhd_prot_dma_indx_get */
+
+/**
+ * An array of DMA read/write indices describing the host rings can be maintained either in host
+ * memory or in device memory, depending on preprocessor options. When host memory is used, this
+ * function is called during driver initialization. It reserves and initializes blocks of DMA'able
+ * host memory holding an array of DMA read or DMA write indices. The physical addresses of these
+ * blocks are communicated to the dongle later on; by reading this host memory, the dongle learns
+ * the state of the host rings.
+ */
+
+static INLINE int
+dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+	dhd_dma_buf_t *dma_buf, uint32 bufsz)
+{
+	int rc;
+
+	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
+		return BCME_OK;
+
+	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
+
+	return rc;
+}
+
+int
+dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
+{
+	uint32 bufsz;
+	dhd_prot_t *prot = dhd->prot;
+	dhd_dma_buf_t *dma_buf;
+
+	if (prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+
+	/* Dongle advertises 2B or 4B RW index size */
+	ASSERT(rw_index_sz != 0);
+	prot->rw_index_sz = rw_index_sz;
+
+	bufsz = rw_index_sz * length;
+
+	switch (type) {
+		case H2D_DMA_INDX_WR_BUF:
+			dma_buf = &prot->h2d_dma_indx_wr_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case H2D_DMA_INDX_RD_BUF:
+			dma_buf = &prot->h2d_dma_indx_rd_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case D2H_DMA_INDX_WR_BUF:
+			dma_buf = &prot->d2h_dma_indx_wr_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case D2H_DMA_INDX_RD_BUF:
+			dma_buf = &prot->d2h_dma_indx_rd_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		case H2D_IFRM_INDX_WR_BUF:
+			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
+			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
+				goto ret_no_mem;
+			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
+				dma_buf->len, rw_index_sz, length));
+			break;
+
+		default:
+			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
+			return BCME_BADOPTION;
+	}
+
+	return BCME_OK;
+
+ret_no_mem:
+	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
+		__FUNCTION__, type, bufsz));
+	return BCME_NOMEM;
+
+} /* dhd_prot_dma_indx_init */
+
+
+/**
+ * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
+ * from, or NULL if there are no more messages to read.
+ */
+static uint8*
+dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
+{
+	uint16 wr;
+	uint16 rd;
+	uint16 depth;
+	uint16 items;
+	void  *read_addr = NULL; /* address of next msg to be read in ring */
+	uint16 d2h_wr = 0;
+
+	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
+		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
+		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
+
+	/* Remember the read index in a separate variable: ring->rd gets updated
+	 * at the end of this function, so without it we could not report the
+	 * exact read index from which a message was read.
+	 */
+	ring->curr_rd = ring->rd;
+
+	/* update write pointer */
+	if (dhd->dma_d2h_ring_upd_support) {
+		/* DMAing write/read indices supported */
+		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+		ring->wr = d2h_wr;
+	} else {
+		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
+	}
+
+	wr = ring->wr;
+	rd = ring->rd;
+	depth = ring->max_items;
+
+	/* check for avail space, in number of ring items */
+	items = READ_AVAIL_SPACE(wr, rd, depth);
+	if (items == 0)
+		return NULL;
+
+	/*
+	 * Note that in some builds ASSERT translates to just a printk, so even
+	 * if we hit this condition we would never halt. dhd_prot_process_msgtype
+	 * can then get stuck in a big loop if this happens.
+	 */
+	if (items > ring->max_items) {
+		DHD_ERROR(("\r\n======================= \r\n"));
+		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
+			__FUNCTION__, ring, ring->name, ring->max_items, items));
+		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
+		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
+			dhd->busstate, dhd->bus->wait_for_d3_ack));
+		DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+		if (wr >= ring->max_items) {
+			dhd->bus->read_shm_fail = TRUE;
+		}
+#else
+#ifdef DHD_FW_COREDUMP
+		if (dhd->memdump_enabled) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
+			dhd_bus_mem_dump(dhd);
+
+		}
+#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+		*available_len = 0;
+		dhd_schedule_reset(dhd);
+
+		return NULL;
+	}
+
+	/* if space is available, calculate address to be read */
+	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
+
+	/* update read pointer */
+	if ((ring->rd + items) >= ring->max_items)
+		ring->rd = 0;
+	else
+		ring->rd += items;
+
+	ASSERT(ring->rd < ring->max_items);
+
+	/* convert items to bytes : available_len must be 32bits */
+	*available_len = (uint32)(items * ring->item_len);
+
+	OSL_CACHE_INV(read_addr, *available_len);
+
+	/* return read address */
+	return read_addr;
+
+} /* dhd_prot_get_read_addr */
+
+/**
+ * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
+ * make sure the callers always hold appropriate locks.
+ */
+int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
+{
+	h2d_mailbox_data_t *h2d_mb_data;
+	uint16 alloced = 0;
+	int num_post = 1;
+	int i;
+
+	DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
+		__FUNCTION__, mb_data));
+	if (!dhd->prot->h2dring_ctrl_subn.inited) {
+		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+#ifdef PCIE_INB_DW
+	if ((INBAND_DW_ENAB(dhd->bus)) &&
+		(dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
+			DW_DEVICE_DS_DEV_SLEEP)) {
+		if (mb_data == H2D_HOST_CONS_INT) {
+			/* One additional device_wake post needed */
+			num_post = 2;
+		}
+	}
+#endif /* PCIE_INB_DW */
+
+	for (i = 0; i < num_post; i ++) {
+		/* Request for ring buffer space */
+		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
+			&dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+			&alloced, FALSE);
+
+		if (h2d_mb_data == NULL) {
+			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
+				__FUNCTION__));
+			return BCME_NOMEM;
+		}
+
+		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
+	/* Common msg buf hdr */
+		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
+		h2d_mb_data->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
+
+		h2d_mb_data->msg.epoch =
+			dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+		dhd->prot->h2dring_ctrl_subn.seqnum++;
+
+#ifdef PCIE_INB_DW
+		/* post device_wake first */
+		if ((num_post == 2) && (i == 0)) {
+			h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
+		} else
+#endif /* PCIE_INB_DW */
+		{
+			h2d_mb_data->mail_box_data = htol32(mb_data);
+		}
+
+		DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
+
+		/* upd wrt ptr and raise interrupt */
+		/* caller of dhd_prot_h2d_mbdata_send_ctrlmsg already holding general lock */
+		dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_mb_data,
+			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+#ifdef PCIE_INB_DW
+		/* Add a delay if device_wake is posted */
+		if ((num_post == 2) && (i == 0)) {
+			OSL_DELAY(1000);
+		}
+#endif /* PCIE_INB_DW */
+	}
+
+	return 0;
+}
+
+/** Creates a flow ring and informs dongle of this event */
+int
+dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_create_request_t *flow_create_rqst;
+	msgbuf_ring_t *flow_ring;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
+
+	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
+	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
+	if (flow_ring == NULL) {
+		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
+			__FUNCTION__, flow_ring_node->flowid));
+		return BCME_NOMEM;
+	}
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Request for ctrl_ring buffer space */
+	flow_create_rqst = (tx_flowring_create_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
+
+	if (flow_create_rqst == NULL) {
+		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
+		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
+			__FUNCTION__, flow_ring_node->flowid));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
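+	/* Remember the msgbuf ring backing this flow so later submit/delete paths can reach it via prot_info */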
+	flow_ring_node->prot_info = (void *)flow_ring;
+
+	/* Common msg buf hdr */
+	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
+	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
+	flow_create_rqst->msg.flags = ctrl_ring->current_phase;
+
+	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+	ctrl_ring->seqnum++;
+
+	/* Update flow create message */
+	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
+	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
+	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
+	/* CAUTION: ring::base_addr already in Little Endian */
+	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
+	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
+	flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
+	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+
+	/* IFRM mask definition: bit0 = d11ac core, bit1 = d11ad core.
+	 * This field is currently not used for priority, so it carries the IFRM mask only.
+	 */
+	if (IFRM_ACTIVE(dhd))
+		flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
+
+	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
+		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
+		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
+		flow_ring_node->flow_info.ifindex));
+
+	/* Update the flow_ring's WRITE index */
+	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
+		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+			H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
+	} else {
+		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
+			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
+	}
+
+	/* update control subn ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return BCME_OK;
+} /* dhd_prot_flow_ring_create */
+
+/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
+static void
+dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
+{
+	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
+
+	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
+		ltoh16(flow_create_resp->cmplt.status),
+		ltoh16(flow_create_resp->cmplt.flow_ring_id)));
+
+	dhd_bus_flow_ring_create_response(dhd->bus,
+		ltoh16(flow_create_resp->cmplt.flow_ring_id),
+		ltoh16(flow_create_resp->cmplt.status));
+}
+
+static void
+dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
+{
+	h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
+	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
+		ltoh16(resp->cmplt.status),
+		ltoh16(resp->cmplt.ring_id),
+		ltoh32(resp->cmn_hdr.request_id)));
+	if (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) {
+		DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
+		return;
+	}
+	if (!dhd->prot->h2dring_info_subn->create_pending) {
+		DHD_ERROR(("info ring create status for not pending submit ring\n"));
+	}
+
+	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+		DHD_ERROR(("info ring create failed with status %d\n",
+			ltoh16(resp->cmplt.status)));
+		return;
+	}
+	dhd->prot->h2dring_info_subn->create_pending = FALSE;
+	dhd->prot->h2dring_info_subn->inited = TRUE;
+	dhd_prot_infobufpost(dhd);
+}
+
+static void
+dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
+{
+	d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
+	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
+		ltoh16(resp->cmplt.status),
+		ltoh16(resp->cmplt.ring_id),
+		ltoh32(resp->cmn_hdr.request_id)));
+	if (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) {
+		DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
+		return;
+	}
+	if (!dhd->prot->d2hring_info_cpln->create_pending) {
+		DHD_ERROR(("info ring create status for not pending cpl ring\n"));
+		return;
+	}
+
+	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+		DHD_ERROR(("info cpl ring create failed with status %d\n",
+			ltoh16(resp->cmplt.status)));
+		return;
+	}
+	dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+	dhd->prot->d2hring_info_cpln->inited = TRUE;
+}
+
+static void
+dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
+{
+	d2h_mailbox_data_t *d2h_data;
+
+	d2h_data = (d2h_mailbox_data_t *)buf;
+	DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
+		d2h_data->d2h_mailbox_data));
+	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
+}
+
+static void
+dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
+{
+#ifdef DHD_TIMESYNC
+	host_timestamp_msg_cpl_t  *host_ts_cpl;
+	uint32 pktid;
+	dhd_prot_t *prot = dhd->prot;
+
+	host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
+	DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
+		host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
+
+	pktid = ltoh32(host_ts_cpl->msg.request_id);
+	if (prot->hostts_req_buf_inuse == FALSE) {
+		DHD_ERROR(("No Pending Host TS req, but completion\n"));
+		return;
+	}
+	prot->hostts_req_buf_inuse = FALSE;
+	if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
+		DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
+			pktid, DHD_H2D_HOSTTS_REQ_PKTID));
+		return;
+	}
+	dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
+		host_ts_cpl->cmplt.status);
+#else /* DHD_TIMESYNC */
+	DHD_ERROR(("Timesync feature not compiled in but GOT HOST_TS_COMPLETE\n"));
+#endif /* DHD_TIMESYNC */
+
+}
+
+/** called on e.g. flow ring delete */
+void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
+{
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	dhd_prot_ring_detach(dhd, flow_ring);
+	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
+}
+
+void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+	struct bcmstrbuf *strbuf, const char * fmt)
+{
+	const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x"
+		" WORK ITEM SIZE %d MAX WORK ITEMS %d SIZE %d\n";
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	uint16 rd, wr;
+	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
+
+	if (fmt == NULL) {
+		fmt = default_fmt;
+	}
+	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+	bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
+		ltoh32(flow_ring->base_addr.high_addr),
+		ltoh32(flow_ring->base_addr.low_addr),
+		flow_ring->item_len, flow_ring->max_items, dma_buf_len);
+}
+
+void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
+		dhd->prot->device_ipc_version,
+		dhd->prot->host_ipc_version,
+		dhd->prot->active_ipc_version);
+
+	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
+		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
+	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
+		dhd->prot->max_infobufpost, dhd->prot->infobufpost);
+	bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
+		dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
+	bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
+		dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
+	bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
+		dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
+
+	bcm_bprintf(strbuf,
+		"%14s %5s %5s %17s %17s %14s %14s %10s\n",
+		"Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
+		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
+	bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
+	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
+		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+	bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
+	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
+		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+	bcm_bprintf(strbuf, "%14s", "H2DRxPost");
+	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
+		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+	bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
+	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
+		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+	bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
+	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
+		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
+		bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
+		dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
+			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+		bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
+		dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
+			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
+	}
+
+	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
+		dhd->prot->active_tx_count,
+		DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
+		DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
+		DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
+
+}
+
+int
+dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_delete_request_t *flow_delete_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Request for ring buffer space */
+	flow_delete_rqst = (tx_flowring_delete_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (flow_delete_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
+	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
+	flow_delete_rqst->msg.flags = ring->current_phase;
+
+	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	/* Update Delete info */
+	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	flow_delete_rqst->reason = htol16(BCME_OK);
+
+	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
+		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
+		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
+		flow_ring_node->flow_info.ifindex));
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
+{
+	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
+
+	DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
+		flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
+
+	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
+		flow_delete_resp->cmplt.status);
+}
+
+static void
+dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
+{
+#ifdef IDLE_TX_FLOW_MGMT
+	tx_idle_flowring_resume_response_t	*flow_resume_resp =
+		(tx_idle_flowring_resume_response_t *)msg;
+
+	DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
+		flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
+
+	dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
+		flow_resume_resp->cmplt.status);
+#endif /* IDLE_TX_FLOW_MGMT */
+}
+
+static void
+dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
+{
+#ifdef IDLE_TX_FLOW_MGMT
+	int16 status;
+	tx_idle_flowring_suspend_response_t	*flow_suspend_resp =
+		(tx_idle_flowring_suspend_response_t *)msg;
+	status = flow_suspend_resp->cmplt.status;
+
+	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
+		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
+		status));
+
+	if (status != BCME_OK) {
+
+		DHD_ERROR(("%s Error in Suspending Flow rings!!"
+			"Dongle will still be polling idle rings!!Status = %d \n",
+			__FUNCTION__, status));
+	}
+#endif /* IDLE_TX_FLOW_MGMT */
+}
+
+int
+dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_flush_request_t *flow_flush_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Request for ring buffer space */
+	flow_flush_rqst = (tx_flowring_flush_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+	if (flow_flush_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
+	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
+	flow_flush_rqst->msg.flags = ring->current_phase;
+	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	flow_flush_rqst->reason = htol16(BCME_OK);
+
+	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return BCME_OK;
+} /* dhd_prot_flow_ring_flush */
+
+static void
+dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
+{
+	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
+
+	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
+		flow_flush_resp->cmplt.status));
+
+	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
+		flow_flush_resp->cmplt.status);
+}
+
+/**
+ * Request dongle to configure soft doorbells for D2H rings. Host populated soft
+ * doorbell information is transferred to dongle via the d2h ring config control
+ * message.
+ */
+void
+dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
+{
+#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
+	uint16 ring_idx;
+	uint8 *msg_next;
+	void *msg_start;
+	uint16 alloced = 0;
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	ring_config_req_t *ring_config_req;
+	bcmpcie_soft_doorbell_t *soft_doorbell;
+	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return;
+#endif /* PCIE_INB_DW */
+	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
+
+	if (msg_start == NULL) {
+		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
+			__FUNCTION__, d2h_rings));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return;
+	}
+
+	msg_next = (uint8*)msg_start;
+
+	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
+
+		/* position the ring_config_req into the ctrl subm ring */
+		ring_config_req = (ring_config_req_t *)msg_next;
+
+		/* Common msg header */
+		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
+		ring_config_req->msg.if_id = 0;
+		ring_config_req->msg.flags = 0;
+
+		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+		ctrl_ring->seqnum++;
+
+		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
+
+		/* Ring Config subtype and d2h ring_id */
+		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
+		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
+
+		/* Host soft doorbell configuration */
+		soft_doorbell = &prot->soft_doorbell[ring_idx];
+
+		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
+		ring_config_req->soft_doorbell.haddr.high =
+			htol32(soft_doorbell->haddr.high);
+		ring_config_req->soft_doorbell.haddr.low =
+			htol32(soft_doorbell->haddr.low);
+		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
+		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
+
+		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
+			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
+			ring_config_req->soft_doorbell.haddr.low,
+			ring_config_req->soft_doorbell.value));
+
+		msg_next = msg_next + ctrl_ring->item_len;
+	}
+
+	/* update control subn ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
+}
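+/*
+ * Illustrative sketch (assumption, not part of the driver): before the
+ * request above is submitted, a host platform layer is expected to have
+ * filled in the per-ring soft doorbell table that the function reads, e.g.
+ *
+ *	bcmpcie_soft_doorbell_t *db = &prot->soft_doorbell[ring_idx];
+ *	db->haddr.high = ...;	db->haddr.low = ...;	// host PA of doorbell word
+ *	db->value = ...;	db->items = ...;	db->msecs = ...;
+ *
+ * All field names follow the accesses in the function above; how and where
+ * the table is populated is platform specific.
+ */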
+
+static void
+dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
+{
+	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
+		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
+		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
+}
+
+int
+dhd_prot_debug_dma_info_print(dhd_pub_t *dhd)
+{
+	if (dhd->bus->is_linkdown) {
+		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
+			"due to PCIe link down ------- \r\n"));
+		return 0;
+	}
+
+	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
+
+	//HostToDev
+	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
+	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
+	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
+
+	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
+	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
+	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
+
+	//DevToHost
+	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
+	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
+	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
+
+	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
+	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
+	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
+		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
+
+	return 0;
+}
+
+int
+dhd_prot_debug_info_print(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t *ring;
+	uint16 rd, wr;
+	uint32 intstatus = 0;
+	uint32 intmask = 0;
+	uint32 mbintstatus = 0;
+	uint32 d2h_mb_data = 0;
+	uint32 dma_buf_len;
+
+	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
+	DHD_ERROR(("DHD: %s\n", dhd_version));
+	DHD_ERROR(("Firmware: %s\n", fw_version));
+
+	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
+	DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
+		prot->device_ipc_version,
+		prot->host_ipc_version,
+		prot->active_ipc_version));
+	DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
+		prot->max_tsbufpost, prot->cur_ts_bufs_posted));
+	DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
+		prot->max_infobufpost, prot->infobufpost));
+	DHD_ERROR(("max event bufs to post: %d, posted %d\n",
+		prot->max_eventbufpost, prot->cur_event_bufs_posted));
+	DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
+		prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
+	DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
+		prot->max_rxbufpost, prot->rxbufpost));
+	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
+		h2d_max_txpost, prot->h2d_max_txpost));
+
+	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
+
+	ring = &prot->h2dring_ctrl_subn;
+	dma_buf_len = ring->max_items * ring->item_len;
+	DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+		ltoh32(ring->base_addr.low_addr), dma_buf_len));
+	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+	DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+	DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+
+	ring = &prot->d2hring_ctrl_cpln;
+	dma_buf_len = ring->max_items * ring->item_len;
+	DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+		ltoh32(ring->base_addr.low_addr), dma_buf_len));
+	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+	DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+	DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+
+	ring = prot->h2dring_info_subn;
+	if (ring) {
+		dma_buf_len = ring->max_items * ring->item_len;
+		DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+			ltoh32(ring->base_addr.low_addr), dma_buf_len));
+		DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+		DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+		DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+	}
+	ring = prot->d2hring_info_cpln;
+	if (ring) {
+		dma_buf_len = ring->max_items * ring->item_len;
+		DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+			ltoh32(ring->base_addr.low_addr), dma_buf_len));
+		DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+		DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+		DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
+	}
+
+	DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
+		__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
+
+	if (!dhd->bus->is_linkdown && dhd->bus->intstatus != (uint32)-1) {
+		DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
+		intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+			PCIMailBoxInt, 0, 0);
+		intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+			PCIMailBoxMask, 0, 0);
+		mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+			PCID2H_MailBox, 0, 0);
+		dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+
+		DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
+			intstatus, intmask, mbintstatus));
+		DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
+			dhd->bus->def_intmask));
+
+		DHD_ERROR(("host pcie_irq enabled = %d\n", dhdpcie_irq_enabled(dhd->bus)));
+
+		DHD_ERROR(("\n ------- DUMPING PCIE Registers ------- \r\n"));
+		/* hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
+		DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x\n",
+			PCIECFGREG_STATUS_CMD,
+			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
+			PCIECFGREG_BASEADDR0,
+			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32))));
+		DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
+			"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
+			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
+			sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
+			dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
+			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
+			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
+			sizeof(uint32))));
+
+		/* hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
+		 * CurrentPcieGen2ProgramGuide/pcie_ep.htm
+		 */
+		DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
+			"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
+			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
+			PCIECFGREG_PHY_DBG_CLKREQ1,
+			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
+			PCIECFGREG_PHY_DBG_CLKREQ2,
+			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
+			PCIECFGREG_PHY_DBG_CLKREQ3,
+			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
+
+#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
+		DHD_ERROR(("Pcie RC Error Status Val=0x%x\n",
+			dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+			PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+
+		DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
+			dhd_debug_get_rc_linkcap(dhd->bus)));
+#endif
+
+		DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters  ------- \r\n"));
+		DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
+			"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
+			"dpc_return_busdown_count=%lu\n",
+			dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
+			dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
+			dhd->bus->dpc_return_busdown_count));
+
+	}
+	dhd_prot_debug_dma_info_print(dhd);
+#ifdef DHD_FW_COREDUMP
+	if (dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+		if (dhd->sssr_inited) {
+			dhdpcie_sssr_dump(dhd);
+		}
+#endif /* DHD_SSSR_DUMP */
+	}
+#endif /* DHD_FW_COREDUMP */
+	return 0;
+}
+
+int
+dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+	uint32 *ptr;
+	uint32 value;
+	uint32 i;
+	uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
+
+	OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
+		dhd->prot->d2h_dma_indx_wr_buf.len);
+
+	ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
+
+	bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
+
+	bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+
+	ptr++;
+	bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
+	for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+		value = ltoh32(*ptr);
+		bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+		ptr++;
+	}
+
+	OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
+		dhd->prot->h2d_dma_indx_rd_buf.len);
+
+	ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
+
+	bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+
+	return 0;
+}
+
+uint32
+dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
+{
+	dhd_prot_t *prot = dhd->prot;
+#if DHD_DBG_SHOW_METADATA
+	prot->metadata_dbg = val;
+#endif
+	return (uint32)prot->metadata_dbg;
+}
+
+uint32
+dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	return (uint32)prot->metadata_dbg;
+}
+
+uint32
+dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (rx)
+		prot->rx_metadata_offset = (uint16)val;
+	else
+		prot->tx_metadata_offset = (uint16)val;
+	return dhd_prot_metadatalen_get(dhd, rx);
+}
+
+uint32
+dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (rx)
+		return prot->rx_metadata_offset;
+	else
+		return prot->tx_metadata_offset;
+}
+
+/** optimization to write "n" tx items at a time to ring */
+uint32
+dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (set)
+		prot->txp_threshold = (uint16)val;
+	val = prot->txp_threshold;
+	return val;
+}
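+/*
+ * Usage sketch (illustrative only): dhd_prot_txp_threshold(dhd, TRUE, 8)
+ * would set the batching threshold to 8, i.e. up to 8 tx work items are
+ * written to the ring at a time per the comment above, while
+ * dhd_prot_txp_threshold(dhd, FALSE, 0) just reads the current value back.
+ * The value 8 is an arbitrary example.
+ */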
+
+#ifdef DHD_RX_CHAINING
+
+static INLINE void BCMFASTPATH
+dhd_rxchain_reset(rxchain_info_t *rxchain)
+{
+	rxchain->pkt_count = 0;
+}
+
+static void BCMFASTPATH
+dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
+{
+	uint8 *eh;
+	uint8 prio;
+	dhd_prot_t *prot = dhd->prot;
+	rxchain_info_t *rxchain = &prot->rxchain;
+
+	ASSERT(!PKTISCHAINED(pkt));
+	ASSERT(PKTCLINK(pkt) == NULL);
+	ASSERT(PKTCGETATTR(pkt) == 0);
+
+	eh = PKTDATA(dhd->osh, pkt);
+	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
+
+	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
+		rxchain->h_da, rxchain->h_prio))) {
+		/* Different flow - First release the existing chain */
+		dhd_rxchain_commit(dhd);
+	}
+
+	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
+	/* so that the chain can be handed off to CTF bridge as is. */
+	if (rxchain->pkt_count == 0) {
+		/* First packet in chain */
+		rxchain->pkthead = rxchain->pkttail = pkt;
+
+		/* Keep a copy of ptr to ether_da, ether_sa and prio */
+		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+		rxchain->h_prio = prio;
+		rxchain->ifidx = ifidx;
+		rxchain->pkt_count++;
+	} else {
+		/* Same flow - keep chaining */
+		PKTSETCLINK(rxchain->pkttail, pkt);
+		rxchain->pkttail = pkt;
+		rxchain->pkt_count++;
+	}
+
+	if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
+		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
+		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
+		PKTSETCHAINED(dhd->osh, pkt);
+		PKTCINCRCNT(rxchain->pkthead);
+		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
+	} else {
+		dhd_rxchain_commit(dhd);
+		return;
+	}
+
+	/* If we have hit the max chain length, dispatch the chain and reset */
+	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
+		dhd_rxchain_commit(dhd);
+	}
+}
+
+static void BCMFASTPATH
+dhd_rxchain_commit(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	rxchain_info_t *rxchain = &prot->rxchain;
+
+	if (rxchain->pkt_count == 0)
+		return;
+
+	/* Release the packets to dhd_linux */
+	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
+
+	/* Reset the chain */
+	dhd_rxchain_reset(rxchain);
+}
+
+#endif /* DHD_RX_CHAINING */
+
+
+#ifdef IDLE_TX_FLOW_MGMT
+int
+dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_idle_flowring_resume_request_t *flow_resume_rqst;
+	msgbuf_ring_t *flow_ring;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+
+	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
+	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
+	if (flow_ring == NULL) {
+		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
+			__FUNCTION__, flow_ring_node->flowid));
+		return BCME_NOMEM;
+	}
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Request for ctrl_ring buffer space */
+	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
+
+	if (flow_resume_rqst == NULL) {
+		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
+		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
+			__FUNCTION__, flow_ring_node->flowid));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	flow_ring_node->prot_info = (void *)flow_ring;
+
+	/* Common msg buf hdr */
+	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
+	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
+
+	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+	ctrl_ring->seqnum++;
+
+	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
+		__FUNCTION__, flow_ring_node->flowid));
+
+	/* Update the flow_ring's WRITE index */
+	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
+		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+			H2D_IFRM_INDX_WR_UPD,
+			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
+	} else {
+		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
+			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
+	}
+
+	/* update control subn ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return BCME_OK;
+} /* dhd_prot_flow_ring_resume */
+
+int
+dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
+{
+	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	unsigned long flags;
+	uint16 index;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Request for ring buffer space */
+	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (flow_suspend_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
+	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
+	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
+
+	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	/* Update flow id  info */
+	for (index = 0; index < count; index++)
+	{
+		flow_suspend_rqst->ring_id[index] = ringid[index];
+	}
+	flow_suspend_rqst->num = count;
+
+	DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
+	return BCME_OK;
+}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+
+int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
+{
+	uint32 i;
+	uint32 *ext_data;
+	hnd_ext_trap_hdr_t *hdr;
+	bcm_tlv_t *tlv;
+	trap_t *tr;
+	uint32 *stack;
+	hnd_ext_trap_bp_err_t *bpe;
+	uint32 raw_len;
+
+	ext_data = dhdp->extended_trap_data;
+
+	/* return if there is no extended trap data */
+	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
+	{
+		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
+		return BCME_OK;
+	}
+
+	bcm_bprintf(b, "Extended trap data\n");
+
+	/* First word is original trap_data */
+	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
+	ext_data++;
+
+	/* Followed by the extended trap data header */
+	hdr = (hnd_ext_trap_hdr_t *)ext_data;
+	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
+
+	if (raw)
+	{
+		raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
+		for (i = 0; i < raw_len; i++)
+		{
+			bcm_bprintf(b, "0x%08x ", ext_data[i]);
+			if (i % 4 == 3)
+				bcm_bprintf(b, "\n");
+		}
+		return BCME_OK;
+	}
+
+	/* Extract the various supported TLVs from the extended trap data */
+	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
+	if (tlv)
+	{
+		bcm_bprintf(b, "\nTAG_TRAP_SIGNATURE len: %d\n", tlv->len);
+		tr = (trap_t *)tlv->data;
+
+		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
+		       tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
+		bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
+		       tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
+		bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
+		       tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
+	}
+
+	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
+	if (tlv)
+	{
+		bcm_bprintf(b, "\nTAG_TRAP_STACK len: %d\n", tlv->len);
+		stack = (uint32 *)tlv->data;
+		for (i = 0; i < (uint32)(tlv->len / 4); i++)
+		{
+			bcm_bprintf(b, "  0x%08x\n", *stack);
+			stack++;
+		}
+	}
+
+	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
+	if (tlv)
+	{
+		bcm_bprintf(b, "\nTAG_TRAP_BACKPLANE len: %d\n", tlv->len);
+		bpe = (hnd_ext_trap_bp_err_t *)tlv->data;
+		bcm_bprintf(b, " error: %x\n", bpe->error);
+		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
+		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
+		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
+		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
+		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
+		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
+		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
+		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
+		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
+		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
+		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
+		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
+		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
+		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
+	}
+
+	return BCME_OK;
+}
+
+
+#ifdef BCMPCIE
+int
+dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
+	uint16 seqnum, uint16 xt_id)
+{
+	dhd_prot_t *prot = dhdp->prot;
+	host_timestamp_msg_t *ts_req;
+	unsigned long flags;
+	uint16 alloced = 0;
+	uchar *ts_tlv_buf;
+
+	if ((tlvs == NULL) || (tlv_len == 0)) {
+		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
+			__FUNCTION__, tlvs, tlv_len));
+		return -1;
+	}
+#ifdef PCIE_INB_DW
+	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
+		return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+	DHD_GENERAL_LOCK(dhdp, flags);
+
+	/* if Host TS req already pending go away */
+	if (prot->hostts_req_buf_inuse == TRUE) {
+		DHD_ERROR(("one host TS request already pending at device\n"));
+		DHD_GENERAL_UNLOCK(dhdp, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+		return -1;
+	}
+
+	/* Request for cbuf space */
+	ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, &prot->h2dring_ctrl_subn,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,	&alloced, FALSE);
+	if (ts_req == NULL) {
+		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
+		DHD_GENERAL_UNLOCK(dhdp, flags);
+#ifdef PCIE_INB_DW
+		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+		return -1;
+	}
+
+	/* Common msg buf hdr */
+	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
+	ts_req->msg.if_id = 0;
+	ts_req->msg.flags =  prot->h2dring_ctrl_subn.current_phase;
+	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
+
+	ts_req->msg.epoch = prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+	prot->h2dring_ctrl_subn.seqnum++;
+
+	ts_req->xt_id = xt_id;
+	ts_req->seqnum = seqnum;
+	/* populate TS req buffer info */
+	ts_req->input_data_len = htol16(tlv_len);
+	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
+	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
+	/* copy ioct payload */
+	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
+	prot->hostts_req_buf_inuse = TRUE;
+	memcpy(ts_tlv_buf, tlvs, tlv_len);
+
+	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
+
+	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
+		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
+	}
+
+	DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n",
+		ts_req->msg.request_id, ts_req->input_data_len,
+		ts_req->xt_id, ts_req->seqnum));
+
+
+	/* upd wrt ptr and raise interrupt */
+	dhd_prot_ring_write_complete(dhdp, &prot->h2dring_ctrl_subn, ts_req,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+#ifdef PCIE_INB_DW
+	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
+
+	return 0;
+} /* dhd_prot_send_host_timestamp */
+
+
+bool
+dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
+{
+	if (set)
+		dhd->prot->tx_ts_log_enabled = enable;
+
+	return dhd->prot->tx_ts_log_enabled;
+}
+
+bool
+dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
+{
+	if (set)
+		dhd->prot->rx_ts_log_enabled = enable;
+
+	return dhd->prot->rx_ts_log_enabled;
+}
+#endif /* BCMPCIE */
+
+void
+dhd_prot_dma_indx_free(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+}
+
+static void BCMFASTPATH
+dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
+{
+#ifdef DHD_TIMESYNC
+	fw_timestamp_event_msg_t *resp;
+	uint32 pktid;
+	uint16 buflen, seqnum;
+	void * pkt;
+	unsigned long flags;
+
+	resp = (fw_timestamp_event_msg_t *)buf;
+	pktid = ltoh32(resp->msg.request_id);
+	buflen = ltoh16(resp->buf_len);
+	seqnum = ltoh16(resp->seqnum);
+
+#if defined(DHD_PKTID_AUDIT_RING)
+	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
+		DHD_DUPLICATE_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
+		pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
+
+	if (!dhd->prot->cur_ts_bufs_posted) {
+		DHD_ERROR(("tsbuf posted are zero, but there is a completion\n"));
+		return;
+	}
+
+	dhd->prot->cur_ts_bufs_posted--;
+	if (dhd->prot->max_tsbufpost > 0)
+		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (!pkt) {
+		DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
+		return;
+	}
+
+	PKTSETLEN(dhd->osh, pkt, buflen);
+	dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
+#ifdef DHD_USE_STATIC_CTRLBUF
+	PKTFREE_STATIC(dhd->osh, pkt, TRUE);
+#else
+	PKTFREE(dhd->osh, pkt, TRUE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#else /* DHD_TIMESYNC */
+	DHD_ERROR(("Timesunc feature not compiled in but GOT FW TS message\n"));
+#endif /* DHD_TIMESYNC */
+
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.c b/drivers/net/wireless/bcmdhd/dhd_pcie.c
new file mode 100644
index 0000000..a785fe5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie.c
@@ -0,0 +1,8518 @@
+/*
+ * DHD Bus Module for PCIE
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pcie.c 710862 2017-07-14 07:43:59Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <hnd_debug.h>
+#include <sbchipc.h>
+#include <hnd_armtrap.h>
+#if defined(DHD_DEBUG)
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_flowring.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhd_daemon.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <bcmpcie.h>
+#include <bcmendian.h>
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+#include <bcmevent.h>
+#include <dhd_config.h>
+
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
+
+#if defined(BCMEMBEDIMAGE)
+#ifndef DHD_EFI
+#include BCMEMBEDIMAGE
+#else
+#include <rtecdc_4364.h>
+#endif /* !DHD_EFI */
+#endif /* BCMEMBEDIMAGE */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_WKLK_IDLE_CHECK	3	/* times wake_lock checked before deciding not to suspend */
+
+#define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
+#define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
+/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
+
+/* CTO Prevention Recovery */
+#define CTO_TO_CLEAR_WAIT_MS 1000
+#define CTO_TO_CLEAR_WAIT_MAX_CNT 10
+
+#if defined(SUPPORT_MULTIPLE_BOARD_REV)
+	extern unsigned int system_rev;
+#endif /* SUPPORT_MULTIPLE_BOARD_REV */
+
+int dhd_dongle_memsize;
+int dhd_dongle_ramsize;
+static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
+#if defined(DHD_FW_COREDUMP)
+struct dhd_bus *g_dhd_bus = NULL;
+static int dhdpcie_mem_dump(dhd_bus_t *bus);
+#endif /* DHD_FW_COREDUMP */
+
+static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
+static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
+	const char *name, void *params,
+	int plen, void *arg, int len, int val_size);
+static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
+static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
+	uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk);
+static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
+static int _dhdpcie_download_firmware(struct dhd_bus *bus);
+static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
+static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
+static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
+static int dhdpcie_readshared(dhd_bus_t *bus);
+static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
+static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
+static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
+static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
+	bool dongle_isolation, bool reset_flag);
+static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
+#ifdef DHD_SUPPORT_64BIT
+static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
+#endif /* DHD_SUPPORT_64BIT */
+static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
+static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
+static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
+static void dhdpcie_fw_trap(dhd_bus_t *bus);
+static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+extern void dhd_dpc_enable(dhd_pub_t *dhdp);
+extern void dhd_dpc_kill(dhd_pub_t *dhdp);
+
+#ifdef IDLE_TX_FLOW_MGMT
+static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
+static void dhd_bus_idle_scan(dhd_bus_t *bus);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef BCMEMBEDIMAGE
+static int dhdpcie_download_code_array(dhd_bus_t *bus);
+#endif /* BCMEMBEDIMAGE */
+
+
+#ifdef EXYNOS_PCIE_DEBUG
+extern void exynos_pcie_register_dump(int ch_num);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#define     PCI_VENDOR_ID_BROADCOM          0x14e4
+
+#define DHD_DEFAULT_DOORBELL_TIMEOUT 200	/* ms */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
+static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);
+
+#ifdef BCM_ASLR_HEAP
+static void dhdpcie_wrt_rnd(struct dhd_bus *bus);
+#endif /* BCM_ASLR_HEAP */
+
+extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
+extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_MEMSIZE,
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_DEVRESET,
+	IOV_VARS,
+	IOV_MSI_SIM,
+	IOV_PCIE_LPBK,
+	IOV_CC_NVMSHADOW,
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+	IOV_SLEEP_ALLOWED,
+	IOV_PCIE_DMAXFER,
+	IOV_PCIE_SUSPEND,
+	IOV_DONGLEISOLATION,
+	IOV_LTRSLEEPON_UNLOOAD,
+	IOV_METADATA_DBG,
+	IOV_RX_METADATALEN,
+	IOV_TX_METADATALEN,
+	IOV_TXP_THRESHOLD,
+	IOV_BUZZZ_DUMP,
+	IOV_DUMP_RINGUPD_BLOCK,
+	IOV_DMA_RINGINDICES,
+	IOV_FORCE_FW_TRAP,
+	IOV_DB1_FOR_MB,
+	IOV_FLOW_PRIO_MAP,
+#ifdef DHD_PCIE_RUNTIMEPM
+	IOV_IDLETIME,
+#endif /* DHD_PCIE_RUNTIMEPM */
+	IOV_RXBOUND,
+	IOV_TXBOUND,
+	IOV_HANGREPORT,
+	IOV_H2D_MAILBOXDATA,
+	IOV_INFORINGS,
+	IOV_H2D_PHASE,
+	IOV_H2D_ENABLE_TRAP_BADPHASE,
+	IOV_H2D_TXPOST_MAX_ITEM,
+	IOV_TRAPDATA,
+	IOV_TRAPDATA_RAW,
+	IOV_CTO_PREVENTION,
+#ifdef PCIE_OOB
+	IOV_OOB_BT_REG_ON,
+	IOV_OOB_ENABLE,
+#endif /* PCIE_OOB */
+	IOV_PCIE_WD_RESET,
+	IOV_CTO_THRESHOLD,
+#ifdef DHD_EFI
+	IOV_CONTROL_SIGNAL,
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	IOV_DEEP_SLEEP,
+#endif /* PCIE_OOB || PCIE_INB_DW */
+#endif /* DHD_EFI */
+#ifdef DEVICE_TX_STUCK_DETECT
+	IOV_DEVICE_TX_STUCK_DETECT,
+#endif /* DEVICE_TX_STUCK_DETECT */
+	IOV_INB_DW_ENABLE,
+	IOV_IDMA_ENABLE,
+	IOV_IFRM_ENABLE,
+	IOV_CLEAR_RING,
+#ifdef DHD_EFI
+	IOV_WIFI_PROPERTIES,
+	IOV_OTP_DUMP
+#endif
+};
+
+
+const bcm_iovar_t dhdpcie_iovars[] = {
+	{"intr",	IOV_INTR,	0,	0, IOVT_BOOL,	0 },
+	{"memsize",	IOV_MEMSIZE,	0,	0, IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	0, IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	0, IOVT_BUFFER,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	0, IOVT_BOOL,	0 },
+	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0,	0, 0,	0 },
+	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	0, IOVT_UINT32,	0 },
+	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
+	{"ramsize",	IOV_RAMSIZE,	0,	0, IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0,	0, IOVT_UINT32,	0 },
+	{"pcie_dmaxfer",	IOV_PCIE_DMAXFER,	0,	0, IOVT_BUFFER,	3 * sizeof(int32) },
+	{"pcie_suspend", IOV_PCIE_SUSPEND,	0,	0, IOVT_UINT32,	0 },
+#ifdef PCIE_OOB
+	{"oob_bt_reg_on", IOV_OOB_BT_REG_ON,    0, 0,  IOVT_UINT32,    0 },
+	{"oob_enable",   IOV_OOB_ENABLE,    0, 0,  IOVT_UINT32,    0 },
+#endif /* PCIE_OOB */
+	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	0, IOVT_BOOL,	0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	0, IOVT_UINT32,	0 },
+	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	0, IOVT_UINT32,	0 },
+	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0,	0, IOVT_BUFFER,	0 },
+	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0,	0, IOVT_UINT32,	0},
+	{"metadata_dbg", IOV_METADATA_DBG,	0,	0, IOVT_BOOL,	0 },
+	{"rx_metadata_len", IOV_RX_METADATALEN,	0,	0, IOVT_UINT32,	0 },
+	{"tx_metadata_len", IOV_TX_METADATALEN,	0,	0, IOVT_UINT32,	0 },
+	{"db1_for_mb", IOV_DB1_FOR_MB,	0,	0, IOVT_UINT32,	0 },
+	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
+	{"buzzz_dump", IOV_BUZZZ_DUMP,		0,	0, IOVT_UINT32,	0 },
+	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0,	0, IOVT_UINT32,	0 },
+#ifdef DHD_PCIE_RUNTIMEPM
+	{"idletime",    IOV_IDLETIME,   0, 0,      IOVT_INT32,     0 },
+#endif /* DHD_PCIE_RUNTIMEPM */
+	{"rxbound",     IOV_RXBOUND,    0, 0,      IOVT_UINT32,    0 },
+	{"txbound",     IOV_TXBOUND,    0, 0,      IOVT_UINT32,    0 },
+	{"fw_hang_report", IOV_HANGREPORT,	0,	0, IOVT_BOOL,	0 },
+	{"h2d_mb_data",     IOV_H2D_MAILBOXDATA,    0, 0,      IOVT_UINT32,    0 },
+	{"inforings",   IOV_INFORINGS,    0, 0,      IOVT_UINT32,    0 },
+	{"h2d_phase",   IOV_H2D_PHASE,    0, 0,      IOVT_UINT32,    0 },
+	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE,    0, 0,
+	IOVT_UINT32,    0 },
+	{"h2d_max_txpost",   IOV_H2D_TXPOST_MAX_ITEM,    0, 0,      IOVT_UINT32,    0 },
+	{"trap_data",	IOV_TRAPDATA,	0,	0,	IOVT_BUFFER,	0 },
+	{"trap_data_raw",	IOV_TRAPDATA_RAW,	0, 0,	IOVT_BUFFER,	0 },
+	{"cto_prevention",	IOV_CTO_PREVENTION,	0,	0, IOVT_UINT32,	0 },
+	{"pcie_wd_reset",	IOV_PCIE_WD_RESET,	0,	0, IOVT_BOOL,	0 },
+	{"cto_threshold",	IOV_CTO_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
+#ifdef DHD_EFI
+	{"control_signal", IOV_CONTROL_SIGNAL,	0, 0, IOVT_UINT32, 0},
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	{"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32,    0},
+#endif /* PCIE_OOB || PCIE_INB_DW */
+#endif /* DHD_EFI */
+	{"inb_dw_enable",   IOV_INB_DW_ENABLE,    0, 0,  IOVT_UINT32,    0 },
+#ifdef DEVICE_TX_STUCK_DETECT
+	{"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 },
+#endif /* DEVICE_TX_STUCK_DETECT */
+	{"idma_enable",   IOV_IDMA_ENABLE,    0, 0,  IOVT_UINT32,    0 },
+	{"ifrm_enable",   IOV_IFRM_ENABLE,    0, 0,  IOVT_UINT32,    0 },
+	{"clear_ring",   IOV_CLEAR_RING,    0, 0,  IOVT_UINT32,    0 },
+#ifdef DHD_EFI
+	{"properties", IOV_WIFI_PROPERTIES,	0, 0, IOVT_BUFFER, 0},
+	{"otp_dump", IOV_OTP_DUMP,	0, 0, IOVT_BUFFER, 0},
+#endif
+	{NULL, 0, 0, 0, 0, 0 }
+};
+
+
+#define MAX_READ_TIMEOUT	(5 * 1000 * 1000)
+
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND		64
+#endif
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND		64
+#endif
+
+#define DHD_INFORING_BOUND	32
+
+uint dhd_rxbound = DHD_RXBOUND;
+uint dhd_txbound = DHD_TXBOUND;
+
+/**
+ * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
+ * link with the bus driver, in order to look for or await the device.
+ */
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return dhdpcie_bus_register();
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhdpcie_bus_unregister();
+	return;
+}
+
+
+/** returns a host virtual address */
+uint32 *
+dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
+{
+	REG_UNMAP(addr);
+	return;
+}
+
+/**
+ * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
+ * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
+ * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
+ *
+ * 'tcm' is the *host* virtual address at which tcm is mapped.
+ */
+dhd_bus_t* dhdpcie_bus_attach(osl_t *osh,
+	volatile char *regs, volatile char *tcm, void *pci_dev)
+{
+	dhd_bus_t *bus;
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+			break;
+		}
+
+		bus->regs = regs;
+		bus->tcm = tcm;
+		bus->osh = osh;
+		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
+		bus->dev = (struct pci_dev *)pci_dev;
+
+
+		dll_init(&bus->flowring_active_list);
+#ifdef IDLE_TX_FLOW_MGMT
+		bus->active_list_last_process_ts = OSL_SYSUPTIME();
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+		/* Enable the Device stuck detection feature by default */
+		bus->dev_tx_stuck_monitor = TRUE;
+		bus->device_tx_stuck_check = OSL_SYSUPTIME();
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+		/* Attach pcie shared structure */
+		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
+			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* dhd_common_init(osh); */
+
+		if (dhdpcie_dongle_attach(bus)) {
+			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* software resources */
+		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
+			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+
+			break;
+		}
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->db1_for_mb = TRUE;
+		bus->dhd->hang_report = TRUE;
+		bus->use_mailbox = FALSE;
+		bus->use_d0_inform = FALSE;
+#ifdef IDLE_TX_FLOW_MGMT
+		bus->enable_idle_flowring_mgmt = FALSE;
+#endif /* IDLE_TX_FLOW_MGMT */
+		bus->irq_registered = FALSE;
+
+		DHD_TRACE(("%s: EXIT SUCCESS\n",
+			__FUNCTION__));
+#ifdef DHD_FW_COREDUMP
+		g_dhd_bus = bus;
+#endif
+		return bus;
+	} while (0);
+
+	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
+
+	if (bus && bus->pcie_sh) {
+		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+	}
+
+	if (bus) {
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+	return NULL;
+}
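+/*
+ * Illustrative call from a (hypothetical) PCIe probe path, per the comment
+ * above dhdpcie_bus_attach(): 'regs' is the host virtual address of the BAR0
+ * window and 'tcm' the host virtual address at which TCM is mapped, e.g.
+ *
+ *	bus = dhdpcie_bus_attach(osh, bar0_va, tcm_va, pdev);
+ *
+ * bar0_va/tcm_va/pdev are placeholders for whatever the platform probe code
+ * mapped and discovered; they are not names used elsewhere in this file.
+ */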
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+const void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+	return (const void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+/** Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return  bus->sih->chip;
+}
+
+/** Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->sih->chiprev;
+}
+
+/** Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->sih->chippkg;
+}
+
+/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
+uint32
+dhdpcie_bus_intstatus(dhd_bus_t *bus)
+{
+	uint32 intstatus = 0;
+#ifndef DHD_READ_INTSTATUS_IN_DPC
+	uint32 intmask = 0;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+	if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+		bus->wait_for_d3_ack) {
+#ifdef DHD_EFI
+		DHD_INFO(("%s: trying to clear intstatus during suspend (%d)"
+			" or suspend in progress %d\n",
+			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+#else
+		DHD_ERROR(("%s: trying to clear intstatus during suspend (%d)"
+			" or suspend in progress %d\n",
+			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+#endif /* !DHD_EFI */
+		return intstatus;
+	}
+	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+		(bus->sih->buscorerev == 2)) {
+		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
+		intstatus &= I_MB;
+	} else {
+		/* this is a PCIE core register..not a config register... */
+		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+
+#ifndef DHD_READ_INTSTATUS_IN_DPC
+		/* this is a PCIE core register..not a config register... */
+		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+
+		intstatus &= intmask;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+		/* Is device removed. intstatus & intmask read 0xffffffff */
+		if (intstatus == (uint32)-1) {
+			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
+#ifdef CUSTOMER_HW4_DEBUG
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+			dhd_os_send_hang_message(bus->dhd);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+#endif /* CUSTOMER_HW4_DEBUG */
+			return intstatus;
+		}
+
+
+		/*
+		 * The fourth argument to si_corereg is the "mask" fields of the register to update
+		 * and the fifth field is the "value" to update. Now if we are interested in only
+		 * few fields of the "mask" bit map, we should not be writing back what we read
+		 * By doing so, we might clear/ack interrupts that are not handled yet.
+		 */
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
+			intstatus);
+
+		intstatus &= bus->def_intmask;
+	}
+
+	return intstatus;
+}
+
+/**
+ * Name:  dhdpcie_bus_isr
+ * Parameters:
+ * 1: IN int irq   -- interrupt vector
+ * 2: IN void *arg      -- handle to private data structure
+ * Return value:
+ * Status (TRUE or FALSE); BCME_ERROR if the device is removed or the PCIe link is down
+ *
+ * Description:
+ * Interrupt Service routine checks for the status register,
+ * disable interrupt and queue DPC if mail box interrupts are raised.
+ */
+int32
+dhdpcie_bus_isr(dhd_bus_t *bus)
+{
+	uint32 intstatus = 0;
+
+	do {
+		DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+		/* verify argument */
+		if (!bus) {
+			DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
+			break;
+		}
+
+		if (bus->dhd->dongle_reset) {
+			break;
+		}
+
+		if (bus->dhd->busstate == DHD_BUS_DOWN) {
+			break;
+		}
+
+
+		if (PCIECTO_ENAB(bus->dhd)) {
+			/* read pci_intstatus */
+			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
+
+			if (intstatus & PCI_CTO_INT_MASK) {
+				/* reset backplane and cto,
+				 *  then access through pcie is recovered.
+				 */
+				dhdpcie_cto_error_recovery(bus);
+				return TRUE;
+			}
+		}
+
+#ifndef DHD_READ_INTSTATUS_IN_DPC
+		intstatus = dhdpcie_bus_intstatus(bus);
+
+		/* Check if the interrupt is ours or not */
+		if (intstatus == 0) {
+			break;
+		}
+
+		/* save the intstatus */
+		/* read interrupt status register!! Status bits will be cleared in DPC !! */
+		bus->intstatus = intstatus;
+
+		/* return error for 0xFFFFFFFF */
+		if (intstatus == (uint32)-1) {
+			dhdpcie_disable_irq_nosync(bus);
+			bus->is_linkdown = TRUE;
+			return BCME_ERROR;
+		}
+
+		/*  Overall operation:
+		 *    - Mask further interrupts
+		 *    - Read/ack intstatus
+		 *    - Take action based on bits and state
+		 *    - Reenable interrupts (as per state)
+		 */
+
+		/* Count the interrupt call */
+		bus->intrcount++;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+		bus->ipend = TRUE;
+
+		bus->isr_intr_disable_count++;
+		dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
+
+		bus->intdis = TRUE;
+
+#if defined(PCIE_ISR_THREAD)
+
+		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
+		DHD_OS_WAKE_LOCK(bus->dhd);
+		while (dhd_bus_dpc(bus));
+		DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+		bus->dpc_sched = TRUE;
+		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
+#endif /* defined(PCIE_ISR_THREAD) */
+
+		DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
+		return TRUE;
+
+	} while (0);
+
+	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
+	return FALSE;
+}
+
+int
+dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
+{
+	uint32 cur_state = 0;
+	uint32 pm_csr = 0;
+	osl_t *osh = bus->osh;
+
+	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
+	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
+
+	if (cur_state == state) {
+		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
+		return BCME_OK;
+	}
+
+	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
+		return BCME_ERROR;
+
+	/* Validate the state transition
+	* if already in a lower power state, return error
+	*/
+	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
+			cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
+			cur_state > state) {
+		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
+	pm_csr |= state;
+
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
+
+	/* need to wait for the specified mandatory pcie power transition delay time */
+	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
+			cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
+			OSL_DELAY(DHDPCIE_PM_D3_DELAY);
+	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
+			cur_state == PCIECFGREG_PM_CSR_STATE_D2)
+			OSL_DELAY(DHDPCIE_PM_D2_DELAY);
+
+	/* read back the power state and verify */
+	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
+	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
+	if (cur_state != state) {
+		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
+				__FUNCTION__, cur_state));
+		return BCME_ERROR;
+	} else {
+		DHD_ERROR(("%s: power transition to %u success \n",
+				__FUNCTION__, cur_state));
+	}
+
+	return BCME_OK;
+
+}
+
+int
+dhdpcie_config_check(dhd_bus_t *bus)
+{
+	uint32 i, val;
+	int ret = BCME_ERROR;
+
+	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
+		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
+		if ((val & 0xFFFF) == VENDOR_BROADCOM) {
+			ret = BCME_OK;
+			break;
+		}
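+		/* DHDPCIE_CONFIG_CHECK_DELAY_MS is in milliseconds; OSL_DELAY() takes microseconds,
+		 * hence the * 1000 below.
+		 */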
+		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
+	}
+
+	return ret;
+}
+
+int
+dhdpcie_config_restore(dhd_bus_t *bus,  bool restore_pmcsr)
+{
+	uint32 i;
+	osl_t *osh = bus->osh;
+
+	if (BCME_OK != dhdpcie_config_check(bus)) {
+		return BCME_ERROR;
+	}
+
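+	/* Restore the header starting at the revision-ID dword; VID/DID and the command
+	 * register are skipped here, and the command register (header[1]) is rewritten
+	 * separately below, after the rest of the header.
+	 */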
+	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
+		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
+	}
+	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
+
+	if (restore_pmcsr)
+		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
+				sizeof(uint32), bus->saved_config.pmcsr);
+
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
+			bus->saved_config.msi_addr0);
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
+			sizeof(uint32), bus->saved_config.msi_addr1);
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
+			sizeof(uint32), bus->saved_config.msi_data);
+
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
+			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
+			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
+			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
+			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
+
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
+			sizeof(uint32), bus->saved_config.l1pm0);
+	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
+			sizeof(uint32), bus->saved_config.l1pm1);
+
+	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN,
+			sizeof(uint32), bus->saved_config.bar0_win);
+	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN,
+			sizeof(uint32), bus->saved_config.bar1_win);
+
+	return BCME_OK;
+}
+
+int
+dhdpcie_config_save(dhd_bus_t *bus)
+{
+	uint32 i;
+	osl_t *osh = bus->osh;
+
+	if (BCME_OK != dhdpcie_config_check(bus)) {
+		return BCME_ERROR;
+	}
+
+	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
+		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
+	}
+
+	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
+
+	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
+			sizeof(uint32));
+	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
+			sizeof(uint32));
+	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
+			sizeof(uint32));
+	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
+			sizeof(uint32));
+
+	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
+			PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
+	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
+			PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
+	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
+			PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
+	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
+			PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
+
+	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
+			sizeof(uint32));
+	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
+			sizeof(uint32));
+
+	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
+			sizeof(uint32));
+	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
+			sizeof(uint32));
+	return BCME_OK;
+}
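+
+/*
+ * Usage sketch (hypothetical, not called in this file): the save/restore pair is
+ * meant to bracket a power-state transition, e.g.
+ *
+ *	dhdpcie_config_save(bus);
+ *	dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
+ *	...
+ *	dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
+ *	dhdpcie_config_restore(bus, FALSE);
+ */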
+
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+dhd_pub_t *link_recovery = NULL;
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+static bool
+dhdpcie_dongle_attach(dhd_bus_t *bus)
+{
+
+	osl_t *osh = bus->osh;
+	volatile void *regsva = (volatile void*)bus->regs;
+	uint16 devid = bus->cl_devid;
+	uint32 val;
+	sbpcieregs_t *sbpcieregs;
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+	link_recovery = bus->dhd;
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Set bar0 window to si_enum_base */
+	dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
+
+	/* Check the PCIe bus status by reading the configuration space */
+	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
+	if ((val & 0xFFFF) != VENDOR_BROADCOM) {
+		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/*
+	 * Check the PCI_SPROM_CONTROL register to prevent invalid address accesses
+	 * when switching the address space from PCI_BUS to SI_BUS.
+	 */
+	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
+	if (val == 0xffffffff) {
+		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef DHD_EFI
+	/* Save good copy of PCIe config space */
+	if (BCME_OK != dhdpcie_config_save(bus)) {
+		DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__));
+		goto fail;
+	}
+#endif /* DHD_EFI */
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Olympic EFI requirement: abort the driver load if FW is already running.
+	 * This must be checked here, before pcie_watchdog_reset, because
+	 * pcie_watchdog_reset puts the ARM back into the halt state.
+	 */
+	if (!dhdpcie_is_arm_halted(bus)) {
+		DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
+				__FUNCTION__));
+		goto fail;
+	}
+
+	/* Enable CLKREQ# */
+	dhdpcie_clkreq(bus->osh, 1, 1);
+
+#ifndef DONGLE_ENABLE_ISOLATION
+	/*
+	 * Issue a CC watchdog to reset all the cores on the chip - similar to rmmod dhd.
+	 * This is required to avoid spurious interrupts to the host and to bring the
+	 * dongle back to a sane state (on host soft-reboot / watchdog-reboot).
+	 */
+	pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
+#endif /* !DONGLE_ENABLE_ISOLATION */
+
+#ifdef DHD_EFI
+	dhdpcie_dongle_pwr_toggle(bus);
+#endif
+
+	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	sbpcieregs = (sbpcieregs_t*)(bus->regs);
+
+	/* WAR where the BAR1 window may not be sized properly */
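+	/* Indirect config access: select offset 0x4e0 via configaddr, then read configdata
+	 * and write the same value back as part of the WAR above.
+	 */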
+	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
+	val = R_REG(osh, &sbpcieregs->configdata);
+	W_REG(osh, &sbpcieregs->configdata, val);
+
+	/* Get info on the ARM and SOCRAM cores... */
+	/* Should really be qualified by device id */
+	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
+	    (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+		bus->armrev = si_corerev(bus->sih);
+	} else {
+		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+		/* Only set dongle RAMSIZE to default value when ramsize is not adjusted */
+		if (!bus->ramsize_adjusted) {
+			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+			/* also populate base address */
+			bus->dongle_ram_base = CA7_4365_RAM_BASE;
+			/* Default reserve 1.75MB for CA7 */
+			bus->orig_ramsize = 0x1c0000;
+		}
+	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+	} else {
+		/* CR4 determines the RAM size differently, from the TCMs */
+		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+		/* also populate base address */
+		switch ((uint16)bus->sih->chip) {
+		case BCM4339_CHIP_ID:
+		case BCM4335_CHIP_ID:
+			bus->dongle_ram_base = CR4_4335_RAM_BASE;
+			break;
+		case BCM4358_CHIP_ID:
+		case BCM4354_CHIP_ID:
+		case BCM43567_CHIP_ID:
+		case BCM43569_CHIP_ID:
+		case BCM4350_CHIP_ID:
+		case BCM43570_CHIP_ID:
+			bus->dongle_ram_base = CR4_4350_RAM_BASE;
+			break;
+		case BCM4360_CHIP_ID:
+			bus->dongle_ram_base = CR4_4360_RAM_BASE;
+			break;
+
+		case BCM4364_CHIP_ID:
+			bus->dongle_ram_base = CR4_4364_RAM_BASE;
+			break;
+
+		CASE_BCM4345_CHIP:
+			bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* changed at 4345C0 */
+				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
+			break;
+		CASE_BCM43602_CHIP:
+			bus->dongle_ram_base = CR4_43602_RAM_BASE;
+			break;
+		case BCM4349_CHIP_GRPID:
+			/* RAM base changed from 4349c0 (revid 9) onwards */
+			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
+				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
+			break;
+		case BCM4347_CHIP_GRPID:
+			bus->dongle_ram_base = CR4_4347_RAM_BASE;
+			break;
+		case BCM4362_CHIP_ID:
+			bus->dongle_ram_base = CR4_4362_RAM_BASE;
+			break;
+		default:
+			bus->dongle_ram_base = 0;
+			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+			           __FUNCTION__, bus->dongle_ram_base));
+		}
+	}
+	bus->ramsize = bus->orig_ramsize;
+	if (dhd_dongle_memsize)
+		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+	bus->srmemsize = si_socram_srmem_size(bus->sih);
+
+
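+	/* Interrupt bits handled by default: the D2H mailbox bits plus the
+	 * function-0 mailbox bits.
+	 */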
+	bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	bus->wait_for_d3_ack = 1;
+#ifdef PCIE_OOB
+	dhdpcie_oob_init(bus);
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+	bus->inb_enabled = TRUE;
+#endif /* PCIE_INB_DW */
+	bus->dongle_in_ds = FALSE;
+	bus->idma_enabled = TRUE;
+	bus->ifrm_enabled = TRUE;
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	bus->ds_enabled = TRUE;
+#endif
+	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
+	return 0;
+
+fail:
+	if (bus->sih != NULL) {
+		si_detach(bus->sih);
+		bus->sih = NULL;
+	}
+	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
+	return -1;
+}
+
+int
+dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
+{
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
+	return 0;
+}
+int
+dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
+{
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
+	return 0;
+}
+
+void
+dhdpcie_bus_intr_enable(dhd_bus_t *bus)
+{
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+	if (bus && bus->sih && !bus->is_linkdown) {
+		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+			(bus->sih->buscorerev == 4)) {
+			dhpcie_bus_unmask_interrupt(bus);
+		} else {
+			/* Skip after receiving D3 ACK */
+			if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+				bus->wait_for_d3_ack) {
+				return;
+			}
+			si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+				bus->def_intmask, bus->def_intmask);
+		}
+	}
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+void
+dhdpcie_bus_intr_disable(dhd_bus_t *bus)
+{
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+	if (bus && bus->sih && !bus->is_linkdown) {
+		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+			(bus->sih->buscorerev == 4)) {
+			dhpcie_bus_mask_interrupt(bus);
+		} else {
+			/* Skip after receiving D3 ACK */
+			if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+				bus->wait_for_d3_ack) {
+				return;
+			}
+			si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+				bus->def_intmask, 0);
+		}
+	}
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+/*
+ * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress to the other
+ * bus user contexts (Tx, Rx, IOVAR, WD, etc.) and waits for those contexts to exit
+ * gracefully. Before marking the bus busy, each bus usage context checks whether
+ * busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so, it exits right there
+ * without marking dhd_bus_busy_state as BUSY.
+ */
+static void
+dhdpcie_advertise_bus_cleanup(dhd_pub_t	 *dhdp)
+{
+	unsigned long flags;
+	int timeleft;
+
+	DHD_GENERAL_LOCK(dhdp, flags);
+	dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+
+	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+	if ((timeleft == 0) || (timeleft == 1)) {
+		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+				__FUNCTION__, dhdp->dhd_bus_busy_state));
+		ASSERT(0);
+	}
+
+	return;
+}
+
+static void
+dhdpcie_advertise_bus_remove(dhd_pub_t	 *dhdp)
+{
+	unsigned long flags;
+	int timeleft;
+
+	DHD_GENERAL_LOCK(dhdp, flags);
+	dhdp->busstate = DHD_BUS_REMOVE;
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+
+	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+	if ((timeleft == 0) || (timeleft == 1)) {
+		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+				__FUNCTION__, dhdp->dhd_bus_busy_state));
+		ASSERT(0);
+	}
+
+	return;
+}
+
+
+static void
+dhdpcie_bus_remove_prep(dhd_bus_t *bus)
+{
+	unsigned long flags;
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef PCIE_INB_DW
+	/* De-Initialize the lock to serialize Device Wake Inband activities */
+	if (bus->inb_lock) {
+		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->inb_lock);
+		bus->inb_lock = NULL;
+	}
+#endif
+
+
+	dhd_os_sdlock(bus->dhd);
+
+	if (bus->sih && !bus->dhd->dongle_isolation) {
+		/* Handles the insmod-fails-after-rmmod issue seen on Brix Android */
+		/* if the pcie link is down, watchdog reset should not be done, as it may hang */
+		if (!bus->is_linkdown)
+			pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
+		else
+			DHD_ERROR(("%s: skipping watchdog reset, due to pcie link down ! \n",
+					__FUNCTION__));
+
+		bus->dhd->is_pcie_watchdog_reset = TRUE;
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+/** Detach and free everything */
+void
+dhdpcie_bus_release(dhd_bus_t *bus)
+{
+	bool dongle_isolation = FALSE;
+	osl_t *osh = NULL;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+
+		osh = bus->osh;
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dhdpcie_advertise_bus_remove(bus->dhd);
+			dongle_isolation = bus->dhd->dongle_isolation;
+			bus->dhd->is_pcie_watchdog_reset = FALSE;
+			dhdpcie_bus_remove_prep(bus);
+
+			if (bus->intr) {
+				dhdpcie_bus_intr_disable(bus);
+				dhdpcie_free_irq(bus);
+			}
+			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_detach(bus->dhd);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		/* unmap the regs and tcm here!! */
+		if (bus->regs) {
+			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
+			bus->regs = NULL;
+		}
+		if (bus->tcm) {
+			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
+			bus->tcm = NULL;
+		}
+
+		dhdpcie_bus_release_malloc(bus, osh);
+		/* Detach pcie shared structure */
+		if (bus->pcie_sh) {
+			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+			bus->pcie_sh = NULL;
+		}
+
+		if (bus->console.buf != NULL) {
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+		}
+
+
+		/* Finally free bus info */
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+
+	}
+
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+} /* dhdpcie_bus_release */
+
+
+void
+dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
+		DHD_TRACE(("%s Exit\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->sih) {
+
+		if (!dongle_isolation &&
+		(bus->dhd && !bus->dhd->is_pcie_watchdog_reset))
+			pcie_watchdog_reset(bus->osh, bus->sih,
+				(sbpcieregs_t *) bus->regs);
+#ifdef DHD_EFI
+		dhdpcie_dongle_pwr_toggle(bus);
+#endif
+		if (bus->ltrsleep_on_unload) {
+			si_corereg(bus->sih, bus->sih->buscoreidx,
+				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
+		}
+
+		if (bus->sih->buscorerev == 13)
+			 pcie_serdes_iddqdisable(bus->osh, bus->sih,
+			                         (sbpcieregs_t *) bus->regs);
+
+		/* Disable CLKREQ# */
+		dhdpcie_clkreq(bus->osh, 1, 0);
+
+		if (bus->sih != NULL) {
+			si_detach(bus->sih);
+			bus->sih = NULL;
+		}
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+uint32
+dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
+{
+	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
+	return data;
+}
+
+/** 32 bit config write */
+void
+dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
+}
+
+void
+dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
+}
+
+void
+dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_MEMSIZE;
+	/* Restrict the memsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_memsize, min_size));
+	if ((dhd_dongle_memsize > min_size) &&
+		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_memsize;
+}
+
+void
+dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+
+}
+
+/** Stop bus module: clear pending frames, disable data flow */
+void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	uint32 status;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus->dhd)
+		return;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_DISABLE_RUNTIME_PM(bus->dhd);
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	dhdpcie_bus_intr_disable(bus);
+	status =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+
+	if (!dhd_download_fw_on_driverload) {
+		dhd_dpc_kill(bus->dhd);
+	}
+
+	/* Clear rx control and wake any waiters */
+	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
+	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
+
+done:
+	return;
+}
+
+#ifdef DEVICE_TX_STUCK_DETECT
+void
+dhd_bus_send_msg_to_daemon(int reason)
+{
+	bcm_to_info_t to_info;
+
+	to_info.magic = BCM_TO_MAGIC;
+	to_info.reason = reason;
+
+	dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
+	return;
+}
+
+/**
+ * Scan the flow rings in the active list to check whether any is stuck and notify the application.
+ * The conditions for warn/stuck detection are:
+ * 1. Flow ring is active
+ * 2. There are packets to be consumed by the consumer (wr != rd)
+ * If 1 and 2 are true, then
+ * 3. Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION
+ * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION
+ */
+static void
+dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus)
+{
+	uint32 tx_cmpl;
+	unsigned long list_lock_flags;
+	unsigned long ring_lock_flags;
+	dll_t *item, *prev;
+	flow_ring_node_t *flow_ring_node;
+	bool ring_empty;
+	bool active;
+
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+
+	for (item = dll_tail_p(&bus->flowring_active_list);
+			!dll_end(&bus->flowring_active_list, item); item = prev) {
+
+		prev = dll_prev_p(item);
+
+		flow_ring_node = dhd_constlist_to_flowring(item);
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags);
+		tx_cmpl = flow_ring_node->tx_cmpl;
+		active = flow_ring_node->active;
+		ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info);
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags);
+
+		if (ring_empty) {
+			/* reset counters etc. */
+			flow_ring_node->stuck_count = 0;
+			flow_ring_node->tx_cmpl_prev = tx_cmpl;
+			continue;
+		}
+		/**
+		 * DEVICE_TX_STUCK_WARN_DURATION, DEVICE_TX_STUCK_DURATION are integer
+		 * representation of time, to decide if a flow is in warn state or stuck.
+		 *
+		 * flow_ring_node->stuck_count is an integer counter representing how long
+		 * tx_cmpl is not received though there are pending packets in the ring
+		 * to be consumed by the dongle for that particular flow.
+		 *
+		 * This method of determining elapsed time is helpful in sleep/wake scenarios:
+		 * if the host sleeps and wakes up, the sleep time is not counted toward
+		 * the stuck duration.
+		 */
+		if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) {
+
+			flow_ring_node->stuck_count++;
+
+			DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n",
+				__func__, flow_ring_node->flowid, tx_cmpl,
+				flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count));
+
+			switch (flow_ring_node->stuck_count) {
+				case DEVICE_TX_STUCK_WARN_DURATION:
+					/**
+					 * Notify Device Tx Stuck Notification App about the
+					 * device Tx stuck warning for this flowid.
+					 * App will collect the logs required.
+					 */
+					DHD_ERROR(("stuck warning for flowid: %d sent to app\n",
+						flow_ring_node->flowid));
+					dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING);
+					break;
+				case DEVICE_TX_STUCK_DURATION:
+					/**
+					 * Notify Device Tx Stuck Notification App about the
+					 * device Tx stuck info for this flowid.
+					 * App will collect the logs required.
+					 */
+					DHD_ERROR(("stuck information for flowid: %d sent to app\n",
+						flow_ring_node->flowid));
+					dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK);
+					break;
+				default:
+					break;
+			}
+		} else {
+			flow_ring_node->tx_cmpl_prev = tx_cmpl;
+			flow_ring_node->stuck_count = 0;
+		}
+	}
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+}
+/**
+ * Runs dhd_bus_device_tx_stuck_scan() at most once every DEVICE_TX_STUCK_CKECK_TIMEOUT
+ * to determine whether any flowid is stuck.
+ */
+static void
+dhd_bus_device_stuck_scan(dhd_bus_t *bus)
+{
+	uint32 time_stamp; /* in millisec */
+	uint32 diff;
+
+	/* No need to run the algorithm if the dongle has trapped */
+	if (bus->dhd->dongle_trap_occured) {
+		return;
+	}
+	time_stamp = OSL_SYSUPTIME();
+	diff = time_stamp - bus->device_tx_stuck_check;
+	if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) {
+		dhd_bus_device_tx_stuck_scan(bus);
+		bus->device_tx_stuck_check = OSL_SYSUPTIME();
+	}
+	return;
+}
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+/** Watchdog timer function */
+bool dhd_bus_watchdog(dhd_pub_t *dhd)
+{
+	unsigned long flags;
+	dhd_bus_t *bus;
+	bus = dhd->bus;
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
+			DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return FALSE;
+	}
+	DHD_BUS_BUSY_SET_IN_WD(dhd);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+
+
+	/* Poll for console output periodically */
+	if (dhd->busstate == DHD_BUS_DATA &&
+		dhd_console_ms != 0 && !bus->d3_suspend_pending) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (dhdpcie_bus_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+
+#ifdef DHD_READ_INTSTATUS_IN_DPC
+	if (bus->poll) {
+		bus->ipend = TRUE;
+		bus->dpc_sched = TRUE;
+		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
+	}
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	/* If we haven't communicated with the device for a while, deassert the Device_Wake GPIO */
+	if (dhd_doorbell_timeout != 0 && dhd->busstate == DHD_BUS_DATA &&
+		dhd->up && dhd_timeout_expired(&bus->doorbell_timer)) {
+		dhd_bus_set_device_wake(bus, FALSE);
+	}
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+#ifdef PCIE_INB_DW
+	if (INBAND_DW_ENAB(bus)) {
+		if (bus->ds_exit_timeout) {
+			bus->ds_exit_timeout --;
+			if (bus->ds_exit_timeout == 1) {
+				DHD_ERROR(("DS-EXIT TIMEOUT\n"));
+				bus->ds_exit_timeout = 0;
+				bus->inband_ds_exit_to_cnt++;
+			}
+		}
+		if (bus->host_sleep_exit_timeout) {
+			bus->host_sleep_exit_timeout --;
+			if (bus->host_sleep_exit_timeout == 1) {
+				DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
+				bus->host_sleep_exit_timeout = 0;
+				bus->inband_host_sleep_exit_to_cnt++;
+			}
+		}
+	}
+#endif /* PCIE_INB_DW */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+	if (dhd->bus->dev_tx_stuck_monitor == TRUE) {
+		dhd_bus_device_stuck_scan(dhd->bus);
+	}
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
+	dhd_os_busbusy_wake(dhd);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+	return TRUE;
+} /* dhd_bus_watchdog */
+
+
+uint16
+dhd_get_chipid(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (bus && bus->sih)
+		return (uint16)si_chipid(bus->sih);
+	else
+		return 0;
+}
+
+/* Download firmware image and nvram image */
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *pfw_path, char *pnv_path,
+                          char *pclm_path, char *pconf_path)
+{
+	int ret;
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+	bus->dhd->clm_path = pclm_path;
+	bus->dhd->conf_path = pconf_path;
+
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+	dhd_set_blob_support(bus->dhd, bus->fw_path);
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
+		__FUNCTION__, bus->fw_path, bus->nv_path));
+
+	ret = dhdpcie_download_firmware(bus, osh);
+
+	return ret;
+}
+
+void
+dhd_set_path_params(struct dhd_bus *bus)
+{
+	/* External conf takes precedence if specified */
+	dhd_conf_preinit(bus->dhd);
+
+	if (bus->dhd->clm_path[0] == '\0') {
+		dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
+	}
+	dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+	if (bus->dhd->conf_path[0] == '\0') {
+		dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
+	}
+#ifdef CONFIG_PATH_AUTO_SELECT
+	dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
+#endif
+
+	dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
+
+	dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
+	dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
+	dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+
+	printf("Final fw_path=%s\n", bus->fw_path);
+	printf("Final nv_path=%s\n", bus->nv_path);
+	printf("Final clm_path=%s\n", bus->dhd->clm_path);
+	printf("Final conf_path=%s\n", bus->dhd->conf_path);
+
+}
+
+void
+dhd_set_bus_params(struct dhd_bus *bus)
+{
+	if (bus->dhd->conf->dhd_poll >= 0) {
+		bus->poll = bus->dhd->conf->dhd_poll;
+		if (!bus->pollrate)
+			bus->pollrate = 1;
+		printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
+	}
+}
+
+static int
+dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
+{
+	int ret = 0;
+#if defined(BCM_REQUEST_FW)
+	uint chipid = bus->sih->chip;
+	uint revid = bus->sih->chiprev;
+	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
+	char nv_path[64];		/* path to nvram vars file */
+	bus->fw_path = fw_path;
+	bus->nv_path = nv_path;
+	switch (chipid) {
+	case BCM43570_CHIP_ID:
+		bcmstrncat(fw_path, "43570", 5);
+		switch (revid) {
+		case 0:
+			bcmstrncat(fw_path, "a0", 2);
+			break;
+		case 2:
+			bcmstrncat(fw_path, "a2", 2);
+			break;
+		default:
+			DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
+			revid));
+			break;
+		}
+		break;
+	default:
+		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
+		chipid));
+		return 0;
+	}
+	/* load board specific nvram file */
+	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
+	/* load firmware */
+	snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
+#endif /* BCM_REQUEST_FW */
+
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	dhd_set_path_params(bus);
+	dhd_set_bus_params(bus);
+
+	ret = _dhdpcie_download_firmware(bus);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+static int
+dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = BCME_ERROR;
+	int offset = 0;
+	int len = 0;
+	bool store_reset;
+	char *imgbuf = NULL;
+	uint8 *memblock = NULL, *memptr;
+	uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
+
+	int offset_end = bus->ramsize;
+
+#ifndef DHD_EFI
+	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+#endif /* DHD_EFI */
+
+	/* Opening the image should succeed if it was actually supplied through a
+	 * registry entry or a module param.
+	 */
+	imgbuf = dhd_os_open_image(pfw_path);
+	if (imgbuf == NULL) {
+		printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
+		goto err;
+	}
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if (dhd_msg_level & DHD_TRACE_VAL) {
+		memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+		if (memptr_tmp == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+			goto err;
+		}
+	}
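+	/* Round memptr up to the next DHD_SDALIGN boundary within the over-allocated block. */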
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+	}
+
+
+	/* check if CR4/CA7 */
+	store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+			si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
+
+	/* Download image with MEMBLOCK size */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* if the address is 0, store the reset instruction to be written at address 0 */
+		if (store_reset) {
+			ASSERT(offset == 0);
+			bus->resetinstr = *(((uint32*)memptr));
+			/* Add start of RAM address to the address given by user */
+			offset += bus->dongle_ram_base;
+			offset_end += offset;
+			store_reset = FALSE;
+		}
+
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		if (dhd_msg_level & DHD_TRACE_VAL) {
+			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+				        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+			if (memcmp(memptr_tmp, memptr, len)) {
+				DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
+				goto err;
+			} else
+				DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+		}
+		offset += MEMBLOCK;
+
+		if (offset >= offset_end) {
+			DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
+				__FUNCTION__, offset, offset_end));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+	}
+
+err:
+	if (memblock) {
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+		if (dhd_msg_level & DHD_TRACE_VAL) {
+			if (memptr_tmp)
+				MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
+		}
+	}
+
+	if (imgbuf) {
+		dhd_os_close_image(imgbuf);
+	}
+
+	return bcmerror;
+} /* dhdpcie_download_code_file */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define MIN_NVRAMVARS_SIZE 128
+#endif /* CUSTOMER_HW4_DEBUG */
+
+static int
+dhdpcie_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = BCME_ERROR;
+	uint len;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+	bool nvram_uefi_exists = FALSE;
+	bool local_alloc = FALSE;
+	pnv_path = bus->nv_path;
+
+#ifdef BCMEMBEDIMAGE
+	nvram_file_exists = TRUE;
+#else
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+#endif
+
+	/* First try UEFI */
+	len = MAX_NVRAMBUF_SIZE;
+	dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
+
+	/* If UEFI empty, then read from file system */
+	if ((len <= 0) || (memblock == NULL)) {
+
+		if (nvram_file_exists) {
+			len = MAX_NVRAMBUF_SIZE;
+			dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
+			if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
+				goto err;
+			}
+		}
+		else {
+			/* For SROM OTP no external file or UEFI required */
+			bcmerror = BCME_OK;
+		}
+	} else {
+		nvram_uefi_exists = TRUE;
+	}
+
+	DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
+
+	if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
+		bufp = (char *) memblock;
+
+#ifdef CACHE_FW_IMAGES
+		if (bus->processed_nvram_params_len) {
+			len = bus->processed_nvram_params_len;
+		}
+
+		if (!bus->processed_nvram_params_len) {
+			bufp[len] = 0;
+			if (nvram_uefi_exists || nvram_file_exists) {
+				len = process_nvram_vars(bufp, len);
+				bus->processed_nvram_params_len = len;
+			}
+		} else
+#else
+		{
+			bufp[len] = 0;
+			if (nvram_uefi_exists || nvram_file_exists) {
+				len = process_nvram_vars(bufp, len);
+			}
+		}
+#endif /* CACHE_FW_IMAGES */
+
+		DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
+#ifdef CUSTOMER_HW4_DEBUG
+		if (len < MIN_NVRAMVARS_SIZE) {
+			DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
+				__FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+#endif /* CUSTOMER_HW4_DEBUG */
+
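+		/* Pad the processed vars to a 4-byte boundary and append the terminating
+		 * zero before downloading them to the dongle.
+		 */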
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+				__FUNCTION__, bcmerror));
+		}
+	}
+
+
+err:
+	if (memblock) {
+		if (local_alloc) {
+			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+		} else {
+			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
+		}
+	}
+
+	return bcmerror;
+}
+
+
+#ifdef BCMEMBEDIMAGE
+int
+dhdpcie_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *p_dlarray  = NULL;
+	unsigned int dlarray_size = 0;
+	unsigned int downloded_len, remaining_len, len;
+	char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+	uint8 *memblock = NULL, *memptr;
+
+	downloded_len = 0;
+	remaining_len = 0;
+	len = 0;
+
+#ifdef DHD_EFI
+	p_dlarray = rtecdc_fw_arr;
+	dlarray_size = sizeof(rtecdc_fw_arr);
+#else
+	p_dlarray = dlarray;
+	dlarray_size = sizeof(dlarray);
+	p_dlimagename = dlimagename;
+	p_dlimagever  = dlimagever;
+	p_dlimagedate = dlimagedate;
+#endif /* DHD_EFI */
+
+#ifndef DHD_EFI
+	if ((p_dlarray == 0) ||	(dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
+		(p_dlimagename == 0) ||	(p_dlimagever  == 0) ||	(p_dlimagedate == 0))
+		goto err;
+#endif /* DHD_EFI */
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	while (downloded_len  < dlarray_size) {
+		remaining_len = dlarray_size - downloded_len;
+		if (remaining_len >= MEMBLOCK)
+			len = MEMBLOCK;
+		else
+			len = remaining_len;
+
+		memcpy(memptr, (p_dlarray + downloded_len), len);
+		/* check if CR4/CA7 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+			si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+			/* if the address is 0, store the reset instruction to be written at address 0 */
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+		downloded_len += len;
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+		offset += MEMBLOCK;
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		unsigned char *ularray = NULL;
+		unsigned int uploded_len;
+		uploded_len = 0;
+		bcmerror = -1;
+		ularray = MALLOC(bus->dhd->osh, dlarray_size);
+		if (ularray == NULL)
+			goto upload_err;
+		/* Upload image to verify downloaded contents. */
+		offset = bus->dongle_ram_base;
+		memset(ularray, 0xaa, dlarray_size);
+		while (uploded_len  < dlarray_size) {
+			remaining_len = dlarray_size - uploded_len;
+			if (remaining_len >= MEMBLOCK)
+				len = MEMBLOCK;
+			else
+				len = remaining_len;
+			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
+				(uint8 *)(ularray + uploded_len), len);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto upload_err;
+			}
+
+			uploded_len += len;
+			offset += MEMBLOCK;
+		}
+#ifdef DHD_EFI
+		if (memcmp(p_dlarray, ularray, dlarray_size)) {
+			DHD_ERROR(("%s: Downloaded image is corrupted ! \n", __FUNCTION__));
+			goto upload_err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+#else
+		if (memcmp(p_dlarray, ularray, dlarray_size)) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+			goto upload_err;
+
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+#endif /* DHD_EFI */
+
+upload_err:
+		if (ularray)
+			MFREE(bus->dhd->osh, ularray, dlarray_size);
+	}
+#endif /* DHD_DEBUG */
+err:
+
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	return bcmerror;
+} /* dhdpcie_download_code_array */
+#endif /* BCMEMBEDIMAGE */
+
+
+static int
+dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
+{
+	int bcmerror = BCME_ERROR;
+	char *imgbuf = NULL;
+
+	if (buf == NULL || len == 0)
+		goto err;
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		imgbuf = dhd_os_open_image(bus->fw_path);
+		if (imgbuf == NULL) {
+			DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
+			goto err;
+		}
+
+		/* Read it */
+		if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
+			DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
+			goto err;
+		}
+
+		bcmerror = BCME_OK;
+	}
+
+err:
+	if (imgbuf)
+		dhd_os_close_image(imgbuf);
+
+	return bcmerror;
+}
+
+
+/* The ramsize can be changed in the dongle image; for example, the 4365 chip shares sysmem
+ * with the BMC, and how much sysmem belongs to the CA7 can be adjusted at dongle compile time.
+ * So DHD needs to detect this case and update the dongle RAMSIZE accordingly.
+ */
+static void
+dhdpcie_ramsize_adj(struct dhd_bus *bus)
+{
+	int i, search_len = 0;
+	uint8 *memptr = NULL;
+	uint8 *ramsizeptr = NULL;
+	uint ramsizelen;
+	uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
+	hnd_ramsize_ptr_t ramsize_info;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Adjust dongle RAMSIZE already called. */
+	if (bus->ramsize_adjusted) {
+		return;
+	}
+
+	/* Whether we succeed or fail, we don't want to be here
+	 * more than once.
+	 */
+	bus->ramsize_adjusted = TRUE;
+
+	/* Skip this if a user-restricted dongle ram size is in effect */
+	if (dhd_dongle_memsize) {
+		DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__,
+			dhd_dongle_memsize));
+		return;
+	}
+
+#ifndef BCMEMBEDIMAGE
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
+		return;
+	}
+#endif /* !BCMEMBEDIMAGE */
+
+	/* Get maximum RAMSIZE info search length */
+	for (i = 0; ; i++) {
+		if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
+			break;
+
+		if (search_len < (int)ramsize_ptr_ptr[i])
+			search_len = (int)ramsize_ptr_ptr[i];
+	}
+
+	if (!search_len)
+		return;
+
+	search_len += sizeof(hnd_ramsize_ptr_t);
+
+	memptr = MALLOC(bus->dhd->osh, search_len);
+	if (memptr == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
+		return;
+	}
+
+	/* External image takes precedence if specified */
+	if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
+#if defined(BCMEMBEDIMAGE) && !defined(DHD_EFI)
+		unsigned char *p_dlarray  = NULL;
+		unsigned int dlarray_size = 0;
+		char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+
+		p_dlarray = dlarray;
+		dlarray_size = sizeof(dlarray);
+		p_dlimagename = dlimagename;
+		p_dlimagever  = dlimagever;
+		p_dlimagedate = dlimagedate;
+
+		if ((p_dlarray == 0) ||	(dlarray_size == 0) || (p_dlimagename == 0) ||
+			(p_dlimagever  == 0) ||	(p_dlimagedate == 0))
+			goto err;
+
+		ramsizeptr = p_dlarray;
+		ramsizelen = dlarray_size;
+#else
+		goto err;
+#endif /* BCMEMBEDIMAGE && !DHD_EFI */
+	}
+	else {
+		ramsizeptr = memptr;
+		ramsizelen = search_len;
+	}
+
+	if (ramsizeptr) {
+		/* Check Magic */
+		for (i = 0; ; i++) {
+			if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
+				break;
+
+			if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
+				continue;
+
+			memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
+				sizeof(hnd_ramsize_ptr_t));
+
+			if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
+				bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
+				bus->ramsize = LTOH32(ramsize_info.ram_size);
+				DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
+					bus->ramsize));
+				break;
+			}
+		}
+	}
+
+err:
+	if (memptr)
+		MFREE(bus->dhd->osh, memptr, search_len);
+
+	return;
+} /* dhdpcie_ramsize_adj */
+
+static int
+_dhdpcie_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
+		return 0;
+#endif
+	}
+	/* Adjust ram size */
+	dhdpcie_ramsize_adj(bus);
+
+	/* Keep arm in reset */
+	if (dhdpcie_bus_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		} else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdpcie_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		} else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_arry is specified as above, it can be passed down to dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+
+	/* External nvram takes precedence if specified */
+	if (dhdpcie_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdpcie_bus_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+} /* _dhdpcie_download_firmware */
+
+#define CONSOLE_LINE_MAX	192
+
+static int
+dhdpcie_bus_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return -1;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			DHD_FWLOG(("CONSOLE: %s\n", line));
+		}
+	}
+break2:
+
+	return BCME_OK;
+} /* dhdpcie_bus_readconsole */
+
+void
+dhd_bus_dump_console_buffer(dhd_bus_t *bus)
+{
+	uint32 n, i;
+	uint32 addr;
+	char *console_buffer = NULL;
+	uint32 console_ptr, console_size, console_index;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	int rv;
+
+	DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
+
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
+		return;
+	}
+
+	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+		(uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
+		goto exit;
+	}
+
+	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+		(uint8 *)&console_size, sizeof(console_size))) < 0) {
+		goto exit;
+	}
+
+	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+		(uint8 *)&console_index, sizeof(console_index))) < 0) {
+		goto exit;
+	}
+
+	console_ptr = ltoh32(console_ptr);
+	console_size = ltoh32(console_size);
+	console_index = ltoh32(console_index);
+
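+	/* Sanity-check the dongle-reported console size before allocating a host buffer for it. */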
+	if (console_size > CONSOLE_BUFFER_MAX ||
+		!(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
+		goto exit;
+	}
+
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
+		(uint8 *)console_buffer, console_size)) < 0) {
+		goto exit;
+	}
+
+	for (i = 0, n = 0; i < console_size; i += n + 1) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			ch = console_buffer[(console_index + i + n) % console_size];
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			/* Don't use DHD_ERROR macro since we print
+			 * a lot of information quickly. The macro
+			 * will truncate a lot of the printfs
+			 */
+
+			DHD_FWLOG(("CONSOLE: %s\n", line));
+		}
+	}
+
+exit:
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+	return;
+}
+
+static int
+dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
+	struct bcmstrbuf strbuf;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON()) {
+		return 0;
+	}
+
+	if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout; "data" is NULL.
+		 * Allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
+		goto done;
+	}
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
+
+	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (bus->pcie_sh->assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+					bus->pcie_sh->assert_exp_addr,
+					(uint8 *)str, maxstrlen)) < 0) {
+					goto done;
+				}
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (bus->pcie_sh->assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+					bus->pcie_sh->assert_file_addr,
+					(uint8 *)str, maxstrlen)) < 0) {
+					goto done;
+				}
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
+		}
+
+		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
+			trap_t *tr = &bus->dhd->last_trap_info;
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+				bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
+				goto done;
+			}
+			dhd_bus_dump_trap_info(bus, &strbuf);
+
+			dhd_bus_dump_console_buffer(bus);
+		}
+	}
+
+	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
+		printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
+#ifdef REPORT_FATAL_TIMEOUTS
+		/**
+		 * stop the timers as FW trapped
+		 */
+		if (dhd_stop_scan_timer(bus->dhd)) {
+			DHD_ERROR(("dhd_stop_scan_timer failed\n"));
+			ASSERT(0);
+		}
+		if (dhd_stop_bus_timer(bus->dhd)) {
+			DHD_ERROR(("dhd_stop_bus_timer failed\n"));
+			ASSERT(0);
+		}
+		if (dhd_stop_cmd_timer(bus->dhd)) {
+			DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
+			ASSERT(0);
+		}
+		if (dhd_stop_join_timer(bus->dhd)) {
+			DHD_ERROR(("dhd_stop_join_timer failed\n"));
+			ASSERT(0);
+		}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+		dhd_prot_debug_info_print(bus->dhd);
+
+#if defined(DHD_FW_COREDUMP)
+		/* save core dump or write to a file */
+		if (bus->dhd->memdump_enabled) {
+			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
+			dhdpcie_mem_dump(bus);
+		}
+#endif /* DHD_FW_COREDUMP */
+
+		/* wake up IOCTL wait event */
+		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+
+		dhd_schedule_reset(bus->dhd);
+
+
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+
+	return bcmerror;
+} /* dhdpcie_checkdied */
+
+
+/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
+void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
+{
+	int ret = 0;
+	int size; /* Full mem size */
+	int start; /* Start address */
+	int read_size = 0; /* Read size of each iteration */
+	uint8 *databuf = buf;
+
+	if (bus == NULL) {
+		return;
+	}
+
+	start = bus->dongle_ram_base;
+	read_size = 4;
+	/* check for dead bus */
+	{
+		uint test_word = 0;
+		ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
+		/* if read error or bus timeout */
+		if (ret || (test_word == 0xFFFFFFFF)) {
+			return;
+		}
+	}
+
+	/* Get full mem size */
+	size = bus->ramsize;
+	/* Read mem content */
+	while (size)
+	{
+		read_size = MIN(MEMBLOCK, size);
+		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
+			return;
+		}
+
+		/* Decrement size and increment start address */
+		size -= read_size;
+		start += read_size;
+		databuf += read_size;
+	}
+	bus->dhd->soc_ram = buf;
+	bus->dhd->soc_ram_length = bus->ramsize;
+	return;
+}
+
+
+#if defined(DHD_FW_COREDUMP)
+static int
+dhdpcie_mem_dump(dhd_bus_t *bus)
+{
+	int ret = 0;
+	int size; /* Full mem size */
+	int start = bus->dongle_ram_base; /* Start address */
+	int read_size = 0; /* Read size of each iteration */
+	uint8 *buf = NULL, *databuf = NULL;
+
+#ifdef EXYNOS_PCIE_DEBUG
+	exynos_pcie_register_dump(1);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+	/* Get full mem size */
+	size = bus->ramsize;
+	buf = dhd_get_fwdump_buf(bus->dhd, size);
+	if (!buf) {
+		DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+		return BCME_ERROR;
+	}
+
+	/* Read mem content */
+	DHD_TRACE_HW4(("Dump dongle memory\n"));
+	databuf = buf;
+	while (size)
+	{
+		read_size = MIN(MEMBLOCK, size);
+		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
+		{
+			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
+			bus->dhd->memdump_success = FALSE;
+			return BCME_ERROR;
+		}
+		DHD_TRACE(("."));
+
+		/* Decrement size and increment start address */
+		size -= read_size;
+		start += read_size;
+		databuf += read_size;
+	}
+	bus->dhd->memdump_success = TRUE;
+
+	dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+	/* buf, actually soc_ram free handled in dhd_{free,clear} */
+
+	return ret;
+}
+
+int
+dhd_bus_mem_dump(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	if (dhdp->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s bus is down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+#ifdef DHD_PCIE_RUNTIMEPM
+	if (dhdp->memdump_type == DUMP_TYPE_BY_SYSDUMP) {
+		DHD_ERROR(("%s : bus wakeup by SYSDUMP\n", __FUNCTION__));
+		dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+	}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+		return BCME_ERROR;
+	}
+
+	return dhdpcie_mem_dump(bus);
+}
+
+int
+dhd_dongle_mem_dump(void)
+{
+	if (!g_dhd_bus) {
+		DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	dhd_bus_dump_console_buffer(g_dhd_bus);
+	dhd_prot_debug_info_print(g_dhd_bus->dhd);
+
+	g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+	g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	DHD_OS_WAKE_LOCK(g_dhd_bus->dhd);
+	dhd_bus_mem_dump(g_dhd_bus->dhd);
+	DHD_OS_WAKE_UNLOCK(g_dhd_bus->dhd);
+	return 0;
+}
+EXPORT_SYMBOL(dhd_dongle_mem_dump);
+#endif	/* DHD_FW_COREDUMP */
+
+int
+dhd_socram_dump(dhd_bus_t *bus)
+{
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#if defined(DHD_FW_COREDUMP)
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	dhd_bus_mem_dump(bus->dhd);
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return 0;
+#else
+	return -1;
+#endif
+}
+
+/**
+ * Transfers bytes between host and dongle using pio mode.
+ * Parameter 'address' is a backplane address.
+ */
+static int
+dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
+{
+	uint dsize;
+	int detect_endian_flag = 0x01;
+	bool little_endian;
+
+	if (write && bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+
+	/* Detect endianness. */
+	little_endian = *(char *)&detect_endian_flag;
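+	/*
+	 * The 0x01 probe reads back as 1 through a char pointer only on a
+	 * little-endian host; the wide TCM accesses below are used only in
+	 * that case.
+	 */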
+
+	/* In remap mode, adjust address beyond socram and redirect
+	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+	 * is not backplane accessible
+	 */
+
+	/* Determine initial transfer parameters */
+#ifdef DHD_SUPPORT_64BIT
+	dsize = sizeof(uint64);
+#else /* !DHD_SUPPORT_64BIT */
+	dsize = sizeof(uint32);
+#endif /* DHD_SUPPORT_64BIT */
+
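+	/*
+	 * Use the widest naturally aligned access supported (64-bit when
+	 * DHD_SUPPORT_64BIT is defined, 32-bit otherwise); unaligned addresses
+	 * and tail bytes fall back to single-byte accesses.
+	 */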
+	/* Do the transfer(s) */
+	DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
+	          __FUNCTION__, (write ? "write" : "read"), size, address));
+	if (write) {
+		while (size) {
+#ifdef DHD_SUPPORT_64BIT
+			if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
+				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
+			}
+#else /* !DHD_SUPPORT_64BIT */
+			if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
+				dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
+			}
+#endif /* DHD_SUPPORT_64BIT */
+			else {
+				dsize = sizeof(uint8);
+				dhdpcie_bus_wtcm8(bus, address, *data);
+			}
+
+			/* Adjust for next transfer (if any) */
+			if ((size -= dsize)) {
+				data += dsize;
+				address += dsize;
+			}
+		}
+	} else {
+		while (size) {
+#ifdef DHD_SUPPORT_64BIT
+			if (size >= sizeof(uint64) && little_endian && !(address % 8))
+			{
+				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
+			}
+#else /* !DHD_SUPPORT_64BIT */
+			if (size >= sizeof(uint32) && little_endian && !(address % 4))
+			{
+				*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
+			}
+#endif /* DHD_SUPPORT_64BIT */
+			else {
+				dsize = sizeof(uint8);
+				*data = dhdpcie_bus_rtcm8(bus, address);
+			}
+
+			/* Adjust for next transfer (if any) */
+			if ((size -= dsize) > 0) {
+				data += dsize;
+				address += dsize;
+			}
+		}
+	}
+	return BCME_OK;
+} /* dhdpcie_bus_membytes */
+
+/**
+ * Transfers transmit (ethernet) packets that were queued in the (flow controlled)
+ * flow ring queue to the (non flow controlled) flow ring.
+ */
+int BCMFASTPATH
+dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
+{
+	flow_ring_node_t *flow_ring_node;
+	int ret = BCME_OK;
+#ifdef DHD_LOSSLESS_ROAMING
+	dhd_pub_t *dhdp = bus->dhd;
+#endif
+	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
+
+	/* ASSERT on flow_id */
+	if (flow_id >= bus->max_submission_rings) {
+		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
+			flow_id, bus->max_submission_rings));
+		return 0;
+	}
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
+
+#ifdef DHD_LOSSLESS_ROAMING
+	if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
+		DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
+			__FUNCTION__, flow_ring_node->flow_info.tid));
+		return BCME_OK;
+	}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+	{
+		unsigned long flags;
+		void *txp = NULL;
+		flow_queue_t *queue;
+#ifdef DHD_LOSSLESS_ROAMING
+		struct ether_header *eh;
+		uint8 *pktdata;
+#endif /* DHD_LOSSLESS_ROAMING */
+
+		queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
+			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+			return BCME_NOTREADY;
+		}
+
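+		/*
+		 * Drain the software flow queue into the flow ring. If the ring
+		 * runs out of space, the packet is re-inserted at the head of
+		 * the queue and scheduling stops until the next attempt.
+		 */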
+		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+			PKTORPHAN(txp, bus->dhd->conf->tsq);
+
+			/*
+			 * Modifying the packet length caused P2P cert failures.
+			 * Specifically on test cases where a packet of size 52 bytes
+			 * was injected, the sniffer capture showed 62 bytes because of
+			 * which the cert tests failed. So making the below change
+			 * only Router specific.
+			 */
+
+#ifdef DHDTCPACK_SUPPRESS
+			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
+				if (ret != BCME_OK) {
+					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
+						__FUNCTION__));
+				}
+			}
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_LOSSLESS_ROAMING
+			pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
+			eh = (struct ether_header *) pktdata;
+			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+				uint8 prio = (uint8)PKTPRIO(txp);
+
+				/* Restore to original priority for 802.1X packet */
+				if (prio == PRIO_8021D_NC) {
+					PKTSETPRIO(txp, dhdp->prio_8021x);
+				}
+			}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+			/* Attempt to transfer packet over flow ring */
+			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
+			if (ret != BCME_OK) { /* may not have resources in flow ring */
+				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
+				dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+				/* reinsert at head */
+				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
+				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+				/* If we are able to requeue back, return success */
+				return BCME_OK;
+			}
+		}
+
+		dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+	}
+
+	return ret;
+} /* dhd_bus_schedule_queue */
+
+/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
+int BCMFASTPATH
+dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
+{
+	uint16 flowid;
+#ifdef IDLE_TX_FLOW_MGMT
+	uint8	node_status;
+#endif /* IDLE_TX_FLOW_MGMT */
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+	int ret = BCME_OK;
+	void *txp_pend = NULL;
+
+	if (!bus->dhd->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not inited yet\n", __FUNCTION__));
+		goto toss;
+	}
+
+	flowid = DHD_PKT_GET_FLOWID(txp);
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+
+	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
+		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	if ((flowid >= bus->dhd->num_flow_rings) ||
+#ifdef IDLE_TX_FLOW_MGMT
+		(!flow_ring_node->active))
+#else
+		(!flow_ring_node->active) ||
+		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
+		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
+#endif /* IDLE_TX_FLOW_MGMT */
+	{
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
+			__FUNCTION__, flowid, flow_ring_node->status,
+			flow_ring_node->active));
+		ret = BCME_ERROR;
+		goto toss;
+	}
+
+#ifdef IDLE_TX_FLOW_MGMT
+	node_status = flow_ring_node->status;
+
+	/* handle different status states here */
+	switch (node_status)
+	{
+		case FLOW_RING_STATUS_OPEN:
+
+			if (bus->enable_idle_flowring_mgmt) {
+				/* Move the node to the head of active list */
+				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
+			}
+			break;
+
+		case FLOW_RING_STATUS_SUSPENDED:
+			DHD_INFO(("Need to Initiate TX Flow resume\n"));
+			/* Issue resume_ring request */
+			dhd_bus_flow_ring_resume_request(bus,
+					flow_ring_node);
+			break;
+
+		case FLOW_RING_STATUS_CREATE_PENDING:
+		case FLOW_RING_STATUS_RESUME_PENDING:
+			/* Don't do anything here */
+			DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
+				node_status));
+			break;
+
+		case FLOW_RING_STATUS_DELETE_PENDING:
+		default:
+			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
+				flowid, node_status));
+			/* error here!! */
+			ret = BCME_ERROR;
+			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+			goto toss;
+	}
+	/* Now queue the packet */
+#endif /* IDLE_TX_FLOW_MGMT */
+
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
+		txp_pend = txp;
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	if (flow_ring_node->status) {
+		DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
+		    __FUNCTION__, flowid, flow_ring_node->status,
+		    flow_ring_node->active));
+		if (txp_pend) {
+			txp = txp_pend;
+			goto toss;
+		}
+		return BCME_OK;
+	}
+	ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
+
+	/* If we have anything pending, try to push into q */
+	if (txp_pend) {
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
+			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+			txp = txp_pend;
+			goto toss;
+		}
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+	}
+
+	return ret;
+
+toss:
+	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
+	/* For EFI, pass the 'send' flag as false, to avoid enqueuing the failed
+	 * tx pkt into the Tx done queue.
+	 */
+#ifdef DHD_EFI
+	PKTCFREE(bus->dhd->osh, txp, FALSE);
+#else
+	PKTCFREE(bus->dhd->osh, txp, TRUE);
+#endif
+	return ret;
+} /* dhd_bus_txdata */
+
+
+void
+dhd_bus_stop_queue(struct dhd_bus *bus)
+{
+	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+	bus->bus_flowctrl = TRUE;
+}
+
+void
+dhd_bus_start_queue(struct dhd_bus *bus)
+{
+	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+	bus->bus_flowctrl = FALSE;
+}
+
+/* Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhd->bus;
+	uint32 addr, val;
+	int rv;
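+	/*
+	 * Console input protocol: reset cbuf_idx, copy the command into cbuf,
+	 * publish its length through vcons_in, then raise H2D_HOST_CONS_INT so
+	 * the dongle picks up the buffered command.
+	 */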
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		return BCME_NOTREADY;
+	}
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* generate an interrupt to dongle to indicate that it needs to process cons command */
+	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
+done:
+	return rv;
+} /* dhd_bus_console_in */
+
+/**
+ * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
+ * contained in 'pkt'. Processes the rx frame and forwards it up to the netif layer.
+ */
+void BCMFASTPATH
+dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
+{
+	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
+}
+
+/** 'offset' is a backplane address */
+void
+dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
+{
+	W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
+}
+
+uint8
+dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint8 data;
+	data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+	return data;
+}
+
+void
+dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
+{
+	W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
+}
+void
+dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
+{
+	W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
+}
+#ifdef DHD_SUPPORT_64BIT
+void
+dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
+{
+	W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
+}
+#endif /* DHD_SUPPORT_64BIT */
+
+uint16
+dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint16 data;
+	data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+	return data;
+}
+
+uint32
+dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint32 data;
+	data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+	return data;
+}
+
+#ifdef DHD_SUPPORT_64BIT
+uint64
+dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
+{
+	volatile uint64 data;
+	data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+	return data;
+}
+#endif /* DHD_SUPPORT_64BIT */
+
+/** A snippet of dongle memory is shared between host and dongle */
+void
+dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
+{
+	uint64 long_data;
+	uintptr tcm_offset;
+
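+	/*
+	 * 'type' selects which field of the host/dongle shared region is
+	 * updated; values wider than a byte are converted to the dongle's
+	 * little-endian byte order (HTOL*) before being written into TCM.
+	 */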
+	DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
+
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return;
+	}
+
+	switch (type) {
+		case D2H_DMA_SCRATCH_BUF:
+		{
+			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+		}
+
+		case D2H_DMA_SCRATCH_BUF_LEN :
+		{
+			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+			tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer_len);
+			dhdpcie_bus_wtcm32(bus,
+				(ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+		}
+
+		case H2D_DMA_INDX_WR_BUF:
+		{
+			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+		}
+
+		case H2D_DMA_INDX_RD_BUF:
+		{
+			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+		}
+
+		case D2H_DMA_INDX_WR_BUF:
+		{
+			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+		}
+
+		case D2H_DMA_INDX_RD_BUF:
+		{
+			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+		}
+
+		case H2D_IFRM_INDX_WR_BUF:
+		{
+			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, ifrm_w_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+		}
+
+		case RING_ITEM_LEN :
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, len_items);
+			dhdpcie_bus_wtcm16(bus,
+				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case RING_MAX_ITEMS :
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, max_item);
+			dhdpcie_bus_wtcm16(bus,
+				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case RING_BUF_ADDR :
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, base_addr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8 *) &long_data, len);
+			if (dhd_msg_level & DHD_INFO_VAL) {
+				prhex(__FUNCTION__, data, len);
+			}
+			break;
+
+		case RING_WR_UPD :
+			tcm_offset = bus->ring_sh[ringid].ring_state_w;
+			dhdpcie_bus_wtcm16(bus,
+				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case RING_RD_UPD :
+			tcm_offset = bus->ring_sh[ringid].ring_state_r;
+			dhdpcie_bus_wtcm16(bus,
+				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case D2H_MB_DATA:
+			dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
+				(uint32) HTOL32(*(uint32 *)data));
+			break;
+
+		case H2D_MB_DATA:
+			dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
+				(uint32) HTOL32(*(uint32 *)data));
+			break;
+
+		case HOST_API_VERSION:
+		{
+			pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
+			tcm_offset = (uintptr)sh + OFFSETOF(pciedev_shared_t, host_cap);
+			dhdpcie_bus_wtcm32(bus,
+				(ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
+			break;
+		}
+
+		case DNGL_TO_HOST_TRAP_ADDR:
+		{
+			pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)&(sh->host_trap_addr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			break;
+		}
+
+#ifdef HOFFLOAD_MODULES
+		case WRT_HOST_MODULE_ADDR:
+		{
+			pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (uintptr)&(sh->hoffload_addr);
+			dhdpcie_bus_membytes(bus, TRUE,
+				(ulong)tcm_offset, (uint8*) &long_data, len);
+			break;
+		}
+#endif
+		default:
+			break;
+	}
+} /* dhd_bus_cmn_writeshared */
+
+/** A snippet of dongle memory is shared between host and dongle */
+void
+dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
+{
+	ulong tcm_offset;
+
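+	/*
+	 * Counterpart of dhd_bus_cmn_writeshared(): 'type' selects which field
+	 * of the shared region to fetch; values read from dongle TCM are
+	 * converted from little-endian to host byte order (LTOH*).
+	 */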
+	switch (type) {
+		case RING_WR_UPD :
+			tcm_offset = bus->ring_sh[ringid].ring_state_w;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+			break;
+		case RING_RD_UPD :
+			tcm_offset = bus->ring_sh[ringid].ring_state_r;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+			break;
+		case TOTAL_LFRAG_PACKET_CNT :
+		{
+			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+				(ulong)(uintptr) &sh->total_lfrag_pkt_cnt));
+			break;
+		}
+		case H2D_MB_DATA:
+			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
+			break;
+		case D2H_MB_DATA:
+			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
+			break;
+		case MAX_HOST_RXBUFS :
+		{
+			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+				(ulong)(uintptr) &sh->max_host_rxbufs));
+			break;
+		}
+		default :
+			break;
+	}
+}
+
+uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
+{
+	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = BCME_UNSUPPORTED;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
+		goto exit;
+	}
+
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+} /* dhd_bus_iovar_op */
+
+#ifdef BCM_BUZZZ
+#include <bcm_buzzz.h>
+
+int
+dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
+	const int num_counters)
+{
+	int bytes = 0;
+	uint32 ctr;
+	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
+	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
+
+	/* Compute elapsed counter values per counter event type */
+	for (ctr = 0U; ctr < num_counters; ctr++) {
+		prev[ctr] = core[ctr];
+		curr[ctr] = *log++;
+		core[ctr] = curr[ctr];  /* saved for next log */
+
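+		/*
+		 * The dongle counters are free-running 32-bit values; a current
+		 * sample smaller than the previous one means the counter wrapped,
+		 * so extend the delta across the wrap point.
+		 */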
+		if (curr[ctr] < prev[ctr])
+			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
+		else
+			delta[ctr] = (curr[ctr] - prev[ctr]);
+
+		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
+	}
+
+	return bytes;
+}
+
+typedef union cm3_cnts { /* export this in bcm_buzzz.h */
+	uint32 u32;
+	uint8  u8[4];
+	struct {
+		uint8 cpicnt;
+		uint8 exccnt;
+		uint8 sleepcnt;
+		uint8 lsucnt;
+	};
+} cm3_cnts_t;
+
+int
+dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
+{
+	int bytes = 0;
+
+	uint32 cyccnt, instrcnt;
+	cm3_cnts_t cm3_cnts;
+	uint8 foldcnt;
+
+	{   /* 32bit cyccnt */
+		uint32 curr, prev, delta;
+		prev = core[0]; curr = *log++; core[0] = curr;
+		if (curr < prev)
+			delta = curr + (~0U - prev);
+		else
+			delta = (curr - prev);
+
+		bytes += sprintf(p + bytes, "%12u ", delta);
+		cyccnt = delta;
+	}
+
+	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
+		int i;
+		uint8 max8 = ~0;
+		cm3_cnts_t curr, prev, delta;
+		prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
+		for (i = 0; i < 4; i++) {
+			if (curr.u8[i] < prev.u8[i])
+				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
+			else
+				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
+			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
+		}
+		cm3_cnts.u32 = delta.u32;
+	}
+
+	{   /* Extract the foldcnt from arg0 */
+		uint8 curr, prev, delta, max8 = ~0;
+		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
+		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
+		if (curr < prev)
+			delta = curr + (max8 - prev);
+		else
+			delta = (curr - prev);
+		bytes += sprintf(p + bytes, "%4u ", delta);
+		foldcnt = delta;
+	}
+
+	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
+		                 + cm3_cnts.u8[3]) + foldcnt;
+	if (instrcnt > 0xFFFFFF00)
+		bytes += sprintf(p + bytes, "[%10s] ", "~");
+	else
+		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
+	return bytes;
+}
+
+int
+dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
+{
+	int bytes = 0;
+	bcm_buzzz_arg0_t arg0;
+	static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
+
+	if (buzzz->counters == 6) {
+		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
+		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
+	} else {
+		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
+		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
+	}
+
+	/* Dump the logged arguments using the registered formats */
+	arg0.u32 = *log++;
+
+	switch (arg0.klog.args) {
+		case 0:
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
+			break;
+		case 1:
+		{
+			uint32 arg1 = *log++;
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
+			break;
+		}
+		case 2:
+		{
+			uint32 arg1, arg2;
+			arg1 = *log++; arg2 = *log++;
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
+			break;
+		}
+		case 3:
+		{
+			uint32 arg1, arg2, arg3;
+			arg1 = *log++; arg2 = *log++; arg3 = *log++;
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
+			break;
+		}
+		case 4:
+		{
+			uint32 arg1, arg2, arg3, arg4;
+			arg1 = *log++; arg2 = *log++;
+			arg3 = *log++; arg4 = *log++;
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
+			break;
+		}
+		default:
+			printf("%s: Maximum of 4 arguments supported\n", __FUNCTION__);
+			break;
+	}
+
+	bytes += sprintf(p + bytes, "\n");
+
+	return bytes;
+}
+
+void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
+{
+	int i;
+	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
+	void * log;
+
+	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
+		core[i] = 0;
+	}
+
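+	/*
+	 * The dongle trace is a circular buffer: part1 is the number of entries
+	 * between the start of the log and 'cur'; when the buffer has wrapped,
+	 * part2 is the number of older entries between 'cur' and 'end', and
+	 * those are printed first.
+	 */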
+	log_sz = buzzz_p->log_sz;
+
+	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
+
+	if (buzzz_p->wrap == TRUE) {
+		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
+		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
+	} else {
+		part2 = 0U;
+		total = buzzz_p->count;
+	}
+
+	if (total == 0U) {
+		printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
+		return;
+	} else {
+		printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
+		       total, part2, part1);
+	}
+
+	if (part2) {   /* with wrap */
+		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
+		while (part2--) {   /* from cur to end : part2 */
+			p[0] = '\0';
+			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+			printf("%s", p);
+			log = (void*)((size_t)log + buzzz_p->log_sz);
+		}
+	}
+
+	log = (void*)buffer_p;
+	while (part1--) {
+		p[0] = '\0';
+		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+		printf("%s", p);
+		log = (void*)((size_t)log + buzzz_p->log_sz);
+	}
+
+	printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
+}
+
+int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
+{
+	bcm_buzzz_t * buzzz_p = NULL;
+	void * buffer_p = NULL;
+	char * page_p = NULL;
+	pciedev_shared_t *sh;
+	int ret = 0;
+
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		return BCME_UNSUPPORTED;
+	}
+	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
+		printf("%s: Page memory allocation failure\n", __FUNCTION__);
+		goto done;
+	}
+	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
+		printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
+		goto done;
+	}
+
+	ret = dhdpcie_readshared(bus);
+	if (ret < 0) {
+		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+		goto done;
+	}
+
+	sh = bus->pcie_sh;
+
+	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
+
+	if (sh->buzz_dbg_ptr != 0U) {	/* Fetch and display dongle BUZZZ Trace */
+
+		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
+		                     (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
+
+		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
+			"count<%u> status<%u> wrap<%u>\n"
+			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
+			(int)sh->buzz_dbg_ptr,
+			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
+			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
+			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
+			buzzz_p->buffer_sz, buzzz_p->log_sz);
+
+		if (buzzz_p->count == 0) {
+			printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
+			goto done;
+		}
+
+		/* Allocate memory for trace buffer and format strings */
+		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
+		if (buffer_p == NULL) {
+			printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
+			goto done;
+		}
+
+		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
+		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
+		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
+
+		/* Process and display the trace using formatted output */
+
+		{
+			int ctr;
+			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
+				printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
+			}
+			printf("<code execution point>\n");
+		}
+
+		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
+
+		printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
+
+		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
+	}
+
+done:
+
+	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
+	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
+	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
+
+	return BCME_OK;
+}
+#endif /* BCM_BUZZZ */
+
+#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
+	((sih)->buscoretype == PCIE2_CORE_ID))
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	int bcmerror = 0;
+	unsigned long flags;
+#ifdef CONFIG_ARCH_MSM
+	int retry = POWERUP_MAX_RETRY;
+#endif /* CONFIG_ARCH_MSM */
+
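+	/*
+	 * When firmware is not downloaded at driver load time, flag == TRUE
+	 * powers the dongle off (WLAN OFF) and flag == FALSE powers it back on
+	 * and restarts firmware download via dhd_bus_start().
+	 */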
+	if (dhd_download_fw_on_driverload) {
+		bcmerror = dhd_bus_start(dhdp);
+	} else {
+		if (flag == TRUE) { /* Turn off WLAN */
+			/* Removing Power */
+			DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+
+			bus->dhd->up = FALSE;
+
+			if (bus->dhd->busstate != DHD_BUS_DOWN) {
+				dhdpcie_advertise_bus_cleanup(bus->dhd);
+				if (bus->intr) {
+					dhdpcie_bus_intr_disable(bus);
+					dhdpcie_free_irq(bus);
+				}
+#ifdef BCMPCIE_OOB_HOST_WAKE
+				/* Clean up any pending host wake IRQ */
+				dhd_bus_oob_intr_set(bus->dhd, FALSE);
+				dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+				dhd_os_wd_timer(dhdp, 0);
+				dhd_bus_stop(bus, TRUE);
+				dhd_prot_reset(dhdp);
+				dhd_clear(dhdp);
+				dhd_bus_release_dongle(bus);
+				dhdpcie_bus_free_resource(bus);
+				bcmerror = dhdpcie_bus_disable_device(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#ifdef CONFIG_ARCH_MSM
+				bcmerror = dhdpcie_bus_clock_stop(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: host clock stop failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#endif /* CONFIG_ARCH_MSM */
+				DHD_GENERAL_LOCK(bus->dhd, flags);
+				bus->dhd->busstate = DHD_BUS_DOWN;
+				DHD_GENERAL_UNLOCK(bus->dhd, flags);
+			} else {
+				if (bus->intr) {
+					dhdpcie_free_irq(bus);
+				}
+#ifdef BCMPCIE_OOB_HOST_WAKE
+				/* Clean up any pending host wake IRQ */
+				dhd_bus_oob_intr_set(bus->dhd, FALSE);
+				dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+				dhd_dpc_kill(bus->dhd);
+				dhd_prot_reset(dhdp);
+				dhd_clear(dhdp);
+				dhd_bus_release_dongle(bus);
+				dhdpcie_bus_free_resource(bus);
+				bcmerror = dhdpcie_bus_disable_device(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+#ifdef CONFIG_ARCH_MSM
+				bcmerror = dhdpcie_bus_clock_stop(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: host clock stop failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#endif  /* CONFIG_ARCH_MSM */
+			}
+
+			bus->dhd->dongle_reset = TRUE;
+			DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
+
+		} else { /* Turn on WLAN */
+			if (bus->dhd->busstate == DHD_BUS_DOWN) {
+				/* Powering On */
+				DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+				while (--retry) {
+					bcmerror = dhdpcie_bus_clock_start(bus);
+					if (!bcmerror) {
+						DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
+							__FUNCTION__));
+						break;
+					} else {
+						OSL_SLEEP(10);
+					}
+				}
+
+				if (bcmerror && !retry) {
+					DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#endif /* CONFIG_ARCH_MSM */
+				bus->is_linkdown = 0;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+				bus->read_shm_fail = FALSE;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+				bcmerror = dhdpcie_bus_enable_device(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: host configuration restore failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bcmerror = dhdpcie_bus_alloc_resource(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bcmerror = dhdpcie_bus_dongle_attach(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bcmerror = dhd_bus_request_irq(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bus->dhd->dongle_reset = FALSE;
+
+				bcmerror = dhd_bus_start(dhdp);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhd_bus_start: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+
+				bus->dhd->up = TRUE;
+				DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
+			} else {
+				DHD_ERROR(("%s: bus is not in DOWN state, nothing to power on\n",
+					__FUNCTION__));
+				goto done;
+			}
+		}
+	}
+
+done:
+	if (bcmerror) {
+		DHD_GENERAL_LOCK(bus->dhd, flags);
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+	}
+
+	return bcmerror;
+}
+
+static int
+dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	int32 int_val2 = 0;
+	int32 int_val3 = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	if (plen >= (int)sizeof(int_val) * 2)
+		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+	if (plen >= (int)sizeof(int_val) * 3)
+		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	switch (actionid) {
+
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdpcie_downloadvars(bus, arg, len);
+		break;
+	case IOV_SVAL(IOV_PCIE_LPBK):
+		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
+		break;
+
+	case IOV_SVAL(IOV_PCIE_DMAXFER): {
+		int int_val4 = 0;
+		if (plen >= (int)sizeof(int_val) * 4) {
+			bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
+				&int_val4, sizeof(int_val4));
+		}
+		bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3, int_val4);
+		break;
+	}
+
+#ifdef DEVICE_TX_STUCK_DETECT
+	case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT):
+		int_val = bus->dev_tx_stuck_monitor;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT):
+		bus->dev_tx_stuck_monitor = (bool)int_val;
+		break;
+#endif /* DEVICE_TX_STUCK_DETECT */
+	case IOV_GVAL(IOV_PCIE_SUSPEND):
+		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PCIE_SUSPEND):
+		if (bool_val) { /* Suspend */
+			int ret;
+			unsigned long flags;
+
+			/*
+			 * If some other context is busy, wait until they are done,
+			 * before starting suspend
+			 */
+			ret = dhd_os_busbusy_wait_condition(bus->dhd,
+				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
+			if (ret == 0) {
+				DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
+					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
+				return BCME_BUSY;
+			}
+
+			DHD_GENERAL_LOCK(bus->dhd, flags);
+			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+			dhdpcie_bus_suspend(bus, TRUE);
+
+			DHD_GENERAL_LOCK(bus->dhd, flags);
+			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+			dhd_os_busbusy_wake(bus->dhd);
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		} else { /* Resume */
+			unsigned long flags;
+			DHD_GENERAL_LOCK(bus->dhd, flags);
+			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+			dhdpcie_bus_suspend(bus, FALSE);
+
+			DHD_GENERAL_LOCK(bus->dhd, flags);
+			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+			dhd_os_busbusy_wake(bus->dhd);
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		}
+		break;
+
+	case IOV_GVAL(IOV_MEMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifdef BCM_BUZZZ
+	/* Dump dongle side buzzz trace to console */
+	case IOV_GVAL(IOV_BUZZZ_DUMP):
+		bcmerror = dhd_buzzz_dump_dngl(bus);
+		break;
+#endif /* BCM_BUZZZ */
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
+		break;
+
+	case IOV_GVAL(IOV_RAMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RAMSIZE):
+		bus->ramsize = int_val;
+		bus->orig_ramsize = int_val;
+		break;
+
+	case IOV_GVAL(IOV_RAMSTART):
+		int_val = (int32)bus->dongle_ram_base;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_CC_NVMSHADOW):
+	{
+		struct bcmstrbuf dump_b;
+
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
+		break;
+	}
+
+	case IOV_GVAL(IOV_SLEEP_ALLOWED):
+		bool_val = bus->sleep_allowed;
+		bcopy(&bool_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SLEEP_ALLOWED):
+		bus->sleep_allowed = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
+		int_val = bus->ltrsleep_on_unload;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
+		bus->ltrsleep_on_unload = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
+	{
+		struct bcmstrbuf dump_b;
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
+		break;
+	}
+	case IOV_GVAL(IOV_DMA_RINGINDICES):
+	{	int h2d_support, d2h_support;
+
+		d2h_support = bus->dhd->dma_d2h_ring_upd_support ? 1 : 0;
+		h2d_support = bus->dhd->dma_h2d_ring_upd_support ? 1 : 0;
+		int_val = d2h_support | (h2d_support << 1);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+	case IOV_SVAL(IOV_DMA_RINGINDICES):
+		/* Can change it only during initialization/FW download */
+		if (bus->dhd->busstate == DHD_BUS_DOWN) {
+			if ((int_val > 3) || (int_val < 0)) {
+				DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n", __FUNCTION__));
+				bcmerror = BCME_BADARG;
+			} else {
+				bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
+				bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
+				bus->dhd->dma_ring_upd_overwrite = TRUE;
+			}
+		} else {
+			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+				__FUNCTION__));
+			bcmerror = BCME_NOTDOWN;
+		}
+		break;
+
+	case IOV_GVAL(IOV_METADATA_DBG):
+		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_METADATA_DBG):
+		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
+		break;
+
+	case IOV_GVAL(IOV_RX_METADATALEN):
+		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RX_METADATALEN):
+		if (int_val > 64) {
+			bcmerror = BCME_BUFTOOLONG;
+			break;
+		}
+		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
+		break;
+
+	case IOV_SVAL(IOV_TXP_THRESHOLD):
+		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
+		break;
+
+	case IOV_GVAL(IOV_TXP_THRESHOLD):
+		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DB1_FOR_MB):
+		if (int_val)
+			bus->db1_for_mb = TRUE;
+		else
+			bus->db1_for_mb = FALSE;
+		break;
+
+	case IOV_GVAL(IOV_DB1_FOR_MB):
+		if (bus->db1_for_mb)
+			int_val = 1;
+		else
+			int_val = 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_TX_METADATALEN):
+		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TX_METADATALEN):
+		if (int_val > 64) {
+			bcmerror = BCME_BUFTOOLONG;
+			break;
+		}
+		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
+		break;
+
+	case IOV_SVAL(IOV_DEVRESET):
+		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+		break;
+	case IOV_SVAL(IOV_FORCE_FW_TRAP):
+		if (bus->dhd->busstate == DHD_BUS_DATA)
+			dhdpcie_fw_trap(bus);
+		else {
+			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
+			bcmerror = BCME_NOTUP;
+		}
+		break;
+	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
+		int_val = bus->dhd->flow_prio_map_type;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
+		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	case IOV_GVAL(IOV_IDLETIME):
+		int_val = bus->idletime;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLETIME):
+		if (int_val < 0) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->idletime = int_val;
+			if (bus->idletime) {
+				DHD_ENABLE_RUNTIME_PM(bus->dhd);
+			} else {
+				DHD_DISABLE_RUNTIME_PM(bus->dhd);
+			}
+		}
+		break;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	case IOV_GVAL(IOV_TXBOUND):
+		int_val = (int32)dhd_txbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXBOUND):
+		dhd_txbound = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
+		dhdpcie_send_mb_data(bus, (uint)int_val);
+		break;
+
+	case IOV_SVAL(IOV_INFORINGS):
+		dhd_prot_init_info_rings(bus->dhd);
+		break;
+
+	case IOV_SVAL(IOV_H2D_PHASE):
+		if (bus->dhd->busstate != DHD_BUS_DOWN) {
+			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+				__FUNCTION__));
+			bcmerror = BCME_NOTDOWN;
+			break;
+		}
+		if (int_val)
+			bus->dhd->h2d_phase_supported = TRUE;
+		else
+			bus->dhd->h2d_phase_supported = FALSE;
+		break;
+
+	case IOV_GVAL(IOV_H2D_PHASE):
+		int_val = (int32) bus->dhd->h2d_phase_supported;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
+		if (bus->dhd->busstate != DHD_BUS_DOWN) {
+			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+				__FUNCTION__));
+			bcmerror = BCME_NOTDOWN;
+			break;
+		}
+		if (int_val)
+			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
+		else
+			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
+		break;
+
+	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
+		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
+		if (bus->dhd->busstate != DHD_BUS_DOWN) {
+			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+				__FUNCTION__));
+			bcmerror = BCME_NOTDOWN;
+			break;
+		}
+		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
+		break;
+
+	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
+		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_RXBOUND):
+		int_val = (int32)dhd_rxbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RXBOUND):
+		dhd_rxbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_TRAPDATA):
+	{
+		struct bcmstrbuf dump_b;
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
+		break;
+	}
+
+	case IOV_GVAL(IOV_TRAPDATA_RAW):
+	{
+		struct bcmstrbuf dump_b;
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
+		break;
+	}
+	case IOV_SVAL(IOV_HANGREPORT):
+		bus->dhd->hang_report = bool_val;
+		DHD_ERROR(("%s: Set hang_report as %d\n",
+			__FUNCTION__, bus->dhd->hang_report));
+		break;
+
+	case IOV_GVAL(IOV_HANGREPORT):
+		int_val = (int32)bus->dhd->hang_report;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CTO_PREVENTION):
+		{
+			uint32 pcie_lnkst;
+
+			if (bus->sih->buscorerev < 19) {
+				bcmerror = BCME_UNSUPPORTED;
+				break;
+			}
+			si_corereg(bus->sih, bus->sih->buscoreidx,
+					OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
+
+			pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
+				OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+
+			/* 4347A0 in PCIEGEN1 doesn't support CTO prevention due to
+			 * 4347A0 DAR Issue : JIRA:CRWLPCIEGEN2-443: Issue in DAR write
+			 */
+			if ((bus->sih->buscorerev == 19) &&
+				(((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
+					PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
+				bcmerror = BCME_UNSUPPORTED;
+				break;
+			}
+			bus->dhd->cto_enable = bool_val;
+			dhdpcie_cto_init(bus, bus->dhd->cto_enable);
+			DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
+				__FUNCTION__, bus->dhd->cto_enable));
+		}
+		break;
+
+	case IOV_GVAL(IOV_CTO_PREVENTION):
+		if (bus->sih->buscorerev < 19) {
+			bcmerror = BCME_UNSUPPORTED;
+			break;
+		}
+		int_val = (int32)bus->dhd->cto_enable;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CTO_THRESHOLD):
+		{
+			if (bus->sih->buscorerev < 19) {
+				bcmerror = BCME_UNSUPPORTED;
+				break;
+			}
+			bus->dhd->cto_threshold = (uint32)int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_CTO_THRESHOLD):
+		if (bus->sih->buscorerev < 19) {
+			bcmerror = BCME_UNSUPPORTED;
+			break;
+		}
+		if (bus->dhd->cto_threshold)
+			int_val = (int32)bus->dhd->cto_threshold;
+		else
+			int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
+
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PCIE_WD_RESET):
+		if (bool_val) {
+			pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
+		}
+		break;
+#ifdef DHD_EFI
+	case IOV_SVAL(IOV_CONTROL_SIGNAL):
+		{
+			bcmerror = dhd_control_signal(bus, arg, TRUE);
+			break;
+		}
+
+	case IOV_GVAL(IOV_CONTROL_SIGNAL):
+		{
+			bcmerror = dhd_control_signal(bus, params, FALSE);
+			break;
+		}
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	case IOV_GVAL(IOV_DEEP_SLEEP):
+		int_val = bus->ds_enabled;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DEEP_SLEEP):
+		if (int_val == 1) {
+			bus->ds_enabled = TRUE;
+			/* Deassert */
+			if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) {
+#ifdef PCIE_INB_DW
+				int timeleft;
+				timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL);
+				if (timeleft == 0) {
+					DHD_ERROR(("DS-ENTER timeout\n"));
+					bus->ds_enabled = FALSE;
+					break;
+				}
+#endif /* PCIE_INB_DW */
+			}
+			else {
+				DHD_ERROR(("%s: Enable Deep Sleep failed !\n", __FUNCTION__));
+				bus->ds_enabled = FALSE;
+			}
+		}
+		else if (int_val == 0) {
+			/* Assert */
+			if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK)
+				bus->ds_enabled = FALSE;
+			else
+				DHD_ERROR(("%s: Disable Deep Sleep failed !\n", __FUNCTION__));
+		}
+		else
+			DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__));
+
+		break;
+#endif /* PCIE_OOB || PCIE_INB_DW */
+
+	case IOV_GVAL(IOV_WIFI_PROPERTIES):
+		bcmerror = dhd_wifi_properties(bus, params);
+		break;
+
+	case IOV_GVAL(IOV_OTP_DUMP):
+		bcmerror = dhd_otp_dump(bus, params);
+		break;
+#endif /* DHD_EFI */
+
+	case IOV_GVAL(IOV_IDMA_ENABLE):
+		int_val = bus->idma_enabled;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_IDMA_ENABLE):
+		bus->idma_enabled = (bool)int_val;
+		break;
+	case IOV_GVAL(IOV_IFRM_ENABLE):
+		int_val = bus->ifrm_enabled;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_IFRM_ENABLE):
+		bus->ifrm_enabled = (bool)int_val;
+		break;
+	case IOV_GVAL(IOV_CLEAR_RING):
+		bcopy(&int_val, arg, val_size);
+		dhd_flow_rings_flush(bus->dhd, 0);
+		break;
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	return bcmerror;
+} /* dhdpcie_bus_doiovar */
+
+/** Sends a loopback request of 'len' bytes to the dongle */
+static int
+dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 len)
+{
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
+		return 0;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+		return 0;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("%s: bus is not in a ready state for loopback\n", __FUNCTION__));
+		return 0;
+	}
+	dhdmsgbuf_lpbk_req(bus->dhd, len);
+	return 0;
+}
+
+/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
+void
+dhd_bus_hostready(struct  dhd_bus *bus)
+{
+	if (!bus->dhd->d2h_hostrdy_supported) {
+		return;
+	}
+
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return;
+	}
+
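+	/*
+	 * Ring doorbell-1 to signal host-ready (D3 exit) to the dongle; the
+	 * 0x12345678 written below is a host-chosen marker value.
+	 */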
+	DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
+		dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
+	si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
+	bus->hostready_count++;
+	DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
+}
+
+/* Clear INTSTATUS */
+void
+dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
+{
+	uint32 intstatus = 0;
+	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+		(bus->sih->buscorerev == 2)) {
+		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
+	} else {
+		/* this is a PCIE core register..not a config register... */
+		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
+			intstatus);
+	}
+}
+
+int
+dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
+{
+	int timeleft;
+	int rc = 0;
+	unsigned long flags;
+
+	printf("%s: state=%d\n", __FUNCTION__, state);
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (dhd_query_bus_erros(bus->dhd)) {
+		return BCME_ERROR;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
+		DHD_ERROR(("%s: not in a ready state\n", __FUNCTION__));
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		return BCME_ERROR;
+	}
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+	if (bus->dhd->dongle_reset) {
+		DHD_ERROR(("Dongle is in reset state.\n"));
+		return -EIO;
+	}
+
+	/* Check whether we are already in the requested state.
+	 * state=TRUE means Suspend
+	 * state=FALSE means Resume
+	 */
+	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
+		return BCME_OK;
+	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
+		DHD_ERROR(("Bus is already in RESUME state.\n"));
+		return BCME_OK;
+	}
+
+	if (bus->d3_suspend_pending) {
+		DHD_ERROR(("Suspend pending ...\n"));
+		return BCME_ERROR;
+	}
+
+
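+	/*
+	 * Suspend path: stop the tx queues, send H2D_HOST_D3_INFORM to the
+	 * dongle and wait for its D3 ACK. On ACK the host-side IRQ is disabled
+	 * and the PCIe link is suspended; if a wakelock is still held the
+	 * dongle is brought back to D0 and the queues are restarted, and on
+	 * ACK timeout the queues are restarted and a HANG event is raised.
+	 */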
+	if (state) {
+		int idle_retry = 0;
+		int active;
+
+		if (bus->is_linkdown) {
+			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
+				__FUNCTION__, state));
+			return BCME_ERROR;
+		}
+
+		/* Suspend */
+		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
+
+		DHD_GENERAL_LOCK(bus->dhd, flags);
+		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
+			DHD_ERROR(("Tx Request is not ended\n"));
+			bus->dhd->busstate = DHD_BUS_DATA;
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#ifndef DHD_EFI
+			return -EBUSY;
+#else
+			return BCME_ERROR;
+#endif
+		}
+
+		/* stop all interface network queue. */
+		dhd_bus_stop_queue(bus);
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+#ifdef DHD_TIMESYNC
+		/* disable time sync mechanism, if configured */
+		dhd_timesync_control(bus->dhd, TRUE);
+#endif /* DHD_TIMESYNC */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+		dhd_bus_set_device_wake(bus, TRUE);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+#ifdef PCIE_OOB
+		bus->oob_presuspend = TRUE;
+#endif
+#ifdef PCIE_INB_DW
+		/* De-assert at this point for In-band device_wake */
+		if (INBAND_DW_ENAB(bus)) {
+			dhd_bus_set_device_wake(bus, FALSE);
+			dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT);
+		}
+#endif /* PCIE_INB_DW */
+
+		/* Clear wait_for_d3_ack */
+		bus->wait_for_d3_ack = 0;
+		/*
+		 * Send H2D_HOST_D3_INFORM to dongle and mark
+		 * bus->d3_suspend_pending to TRUE in dhdpcie_send_mb_data
+		 * inside atomic context, so that no more DBs will be
+		 * rung after sending D3_INFORM
+		 */
+		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
+
+		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
+		dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT);
+		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
+
+#ifdef DHD_RECOVER_TIMEOUT
+		if (bus->wait_for_d3_ack == 0) {
+			/* If wait_for_d3_ack was not updated because D2H MB was not received */
+			uint32 intstatus = 0;
+			uint32 intmask = 0;
+			intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+			intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+			if ((intstatus) && (!intmask) && (timeleft == 0) &&
+				(!dhd_query_bus_erros(bus->dhd))) {
+
+				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x intmask=%x\n",
+					__FUNCTION__, intstatus, intmask));
+				DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
+				DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_en_count=%lu\n"
+					"isr_intr_disable_count=%lu suspend_intr_dis_count=%lu\n"
+					"dpc_return_busdown_count=%lu\n",
+					bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
+					bus->isr_intr_disable_count,
+					bus->suspend_intr_disable_count,
+					bus->dpc_return_busdown_count));
+
+				dhd_prot_process_ctrlbuf(bus->dhd);
+
+				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
+
+				/* Enable Back Interrupts using IntMask  */
+				dhdpcie_bus_intr_enable(bus);
+			}
+
+
+		} /* bus->wait_for_d3_ack was 0 */
+#endif /* DHD_RECOVER_TIMEOUT */
+
+		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+
+		/* To allow threads that got pre-empted to complete.
+		 */
+		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
+			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
+			OSL_SLEEP(1);
+			idle_retry++;
+		}
+
+		if (bus->wait_for_d3_ack) {
+			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
+
+			/* Got D3 Ack. Suspend the bus */
+			if (active) {
+				DHD_ERROR(("%s(): Suspend failed because of wakelock, "
+					"restoring Dongle to D0\n", __FUNCTION__));
+
+				/*
+				 * Dongle still thinks that it has to be in D3 state until
+				 * it gets a D0 Inform, but we are backing off from suspend.
+				 * Ensure that Dongle is brought back to D0.
+				 *
+				 * Bringing the Dongle back from the D3 Ack state to D0 is a
+				 * two step process: first send the D0 Inform mailbox message
+				 * to take it out of D3 Ack, then ring doorbell-1 (hostready)
+				 * so it resumes normal operation.
+				 */
+
+				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
+				bus->wait_for_d3_ack = 0;
+
+				/* Enable back the intmask which was cleared in DPC
+				 * after getting D3_ACK.
+				 */
+				bus->resume_intr_enable_count++;
+				dhdpcie_bus_intr_enable(bus);
+
+				if (bus->use_d0_inform) {
+					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+					dhdpcie_send_mb_data(bus,
+						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
+					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+				}
+				/* ring doorbell 1 (hostready) */
+				dhd_bus_hostready(bus);
+
+				DHD_GENERAL_LOCK(bus->dhd, flags);
+				bus->d3_suspend_pending = FALSE;
+				bus->dhd->busstate = DHD_BUS_DATA;
+				/* resume all interface network queue. */
+				dhd_bus_start_queue(bus);
+				DHD_GENERAL_UNLOCK(bus->dhd, flags);
+				rc = BCME_ERROR;
+			} else {
+#ifdef PCIE_OOB
+				bus->oob_presuspend = FALSE;
+				if (OOB_DW_ENAB(bus)) {
+					dhd_bus_set_device_wake(bus, FALSE);
+				}
+#endif /* PCIE_OOB */
+#if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE)
+				bus->oob_presuspend = TRUE;
+#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_INB_DW
+				if (INBAND_DW_ENAB(bus)) {
+					if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+						DW_DEVICE_HOST_SLEEP_WAIT) {
+						dhdpcie_bus_set_pcie_inband_dw_state(bus,
+							DW_DEVICE_HOST_SLEEP);
+					}
+				}
+#endif /* PCIE_INB_DW */
+				if (bus->use_d0_inform &&
+					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
+					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
+					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+				}
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+				dhdpcie_oob_intr_set(bus, TRUE);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+				DHD_GENERAL_LOCK(bus->dhd, flags);
+				/* The Host cannot process interrupts now so disable the same.
+				 * No need to disable the dongle INTR using intmask, as we are
+				 * already calling dhdpcie_bus_intr_disable from DPC context after
+				 * getting D3_ACK. Code may not look symmetric between Suspend and
+				 * Resume paths but this is done to close down the timing window
+				 * between DPC and suspend context.
+				 */
+				/* Disable interrupt from host side!! */
+				dhdpcie_disable_irq_nosync(bus);
+
+				bus->dhd->d3ackcnt_timeout = 0;
+				bus->d3_suspend_pending = FALSE;
+				bus->dhd->busstate = DHD_BUS_SUSPEND;
+				DHD_GENERAL_UNLOCK(bus->dhd, flags);
+				/* Handle Host Suspend */
+				rc = dhdpcie_pci_suspend_resume(bus, state);
+			}
+		} else if (timeleft == 0) {
+			bus->dhd->d3ack_timeout_occured = TRUE;
+			/* If the D3 Ack has timeout */
+			bus->dhd->d3ackcnt_timeout++;
+			DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
+					__FUNCTION__, bus->dhd->d3ackcnt_timeout));
+			DHD_GENERAL_LOCK(bus->dhd, flags);
+			bus->d3_suspend_pending = FALSE;
+			bus->dhd->busstate = DHD_BUS_DATA;
+			/* resume all interface network queue. */
+			dhd_bus_start_queue(bus);
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+			if (!bus->dhd->dongle_trap_occured) {
+				uint32 intstatus = 0;
+
+				/* Check if PCIe bus status is valid */
+				intstatus = si_corereg(bus->sih,
+					bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+				if (intstatus == (uint32)-1) {
+					/* Invalidate PCIe bus status */
+					bus->is_linkdown = 1;
+				}
+
+				dhd_bus_dump_console_buffer(bus);
+				dhd_prot_debug_info_print(bus->dhd);
+#ifdef DHD_FW_COREDUMP
+				if (bus->dhd->memdump_enabled) {
+					/* write core dump to file */
+					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
+					dhdpcie_mem_dump(bus);
+				}
+#endif /* DHD_FW_COREDUMP */
+				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
+					__FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+				bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
+			}
+			rc = -ETIMEDOUT;
+		}
+		bus->wait_for_d3_ack = 1;
+
+#ifdef PCIE_OOB
+		bus->oob_presuspend = FALSE;
+#endif /* PCIE_OOB */
+	} else {
+		/* Resume */
+		/**
+		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
+		 * si_backplane_access(function to read/write backplane)
+		 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
+		 * window being accessed is different from the window
+		 * being pointed by second_bar0win.
+		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
+		 * invalidating second_bar0win after resume updates
+		 * PCIE2_BAR0_CORE2_WIN with right window.
+		 */
+		si_invalidate_second_bar0win(bus->sih);
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_INB_DW
+		if (INBAND_DW_ENAB(bus)) {
+			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
+				dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
+			}
+		}
+#endif /* PCIE_INB_DW */
+		rc = dhdpcie_pci_suspend_resume(bus, state);
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+		bus->oob_presuspend = FALSE;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+		if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+			if (bus->use_d0_inform) {
+				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
+				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+			}
+			/* ring doorbell 1 (hostready) */
+			dhd_bus_hostready(bus);
+		}
+
+		DHD_GENERAL_LOCK(bus->dhd, flags);
+		bus->dhd->busstate = DHD_BUS_DATA;
+#ifdef DHD_PCIE_RUNTIMEPM
+		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
+			bus->bus_wake = 1;
+			OSL_SMP_WMB();
+			wake_up_interruptible(&bus->rpm_queue);
+		}
+#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef PCIE_OOB
+		/*
+		 * Assert & Deassert the Device Wake. The following is the explanation for doing so.
+		 * 0) At this point,
+		 *    Host is in suspend state, Link is in L2/L3, Dongle is in D3 Cold
+		 *    Device Wake is enabled.
+		 * 1) When the Host comes out of Suspend, it first sends PERST# in the Link.
+		 *    Looking at this the Dongle moves from D3 Cold to NO DS State
+		 * 2) Now The Host OS calls the "resume" function of DHD. From here the DHD first
+		 *    Asserts the Device Wake.
+		 *    From the defn, when the Device Wake is asserted, The dongle FW will ensure
+		 *    that the Dongle is out of deep sleep IF the device is already in deep sleep.
+		 *    But note that now the Dongle is NOT in Deep sleep and is actually in
+		 *    NO DS state. So just driving the Device Wake high does not trigger any state
+		 *    transitions. The Host should actually "Toggle" the Device Wake to ensure
+		 *    that Dongle synchronizes with the Host and starts the State Transition to D0.
+		 * 3) Note that the above explanation is applicable only when the Host comes out of
+		 *    suspend and the Dongle comes out of D3 Cold
+		 */
+		/* This logic is not required when hostready is enabled */
+
+		if (!bus->dhd->d2h_hostrdy_supported) {
+			if (OOB_DW_ENAB(bus)) {
+				dhd_bus_set_device_wake(bus, TRUE);
+				OSL_DELAY(1000);
+				dhd_bus_set_device_wake(bus, FALSE);
+			}
+		}
+#endif /* PCIE_OOB */
+		/* resume all interface network queue. */
+		dhd_bus_start_queue(bus);
+		/* The Host is ready to process interrupts now so enable the same. */
+
+		/* TODO: for NDIS also we need to use enable_irq in future */
+		bus->resume_intr_enable_count++;
+		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
+		dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#ifdef DHD_TIMESYNC
+		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+		/* enable time sync mechanism, if configured */
+		dhd_timesync_control(bus->dhd, FALSE);
+		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+#endif /* DHD_TIMESYNC */
+	}
+	return rc;
+}
+
+uint32
+dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
+{
+	ASSERT(bus && bus->sih);
+	if (enable) {
+		si_corereg(bus->sih, bus->sih->buscoreidx,
+			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
+	} else {
+		si_corereg(bus->sih, bus->sih->buscoreidx,
+			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
+	}
+	return 0;
+}
+
+/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
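+/*
+ * Illustrative example (assumed register contents): if config offset 0x1004
+ * reads 0x00AA1234, a call with l1_entry_time = 0x40 rewrites only bits
+ * [22:16], giving 0x00C01234; bits outside the 0x7f field mask (including
+ * bit 23) are preserved.
+ */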
+uint32
+dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
+{
+	uint reg_val;
+
+	ASSERT(bus && bus->sih);
+
+	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+		0x1004);
+	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
+		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
+	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
+		reg_val);
+
+	return 0;
+}
+
+/** Transfers bytes from host to dongle and to host again using DMA */
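+/*
+ * Illustrative usage (assumed values): dhdpcie_bus_dmaxfer_req(bus, 1024, 0, 0, 0)
+ * requests a 1 KB host->dongle->host DMA loopback with no added source or
+ * destination delay and without the d11 loopback path.
+ */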
+static int
+dhdpcie_bus_dmaxfer_req(
+	struct  dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk)
+{
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("%s: bus is not in a ready state for LPBK\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (len < 5 || len > 4194296) {
+		DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, d11_lpbk);
+}
+
+
+
+static int
+dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
+{
+	int bcmerror = 0;
+	volatile uint32 *cr4_regs;
+
+	if (!bus->sih) {
+		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		/* Make sure BAR1 maps to backplane address 0 */
+		dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
+		bus->alp_only = TRUE;
+
+		/* some chips (e.g. 43602) have two ARM cores, the CR4 receives the firmware. */
+		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+
+		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			si_core_reset(bus->sih, 0, 0);
+			/* Reset the last 4 bytes of RAM address, to be used for the shared area */
+			dhdpcie_init_shared_addr(bus);
+		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
+			si_core_disable(bus->sih, 0);
+
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			si_core_reset(bus->sih, 0, 0);
+
+			/* Clear the top bit of memory */
+			if (bus->ramsize) {
+				uint32 zeros = 0;
+				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
+				                     (uint8*)&zeros, 4) < 0) {
+					bcmerror = BCME_ERROR;
+					goto fail;
+				}
+			}
+		} else {
+			/* For CR4,
+			 * Halt ARM
+			 * Remove ARM reset
+			 * Read RAM base address [0x18_0000]
+			 * [next] Download firmware
+			 * [done at else] Populate the reset vector
+			 * [done at else] Remove ARM halt
+			*/
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+			if (BCM43602_CHIP(bus->sih->chip)) {
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+			}
+			/* Reset the last 4 bytes of RAM address, to be used for the shared area */
+			dhdpcie_init_shared_addr(bus);
+		}
+	} else {
+		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+			/* write vars */
+			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			/* write address 0 with reset instruction */
+			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+			/* now remove reset and halt and continue to run CA7 */
+		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if (!si_iscoreup(bus->sih)) {
+				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			/* Enable remap before ARM reset but after vars.
+			 * No backplane access in remap mode
+			 */
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+
+			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		} else {
+			if (BCM43602_CHIP(bus->sih->chip)) {
+				/* Firmware crashes on SOCSRAM access when core is in reset */
+				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
+						__FUNCTION__));
+					bcmerror = BCME_ERROR;
+					goto fail;
+				}
+				si_core_reset(bus->sih, 0, 0);
+				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+			}
+
+			/* write vars */
+			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+#ifdef BCM_ASLR_HEAP
+			/* write a random number to TCM for the purpose of
+			 * randomizing heap address space.
+			 */
+			dhdpcie_wrt_rnd(bus);
+#endif /* BCM_ASLR_HEAP */
+
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			/* write address 0 with reset instruction */
+			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+			if (bcmerror == BCME_OK) {
+				uint32 tmp;
+
+				bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
+				                                (uint8 *)&tmp, sizeof(tmp));
+
+				if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
+					DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
+					          __FUNCTION__, bus->resetinstr));
+					DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
+					          __FUNCTION__, tmp));
+					bcmerror = BCME_ERROR;
+					goto fail;
+				}
+			}
+
+			/* now remove reset and halt and continue to run CR4 */
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to PCIE core */
+	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+
+	return bcmerror;
+} /* dhdpcie_bus_download_state */
+
+static int
+dhdpcie_bus_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize, phys_size;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	varaddr += bus->dongle_ram_base;
+
+	if (bus->vars) {
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+		/* Write the vars list */
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+
+		/* Implement read back and verify later */
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray)
+			return BCME_NOMEM;
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+
+		/* Compare the org NVRAM with the one read from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			__FUNCTION__));
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+	phys_size += bus->dongle_ram_base;
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
+		phys_size, bus->ramsize));
+	DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
+		varaddr, varsize));
+	varsize = ((phys_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+		bus->nvram_csm = varsizew;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		bus->nvram_csm = varsizew;
+		varsizew = htol32(varsizew);
+	}
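+	/*
+	 * Illustrative example: with varsize = 0x2000 bytes, varsizew = 0x800
+	 * words, so the token is (~0x800 << 16) | 0x800 = 0xF7FF0800; the upper
+	 * half carries the one's complement of the word count as a checksum.
+	 */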
+
+	DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+} /* dhdpcie_bus_write_vars */
+
+int
+dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
+		char *sp = NULL;
+		char *ep = NULL;
+		int i;
+		char tag[2][8] = {"ccode=", "regrev="};
+
+		/* Find ccode and regrev info */
+		for (i = 0; i < 2; i++) {
+			sp = strnstr(bus->vars, tag[i], bus->varsz);
+			if (!sp) {
+				DHD_ERROR(("%s: Could not find %s info in the nvram %s\n",
+					__FUNCTION__, tag[i], bus->nv_path));
+				bcmerror = BCME_ERROR;
+				goto err;
+			}
+			sp = strchr(sp, '=');
+			ep = strchr(sp, '\0');
+			/* We assume that the string length of both the ccode and
+			 * regrev values does not exceed WLC_CNTRY_BUF_SZ
+			 */
+			if (sp && ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
+				sp++;
+				while (*sp != '\0') {
+					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
+						__FUNCTION__, tag[i], *sp));
+					*sp++ = '0';
+				}
+			} else {
+				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
+					__FUNCTION__, tag[i]));
+				bcmerror = BCME_ERROR;
+				goto err;
+			}
+		}
+	}
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+
+
+err:
+	return bcmerror;
+}
+
+/* loop through the capability list and see if the pcie capability exists */
+uint8
+dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
+{
+	uint8 cap_id;
+	uint8 cap_ptr = 0;
+	uint8 byte_val;
+
+	/* check for Header type 0 */
+	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
+	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
+		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
+		goto end;
+	}
+
+	/* check if the capability pointer field exists */
+	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
+	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
+		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
+		goto end;
+	}
+
+	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+	/* check if the capability pointer is 0x00 */
+	if (cap_ptr == 0x00) {
+		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
+		goto end;
+	}
+
+	/* loop through the capability list and see if the pcie capability exists */
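+	/* In a Type 0 header each capability entry stores its ID at cap_ptr and
+	 * the offset of the next entry at cap_ptr + 1, so this walk ends either
+	 * on a match or on a 0x00 next pointer.
+	 */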
+
+	cap_id = read_pci_cfg_byte(cap_ptr);
+
+	while (cap_id != req_cap_id) {
+		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
+		if (cap_ptr == 0x00) break;
+		cap_id = read_pci_cfg_byte(cap_ptr);
+	}
+
+end:
+	return cap_ptr;
+}
+
+void
+dhdpcie_pme_active(osl_t *osh, bool enable)
+{
+	uint8 cap_ptr;
+	uint32 pme_csr;
+
+	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
+
+	if (!cap_ptr) {
+		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
+		return;
+	}
+
+	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
+	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
+
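+	/* PME_Status is write-one-to-clear, so setting it in the write below also
+	 * clears any pending status.
+	 */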
+	pme_csr |= PME_CSR_PME_STAT;
+	if (enable) {
+		pme_csr |= PME_CSR_PME_EN;
+	} else {
+		pme_csr &= ~PME_CSR_PME_EN;
+	}
+
+	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
+}
+
+bool
+dhdpcie_pme_cap(osl_t *osh)
+{
+	uint8 cap_ptr;
+	uint32 pme_cap;
+
+	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
+
+	if (!cap_ptr) {
+		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
+
+	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
+
+	return ((pme_cap & PME_CAP_PM_STATES) != 0);
+}
+
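+/*
+ * Read or read-modify-write the PCIe Link Control register in config space.
+ * Usage sketch (illustrative values): dhdpcie_lcreg(osh, 0, 0) is a plain read,
+ * while dhdpcie_lcreg(osh, 0x3, 0x2) rewrites the ASPM control field
+ * (bits [1:0]) to enable L1 only and returns the updated value.
+ */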
+uint32
+dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
+{
+
+	uint8	pcie_cap;
+	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
+	uint32	reg_val;
+
+
+	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
+
+	if (!pcie_cap) {
+		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
+		return 0;
+	}
+
+	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
+
+	/* set operation */
+	if (mask) {
+		/* read */
+		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+
+		/* modify */
+		reg_val &= ~mask;
+		reg_val |= (mask & val);
+
+		/* write */
+		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
+	}
+	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+}
+
+
+
+uint8
+dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
+{
+	uint8	pcie_cap;
+	uint32	reg_val;
+	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
+
+	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
+
+	if (!pcie_cap) {
+		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
+		return 0;
+	}
+
+	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
+
+	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+	/* set operation */
+	if (mask) {
+		if (val)
+			reg_val |= PCIE_CLKREQ_ENAB;
+		else
+			reg_val &= ~PCIE_CLKREQ_ENAB;
+		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
+		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
+	}
+	if (reg_val & PCIE_CLKREQ_ENAB)
+		return 1;
+	else
+		return 0;
+}
+
+void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+	uint32 intstatus = 0;
+	uint32 intmask = 0;
+	uint32 mbintstatus = 0;
+	uint32 d2h_mb_data = 0;
+
+	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+	mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
+	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+
+	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
+		intstatus, intmask, mbintstatus);
+	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
+		d2h_mb_data, dhd->bus->def_intmask);
+	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
+	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
+		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
+		"dpc_return_busdown_count=%lu\n",
+		dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
+		dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
+		dhd->bus->dpc_return_busdown_count);
+}
+
+/** Add bus dump output to a buffer */
+void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	uint16 flowid;
+	int ix = 0;
+	flow_ring_node_t *flow_ring_node;
+	flow_info_t *flow_info;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	if (dhdp->busstate != DHD_BUS_DATA)
+		return;
+
+#ifdef DHD_WAKE_STATUS
+	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
+		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
+		dhdp->bus->wake_counts.rcwake);
+#ifdef DHD_WAKE_RX_STATUS
+	bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
+		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
+		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
+	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
+		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
+		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
+	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
+		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
+		dhdp->bus->wake_counts.rx_icmpv6_ns);
+#endif /* DHD_WAKE_RX_STATUS */
+#ifdef DHD_WAKE_EVENT_STATUS
+	for (flowid = 0; flowid < WLC_E_LAST; flowid++)
+		if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
+			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
+				dhdp->bus->wake_counts.rc_event[flowid]);
+	bcm_bprintf(strbuf, "\n");
+#endif /* DHD_WAKE_EVENT_STATUS */
+#endif /* DHD_WAKE_STATUS */
+
+	dhd_prot_print_info(dhdp, strbuf);
+	dhd_dump_intr_registers(dhdp, strbuf);
+	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
+		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
+	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
+	bcm_bprintf(strbuf,
+		"%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
+		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
+		"Overflows", "RD", "WR");
+	bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
+
+	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+		if (!flow_ring_node->active)
+			continue;
+
+		flow_info = &flow_ring_node->flow_info;
+		bcm_bprintf(strbuf,
+			"%3d. %4d %2d %4d %17s %4d %4d %6d %10u ", ix++,
+			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
+			bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf),
+			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
+			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
+			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
+			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
+		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
+			"%4d %4d ");
+		bcm_bprintf(strbuf,
+			"%5s %6s %5s\n", "NA", "NA", "NA");
+	}
+	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
+	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
+	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
+	if (dhdp->d2h_hostrdy_supported) {
+		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
+	}
+#ifdef PCIE_INB_DW
+	/* Inband device wake counters */
+	if (INBAND_DW_ENAB(dhdp->bus)) {
+		bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n",
+			dhdp->bus->inband_dw_assert_cnt);
+		bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n",
+			dhdp->bus->inband_dw_deassert_cnt);
+		bcm_bprintf(strbuf, "Inband DS-EXIT <host initiated> count: %d\n",
+			dhdp->bus->inband_ds_exit_host_cnt);
+		bcm_bprintf(strbuf, "Inband DS-EXIT <device initiated> count: %d\n",
+			dhdp->bus->inband_ds_exit_device_cnt);
+		bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n",
+			dhdp->bus->inband_ds_exit_to_cnt);
+		bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
+			dhdp->bus->inband_host_sleep_exit_to_cnt);
+	}
+#endif /* PCIE_INB_DW */
+}
+
+/**
+ * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
+ * flow queue to their flow ring.
+ */
+static void
+dhd_update_txflowrings(dhd_pub_t *dhd)
+{
+	unsigned long flags;
+	dll_t *item, *next;
+	flow_ring_node_t *flow_ring_node;
+	struct dhd_bus *bus = dhd->bus;
+
+	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+	for (item = dll_head_p(&bus->flowring_active_list);
+		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
+		item = next) {
+		if (dhd->hang_was_sent) {
+			break;
+		}
+
+		next = dll_next_p(item);
+		flow_ring_node = dhd_constlist_to_flowring(item);
+
+		/* Ensure that flow_ring_node in the list is Not Null */
+		ASSERT(flow_ring_node != NULL);
+
+		/* Ensure that the flowring node has valid contents */
+		ASSERT(flow_ring_node->prot_info != NULL);
+
+		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
+	}
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+}
+
+/** Mailbox ringbell Function */
+static void
+dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
+		return;
+	}
+	if (bus->db1_for_mb)  {
+		/* this is a pcie core register, not the config register */
+		DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
+	} else {
+		DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
+		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+	}
+}
+
+/* Upon receiving a mailbox interrupt,
+ * if H2D_FW_TRAP bit is set in mailbox location
+ * device traps
+ */
+static void
+dhdpcie_fw_trap(dhd_bus_t *bus)
+{
+	/* Send the mailbox data and generate mailbox intr. */
+	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
+}
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+void
+dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
+{
+	if (dhd_doorbell_timeout)
+		dhd_timeout_start(&bus->doorbell_timer,
+			(dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
+	else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) {
+		dhd_bus_set_device_wake(bus, FALSE);
+	}
+}
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+#ifdef PCIE_INB_DW
+
+void
+dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus)
+{
+	/* The DHD_BUS_INB_DW_LOCK must be held before
+	* calling this function !!
+	*/
+	if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+		DW_DEVICE_DS_DEV_SLEEP_PEND) &&
+		(bus->host_active_cnt == 0)) {
+		dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
+		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+	}
+}
+
+int
+dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	int timeleft;
+	unsigned long flags;
+	int ret;
+
+	if (!INBAND_DW_ENAB(bus)) {
+		return BCME_ERROR;
+	}
+
+	if (val) {
+		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+
+		/*
+		 * Reset the Door Bell Timeout value. So that the Watchdog
+		 * doesn't try to Deassert Device Wake, while we are in
+		 * the process of still Asserting the same.
+		 */
+		if (dhd_doorbell_timeout) {
+			dhd_timeout_start(&bus->doorbell_timer,
+				(dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
+		}
+
+		if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+			DW_DEVICE_DS_DEV_SLEEP) {
+			/* Clear wait_for_ds_exit */
+			bus->wait_for_ds_exit = 0;
+			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
+			if (ret != BCME_OK) {
+				DHD_ERROR(("Failed: assert Inband device_wake\n"));
+				bus->wait_for_ds_exit = 1;
+				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+				ret = BCME_ERROR;
+				goto exit;
+			}
+			dhdpcie_bus_set_pcie_inband_dw_state(bus,
+				DW_DEVICE_DS_DISABLED_WAIT);
+			bus->inband_dw_assert_cnt++;
+		} else {
+			DHD_INFO(("Not in DS SLEEP state \n"));
+			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+			ret = BCME_OK;
+			goto exit;
+		}
+
+		/*
+		 * Since we are going to wait/sleep .. release the lock.
+		 * The Device Wake sanity is still valid, because
+		 * a) If there is another context that comes in and tries
+		 *    to assert DS again and if it gets the lock, since
+		 *    ds_state would be now != DW_DEVICE_DS_DEV_SLEEP the
+		 *    context would return saying Not in DS Sleep.
+		 *    b) If there is another context that comes in and tries
+		 *    to de-assert DS and gets the lock,
+		 *    since the ds_state is != DW_DEVICE_DS_DEV_WAKE
+		 *    that context would return too. This can not happen
+		 *    since the watchdog is the only context that can
+		 *    De-Assert Device Wake and as the first step of
+		 *    Asserting the Device Wake, we have pushed out the
+		 *    Door Bell Timeout.
+		 *
+		 */
+		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+
+		if (!CAN_SLEEP()) {
+			/* Called from context that cannot sleep */
+			OSL_DELAY(1000);
+			bus->wait_for_ds_exit = 1;
+		} else {
+			/* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
+			timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit);
+			if (!bus->wait_for_ds_exit && timeleft == 0) {
+				DHD_ERROR(("DS-EXIT timeout\n"));
+				bus->inband_ds_exit_to_cnt++;
+				bus->ds_exit_timeout = 0;
+				ret = BCME_ERROR;
+				goto exit;
+			}
+		}
+
+		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+		dhdpcie_bus_set_pcie_inband_dw_state(bus,
+			DW_DEVICE_DS_DEV_WAKE);
+		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+
+		ret = BCME_OK;
+	} else {
+		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+		if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+			DW_DEVICE_DS_DEV_WAKE)) {
+			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT);
+			if (ret != BCME_OK) {
+				DHD_ERROR(("Failed: deassert Inband device_wake\n"));
+				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+				goto exit;
+			}
+			dhdpcie_bus_set_pcie_inband_dw_state(bus,
+				DW_DEVICE_DS_ACTIVE);
+			bus->inband_dw_deassert_cnt++;
+		} else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+			DW_DEVICE_DS_DEV_SLEEP_PEND) &&
+			(bus->host_active_cnt == 0)) {
+			dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
+			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+		}
+
+		ret = BCME_OK;
+		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+	}
+
+exit:
+	return ret;
+}
+#endif /* PCIE_INB_DW */
+
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+int
+dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	if (bus->ds_enabled) {
+#ifdef PCIE_INB_DW
+		if (INBAND_DW_ENAB(bus)) {
+			return dhd_bus_inb_set_device_wake(bus, val);
+		}
+#endif /* PCIE_INB_DW */
+#ifdef PCIE_OOB
+		if (OOB_DW_ENAB(bus)) {
+			return dhd_os_oob_set_device_wake(bus, val);
+		}
+#endif /* PCIE_OOB */
+	}
+	return BCME_OK;
+}
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+/** mailbox doorbell ring function */
+void
+dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
+{
+	/* Skip after sending D3_INFORM */
+	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+			"busstate=%d, d3_suspend_pending=%d\n",
+			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+		return;
+	}
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
+	} else {
+		/* this is a pcie core register, not the config register */
+		DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
+		if (IDMA_ACTIVE(bus->dhd)) {
+			si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
+				~0, value);
+		} else {
+			si_corereg(bus->sih, bus->sih->buscoreidx,
+				PCIH2D_MailBox, ~0, 0x12345678);
+		}
+	}
+}
+
+/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
+void
+dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
+{
+	/* this is a pcie core register, not the config register */
+	/* Skip after sending D3_INFORM */
+	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+			"busstate=%d, d3_suspend_pending=%d\n",
+			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+		return;
+	}
+	DHD_INFO(("writing a door bell 2 to the device\n"));
+	si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
+		~0, value);
+}
+
+void
+dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
+{
+	/* Skip after sending D3_INFORM */
+	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+			"busstate=%d, d3_suspend_pending=%d\n",
+			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+		return;
+	}
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	if (OOB_DW_ENAB(bus)) {
+		dhd_bus_set_device_wake(bus, TRUE);
+	}
+	dhd_bus_doorbell_timeout_reset(bus);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
+}
+
+void
+dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
+{
+	/* Skip after sending D3_INFORM */
+	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+			"busstate=%d, d3_suspend_pending=%d\n",
+			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+		return;
+	}
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	if (devwake) {
+		if (OOB_DW_ENAB(bus)) {
+			dhd_bus_set_device_wake(bus, TRUE);
+		}
+	}
+	dhd_bus_doorbell_timeout_reset(bus);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
+}
+
+static void
+dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
+{
+	uint32 w;
+	/* Skip after sending D3_INFORM */
+	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+			"busstate=%d, d3_suspend_pending=%d\n",
+			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+		return;
+	}
+	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
+}
+
+dhd_mb_ring_t
+dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			PCIMailBoxInt);
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhd_bus_ringbell_oldpcie;
+		}
+	} else {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			PCIH2D_MailBox);
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhdpcie_bus_ringbell_fast;
+		}
+	}
+	return dhd_bus_ringbell;
+}
+
+dhd_mb_ring_2_t
+dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
+{
+	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+		PCIH2D_MailBox_2);
+	if (bus->pcie_mb_intr_2_addr) {
+		bus->pcie_mb_intr_osh = si_osh(bus->sih);
+		return dhdpcie_bus_ringbell_2_fast;
+	}
+	return dhd_bus_ringbell_2;
+}
+
+bool BCMFASTPATH
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
+	 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
+	 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
+	 * and if we return from here, then IOCTL response will never be handled
+	 */
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		bus->dpc_return_busdown_count++;
+		return 0;
+	}
+#ifdef DHD_PCIE_RUNTIMEPM
+	bus->idlecount = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
+	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+#ifdef DHD_READ_INTSTATUS_IN_DPC
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+		bus->intstatus = dhdpcie_bus_intstatus(bus);
+		/* Check if the interrupt is ours or not */
+		if (bus->intstatus == 0) {
+			goto INTR_ON;
+		}
+		bus->intrcount++;
+	}
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+
+	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
+	if (!resched) {
+		bus->intstatus = 0;
+#ifdef DHD_READ_INTSTATUS_IN_DPC
+INTR_ON:
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
+		bus->dpc_intr_enable_count++;
+		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return resched;
+
+}
+
+
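+/*
+ * Deliver host-to-dongle mailbox data. For shared interface rev >= 6 (when the
+ * legacy mailbox is not forced) the value is sent as an H2D control message;
+ * otherwise it is written to the shared H2D_MB_DATA word and a device mailbox
+ * interrupt is generated.
+ */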
+int
+dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
+{
+	uint32 cur_h2d_mb_data = 0;
+	unsigned long flags;
+
+	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
+
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+
+	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
+		DHD_INFO(("API rev is 6 or newer, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
+			h2d_mb_data));
+		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
+#ifdef PCIE_OOB
+		bus->oob_enabled = FALSE;
+#endif /* PCIE_OOB */
+		if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
+			DHD_ERROR(("failure sending the H2D Mailbox message to firmware\n"));
+			goto fail;
+		}
+#ifdef PCIE_OOB
+		bus->oob_enabled = TRUE;
+#endif /* PCIE_OOB */
+		goto done;
+	}
+
+	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
+
+	if (cur_h2d_mb_data != 0) {
+		uint32 i = 0;
+		DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
+		while ((i++ < 100) && cur_h2d_mb_data) {
+			OSL_DELAY(10);
+			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
+		}
+		if (i >= 100) {
+			DHD_ERROR(("%s : waited 1ms for the dngl "
+				"to ack the previous mb transaction\n", __FUNCTION__));
+			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
+				__FUNCTION__, cur_h2d_mb_data));
+		}
+	}
+
+	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
+	dhd_bus_gen_devmb_intr(bus);
+
+done:
+	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
+		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+		/* Mark D3_INFORM in the atomic context to
+		 * skip ringing H2D DB after D3_INFORM
+		 */
+		bus->d3_suspend_pending = TRUE;
+		bus->d3_inform_cnt++;
+	}
+	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
+		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
+		bus->d0_inform_in_use_cnt++;
+	}
+	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
+		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
+		bus->d0_inform_cnt++;
+	}
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+	return BCME_OK;
+
+fail:
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+	return BCME_ERROR;
+}
+
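+/*
+ * Handle dongle-to-host mailbox data. The bits processed below cover firmware
+ * halt (D2H_DEV_FWHALT), deep-sleep entry/exit, host-sleep exit ack and the
+ * D3 ack used by the suspend path.
+ */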
+void
+dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
+{
+#ifdef PCIE_INB_DW
+	unsigned long flags = 0;
+#endif
+	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+
+	if (d2h_mb_data & D2H_DEV_FWHALT)  {
+		DHD_ERROR(("FW trap has happened\n"));
+		dhdpcie_checkdied(bus, NULL, 0);
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
+		return;
+	}
+	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
+		if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+			bus->wait_for_d3_ack) {
+			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			return;
+		}
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+#ifdef PCIE_INB_DW
+		if (INBAND_DW_ENAB(bus)) {
+			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_DS_ACTIVE) {
+				dhdpcie_bus_set_pcie_inband_dw_state(bus,
+						DW_DEVICE_DS_DEV_SLEEP_PEND);
+				if (bus->host_active_cnt == 0) {
+					dhdpcie_bus_set_pcie_inband_dw_state(bus,
+						DW_DEVICE_DS_DEV_SLEEP);
+					dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+				}
+			}
+			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+			dhd_os_ds_enter_wake(bus->dhd);
+		} else
+#endif /* PCIE_INB_DW */
+		{
+			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+		}
+		if (IDMA_DS_ENAB(bus->dhd)) {
+			bus->dongle_in_ds = TRUE;
+		}
+		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
+	}
+	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
+		/* what should we do */
+		bus->dongle_in_ds = FALSE;
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+#ifdef PCIE_INB_DW
+		if (INBAND_DW_ENAB(bus)) {
+			bus->inband_ds_exit_device_cnt++;
+			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+					DW_DEVICE_DS_DISABLED_WAIT) {
+				/* wake up only if some one is waiting in
+				* DW_DEVICE_DS_DISABLED_WAIT state
+				* in this case the waiter will change the state
+				* to DW_DEVICE_DS_DEV_WAKE
+				*/
+				bus->wait_for_ds_exit = 1;
+				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+				dhd_os_ds_exit_wake(bus->dhd);
+			} else {
+				DHD_INFO(("D2H_MB_DATA: not in DW_DEVICE_DS_DISABLED_WAIT!\n"));
+				/*
+				* If there is no one waiting, then update the state from here
+				*/
+				bus->wait_for_ds_exit = 1;
+				dhdpcie_bus_set_pcie_inband_dw_state(bus,
+					DW_DEVICE_DS_DEV_WAKE);
+				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+			}
+		}
+#endif /* PCIE_INB_DW */
+	}
+	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK)  {
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
+#ifdef PCIE_INB_DW
+		if (INBAND_DW_ENAB(bus)) {
+			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+				DW_DEVICE_HOST_WAKE_WAIT) {
+				dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE);
+			}
+			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+		}
+#endif /* PCIE_INB_DW */
+	}
+	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
+		/* what should we do */
+		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
+		if (!bus->wait_for_d3_ack) {
+			/* Disable dongle Interrupts Immediately after D3 */
+			bus->suspend_intr_disable_count++;
+			dhdpcie_bus_intr_disable(bus);
+#if defined(DHD_HANG_SEND_UP_TEST)
+			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+			} else {
+				bus->wait_for_d3_ack = 1;
+				dhd_os_d3ack_wake(bus->dhd);
+			}
+#else /* DHD_HANG_SEND_UP_TEST */
+			bus->wait_for_d3_ack = 1;
+			dhd_os_d3ack_wake(bus->dhd);
+#endif /* DHD_HANG_SEND_UP_TEST */
+		}
+	}
+}
+
+static void
+dhdpcie_handle_mb_data(dhd_bus_t *bus)
+{
+	uint32 d2h_mb_data = 0;
+	uint32 zero = 0;
+	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
+	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
+		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
+			__FUNCTION__, d2h_mb_data));
+		return;
+	}
+
+	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
+
+	DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
+	if (d2h_mb_data & D2H_DEV_FWHALT)  {
+		DHD_ERROR(("FW trap has happened\n"));
+		dhdpcie_checkdied(bus, NULL, 0);
+		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
+		return;
+	}
+	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
+		/* what should we do */
+		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
+		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+		if (IDMA_DS_ENAB(bus->dhd)) {
+			bus->dongle_in_ds = TRUE;
+		}
+		DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
+	}
+	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
+		/* what should we do */
+		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
+		bus->dongle_in_ds = FALSE;
+	}
+	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
+		/* what should we do */
+		DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
+		if (!bus->wait_for_d3_ack) {
+#if defined(DHD_HANG_SEND_UP_TEST)
+			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+			} else {
+				bus->wait_for_d3_ack = 1;
+				dhd_os_d3ack_wake(bus->dhd);
+			}
+#else /* DHD_HANG_SEND_UP_TEST */
+			bus->wait_for_d3_ack = 1;
+			dhd_os_d3ack_wake(bus->dhd);
+#endif /* DHD_HANG_SEND_UP_TEST */
+		}
+	}
+}
+
+static void
+dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
+{
+	uint32 d2h_mb_data = 0;
+	uint32 zero = 0;
+
+	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
+	if (!d2h_mb_data)
+		return;
+
+	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
+
+	dhd_bus_handle_mb_data(bus, d2h_mb_data);
+}
+
+static bool
+dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
+{
+	bool resched = FALSE;
+
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		/* Msg stream interrupt */
+		if (intstatus & I_BIT1) {
+			resched = dhdpci_bus_read_frames(bus);
+		} else if (intstatus & I_BIT0) {
+			/* do nothing for Now */
+		}
+	} else {
+		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
+			bus->api.handle_mb_data(bus);
+
+		if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+			goto exit;
+		}
+
+		if (intstatus & PCIE_MB_D2H_MB_MASK) {
+			resched = dhdpci_bus_read_frames(bus);
+		}
+	}
+
+exit:
+	return resched;
+}
+
+static bool
+dhdpci_bus_read_frames(dhd_bus_t *bus)
+{
+	bool more = FALSE;
+
+	/* First check if there a FW trap */
+	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
+		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
+		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
+		return FALSE;
+	}
+
+	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
+	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
+
+	dhd_prot_process_ctrlbuf(bus->dhd);
+	/* Unlock to give chance for resp to be handled */
+	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
+
+	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
+	/* update the flow ring cpls */
+	dhd_update_txflowrings(bus->dhd);
+
+	/* With heavy TX traffic, we could get a lot of TxStatus
+	 * so add bound
+	 */
+	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
+
+	/* With heavy RX traffic, this routine potentially could spend some time
+	 * processing RX frames without RX bound
+	 */
+	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
+
+	/* Process info ring completion messages */
+	more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
+
+#ifdef IDLE_TX_FLOW_MGMT
+	if (bus->enable_idle_flowring_mgmt) {
+		/* Look for idle flow rings */
+		dhd_bus_check_idle_scan(bus);
+	}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (bus->dhd->hang_was_sent) {
+		more = FALSE;
+	}
+	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->read_shm_fail) {
+		/* Read interrupt state once again to confirm linkdown */
+		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+		if (intstatus != (uint32)-1) {
+			DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
+#ifdef DHD_FW_COREDUMP
+			if (bus->dhd->memdump_enabled) {
+				DHD_OS_WAKE_LOCK(bus->dhd);
+				bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
+				dhd_bus_mem_dump(bus->dhd);
+				DHD_OS_WAKE_UNLOCK(bus->dhd);
+			}
+#endif /* DHD_FW_COREDUMP */
+			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+			dhd_os_send_hang_message(bus->dhd);
+		} else {
+			DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+			bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+			bus->is_linkdown = 1;
+			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+			dhd_os_send_hang_message(bus->dhd);
+		}
+	}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	return more;
+}
+
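+/*
+ * Sanity-check dongle TCM: re-read the shared-area pointer from the last word
+ * of dongle RAM and compare one field (console_addr) against the copy cached
+ * at init time.
+ */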
+bool
+dhdpcie_tcm_valid(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	int rv;
+	uint32 shaddr = 0;
+	pciedev_shared_t sh;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+
+	/* Read last word in memory to determine address of pciedev_shared structure */
+	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+
+	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+		(addr > shaddr)) {
+		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
+			__FUNCTION__, addr));
+		return FALSE;
+	}
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
+		sizeof(pciedev_shared_t))) < 0) {
+		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
+		return FALSE;
+	}
+
+	/* Compare any field in pciedev_shared_t */
+	if (sh.console_addr != bus->pcie_sh->console_addr) {
+		DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
+		DHD_ERROR(("Contents of pciedev_shared_t structure do not match.\n"));
+	}
+
+	return TRUE;
+}
+
+static void
+dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
+{
+	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
+			firmware_api_version, host_api_version);
+	return;
+}
+
+static bool
+dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
+{
+	bool retcode = FALSE;
+
+	DHD_INFO(("firmware api revision %d, host api revision %d\n",
+		firmware_api_version, host_api_version));
+
+	switch (firmware_api_version) {
+	case PCIE_SHARED_VERSION_7:
+	case PCIE_SHARED_VERSION_6:
+	case PCIE_SHARED_VERSION_5:
+		retcode = TRUE;
+		break;
+	default:
+		if (firmware_api_version <= host_api_version)
+			retcode = TRUE;
+	}
+	return retcode;
+}
+
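+/*
+ * Locate and parse pciedev_shared_t. The dongle publishes its address in the
+ * last word of RAM (dongle_ram_base + ramsize - 4), so the host polls that
+ * word for up to MAX_READ_TIMEOUT until it reads something other than 0 or
+ * the NVRAM length token written at download time.
+ */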
+static int
+dhdpcie_readshared(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	int rv, dma_indx_wr_buf, dma_indx_rd_buf;
+	uint32 shaddr = 0;
+	pciedev_shared_t *sh = bus->pcie_sh;
+	dhd_timeout_t tmo;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	/* start a timer for 5 seconds */
+	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
+
+	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
+		/* Read last word in memory to determine address of pciedev_shared structure */
+		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+	}
+
+	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+		(addr > shaddr)) {
+		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
+			__FUNCTION__, addr));
+		DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
+		return BCME_ERROR;
+	} else {
+		bus->shared_addr = (ulong)addr;
+		DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
+			"before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
+	}
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
+		sizeof(pciedev_shared_t))) < 0) {
+		DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
+		return rv;
+	}
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
+	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
+	sh->flags2 = ltoh32(sh->flags2);
+
+	/* load bus console address */
+	bus->console_addr = sh->console_addr;
+
+	/* Read the dma rx offset */
+	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
+	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
+
+	DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
+
+	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
+	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
+	{
+		DHD_ERROR(("%s: pcie_shared version %d in dhd "
+		           "is older than pciedev_shared version %d in dongle\n",
+		           __FUNCTION__, PCIE_SHARED_VERSION,
+		           bus->api.fw_rev));
+		return BCME_ERROR;
+	}
+	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
+
+	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
+		sizeof(uint16) : sizeof(uint32);
+	DHD_INFO(("%s: Dongle advertizes %d size indices\n",
+		__FUNCTION__, bus->rw_index_sz));
+
+#ifdef IDLE_TX_FLOW_MGMT
+	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
+		DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
+			__FUNCTION__));
+		bus->enable_idle_flowring_mgmt = TRUE;
+	}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+	bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
+	bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
+
+	bus->dhd->idma_retention_ds = (sh->flags & PCIE_SHARED_IDMA_RETENTION_DS) ? TRUE : FALSE;
+
+	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
+
+	/* Does the FW support DMA'ing r/w indices */
+	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
+		if (!bus->dhd->dma_ring_upd_overwrite) {
+			if (!IFRM_ENAB(bus->dhd)) {
+				bus->dhd->dma_h2d_ring_upd_support = TRUE;
+			}
+			bus->dhd->dma_d2h_ring_upd_support = TRUE;
+		}
+
+		if (bus->dhd->dma_d2h_ring_upd_support)
+			bus->dhd->d2h_sync_mode = 0;
+
+		DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
+			__FUNCTION__,
+			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
+			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
+	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
+		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
+			__FUNCTION__));
+		return BCME_UNSUPPORTED;
+	} else {
+		bus->dhd->dma_h2d_ring_upd_support = FALSE;
+		bus->dhd->dma_d2h_ring_upd_support = FALSE;
+	}
+
+	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
+	{
+		ring_info_t  ring_info;
+
+		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
+			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
+			return rv;
+
+		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
+		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
+
+
+		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
+			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
+			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
+			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
+			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
+			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
+		}
+		else {
+			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
+			bus->max_submission_rings = bus->max_tx_flowrings;
+			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
+			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
+		}
+		if (bus->max_completion_rings == 0) {
+			DHD_ERROR(("dongle completion rings are invalid %d\n",
+				bus->max_completion_rings));
+			return BCME_ERROR;
+		}
+		if (bus->max_submission_rings == 0) {
+			DHD_ERROR(("dongle submission rings are invalid %d\n",
+				bus->max_submission_rings));
+			return BCME_ERROR;
+		}
+		if (bus->max_tx_flowrings == 0) {
+			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
+			return BCME_ERROR;
+		}
+
+		/* If both FW and host support DMA'ing indices, allocate memory and notify FW.
+		 * The max_sub_queues value is read from the FW-initialized ring_info.
+		 */
+		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
+			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
+			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
+
+			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
+						"Host will use w/r indices in TCM\n",
+						__FUNCTION__));
+				bus->dhd->dma_h2d_ring_upd_support = FALSE;
+				bus->dhd->idma_enable = FALSE;
+			}
+		}
+
+		if (bus->dhd->dma_d2h_ring_upd_support) {
+			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
+			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
+
+			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
+						"Host will use w/r indices in TCM\n",
+						__FUNCTION__));
+				bus->dhd->dma_d2h_ring_upd_support = FALSE;
+			}
+		}
+
+		if (IFRM_ENAB(bus->dhd)) {
+			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
+
+			if (dma_indx_wr_buf != BCME_OK) {
+				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
+						__FUNCTION__));
+				bus->dhd->ifrm_enable = FALSE;
+			}
+		}
+
+		/* read ringmem and ringstate ptrs from shared area and store in host variables */
+		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
+		if (dhd_msg_level & DHD_INFO_VAL) {
+			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
+		}
+		DHD_INFO(("%s: ring_info\n", __FUNCTION__));
+
+		DHD_ERROR(("%s: max H2D queues %d\n",
+			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
+
+		DHD_INFO(("mail box address\n"));
+		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
+			__FUNCTION__, bus->h2d_mb_data_ptr_addr));
+		DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
+			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
+	}
+
+	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
+		__FUNCTION__, bus->dhd->d2h_sync_mode));
+
+	bus->dhd->d2h_hostrdy_supported =
+		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
+
+#ifdef PCIE_OOB
+	bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE;
+#endif /* PCIE_OOB */
+
+#ifdef PCIE_INB_DW
+	bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE;
+#endif /* PCIE_INB_DW */
+
+#if defined(PCIE_OOB) && defined(PCIE_INB_DW)
+	DHD_ERROR(("FW supports Inband dw ? %s oob dw ? %s\n",
+		bus->dhd->d2h_inband_dw ? "Y":"N",
+		bus->dhd->d2h_no_oob_dw ? "N":"Y"));
+#endif /* defined(PCIE_OOB) && defined(PCIE_INB_DW) */
+
+	bus->dhd->ext_trap_data_supported =
+		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
+
+	return BCME_OK;
+} /* dhdpcie_readshared */
+
+/** Read ring mem and ring state ptr info from shared memory area in device memory */
+static void
+dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
+{
+	uint16 i = 0;
+	uint16 j = 0;
+	uint32 tcm_memloc;
+	uint32	d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
+	uint16  max_tx_flowrings = bus->max_tx_flowrings;
+
+	/* Ring mem ptr info */
+	/* Allocated in the order
+		H2D_MSGRING_CONTROL_SUBMIT              0
+		H2D_MSGRING_RXPOST_SUBMIT               1
+		D2H_MSGRING_CONTROL_COMPLETE            2
+		D2H_MSGRING_TX_COMPLETE                 3
+		D2H_MSGRING_RX_COMPLETE                 4
+	*/
+
+	{
+		/* ringmemptr holds start of the mem block address space */
+		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
+
+		/* Find out the ringmem ptr for each common ring */
+		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
+			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+			/* Update mem block */
+			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
+			DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
+				i, bus->ring_sh[i].ring_mem_addr));
+		}
+	}
+
+	/* Ring state mem ptr info */
+	{
+		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
+		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
+		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
+		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
+
+		/* Store h2d common ring write/read pointers */
+		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
+			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+			/* update mem block */
+			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
+			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
+
+			DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		}
+
+		/* Store d2h common ring write/read pointers */
+		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
+			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+
+			/* update mem block */
+			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
+			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
+
+			DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		}
+
+		/* Store txflow ring write/read pointers */
+		if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
+			max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
+		} else {
+			/* Account for Debug info h2d ring located after the last tx flow ring */
+			max_tx_flowrings = max_tx_flowrings + 1;
+		}
+		for (j = 0; j < max_tx_flowrings; i++, j++)
+		{
+			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+			/* update mem block */
+			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
+			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
+
+			DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
+				__FUNCTION__, i,
+				bus->ring_sh[i].ring_state_w,
+				bus->ring_sh[i].ring_state_r));
+		}
+		/* store wr/rd pointers for the debug info completion ring */
+		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
+		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
+		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
+			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+	}
+} /* dhd_fillup_ring_sharedptr_info */
+
+/**
+ * Initialize bus module: prepare for communication with the dongle. Called after downloading
+ * firmware into the dongle.
+ */
+int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	int  ret = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	/* Make sure we're talking to the core. */
+	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	ASSERT(bus->reg != NULL);
+
+	/* before opening up the bus for data transfer, check that the shared area is intact */
+	ret = dhdpcie_readshared(bus);
+	if (ret < 0) {
+		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+		return ret;
+	}
+
+	/* Make sure we're talking to the core. */
+	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	ASSERT(bus->reg != NULL);
+
+	/* Set bus state according to enable result */
+	dhdp->busstate = DHD_BUS_DATA;
+	bus->d3_suspend_pending = FALSE;
+
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+	if (bus->pcie_sh->flags2 & PCIE_SHARED_D2H_D11_TX_STATUS) {
+		uint32 flags2 = bus->pcie_sh->flags2;
+		uint32 addr;
+
+		addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
+		flags2 |= PCIE_SHARED_H2D_D11_TX_STATUS;
+		ret = dhdpcie_bus_membytes(bus, TRUE, addr,
+			(uint8 *)&flags2, sizeof(flags2));
+		if (ret < 0) {
+			DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
+				__FUNCTION__));
+			return ret;
+		}
+		bus->pcie_sh->flags2 = flags2;
+		bus->dhd->d11_tx_status = TRUE;
+	}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+
+	if (!dhd_download_fw_on_driverload)
+		dhd_dpc_enable(bus->dhd);
+	/* Enable the interrupt after device is up */
+	dhdpcie_bus_intr_enable(bus);
+
+	/* bcmsdh_intr_unmask(bus->sdh); */
+#ifdef DHD_PCIE_RUNTIMEPM
+	bus->idlecount = 0;
+	bus->idletime = (int32)MAX_IDLE_COUNT;
+	init_waitqueue_head(&bus->rpm_queue);
+	mutex_init(&bus->pm_lock);
+#else
+	bus->idletime = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef PCIE_INB_DW
+	/* Initialize the lock to serialize Device Wake Inband activities */
+	if (!bus->inb_lock) {
+		bus->inb_lock = dhd_os_spin_lock_init(bus->dhd->osh);
+	}
+#endif
+
+
+	/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
+	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
+		bus->use_d0_inform = TRUE;
+	} else {
+		bus->use_d0_inform = FALSE;
+	}
+
+	return ret;
+}
+
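+/* Clear the last word of dongle RAM, where the dongle later posts the pciedev_shared address */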
+static void
+dhdpcie_init_shared_addr(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	uint32 val = 0;
+	addr = bus->dongle_ram_base + bus->ramsize - 4;
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
+}
+
+
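+/* Check whether the PCIe vendor/device ID pair is a supported Broadcom chip.
+ * Returns 0 on a match, -ENODEV otherwise.
+ */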
+bool
+dhdpcie_chipmatch(uint16 vendor, uint16 device)
+{
+	if (vendor != PCI_VENDOR_ID_BROADCOM) {
+#ifndef DHD_EFI
+		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
+			vendor, device));
+#endif /* DHD_EFI */
+		return (-ENODEV);
+	}
+
+	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
+		(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
+		(device == BCM43569_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
+		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
+		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
+		(device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
+		(device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device))
+		return 0;
+
+	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
+		(device == BCM43452_D11AC5G_ID))
+		return 0;
+
+	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
+		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
+		return 0;
+
+	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
+		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
+		return 0;
+
+	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
+		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
+		(device == BCM4358_D11AC5G_ID))
+		return 0;
+
+	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
+		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
+		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
+		(device == BCM4359_D11AC5G_ID))
+		return 0;
+
+	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
+		(device == BCM43596_D11AC5G_ID))
+		return 0;
+
+	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
+		(device == BCM43597_D11AC5G_ID))
+		return 0;
+
+	if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
+		(device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
+		(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
+		(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
+		(device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
+		return 0;
+	}
+
+	if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
+		(device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
+		(device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID))
+		return 0;
+#ifndef DHD_EFI
+	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
+#endif
+	return (-ENODEV);
+} /* dhdpcie_chipmatch */
+
+/**
+ * Name:  dhdpcie_cc_nvmshadow
+ *
+ * Description:
+ * A shadow of OTP/SPROM exists in ChipCommon Region
+ * between 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
+ * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
+ * can also be read from ChipCommon Registers.
+ */
+static int
+dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
+{
+	uint16 dump_offset = 0;
+	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
+
+	/* Table for 65nm OTP Size (in bits) */
+	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
+
+	volatile uint16 *nvm_shadow;
+
+	uint cur_coreid;
+	uint chipc_corerev;
+	chipcregs_t *chipcregs;
+
+	/* Save the current core */
+	cur_coreid = si_coreid(bus->sih);
+	/* Switch to ChipC */
+	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+	ASSERT(chipcregs != NULL);
+
+	chipc_corerev = si_corerev(bus->sih);
+
+	/* Check ChipcommonCore Rev */
+	if (chipc_corerev < 44) {
+		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check ChipID */
+	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
+	        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
+	        ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
+		DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
+					"4350/4345/4355/4364 only\n", __FUNCTION__));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
+	if (chipcregs->sromcontrol & SRC_PRESENT) {
+		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits (0x2) */
+		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
+					>> SRC_SIZE_SHIFT))) * 1024;
+		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
+	}
+
+	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
+		bcm_bprintf(b, "\nOTP Present");
+
+		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
+			== OTPL_WRAP_TYPE_40NM) {
+			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
+			/* Chipcommon rev51 is a variation on rev45 and does not support
+			 * the latest OTP configuration.
+			 */
+			if (chipc_corerev != 51 && chipc_corerev >= 49) {
+				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
+					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			} else {
+				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
+				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			}
+		} else {
+			/* This part is untested since newer chips have 40nm OTP */
+			/* Chipcommon rev51 is a variation on rev45 and does not support
+			 * the latest OTP configuration.
+			 */
+			if (chipc_corerev != 51 && chipc_corerev >= 49) {
+				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
+						>> OTPL_ROW_SIZE_SHIFT];
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			} else {
+				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
+					        >> CC_CAP_OTPSIZE_SHIFT];
+				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
+					__FUNCTION__));
+			}
+		}
+	}
+
+	/* Chipcommon rev51 is a variation on rev45 and does not support
+	 * the latest OTP configuration.
+	 */
+	if (chipc_corerev != 51 && chipc_corerev >= 49) {
+		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
+			DHD_ERROR(("%s: SPROM and OTP could not be found "
+				"sromcontrol = %x, otplayout = %x \n",
+				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
+			return BCME_NOTFOUND;
+		}
+	} else {
+		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
+			DHD_ERROR(("%s: SPROM and OTP could not be found "
+				"sromcontrol = %x, capablities = %x \n",
+				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
+			return BCME_NOTFOUND;
+		}
+	}
+
+	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
+	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
+		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
+
+		bcm_bprintf(b, "OTP Strap selected.\n"
+		               "\nOTP Shadow in ChipCommon:\n");
+
+		dump_size = otp_size / 16; /* 16-bit words */
+
+	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
+		(chipcregs->sromcontrol & SRC_PRESENT)) {
+
+		bcm_bprintf(b, "SPROM Strap selected\n"
+				"\nSPROM Shadow in ChipCommon:\n");
+
+		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
+		/* dump_size in 16bit words */
+		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
+	} else {
+		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
+			__FUNCTION__));
+		return BCME_NOTFOUND;
+	}
+
+	if (bus->regs == NULL) {
+		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
+		return BCME_NOTREADY;
+	} else {
+		bcm_bprintf(b, "\n OffSet:");
+
+		/* Chipcommon rev51 is a variation on rev45 and does not support
+		 * the latest OTP configuration.
+		 */
+		if (chipc_corerev != 51 && chipc_corerev >= 49) {
+			/* ChipCommon maps only 8 Kbits;
+			* for ccrev >= 49 the OTP size is around 12 Kbits, so use the GCI core
+			*/
+			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
+		} else {
+			/* Point to the SPROM/OTP shadow in ChipCommon */
+			nvm_shadow = chipcregs->sromotp;
+		}
+
+		if (nvm_shadow == NULL) {
+			DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
+			return BCME_NOTFOUND;
+		}
+
+		/*
+		* Read 16 bits / iteration.
+		* dump_size & dump_offset in 16-bit words
+		*/
+		while (dump_offset < dump_size) {
+			if (dump_offset % 2 == 0)
+				/* Print the offset in the shadow space in Bytes */
+				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
+
+			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
+			dump_offset += 0x1;
+		}
+	}
+
+	/* Switch back to the original core */
+	si_setcore(bus->sih, cur_coreid, 0);
+
+	return BCME_OK;
+} /* dhdpcie_cc_nvmshadow */
+
+/** Flow rings are dynamically created and destroyed */
+void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
+	unsigned long flags;
+
+	queue = &flow_ring_node->queue;
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl to prevent access to a flushed pkt
+	 * when a new packet arrives from the network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* clean up BUS level info */
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+
+	/* Reinitialise flowring's queue */
+	dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
+	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
+	flow_ring_node->active = FALSE;
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+	dll_delete(&flow_ring_node->list);
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+	/* Release the flowring object back into the pool */
+	dhd_prot_flowrings_pool_release(bus->dhd,
+		flow_ring_node->flowid, flow_ring_node->prot_info);
+
+	/* Free the flowid back to the flowid allocator */
+	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
+	                flow_ring_node->flowid);
+}
+
+/**
+ * Allocate a flow ring buffer, initialize the ring buffer, and send a message
+ * to the device about flow ring creation.
+ */
+int
+dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
+{
+	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_INFO(("%s :Flow create\n", __FUNCTION__));
+
+	/* Send Msg to device about flow ring creation */
+	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
+		return BCME_NOMEM;
+
+	return BCME_OK;
+}
+
+/** Handle response from dongle on a 'flow ring create' request */
+void
+dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
+		     __FUNCTION__, status));
+		/* Call Flow clean up */
+		dhd_bus_clean_flow_ring(bus, flow_ring_node);
+		return;
+	}
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Now add the flow ring node to the active list.
+	 * This code used to live in dhd_flowid_lookup, where the node was added
+	 * to the active list before its contents were filled in by
+	 * dhd_prot_flow_ring_create. If a D2H interrupt arrived after the node
+	 * was added but before it was populated, the bottom half would call
+	 * dhd_update_txflowrings, which walks the active flow ring list and
+	 * operates on its nodes; since dhd_prot_flow_ring_create had not
+	 * finished yet, the contents of flow_ring_node could still be NULL,
+	 * leading to crashes. Hence the flow_ring_node is added to the active
+	 * list only after it is truly created, i.e. after the create response
+	 * message is received from the dongle.
+	 */
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
+
+	return;
+}
+
+int
+dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
+{
+	void * pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
+		return BCME_ERROR;
+	}
+	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
+
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl to prevent access to a flushed pkt
+	 * when a new packet arrives from the network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Send Msg to device about flow ring deletion */
+	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
+
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
+		    __FUNCTION__, status));
+		return;
+	}
+	/* Call Flow clean up */
+	dhd_bus_clean_flow_ring(bus, flow_ring_node);
+
+	return;
+
+}
+
+int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
+	 * once flow ring flush response is received for this flowring node.
+	 */
+	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl to prevent access to a flushed pkt
+	 * when a new packet arrives from the network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Send Msg to device about flow ring flush */
+	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
+
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
+		    __FUNCTION__, status));
+		return;
+	}
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+	return;
+}
+
+uint32
+dhd_bus_max_h2d_queues(struct dhd_bus *bus)
+{
+	return bus->max_submission_rings;
+}
+
+/* To be symmetric with SDIO */
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+	return;
+}
+
+void
+dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
+{
+	dhdp->bus->is_linkdown = val;
+}
+
+#ifdef IDLE_TX_FLOW_MGMT
+/* resume request */
+int
+dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
+{
+	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
+
+	flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
+
+	/* Send Msg to device about flow ring resume */
+	dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
+
+	return BCME_OK;
+}
+
+/* add the node back to active flowring */
+void
+dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
+{
+
+	flow_ring_node_t *flow_ring_node;
+
+	DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Error Status = %d \n",
+			__FUNCTION__, status));
+		return;
+	}
+
+	DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
+		__FUNCTION__, flow_ring_node->flowid,  flow_ring_node->queue.len));
+
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+
+	dhd_bus_schedule_queue(bus, flowid, FALSE);
+	return;
+}
+
+/* scan the flow rings in active list for idle time out */
+void
+dhd_bus_check_idle_scan(dhd_bus_t *bus)
+{
+	uint64 time_stamp; /* in millisec */
+	uint64 diff;
+
+	time_stamp = OSL_SYSUPTIME();
+	diff = time_stamp - bus->active_list_last_process_ts;
+
+	if (diff > IDLE_FLOW_LIST_TIMEOUT) {
+		dhd_bus_idle_scan(bus);
+		bus->active_list_last_process_ts = OSL_SYSUPTIME();
+	}
+
+	return;
+}
+
+
+/* scan the nodes in active list till it finds a non idle node */
+void
+dhd_bus_idle_scan(dhd_bus_t *bus)
+{
+	dll_t *item, *prev;
+	flow_ring_node_t *flow_ring_node;
+	uint64 time_stamp, diff;
+	unsigned long flags;
+	uint16 ringid[MAX_SUSPEND_REQ];
+	uint16 count = 0;
+
+	time_stamp = OSL_SYSUPTIME();
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+
+	for (item = dll_tail_p(&bus->flowring_active_list);
+	         !dll_end(&bus->flowring_active_list, item); item = prev) {
+		prev = dll_prev_p(item);
+
+		flow_ring_node = dhd_constlist_to_flowring(item);
+
+		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
+			continue;
+
+		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
+			/* Takes care of deleting zombie rings */
+			/* delete from the active list */
+			DHD_INFO(("deleting flow id %u from active list\n",
+				flow_ring_node->flowid));
+			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
+			continue;
+		}
+
+		diff = time_stamp - flow_ring_node->last_active_ts;
+
+		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
+			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
+			/* delete from the active list */
+			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
+			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
+			ringid[count] = flow_ring_node->flowid;
+			count++;
+			if (count == MAX_SUSPEND_REQ) {
+				/* create a batch message now!! */
+				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
+				count = 0;
+			}
+
+		} else {
+
+			/* No more scanning, break from here! */
+			break;
+		}
+	}
+
+	if (count) {
+		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
+	}
+
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+	return;
+}
+
+void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+	unsigned long flags;
+	dll_t* list;
+
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+	/* check if the node is already at head, otherwise delete it and prepend */
+	list = dll_head_p(&bus->flowring_active_list);
+	if (&flow_ring_node->list != list) {
+		dll_delete(&flow_ring_node->list);
+		dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
+	}
+
+	/* update flow ring timestamp */
+	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
+
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+	return;
+}
+
+void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+	unsigned long flags;
+
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+
+	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
+	/* update flow ring timestamp */
+	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
+
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+	return;
+}
+
+void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+	dll_delete(&flow_ring_node->list);
+}
+
+void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
+{
+	unsigned long flags;
+
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+
+	__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
+
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+	return;
+}
+#endif /* IDLE_TX_FLOW_MGMT */
+
+int
+dhdpcie_bus_clock_start(struct dhd_bus *bus)
+{
+	return dhdpcie_start_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_clock_stop(struct dhd_bus *bus)
+{
+	return dhdpcie_stop_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_disable_device(struct dhd_bus *bus)
+{
+	return dhdpcie_disable_device(bus);
+}
+
+int
+dhdpcie_bus_enable_device(struct dhd_bus *bus)
+{
+	return dhdpcie_enable_device(bus);
+}
+
+int
+dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
+{
+	return dhdpcie_alloc_resource(bus);
+}
+
+void
+dhdpcie_bus_free_resource(struct dhd_bus *bus)
+{
+	dhdpcie_free_resource(bus);
+}
+
+int
+dhd_bus_request_irq(struct dhd_bus *bus)
+{
+	return dhdpcie_bus_request_irq(bus);
+}
+
+bool
+dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
+{
+	return dhdpcie_dongle_attach(bus);
+}
+
+int
+dhd_bus_release_dongle(struct dhd_bus *bus)
+{
+	bool dongle_isolation;
+	osl_t *osh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		osh = bus->osh;
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+		}
+	}
+
+	return 0;
+}
+
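+/* Enable or disable PCIe CTO (completion timeout) detection: program the PCI interrupt
+ * mask, SPROM/backplane control and the ctoctrl threshold registers accordingly.
+ */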
+void
+dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
+{
+	if (enable) {
+		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
+			PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
+		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_BACKPLANE_EN);
+
+		if (bus->dhd->cto_threshold == 0) {
+			bus->dhd->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
+		}
+
+		si_corereg(bus->sih, bus->sih->buscoreidx,
+				OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
+				((bus->dhd->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
+				PCIE_CTO_TO_THRESHHOLD_MASK) |
+				((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
+				PCIE_CTO_CLKCHKCNT_MASK) |
+				PCIE_CTO_ENAB_MASK);
+	} else {
+		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
+		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, 0);
+
+		si_corereg(bus->sih, bus->sih->buscoreidx,
+				OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
+	}
+}
+
+static void
+dhdpcie_cto_error_recovery(struct dhd_bus *bus)
+{
+	uint32 pci_intmask, err_status;
+	uint8 i = 0;
+
+	pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
+	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
+
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	/* reset backplane */
+	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_CFG_TO_SB_RST);
+
+	/* clear timeout error */
+	while (1) {
+		err_status =  si_corereg(bus->sih, bus->sih->buscoreidx,
+			OFFSETOF(sbpcieregs_t, dm_errlog),
+			0, 0);
+		if (err_status & PCIE_CTO_ERR_MASK) {
+			si_corereg(bus->sih, bus->sih->buscoreidx,
+					OFFSETOF(sbpcieregs_t, dm_errlog),
+					~0, PCIE_CTO_ERR_MASK);
+		} else {
+			break;
+		}
+		OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
+		i++;
+		if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
+			DHD_ERROR(("cto recovery fail\n"));
+
+			DHD_OS_WAKE_UNLOCK(bus->dhd);
+			return;
+		}
+	}
+
+	/* clear interrupt status */
+	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
+
+	/* Halt ARM & remove reset */
+	/* TBD: an ARM halt could be added here if needed */
+
+	DHD_ERROR(("cto recovery success\n"));
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+int
+dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+	return dhdpcie_oob_intr_register(dhdp->bus);
+}
+
+void
+dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+	dhdpcie_oob_intr_unregister(dhdp->bus);
+}
+
+void
+dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+	dhdpcie_oob_intr_set(dhdp->bus, enable);
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+
+
+bool
+dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
+{
+	return bus->dhd->d2h_hostrdy_supported;
+}
+
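+/* Dump 32-bit core registers over the backplane: 'index' selects a 4KB core window
+ * (SI_ENUM_BASE + (index << 12)); words from first_addr to last_addr are logged.
+ */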
+void
+dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
+{
+	dhd_bus_t *bus = pub->bus;
+	uint32	coreoffset = index << 12;
+	uint32	core_addr = SI_ENUM_BASE + coreoffset;
+	uint32 value;
+
+
+	while (first_addr <= last_addr) {
+		core_addr = SI_ENUM_BASE + coreoffset + first_addr;
+		if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) {
+			DHD_ERROR(("Invalid size/addr combination \n"));
+		}
+		DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
+		first_addr = first_addr + 4;
+	}
+}
+
+#ifdef PCIE_OOB
+bool
+dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus)
+{
+	if (!bus->dhd)
+		return FALSE;
+	if (bus->oob_enabled) {
+		return !bus->dhd->d2h_no_oob_dw;
+	} else {
+		return FALSE;
+	}
+}
+#endif /* PCIE_OOB */
+
+void
+dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
+{
+	DHD_ERROR(("ENABLING DW:%d\n", dw_option));
+	bus->dw_option = dw_option;
+}
+
+#ifdef PCIE_INB_DW
+bool
+dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus)
+{
+	if (!bus->dhd)
+		return FALSE;
+	if (bus->inb_enabled) {
+		return bus->dhd->d2h_inband_dw;
+	} else {
+		return FALSE;
+	}
+}
+
+void
+dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state)
+{
+	if (!INBAND_DW_ENAB(bus))
+		return;
+
+	DHD_INFO(("%s:%d\n", __FUNCTION__, state));
+	bus->dhd->ds_state = state;
+	if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) {
+		bus->ds_exit_timeout = 100;
+	}
+	if (state == DW_DEVICE_HOST_WAKE_WAIT) {
+		bus->host_sleep_exit_timeout = 100;
+	}
+	if (state == DW_DEVICE_DS_DEV_WAKE) {
+		bus->ds_exit_timeout = 0;
+	}
+	if (state == DW_DEVICE_DS_ACTIVE) {
+		bus->host_sleep_exit_timeout = 0;
+	}
+}
+
+enum dhd_bus_ds_state
+dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus)
+{
+	if (!INBAND_DW_ENAB(bus))
+		return DW_DEVICE_DS_INVALID;
+	return bus->dhd->ds_state;
+}
+#endif /* PCIE_INB_DW */
+
+bool
+dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
+{
+	if (!bus->dhd)
+		return FALSE;
+	else if (bus->idma_enabled) {
+		return bus->dhd->idma_enable;
+	} else {
+		return FALSE;
+	}
+}
+
+bool
+dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
+{
+	if (!bus->dhd)
+		return FALSE;
+	else if (bus->ifrm_enabled) {
+		return bus->dhd->ifrm_enable;
+	} else {
+		return FALSE;
+	}
+}
+
+
+void
+dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+	trap_t *tr = &bus->dhd->last_trap_info;
+	bcm_bprintf(strbuf,
+		"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+		" lp 0x%x, rpc 0x%x"
+		"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
+		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
+		ltoh32(bus->pcie_sh->trap_addr),
+		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
+		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7));
+}
+
+int
+dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
+{
+	int bcmerror = 0;
+	struct dhd_bus *bus = dhdp->bus;
+
+	if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) {
+			DHD_ERROR(("Invalid size/addr combination \n"));
+			bcmerror = BCME_ERROR;
+	}
+
+	return bcmerror;
+}
+
+int
+dhd_get_idletime(dhd_pub_t *dhd)
+{
+	return dhd->bus->idletime;
+}
+
+#ifdef DHD_SSSR_DUMP
+
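+/* Backplane register access helper: read or write a single 32-bit word at 'addr' and log it */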
+static INLINE void
+dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
+{
+	OSL_DELAY(1);
+	si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read);
+	DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
+	return;
+}
+
+static int
+dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
+	uint addr_reg, uint data_reg)
+{
+	uint addr;
+	uint val = 0;
+	int i;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	if (!buf) {
+		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (!fifo_size) {
+		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Set the base address offset to 0 */
+	addr = addr_reg;
+	val = 0;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	addr = data_reg;
+	/* Read 4 bytes at once and loop for fifo_size / 4 */
+	for (i = 0; i < fifo_size / 4; i++) {
+		si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
+		buf[i] = val;
+		OSL_DELAY(1);
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_get_sssr_vasip_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
+	uint addr_reg)
+{
+	uint addr;
+	uint val = 0;
+	int i;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	if (!buf) {
+		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (!fifo_size) {
+		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Check if vasip clk is disabled, if yes enable it */
+	addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
+	dhd_sbreg_op(dhd, addr, &val, TRUE);
+	if (!val) {
+		val = 1;
+		dhd_sbreg_op(dhd, addr, &val, FALSE);
+	}
+
+	addr = addr_reg;
+	/* Read 4 bytes at once and loop for fifo_size / 4 */
+	for (i = 0; i < fifo_size / 4; i++, addr += 4) {
+		si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
+		buf[i] = val;
+		OSL_DELAY(1);
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
+{
+	uint addr;
+	uint val;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	/* conditionally clear bits [11:8] of PowerCtrl */
+	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
+	dhd_sbreg_op(dhd, addr, &val, TRUE);
+	if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
+		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
+		val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
+		dhd_sbreg_op(dhd, addr, &val, FALSE);
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
+{
+	uint addr;
+	uint val;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	/* conditionally clear bits [11:8] of PowerCtrl */
+	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
+	dhd_sbreg_op(dhd, addr, &val, TRUE);
+	if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
+		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
+		val = 0;
+		dhd_sbreg_op(dhd, addr, &val, FALSE);
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
+{
+	uint addr;
+	uint val;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	/* clear chipcommon intmask */
+	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
+	val = 0x0;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	/* clear PMUIntMask0 */
+	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
+	val = 0x0;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	/* clear PMUIntMask1 */
+	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
+	val = 0x0;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	/* clear res_req_timer */
+	addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
+	val = 0x0;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	/* clear macresreqtimer */
+	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
+	val = 0x0;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	/* clear macresreqtimer1 */
+	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
+	val = 0x0;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	/* clear VasipClkEn */
+	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+		addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
+		val = 0x0;
+		dhd_sbreg_op(dhd, addr, &val, FALSE);
+	}
+
+	return BCME_OK;
+}
+
+static int
+dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
+{
+	int i;
+	uint addr;
+	uint val = 0;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		/* Check if bit 0 of resetctrl is cleared */
+		addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
+		dhd_sbreg_op(dhd, addr, &val, TRUE);
+		if (!(val & 1)) {
+			dhd->sssr_d11_outofreset[i] = TRUE;
+		} else {
+			dhd->sssr_d11_outofreset[i] = FALSE;
+		}
+		DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
+			__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
+{
+	int i;
+	uint addr;
+	uint val = 0;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		if (dhd->sssr_d11_outofreset[i]) {
+			/* clear request clk only if itopoobb is non zero */
+			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
+			dhd_sbreg_op(dhd, addr, &val, TRUE);
+			if (val != 0) {
+				/* clear clockcontrolstatus */
+				addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
+				val =
+				dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
+				dhd_sbreg_op(dhd, addr, &val, FALSE);
+			}
+		}
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
+{
+	uint addr;
+	uint val = 0;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	/* Check if bit 0 of resetctrl is cleared */
+	addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
+	dhd_sbreg_op(dhd, addr, &val, TRUE);
+	if (!(val & 1)) {
+		/* clear request clk only if itopoobb is non zero */
+		addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
+		dhd_sbreg_op(dhd, addr, &val, TRUE);
+		if (val != 0) {
+			/* clear clockcontrolstatus */
+			addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
+			val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+		}
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
+{
+	uint addr;
+	uint val = 0;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	/* clear request clk only if itopoobb is non zero */
+	addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
+	dhd_sbreg_op(dhd, addr, &val, TRUE);
+	if (val) {
+		/* clear clockcontrolstatus */
+		addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
+		val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
+		dhd_sbreg_op(dhd, addr, &val, FALSE);
+	}
+	return BCME_OK;
+}
+
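+/* Write LTR_ACTIVE followed by LTR_SLEEP to the PCIe ltrstate register */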
+static int
+dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
+{
+	uint addr;
+	uint val = 0;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
+	val = LTR_ACTIVE;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	val = LTR_SLEEP;
+	dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+	return BCME_OK;
+}
+
+static int
+dhdpcie_clear_clk_req(dhd_pub_t *dhd)
+{
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	dhdpcie_arm_clear_clk_req(dhd);
+
+	dhdpcie_d11_clear_clk_req(dhd);
+
+	dhdpcie_pcie_clear_clk_req(dhd);
+
+	return BCME_OK;
+}
+
+static int
+dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
+{
+	int i;
+	uint addr;
+	uint val = 0;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		if (dhd->sssr_d11_outofreset[i]) {
+			/* disable core by setting bit 0 */
+			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
+			val = 1;
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+			OSL_DELAY(6000);
+
+			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
+			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+			/* enable core by clearing bit 0 */
+			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
+			val = 0;
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
+			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+
+			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
+			dhd_sbreg_op(dhd, addr, &val, FALSE);
+		}
+	}
+	return BCME_OK;
+}
+
+static int
+dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
+{
+	int i;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		if (dhd->sssr_d11_outofreset[i]) {
+			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
+				dhd->sssr_reg_info.mac_regs[i].sr_size,
+				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
+				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
+		}
+	}
+
+	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+		dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_before,
+			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
+			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
+	}
+
+	return BCME_OK;
+}
+
+static int
+dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
+{
+	int i;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+
+	for (i = 0; i < MAX_NUM_D11CORES; i++) {
+		if (dhd->sssr_d11_outofreset[i]) {
+			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
+				dhd->sssr_reg_info.mac_regs[i].sr_size,
+				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
+				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
+		}
+	}
+
+	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
+		dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_after,
+			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
+			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
+	}
+
+	return BCME_OK;
+}
+
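+/* Collect an SSSR dump: capture D11/VASIP state before save/restore, force the cores
+ * through a save/restore cycle, then capture the state again and schedule the dump.
+ */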
+int
+dhdpcie_sssr_dump(dhd_pub_t *dhd)
+{
+	if (!dhd->sssr_inited) {
+		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (dhd->bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dhdpcie_d11_check_outofreset(dhd);
+
+	DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
+	if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
+		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dhdpcie_clear_intmask_and_timer(dhd);
+	dhdpcie_suspend_chipcommon_powerctrl(dhd);
+	dhdpcie_clear_clk_req(dhd);
+	dhdpcie_pcie_send_ltrsleep(dhd);
+
+	/* Wait for some time before Restore */
+	OSL_DELAY(6000);
+
+	dhdpcie_resume_chipcommon_powerctrl(dhd);
+	dhdpcie_bring_d11_outofreset(dhd);
+
+	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
+	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
+		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dhd_schedule_sssr_dump(dhd);
+
+	return BCME_OK;
+}
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_bus_get_wakecount(dhd_pub_t *dhd)
+{
+	if (!dhd->bus) {
+		return NULL;
+	}
+	return &dhd->bus->wake_counts;
+}
+int
+dhd_bus_get_bus_wake(dhd_pub_t *dhd)
+{
+	return bcmpcie_set_get_wake(dhd->bus, 0);
+}
+#endif /* DHD_WAKE_STATUS */
+
+#ifdef BCM_ASLR_HEAP
+/* Writes random number(s) to the TCM. On initialization the FW reads the random-number
+ * metadata and then, based on that metadata, reads the random number(s) from the TCM.
+ */
+static void
+dhdpcie_wrt_rnd(struct dhd_bus *bus)
+{
+	bcm_rand_metadata_t rnd_data;
+	uint32 rand_no;
+	uint32 count = 1;	/* start with 1 random number */
+
+	uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
+		((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
+	rnd_data.signature = htol32(BCM_RNG_SIGNATURE);
+	rnd_data.count = htol32(count);
+	/* write the metadata about random number */
+	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
+	/* scale back by number of random number counts */
+	addr -= sizeof(count) * count;
+	/* Now write the random number(s) */
+	rand_no = htol32(dhd_get_random_number());
+	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rand_no, sizeof(rand_no));
+}
+#endif /* BCM_ASLR_HEAP */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.h b/drivers/net/wireless/bcmdhd/dhd_pcie.h
new file mode 100644
index 0000000..92b07c6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie.h
@@ -0,0 +1,607 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pcie.h 707536 2017-06-28 04:23:48Z $
+ */
+
+
+#ifndef dhd_pcie_h
+#define dhd_pcie_h
+
+#include <bcmpcie.h>
+#include <hnd_cons.h>
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#ifdef CONFIG_PCI_MSM
+#include <linux/msm_pcie.h>
+#else
+#include <mach/msm_pcie.h>
+#endif /* CONFIG_PCI_MSM */
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)
+#include <linux/exynos-pci-noti.h>
+extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
+extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+#include <linux/mutex.h>
+#include <linux/wait.h>
+
+#define DEFAULT_DHD_RUNTIME_MS 100
+#ifndef CUSTOM_DHD_RUNTIME_MS
+#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
+#endif /* CUSTOM_DHD_RUNTIME_MS */
+
+
+#ifndef MAX_IDLE_COUNT
+#define MAX_IDLE_COUNT 16
+#endif /* MAX_IDLE_COUNT */
+
+#ifndef MAX_RESUME_WAIT
+#define MAX_RESUME_WAIT 100
+#endif /* MAX_RESUME_WAIT */
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+/* defines */
+
+#define PCMSGBUF_HDRLEN 0
+#define DONGLE_REG_MAP_SIZE (32 * 1024)
+#define DONGLE_TCM_MAP_SIZE (4096 * 1024)
+#define DONGLE_MIN_MEMSIZE (128 * 1024)
+#ifdef DHD_DEBUG
+#define DHD_PCIE_SUCCESS 0
+#define DHD_PCIE_FAILURE 1
+#endif /* DHD_DEBUG */
+#define	REMAP_ENAB(bus)			((bus)->remap)
+#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#define struct_pcie_notify		struct msm_pcie_notify
+#define struct_pcie_register_event	struct msm_pcie_register_event
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)
+#define struct_pcie_notify		struct exynos_pcie_notify
+#define struct_pcie_register_event	struct exynos_pcie_register_event
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#define MAX_DHD_TX_FLOWS	320
+
+/* user defined data structures */
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192
+#define CONSOLE_BUFFER_MAX	(8 * 1024)
+
+#ifdef IDLE_TX_FLOW_MGMT
+#define IDLE_FLOW_LIST_TIMEOUT 5000
+#define IDLE_FLOW_RING_TIMEOUT 5000
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+#define DEVICE_TX_STUCK_CKECK_TIMEOUT	1000 /* 1 sec */
+#define DEVICE_TX_STUCK_TIMEOUT		10000 /* 10 secs */
+#define DEVICE_TX_STUCK_WARN_DURATION (DEVICE_TX_STUCK_TIMEOUT / DEVICE_TX_STUCK_CKECK_TIMEOUT)
+#define DEVICE_TX_STUCK_DURATION (DEVICE_TX_STUCK_WARN_DURATION * 2)
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+/* implicit DMA for h2d wr and d2h rd indices from Host memory to TCM */
+#define IDMA_ENAB(dhd)		((dhd)->idma_enable)
+#define IDMA_ACTIVE(dhd)	(((dhd)->idma_enable) && ((dhd)->idma_inited))
+
+#define IDMA_DS_ENAB(dhd)	((dhd)->idma_retention_ds)
+#define IDMA_DS_ACTIVE(dhd)	((dhd)->bus->dongle_in_ds)
+
+/* IFRM (Implicit Flow Ring Manager) enable and inited */
+#define IFRM_ENAB(dhd)		((dhd)->ifrm_enable)
+#define IFRM_ACTIVE(dhd)	(((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))
+
+/* PCIE CTO Prevention and Recovery */
+#define PCIECTO_ENAB(dhd)		((dhd)->cto_enable)
+
+/* Implicit DMA index usage :
+ * Index 0 for h2d write index transfer
+ * Index 1 for d2h read index transfer
+ */
+#define IDMA_IDX0 0
+#define IDMA_IDX1 1
+#define IDMA_IDX2 2
+#define IDMA_IDX3 3
+
+#define DHDPCIE_CONFIG_HDR_SIZE 16
+#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
+#define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20
+#define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */
+#define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */
+#define DHDPCIE_PM_D2_DELAY 200 /* 200us */
+
+typedef struct dhd_console {
+	 uint		count;	/* Poll interval msec counter */
+	 uint		log_addr;		 /* Log struct address (fixed) */
+	 hnd_log_t	 log;			 /* Log struct (host copy) */
+	 uint		 bufsize;		 /* Size of log buffer */
+	 uint8		 *buf;			 /* Log buffer (host copy) */
+	 uint		 last;			 /* Last buffer read index */
+} dhd_console_t;
+
+typedef struct ring_sh_info {
+	uint32 ring_mem_addr;
+	uint32 ring_state_w;
+	uint32 ring_state_r;
+} ring_sh_info_t;
+
+
+#define DEVICE_WAKE_NONE	0
+#define DEVICE_WAKE_OOB		1
+#define DEVICE_WAKE_INB		2
+
+#define INBAND_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_INB)
+#define OOB_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_OOB)
+#define NO_DW_ENAB(bus)			((bus)->dw_option == DEVICE_WAKE_NONE)
+
+struct dhd_bus;
+
+struct dhd_pcie_rev {
+	uint8	fw_rev;
+	void (*handle_mb_data)(struct dhd_bus *);
+};
+
+typedef struct dhdpcie_config_save
+{
+	uint32 header[DHDPCIE_CONFIG_HDR_SIZE];
+	/* pmcsr save */
+	uint32 pmcsr;
+	/* express save */
+	uint32 exp_dev_ctrl_stat;
+	uint32 exp_link_ctrl_stat;
+	uint32 exp_dev_ctrl_stat2;
+	uint32 exp_link_ctrl_stat2;
+	/* msi save */
+	uint32 msi_cap;
+	uint32 msi_addr0;
+	uint32 msi_addr1;
+	uint32 msi_data;
+	/* l1pm save */
+	uint32 l1pm0;
+	uint32 l1pm1;
+	/* ltr save */
+	uint32 ltr;
+	/* aer save */
+	uint32 aer_caps_ctrl; /* 0x18 */
+	uint32 aer_severity;  /* 0x0C */
+	uint32 aer_umask;     /* 0x08 */
+	uint32 aer_cmask;     /* 0x14 */
+	uint32 aer_root_cmd;  /* 0x2c */
+	/* BAR0 and BAR1 windows */
+	uint32 bar0_win;
+	uint32 bar1_win;
+} dhdpcie_config_save_t;
+
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+	struct pci_dev  *rc_dev;	/* pci RC device handle */
+	struct pci_dev  *dev;		/* pci device handle */
+#ifdef DHD_EFI
+	void *pcie_dev;
+#endif
+
+	dll_t		flowring_active_list; /* constructed list of tx flowring queues */
+#ifdef IDLE_TX_FLOW_MGMT
+	uint64		active_list_last_process_ts;
+						/* stores the timestamp of active list processing */
+#endif /* IDLE_TX_FLOW_MGMT */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+	/* Flag to enable/disable device tx stuck monitor by DHD IOVAR dev_tx_stuck_monitor */
+	uint32 dev_tx_stuck_monitor;
+	/* Stores the timestamp (msec) of the last device Tx stuck check */
+	uint32	device_tx_stuck_check;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+	sbpcieregs_t	*reg;			/* Registers for PCIE core */
+
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	bool		ramsize_adjusted;	/* flag to note adjustment, so that
+						 * adjustment routine and file io
+						 * are avoided on D3 cold -> D0
+						 */
+	uint32		srmemsize;		/* Size of SRMEM */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+#ifdef CACHE_FW_IMAGES
+	int			processed_nvram_params_len;	/* Modified len of NVRAM info */
+#endif
+
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+
+	bool		intr;			/* Use interrupts */
+	bool		poll;			/* Use polling */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+
+	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+					 * Available with socram rev 16
+					 * Remap region not DMA-able
+					 */
+	uint32		resetinstr;
+	uint32		dongle_ram_base;
+
+	ulong		shared_addr;
+	pciedev_shared_t	*pcie_sh;
+	bool bus_flowctrl;
+	uint32		dma_rxoffset;
+	volatile char	*regs;		/* pci device memory va */
+	volatile char	*tcm;		/* pci device memory va */
+	osl_t		*osh;
+	uint32		nvram_csm;	/* Nvram checksum */
+	uint16		pollrate;
+	uint16  polltick;
+
+	volatile uint32  *pcie_mb_intr_addr;
+	volatile uint32  *pcie_mb_intr_2_addr;
+	void    *pcie_mb_intr_osh;
+	bool	sleep_allowed;
+
+	wake_counts_t	wake_counts;
+
+	/* version 3 shared struct related info start */
+	ring_sh_info_t	ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];
+
+	uint8	h2d_ring_count;
+	uint8	d2h_ring_count;
+	uint32  ringmem_ptr;
+	uint32  ring_state_ptr;
+
+	uint32 d2h_dma_scratch_buffer_mem_addr;
+
+	uint32 h2d_mb_data_ptr_addr;
+	uint32 d2h_mb_data_ptr_addr;
+	/* version 3 shared struct related info end */
+
+	uint32 def_intmask;
+	bool	ltrsleep_on_unload;
+	uint	wait_for_d3_ack;
+	uint16	max_tx_flowrings;
+	uint16	max_submission_rings;
+	uint16	max_completion_rings;
+	uint16	max_cmn_rings;
+	uint32	rw_index_sz;
+	bool	db1_for_mb;
+
+	dhd_timeout_t doorbell_timer;
+	bool	device_wake_state;
+#ifdef PCIE_OOB
+	bool	oob_enabled;
+#endif /* PCIE_OOB */
+	bool	irq_registered;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+	defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895))
+#ifdef CONFIG_ARCH_MSM
+	uint8 no_cfg_restore;
+#endif /* CONFIG_ARCH_MSM */
+	struct_pcie_register_event pcie_event;
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
+	* (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895))
+	*/
+	bool read_shm_fail;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	int32 idletime;                 /* Control for activity timeout */
+#ifdef DHD_PCIE_RUNTIMEPM
+	int32 idlecount;                /* Activity timeout counter */
+	int32 bus_wake;                 /* For wake up the bus */
+	bool runtime_resume_done;       /* For check runtime suspend end */
+	struct mutex pm_lock;            /* Synchronize for system PM & runtime PM */
+	wait_queue_head_t rpm_queue;    /* wait-queue for bus wake up */
+#endif /* DHD_PCIE_RUNTIMEPM */
+	uint32 d3_inform_cnt;
+	uint32 d0_inform_cnt;
+	uint32 d0_inform_in_use_cnt;
+	uint8 force_suspend;
+	uint8 is_linkdown;
+#ifdef IDLE_TX_FLOW_MGMT
+	bool enable_idle_flowring_mgmt;
+#endif /* IDLE_TX_FLOW_MGMT */
+	struct	dhd_pcie_rev api;
+	bool use_mailbox;
+	bool	d3_suspend_pending;
+	bool    use_d0_inform;
+	uint32  hostready_count; /* Number of hostready issued */
+#if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE)
+	bool	oob_presuspend;
+#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
+	bool	dongle_in_ds;
+	uint8	dw_option;
+#ifdef PCIE_INB_DW
+	bool	inb_enabled;
+	uint32	ds_exit_timeout;
+	uint32	host_sleep_exit_timeout;
+	uint	wait_for_ds_exit;
+	uint32	inband_dw_assert_cnt; /* # of inband device_wake assert */
+	uint32	inband_dw_deassert_cnt; /* # of inband device_wake deassert */
+	uint32	inband_ds_exit_host_cnt; /* # of DS-EXIT, host initiated */
+	uint32	inband_ds_exit_device_cnt; /* # of DS-EXIT, device initiated */
+	uint32	inband_ds_exit_to_cnt; /* # of DS-EXIT timeout */
+	uint32	inband_host_sleep_exit_to_cnt; /* # of Host_Sleep exit timeout */
+	void	*inb_lock;	/* Lock to serialize in band device wake activity */
+	/* # of contexts in the host which currently want a FW transaction */
+	uint32  host_active_cnt;
+#endif /* PCIE_INB_DW */
+	dhdpcie_config_save_t saved_config;
+	ulong resume_intr_enable_count;
+	ulong dpc_intr_enable_count;
+	ulong isr_intr_disable_count;
+	ulong suspend_intr_disable_count;
+	ulong dpc_return_busdown_count;
+	bool  idma_enabled;
+	bool  ifrm_enabled;
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+	bool  ds_enabled;
+#endif
+#ifdef DHD_PCIE_RUNTIMEPM
+	bool chk_pm;	/* To avoid counting of wake up from Runtime PM */
+#endif /* DHD_PCIE_RUNTIMEPM */
+} dhd_bus_t;
+
+/* function declarations */
+
+extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
+extern int dhdpcie_bus_register(void);
+extern void dhdpcie_bus_unregister(void);
+extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
+
+extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh,
+	volatile char *regs, volatile char *tcm, void *pci_dev);
+extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
+extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
+extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus);
+extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
+extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus);
+extern void dhdpcie_bus_release(struct dhd_bus *bus);
+extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
+extern void dhdpcie_free_irq(dhd_bus_t *bus);
+extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
+extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
+extern int dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state);
+extern int dhdpcie_pci_suspend_resume(struct  dhd_bus *bus, bool state);
+extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable);
+extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time);
+extern bool dhdpcie_tcm_valid(dhd_bus_t *bus);
+extern void dhdpcie_pme_active(osl_t *osh, bool enable);
+extern bool dhdpcie_pme_cap(osl_t *osh);
+extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val);
+extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask);
+extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val);
+extern int dhdpcie_disable_irq(dhd_bus_t *bus);
+extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
+extern int dhdpcie_enable_irq(dhd_bus_t *bus);
+extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
+extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
+		bool is_write, uint32 writeval);
+extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
+extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_disable_device(dhd_bus_t *bus);
+extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
+extern void dhdpcie_free_resource(dhd_bus_t *bus);
+extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
+extern int dhdpcie_enable_device(dhd_bus_t *bus);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
+extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
+extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_OOB
+extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val);
+extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus);
+extern void dhdpcie_oob_init(dhd_bus_t *bus);
+extern void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus);
+extern int dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val);
+extern void dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val);
+#endif /* PCIE_OOB */
+
+#if defined(CONFIG_ARCH_EXYNOS)
+#define SAMSUNG_PCIE_VENDOR_ID 0x144d
+#if defined(CONFIG_MACH_UNIVERSAL5433)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3
+#define SAMSUNG_PCIE_CH_NUM
+#elif defined(CONFIG_MACH_UNIVERSAL7420)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa575
+#define SAMSUNG_PCIE_CH_NUM 1
+#elif defined(CONFIG_SOC_EXYNOS8890)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa544
+#define SAMSUNG_PCIE_CH_NUM 0
+#elif defined(CONFIG_SOC_EXYNOS7420)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa575
+#define SAMSUNG_PCIE_CH_NUM 1
+#elif defined(CONFIG_SOC_EXYNOS8895)
+#define SAMSUNG_PCIE_DEVICE_ID 0xecec
+#define SAMSUNG_PCIE_CH_NUM 0
+#else
+#error "Not supported platform"
+#endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */
+#endif /* CONFIG_ARCH_EXYNOS */
+
+#if defined(CONFIG_ARCH_MSM)
+#define MSM_PCIE_VENDOR_ID 0x17cb
+#if defined(CONFIG_ARCH_APQ8084)
+#define MSM_PCIE_DEVICE_ID 0x0101
+#elif defined(CONFIG_ARCH_MSM8994)
+#define MSM_PCIE_DEVICE_ID 0x0300
+#elif defined(CONFIG_ARCH_MSM8996)
+#define MSM_PCIE_DEVICE_ID 0x0104
+#elif defined(CONFIG_ARCH_MSM8998)
+#define MSM_PCIE_DEVICE_ID 0x0105
+#else
+#error "Not supported platform"
+#endif
+#endif /* CONFIG_ARCH_MSM */
+
+#if defined(CONFIG_X86)
+#define X86_PCIE_VENDOR_ID 0x8086
+#define X86_PCIE_DEVICE_ID 0x9c1a
+#endif /* CONFIG_X86 */
+
+#if defined(CONFIG_ARCH_TEGRA)
+#define TEGRA_PCIE_VENDOR_ID 0x14e4
+#define TEGRA_PCIE_DEVICE_ID 0x4347
+#endif /* CONFIG_ARCH_TEGRA */
+
+#if defined(CONFIG_ARCH_EXYNOS)
+#define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID
+#elif defined(CONFIG_ARCH_MSM)
+#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID
+#elif defined(CONFIG_X86)
+#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID
+#elif defined(CONFIG_ARCH_TEGRA)
+#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
+#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
+#endif /* CONFIG_ARCH_EXYNOS */
+
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+#ifdef CONFIG_MACH_UNIVERSAL5433
+extern int exynos_pcie_pm_suspend(void);
+extern int exynos_pcie_pm_resume(void);
+#else
+extern int exynos_pcie_pm_suspend(int ch_num);
+extern int exynos_pcie_pm_resume(int ch_num);
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
+#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
+
+#ifdef CONFIG_ARCH_TEGRA
+extern int tegra_pcie_pm_suspend(void);
+extern int tegra_pcie_pm_resume(void);
+#endif /* CONFIG_ARCH_TEGRA */
+
+extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
+#ifdef IDLE_TX_FLOW_MGMT
+extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg);
+extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
+extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg);
+extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
+	flow_ring_node_t *flow_ring_node);
+#endif /* IDLE_TX_FLOW_MGMT */
+
+extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
+
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus);
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
+#endif /* DHD_WAKE_STATUS */
+extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
+extern void dhd_bus_hostready(struct  dhd_bus *bus);
+#ifdef PCIE_OOB
+extern bool dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus);
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+extern bool dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus);
+extern void dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus,
+	enum dhd_bus_ds_state state);
+extern enum dhd_bus_ds_state dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus);
+extern const char * dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate);
+extern const char * dhd_convert_dsval(uint32 val, bool d2h);
+extern int dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val);
+extern void dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus);
+#endif /* PCIE_INB_DW */
+extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
+extern bool dhdpcie_irq_enabled(struct dhd_bus *bus);
+extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
+extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
+
+static INLINE uint32
+dhd_pcie_config_read(osl_t *osh, uint offset, uint size)
+{
+	OSL_DELAY(100);
+	return OSL_PCI_READ_CONFIG(osh, offset, size);
+}
+
+static INLINE uint32
+dhd_pcie_corereg_read(si_t *sih, uint val)
+{
+	OSL_DELAY(100);
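+	/* Indirect access: write the requested offset into the PCIe core's
+	 * configaddr register, then read the value back via configdata.
+	 */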
+	si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val);
+	return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+}
+
+#ifdef DHD_SSSR_DUMP
+extern int dhdpcie_sssr_dump(dhd_pub_t *dhd);
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_EFI
+extern int dhd_os_wifi_platform_set_power(uint32 value);
+int dhd_control_signal(dhd_bus_t *bus, char *arg, int set);
+extern int dhd_wifi_properties(struct dhd_bus *bus, char *arg);
+extern bool dhdpcie_is_arm_halted(struct dhd_bus *bus);
+extern void dhdpcie_dongle_pwr_toggle(dhd_bus_t *bus);
+extern int dhd_otp_dump(dhd_bus_t *bus, char *arg);
+#else
+static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
+static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
+#endif /* DHD_EFI */
+int dhdpcie_config_check(dhd_bus_t *bus);
+int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
+int dhdpcie_config_save(dhd_bus_t *bus);
+int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
+
+#endif /* dhd_pcie_h */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
new file mode 100644
index 0000000..9b782ad
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
@@ -0,0 +1,2312 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pcie_linux.c 707536 2017-06-28 04:23:48Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <dhd_linux.h>
+#ifdef CONFIG_ARCH_MSM
+#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
+#include <linux/msm_pcie.h>
+#else
+#include <mach/msm_pcie.h>
+#endif /* CONFIG_PCI_MSM */
+#endif /* CONFIG_ARCH_MSM */
+#ifdef PCIE_OOB
+#include "ftdi_sio_external.h"
+#endif /* PCIE_OOB */
+#include <linux/irq.h>
+#ifdef USE_SMMU_ARCH_MSM
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#endif /* USE_SMMU_ARCH_MSM */
+
+#define PCI_CFG_RETRY 		10
+#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 	24		/* Mem. filename length */
+
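+/* Zero out the 32-byte packet tag kept in the skb control buffer (skb->cb) */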
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
+#ifdef PCIE_OOB
+#define HOST_WAKE 4   /* GPIO_0 (HOST_WAKE) - Output from WLAN */
+#define DEVICE_WAKE 5  /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
+#define BIT_WL_REG_ON 6
+#define BIT_BT_REG_ON 7
+
+int gpio_handle_val = 0;
+unsigned char gpio_port = 0;
+unsigned char gpio_direction = 0;
+#define OOB_PORT "ttyUSB0"
+#endif /* PCIE_OOB */
+
+/* user defined data structures  */
+
+typedef struct dhd_pc_res {
+	uint32 bar0_size;
+	void* bar0_addr;
+	uint32 bar1_size;
+	void* bar1_addr;
+} pci_config_res, *pPci_config_res;
+
+typedef bool (*dhdpcie_cb_fn_t)(void *);
+
+typedef struct dhdpcie_info
+{
+	dhd_bus_t	*bus;
+	osl_t 			*osh;
+	struct pci_dev  *dev;		/* pci device handle */
+	volatile char 	*regs;		/* pci device memory va */
+	volatile char 	*tcm;		/* pci device memory va */
+	uint32			tcm_size;	/* pci device memory size */
+	struct pcos_info *pcos_info;
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int	irq;
+	char pciname[32];
+	struct pci_saved_state* default_state;
+	struct pci_saved_state* state;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	void *os_cxt;			/* Pointer to per-OS private data */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_WAKE_STATUS
+	spinlock_t	pcie_lock;
+	unsigned int	total_wake_count;
+	int		pkt_wake;
+	int		wake_irq;
+#endif /* DHD_WAKE_STATUS */
+#ifdef USE_SMMU_ARCH_MSM
+	void *smmu_cxt;
+#endif /* USE_SMMU_ARCH_MSM */
+} dhdpcie_info_t;
+
+
+struct pcos_info {
+	dhdpcie_info_t *pc;
+	spinlock_t lock;
+	wait_queue_head_t intr_wait_queue;
+	struct timer_list tuning_timer;
+	int tuning_timer_exp;
+	atomic_t timer_enab;
+	struct tasklet_struct tuning_tasklet;
+};
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+typedef struct dhdpcie_os_info {
+	int			oob_irq_num;	/* valid when hardware or software oob in use */
+	unsigned long		oob_irq_flags;	/* valid when hardware or software oob in use */
+	bool			oob_irq_registered;
+	bool			oob_irq_enabled;
+	bool			oob_irq_wake_enabled;
+	spinlock_t		oob_irq_spinlock;
+	void			*dev;		/* handle to the underlying device */
+} dhdpcie_os_info_t;
+static irqreturn_t wlan_oob_irq(int irq, void *data);
+#if defined(CUSTOMER_HW2) && defined(CONFIG_ARCH_APQ8084)
+extern struct brcm_pcie_wake brcm_pcie_wake;
+#endif /* CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef USE_SMMU_ARCH_MSM
+typedef struct dhdpcie_smmu_info {
+	struct dma_iommu_mapping *smmu_mapping;
+	dma_addr_t smmu_iova_start;
+	size_t smmu_iova_len;
+} dhdpcie_smmu_info_t;
+#endif /* USE_SMMU_ARCH_MSM */
+
+/* function declarations */
+static int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev);
+static int dhdpcie_init(struct pci_dev *pdev);
+static irqreturn_t dhdpcie_isr(int irq, void *arg);
+/* OS Routine functions for PCI suspend/resume */
+
+static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
+static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
+static int dhdpcie_resume_dev(struct pci_dev *dev);
+static int dhdpcie_suspend_dev(struct pci_dev *dev);
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev);
+static int dhdpcie_pm_prepare(struct device *dev);
+static int dhdpcie_pm_resume(struct device *dev);
+static void dhdpcie_pm_complete(struct device *dev);
+#else
+static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
+static int dhdpcie_pci_resume(struct pci_dev *dev);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
+	{ vendor: 0x14e4,
+	device: PCI_ANY_ID,
+	subvendor: PCI_ANY_ID,
+	subdevice: PCI_ANY_ID,
+	class: PCI_CLASS_NETWORK_OTHER << 8,
+	class_mask: 0xffff00,
+	driver_data: 0,
+	},
+	{ 0, 0, 0, 0, 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
+
+/* Power Management Hooks */
+#ifdef DHD_PCIE_RUNTIMEPM
+static const struct dev_pm_ops dhd_pcie_pm_ops = {
+	.prepare = dhdpcie_pm_prepare,
+	.suspend = dhdpcie_pm_suspend,
+	.resume = dhdpcie_pm_resume,
+	.complete = dhdpcie_pm_complete,
+};
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+static struct pci_driver dhdpcie_driver = {
+	node:		{&dhdpcie_driver.node, &dhdpcie_driver.node},
+	name:		"pcieh",
+	id_table:	dhdpcie_pci_devid,
+	probe:		dhdpcie_pci_probe,
+	remove:		dhdpcie_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	save_state:	NULL,
+#endif
+#ifdef DHD_PCIE_RUNTIMEPM
+	.driver.pm = &dhd_pcie_pm_ops,
+#else
+	suspend:	dhdpcie_pci_suspend,
+	resume:		dhdpcie_pci_resume,
+#endif /* DHD_PCIE_RUNTIMEPM */
+};
+
+int dhdpcie_init_succeeded = FALSE;
+
+#ifdef USE_SMMU_ARCH_MSM
+static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
+{
+	struct dma_iommu_mapping *mapping;
+	struct device_node *root_node = NULL;
+	dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
+	int smmu_iova_address[2];
+	char *wlan_node = "android,bcmdhd_wlan";
+	char *wlan_smmu_node = "wlan-smmu-iova-address";
+	int atomic_ctx = 1;
+	int s1_bypass = 1;
+	int ret = 0;
+
+	DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));
+
+	root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+	if (!root_node) {
+		WARN(1, "failed to get device node of BRCM WLAN\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32_array(root_node, wlan_smmu_node,
+		smmu_iova_address, 2) == 0) {
+		DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
+			__FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
+		smmu_info->smmu_iova_start = smmu_iova_address[0];
+		smmu_info->smmu_iova_len = smmu_iova_address[1];
+	} else {
+		printf("%s : can't get smmu iova address property\n",
+			__FUNCTION__);
+		return -ENODEV;
+	}
+
+	if (smmu_info->smmu_iova_len <= 0) {
+		DHD_ERROR(("%s: Invalid smmu iova len %d\n",
+			__FUNCTION__, (int)smmu_info->smmu_iova_len));
+		return -EINVAL;
+	}
+
+	DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
+	mapping = arm_iommu_create_mapping(&platform_bus_type,
+		smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
+	if (IS_ERR(mapping)) {
+		DHD_ERROR(("%s: create mapping failed, err = %d\n",
+			__FUNCTION__, ret));
+		ret = PTR_ERR(mapping);
+		goto map_fail;
+	}
+
+	ret = iommu_domain_set_attr(mapping->domain,
+		DOMAIN_ATTR_ATOMIC, &atomic_ctx);
+	if (ret) {
+		DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
+			__FUNCTION__, ret));
+		goto set_attr_fail;
+	}
+
+	ret = iommu_domain_set_attr(mapping->domain,
+		DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
+			__FUNCTION__, ret));
+		goto set_attr_fail;
+	}
+
+	ret = arm_iommu_attach_device(&pdev->dev, mapping);
+	if (ret) {
+		DHD_ERROR(("%s: attach device failed, err = %d\n",
+			__FUNCTION__, ret));
+		goto attach_fail;
+	}
+
+	smmu_info->smmu_mapping = mapping;
+
+	return ret;
+
+attach_fail:
+set_attr_fail:
+	arm_iommu_release_mapping(mapping);
+map_fail:
+	return ret;
+}
+
+static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
+{
+	dhdpcie_smmu_info_t *smmu_info;
+
+	if (!smmu_cxt) {
+		return;
+	}
+
+	smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
+	if (smmu_info->smmu_mapping) {
+		arm_iommu_detach_device(&pdev->dev);
+		arm_iommu_release_mapping(smmu_info->smmu_mapping);
+		smmu_info->smmu_mapping = NULL;
+	}
+}
+#endif /* USE_SMMU_ARCH_MSM */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+	unsigned long flags;
+
+	if (pch) {
+		bus = pch->bus;
+	}
+	if (!bus) {
+		return ret;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+		DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
+			__FUNCTION__, bus->dhd->dhd_bus_busy_state));
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		return -EBUSY;
+	}
+	DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	if (!bus->dhd->dongle_reset)
+		ret = dhdpcie_set_suspend_resume(bus, TRUE);
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return ret;
+
+}
+
+static int dhdpcie_pm_prepare(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+
+	if (pch) {
+		bus = pch->bus;
+		DHD_DISABLE_RUNTIME_PM(bus->dhd);
+	}
+
+	bus->chk_pm = TRUE;
+	return 0;
+}
+
+static int dhdpcie_pm_resume(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+	unsigned long flags;
+
+	if (pch) {
+		bus = pch->bus;
+	}
+	if (!bus) {
+		return ret;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	if (!bus->dhd->dongle_reset) {
+		ret = dhdpcie_set_suspend_resume(bus, FALSE);
+		bus->chk_pm = FALSE;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return ret;
+}
+
+static void dhdpcie_pm_complete(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+
+	if (pch) {
+		bus = pch->bus;
+		DHD_ENABLE_RUNTIME_PM(bus->dhd);
+	}
+
+	return;
+}
+#else
+static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
+{
+	int ret = 0;
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+	unsigned long flags;
+
+	if (pch) {
+		bus = pch->bus;
+	}
+	if (!bus) {
+		return ret;
+	}
+
+	BCM_REFERENCE(state);
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+		DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
+			__FUNCTION__, bus->dhd->dhd_bus_busy_state));
+		DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		return -EBUSY;
+	}
+	DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	if (!bus->dhd->dongle_reset)
+		ret = dhdpcie_set_suspend_resume(bus, TRUE);
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return ret;
+}
+
+static int dhdpcie_pci_resume(struct pci_dev *pdev)
+{
+	int ret = 0;
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+	unsigned long flags;
+
+	if (pch) {
+		bus = pch->bus;
+	}
+	if (!bus) {
+		return ret;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	if (!bus->dhd->dongle_reset)
+		ret = dhdpcie_set_suspend_resume(bus, FALSE);
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return ret;
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
+{
+	int ret = 0;
+
+	ASSERT(bus && !bus->dhd->dongle_reset);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	/* if wakelock is held during suspend, return failed */
+	if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
+		return -EBUSY;
+	}
+	mutex_lock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	/* When firmware is not loaded do the PCI bus suspend/resume only */
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		ret = dhdpcie_pci_suspend_resume(bus, state);
+#ifdef DHD_PCIE_RUNTIMEPM
+		mutex_unlock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+		return ret;
+	}
+
+	ret = dhdpcie_bus_suspend(bus, state);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+	mutex_unlock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	return ret;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+static int dhdpcie_suspend_dev(struct pci_dev *dev)
+{
+	int ret;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	dhdpcie_info_t *pch = pci_get_drvdata(dev);
+	dhd_bus_t *bus = pch->bus;
+
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+	DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	dhd_dpc_tasklet_kill(bus->dhd);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
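+	/* Save config space (keeping a private copy of the saved state for resume),
+	 * enable wake events, then disable the device and put it into D3hot.
+	 */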
+	pci_save_state(dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	pch->state = pci_store_saved_state(dev);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+	pci_enable_wake(dev, PCI_D0, TRUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	if (pci_is_enabled(dev))
+#endif
+		pci_disable_device(dev);
+
+	ret = pci_set_power_state(dev, PCI_D3hot);
+	if (ret) {
+		DHD_ERROR(("%s: pci_set_power_state error %d\n",
+			__FUNCTION__, ret));
+	}
+	dev->state_saved = FALSE;
+	return ret;
+}
+
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus)
+{
+	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+	return pch->total_wake_count;
+}
+
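+/* Add 'flag' to the running packet-wake total, record it as the latest
+ * pkt_wake state, and return the previous pkt_wake value.
+ */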
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
+{
+	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&pch->pcie_lock, flags);
+
+	ret = pch->pkt_wake;
+	pch->total_wake_count += flag;
+	pch->pkt_wake = flag;
+
+	spin_unlock_irqrestore(&pch->pcie_lock, flags);
+	return ret;
+}
+#endif /* DHD_WAKE_STATUS */
+
+static int dhdpcie_resume_dev(struct pci_dev *dev)
+{
+	int err = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	dhdpcie_info_t *pch = pci_get_drvdata(dev);
+	pci_load_and_free_saved_state(dev, &pch->state);
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+	DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+	dev->state_saved = TRUE;
+	pci_restore_state(dev);
+	err = pci_enable_device(dev);
+	if (err) {
+		printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
+		goto out;
+	}
+	pci_set_master(dev);
+	err = pci_set_power_state(dev, PCI_D0);
+	if (err) {
+		printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+	bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM);
+#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
+#ifdef CONFIG_ARCH_MSM
+	bcmerror = dhdpcie_start_host_pcieclock(bus);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_TEGRA
+	bcmerror = tegra_pcie_pm_resume();
+#endif /* CONFIG_ARCH_TEGRA */
+	if (bcmerror < 0) {
+		DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
+			__FUNCTION__, bcmerror));
+		bus->is_linkdown = 1;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	}
+
+	return bcmerror;
+}
+
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+	if (bus->rc_dev) {
+		pci_save_state(bus->rc_dev);
+	} else {
+		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+	}
+	exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
+#endif	/* USE_EXYNOS_PCIE_RC_PMPATCH */
+#ifdef CONFIG_ARCH_MSM
+	bcmerror = dhdpcie_stop_host_pcieclock(bus);
+#endif	/* CONFIG_ARCH_MSM */
+#ifdef CONFIG_ARCH_TEGRA
+	bcmerror = tegra_pcie_pm_suspend();
+#endif /* CONFIG_ARCH_TEGRA */
+	return bcmerror;
+}
+
+#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
+uint32
+dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
+{
+	uint val = -1; /* Initialise to 0xffffffff */
+	if (bus->rc_dev) {
+		pci_read_config_dword(bus->rc_dev, offset, &val);
+		OSL_DELAY(100);
+	} else {
+		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+	}
+	DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
+		__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
+	return (val);
+}
+
+/*
+ * Reads/writes the value of a capability register
+ * in the given CAP_ID section of the PCI Root Port
+ *
+ * Arguments
+ * @bus current dhd_bus_t pointer
+ * @cap Capability or Extended Capability ID to get
+ * @offset offset of register to read
+ * @is_ext TRUE if @cap is an Extended Capability ID
+ * @is_write set to TRUE to indicate a write
+ * @writeval value to write
+ *
+ * Return Value
+ * Returns 0xffffffff on error
+ * on write success returns BCME_OK (0)
+ * on read success returns the value of the requested register
+ * Note: the caller should ensure a valid Capability or Extended Capability ID.
+ */
+
+uint32
+dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
+	uint32 writeval)
+{
+	int cap_ptr = 0;
+	uint32 ret = -1;
+	uint32 readval;
+
+	if (!(bus->rc_dev)) {
+		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
+		return ret;
+	}
+
+	/* Find Capability offset */
+	if (is_ext) {
+		/* removing max EXT_CAP_ID check as
+		 * the Linux kernel definition's max value is not updated yet as per spec
+		 */
+		cap_ptr = pci_find_ext_capability(bus->rc_dev, cap);
+
+	} else {
+		/* removing max PCI_CAP_ID_MAX check as
+		 * previous kernel versions don't have this definition
+		 */
+		cap_ptr = pci_find_capability(bus->rc_dev, cap);
+	}
+
+	/* Return if capability with given ID not found */
+	if (cap_ptr == 0) {
+		DHD_ERROR(("%s: RC %x:%x PCI Cap(0x%02x) not supported.\n",
+			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, cap));
+		return BCME_ERROR;
+	}
+
+	if (is_write) {
+		ret = pci_write_config_dword(bus->rc_dev, (cap_ptr + offset), writeval);
+		if (ret) {
+			DHD_ERROR(("%s: pci_write_config_dword failed. cap=%d offset=%d\n",
+				__FUNCTION__, cap, offset));
+			return BCME_ERROR;
+		}
+		ret = BCME_OK;
+
+	} else {
+
+		ret = pci_read_config_dword(bus->rc_dev, (cap_ptr + offset), &readval);
+
+		if (ret) {
+			DHD_ERROR(("%s: pci_read_config_dword failed. cap=%d offset=%d\n",
+				__FUNCTION__, cap, offset));
+			return BCME_ERROR;
+		}
+		ret = readval;
+	}
+
+	return ret;
+}
+
+/* API wrapper to read the Root Port link capability.
+ * Returns 2 for GEN2, 1 for GEN1, or BCME_ERROR if the link capability is not found.
+ */
+
+uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
+{
+	uint32 linkcap = -1;
+	linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
+			PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
+	linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
+	return linkcap;
+}
+#endif
+
+int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
+{
+	int rc;
+
+	struct pci_dev *dev = bus->dev;
+
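+	/* Suspend: put the endpoint down first, then the host-side RC/clock.
+	 * Resume: bring the host side back first, then the endpoint.
+	 */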
+	if (state) {
+#ifndef BCMPCIE_OOB_HOST_WAKE
+		dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+		rc = dhdpcie_suspend_dev(dev);
+		if (!rc) {
+			dhdpcie_suspend_host_dev(bus);
+		}
+	} else {
+		dhdpcie_resume_host_dev(bus);
+		rc = dhdpcie_resume_dev(dev);
+#ifndef	BCMPCIE_OOB_HOST_WAKE
+		dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#if defined(DHD_HANG_SEND_UP_TEST)
+		if (bus->is_linkdown ||
+			bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL)
+#else /* DHD_HANG_SEND_UP_TEST */
+		if (bus->is_linkdown)
+#endif /* DHD_HANG_SEND_UP_TEST */
+		{
+			bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
+			dhd_os_send_hang_message(bus->dhd);
+		}
+#endif 
+	}
+	return rc;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+static int dhdpcie_device_scan(struct device *dev, void *data)
+{
+	struct pci_dev *pcidev;
+	int *cnt = data;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	pcidev = container_of(dev, struct pci_dev, dev);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	if (pcidev->vendor != 0x14e4)
+		return 0;
+
+	DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
+	*cnt += 1;
+	if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
+		DHD_ERROR(("Broadcom PCI Device 0x%04x is already bound to driver %s\n",
+			pcidev->device, pcidev->driver->name));
+
+	return 0;
+}
+#endif /* LINUX_VERSION >= 2.6.0 */
+
+int
+dhdpcie_bus_register(void)
+{
+	int error = 0;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (!(error = pci_module_init(&dhdpcie_driver)))
+		return 0;
+
+	DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#else
+	if (!(error = pci_register_driver(&dhdpcie_driver))) {
+		bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
+		if (!error) {
+			DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
+		} else if (!dhdpcie_init_succeeded) {
+			DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
+		} else {
+			return 0;
+		}
+
+		pci_unregister_driver(&dhdpcie_driver);
+		error = BCME_ERROR;
+	}
+#endif /* LINUX_VERSION < 2.6.0 */
+
+	return error;
+}
+
+
+void
+dhdpcie_bus_unregister(void)
+{
+	pci_unregister_driver(&dhdpcie_driver);
+}
+
+int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	DHD_MUTEX_LOCK();
+
+	if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
+		DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
+		DHD_MUTEX_UNLOCK();
+		return -ENODEV;
+	}
+	printf("PCI_PROBE:  bus %X, slot %X,vendor %X, device %X"
+		"(good PCI location)\n", pdev->bus->number,
+		PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+
+	if (dhdpcie_init (pdev)) {
+		DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
+		DHD_MUTEX_UNLOCK();
+		return -ENODEV;
+	}
+
+#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
+	/* disable async suspend */
+	device_disable_async_suspend(&pdev->dev);
+#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
+
+	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+	DHD_MUTEX_UNLOCK();
+	return 0;
+}
+
+int
+dhdpcie_detach(dhdpcie_info_t *pch)
+{
+	if (pch) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload) {
+			pci_load_and_free_saved_state(pch->dev, &pch->default_state);
+		}
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+		MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
+	}
+	return 0;
+}
+
+
+void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev)
+{
+	osl_t *osh = NULL;
+	dhdpcie_info_t *pch = NULL;
+	dhd_bus_t *bus = NULL;
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	DHD_MUTEX_LOCK();
+
+	pch = pci_get_drvdata(pdev);
+	bus = pch->bus;
+	osh = pch->osh;
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus) {
+#ifdef CONFIG_ARCH_MSM
+		msm_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+		exynos_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+	}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+	bus->rc_dev = NULL;
+
+	dhdpcie_bus_release(bus);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	if (pci_is_enabled(pdev))
+#endif
+		pci_disable_device(pdev);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	/* pcie os info detach */
+	MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef USE_SMMU_ARCH_MSM
+	/* smmu info detach */
+	dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
+	MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
+#endif /* USE_SMMU_ARCH_MSM */
+	/* pcie info detach */
+	dhdpcie_detach(pch);
+	/* osl detach */
+	osl_detach(osh);
+
+#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
+	defined(CONFIG_ARCH_APQ8084)
+	brcm_pcie_wake.wake_irq = NULL;
+	brcm_pcie_wake.data = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_MUTEX_UNLOCK();
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+
+	return;
+}
+
+/* Request Linux IRQ */
+int
+dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
+{
+	dhd_bus_t *bus = dhdpcie_info->bus;
+	struct pci_dev *pdev = dhdpcie_info->bus->dev;
+	int err = 0;
+
+	if (!bus->irq_registered) {
+		snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+		    "dhdpcie:%s", pci_name(pdev));
+#ifdef DHD_USE_MSI
+		printf("%s: MSI enabled\n", __FUNCTION__);
+		err = pci_enable_msi(pdev);
+		if (err < 0) {
+			DHD_ERROR(("%s: pci_enable_msi() failed, %d, fall back to INTx\n", __FUNCTION__, err));
+		}
+#else
+		printf("%s: MSI not enabled\n", __FUNCTION__);
+#endif /* DHD_USE_MSI */
+		err = request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+			dhdpcie_info->pciname, bus);
+		if (err) {
+			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+#ifdef DHD_USE_MSI
+			pci_disable_msi(pdev);
+#endif /* DHD_USE_MSI */
+			return -1;
+		} else {
+			bus->irq_registered = TRUE;
+		}
+	} else {
+		DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
+	}
+
+	if (!dhdpcie_irq_enabled(bus)) {
+		DHD_ERROR(("%s: PCIe IRQ was disabled, so enabling it again\n", __FUNCTION__));
+		dhdpcie_enable_irq(bus);
+	}
+
+	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
+
+	return 0; /* SUCCESS */
+}
+
+/**
+ *	dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
+ */
+int
+dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
+{
+	struct pci_dev *pdev = bus->dev;
+
+	if (!pdev) {
+		DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	*irq  = pdev->irq;
+
+	return 0; /* SUCCESS */
+}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRINTF_RESOURCE	"0x%016llx"
+#else
+#define PRINTF_RESOURCE	"0x%08x"
+#endif
+
+/*
+
+Name:  dhdpcie_get_resource
+
+Parameters:
+
+1: dhdpcie_info_t *dhdpcie_info   -- Linux-specific PCIe info structure for this device
+
+
+Return value:
+
+int   - 0 on success, -1 on failure
+
+Description:
+Access PCI configuration space, retrieve the PCI-allocated resources (BAR0/BAR1),
+map them, and update the resource structure.
+
+ */
+int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
+{
+	phys_addr_t  bar0_addr, bar1_addr;
+	ulong bar1_size;
+	struct pci_dev *pdev = NULL;
+	pdev = dhdpcie_info->dev;
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+	pci_restore_state(pdev);
+#endif /* EXYNOS_MODULE_PATCH */
+	do {
+		if (pci_enable_device(pdev)) {
+			printf("%s: Cannot enable PCI device\n", __FUNCTION__);
+			break;
+		}
+		pci_set_master(pdev);
+		bar0_addr = pci_resource_start(pdev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(pdev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(pdev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device  size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			goto err;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		dhdpcie_info->tcm_size =
+			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+
+		if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
+			DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
+			break;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload) {
+			/* Back up the PCIe configuration so the Wi-Fi on/off process
+			 * can be used with a built-in driver
+			 */
+			pci_save_state(pdev);
+			dhdpcie_info->default_state = pci_store_saved_state(pdev);
+
+			if (dhdpcie_info->default_state == NULL) {
+				DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+					__FUNCTION__));
+				REG_UNMAP(dhdpcie_info->regs);
+				REG_UNMAP(dhdpcie_info->tcm);
+				pci_disable_device(pdev);
+				break;
+			}
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+		pci_save_state(pdev);
+#endif /* EXYNOS_MODULE_PATCH */
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0; /* SUCCESS  */
+	} while (0);
+err:
+	return -1;  /* FAILURE */
+}
+
+int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
+{
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		/* define it here only!! */
+		if (dhdpcie_get_resource (dhdpcie_info)) {
+			DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
+			break;
+		}
+		DHD_TRACE(("%s:Exit - SUCCESS \n",
+			__FUNCTION__));
+
+		return 0; /* SUCCESS */
+
+	} while (0);
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* FAILURE */
+
+}
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+	(defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)))
+void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
+{
+	struct pci_dev *pdev = (struct pci_dev *)noti->user;
+	dhdpcie_info_t *pch = NULL;
+
+	if (pdev) {
+		pch = pci_get_drvdata(pdev);
+		if (pch) {
+			dhd_bus_t *bus = pch->bus;
+			if (bus) {
+				dhd_pub_t *dhd = bus->dhd;
+				if (dhd) {
+					DHD_ERROR(("%s: Event HANG send up "
+						"due to PCIe linkdown\n",
+						__FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+					bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+					bus->is_linkdown = 1;
+					DHD_OS_WAKE_LOCK(dhd);
+					dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+					dhd_os_send_hang_message(dhd);
+				}
+			}
+		}
+	}
+
+}
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
+	* (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895))
+	*/
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+int dhdpcie_init(struct pci_dev *pdev)
+{
+
+	osl_t 				*osh = NULL;
+	dhd_bus_t 			*bus = NULL;
+	dhdpcie_info_t		*dhdpcie_info =  NULL;
+	wifi_adapter_info_t	*adapter = NULL;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	dhdpcie_os_info_t	*dhdpcie_osinfo = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef USE_SMMU_ARCH_MSM
+	dhdpcie_smmu_info_t	*dhdpcie_smmu_info = NULL;
+#endif /* USE_SMMU_ARCH_MSM */
+
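+	/* Single-pass do { } while (0): any failure below breaks out of the block
+	 * rather than returning directly.
+	 */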
+	do {
+		/* osl attach */
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* initialize static buffer */
+		adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
+			PCI_SLOT(pdev->devfn));
+		if (adapter != NULL) {
+			DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
+#ifdef BUS_POWER_RESTORE
+			adapter->pci_dev = pdev;
+#endif
+		} else
+			DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+		osl_static_mem_init(osh, adapter);
+
+		/* Set ACP coherence flag */
+		if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT())
+			osl_flag_set(osh, OSL_ACP_COHERENCE);
+
+		/* allocate Linux-specific pcie structure here */
+		if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
+		dhdpcie_info->osh = osh;
+		dhdpcie_info->dev = pdev;
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+		/* allocate OS-specific structure */
+		dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
+		if (dhdpcie_osinfo == NULL) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
+				__FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+		dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
+
+		/* Initialize host wake IRQ */
+		spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
+		/* Get customer-specific host wake IRQ parameters: IRQ number and IRQ flags */
+		dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
+			&dhdpcie_osinfo->oob_irq_flags);
+		if (dhdpcie_osinfo->oob_irq_num < 0) {
+			DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef USE_SMMU_ARCH_MSM
+		/* allocate private structure for using SMMU */
+		dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
+		if (dhdpcie_smmu_info == NULL) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
+				__FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
+		dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
+
+		/* Initialize smmu structure */
+		if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
+			DHD_ERROR(("%s: Failed to initialize SMMU\n",
+				__FUNCTION__));
+			break;
+		}
+#endif /* USE_SMMU_ARCH_MSM */
+
+#ifdef DHD_WAKE_STATUS
+		/* Initialize pcie_lock */
+		spin_lock_init(&dhdpcie_info->pcie_lock);
+#endif /* DHD_WAKE_STATUS */
+
+		/* Find the PCI resources, verify the vendor and device ID,
+		 * map the BAR regions and IRQ, and update the structures.
+		 */
+		if (dhdpcie_scan_resource(dhdpcie_info)) {
+			DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__));
+
+			break;
+		}
+
+		/* Bus initialization */
+		bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev);
+		if (!bus) {
+			DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info->bus = bus;
+		bus->is_linkdown = 0;
+
+		/* Get RC Device Handle */
+#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
+		bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
+#else
+		bus->rc_dev = NULL;
+#endif
+
+#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
+	defined(CONFIG_ARCH_APQ8084)
+		brcm_pcie_wake.wake_irq = wlan_oob_irq;
+		brcm_pcie_wake.data = bus;
+#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
+
+#ifdef DONGLE_ENABLE_ISOLATION
+		bus->dhd->dongle_isolation = TRUE;
+#endif /* DONGLE_ENABLE_ISOLATION */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+		bus->pcie_event.user = pdev;
+		bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+		bus->pcie_event.callback = dhdpcie_linkdown_cb;
+		bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
+		msm_pcie_register_event(&bus->pcie_event);
+		bus->no_cfg_restore = 0;
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)
+		bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
+		bus->pcie_event.user = pdev;
+		bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
+		bus->pcie_event.callback = dhdpcie_linkdown_cb;
+		exynos_pcie_register_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+		bus->read_shm_fail = FALSE;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+		if (bus->intr) {
+			/* Register interrupt callback, but mask it (not operational yet). */
+			DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+			dhdpcie_bus_intr_disable(bus);
+
+			if (dhdpcie_request_irq(dhdpcie_info)) {
+				DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+				break;
+			}
+		} else {
+			bus->pollrate = 1;
+			DHD_INFO(("%s: PCIe interrupt function is NOT registered "
+				"due to polling mode\n", __FUNCTION__));
+		}
+
+#if defined(BCM_REQUEST_FW)
+		if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
+			DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
+		}
+		bus->nv_path = NULL;
+		bus->fw_path = NULL;
+#endif /* BCM_REQUEST_FW */
+
+		/* set private data for pci_dev */
+		pci_set_drvdata(pdev, dhdpcie_info);
+
+		if (dhd_download_fw_on_driverload) {
+			if (dhd_bus_start(bus->dhd)) {
+				DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
+				if (!allow_delay_fwdl)
+					break;
+			}
+		} else {
+			/* Set a random MAC address at boot time */
+			get_random_bytes(&bus->dhd->mac.octet[3], 3);
+			/* Adding BRCM OUI */
+			bus->dhd->mac.octet[0] = 0;
+			bus->dhd->mac.octet[1] = 0x90;
+			bus->dhd->mac.octet[2] = 0x4C;
+		}
+
+		/* Attach to the OS network interface */
+		DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
+		if (dhd_register_if(bus->dhd, 0, TRUE)) {
+			DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_init_succeeded = TRUE;
+
+#if defined(MULTIPLE_SUPPLICANT)
+		wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif /* MULTIPLE_SUPPLICANT */
+
+		DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
+		return 0;  /* return  SUCCESS  */
+
+	} while (0);
+	/* reverse the initialization in order in case of error */
+
+	if (bus)
+		dhdpcie_bus_release(bus);
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	if (dhdpcie_osinfo) {
+		MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+	}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef USE_SMMU_ARCH_MSM
+	if (dhdpcie_smmu_info) {
+		MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
+		dhdpcie_info->smmu_cxt = NULL;
+	}
+#endif /* USE_SMMU_ARCH_MSM */
+
+	if (dhdpcie_info)
+		dhdpcie_detach(dhdpcie_info);
+	pci_disable_device(pdev);
+	if (osh)
+		osl_detach(osh);
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* return FAILURE  */
+}
+
+/* Free Linux irq */
+void
+dhdpcie_free_irq(dhd_bus_t *bus)
+{
+	struct pci_dev *pdev = NULL;
+
+	DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
+	if (bus) {
+		pdev = bus->dev;
+		if (bus->irq_registered) {
+			free_irq(pdev->irq, bus);
+			bus->irq_registered = FALSE;
+#ifdef DHD_USE_MSI
+			pci_disable_msi(pdev);
+#endif /* DHD_USE_MSI */
+		} else {
+			DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
+		}
+	}
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+}
+
+/*
+
+Name:  dhdpcie_isr
+
+Parameters:
+
+1: IN int irq   -- interrupt vector
+2: IN void *arg      -- handle to private data structure
+
+Return value:
+
+Status (TRUE or FALSE)
+
+Description:
+The interrupt service routine checks the status register,
+disables the interrupt and queues the DPC if mailbox interrupts are raised.
+*/
+
+
+irqreturn_t
+dhdpcie_isr(int irq, void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	if (dhdpcie_bus_isr(bus))
+		return TRUE;
+	else
+		return FALSE;
+}
+
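+/* Mask the PCIe IRQ without waiting for in-flight handlers to finish
+ * (disable_irq_nosync()), so this is safe to call from atomic context;
+ * dhdpcie_disable_irq() below uses disable_irq(), which does wait.
+ */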
+int
+dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
+{
+	struct pci_dev *dev;
+	if ((bus == NULL) || (bus->dev == NULL)) {
+		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dev = bus->dev;
+	disable_irq_nosync(dev->irq);
+	return BCME_OK;
+}
+
+int
+dhdpcie_disable_irq(dhd_bus_t *bus)
+{
+	struct pci_dev *dev;
+	if ((bus == NULL) || (bus->dev == NULL)) {
+		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dev = bus->dev;
+	disable_irq(dev->irq);
+	return BCME_OK;
+}
+
+int
+dhdpcie_enable_irq(dhd_bus_t *bus)
+{
+	struct pci_dev *dev;
+	if ((bus == NULL) || (bus->dev == NULL)) {
+		DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dev = bus->dev;
+	enable_irq(dev->irq);
+	return BCME_OK;
+}
+
+bool
+dhdpcie_irq_enabled(dhd_bus_t *bus)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+	struct irq_desc *desc = irq_to_desc(bus->dev->irq);
+	/* depth will be zero, if enabled */
+	if (!desc->depth) {
+		DHD_ERROR(("%s: depth:%d\n", __FUNCTION__, desc->depth));
+	}
+	return desc->depth ? FALSE : TRUE;
+#else
+	/* return TRUE by default as there is no support for lower versions */
+	return TRUE;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
+}
+
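+/* Bring the host PCIe link back up. Only MSM root-complex platforms are
+ * handled here (via msm_pcie_pm_control()); on other platforms this is a no-op.
+ */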
+int
+dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret = 0;
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->no_cfg_restore) {
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
+	}
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+		bus->dev, NULL, options);
+	if (bus->no_cfg_restore && !ret) {
+		msm_pcie_recover_config(bus->dev);
+		bus->no_cfg_restore = 0;
+	}
+#else
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+		bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	if (ret) {
+		DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
+		goto done;
+	}
+
+done:
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+	return ret;
+}
+
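+/* Shut down the host PCIe link; the MSM-specific counterpart of
+ * dhdpcie_start_host_pcieclock() above. On other platforms this is a no-op.
+ */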
+int
+dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret = 0;
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	/* Always reset the PCIe host when wifi off */
+	bus->no_cfg_restore = 1;
+
+	if (bus->no_cfg_restore) {
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+	}
+
+	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+		bus->dev, NULL, options);
+#else
+	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+		bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	if (ret) {
+		DHD_ERROR(("Failed to stop PCIe link\n"));
+		goto done;
+	}
+done:
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+	return ret;
+}
+
+int
+dhdpcie_disable_device(dhd_bus_t *bus)
+{
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+	pci_disable_device(bus->dev);
+
+	return 0;
+}
+
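+/* Re-enable the PCI device: restore the previously saved config space
+ * (kernel-version dependent), enable the device and set bus mastering.
+ */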
+int
+dhdpcie_enable_device(dhd_bus_t *bus)
+{
+	int ret = BCME_ERROR;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	dhdpcie_info_t *pch;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		return BCME_ERROR;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \
+	KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
+	/* Use pci_load_and_free_saved_state() to stay compatible
+	 * with kernel versions 3.14.0 through 3.18.41.
+	 */
+	pci_load_and_free_saved_state(bus->dev, &pch->default_state);
+	pch->default_state = pci_store_saved_state(bus->dev);
+#else
+	pci_load_saved_state(bus->dev, pch->default_state);
+#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */
+
+	pci_restore_state(bus->dev);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
+
+	ret = pci_enable_device(bus->dev);
+	if (ret) {
+		pci_disable_device(bus->dev);
+	} else {
+		pci_set_master(bus->dev);
+	}
+
+	return ret;
+}
+
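+/* Map the dongle register space (BAR0) and the TCM (resource index 2) into
+ * the kernel address space and record the mappings in dhdpcie_info and bus.
+ */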
+int
+dhdpcie_alloc_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+	phys_addr_t bar0_addr, bar1_addr;
+	ulong bar1_size;
+
+	do {
+		if (bus == NULL) {
+			DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		if (bus->dev == NULL) {
+			DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info = pci_get_drvdata(bus->dev);
+		if (dhdpcie_info == NULL) {
+			DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		bar0_addr = pci_resource_start(bus->dev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(bus->dev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(bus->dev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			break;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		if (!dhdpcie_info->regs) {
+			DHD_ERROR(("%s: ioremap() for regs failed\n", __FUNCTION__));
+			break;
+		}
+
+		bus->regs = dhdpcie_info->regs;
+		dhdpcie_info->tcm_size =
+			(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+		if (!dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
+			REG_UNMAP(dhdpcie_info->regs);
+			bus->regs = NULL;
+			break;
+		}
+
+		bus->tcm = dhdpcie_info->tcm;
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0;
+	} while (0);
+
+	return BCME_ERROR;
+}
+
+void
+dhdpcie_free_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdpcie_info = pci_get_drvdata(bus->dev);
+	if (dhdpcie_info == NULL) {
+		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->regs) {
+		REG_UNMAP(dhdpcie_info->regs);
+		bus->regs = NULL;
+	}
+
+	if (bus->tcm) {
+		REG_UNMAP(dhdpcie_info->tcm);
+		bus->tcm = NULL;
+	}
+}
+
+int
+dhdpcie_bus_request_irq(struct dhd_bus *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+	int ret = 0;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dhdpcie_info = pci_get_drvdata(bus->dev);
+	if (dhdpcie_info == NULL) {
+		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+		dhdpcie_bus_intr_disable(bus);
+		ret = dhdpcie_request_irq(dhdpcie_info);
+		if (ret) {
+			DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
+				__FUNCTION__, ret));
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
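+/* Enable or disable the out-of-band host wake IRQ under oob_irq_spinlock,
+ * tracking the current state to avoid unbalanced enable/disable calls.
+ */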
+void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
+{
+	unsigned long flags;
+	dhdpcie_info_t *pch;
+	dhdpcie_os_info_t *dhdpcie_osinfo;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+	spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+	if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
+		(dhdpcie_osinfo->oob_irq_num > 0)) {
+		if (enable) {
+			enable_irq(dhdpcie_osinfo->oob_irq_num);
+		} else {
+			disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
+		}
+		dhdpcie_osinfo->oob_irq_enabled = enable;
+	}
+	spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+}
+
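+/* Out-of-band host wake interrupt handler: mask the OOB IRQ, account the
+ * wake event, kick a runtime PM resume and hold a timed wake lock while the
+ * host is about to suspend.
+ */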
+static irqreturn_t wlan_oob_irq(int irq, void *data)
+{
+	dhd_bus_t *bus;
+	DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
+	bus = (dhd_bus_t *)data;
+	dhdpcie_oob_intr_set(bus, FALSE);
+#ifdef DHD_WAKE_STATUS
+#ifdef DHD_PCIE_RUNTIMEPM
+	/* Avoid counting wake-ups that were caused by runtime PM itself */
+	if (bus->chk_pm)
+#endif /* DHD_PCIE_RUNTIMEPM */
+	{
+		bcmpcie_set_get_wake(bus, 1);
+	}
+#endif /* DHD_WAKE_STATUS */
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
+#endif /* DHD_PCIE_RUNTIMEPM */
+	if (bus->dhd->up && bus->oob_presuspend) {
+		DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
+	}
+	return IRQ_HANDLED;
+}
+
+int dhdpcie_oob_intr_register(dhd_bus_t *bus)
+{
+	int err = 0;
+	dhdpcie_info_t *pch;
+	dhdpcie_os_info_t *dhdpcie_osinfo;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+	if (dhdpcie_osinfo->oob_irq_registered) {
+		DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
+		return -EBUSY;
+	}
+
+	if (dhdpcie_osinfo->oob_irq_num > 0) {
+		printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__,
+			(int)dhdpcie_osinfo->oob_irq_num,
+			(int)dhdpcie_osinfo->oob_irq_flags);
+		err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
+			dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
+			bus);
+		if (err) {
+			DHD_ERROR(("%s: request_irq failed with %d\n",
+				__FUNCTION__, err));
+			return err;
+		}
+#if defined(DISABLE_WOWLAN)
+		printf("%s: disable_irq_wake\n", __FUNCTION__);
+		dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
+#else
+		printf("%s: enable_irq_wake\n", __FUNCTION__);
+		err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
+		if (!err) {
+			dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
+		} else
+			printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err);
+#endif
+		dhdpcie_osinfo->oob_irq_enabled = TRUE;
+	}
+
+	dhdpcie_osinfo->oob_irq_registered = TRUE;
+
+	return err;
+}
+
+void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
+{
+	int err = 0;
+	dhdpcie_info_t *pch;
+	dhdpcie_os_info_t *dhdpcie_osinfo;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+	if (!dhdpcie_osinfo->oob_irq_registered) {
+		DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
+		return;
+	}
+	if (dhdpcie_osinfo->oob_irq_num > 0) {
+		if (dhdpcie_osinfo->oob_irq_wake_enabled) {
+			err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
+			if (!err) {
+				dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
+			}
+		}
+		if (dhdpcie_osinfo->oob_irq_enabled) {
+			disable_irq(dhdpcie_osinfo->oob_irq_num);
+			dhdpcie_osinfo->oob_irq_enabled = FALSE;
+		}
+		free_irq(dhdpcie_osinfo->oob_irq_num, bus);
+	}
+	dhdpcie_osinfo->oob_irq_registered = FALSE;
+}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef PCIE_OOB
+void dhdpcie_oob_init(dhd_bus_t *bus)
+{
+	gpio_handle_val = get_handle(OOB_PORT);
+	if (gpio_handle_val < 0)
+	{
+		DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
+		ASSERT(FALSE);
+	}
+
+	gpio_direction = 0;
+	ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
+
+	/* Note BT core is also enabled here */
+	gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+	gpio_write_port(gpio_handle_val, gpio_port);
+
+	gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+	ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
+
+	bus->oob_enabled = TRUE;
+	bus->oob_presuspend = FALSE;
+
+	/* drive the Device_Wake GPIO low on startup */
+	bus->device_wake_state = TRUE;
+	dhd_bus_set_device_wake(bus, FALSE);
+	dhd_bus_doorbell_timeout_reset(bus);
+
+}
+
+void
+dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
+{
+	DHD_INFO(("Set BT_REG_ON to %d\n", val));
+	if (val)
+	{
+		gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
+		gpio_write_port(gpio_handle_val, gpio_port);
+	} else {
+		gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
+		gpio_write_port(gpio_handle_val, gpio_port);
+	}
+}
+
+int
+dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
+{
+	int ret;
+	uint8 val;
+	ret = gpio_read_port(gpio_handle_val, &val);
+
+	if (ret < 0) {
+		DHD_ERROR(("gpio_read_port returns %d\n", ret));
+		return ret;
+	}
+
+	if (val & (1 << BIT_BT_REG_ON))
+	{
+		ret = 1;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int
+dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	if (bus->device_wake_state != val)
+	{
+		DHD_INFO(("Set Device_Wake to %d\n", val));
+
+		if (bus->oob_enabled && !bus->oob_presuspend)
+		{
+			if (val)
+			{
+				gpio_port = gpio_port | (1 << DEVICE_WAKE);
+				gpio_write_port_non_block(gpio_handle_val, gpio_port);
+			} else {
+				gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
+				gpio_write_port_non_block(gpio_handle_val, gpio_port);
+			}
+		}
+
+		bus->device_wake_state = val;
+	}
+	return BCME_OK;
+}
+
+INLINE void
+dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	/* TODO: In-band Device_Wake is not currently supported, so this function
+	 * is left empty; it can be used later to add that support.
+	 */
+}
+#endif /* PCIE_OOB */
+
+#ifdef DHD_PCIE_RUNTIMEPM
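+/* Runtime PM idle check, called periodically (dhd_runtimepm_ms tick).
+ * Once idlecount reaches idletime and the bus is idle, stop the net TX
+ * queues, suspend the bus and block on rpm_queue until dhd_runtime_bus_wake()
+ * requests a resume. Returns TRUE if a suspend/resume cycle was performed.
+ */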
+bool dhd_runtimepm_state(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus;
+	unsigned long flags;
+	bus = dhd->bus;
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	bus->idlecount++;
+
+	DHD_TRACE(("%s : Enter \n", __FUNCTION__));
+	if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+		bus->idlecount = 0;
+		if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
+			bus->bus_wake = 0;
+			DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
+			bus->runtime_resume_done = FALSE;
+			/* stop all interface network queue. */
+			dhd_bus_stop_queue(bus);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+			DHD_ERROR(("%s: DHD Idle state!! -  idletime :%d, wdtick :%d \n",
+					__FUNCTION__, bus->idletime, dhd_runtimepm_ms));
+			/* If RPM suspend fails, return FALSE so that it will be retried */
+			if (dhdpcie_set_suspend_resume(bus, TRUE)) {
+				DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
+				DHD_GENERAL_LOCK(dhd, flags);
+				DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
+				dhd_os_busbusy_wake(bus->dhd);
+				bus->runtime_resume_done = TRUE;
+				/* Without this the net TX queue can get stuck */
+				dhd_bus_start_queue(bus);
+				DHD_GENERAL_UNLOCK(dhd, flags);
+				smp_wmb();
+				wake_up_interruptible(&bus->rpm_queue);
+				return FALSE;
+			}
+
+			DHD_GENERAL_LOCK(dhd, flags);
+			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
+			DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd);
+			/* Make sure the net TX queue is active */
+			dhd_bus_start_queue(bus);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+
+			wait_event_interruptible(bus->rpm_queue, bus->bus_wake);
+
+			DHD_GENERAL_LOCK(dhd, flags);
+			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd);
+			DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+
+			dhdpcie_set_suspend_resume(bus, FALSE);
+
+			DHD_GENERAL_LOCK(dhd, flags);
+			DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd);
+			dhd_os_busbusy_wake(bus->dhd);
+			/* Inform the wake up context that Resume is over */
+			bus->runtime_resume_done = TRUE;
+			/* Make sure the net TX queue is active */
+			dhd_bus_start_queue(bus);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+
+			smp_wmb();
+			wake_up_interruptible(&bus->rpm_queue);
+			DHD_ERROR(("%s : runtime resume ended \n", __FUNCTION__));
+			return TRUE;
+		} else {
+			DHD_GENERAL_UNLOCK(dhd, flags);
+			/* One of the contexts (TX, IOVAR or RX) is busy,
+			 * so we should not suspend
+			 */
+			DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
+				__FUNCTION__, dhd->dhd_bus_busy_state));
+			return FALSE;
+		}
+	}
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+	return FALSE;
+} /* dhd_runtimepm_state */
+
+/*
+ * dhd_runtime_bus_wake
+ *  TRUE - the call is related to the runtime PM context
+ *  FALSE - the call is not involved in the runtime PM context
+ */
+bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
+{
+	unsigned long flags;
+	bus->idlecount = 0;
+	DHD_TRACE(("%s : enter\n", __FUNCTION__));
+	if (bus->dhd->up == FALSE) {
+		DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) {
+		/* Wake up the RPM state thread if suspend is in progress or already done */
+		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) ||
+				DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
+			bus->bus_wake = 1;
+
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+			DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr));
+			smp_wmb();
+			wake_up_interruptible(&bus->rpm_queue);
+		/* No need to wake up the RPM state thread */
+		} else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		}
+
+		/* Callers with wait = TRUE block here until the resume is done */
+		if (wait) {
+			wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done);
+		} else {
+			DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
+		}
+		/* If it is called from RPM context, it returns TRUE */
+		return TRUE;
+	}
+
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return FALSE;
+}
+
+bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return dhd_runtime_bus_wake(bus, wait, func_addr);
+}
+
+void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bus->idletime = 0;
+}
+
+bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->runtime_resume_done;
+}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+struct device * dhd_bus_to_dev(dhd_bus_t *bus)
+{
+	struct pci_dev *pdev;
+	pdev = bus->dev;
+
+	if (pdev)
+		return &pdev->dev;
+	else
+		return NULL;
+}
+
+#ifdef HOFFLOAD_MODULES
+void
+dhd_free_module_memory(struct dhd_bus *bus, struct module_metadata *hmem)
+{
+	struct device *dev = &bus->dev->dev;
+	if (hmem) {
+		dma_unmap_single(dev, (dma_addr_t) hmem->data_addr, hmem->size, DMA_TO_DEVICE);
+		kfree(hmem->data);
+		hmem->data = NULL;
+		hmem->size = 0;
+	} else {
+		DHD_ERROR(("dev:%p hmem is NULL, nothing to unmap\n", dev));
+	}
+}
+
+void *
+dhd_alloc_module_memory(struct dhd_bus *bus, uint32_t size, struct module_metadata *hmem)
+{
+	struct device *dev = &bus->dev->dev;
+	if (!hmem->data) {
+		hmem->data = kzalloc(size, GFP_KERNEL);
+		if (!hmem->data) {
+			DHD_ERROR(("dev:%p mem alloc failure\n", dev));
+			return NULL;
+		}
+	}
+	hmem->size = size;
+	DHD_INFO(("module size: 0x%x \n", hmem->size));
+	hmem->data_addr = (u64) dma_map_single(dev, hmem->data, hmem->size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, hmem->data_addr)) {
+		DHD_ERROR(("dev:%p dma mapping error\n", dev));
+		kfree(hmem->data);
+		hmem->data = NULL;
+		return hmem->data;
+	}
+	return hmem->data;
+}
+#endif /* HOFFLOAD_MODULES */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.c b/drivers/net/wireless/bcmdhd/dhd_pno.c
new file mode 100644
index 0000000..4cd70c4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.c
@@ -0,0 +1,4050 @@
+/*
+ * Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload and Wi-Fi Location Service (WLS) code.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pno.c 707287 2017-06-27 06:44:29Z $
+ */
+
+#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT)
+#error "GSCAN needs PNO to be enabled!"
+#endif
+
+#ifdef PNO_SUPPORT
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <bcmevent.h>
+#include <dhd.h>
+#include <dhd_pno.h>
+#include <dhd_dbg.h>
+#ifdef GSCAN_SUPPORT
+#include <linux/gcd.h>
+#endif /* GSCAN_SUPPORT */
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif /* WL_CFG80211 */
+
+#ifdef __BIG_ENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* __BIG_ENDIAN */
+
+#define NULL_CHECK(p, s, err)  \
+			do { \
+				if (!(p)) { \
+					printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+					err = BCME_ERROR; \
+					return err; \
+				} \
+			} while (0)
+#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state)
+#define PNO_BESTNET_LEN 1024
+#define PNO_ON 1
+#define PNO_OFF 0
+#define CHANNEL_2G_MAX 14
+#define CHANNEL_5G_MAX 165
+#define MAX_NODE_CNT 5
+#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE)
+#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000)  \
+						- (uint32)(timestamp2/1000)))
+#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1)  \
+						- (uint32)(timestamp2)))
+#define TIMESPEC_TO_US(ts)  (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+							(ts).tv_nsec / NSEC_PER_USEC)
+
+#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====")
+#define TIME_MIN_DIFF 5
+
+#define EVENT_DATABUF_MAXLEN	(512 - sizeof(bcm_event_t))
+#define EVENT_MAX_NETCNT_V1 \
+	((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v1_t)) \
+	/ sizeof(wl_pfn_net_info_v1_t) + 1)
+#define EVENT_MAX_NETCNT_V2 \
+	((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v2_t)) \
+	/ sizeof(wl_pfn_net_info_v2_t) + 1)
+
+#ifdef GSCAN_SUPPORT
+static int _dhd_pno_flush_ssid(dhd_pub_t *dhd);
+static wl_pfn_gscan_ch_bucket_cfg_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state,
+      uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw);
+#endif /* GSCAN_SUPPORT */
+static int dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16  scan_fr, int pno_repeat,
+		int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+static inline bool
+is_dfs(uint16 channel)
+{
+	if (channel >= 52 && channel <= 64)			/* class 2 */
+		return TRUE;
+	else if (channel >= 100 && channel <= 140)	/* class 4 */
+		return TRUE;
+	else
+		return FALSE;
+}
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+	int pfn = 0;
+	int err;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	/* Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn(error : %d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = DHD_PNO_DISABLED;
+	err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n",
+			__FUNCTION__, err));
+	}
+exit:
+	return err;
+}
+
+bool
+dhd_is_pno_supported(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+
+	if (!dhd || !dhd->pno_state) {
+		DHD_ERROR(("NULL POINTER : %s\n",
+			__FUNCTION__));
+		return FALSE;
+	}
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	return WLS_SUPPORTED(_pno_state);
+}
+
+bool
+dhd_is_legacy_pno_enabled(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+
+	if (!dhd || !dhd->pno_state) {
+		DHD_ERROR(("NULL POINTER : %s\n",
+			__FUNCTION__));
+		return FALSE;
+	}
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	return ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) != 0);
+}
+
+#ifdef GSCAN_SUPPORT
+static uint64
+convert_fw_rel_time_to_systime(struct timespec *ts, uint32 fw_ts_ms)
+{
+	return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000));
+}
+
+static void
+dhd_pno_idx_to_ssid(struct dhd_pno_gscan_params *gscan_params,
+            dhd_epno_results_t *res, uint32 idx)
+{
+	dhd_pno_ssid_t *iter, *next;
+	int i;
+
+	/* If idx doesn't make sense */
+	if (idx >= gscan_params->epno_cfg.num_epno_ssid) {
+		DHD_ERROR(("No match, idx %d num_ssid %d\n", idx,
+			gscan_params->epno_cfg.num_epno_ssid));
+		goto exit;
+	}
+
+	if (gscan_params->epno_cfg.num_epno_ssid > 0) {
+		i = 0;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		list_for_each_entry_safe(iter, next,
+			&gscan_params->epno_cfg.epno_ssid_list, list) {
+			if (i++ == idx) {
+				memcpy(res->ssid, iter->SSID, iter->SSID_len);
+				res->ssid_len = iter->SSID_len;
+				return;
+			}
+		}
+	}
+exit:
+	/* If we are here then there was no match */
+	res->ssid[0] = '\0';
+	res->ssid_len = 0;
+	return;
+}
+
+/* Translate HAL flag bitmask to BRCM FW flag bitmask */
+void dhd_pno_translate_epno_fw_flags(uint32 *flags)
+{
+	uint32 in_flags, fw_flags = 0;
+	in_flags = *flags;
+
+	if (in_flags & DHD_EPNO_A_BAND_TRIG) {
+		fw_flags |= WL_PFN_SSID_A_BAND_TRIG;
+	}
+
+	if (in_flags & DHD_EPNO_BG_BAND_TRIG) {
+		fw_flags |= WL_PFN_SSID_BG_BAND_TRIG;
+	}
+
+	if (!(in_flags & DHD_EPNO_STRICT_MATCH) &&
+	    !(in_flags & DHD_EPNO_HIDDEN_SSID)) {
+		fw_flags |= WL_PFN_SSID_IMPRECISE_MATCH;
+	}
+
+	if (in_flags & DHD_EPNO_SAME_NETWORK) {
+		fw_flags |= WL_PFN_SSID_SAME_NETWORK;
+	}
+
+	/* Add any hard coded flags needed */
+	fw_flags |= WL_PFN_SUPPRESS_AGING_MASK;
+	*flags = fw_flags;
+
+	return;
+}
+
+/* Translate HAL auth bitmask to BRCM FW bitmask */
+void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth)
+{
+	switch (*wpa_auth) {
+		case DHD_PNO_AUTH_CODE_OPEN:
+			*wpa_auth = WPA_AUTH_DISABLED;
+			break;
+		case DHD_PNO_AUTH_CODE_PSK:
+			*wpa_auth = (WPA_AUTH_PSK | WPA2_AUTH_PSK);
+			break;
+		case DHD_PNO_AUTH_CODE_EAPOL:
+			*wpa_auth = ~WPA_AUTH_NONE;
+			break;
+		default:
+			DHD_ERROR(("%s: Unknown auth %d", __FUNCTION__, *wpa_auth));
+			*wpa_auth = WPA_AUTH_PFN_ANY;
+			break;
+	}
+	return;
+}
+
+/* Cleanup all results */
+static void
+dhd_gscan_clear_all_batch_results(dhd_pub_t *dhd)
+{
+	struct dhd_pno_gscan_params *gscan_params;
+	dhd_pno_status_info_t *_pno_state;
+	gscan_results_cache_t *iter;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+	iter = gscan_params->gscan_batch_cache;
+	/* Mark everything as consumed */
+	while (iter) {
+		iter->tot_consumed = iter->tot_count;
+		iter = iter->next;
+	}
+	dhd_gscan_batch_cache_cleanup(dhd);
+	return;
+}
+
+static int
+_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+static int
+_dhd_pno_flush_ssid(dhd_pub_t *dhd)
+{
+	int err;
+	wl_pfn_t pfn_elem;
+	memset(&pfn_elem, 0, sizeof(wl_pfn_t));
+	pfn_elem.flags = htod32(WL_PFN_FLUSH_ALL_SSIDS);
+	err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_elem, sizeof(wl_pfn_t), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+	}
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+static bool
+is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params)
+{
+	smp_rmb();
+	return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE);
+}
+#endif /* GSCAN_SUPPORT */
+
+static int
+_dhd_pno_suspend(dhd_pub_t *dhd)
+{
+	int err;
+	int suspend = 1;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err));
+		goto exit;
+
+	}
+	_pno_state->pno_status = DHD_PNO_SUSPEND;
+exit:
+	return err;
+}
+static int
+_dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (enable & 0xfffe) {
+		DHD_ERROR(("%s invalid value\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+	if (!dhd_support_sta_mode(dhd)) {
+		DHD_ERROR(("PNO is not allowed for non-STA mode"));
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (enable) {
+		if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+			dhd_is_associated(dhd, 0, NULL)) {
+			DHD_ERROR(("%s Legacy PNO mode cannot be enabled "
+				"in assoc mode, ignoring it\n", __FUNCTION__));
+			err = BCME_BADOPTION;
+			goto exit;
+		}
+	}
+	/* Enable/Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = (enable)?
+		DHD_PNO_ENABLED : DHD_PNO_DISABLED;
+	if (!enable)
+		_pno_state->pno_mode = DHD_PNO_NONE_MODE;
+
+	DHD_PNO(("%s set pno as %s\n",
+		__FUNCTION__, enable ? "Enable" : "Disable"));
+exit:
+	return err;
+}
+
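+/* Build a wl_pfn_param_t from the per-mode parameters and program it into
+ * the firmware with the "pfn_set" iovar. For batch scans the negotiated
+ * mscan value is returned instead of the error code.
+ */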
+static int
+_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	wl_pfn_param_t pfn_param;
+	dhd_pno_params_t *_params;
+	dhd_pno_status_info_t *_pno_state;
+	bool combined_scan = FALSE;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	memset(&pfn_param, 0, sizeof(pfn_param));
+
+	/* set pfn parameters */
+	pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) |
+		(ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT));
+	if (mode == DHD_PNO_LEGACY_MODE) {
+		/* check and set extra pno params */
+		if ((pno_params->params_legacy.pno_repeat != 0) ||
+			(pno_params->params_legacy.pno_freq_expo_max != 0)) {
+			pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+			pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat);
+			pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max);
+		}
+		/* set up pno scan fr */
+		if (pno_params->params_legacy.scan_fr != 0)
+			pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr);
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			DHD_PNO(("will enable combined scan with BATCHING SCAN MODE\n"));
+			mode |= DHD_PNO_BATCH_MODE;
+			combined_scan = TRUE;
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n"));
+			mode |= DHD_PNO_HOTLIST_MODE;
+			combined_scan = TRUE;
+		}
+#ifdef GSCAN_SUPPORT
+		else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+			DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n"));
+			mode |= DHD_PNO_GSCAN_MODE;
+		}
+#endif /* GSCAN_SUPPORT */
+	}
+	if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* Scan frequency of 30 sec */
+		pfn_param.scan_freq = htod32(30);
+		/* slow adapt scan is off by default */
+		pfn_param.slow_freq = htod32(0);
+		/* RSSI margin of 30 dBm */
+		pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+		/* Network timeout 60 sec */
+		pfn_param.lost_network_timeout = htod32(60);
+		/* best n = 2 by default */
+		pfn_param.bestn = DEFAULT_BESTN;
+		/* mscan m=0 by default, so not record best networks by default */
+		pfn_param.mscan = DEFAULT_MSCAN;
+		/*  default repeat = 10 */
+		pfn_param.repeat = DEFAULT_REPEAT;
+		/* by default, maximum scan interval = 2^2 * scan_freq
+		 * when adaptive scan is turned on
+		 */
+		pfn_param.exp = DEFAULT_EXP;
+		if (mode == DHD_PNO_BATCH_MODE) {
+			/* In case of BATCH SCAN */
+			if (pno_params->params_batch.bestn)
+				pfn_param.bestn = pno_params->params_batch.bestn;
+			if (pno_params->params_batch.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr);
+			if (pno_params->params_batch.mscan)
+				pfn_param.mscan = pno_params->params_batch.mscan;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		} else if (mode == DHD_PNO_HOTLIST_MODE) {
+			/* In case of HOTLIST SCAN */
+			if (pno_params->params_hotlist.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr);
+			pfn_param.bestn = 0;
+			pfn_param.repeat = 0;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		if (combined_scan) {
+			/* Disable Adaptive Scan */
+			pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT));
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+			pfn_param.repeat = 0;
+			pfn_param.exp = 0;
+			if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+				/* In case of Legacy PNO + BATCH SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+				if (_params->params_batch.bestn)
+					pfn_param.bestn = _params->params_batch.bestn;
+				if (_params->params_batch.scan_fr)
+					pfn_param.scan_freq = htod32(_params->params_batch.scan_fr);
+				if (_params->params_batch.mscan)
+					pfn_param.mscan = _params->params_batch.mscan;
+			} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+				/* In case of Legacy PNO + HOTLIST SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+				if (_params->params_hotlist.scan_fr)
+					pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr);
+				pfn_param.bestn = 0;
+				pfn_param.repeat = 0;
+			}
+		}
+	}
+#ifdef GSCAN_SUPPORT
+	if (mode & DHD_PNO_GSCAN_MODE) {
+		uint32 lost_network_timeout;
+
+		pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr);
+		if (pno_params->params_gscan.mscan) {
+			pfn_param.bestn = pno_params->params_gscan.bestn;
+			pfn_param.mscan =  pno_params->params_gscan.mscan;
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		/* RSSI margin of 30 dBm */
+		pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+		pfn_param.repeat = 0;
+		pfn_param.exp = 0;
+		pfn_param.slow_freq = 0;
+		pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			dhd_pno_params_t *params;
+
+			params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+
+			pfn_param.scan_freq = gcd(pno_params->params_gscan.scan_fr,
+			                 params->params_legacy.scan_fr);
+
+			if ((params->params_legacy.pno_repeat != 0) ||
+				(params->params_legacy.pno_freq_expo_max != 0)) {
+				pfn_param.repeat = (uchar) (params->params_legacy.pno_repeat);
+				pfn_param.exp = (uchar) (params->params_legacy.pno_freq_expo_max);
+			}
+		}
+
+		lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq *
+		                        pfn_param.scan_freq *
+		                        pno_params->params_gscan.lost_ap_window);
+		if (lost_network_timeout) {
+			pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout,
+			                                 GSCAN_MIN_BSSID_TIMEOUT));
+		} else {
+			pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT);
+		}
+	} else
+#endif /* GSCAN_SUPPORT */
+	{
+		if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
+			pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
+			DHD_ERROR(("%s pno freq(%d sec) is not valid, must be %d-%d sec\n",
+				__FUNCTION__, dtoh32(pfn_param.scan_freq),
+				PNO_SCAN_MIN_FW_SEC, PNO_SCAN_MAX_FW_SEC));
+			err = BCME_BADARG;
+			goto exit;
+		}
+	}
+
+	err = dhd_set_rand_mac_oui(dhd);
+	/* Ignore if the chip doesn't support the feature */
+	if (err < 0 && err != BCME_UNSUPPORTED) {
+		DHD_ERROR(("%s : failed to set random mac for PNO scan, %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (mode == DHD_PNO_BATCH_MODE ||
+		((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan))
+#else
+	if (mode == DHD_PNO_BATCH_MODE)
+#endif /* GSCAN_SUPPORT */
+	{
+		int _tmp = pfn_param.bestn;
+		/* set bestn to calculate the max mscan which firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), NULL, 0, TRUE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		/* get max mscan which the firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", NULL, 0, (char *)&_tmp, sizeof(_tmp), FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		pfn_param.mscan = MIN(pfn_param.mscan, _tmp);
+		DHD_PNO((" returned mscan : %d, set bestn : %d mscan %d\n", _tmp, pfn_param.bestn,
+		        pfn_param.mscan));
+	}
+	err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	/* for batch scan we need to return mscan instead of err */
+	err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err;
+exit:
+	return err;
+}
+
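+/* Convert the SSID list into an array of wl_pfn_t entries and hand it to
+ * the firmware with the "pfn_add" iovar.
+ */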
+static int
+_dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head *ssid_list, int nssid)
+{
+	int err = BCME_OK;
+	int i = 0, mem_needed;
+	wl_pfn_t *pfn_elem_buf;
+	struct dhd_pno_ssid *iter, *next;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (!nssid) {
+		NULL_CHECK(ssid_list, "ssid list is NULL", err);
+		return BCME_ERROR;
+	}
+	mem_needed = (sizeof(wl_pfn_t) * nssid);
+	pfn_elem_buf = (wl_pfn_t *) kzalloc(mem_needed, GFP_KERNEL);
+	if (!pfn_elem_buf) {
+		DHD_ERROR(("%s: Can't malloc %d bytes!\n", __FUNCTION__, mem_needed));
+		return BCME_NOMEM;
+	}
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(iter, next, ssid_list, list) {
+		pfn_elem_buf[i].infra = htod32(1);
+		pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM);
+		pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth);
+		pfn_elem_buf[i].flags = htod32(iter->flags);
+		if (iter->hidden)
+			pfn_elem_buf[i].flags |= htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+		/* If a single RSSI threshold is defined, use that */
+#ifdef PNO_MIN_RSSI_TRIGGER
+		pfn_elem_buf[i].flags |= ((PNO_MIN_RSSI_TRIGGER & 0xFF) << WL_PFN_RSSI_SHIFT);
+#else
+		pfn_elem_buf[i].flags |= ((iter->rssi_thresh & 0xFF) << WL_PFN_RSSI_SHIFT);
+#endif /* PNO_MIN_RSSI_TRIGGER */
+		memcpy((char *)pfn_elem_buf[i].ssid.SSID, iter->SSID,
+			iter->SSID_len);
+		pfn_elem_buf[i].ssid.SSID_len = iter->SSID_len;
+		DHD_PNO(("%s size = %d hidden = %d flags = %x rssi_thresh %d\n",
+			iter->SSID, iter->SSID_len, iter->hidden,
+			iter->flags, iter->rssi_thresh));
+		if (++i >= nssid) {
+			/* shouldn't happen */
+			break;
+		}
+	}
+	err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+	}
+	kfree(pfn_elem_buf);
+	return err;
+}
+
+/* qsort compare function */
+static int
+_dhd_pno_cmpfunc(const void *a, const void *b)
+{
+	return (*(const uint16*)a - *(const uint16*)b);
+}
+
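+/* Merge two sorted channel lists into d_chan_list, dropping duplicates;
+ * *nchan is set to the merged count.
+ */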
+static int
+_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan,
+	uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2)
+{
+	int err = BCME_OK;
+	int i = 0, j = 0, k = 0;
+	uint16 tmp;
+	NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	NULL_CHECK(nchan, "nchan is NULL", err);
+	NULL_CHECK(chan_list1, "chan_list1 is NULL", err);
+	NULL_CHECK(chan_list2, "chan_list2 is NULL", err);
+	/* chan_list1 and chan_list2 must already be sorted */
+	while (i < nchan1 && j < nchan2) {
+		tmp = chan_list1[i] < chan_list2[j]?
+			chan_list1[i++] : chan_list2[j++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (i < nchan1) {
+		tmp = chan_list1[i++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (j < nchan2) {
+		tmp = chan_list2[j++];
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+
+	}
+	*nchan = k;
+	return err;
+}
+
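+/* Query the valid channel list from the firmware (WLC_GET_VALID_CHANNELS)
+ * and filter it by band and, optionally, DFS channels.
+ */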
+static int
+_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list,
+	int *nchan, uint8 band, bool skip_dfs)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 chan_buf[WL_NUMCHANNELS + 1];
+	wl_uint32_list_t *list;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (*nchan) {
+		NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	}
+	memset(&chan_buf, 0, sizeof(chan_buf));
+	list = (wl_uint32_list_t *) (void *)chan_buf;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0);
+	if (err < 0) {
+		DHD_ERROR(("failed to get channel list (err: %d)\n", err));
+		goto exit;
+	}
+	for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) {
+		if (band == WLC_BAND_2G) {
+			if (dtoh32(list->element[i]) > CHANNEL_2G_MAX)
+				continue;
+		} else if (band == WLC_BAND_5G) {
+			if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX)
+				continue;
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+
+		} else if (band == WLC_BAND_AUTO) {
+			if (skip_dfs || !is_dfs(dtoh32(list->element[i])))
+				continue;
+
+		} else { /* All channels */
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+		}
+		if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) {
+			d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
+		} else {
+			err = BCME_BADCHAN;
+			goto exit;
+		}
+	}
+	*nchan = j;
+exit:
+	return err;
+}
+
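+/* Flatten the expired batch scan results into the textual batch-result
+ * format (bssid=/ssid=/freq=/level=/age=/dist=/distSd= records with scan
+ * and result end markers), freeing entries as they are emitted.
+ * Returns the number of bytes written to buf.
+ */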
+static int
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch,
+	char *buf, int nbufsize)
+{
+	int err = BCME_OK;
+	int bytes_written = 0, nreadsize = 0;
+	int t_delta = 0;
+	int nleftsize = nbufsize;
+	uint8 cnt = 0;
+	char *bp = buf;
+	char eabuf[ETHER_ADDR_STR_LEN];
+#ifdef PNO_DEBUG
+	char *_base_bp;
+	char msg[150];
+#endif
+	dhd_pno_bestnet_entry_t *iter, *next;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	NULL_CHECK(params_batch, "params_batch is NULL", err);
+	if (nbufsize > 0)
+		NULL_CHECK(buf, "buf is NULL", err);
+	/* initialize the buffer */
+	memset(buf, 0, nbufsize);
+	DHD_PNO(("%s enter \n", __FUNCTION__));
+	/* # of scans */
+	if (!params_batch->get_batch.batch_started) {
+		bp += nreadsize = snprintf(bp, nleftsize, "scancount=%d\n",
+			params_batch->get_batch.expired_tot_scan_cnt);
+		nleftsize -= nreadsize;
+		params_batch->get_batch.batch_started = TRUE;
+	}
+	DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
+	/* pre-estimate the scan count at which this report will end */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(siter, snext,
+		&params_batch->get_batch.expired_scan_results_list, list) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			/* stop if the remaining size is less than the best-header total size */
+			if (nleftsize <=
+				(phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD))
+				goto exit;
+			/* increase scan count */
+			cnt++;
+			/* # best of each scan */
+			DHD_PNO(("\n<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt));
+			/* attribute of the scan */
+			if (phead->reason & PNO_STATUS_ABORT_MASK) {
+				bp += nreadsize = snprintf(bp, nleftsize, "trunc\n");
+				nleftsize -= nreadsize;
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list, list) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+				t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+				_base_bp = bp;
+				memset(msg, 0, sizeof(msg));
+#endif
+				/* BSSID info */
+				bp += nreadsize = snprintf(bp, nleftsize, "bssid=%s\n",
+				bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+				nleftsize -= nreadsize;
+				/* SSID */
+				bp += nreadsize = snprintf(bp, nleftsize, "ssid=%s\n", iter->SSID);
+				nleftsize -= nreadsize;
+				/* channel */
+				bp += nreadsize = snprintf(bp, nleftsize, "freq=%d\n",
+				wf_channel2mhz(iter->channel,
+				iter->channel <= CH_MAX_2G_CHANNEL?
+				WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+				nleftsize -= nreadsize;
+				/* RSSI */
+				bp += nreadsize = snprintf(bp, nleftsize, "level=%d\n", iter->RSSI);
+				nleftsize -= nreadsize;
+				/* add the time spent in the driver to the firmware timestamp */
+				iter->timestamp += t_delta;
+				bp += nreadsize = snprintf(bp, nleftsize,
+					"age=%d\n", iter->timestamp);
+				nleftsize -= nreadsize;
+				/* RTT0 */
+				bp += nreadsize = snprintf(bp, nleftsize, "dist=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt0);
+				nleftsize -= nreadsize;
+				/* RTT1 */
+				bp += nreadsize = snprintf(bp, nleftsize, "distSd=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt1);
+				nleftsize -= nreadsize;
+				bp += nreadsize = snprintf(bp, nleftsize, "%s", AP_END_MARKER);
+				nleftsize -= nreadsize;
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+				memcpy(msg, _base_bp, bp - _base_bp);
+				DHD_PNO(("Entry : \n%s", msg));
+#endif
+			}
+			bp += nreadsize = snprintf(bp, nleftsize, "%s", SCAN_END_MARKER);
+			DHD_PNO(("%s", SCAN_END_MARKER));
+			nleftsize -= nreadsize;
+			pprev = phead;
+			/* reset the header */
+			siter->bestnetheader = phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+			siter->cnt_header--;
+		}
+		if (phead == NULL) {
+			/* all entries in this scan have been stored, so it is ok to delete it */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+exit:
+	if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+		DHD_ERROR(("Buffer size is too small to save all batch entries,"
+			" cnt : %d (remained_scan_cnt): %d\n",
+			cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+	}
+	params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+	/* set FALSE only if the linked list is empty after returning the data */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		params_batch->get_batch.batch_started = FALSE;
+		bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER);
+		DHD_PNO(("%s", RESULTS_END_MARKER));
+		DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+	}
+	/* return used memory in buffer */
+	bytes_written = (int32)(bp - buf);
+	return bytes_written;
+}
+
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+	int err = BCME_OK;
+	int removed_scan_cnt = 0;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	dhd_pno_bestnet_entry_t *iter, *next;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(head, "head is NULL", err);
+	NULL_CHECK(head->next, "head->next is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(siter, snext,
+		head, list) {
+		if (only_last) {
+			/* in case that we need to delete only last one */
+			if (!list_is_last(&siter->list, head)) {
+				/* skip if the one is not last */
+				continue;
+			}
+		}
+		/* delete all data belonging to this entry if it is the last one */
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			removed_scan_cnt++;
+			list_for_each_entry_safe(iter, next,
+			&phead->entry_list, list) {
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+			}
+			pprev = phead;
+			phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+		}
+		if (phead == NULL) {
+			/* it is ok to delete top node */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	return removed_scan_cnt;
+}
+
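+/*
+ * Program a channel list into firmware through the "pfn_cfg" iovar.
+ * Lists longer than WL_NUMCHANNELS are rejected with BCME_RANGE.
+ */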
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_cfg_t pfncfg_param;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nchan) {
+		NULL_CHECK(channel_list, "channel_list is NULL", err);
+	}
+	if (nchan > WL_NUMCHANNELS) {
+		return BCME_RANGE;
+	}
+	DHD_PNO(("%s enter :  nchan : %d\n", __FUNCTION__, nchan));
+	memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t));
+	/* Setup default values */
+	pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+	pfncfg_param.channel_num = htod32(0);
+
+	for (i = 0; i < nchan; i++)
+		pfncfg_param.channel_list[i] = channel_list[i];
+
+	pfncfg_param.channel_num = htod32(nchan);
+	err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), NULL, 0,
+			TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+
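+/*
+ * Reset the cached parameters of the given PNO mode (legacy, batch or
+ * hotlist): free any SSID/BSSID/result lists and zero the per-mode fields.
+ * Takes pno_mutex internally.
+ */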
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	switch (mode) {
+	case DHD_PNO_LEGACY_MODE: {
+		struct dhd_pno_ssid *iter, *next;
+		if (params->params_legacy.nssid > 0) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(iter, next,
+				&params->params_legacy.ssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		params->params_legacy.nssid = 0;
+		params->params_legacy.scan_fr = 0;
+		params->params_legacy.pno_freq_expo_max = 0;
+		params->params_legacy.pno_repeat = 0;
+		params->params_legacy.nchan = 0;
+		memset(params->params_legacy.chan_list, 0,
+			sizeof(params->params_legacy.chan_list));
+		break;
+	}
+	case DHD_PNO_BATCH_MODE: {
+		params->params_batch.scan_fr = 0;
+		params->params_batch.mscan = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.rtt = 0;
+		params->params_batch.bestn = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_batch.chan_list, 0,
+			sizeof(params->params_batch.chan_list));
+		params->params_batch.get_batch.batch_started = FALSE;
+		params->params_batch.get_batch.buf = NULL;
+		params->params_batch.get_batch.bufsize = 0;
+		params->params_batch.get_batch.reason = 0;
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.scan_results_list, FALSE);
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.expired_scan_results_list, FALSE);
+		params->params_batch.get_batch.tot_scan_cnt = 0;
+		params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+		params->params_batch.get_batch.top_node_cnt = 0;
+		INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+		INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+		break;
+	}
+	case DHD_PNO_HOTLIST_MODE: {
+		struct dhd_pno_bssid *iter, *next;
+		if (params->params_hotlist.nbssid > 0) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(iter, next,
+				&params->params_hotlist.bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		}
+		params->params_hotlist.scan_fr = 0;
+		params->params_hotlist.nbssid = 0;
+		params->params_hotlist.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_hotlist.chan_list, 0,
+			sizeof(params->params_hotlist.chan_list));
+		break;
+	}
+	default:
+		DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode));
+		break;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+	return err;
+}
+
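+/* Program a hotlist BSSID array into firmware via the "pfn_add_bssid" iovar. */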
+static int
+_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nbssid) {
+		NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid,
+			sizeof(wl_pfn_bssid_t) * nbssid, NULL, 0, TRUE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_add_bssid\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+
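+/*
+ * Stop legacy (SSID based) PNO. If GSCAN or batch/hotlist scans are also
+ * active, pending batch results are retrieved first, the firmware PNO state
+ * is cleaned, and the remaining modes are reprogrammed so that only the
+ * legacy scan is dropped.
+ */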
+int
+dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) {
+		DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		struct dhd_pno_gscan_params *gscan_params;
+
+		_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+		gscan_params = &_params->params_gscan;
+		if (gscan_params->mscan) {
+			/* retrieve the batching data from firmware into host */
+			err = dhd_wait_batch_results_complete(dhd);
+			if (err != BCME_OK)
+				goto exit;
+		}
+		/* save current pno_mode before calling dhd_pno_clean */
+		mutex_lock(&_pno_state->pno_mutex);
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			mutex_unlock(&_pno_state->pno_mutex);
+			goto exit;
+		}
+		/* restore previous pno_mode */
+		_pno_state->pno_mode = mode;
+		mutex_unlock(&_pno_state->pno_mutex);
+		/* Restart gscan */
+		err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+		goto exit;
+	}
+#endif /* GSCAN_SUPPORT */
+	/* restart batch mode if the batch mode is on */
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			err = BCME_ERROR;
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+
+		/* restore previous pno_mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			/* restart BATCH SCAN */
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			/* restart HOTLIST SCAN */
+			struct dhd_pno_bssid *iter, *next;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+			_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+				" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			/* convert dhd_pno_bssid to wl_pfn_bssid */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(iter, next,
+			&_params->params_hotlist.bssid_list, list) {
+				memcpy(&p_pfn_bssid->macaddr,
+				&iter->macaddr, ETHER_ADDR_LEN);
+				p_pfn_bssid->flags = iter->flags;
+				p_pfn_bssid++;
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	kfree(p_pfn_bssid);
+	return err;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	return (_dhd_pno_enable(dhd, enable));
+}
+
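+/*
+ * Append the caller-supplied SSIDs to 'ptr' as dhd_pno_ssid entries,
+ * rejecting broadcast SSIDs and SSIDs longer than DOT11_MAX_SSID_LEN.
+ * *num_ssid_added is set to the number of SSIDs actually queued.
+ */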
+static int
+dhd_pno_add_to_ssid_list(struct list_head *ptr, wlc_ssid_ext_t *ssid_list,
+    int nssid, int *num_ssid_added)
+{
+	int ret = BCME_OK;
+	int i;
+	struct dhd_pno_ssid *_pno_ssid;
+
+	for (i = 0; i < nssid; i++) {
+		if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("%s : Invalid SSID length %d\n",
+				__FUNCTION__, ssid_list[i].SSID_len));
+			ret = BCME_ERROR;
+			goto exit;
+		}
+		/* Check for broadcast ssid */
+		if (!ssid_list[i].SSID_len) {
+			DHD_ERROR(("%d: Broadcast SSID is illegal for PNO setting\n", i));
+			ret = BCME_ERROR;
+			goto exit;
+		}
+
+		_pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL);
+		if (_pno_ssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
+				__FUNCTION__));
+			ret = BCME_ERROR;
+			goto exit;
+		}
+		_pno_ssid->SSID_len = ssid_list[i].SSID_len;
+		_pno_ssid->hidden = ssid_list[i].hidden;
+		_pno_ssid->rssi_thresh = ssid_list[i].rssi_thresh;
+		_pno_ssid->flags = ssid_list[i].flags;
+		_pno_ssid->wpa_auth = WPA_AUTH_PFN_ANY;
+
+		memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len);
+		list_add_tail(&_pno_ssid->list, ptr);
+	}
+
+exit:
+	*num_ssid_added = i;
+	return ret;
+}
+
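+/*
+ * Entry point for legacy PNO: reinitialize the legacy profile, rebuild the
+ * SSID list from ssid_list and hand the scan parameters over to
+ * dhd_pno_set_legacy_pno().
+ */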
+int
+dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	struct dhd_pno_legacy_params *params_legacy;
+	int err = BCME_OK;
+
+	if (!dhd || !dhd->pno_state) {
+		DHD_ERROR(("%s: PNO Not enabled/Not ready\n", __FUNCTION__));
+		return BCME_NOTREADY;
+	}
+
+	if (!dhd_support_sta_mode(dhd)) {
+		return BCME_BADOPTION;
+	}
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+	params_legacy = &(_params->params_legacy);
+	err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
+			__FUNCTION__, err));
+		return err;
+	}
+
+	INIT_LIST_HEAD(&params_legacy->ssid_list);
+
+	if (dhd_pno_add_to_ssid_list(&params_legacy->ssid_list, ssid_list,
+	      nssid, &params_legacy->nssid) < 0) {
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+		return BCME_ERROR;
+	}
+
+	DHD_PNO(("%s enter : nssid %d, scan_fr :%d, pno_repeat :%d,"
+			"pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__,
+			params_legacy->nssid, scan_fr, pno_repeat, pno_freq_expo_max, nchan));
+
+	return dhd_pno_set_legacy_pno(dhd, scan_fr, pno_repeat,
+	     pno_freq_expo_max, channel_list, nchan);
+
+}
+
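+/*
+ * Program legacy PNO into firmware. If batch or hotlist mode is already
+ * running, the channel lists of both modes are merged; if GSCAN is running,
+ * the request is folded into a combined GSCAN + legacy PNO restart.
+ */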
+static int
+dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16  scan_fr, int pno_repeat,
+	int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int32 tot_nchan = 0;
+	int err = BCME_OK;
+	int i, nssid;
+	int mode = 0;
+	struct list_head *ssid_list;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+	/* If GSCAN is also ON will handle this down below */
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE &&
+		!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE))
+#else
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)
+#endif /* GSCAN_SUPPORT */
+	{
+		DHD_ERROR(("%s : Legacy PNO mode is already running, "
+			"disabling the previous request to start the new one\n", __FUNCTION__));
+		err = dhd_pno_stop_for_ssid(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n",
+				__FUNCTION__, err));
+			return err;
+		}
+	}
+	_pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
+	memset(_chan_list, 0, sizeof(_chan_list));
+	tot_nchan = MIN(nchan, WL_NUMCHANNELS);
+	if (tot_nchan > 0 && channel_list) {
+		for (i = 0; i < tot_nchan; i++)
+			_params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i];
+	}
+#ifdef GSCAN_SUPPORT
+	else {
+		tot_nchan = WL_NUMCHANNELS;
+		err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan,
+				(WLC_BAND_2G | WLC_BAND_5G), FALSE);
+		if (err < 0) {
+			tot_nchan = 0;
+			DHD_PNO(("Could not get channel list for PNO SSID\n"));
+		} else {
+			for (i = 0; i < tot_nchan; i++)
+				_params->params_legacy.chan_list[i] = _chan_list[i];
+		}
+	}
+#endif /* GSCAN_SUPPORT */
+
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		DHD_PNO(("BATCH SCAN is in progress in firmware\n"));
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* use the superset of the channel lists of the two modes */
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			if (_params2->params_batch.nchan > 0 && tot_nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_batch.chan_list[0],
+					_params2->params_batch.nchan,
+					&channel_list[0], tot_nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and batch\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			} else {
+				DHD_PNO(("superset channel list will use"
+				" all channels in firmware\n"));
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_hotlist.chan_list[0],
+					_params2->params_hotlist.nchan,
+					&channel_list[0], tot_nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and hotlist\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			}
+		}
+	}
+	_params->params_legacy.scan_fr = scan_fr;
+	_params->params_legacy.pno_repeat = pno_repeat;
+	_params->params_legacy.pno_freq_expo_max = pno_freq_expo_max;
+	_params->params_legacy.nchan = tot_nchan;
+	ssid_list = &_params->params_legacy.ssid_list;
+	nssid = _params->params_legacy.nssid;
+
+#ifdef GSCAN_SUPPORT
+	/* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		struct dhd_pno_gscan_params *gscan_params;
+		gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+		/* ePNO and Legacy PNO do not co-exist */
+		if (gscan_params->epno_cfg.num_epno_ssid) {
+			DHD_PNO(("ePNO and Legacy PNO do not co-exist\n"));
+			err = BCME_EPERM;
+			goto exit;
+		}
+		DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n"));
+		err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+		goto exit;
+	}
+#endif /* GSCAN_SUPPORT */
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) {
+		DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) {
+		DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid));
+		goto exit;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	if (err < 0) {
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	}
+	/* clear mode in case of error */
+	if (err < 0) {
+		int ret = dhd_pno_clean(dhd);
+
+		if (ret < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, ret));
+		} else {
+			_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+		}
+	}
+	return err;
+}
+
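+/*
+ * Start batch-mode PNO. Returns the number of scans the firmware can
+ * buffer (mscan) on success, a negative value on failure, or -EBUSY if
+ * batch mode is already running.
+ */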
+int
+dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params)
+{
+	int err = BCME_OK;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0, tot_nchan = 0;
+	int mode = 0, mscan = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(batch_params, "batch_params is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_BATCH_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	} else {
+		/* batch mode is already started */
+		return -EBUSY;
+	}
+	_params->params_batch.scan_fr = batch_params->scan_fr;
+	_params->params_batch.bestn = batch_params->bestn;
+	_params->params_batch.mscan = (batch_params->mscan)?
+		batch_params->mscan : DEFAULT_BATCH_MSCAN;
+	_params->params_batch.nchan = batch_params->nchan;
+	memcpy(_params->params_batch.chan_list, batch_params->chan_list,
+		sizeof(_params->params_batch.chan_list));
+
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan;
+	if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_batch.chan_list[batch_params->nchan],
+		&rem_nchan, batch_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, batch_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_nchan has the valid channel count */
+		_params->params_batch.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_batch.chan_list, _params->params_batch.nchan,
+			sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+	{
+		int i;
+
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_batch.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+	}
+#endif
+	if (_params->params_batch.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
+		tot_nchan = _params->params_batch.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_PNO(("PNO SSID is in progress in firmware\n"));
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* Use the superset of the channel lists of the two modes */
+		_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+		if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) {
+			err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+				&_params2->params_legacy.chan_list[0],
+				_params2->params_legacy.nchan,
+				&_params->params_batch.chan_list[0], _params->params_batch.nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to merge channel list"
+				" between legacy and batch\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		} else {
+			DHD_PNO(("superset channel list will use all channels in firmware\n"));
+		}
+		if ((err = _dhd_pno_add_ssid(dhd, &_params2->params_legacy.ssid_list,
+				_params2->params_legacy.nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	} else {
+		/* we need to return mscan */
+		mscan = err;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	else {
+		/* return the max number of scans the firmware can buffer */
+		err = mscan;
+	}
+	return err;
+}
+
+
+#ifdef GSCAN_SUPPORT
+
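+/*
+ * Write (set == TRUE) or clear (set == FALSE) the ePNO SSID parameters in
+ * firmware through the "pfn_ssid_cfg" iovar.
+ */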
+static int
+dhd_set_epno_params(dhd_pub_t *dhd, wl_pfn_ssid_params_t *params, bool set)
+{
+	wl_pfn_ssid_cfg_t cfg;
+	int err;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	memset(&cfg, 0, sizeof(wl_pfn_ssid_cfg_t));
+	cfg.version = WL_PFN_SSID_CFG_VERSION;
+
+	/* If asked to clear params (set == FALSE) just set the CLEAR bit */
+	if (!set)
+		cfg.flags |= WL_PFN_SSID_CFG_CLEAR;
+	else if (params)
+		memcpy(&cfg.params, params, sizeof(wl_pfn_ssid_params_t));
+	err = dhd_iovar(dhd, 0, "pfn_ssid_cfg", (char *)&cfg,
+			sizeof(wl_pfn_ssid_cfg_t), NULL, 0, TRUE);
+	if (err != BCME_OK) {
+		DHD_ERROR(("%s : Failed to execute pfn_ssid_cfg %d\n", __FUNCTION__, err));
+	}
+	return err;
+}
+
+int
+dhd_pno_flush_fw_epno(dhd_pub_t *dhd)
+{
+	int err;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+
+	err = dhd_set_epno_params(dhd, NULL, FALSE);
+	if (err < 0) {
+		DHD_ERROR(("failed to set ePNO params %d\n", err));
+		return err;
+	}
+	err = _dhd_pno_flush_ssid(dhd);
+	return err;
+}
+
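+/*
+ * Program the cached ePNO SSID list and parameters into firmware.
+ * Does nothing if no ePNO SSIDs are configured.
+ */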
+int
+dhd_pno_set_epno(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+
+	struct dhd_pno_gscan_params *gscan_params;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+
+	if (gscan_params->epno_cfg.num_epno_ssid) {
+		DHD_PNO(("num_epno_ssid %d\n", gscan_params->epno_cfg.num_epno_ssid));
+		if ((err = _dhd_pno_add_ssid(dhd, &gscan_params->epno_cfg.epno_ssid_list,
+			gscan_params->epno_cfg.num_epno_ssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) to firmware\n", err));
+			return err;
+		}
+		err = dhd_set_epno_params(dhd, &gscan_params->epno_cfg.params, TRUE);
+		if (err < 0) {
+			DHD_ERROR(("failed to set ePNO params %d\n", err));
+		}
+	}
+	return err;
+}
+
+
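+/*
+ * Flush the cached GSCAN configuration selected by 'flags': scan buckets,
+ * hotlist BSSIDs and/or ePNO SSIDs. Callers hold pno_mutex.
+ */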
+static void
+dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params,
+            dhd_pno_status_info_t *_pno_state, uint8 flags)
+{
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (flags & GSCAN_FLUSH_SCAN_CFG) {
+		_params->params_gscan.bestn = 0;
+		_params->params_gscan.mscan = 0;
+		_params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+		_params->params_gscan.scan_fr = 0;
+		_params->params_gscan.send_all_results_flag = 0;
+		memset(_params->params_gscan.channel_bucket, 0,
+		_params->params_gscan.nchannel_buckets *
+		 sizeof(struct dhd_pno_gscan_channel_bucket));
+		_params->params_gscan.nchannel_buckets = 0;
+		DHD_PNO(("Flush Scan config\n"));
+	}
+	if (flags & GSCAN_FLUSH_HOTLIST_CFG) {
+		struct dhd_pno_bssid *iter, *next;
+		if (_params->params_gscan.nbssid_hotlist > 0) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(iter, next,
+				&_params->params_gscan.hotlist_bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		}
+		_params->params_gscan.nbssid_hotlist = 0;
+		DHD_PNO(("Flush Hotlist Config\n"));
+	}
+	if (flags & GSCAN_FLUSH_EPNO_CFG) {
+		dhd_pno_ssid_t *iter, *next;
+		dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg;
+
+		if (epno_cfg->num_epno_ssid > 0) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(iter, next,
+				&epno_cfg->epno_ssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+			epno_cfg->num_epno_ssid = 0;
+		}
+		memset(&epno_cfg->params, 0, sizeof(wl_pfn_ssid_params_t));
+		DHD_PNO(("Flushed ePNO Config\n"));
+	}
+
+	return;
+}
+
+int
+dhd_pno_lock_batch_results(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+	int err = BCME_OK;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	return err;
+}
+
+void
+dhd_pno_unlock_batch_results(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_unlock(&_pno_state->pno_mutex);
+	return;
+}
+
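+/*
+ * Block until any in-flight GSCAN batch retrieval finishes. If nothing is
+ * cached and GSCAN is running, trigger a fresh retrieval from firmware and
+ * wait for it, bounded by GSCAN_BATCH_GET_MAX_WAIT.
+ */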
+int dhd_wait_batch_results_complete(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	int err = BCME_OK;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	/* Has the workqueue finished its job already?? */
+	if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) {
+		DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__));
+		wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+		     is_batch_retrieval_complete(&_params->params_gscan),
+		     msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+	} else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */
+		gscan_results_cache_t *iter;
+		uint16 num_results = 0;
+
+		mutex_lock(&_pno_state->pno_mutex);
+		iter = _params->params_gscan.gscan_batch_cache;
+		while (iter) {
+			num_results += iter->tot_count - iter->tot_consumed;
+			iter = iter->next;
+		}
+		mutex_unlock(&_pno_state->pno_mutex);
+
+		/* All results consumed/No results cached??
+		 * Get fresh results from FW
+		 */
+		if ((_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) && !num_results) {
+			DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__));
+			err = dhd_retreive_batch_scan_results(dhd);
+			if (err == BCME_OK) {
+				wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+				  is_batch_retrieval_complete(&_params->params_gscan),
+				  msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+			}
+		}
+	}
+	DHD_PNO(("%s: Wait complete\n", __FUNCTION__));
+	return err;
+}
+
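+/*
+ * Return the head of the cached GSCAN batch results. *len packs the number
+ * of unconsumed results into the upper 16 bits and the number of scan ids
+ * into the lower 16 bits.
+ */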
+static void *
+dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
+{
+	gscan_results_cache_t *iter, *results;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	uint16 num_scan_ids = 0, num_results = 0;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	iter = results = _params->params_gscan.gscan_batch_cache;
+	while (iter) {
+		num_results += iter->tot_count - iter->tot_consumed;
+		num_scan_ids++;
+		iter = iter->next;
+	}
+
+	*len = ((num_results << 16) | (num_scan_ids));
+	return results;
+}
+
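+/*
+ * Cache one piece of GSCAN configuration (batch, hotlist, scan buckets or
+ * ePNO) in the driver under pno_mutex; nothing is sent to firmware here.
+ */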
+int
+dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+    void *buf, bool flush)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *_params;
+	int i;
+	dhd_pno_status_info_t *_pno_state;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	mutex_lock(&_pno_state->pno_mutex);
+
+	switch (type) {
+	case DHD_PNO_BATCH_SCAN_CFG_ID:
+		{
+			gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf;
+			_params->params_gscan.bestn = ptr->bestn;
+			_params->params_gscan.mscan = ptr->mscan;
+			_params->params_gscan.buffer_threshold = ptr->buffer_threshold;
+		}
+		break;
+		case DHD_PNO_GEOFENCE_SCAN_CFG_ID:
+		{
+			gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf;
+			struct dhd_pno_bssid *_pno_bssid;
+			struct bssid_t *bssid_ptr;
+			int8 flags;
+
+			if (flush) {
+				dhd_pno_reset_cfg_gscan(_params, _pno_state,
+				    GSCAN_FLUSH_HOTLIST_CFG);
+			}
+
+			if (!ptr->nbssid) {
+				break;
+			}
+			if (!_params->params_gscan.nbssid_hotlist) {
+				INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
+			}
+
+			for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) {
+				_pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+
+				if (!_pno_bssid) {
+					DHD_ERROR(("_pno_bssid is NULL, cannot kzalloc %zd bytes\n",
+					       sizeof(struct dhd_pno_bssid)));
+					err = BCME_NOMEM;
+					goto exit;
+				}
+				memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN);
+
+				flags = (int8) bssid_ptr->rssi_reporting_threshold;
+				_pno_bssid->flags = flags  << WL_PFN_RSSI_SHIFT;
+				list_add_tail(&_pno_bssid->list,
+				   &_params->params_gscan.hotlist_bssid_list);
+			}
+
+			_params->params_gscan.nbssid_hotlist += ptr->nbssid;
+			_params->params_gscan.lost_ap_window = ptr->lost_ap_window;
+		}
+		break;
+	case DHD_PNO_SCAN_CFG_ID:
+		{
+			int k;
+			uint16 band;
+			gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf;
+			struct dhd_pno_gscan_channel_bucket *ch_bucket;
+
+			if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) {
+				_params->params_gscan.nchannel_buckets = ptr->nchannel_buckets;
+
+				memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket,
+				    _params->params_gscan.nchannel_buckets *
+				    sizeof(struct dhd_pno_gscan_channel_bucket));
+				ch_bucket = _params->params_gscan.channel_bucket;
+
+				for (i = 0; i < ptr->nchannel_buckets; i++) {
+					band = ch_bucket[i].band;
+					for (k = 0; k < ptr->channel_bucket[i].num_channels; k++)  {
+						ch_bucket[i].chan_list[k] =
+						wf_mhz2channel(ptr->channel_bucket[i].chan_list[k],
+							0);
+					}
+					ch_bucket[i].band = 0;
+					/* HAL and DHD use different bits for 2.4G and
+					 * 5G in bitmap. Hence translating it here...
+					 */
+					if (band & GSCAN_BG_BAND_MASK) {
+						ch_bucket[i].band |= WLC_BAND_2G;
+					}
+					if (band & GSCAN_A_BAND_MASK) {
+						ch_bucket[i].band |= WLC_BAND_5G;
+					}
+					if (band & GSCAN_DFS_MASK) {
+						ch_bucket[i].band |= GSCAN_DFS_MASK;
+					}
+					DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band,
+					          ch_bucket[i].report_flag));
+				}
+
+				for (i = 0; i < ptr->nchannel_buckets; i++) {
+					ch_bucket[i].bucket_freq_multiple =
+					ch_bucket[i].bucket_freq_multiple/ptr->scan_fr;
+					ch_bucket[i].bucket_max_multiple =
+					ch_bucket[i].bucket_max_multiple/ptr->scan_fr;
+					DHD_PNO(("mult %d max_mult %d\n",
+					                 ch_bucket[i].bucket_freq_multiple,
+					                 ch_bucket[i].bucket_max_multiple));
+				}
+				_params->params_gscan.scan_fr = ptr->scan_fr;
+
+				DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets,
+				        _params->params_gscan.scan_fr));
+			} else {
+				err = BCME_BADARG;
+			}
+		}
+		break;
+	case DHD_PNO_EPNO_CFG_ID:
+		if (flush) {
+			dhd_pno_reset_cfg_gscan(_params, _pno_state,
+			   GSCAN_FLUSH_EPNO_CFG);
+		}
+		break;
+	case DHD_PNO_EPNO_PARAMS_ID:
+		if (flush) {
+			memset(&_params->params_gscan.epno_cfg.params, 0,
+			     sizeof(wl_pfn_ssid_params_t));
+		}
+		if (buf) {
+			memcpy(&_params->params_gscan.epno_cfg.params, buf,
+			     sizeof(wl_pfn_ssid_params_t));
+		}
+		break;
+	default:
+			err = BCME_BADARG;
+			DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
+			break;
+	}
+exit:
+	mutex_unlock(&_pno_state->pno_mutex);
+	return err;
+
+}
+
+
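+/*
+ * Sanity-check a GSCAN request: scan frequency and bucket count must be
+ * non-zero, and explicitly listed channels must not exceed CHANNEL_5G_MAX.
+ */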
+static bool
+validate_gscan_params(struct dhd_pno_gscan_params *gscan_params)
+{
+	unsigned int i, k;
+
+	if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) {
+		DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is zero\n",
+		 __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets));
+		return false;
+	}
+
+	for (i = 0; i < gscan_params->nchannel_buckets; i++) {
+		if (!gscan_params->channel_bucket[i].band) {
+			for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) {
+				if (gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) {
+					DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__,
+					 gscan_params->channel_bucket[i].chan_list[k]));
+					return false;
+				}
+			}
+		}
+	}
+
+	return true;
+}
+
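+/*
+ * Program a full GSCAN configuration into firmware: build the channel
+ * buckets, push the scan/channel/bucket configuration, reprogram ePNO if
+ * the firmware state was flushed, add any hotlist BSSIDs and enable PNO.
+ */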
+static int
+dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
+{
+	int err = BCME_OK;
+	int mode, i = 0;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int tot_nchan = 0;
+	int num_buckets_to_fw, tot_num_buckets, gscan_param_size;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket = NULL;
+	wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
+	wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	dhd_pno_params_t	*_params;
+	bool fw_flushed = FALSE;
+
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(gscan_params, "gscan_params is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!validate_gscan_params(gscan_params)) {
+		DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state,
+	    _chan_list, &tot_num_buckets, &num_buckets_to_fw))) {
+		goto exit;
+	}
+
+	mutex_lock(&_pno_state->pno_mutex);
+	/* Clear any pre-existing results in our cache
+	 * not consumed by framework
+	 */
+	dhd_gscan_clear_all_batch_results(dhd);
+	if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) {
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			mutex_unlock(&_pno_state->pno_mutex);
+			goto exit;
+		}
+		fw_flushed = TRUE;
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+	}
+	_pno_state->pno_mode |= DHD_PNO_GSCAN_MODE;
+	mutex_unlock(&_pno_state->pno_mutex);
+
+	if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+		!gscan_params->epno_cfg.num_epno_ssid) {
+		struct dhd_pno_legacy_params *params_legacy;
+		params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+
+		if ((err = _dhd_pno_add_ssid(dhd, &params_legacy->ssid_list,
+			params_legacy->nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) {
+		DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+		goto exit;
+	}
+
+	gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) +
+	          (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t);
+	pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOCZ(dhd->osh, gscan_param_size);
+
+	if (!pfn_gscan_cfg_t) {
+		DHD_ERROR(("%s: failed to malloc memory of size %d\n",
+		   __FUNCTION__, gscan_param_size));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+
+	pfn_gscan_cfg_t->version = WL_GSCAN_CFG_VERSION;
+	if (gscan_params->mscan)
+		pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold;
+	else
+		pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+	pfn_gscan_cfg_t->flags =
+	         (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK);
+	pfn_gscan_cfg_t->flags |= GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK;
+	pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw;
+	pfn_gscan_cfg_t->retry_threshold = GSCAN_RETRY_THRESHOLD;
+
+	for (i = 0; i < num_buckets_to_fw; i++) {
+		pfn_gscan_cfg_t->channel_bucket[i].bucket_end_index =
+		           ch_bucket[i].bucket_end_index;
+		pfn_gscan_cfg_t->channel_bucket[i].bucket_freq_multiple =
+		           ch_bucket[i].bucket_freq_multiple;
+		pfn_gscan_cfg_t->channel_bucket[i].max_freq_multiple =
+		           ch_bucket[i].max_freq_multiple;
+		pfn_gscan_cfg_t->channel_bucket[i].repeat =
+		           ch_bucket[i].repeat;
+		pfn_gscan_cfg_t->channel_bucket[i].flag =
+		           ch_bucket[i].flag;
+	}
+
+	tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1;
+	DHD_PNO(("Total channel num %d total ch_buckets  %d ch_buckets_to_fw %d \n", tot_nchan,
+	      tot_num_buckets, num_buckets_to_fw));
+
+	if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+
+	if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	/* Reprogram ePNO cfg from dhd cache if FW has been flushed */
+	if (fw_flushed) {
+		dhd_pno_set_epno(dhd);
+	}
+
+	if (gscan_params->nbssid_hotlist) {
+		struct dhd_pno_bssid *iter, *next;
+		wl_pfn_bssid_t *ptr;
+		p_pfn_bssid = (wl_pfn_bssid_t *)kzalloc(sizeof(wl_pfn_bssid_t) *
+		       gscan_params->nbssid_hotlist, GFP_KERNEL);
+		if (p_pfn_bssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+			" (count: %d)",
+				__FUNCTION__, _params->params_hotlist.nbssid));
+			err = BCME_NOMEM;
+			_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+			goto exit;
+		}
+		ptr = p_pfn_bssid;
+		/* convert dhd_pno_bssid to wl_pfn_bssid */
+		DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		list_for_each_entry_safe(iter, next,
+		          &gscan_params->hotlist_bssid_list, list) {
+			char buffer_hotlist[64];
+			memcpy(&ptr->macaddr,
+			&iter->macaddr, ETHER_ADDR_LEN);
+			DHD_PNO(("%s\n", bcm_ether_ntoa(&ptr->macaddr, buffer_hotlist)));
+			BCM_REFERENCE(buffer_hotlist);
+			ptr->flags = iter->flags;
+			ptr++;
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+		err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) {
+		DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err));
+	}
+
+exit:
+	/* clear mode in case of error */
+	if (err < 0) {
+		int ret = dhd_pno_clean(dhd);
+
+		if (ret < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, ret));
+		} else {
+			_pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE;
+		}
+	}
+	kfree(p_pfn_significant_bssid);
+	kfree(p_pfn_bssid);
+	if (pfn_gscan_cfg_t) {
+		MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size);
+	}
+	if (ch_bucket) {
+		MFREE(dhd->osh, ch_bucket,
+		(tot_num_buckets * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+	}
+	return err;
+
+}
+
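+/*
+ * Build the flat channel list and per-bucket configuration sent to
+ * firmware. If legacy PNO is also running (and ePNO is not), an extra
+ * bucket is appended for the legacy channels and all bucket frequencies
+ * are expressed in units of the GCD of the two scan periods.
+ */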
+static wl_pfn_gscan_ch_bucket_cfg_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd,
+                                  dhd_pno_status_info_t *_pno_state,
+                                  uint16 *chan_list,
+                                  uint32 *num_buckets,
+                                  uint32 *num_buckets_to_fw)
+{
+	int i, num_channels, err, nchan = WL_NUMCHANNELS, ch_cnt;
+	uint16 *ptr = chan_list, max;
+	wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket;
+	dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	bool is_pno_legacy_running;
+	dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket;
+
+	/* ePNO and Legacy PNO do not co-exist */
+	is_pno_legacy_running = ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+		!_params->params_gscan.epno_cfg.num_epno_ssid);
+
+	if (is_pno_legacy_running)
+		*num_buckets = _params->params_gscan.nchannel_buckets + 1;
+	else
+		*num_buckets = _params->params_gscan.nchannel_buckets;
+
+	*num_buckets_to_fw = 0;
+
+	ch_bucket = (wl_pfn_gscan_ch_bucket_cfg_t *) MALLOC(dhd->osh,
+	   ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+
+	if (!ch_bucket) {
+		DHD_ERROR(("%s: failed to malloc memory of size %zd\n",
+			__FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+		*num_buckets_to_fw = *num_buckets = 0;
+		return NULL;
+	}
+
+	max = gscan_buckets[0].bucket_freq_multiple;
+	num_channels = 0;
+	/* nchan is the remaining space left in chan_list buffer
+	 * So any overflow list of channels is ignored
+	 */
+	for (i = 0; i < _params->params_gscan.nchannel_buckets && nchan; i++) {
+		if (!gscan_buckets[i].band) {
+			ch_cnt = MIN(gscan_buckets[i].num_channels, (uint8)nchan);
+			num_channels += ch_cnt;
+			memcpy(ptr, gscan_buckets[i].chan_list,
+			    ch_cnt * sizeof(uint16));
+			ptr = ptr + ch_cnt;
+		} else {
+			/* get a valid channel list based on band B or A */
+			err = _dhd_pno_get_channels(dhd, ptr,
+			        &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK),
+			        !(gscan_buckets[i].band & GSCAN_DFS_MASK));
+
+			if (err < 0) {
+				DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+					__FUNCTION__, gscan_buckets[i].band));
+				MFREE(dhd->osh, ch_bucket,
+				      ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t)));
+				*num_buckets_to_fw = *num_buckets = 0;
+				return NULL;
+			}
+
+			num_channels += nchan;
+			ptr = ptr + nchan;
+		}
+
+		ch_bucket[i].bucket_end_index = num_channels - 1;
+		ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple;
+		ch_bucket[i].repeat = gscan_buckets[i].repeat;
+		ch_bucket[i].max_freq_multiple = gscan_buckets[i].bucket_max_multiple;
+		ch_bucket[i].flag = gscan_buckets[i].report_flag;
+		/* HAL and FW interpretations are opposite for this bit */
+		ch_bucket[i].flag ^= DHD_PNO_REPORT_NO_BATCH;
+		if (max < gscan_buckets[i].bucket_freq_multiple)
+			max = gscan_buckets[i].bucket_freq_multiple;
+		nchan = WL_NUMCHANNELS - num_channels;
+		*num_buckets_to_fw = *num_buckets_to_fw + 1;
+		DHD_PNO(("end_idx  %d freq_mult - %d\n",
+		ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple));
+	}
+
+	_params->params_gscan.max_ch_bucket_freq = max;
+	/* Legacy PNO may be running, which means we need to create a legacy PNO bucket.
+	 * Get the GCD of the legacy PNO and GSCAN scan frequencies.
+	 */
+	if (is_pno_legacy_running) {
+		dhd_pno_params_t *_params1 = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+		uint16 *legacy_chan_list = _params1->params_legacy.chan_list;
+		uint16 common_freq;
+		uint32 legacy_bucket_idx = _params->params_gscan.nchannel_buckets;
+		/* If no space is left then only gscan buckets will be sent to FW */
+		if (nchan) {
+			common_freq = gcd(_params->params_gscan.scan_fr,
+			                  _params1->params_legacy.scan_fr);
+			max = gscan_buckets[0].bucket_freq_multiple;
+			/* GSCAN buckets */
+			for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) {
+				ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr;
+				ch_bucket[i].bucket_freq_multiple /= common_freq;
+				if (max < gscan_buckets[i].bucket_freq_multiple)
+					max = gscan_buckets[i].bucket_freq_multiple;
+			}
+			/* Legacy PNO bucket */
+			ch_bucket[legacy_bucket_idx].bucket_freq_multiple =
+			           _params1->params_legacy.scan_fr;
+			ch_bucket[legacy_bucket_idx].bucket_freq_multiple /=
+			           common_freq;
+			_params->params_gscan.max_ch_bucket_freq = MAX(max,
+			         ch_bucket[legacy_bucket_idx].bucket_freq_multiple);
+			ch_bucket[legacy_bucket_idx].flag = CH_BUCKET_REPORT_REGULAR;
+			/* Now add channels to the legacy scan bucket */
+			for (i = 0; i < _params1->params_legacy.nchan && nchan; i++, nchan--) {
+				ptr[i] = legacy_chan_list[i];
+				num_channels++;
+			}
+			ch_bucket[legacy_bucket_idx].bucket_end_index = num_channels - 1;
+			*num_buckets_to_fw = *num_buckets_to_fw + 1;
+			DHD_PNO(("end_idx  %d freq_mult - %d\n",
+			            ch_bucket[legacy_bucket_idx].bucket_end_index,
+			            ch_bucket[legacy_bucket_idx].bucket_freq_multiple));
+		}
+	}
+	return ch_bucket;
+}
+
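+/*
+ * Stop GSCAN: drain any pending batch results, clean the firmware PNO
+ * state and, if legacy PNO was active, reprogram it on its own.
+ */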
+static int
+dhd_pno_stop_for_gscan(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	int mode;
+	dhd_pno_status_info_t *_pno_state;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+		DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	if (_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan.mscan) {
+		/* retrieve the batching data from firmware into host */
+		err = dhd_wait_batch_results_complete(dhd);
+		if (err != BCME_OK)
+			goto exit;
+	}
+	mutex_lock(&_pno_state->pno_mutex);
+	mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE;
+	err = dhd_pno_clean(dhd);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+			__FUNCTION__, err));
+		mutex_unlock(&_pno_state->pno_mutex);
+		return err;
+	}
+	_pno_state->pno_mode = mode;
+	mutex_unlock(&_pno_state->pno_mutex);
+
+	/* Reprogram Legacy PNO if it was running */
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		struct dhd_pno_legacy_params *params_legacy;
+		uint16 chan_list[WL_NUMCHANNELS];
+
+		params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+		_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+		DHD_PNO(("Restarting Legacy PNO SSID scan...\n"));
+		memcpy(chan_list, params_legacy->chan_list,
+		    (params_legacy->nchan * sizeof(uint16)));
+		err = dhd_pno_set_legacy_pno(dhd, params_legacy->scan_fr,
+			params_legacy->pno_repeat, params_legacy->pno_freq_expo_max,
+			chan_list, params_legacy->nchan);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+
+	}
+
+exit:
+	return err;
+}
+
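+/*
+ * Start (run == TRUE) or stop GSCAN. When stopping with flush == TRUE the
+ * cached GSCAN configuration is cleared as well.
+ */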
+int
+dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_gscan_params *gscan_params;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush));
+
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+
+	if (run) {
+		err = dhd_pno_set_for_gscan(dhd, gscan_params);
+	} else {
+		if (flush) {
+			mutex_lock(&_pno_state->pno_mutex);
+			dhd_pno_reset_cfg_gscan(params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+			mutex_unlock(&_pno_state->pno_mutex);
+		}
+		/* Need to stop all gscan */
+		err = dhd_pno_stop_for_gscan(dhd);
+	}
+
+	return err;
+}
+
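+/*
+ * Toggle real-time reporting of full scan results. If GSCAN is already
+ * running and the flag changed, a flags-only gscan config update is sent
+ * to firmware.
+ */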
+int
+dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_gscan_params *gscan_params;
+	uint8 old_flag;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+
+	mutex_lock(&_pno_state->pno_mutex);
+
+	old_flag = gscan_params->send_all_results_flag;
+	gscan_params->send_all_results_flag = (uint8) real_time_flag;
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+	    if (old_flag != gscan_params->send_all_results_flag) {
+			wl_pfn_gscan_cfg_t gscan_cfg;
+
+			gscan_cfg.version = WL_GSCAN_CFG_VERSION;
+			gscan_cfg.flags = (gscan_params->send_all_results_flag &
+			                           GSCAN_SEND_ALL_RESULTS_MASK);
+			gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK;
+
+			if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg,
+			            sizeof(wl_pfn_gscan_cfg_t))) < 0) {
+				DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n",
+					__FUNCTION__, err));
+				goto exit_mutex_unlock;
+			}
+		} else {
+			DHD_PNO(("No change in flag - %d\n", old_flag));
+		}
+	} else {
+		DHD_PNO(("Gscan not started\n"));
+	}
+exit_mutex_unlock:
+	mutex_unlock(&_pno_state->pno_mutex);
+exit:
+	return err;
+}
+
+/* Cleanup any consumed results
+ * Return TRUE if all results consumed, else FALSE
+ */
+int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	dhd_pno_params_t *params;
+	struct dhd_pno_gscan_params *gscan_params;
+	dhd_pno_status_info_t *_pno_state;
+	gscan_results_cache_t *iter, *tmp;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+	iter = gscan_params->gscan_batch_cache;
+
+	while (iter) {
+		if (iter->tot_consumed == iter->tot_count) {
+			tmp = iter->next;
+			kfree(iter);
+			iter = tmp;
+		} else
+			break;
+	}
+	gscan_params->gscan_batch_cache = iter;
+	ret = (iter == NULL);
+	return ret;
+}
+
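+/*
+ * Pull batched scan results from firmware with the "pfnlbest" iovar and
+ * split them into per-scan gscan_results_cache_t entries, using the
+ * timestamp gap (half the scan period) to detect scan boundaries.
+ */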
+static int
+_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 timestamp = 0, ts = 0, i, j, timediff;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+	wl_pfn_lnet_info_v2_t *plnetinfo;
+	struct dhd_pno_gscan_params *gscan_params;
+	wl_pfn_lscanresults_v2_t *plbestnet = NULL;
+	gscan_results_cache_t *iter, *tail;
+	wifi_gscan_result_t *result;
+	uint8 *nAPs_per_scan = NULL;
+	uint8 num_scans_in_cur_iter;
+	uint16 count;
+	struct timespec64 tm_spec;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+		DHD_ERROR(("%s: GSCAN is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	gscan_params = &params->params_gscan;
+	nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan);
+
+	if (!nAPs_per_scan) {
+		DHD_ERROR(("%s : Out of memory!! Can't malloc %d bytes\n", __FUNCTION__,
+		gscan_params->mscan));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+
+	plbestnet = (wl_pfn_lscanresults_v2_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+	if (!plbestnet) {
+		DHD_ERROR(("%s : Out of memory!! Can't malloc %d bytes\n", __FUNCTION__,
+		      PNO_BESTNET_LEN));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+
+	mutex_lock(&_pno_state->pno_mutex);
+
+	dhd_gscan_clear_all_batch_results(dhd);
+
+	if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+		DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+		goto exit_mutex_unlock;
+	}
+
+	timediff = gscan_params->scan_fr * 1000;
+	timediff = timediff >> 1;
+
+	/* Ok, now lets start getting results from the FW */
+	plbestnet->status = PFN_INCOMPLETE;
+	tail = gscan_params->gscan_batch_cache;
+	while (plbestnet->status != PFN_COMPLETE) {
+		memset(plbestnet, 0, PNO_BESTNET_LEN);
+		err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet, PNO_BESTNET_LEN,
+				FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n",
+				__FUNCTION__, err));
+			goto exit_mutex_unlock;
+		}
+		ktime_get_boottime_ts64(&tm_spec);
+		DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+			plbestnet->status, plbestnet->count));
+		if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+			err = BCME_VERSION;
+			DHD_ERROR(("bestnet version (%d) does not match driver version (%d)\n",
+				plbestnet->version, PFN_SCANRESULT_VERSION));
+			goto exit_mutex_unlock;
+		}
+		if (plbestnet->count == 0) {
+			DHD_PNO(("No more batch results\n"));
+			goto exit_mutex_unlock;
+		}
+		num_scans_in_cur_iter = 0;
+		timestamp = plbestnet->netinfo[0].timestamp;
+		/* find out how many scans' results did we get in this batch of FW results */
+		for (i = 0, count = 0; i < plbestnet->count; i++, count++) {
+			plnetinfo = &plbestnet->netinfo[i];
+			/* Unlikely to happen, but just in case the results from
+			 * FW don't make sense, assume they are part of one single scan.
+			 */
+			if (num_scans_in_cur_iter >= gscan_params->mscan) {
+				num_scans_in_cur_iter = 0;
+				count = plbestnet->count;
+				break;
+			}
+			if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) {
+				nAPs_per_scan[num_scans_in_cur_iter] = count;
+				count = 0;
+				num_scans_in_cur_iter++;
+			}
+			timestamp = plnetinfo->timestamp;
+		}
+		if (num_scans_in_cur_iter < gscan_params->mscan) {
+			nAPs_per_scan[num_scans_in_cur_iter] = count;
+			num_scans_in_cur_iter++;
+		}
+
+		DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
+		plnetinfo = &plbestnet->netinfo[0];
+
+		for (i = 0; i < num_scans_in_cur_iter; i++) {
+			iter = (gscan_results_cache_t *)
+			kmalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) +
+			              sizeof(gscan_results_cache_t), GFP_KERNEL);
+			if (!iter) {
+				DHD_ERROR(("%s : Out of memory!! Can't malloc %d bytes\n",
+				 __FUNCTION__, gscan_params->mscan));
+				err = BCME_NOMEM;
+				goto exit_mutex_unlock;
+			}
+			/* Need this check because the new set of results from FW
+			 * may be a continuation of the previous set's scan results
+			 */
+			if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) {
+				iter->scan_id = ++gscan_params->scan_id;
+			} else {
+				iter->scan_id = gscan_params->scan_id;
+			}
+			DHD_PNO(("scan_id %d tot_count %d ch_bucket %x\n",
+				gscan_params->scan_id, nAPs_per_scan[i],
+				plbestnet->scan_ch_buckets[i]));
+			iter->tot_count = nAPs_per_scan[i];
+			iter->scan_ch_bucket = plbestnet->scan_ch_buckets[i];
+			iter->tot_consumed = 0;
+			iter->flag = 0;
+			if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+				DHD_PNO(("This scan is aborted\n"));
+				iter->flag = (ENABLE << PNO_STATUS_ABORT);
+			} else if (gscan_params->reason) {
+				iter->flag = (ENABLE << gscan_params->reason);
+			}
+
+			if (!tail) {
+				gscan_params->gscan_batch_cache = iter;
+			} else {
+				tail->next = iter;
+			}
+			tail = iter;
+			iter->next = NULL;
+			for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) {
+				result = &iter->results[j];
+
+				result->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+					(plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+					WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+				result->rssi = (int32) plnetinfo->RSSI;
+				/* Info not available & not expected */
+				result->beacon_period = 0;
+				result->capability = 0;
+				result->rtt = (uint64) plnetinfo->rtt0;
+				result->rtt_sd = (uint64) plnetinfo->rtt1;
+				result->ts = convert_fw_rel_time_to_systime(&tm_spec,
+				                  plnetinfo->timestamp);
+				ts = plnetinfo->timestamp;
+				if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+					DHD_ERROR(("%s: Invalid SSID length %d\n",
+					      __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+					plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+				}
+				memcpy(result->ssid, plnetinfo->pfnsubnet.u.SSID,
+					plnetinfo->pfnsubnet.SSID_len);
+				result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+				memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID,
+				    ETHER_ADDR_LEN);
+
+				DHD_PNO(("\tSSID : "));
+				DHD_PNO(("\n"));
+				DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					result->macaddr.octet[0],
+					result->macaddr.octet[1],
+					result->macaddr.octet[2],
+					result->macaddr.octet[3],
+					result->macaddr.octet[4],
+					result->macaddr.octet[5]));
+				DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+					plnetinfo->pfnsubnet.channel,
+					plnetinfo->RSSI, plnetinfo->timestamp));
+				DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
+				    plnetinfo->rtt0, plnetinfo->rtt1));
+
+			}
+		}
+	}
+exit_mutex_unlock:
+	mutex_unlock(&_pno_state->pno_mutex);
+exit:
+	params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE;
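+	/* make the flag update visible before waking any waiter blocked on
+	 * batch_get_wait in dhd_pno_get_for_batch()
+	 */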
+	smp_wmb();
+	wake_up_interruptible(&_pno_state->batch_get_wait);
+	if (nAPs_per_scan) {
+		MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan * sizeof(uint8));
+	}
+	if (plbestnet) {
+		MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
+	}
+	DHD_PNO(("Batch retrieval done!\n"));
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+void *
+dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+         void *info, uint32 *len)
+{
+	void *ret = NULL;
+	dhd_pno_gscan_capabilities_t *ptr;
+	dhd_pno_ssid_t *ssid_elem;
+	dhd_pno_params_t *_params;
+	dhd_epno_ssid_cfg_t *epno_cfg;
+	dhd_pno_status_info_t *_pno_state;
+
+
+	if (!dhd || !dhd->pno_state) {
+		DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__));
+		return NULL;
+	}
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	if (!len) {
+		DHD_ERROR(("%s: len is NULL\n", __FUNCTION__));
+		return NULL;
+	}
+
+	switch (type) {
+		case DHD_PNO_GET_CAPABILITIES:
+			ptr = (dhd_pno_gscan_capabilities_t *)
+			kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL);
+			if (!ptr)
+				break;
+			/* Hardcoding these values for now, need to get
+			 * these values from FW, will change in a later check-in
+			 */
+			ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE;
+			ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS;
+			ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN;
+			ptr->max_scan_reporting_threshold = 100;
+			ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS;
+			ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM;
+			ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID;
+			ptr->max_white_list_ssid = MAX_WHITELIST_SSID;
+			ret = (void *)ptr;
+			*len = sizeof(dhd_pno_gscan_capabilities_t);
+			break;
+#ifdef GSCAN_SUPPORT
+		case DHD_PNO_GET_BATCH_RESULTS:
+			ret = dhd_get_gscan_batch_results(dhd, len);
+			break;
+#endif /* GSCAN_SUPPORT */
+		case DHD_PNO_GET_CHANNEL_LIST:
+			if (info) {
+				uint16 ch_list[WL_NUMCHANNELS];
+				uint32 *p, mem_needed, i;
+				int32 err, nchan = WL_NUMCHANNELS;
+				uint32 *gscan_band = (uint32 *) info;
+				uint8 band = 0;
+
+				/* No band specified, nothing to do */
+				if ((*gscan_band & GSCAN_BAND_MASK) == 0) {
+					DHD_PNO(("No band specified\n"));
+					*len = 0;
+					break;
+				}
+
+				/* HAL and DHD use different bits for 2.4G and
+				 * 5G in bitmap. Hence translating it here...
+				 */
+				if (*gscan_band & GSCAN_BG_BAND_MASK) {
+					band |= WLC_BAND_2G;
+				}
+				if (*gscan_band & GSCAN_A_BAND_MASK) {
+					band |= WLC_BAND_5G;
+				}
+
+				err = _dhd_pno_get_channels(dhd, ch_list, &nchan,
+				                          (band & GSCAN_ABG_BAND_MASK),
+				                          !(*gscan_band & GSCAN_DFS_MASK));
+
+				if (err < 0) {
+					DHD_ERROR(("%s: failed to get valid channel list\n",
+						__FUNCTION__));
+					*len = 0;
+				} else {
+					mem_needed = sizeof(uint32) * nchan;
+					p = (uint32 *) kmalloc(mem_needed, GFP_KERNEL);
+					if (!p) {
+						DHD_ERROR(("%s: Unable to malloc %d bytes\n",
+							__FUNCTION__, mem_needed));
+						break;
+					}
+					for (i = 0; i < nchan; i++) {
+						p[i] = wf_channel2mhz(ch_list[i],
+							(ch_list[i] <= CH_MAX_2G_CHANNEL?
+							WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+					}
+					ret = p;
+					*len = mem_needed;
+				}
+			} else {
+				*len = 0;
+				DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__));
+			}
+			break;
+		case DHD_PNO_GET_NEW_EPNO_SSID_ELEM:
+			epno_cfg = &_params->params_gscan.epno_cfg;
+			if (epno_cfg->num_epno_ssid >=
+			           MAX_EPNO_SSID_NUM) {
+				DHD_ERROR(("Excessive number of ePNO SSIDs programmed %d\n",
+				     epno_cfg->num_epno_ssid));
+				return NULL;
+			}
+			if (!epno_cfg->num_epno_ssid) {
+				INIT_LIST_HEAD(&epno_cfg->epno_ssid_list);
+			}
+			ssid_elem = kzalloc(sizeof(dhd_pno_ssid_t), GFP_KERNEL);
+			if (!ssid_elem) {
+				DHD_ERROR(("EPNO ssid: cannot alloc %zd bytes",
+				   sizeof(dhd_pno_ssid_t)));
+				return NULL;
+			}
+			epno_cfg->num_epno_ssid++;
+			list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list);
+			ret = ssid_elem;
+			break;
+
+		default:
+			DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
+			break;
+	}
+
+	return ret;
+
+}
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+static int
+_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 timestamp = 0;
+	dhd_pno_params_t *_params = NULL;
+	dhd_pno_status_info_t *_pno_state = NULL;
+	wl_pfn_lscanresults_v2_t *plbestnet = NULL;
+	wl_pfn_lnet_info_v2_t *plnetinfo;
+	dhd_pno_bestnet_entry_t *pbestnet_entry;
+	dhd_pno_best_header_t *pbestnetheader = NULL;
+	dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
+	bool allocate_header = FALSE;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit_no_unlock;
+	}
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit_no_unlock;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+		goto exit_no_unlock;
+	}
+	mutex_lock(&_pno_state->pno_mutex);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (buf && bufsize) {
+		if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
+			/* need to check whether we have cached data or not */
+			DHD_PNO(("%s: have cached batching data in Driver\n",
+				__FUNCTION__));
+			/* convert to results format */
+			goto convert_format;
+		} else {
+			/* this is a first try to get batching results */
+			if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+				/* move the scan_results_list to expired_scan_results_lists */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+				list_for_each_entry_safe(siter, snext,
+					&_params->params_batch.get_batch.scan_results_list, list) {
+					list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+				}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+				_params->params_batch.get_batch.top_node_cnt = 0;
+				_params->params_batch.get_batch.expired_tot_scan_cnt =
+					_params->params_batch.get_batch.tot_scan_cnt;
+				_params->params_batch.get_batch.tot_scan_cnt = 0;
+				goto convert_format;
+			}
+		}
+	}
+	/* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */
+	pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE);
+	if (pscan_results == NULL) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n"));
+		goto exit;
+	}
+	pscan_results->bestnetheader = NULL;
+	pscan_results->cnt_header = 0;
+	/* add the element into the list if the total node cnt is less than MAX_NODE_CNT */
+	if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) {
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+		_params->params_batch.get_batch.top_node_cnt++;
+	} else {
+		int _removed_scan_cnt;
+		/* remove oldest one and add new one */
+		DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__));
+		_removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd,
+			&_params->params_batch.get_batch.scan_results_list, TRUE);
+		_params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt;
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+
+	}
+	plbestnet = (wl_pfn_lscanresults_v2_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+	if (!plbestnet) {
+		DHD_ERROR(("%s : failed to allocate plbestnet\n", __FUNCTION__));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	memset(plbestnet, 0, PNO_BESTNET_LEN);
+	while (plbestnet->status != PFN_COMPLETE) {
+		memset(plbestnet, 0, PNO_BESTNET_LEN);
+		err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet, PNO_BESTNET_LEN, 0);
+		if (err < 0) {
+			if (err == BCME_EPERM) {
+				DHD_ERROR(("we cannot get the batching data "
+					"during scanning in firmware, try again\n"));
+				msleep(500);
+				continue;
+			} else {
+				DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+		DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+			plbestnet->status, plbestnet->count));
+		if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+			err = BCME_VERSION;
+			DHD_ERROR(("bestnet version (%d) does not match driver version (%d)\n",
+				plbestnet->version, PFN_SCANRESULT_VERSION));
+			goto exit;
+		}
+		plnetinfo = plbestnet->netinfo;
+		for (i = 0; i < plbestnet->count; i++) {
+			pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+			MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+			if (pbestnet_entry == NULL) {
+				err = BCME_NOMEM;
+				DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+				goto exit;
+			}
+			memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+			pbestnet_entry->recorded_time = jiffies; /* record the current time */
+			/* create header for the first entry */
+			allocate_header = (i == 0)? TRUE : FALSE;
+			/* check whether the new generation is started or not */
+			if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
+				> TIME_MIN_DIFF))
+				allocate_header = TRUE;
+			timestamp = plnetinfo->timestamp;
+			if (allocate_header) {
+				pbestnetheader = (dhd_pno_best_header_t *)
+				MALLOC(dhd->osh, BEST_HEADER_SIZE);
+				if (pbestnetheader == NULL) {
+					err = BCME_NOMEM;
+					if (pbestnet_entry)
+						MFREE(dhd->osh, pbestnet_entry,
+						BESTNET_ENTRY_SIZE);
+					DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+					goto exit;
+				}
+				/* increase total cnt of bestnet header */
+				pscan_results->cnt_header++;
+				memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+				/* need to record the reason to call dhd_pno_get_for_batch */
+				if (reason)
+					pbestnetheader->reason = (ENABLE << reason);
+				/* initialize the head of linked list */
+				INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+				/* link the pbestnet header into the existing list */
+				if (pscan_results->bestnetheader == NULL)
+					/* In case of header */
+					pscan_results->bestnetheader = pbestnetheader;
+				else {
+					dhd_pno_best_header_t *head = pscan_results->bestnetheader;
+					pscan_results->bestnetheader = pbestnetheader;
+					pbestnetheader->next = head;
+				}
+			}
+			/* fills the best network info */
+			pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
+			pbestnet_entry->RSSI = plnetinfo->RSSI;
+			if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+				/* the partial-scan flag from FW means this
+				 * scan was aborted by another scan
+				 */
+				DHD_PNO(("This scan is aborted\n"));
+				pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+			}
+			pbestnet_entry->rtt0 = plnetinfo->rtt0;
+			pbestnet_entry->rtt1 = plnetinfo->rtt1;
+			pbestnet_entry->timestamp = plnetinfo->timestamp;
+			if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+				DHD_ERROR(("%s: Invalid SSID length %d: trimming it to max\n",
+				      __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+				plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+			}
+			pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
+			memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.u.SSID,
+				pbestnet_entry->SSID_len);
+			memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+			/* add the element into list */
+			list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+			/* increase best entry count */
+			pbestnetheader->tot_cnt++;
+			pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+			DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
+			DHD_PNO(("\tSSID : "));
+			for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++)
+				DHD_PNO(("%c", plnetinfo->pfnsubnet.u.SSID[j]));
+			DHD_PNO(("\n"));
+			DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+				plnetinfo->pfnsubnet.BSSID.octet[0],
+				plnetinfo->pfnsubnet.BSSID.octet[1],
+				plnetinfo->pfnsubnet.BSSID.octet[2],
+				plnetinfo->pfnsubnet.BSSID.octet[3],
+				plnetinfo->pfnsubnet.BSSID.octet[4],
+				plnetinfo->pfnsubnet.BSSID.octet[5]));
+			DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+				plnetinfo->pfnsubnet.channel,
+				plnetinfo->RSSI, plnetinfo->timestamp));
+			DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1));
+			plnetinfo++;
+		}
+	}
+	if (pscan_results->cnt_header == 0) {
+		/* In case we didn't get any data from the firmware,
+		 * remove the current scan_result list from get_batch.scan_results_list.
+		 */
+		DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n"));
+		list_del(&pscan_results->list);
+		MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE);
+		_params->params_batch.get_batch.top_node_cnt--;
+	} else {
+		/* increase total scan count using current scan count */
+		_params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header;
+	}
+
+	if (buf && bufsize) {
+		/* This is a first try to get batching results */
+		if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+			/* move the scan_results_list to expired_scan_results_lists */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(siter, snext,
+				&_params->params_batch.get_batch.scan_results_list, list) {
+				list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+			/* reset global values after moving to expired list */
+			_params->params_batch.get_batch.top_node_cnt = 0;
+			_params->params_batch.get_batch.expired_tot_scan_cnt =
+				_params->params_batch.get_batch.tot_scan_cnt;
+			_params->params_batch.get_batch.tot_scan_cnt = 0;
+		}
+convert_format:
+		err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize);
+		if (err < 0) {
+			DHD_ERROR(("failed to convert the data into upper layer format\n"));
+			goto exit;
+		}
+	}
+exit:
+	if (plbestnet)
+		MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
+	if (_params) {
+		_params->params_batch.get_batch.buf = NULL;
+		_params->params_batch.get_batch.bufsize = 0;
+		_params->params_batch.get_batch.bytes_written = err;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+exit_no_unlock:
+	if (swait_active(&_pno_state->get_batch_done.wait))
+		complete(&_pno_state->get_batch_done);
+	return err;
+}
+
+static void
+_dhd_pno_get_batch_handler(struct work_struct *work)
+{
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pub_t *dhd;
+	struct dhd_pno_batch_params *params_batch;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	_pno_state = container_of(work, struct dhd_pno_status_info, work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	dhd = _pno_state->dhd;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+#ifdef GSCAN_SUPPORT
+	_dhd_pno_get_gscan_batch_from_fw(dhd);
+#endif /* GSCAN_SUPPORT */
+	if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+		params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+
+		_dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf,
+			params_batch->get_batch.bufsize, params_batch->get_batch.reason);
+	}
+}
+
+int
+dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	char *pbuf = buf;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_batch_params *params_batch;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
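+	/* In GSCAN mode the results are pulled by the worker scheduled from
+	 * dhd_retreive_batch_scan_results() and we only wait (with a timeout)
+	 * for that retrieval to finish; in legacy batch mode the same worker
+	 * fills the caller's buffer and we wait on get_batch_done.
+	 */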
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		struct dhd_pno_gscan_params *gscan_params;
+		gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+		gscan_params->reason = reason;
+		err = dhd_retreive_batch_scan_results(dhd);
+		if (err == BCME_OK) {
+			wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+			     is_batch_retrieval_complete(gscan_params),
+			     msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+		}
+	} else
+#endif
+	{
+		if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+			DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+			memset(pbuf, 0, bufsize);
+			pbuf += snprintf(pbuf, bufsize, "scancount=%d\n", 0);
+			snprintf(pbuf, bufsize - (pbuf - buf), "%s", RESULTS_END_MARKER);
+			err = strlen(buf);
+			goto exit;
+		}
+		params_batch->get_batch.buf = buf;
+		params_batch->get_batch.bufsize = bufsize;
+		params_batch->get_batch.reason = reason;
+		params_batch->get_batch.bytes_written = 0;
+		schedule_work(&_pno_state->work);
+		wait_for_completion(&_pno_state->get_batch_done);
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE))
+#endif
+	err = params_batch->get_batch.bytes_written;
+exit:
+	return err;
+}
+
+int
+dhd_pno_stop_for_batch(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	int mode = 0;
+	int i = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		DHD_PNO(("Gscan is ongoing, nothing to stop here\n"));
+		return err;
+	}
+#endif
+
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) {
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+
+		_pno_state->pno_mode = mode;
+		/* restart Legacy PNO if the Legacy PNO is on */
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			struct dhd_pno_legacy_params *_params_legacy;
+			_params_legacy =
+				&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+			err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr,
+					_params_legacy->pno_repeat,
+					_params_legacy->pno_freq_expo_max,
+					_params_legacy->chan_list, _params_legacy->nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			struct dhd_pno_bssid *iter, *next;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+				_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+					" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			i = 0;
+			/* convert dhd_pno_bssid to wl_pfn_bssid */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(iter, next,
+				&_params->params_hotlist.bssid_list, list) {
+				memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
+				p_pfn_bssid[i].flags = iter->flags;
+				i++;
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	kfree(p_pfn_bssid);
+	return err;
+}
+
+int
+dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	int err = BCME_OK;
+	int i;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0;
+	int tot_nchan = 0;
+	int mode = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	struct dhd_pno_bssid *_pno_bssid;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(hotlist_params, "hotlist_params is NULL", err);
+	NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	}
+	_params->params_batch.nchan = hotlist_params->nchan;
+	_params->params_batch.scan_fr = hotlist_params->scan_fr;
+	if (hotlist_params->nchan)
+		memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list,
+			sizeof(_params->params_hotlist.chan_list));
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan;
+	if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_hotlist.chan_list[hotlist_params->nchan],
+		&rem_nchan, hotlist_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, hotlist_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_chan has valid channel count */
+		_params->params_hotlist.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan,
+			sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+{
+		int i;
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_batch.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+}
+#endif
+	if (_params->params_hotlist.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_hotlist.chan_list,
+			sizeof(_chan_list));
+		tot_nchan = _params->params_hotlist.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			DHD_PNO(("PNO SSID is in progress in firmware\n"));
+			/* store current pno_mode before disabling pno */
+			mode = _pno_state->pno_mode;
+			err = _dhd_pno_enable(dhd, PNO_OFF);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+				goto exit;
+			}
+			/* restore the previous mode */
+			_pno_state->pno_mode = mode;
+			/* Use the superset for channelist between two mode */
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+			if (_params2->params_legacy.nchan > 0 &&
+				_params->params_hotlist.nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_legacy.chan_list[0],
+					_params2->params_legacy.nchan,
+					&_params->params_hotlist.chan_list[0],
+					_params->params_hotlist.nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+						" between legacy and hotlist\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			}
+
+	}
+
+	INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list));
+
+	err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	for (i = 0; i < hotlist_params->nbssid; i++) {
+		_pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+		NULL_CHECK(_pno_bssid, "_pno_bssid is NULL", err);
+		memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN);
+		_pno_bssid->flags = p_pfn_bssid[i].flags;
+		list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list);
+	}
+	_params->params_hotlist.nbssid = hotlist_params->nbssid;
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+	return err;
+}
+
+int
+dhd_pno_stop_for_hotlist(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		DHD_ERROR(("%s : Hotlist MODE is not enabled\n",
+			__FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+		/* restore previous pno mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			/* restart Legacy PNO Scan */
+			struct dhd_pno_legacy_params *_params_legacy;
+			_params_legacy =
+			&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+
+			err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr,
+				_params_legacy->pno_repeat, _params_legacy->pno_freq_expo_max,
+				_params_legacy->chan_list, _params_legacy->nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			/* restart Batching Scan */
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			/* restart BATCH SCAN */
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__,  err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+int
+dhd_retreive_batch_scan_results(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	struct dhd_pno_batch_params *params_batch;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
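+	/* get_batch_flag serializes retrievals: schedule the worker only when
+	 * the previous batch retrieval has completed
+	 */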
+	if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) {
+		DHD_PNO(("Retrieve batch results\n"));
+		params_batch->get_batch.buf = NULL;
+		params_batch->get_batch.bufsize = 0;
+		params_batch->get_batch.reason = PNO_STATUS_EVENT;
+		_params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS;
+		smp_wmb();
+		schedule_work(&_pno_state->work);
+	} else {
+		DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval"
+			" already in progress, will skip\n", __FUNCTION__));
+		err = BCME_ERROR;
+	}
+
+	return err;
+}
+
+void
+dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type)
+{
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	struct dhd_pno_gscan_params *gscan_params;
+	gscan_results_cache_t *iter, *tmp;
+
+	if (!_pno_state) {
+		return;
+	}
+	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+	if (type == HOTLIST_FOUND) {
+		iter = gscan_params->gscan_hotlist_found;
+		gscan_params->gscan_hotlist_found = NULL;
+	} else {
+		iter = gscan_params->gscan_hotlist_lost;
+		gscan_params->gscan_hotlist_lost = NULL;
+	}
+
+	while (iter) {
+		tmp = iter->next;
+		kfree(iter);
+		iter = tmp;
+	}
+
+	return;
+}
+
+void *
+dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len, int *size)
+{
+	wl_bss_info_t *bi = NULL;
+	wl_gscan_result_t *gscan_result;
+	wifi_gscan_full_result_t *result = NULL;
+	u32 bi_length = 0;
+	uint8 channel;
+	uint32 mem_needed;
+	struct timespec64 ts;
+	u32 bi_ie_length = 0;
+	u32 bi_ie_offset = 0;
+
+	*size = 0;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	gscan_result = (wl_gscan_result_t *)data;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	if (!gscan_result) {
+		DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
+		goto exit;
+	}
+
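+	/* bounds check the event payload: the reported buflen must fit within
+	 * the received length and never exceed the fixed result plus max IE size
+	 */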
+	if ((len < sizeof(*gscan_result)) ||
+	    (len < dtoh32(gscan_result->buflen)) ||
+	    (dtoh32(gscan_result->buflen) >
+	    (sizeof(*gscan_result) + WL_SCAN_IE_LEN_MAX))) {
+		DHD_ERROR(("%s: invalid gscan buflen:%u\n", __FUNCTION__,
+			dtoh32(gscan_result->buflen)));
+		goto exit;
+	}
+
+	bi = &gscan_result->bss_info[0].info;
+	bi_length = dtoh32(bi->length);
+	if (bi_length != (dtoh32(gscan_result->buflen) -
+	       WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) {
+		DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
+		goto exit;
+	}
+	bi_ie_offset = dtoh32(bi->ie_offset);
+	bi_ie_length = dtoh32(bi->ie_length);
+	if ((bi_ie_offset + bi_ie_length) > bi_length) {
+		DHD_ERROR(("%s: Invalid ie_length:%u or ie_offset:%u\n",
+			__FUNCTION__, bi_ie_length, bi_ie_offset));
+		goto exit;
+	}
+	if (bi->SSID_len > DOT11_MAX_SSID_LEN) {
+		DHD_ERROR(("%s: Invalid SSID length:%u\n", __FUNCTION__, bi->SSID_len));
+		goto exit;
+	}
+	mem_needed = OFFSETOF(wifi_gscan_full_result_t, ie_data) + bi_ie_length;
+	result = (wifi_gscan_full_result_t *) kmalloc(mem_needed, GFP_KERNEL);
+	if (!result) {
+		DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n",
+		  __FUNCTION__, mem_needed));
+		goto exit;
+	}
+	result->scan_ch_bucket = gscan_result->scan_ch_bucket;
+	memcpy(result->fixed.ssid, bi->SSID, bi->SSID_len);
+	result->fixed.ssid[bi->SSID_len] = '\0';
+	channel = wf_chspec_ctlchan(bi->chanspec);
+	result->fixed.channel = wf_channel2mhz(channel,
+		(channel <= CH_MAX_2G_CHANNEL?
+		WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+	result->fixed.rssi = (int32) bi->RSSI;
+	result->fixed.rtt = 0;
+	result->fixed.rtt_sd = 0;
+	ktime_get_boottime_ts64(&ts);
+	result->fixed.ts = (uint64) TIMESPEC_TO_US(ts);
+	result->fixed.beacon_period = dtoh16(bi->beacon_period);
+	result->fixed.capability = dtoh16(bi->capability);
+	result->ie_length = bi_ie_length;
+	memcpy(&result->fixed.macaddr, &bi->BSSID, ETHER_ADDR_LEN);
+	memcpy(result->ie_data, ((uint8 *)bi + bi_ie_offset), bi_ie_length);
+	*size = mem_needed;
+exit:
+	return result;
+}
+
+void *
+dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, uint32 event, int *size)
+{
+	dhd_epno_results_t *results = NULL;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	struct dhd_pno_gscan_params *gscan_params;
+	uint32 count, mem_needed = 0, i;
+	uint8 ssid[DOT11_MAX_SSID_LEN + 1];
+	struct ether_addr *bssid;
+
+	*size = 0;
+	if (!_pno_state)
+		return NULL;
+	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+	if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		wl_pfn_scanresults_v2_t *pfn_result = (wl_pfn_scanresults_v2_t *)data;
+		wl_pfn_net_info_v2_t *net;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		if (pfn_result->version != PFN_SCANRESULT_VERSION) {
+			DHD_ERROR(("%s event %d: Incorrect version %d %d\n", __FUNCTION__, event,
+			          pfn_result->version, PFN_SCANRESULT_VERSION));
+			return NULL;
+		}
+		/* Check if count of pfn results is corrupted */
+		if (pfn_result->count > EVENT_MAX_NETCNT_V2) {
+			DHD_ERROR(("%s event %d: pfn results count %d"
+				" exceeds the max limit\n", __FUNCTION__, event,
+				pfn_result->count));
+			return NULL;
+		}
+
+		count = pfn_result->count;
+		mem_needed = sizeof(dhd_epno_results_t) * count;
+		results = (dhd_epno_results_t *) kmalloc(mem_needed, GFP_KERNEL);
+		if (!results) {
+			DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__,
+			        mem_needed));
+			return NULL;
+		}
+		for (i = 0; i < count; i++) {
+			net = &pfn_result->netinfo[i];
+			results[i].rssi = net->RSSI;
+			results[i].channel =  wf_channel2mhz(net->pfnsubnet.channel,
+			                  (net->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ?
+			                  WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+			results[i].flags = (event == WLC_E_PFN_NET_FOUND) ?
+			               WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST;
+			results[i].ssid_len = min(net->pfnsubnet.SSID_len,
+			               (uint8)DOT11_MAX_SSID_LEN);
+			bssid = &results[i].bssid;
+			memcpy(bssid, &net->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+			if (!net->pfnsubnet.SSID_len) {
+				dhd_pno_idx_to_ssid(gscan_params, &results[i],
+				       net->pfnsubnet.u.index);
+			} else {
+				memcpy(results[i].ssid, net->pfnsubnet.u.SSID, results[i].ssid_len);
+			}
+			memcpy(ssid, results[i].ssid, results[i].ssid_len);
+			ssid[results[i].ssid_len] = '\0';
+			DHD_PNO(("ssid - %s bssid %02x:%02x:%02x:%02x:%02x:%02x "
+			        "ch %d rssi %d flags %d\n", ssid,
+			        bssid->octet[0], bssid->octet[1],
+			        bssid->octet[2], bssid->octet[3],
+			        bssid->octet[4], bssid->octet[5],
+			        results[i].channel, results[i].rssi, results[i].flags));
+		}
+	}
+	*size = mem_needed;
+	return results;
+}
+
+void *
+dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+        int *send_evt_bytes, hotlist_type_t type)
+{
+	void *ptr = NULL;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	struct dhd_pno_gscan_params *gscan_params;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	wl_pfn_scanresults_v2_t *results = (wl_pfn_scanresults_v2_t *)event_data;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	wifi_gscan_result_t *hotlist_found_array;
+	wl_pfn_net_info_v2_t *plnetinfo;
+	gscan_results_cache_t *gscan_hotlist_cache;
+	int malloc_size = 0, i, total = 0;
+	struct timespec64 tm_spec;
+
+	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+	if ((results->count == 0) || (results->count > EVENT_MAX_NETCNT_V2)) {
+		DHD_ERROR(("%s: wrong result count:%d\n", __FUNCTION__, results->count));
+		*send_evt_bytes = 0;
+		return ptr;
+	}
+
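+	/* snapshot the current boottime; it is used below to convert the
+	 * firmware's relative timestamps into system time for each entry
+	 */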
+	ktime_get_boottime_ts64(&tm_spec);
+	malloc_size = sizeof(gscan_results_cache_t) +
+	((results->count - 1) * sizeof(wifi_gscan_result_t));
+	gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL);
+
+	if (!gscan_hotlist_cache) {
+		DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
+		*send_evt_bytes = 0;
+		return ptr;
+	}
+
+	if (type == HOTLIST_FOUND) {
+		gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+		gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+		DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, results->count));
+	} else {
+		gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+		gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+		DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, results->count));
+	}
+
+	gscan_hotlist_cache->tot_count = results->count;
+	gscan_hotlist_cache->tot_consumed = 0;
+	gscan_hotlist_cache->scan_ch_bucket = results->scan_ch_bucket;
+	plnetinfo = results->netinfo;
+
+	for (i = 0; i < results->count; i++, plnetinfo++) {
+		hotlist_found_array = &gscan_hotlist_cache->results[i];
+		memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t));
+		hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+			(plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+		hotlist_found_array->rssi = (int32) plnetinfo->RSSI;
+
+		hotlist_found_array->ts =
+		       convert_fw_rel_time_to_systime(&tm_spec, (plnetinfo->timestamp * 1000));
+		if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+			          plnetinfo->pfnsubnet.SSID_len));
+			plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+		}
+		memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.u.SSID,
+			plnetinfo->pfnsubnet.SSID_len);
+		hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+
+		memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+		DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid,
+		hotlist_found_array->macaddr.octet[0],
+		hotlist_found_array->macaddr.octet[1],
+		hotlist_found_array->macaddr.octet[2],
+		hotlist_found_array->macaddr.octet[3],
+		hotlist_found_array->macaddr.octet[4],
+		hotlist_found_array->macaddr.octet[5],
+		hotlist_found_array->rssi));
+	}
+
+
+	if (results->status == PFN_COMPLETE) {
+		ptr = (void *) gscan_hotlist_cache;
+		while (gscan_hotlist_cache) {
+			total += gscan_hotlist_cache->tot_count;
+			gscan_hotlist_cache = gscan_hotlist_cache->next;
+		}
+		*send_evt_bytes =  total * sizeof(wifi_gscan_result_t);
+	}
+
+	return ptr;
+}
+#endif /* GSCAN_SUPPORT */
+int
+dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+	int err = BCME_OK;
+	uint event_type;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	event_type = ntoh32(event->event_type);
+	DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type));
+	switch (event_type) {
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BSSID_NET_LOST:
+		/* TODO : need to implement event logic using generic netlink */
+		break;
+	case WLC_E_PFN_BEST_BATCHING:
+#ifndef GSCAN_SUPPORT
+	{
+		struct dhd_pno_batch_params *params_batch;
+		params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+		if (!swait_active(&_pno_state->get_batch_done.wait)) {
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
+			params_batch->get_batch.buf = NULL;
+			params_batch->get_batch.bufsize = 0;
+			params_batch->get_batch.reason = PNO_STATUS_EVENT;
+			schedule_work(&_pno_state->work);
+		} else
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING"
+				", will skip this event\n", __FUNCTION__));
+		break;
+	}
+#else
+		break;
+#endif /* !GSCAN_SUPPORT */
+	default:
+		DHD_ERROR(("unknown event : %d\n", event_type));
+	}
+exit:
+	return err;
+}
+
+int dhd_pno_init(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	char *buf = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	UNUSED_PARAMETER(_dhd_pno_suspend);
+	if (dhd->pno_state)
+		goto exit;
+	dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t));
+	NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err);
+	memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t));
+	/* need to check whether current firmware support batching and hotlist scan */
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_pno_state->wls_supported = TRUE;
+	_pno_state->dhd = dhd;
+	mutex_init(&_pno_state->pno_mutex);
+	INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler);
+	init_completion(&_pno_state->get_batch_done);
+#ifdef GSCAN_SUPPORT
+	init_waitqueue_head(&_pno_state->batch_get_wait);
+#endif /* GSCAN_SUPPORT */
+	buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+	if (!buf) {
+		DHD_ERROR((":%s buf alloc err.\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
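+	/* probe the firmware with a zero-length "pfnlbest" query; BCME_UNSUPPORTED
+	 * here means the firmware has no WLS/batching support
+	 */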
+	err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, buf, WLC_IOCTL_SMLEN,
+			FALSE);
+	if (err == BCME_UNSUPPORTED) {
+		_pno_state->wls_supported = FALSE;
+		DHD_INFO(("Current firmware doesn't support"
+			" Android Location Service\n"));
+	} else {
+		DHD_ERROR(("%s: Support Android Location Service\n",
+			__FUNCTION__));
+	}
+exit:
+	kfree(buf);
+	return err;
+}
+
+int dhd_pno_deinit(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	NULL_CHECK(_pno_state, "pno_state is NULL", err);
+	/* may need to free legacy ssid_list */
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+		mutex_lock(&_pno_state->pno_mutex);
+		dhd_pno_reset_cfg_gscan(_params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+		mutex_unlock(&_pno_state->pno_mutex);
+	}
+#endif /* GSCAN_SUPPORT */
+
+	if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+		/* clear resource if the BATCH MODE is on */
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	}
+	cancel_work_sync(&_pno_state->work);
+	MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t));
+	dhd->pno_state = NULL;
+	return err;
+}
+#endif /* PNO_SUPPORT */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.h b/drivers/net/wireless/bcmdhd/dhd_pno.h
new file mode 100644
index 0000000..7b56c55
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.h
@@ -0,0 +1,572 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload code and Wi-Fi Location Service (WLS) code.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pno.h 707287 2017-06-27 06:44:29Z $
+ */
+
+#ifndef __DHD_PNO_H__
+#define __DHD_PNO_H__
+
+#if defined(PNO_SUPPORT)
+#define PNO_TLV_PREFIX			'S'
+#define PNO_TLV_VERSION			'1'
+#define PNO_TLV_SUBTYPE_LEGACY_PNO '2'
+#define PNO_TLV_RESERVED		'0'
+
+#define PNO_BATCHING_SET "SET"
+#define PNO_BATCHING_GET "GET"
+#define PNO_BATCHING_STOP "STOP"
+
+#define PNO_PARAMS_DELIMETER " "
+#define PNO_PARAM_CHANNEL_DELIMETER ","
+#define PNO_PARAM_VALUE_DELLIMETER '='
+#define PNO_PARAM_SCANFREQ "SCANFREQ"
+#define PNO_PARAM_BESTN	"BESTN"
+#define PNO_PARAM_MSCAN "MSCAN"
+#define PNO_PARAM_CHANNEL "CHANNEL"
+#define PNO_PARAM_RTT "RTT"
+
+#define PNO_TLV_TYPE_SSID_IE		'S'
+#define PNO_TLV_TYPE_TIME		'T'
+#define PNO_TLV_FREQ_REPEAT		'R'
+#define PNO_TLV_FREQ_EXPO_MAX		'M'
+
+#define MAXNUM_SSID_PER_ADD	16
+#define MAXNUM_PNO_PARAMS 2
+#define PNO_TLV_COMMON_LENGTH	1
+#define DEFAULT_BATCH_MSCAN 16
+
+#define RESULTS_END_MARKER "----\n"
+#define SCAN_END_MARKER "####\n"
+#define AP_END_MARKER "====\n"
+#define PNO_RSSI_MARGIN_DBM          30
+
+#define CSCAN_COMMAND			"CSCAN "
+#define CSCAN_TLV_PREFIX		'S'
+#define CSCAN_TLV_VERSION		1
+#define CSCAN_TLV_SUBVERSION		0
+#define CSCAN_TLV_TYPE_SSID_IE		'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE	'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE	'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE	'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE	'P'
+#define CSCAN_TLV_TYPE_HOME_IE		'H'
+#define CSCAN_TLV_TYPE_STYPE_IE		'T'
+
+#define WL_SCAN_PARAMS_SSID_MAX         10
+#define GET_SSID                        "SSID="
+#define GET_CHANNEL                     "CH="
+#define GET_NPROBE                      "NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL          "ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL         "PASSIVE="
+#define GET_HOME_DWELL                  "HOME="
+#define GET_SCAN_TYPE                   "TYPE="
+
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+#define GSCAN_MAX_CH_BUCKETS             8
+#define GSCAN_MAX_CHANNELS_IN_BUCKET     32
+#define GSCAN_MAX_AP_CACHE_PER_SCAN      32
+#define GSCAN_MAX_AP_CACHE               320
+#define GSCAN_BG_BAND_MASK             (1 << 0)
+#define GSCAN_A_BAND_MASK              (1 << 1)
+#define GSCAN_DFS_MASK                 (1 << 2)
+#define GSCAN_ABG_BAND_MASK            (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK)
+#define GSCAN_BAND_MASK                (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK)
+
+#define GSCAN_FLUSH_HOTLIST_CFG      (1 << 0)
+#define GSCAN_FLUSH_SIGNIFICANT_CFG  (1 << 1)
+#define GSCAN_FLUSH_SCAN_CFG         (1 << 2)
+#define GSCAN_FLUSH_EPNO_CFG         (1 << 3)
+#define GSCAN_FLUSH_ALL_CFG     (GSCAN_FLUSH_SCAN_CFG | \
+								GSCAN_FLUSH_SIGNIFICANT_CFG | \
+								GSCAN_FLUSH_HOTLIST_CFG  | \
+								GSCAN_FLUSH_EPNO_CFG)
+#define DHD_EPNO_HIDDEN_SSID          (1 << 0)
+#define DHD_EPNO_A_BAND_TRIG          (1 << 1)
+#define DHD_EPNO_BG_BAND_TRIG         (1 << 2)
+#define DHD_EPNO_STRICT_MATCH         (1 << 3)
+#define DHD_EPNO_SAME_NETWORK         (1 << 4)
+#define DHD_PNO_USE_SSID              (DHD_EPNO_HIDDEN_SSID | DHD_EPNO_STRICT_MATCH)
+
+/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */
+#define GSCAN_BATCH_RETRIEVAL_COMPLETE      0
+#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS   1
+#define GSCAN_BATCH_NO_THR_SET              101
+#define GSCAN_LOST_AP_WINDOW_DEFAULT        4
+#define GSCAN_MIN_BSSID_TIMEOUT             90
+#define GSCAN_BATCH_GET_MAX_WAIT            500
+#define CHANNEL_BUCKET_EMPTY_INDEX                      0xFFFF
+#define GSCAN_RETRY_THRESHOLD              3
+
+#define MAX_EPNO_SSID_NUM                   64
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+enum scan_status {
+	/* SCAN ABORT by other scan */
+	PNO_STATUS_ABORT,
+	/* RTT is presence or not */
+	/* RTT is present or not */
+	/* Disable PNO by Driver */
+	PNO_STATUS_DISABLE,
+	/* NORMAL BATCHING GET */
+	PNO_STATUS_NORMAL,
+	/* WLC_E_PFN_BEST_BATCHING */
+	PNO_STATUS_EVENT,
+	PNO_STATUS_MAX
+};
+#define PNO_STATUS_ABORT_MASK 0x0001
+#define PNO_STATUS_RTT_MASK 0x0002
+#define PNO_STATUS_DISABLE_MASK 0x0004
+#define PNO_STATUS_OOM_MASK 0x0010
+
+enum index_mode {
+	INDEX_OF_LEGACY_PARAMS,
+	INDEX_OF_BATCH_PARAMS,
+	INDEX_OF_HOTLIST_PARAMS,
+	/* GSCAN includes hotlist scan and they do not run
+	 * independently of each other
+	 */
+	INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS,
+	INDEX_MODE_MAX
+};
+enum dhd_pno_status {
+	DHD_PNO_DISABLED,
+	DHD_PNO_ENABLED,
+	DHD_PNO_SUSPEND
+};
+typedef struct cmd_tlv {
+	char prefix;
+	char version;
+	char subtype;
+	char reserved;
+} cmd_tlv_t;
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+typedef enum {
+    WIFI_BAND_UNSPECIFIED,
+    WIFI_BAND_BG = 1,                       /* 2.4 GHz                   */
+    WIFI_BAND_A = 2,                        /* 5 GHz without DFS         */
+    WIFI_BAND_A_DFS = 4,                    /* 5 GHz DFS only            */
+    WIFI_BAND_A_WITH_DFS = 6,               /* 5 GHz with DFS            */
+    WIFI_BAND_ABG = 3,                      /* 2.4 GHz + 5 GHz; no DFS   */
+    WIFI_BAND_ABG_WITH_DFS = 7,             /* 2.4 GHz + 5 GHz with DFS  */
+} gscan_wifi_band_t;
+
+typedef enum {
+	HOTLIST_LOST,
+	HOTLIST_FOUND
+} hotlist_type_t;
+
+typedef enum dhd_pno_gscan_cmd_cfg {
+	DHD_PNO_BATCH_SCAN_CFG_ID = 0,
+	DHD_PNO_GEOFENCE_SCAN_CFG_ID,
+	DHD_PNO_SIGNIFICANT_SCAN_CFG_ID,
+	DHD_PNO_SCAN_CFG_ID,
+	DHD_PNO_GET_CAPABILITIES,
+	DHD_PNO_GET_BATCH_RESULTS,
+	DHD_PNO_GET_CHANNEL_LIST,
+	DHD_PNO_GET_NEW_EPNO_SSID_ELEM,
+	DHD_PNO_EPNO_CFG_ID,
+	DHD_PNO_GET_AUTOJOIN_CAPABILITIES,
+	DHD_PNO_EPNO_PARAMS_ID
+} dhd_pno_gscan_cmd_cfg_t;
+
+typedef enum dhd_pno_mode {
+	/* Wi-Fi Legacy PNO Mode */
+	DHD_PNO_NONE_MODE   = 0,
+	DHD_PNO_LEGACY_MODE = (1 << (0)),
+	/* Wi-Fi Android BATCH SCAN Mode */
+	DHD_PNO_BATCH_MODE = (1 << (1)),
+	/* Wi-Fi Android Hotlist SCAN Mode */
+	DHD_PNO_HOTLIST_MODE = (1 << (2)),
+	/* Wi-Fi Google Android SCAN Mode */
+	DHD_PNO_GSCAN_MODE = (1 << (3))
+} dhd_pno_mode_t;
+#else
+typedef enum dhd_pno_mode {
+	/* Wi-Fi Legacy PNO Mode */
+	DHD_PNO_NONE_MODE   = 0,
+	DHD_PNO_LEGACY_MODE = (1 << (0)),
+	/* Wi-Fi Android BATCH SCAN Mode */
+	DHD_PNO_BATCH_MODE = (1 << (1)),
+	/* Wi-Fi Android Hotlist SCAN Mode */
+	DHD_PNO_HOTLIST_MODE = (1 << (2))
+} dhd_pno_mode_t;
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+typedef struct dhd_pno_ssid {
+	bool		hidden;
+	int8		rssi_thresh;
+	uint8		dummy;
+	uint16		SSID_len;
+	uint32		flags;
+	int32		wpa_auth;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+	struct list_head list;
+} dhd_pno_ssid_t;
+
+struct dhd_pno_bssid {
+	struct ether_addr	macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16			flags;
+	struct list_head list;
+};
+
+typedef struct dhd_pno_bestnet_entry {
+	struct ether_addr BSSID;
+	uint8	SSID_len;
+	uint8	SSID[DOT11_MAX_SSID_LEN];
+	int8	RSSI;
+	uint8	channel;
+	uint32	timestamp;
+	uint16	rtt0; /* distance_cm based on RTT */
+	uint16	rtt1; /* distance_cm based on sample standard deviation */
+	unsigned long recorded_time;
+	struct list_head list;
+} dhd_pno_bestnet_entry_t;
+#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t))
+
+typedef struct dhd_pno_bestnet_header {
+	struct dhd_pno_bestnet_header *next;
+	uint8 reason;
+	uint32 tot_cnt;
+	uint32 tot_size;
+	struct list_head entry_list;
+} dhd_pno_best_header_t;
+#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t))
+
+typedef struct dhd_pno_scan_results {
+	dhd_pno_best_header_t *bestnetheader;
+	uint8 cnt_header;
+	struct list_head list;
+} dhd_pno_scan_results_t;
+#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t))
+
+struct dhd_pno_get_batch_info {
+	/* info related to get batch */
+	char *buf;
+	bool batch_started;
+	uint32 tot_scan_cnt;
+	uint32 expired_tot_scan_cnt;
+	uint32 top_node_cnt;
+	uint32 bufsize;
+	uint32 bytes_written;
+	int reason;
+	struct list_head scan_results_list;
+	struct list_head expired_scan_results_list;
+};
+struct dhd_pno_legacy_params {
+	uint16 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	int pno_repeat;
+	int pno_freq_expo_max;
+	int nssid;
+	struct list_head ssid_list;
+};
+struct dhd_pno_batch_params {
+	int32 scan_fr;
+	uint8 bestn;
+	uint8 mscan;
+	uint8 band;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 rtt;
+	struct dhd_pno_get_batch_info get_batch;
+};
+struct dhd_pno_hotlist_params {
+	uint8 band;
+	int32 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 nbssid;
+	struct list_head bssid_list;
+};
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+#define DHD_PNO_REPORT_NO_BATCH      (1 << 2)
+
+typedef struct dhd_pno_gscan_channel_bucket {
+	uint16 bucket_freq_multiple;
+	/* band = 1 All bg band channels,
+	 * band = 2 All a band channels,
+	 * band = 0 chan_list channels
+	 */
+	uint16 band;
+	uint8 report_flag;
+	uint8 num_channels;
+	uint16 repeat;
+	uint16 bucket_max_multiple;
+	uint16 chan_list[GSCAN_MAX_CHANNELS_IN_BUCKET];
+} dhd_pno_gscan_channel_bucket_t;
+
+
+#define DHD_PNO_AUTH_CODE_OPEN  1 /*  Open   */
+#define DHD_PNO_AUTH_CODE_PSK   2 /* WPA_PSK or WPA2PSK */
+#define DHD_PNO_AUTH_CODE_EAPOL 4 /* any EAPOL    */
+
+#define DHD_EPNO_DEFAULT_INDEX     0xFFFFFFFF
+
+typedef struct dhd_epno_params {
+	uint8 ssid[DOT11_MAX_SSID_LEN];
+	uint8 ssid_len;
+	int8 rssi_thresh;
+	uint8 flags;
+	uint8 auth;
+	/* index required only for visible ssid */
+	uint32 index;
+	struct list_head list;
+} dhd_epno_params_t;
+
+typedef struct dhd_epno_results {
+	uint8 ssid[DOT11_MAX_SSID_LEN];
+	uint8 ssid_len;
+	int8 rssi;
+	uint16 channel;
+	uint16 flags;
+	struct ether_addr bssid;
+} dhd_epno_results_t;
+
+typedef struct wifi_gscan_result {
+	uint64 ts;                           /* Time of discovery           */
+	char ssid[DOT11_MAX_SSID_LEN+1];     /* null terminated             */
+	struct ether_addr	macaddr;         /* BSSID                      */
+	uint32 channel;                      /* channel frequency in MHz    */
+	int32 rssi;                          /* in db                       */
+	uint64 rtt;                          /* in nanoseconds              */
+	uint64 rtt_sd;                       /* standard deviation in rtt   */
+	uint16 beacon_period;                /* units are Kusec             */
+	uint16 capability;                   /* Capability information       */
+	uint32 pad;
+} wifi_gscan_result_t;
+
+typedef struct wifi_gscan_full_result {
+    wifi_gscan_result_t fixed;
+    uint32 scan_ch_bucket;
+    uint32 ie_length;		/* byte length of Information Elements */
+    char  ie_data[1];		/* IE  data to follow       */
+} wifi_gscan_full_result_t;
+
+typedef struct gscan_results_cache {
+	struct gscan_results_cache *next;
+	uint8  scan_id;
+	uint8  flag;
+	uint8  tot_count;
+	uint8  tot_consumed;
+	uint32 scan_ch_bucket;
+	wifi_gscan_result_t results[1];
+} gscan_results_cache_t;
+
+typedef struct dhd_pno_gscan_capabilities {
+	int max_scan_cache_size;
+	int max_scan_buckets;
+	int max_ap_cache_per_scan;
+	int max_rssi_sample_size;
+	int max_scan_reporting_threshold;
+	int max_hotlist_aps;
+	int max_significant_wifi_change_aps;
+	int max_epno_ssid_crc32;
+	int max_epno_hidden_ssid;
+	int max_white_list_ssid;
+} dhd_pno_gscan_capabilities_t;
+
+typedef struct dhd_epno_ssid_cfg {
+	wl_pfn_ssid_params_t params;
+	uint32 num_epno_ssid;
+	struct list_head epno_ssid_list;
+} dhd_epno_ssid_cfg_t;
+
+struct dhd_pno_gscan_params {
+	int32 scan_fr;
+	uint8 bestn;
+	uint8 mscan;
+	uint8 buffer_threshold;
+	uint8 lost_ap_window;
+	uint8 nchannel_buckets;
+	uint8 reason;
+	uint8 get_batch_flag;
+	uint8 send_all_results_flag;
+	uint16 max_ch_bucket_freq;
+	gscan_results_cache_t *gscan_batch_cache;
+	gscan_results_cache_t *gscan_hotlist_found;
+	gscan_results_cache_t *gscan_hotlist_lost;
+	uint16 nbssid_hotlist;
+	struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+	struct list_head hotlist_bssid_list;
+	dhd_epno_ssid_cfg_t epno_cfg;
+	uint32 scan_id;
+};
+
+typedef struct gscan_scan_params {
+	int32 scan_fr;
+	uint16 nchannel_buckets;
+	struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+} gscan_scan_params_t;
+
+typedef struct gscan_batch_params {
+	uint8 bestn;
+	uint8 mscan;
+	uint8 buffer_threshold;
+} gscan_batch_params_t;
+
+struct bssid_t {
+	struct ether_addr	macaddr;
+	int16 rssi_reporting_threshold;  /* 0 -> no reporting threshold */
+};
+
+typedef struct gscan_hotlist_scan_params {
+	uint16 lost_ap_window; /* number of scans to declare LOST */
+	uint16 nbssid;   /* number of bssids  */
+	struct bssid_t bssid[1];  /* n bssids to follow */
+} gscan_hotlist_scan_params_t;
+
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+typedef union dhd_pno_params {
+	struct dhd_pno_legacy_params params_legacy;
+	struct dhd_pno_batch_params params_batch;
+	struct dhd_pno_hotlist_params params_hotlist;
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+	struct dhd_pno_gscan_params params_gscan;
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+} dhd_pno_params_t;
+typedef struct dhd_pno_status_info {
+	dhd_pub_t *dhd;
+	struct work_struct work;
+	struct mutex pno_mutex;
+#ifdef GSCAN_SUPPORT
+	wait_queue_head_t batch_get_wait;
+#endif /* GSCAN_SUPPORT */
+	struct completion get_batch_done;
+	bool wls_supported; /* wifi location service supported or not */
+	enum dhd_pno_status pno_status;
+	enum dhd_pno_mode pno_mode;
+	dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX];
+	struct list_head head_list;
+} dhd_pno_status_info_t;
+
+/* wrapper functions */
+extern int
+dhd_dev_pno_enable(struct net_device *dev, int enable);
+
+extern int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int
+dhd_dev_pno_set_for_batch(struct net_device *dev,
+	struct dhd_pno_batch_params *batch_params);
+
+extern int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize);
+
+extern int
+dhd_dev_pno_stop_for_batch(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+extern bool dhd_dev_is_legacy_pno_enabled(struct net_device *dev);
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+extern void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info,
+        uint32 *len);
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef GSCAN_SUPPORT
+extern int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+              void *buf, bool flush);
+int dhd_dev_pno_lock_access_batch_results(struct net_device *dev);
+void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev);
+extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush);
+extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time);
+int dhd_retreive_batch_scan_results(dhd_pub_t *dhd);
+extern void * dhd_dev_hotlist_scan_event(struct net_device *dev,
+            const void  *data, int *send_evt_bytes, hotlist_type_t type);
+void * dhd_dev_process_full_gscan_result(struct net_device *dev,
+            const void  *data, uint32 len, int *send_evt_bytes);
+extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev);
+extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type);
+extern int dhd_dev_wait_batch_results_complete(struct net_device *dev);
+extern void * dhd_dev_process_epno_result(struct net_device *dev,
+		const void  *data, uint32 event, int *send_evt_bytes);
+extern int dhd_dev_set_epno(struct net_device *dev);
+extern int dhd_dev_flush_fw_epno(struct net_device *dev);
+#endif /* GSCAN_SUPPORT */
+/* dhd pno functions */
+extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int enable);
+extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params);
+
+extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason);
+
+
+extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd);
+
+extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+
+extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd);
+
+extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+extern int dhd_pno_init(dhd_pub_t *dhd);
+extern int dhd_pno_deinit(dhd_pub_t *dhd);
+extern bool dhd_is_pno_supported(dhd_pub_t *dhd);
+extern bool dhd_is_legacy_pno_enabled(dhd_pub_t *dhd);
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info,
+                       uint32 *len);
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef GSCAN_SUPPORT
+extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+                       void *buf, bool flush);
+extern int dhd_pno_lock_batch_results(dhd_pub_t *dhd);
+extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd);
+extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush);
+extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag);
+extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf);
+extern int dhd_dev_retrieve_batch_scan(struct net_device *dev);
+extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+                       int *send_evt_bytes, hotlist_type_t type);
+extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
+                       uint32 len, int *send_evt_bytes);
+extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd);
+extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type);
+extern int dhd_wait_batch_results_complete(dhd_pub_t *dhd);
+extern void * dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data,
+         uint32 event, int *size);
+extern void dhd_pno_translate_epno_fw_flags(uint32 *flags);
+extern int dhd_pno_set_epno(dhd_pub_t *dhd);
+extern int dhd_pno_flush_fw_epno(dhd_pub_t *dhd);
+extern void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth);
+#endif /* GSCAN_SUPPORT */
+#endif
+
+#endif /* __DHD_PNO_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h
new file mode 100644
index 0000000..820e344
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -0,0 +1,201 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_proto.h 678890 2017-01-11 11:48:36Z $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#define DEFAULT_IOCTL_RESP_TIMEOUT	4000
+#ifndef IOCTL_RESP_TIMEOUT
+/* Default value in milliseconds for Production FW */
+#define IOCTL_RESP_TIMEOUT  DEFAULT_IOCTL_RESP_TIMEOUT
+#endif /* IOCTL_RESP_TIMEOUT */
+
+#ifndef MFG_IOCTL_RESP_TIMEOUT
+#define MFG_IOCTL_RESP_TIMEOUT  20000  /* Default value in milliseconds for MFG FW */
+#endif /* MFG_IOCTL_RESP_TIMEOUT */
+
+#define DEFAULT_D3_ACK_RESP_TIMEOUT	1000
+#ifndef D3_ACK_RESP_TIMEOUT
+#define D3_ACK_RESP_TIMEOUT		DEFAULT_D3_ACK_RESP_TIMEOUT
+#endif /* D3_ACK_RESP_TIMEOUT */
+
+#define DEFAULT_DHD_BUS_BUSY_TIMEOUT	(IOCTL_RESP_TIMEOUT + 1000)
+#ifndef DHD_BUS_BUSY_TIMEOUT
+#define DHD_BUS_BUSY_TIMEOUT	DEFAULT_DHD_BUS_BUSY_TIMEOUT
+#endif /* DHD_BUS_BUSY_TIMEOUT */
+
+#define DS_EXIT_TIMEOUT	1000 /* In ms */
+#define DS_ENTER_TIMEOUT 1000 /* In ms */
+
+#define IOCTL_DISABLE_TIMEOUT 0
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Initializes the index block for DMA'ing indices */
+extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz,
+	uint8 type, uint32 length);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_sync_with_dongle(dhd_pub_t *dhdp);
+
+/* Protocol initialization needed for IOCTL/IOVAR path */
+extern int dhd_prot_init(dhd_pub_t *dhd);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                             void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Dump extended trap data */
+extern int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+	uint reorder_info_len, void **pkt, uint32 *free_buf_count);
+
+#ifdef BCMPCIE
+extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound);
+extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound);
+extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound);
+extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
+extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd);
+extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd);
+extern int dhd_post_dummy_msg(dhd_pub_t *dhd);
+extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len);
+extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset);
+extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
+extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd,
+	uint len, uint srcdelay, uint destdelay, uint d11_lpbk);
+
+extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf,
+	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma);
+extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd,
+	uint16 flowid, void *msgbuf_ring);
+extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex);
+extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b);
+extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val);
+extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd);
+extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx);
+extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx);
+extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+	struct bcmstrbuf *strbuf, const char * fmt);
+extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf);
+extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info);
+extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock);
+extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern void dhd_prot_reset(dhd_pub_t *dhd);
+
+#ifdef IDLE_TX_FLOW_MGMT
+extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count);
+extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+#endif /* IDLE_TX_FLOW_MGMT */
+extern int dhd_prot_init_info_rings(dhd_pub_t *dhd);
+
+#endif /* BCMPCIE */
+
+#ifdef DHD_LB
+extern void dhd_lb_tx_compl_handler(unsigned long data);
+extern void dhd_lb_rx_compl_handler(unsigned long data);
+extern void dhd_lb_rx_process_handler(unsigned long data);
+#endif /* DHD_LB */
+extern int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data);
+
+#ifdef BCMPCIE
+extern int dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlv, uint16 tlv_len,
+	uint16 seq, uint16 xt_id);
+extern bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set);
+extern bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set);
+#else /* BCMPCIE */
+#define dhd_prot_send_host_timestamp(a, b, c, d, e)		0
+#define dhd_prot_data_path_tx_timestamp_logging(a, b, c)	0
+#define dhd_prot_data_path_rx_timestamp_logging(a, b, c)	0
+#endif /* BCMPCIE */
+
+extern void dhd_prot_dma_indx_free(dhd_pub_t *dhd);
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.c b/drivers/net/wireless/bcmdhd/dhd_rtt.c
new file mode 100644
index 0000000..700b15e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_rtt.c
@@ -0,0 +1,2421 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <bcmevent.h>
+#include <dhd.h>
+#include <dhd_rtt.h>
+#include <dhd_dbg.h>
+#include <wldev_common.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif /* WL_CFG80211 */
+static DEFINE_SPINLOCK(noti_list_lock);
+#define NULL_CHECK(p, s, err)  \
+			do { \
+				if (!(p)) { \
+					printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+					err = BCME_ERROR; \
+					return err; \
+				} \
+			} while (0)
+
+#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
+#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
+#define TIMESPEC_TO_US(ts)  (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+							(ts).tv_nsec / NSEC_PER_USEC)
+
+#define FTM_IOC_BUFSZ  2048	/* ioctl buffer size for this module (> BCM_XTLV_HDR_SIZE) */
+#define FTM_AVAIL_MAX_SLOTS		32
+#define FTM_MAX_CONFIGS 10
+#define FTM_MAX_PARAMS 10
+#define FTM_DEFAULT_SESSION 1
+#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */
+#define FTM_INVALID -1
+#define	FTM_DEFAULT_CNT_20M		12
+#define FTM_DEFAULT_CNT_40M		10
+#define FTM_DEFAULT_CNT_80M		5
+
+/* convenience macros */
+#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10)
+#define FTM_MICRO2TU(_tu) ((uint64)(_tu) >> 10)
+#define FTM_TU2MILLI(_tu) ((uint32)FTM_TU2MICRO(_tu) / 1000)
+#define FTM_MICRO2MILLI(_x) ((uint32)(_x) / 1000)
+#define FTM_MICRO2SEC(_x) ((uint32)(_x) / 1000000)
+#define FTM_INTVL2NSEC(_intvl) ((uint32)ftm_intvl2nsec(_intvl))
+#define FTM_INTVL2USEC(_intvl) ((uint32)ftm_intvl2usec(_intvl))
+#define FTM_INTVL2MSEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000)
+#define FTM_INTVL2SEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000000)
+#define FTM_USECIN100MILLI(_usec) ((_usec) / 100000)
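+/*
+ * The TU conversions above rely on the 802.11 definition of a Time Unit,
+ * 1 TU = 1024 us, hence the shift by 10. For example,
+ * FTM_TU2MICRO(5) == 5 * 1024 == 5120 us, and FTM_TU2MILLI(5) == 5 ms
+ * (integer-truncated from 5.12 ms).
+ */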
+
+/* Broadcom-specific settings to get more accurate data */
+#define ENABLE_VHT_ACK
+#define CH_MIN_5G_CHANNEL 34
+#define CH_MIN_2G_CHANNEL 1
+
+struct rtt_noti_callback {
+	struct list_head list;
+	void *ctx;
+	dhd_rtt_compl_noti_fn noti_fn;
+};
+
+
+/* bitmask indicating which command group(s) a sub-command belongs to */
+typedef enum {
+	FTM_SUBCMD_FLAG_METHOD	= 0x01,	/* FTM method command */
+	FTM_SUBCMD_FLAG_SESSION = 0x02,	/* FTM session command */
+	FTM_SUBCMD_FLAG_ALL = FTM_SUBCMD_FLAG_METHOD | FTM_SUBCMD_FLAG_SESSION
+} ftm_subcmd_flag_t;
+
+/* proxd ftm config-category definition */
+typedef enum {
+	FTM_CONFIG_CAT_GENERAL = 1,	/* general configuration */
+	FTM_CONFIG_CAT_OPTIONS = 2,	/* 'config options' */
+	FTM_CONFIG_CAT_AVAIL = 3,	/* 'config avail' */
+} ftm_config_category_t;
+
+
+typedef struct ftm_subcmd_info {
+	int16				version;    /* FTM version (optional) */
+	char				*name;		/* cmd-name string as cmdline input */
+	wl_proxd_cmd_t		cmdid;		/* cmd-id */
+	bcm_xtlv_unpack_cbfn_t *handler;  /* cmd response handler (optional) */
+	ftm_subcmd_flag_t	cmdflag; /* CMD flag (optional)  */
+} ftm_subcmd_info_t;
+
+
+typedef struct ftm_config_options_info {
+	uint32 flags;				/* wl_proxd_flags_t/wl_proxd_session_flags_t */
+	bool enable;
+} ftm_config_options_info_t;
+
+typedef struct ftm_config_param_info {
+	uint16		tlvid;	/* mapping TLV id for the item */
+	union {
+		uint32  chanspec;
+		struct ether_addr mac_addr;
+		wl_proxd_intvl_t data_intvl;
+		uint32 data32;
+		uint16 data16;
+		uint8 data8;
+	};
+} ftm_config_param_info_t;
+
+/*
+* definition for id-string mapping.
+*   This is used to map an id (can be cmd-id, tlv-id, ....) to a text-string
+*   for debug-display or cmd-log-display
+*/
+typedef struct ftm_strmap_entry {
+	int32		id;
+	char		*text;
+} ftm_strmap_entry_t;
+
+
+typedef struct ftm_status_map_host_entry {
+	wl_proxd_status_t proxd_status;
+	rtt_reason_t rtt_reason;
+} ftm_status_map_host_entry_t;
+
+static int
+dhd_rtt_convert_results_to_host(rtt_report_t *rtt_report, uint8 *p_data, uint16 tlvid, uint16 len);
+
+static wifi_rate_t
+dhd_rtt_convert_rate_to_host(uint32 ratespec);
+
+#ifdef WL_CFG80211
+static int
+dhd_rtt_start(dhd_pub_t *dhd);
+#endif /* WL_CFG80211 */
+static const int burst_duration_idx[]  = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0};
+
+/* ftm status mapping to host status */
+static const ftm_status_map_host_entry_t ftm_status_map_info[] = {
+	{WL_PROXD_E_INCOMPLETE, RTT_REASON_FAILURE},
+	{WL_PROXD_E_OVERRIDDEN, RTT_REASON_FAILURE},
+	{WL_PROXD_E_ASAP_FAILED, RTT_REASON_FAILURE},
+	{WL_PROXD_E_NOTSTARTED, RTT_REASON_FAIL_NOT_SCHEDULED_YET},
+	{WL_PROXD_E_INVALIDMEAS, RTT_REASON_FAIL_INVALID_TS},
+	{WL_PROXD_E_INCAPABLE, RTT_REASON_FAIL_NO_CAPABILITY},
+	{WL_PROXD_E_MISMATCH, RTT_REASON_FAILURE},
+	{WL_PROXD_E_DUP_SESSION, RTT_REASON_FAILURE},
+	{WL_PROXD_E_REMOTE_FAIL, RTT_REASON_FAILURE},
+	{WL_PROXD_E_REMOTE_INCAPABLE, RTT_REASON_FAILURE},
+	{WL_PROXD_E_SCHED_FAIL, RTT_REASON_FAIL_SCHEDULE},
+	{WL_PROXD_E_PROTO, RTT_REASON_FAIL_PROTOCOL},
+	{WL_PROXD_E_EXPIRED, RTT_REASON_FAILURE},
+	{WL_PROXD_E_TIMEOUT, RTT_REASON_FAIL_TM_TIMEOUT},
+	{WL_PROXD_E_NOACK, RTT_REASON_FAIL_NO_RSP},
+	{WL_PROXD_E_DEFERRED, RTT_REASON_FAILURE},
+	{WL_PROXD_E_INVALID_SID, RTT_REASON_FAILURE},
+	{WL_PROXD_E_REMOTE_CANCEL, RTT_REASON_FAILURE},
+	{WL_PROXD_E_CANCELED, RTT_REASON_ABORTED},
+	{WL_PROXD_E_INVALID_SESSION, RTT_REASON_FAILURE},
+	{WL_PROXD_E_BAD_STATE, RTT_REASON_FAILURE},
+	{WL_PROXD_E_ERROR, RTT_REASON_FAILURE},
+	{WL_PROXD_E_OK, RTT_REASON_SUCCESS}
+};
+
+/* ftm tlv-id mapping */
+static const ftm_strmap_entry_t ftm_tlvid_loginfo[] = {
+	/* { WL_PROXD_TLV_ID_xxx,				"text for WL_PROXD_TLV_ID_xxx" }, */
+	{ WL_PROXD_TLV_ID_NONE,				"none" },
+	{ WL_PROXD_TLV_ID_METHOD,			"method" },
+	{ WL_PROXD_TLV_ID_FLAGS,			"flags" },
+	{ WL_PROXD_TLV_ID_CHANSPEC,			"chanspec" },
+	{ WL_PROXD_TLV_ID_TX_POWER,			"tx power" },
+	{ WL_PROXD_TLV_ID_RATESPEC,			"ratespec" },
+	{ WL_PROXD_TLV_ID_BURST_DURATION,		"burst duration" },
+	{ WL_PROXD_TLV_ID_BURST_PERIOD,			"burst period" },
+	{ WL_PROXD_TLV_ID_BURST_FTM_SEP,		"burst ftm sep" },
+	{ WL_PROXD_TLV_ID_BURST_NUM_FTM,		"burst num ftm" },
+	{ WL_PROXD_TLV_ID_NUM_BURST,			"num burst" },
+	{ WL_PROXD_TLV_ID_FTM_RETRIES,			"ftm retries" },
+	{ WL_PROXD_TLV_ID_BSS_INDEX,			"BSS index" },
+	{ WL_PROXD_TLV_ID_BSSID,			"bssid" },
+	{ WL_PROXD_TLV_ID_INIT_DELAY,			"burst init delay" },
+	{ WL_PROXD_TLV_ID_BURST_TIMEOUT,		"burst timeout" },
+	{ WL_PROXD_TLV_ID_EVENT_MASK,			"event mask" },
+	{ WL_PROXD_TLV_ID_FLAGS_MASK,			"flags mask" },
+	{ WL_PROXD_TLV_ID_PEER_MAC,			"peer addr" },
+	{ WL_PROXD_TLV_ID_FTM_REQ,			"ftm req" },
+	{ WL_PROXD_TLV_ID_LCI_REQ,			"lci req" },
+	{ WL_PROXD_TLV_ID_LCI,				"lci" },
+	{ WL_PROXD_TLV_ID_CIVIC_REQ,			"civic req" },
+	{ WL_PROXD_TLV_ID_CIVIC,			"civic" },
+	{ WL_PROXD_TLV_ID_AVAIL,			"availability" },
+	{ WL_PROXD_TLV_ID_SESSION_FLAGS,		"session flags" },
+	{ WL_PROXD_TLV_ID_SESSION_FLAGS_MASK,		"session flags mask" },
+	{ WL_PROXD_TLV_ID_RX_MAX_BURST,			"rx max bursts" },
+	{ WL_PROXD_TLV_ID_RANGING_INFO,			"ranging info" },
+	{ WL_PROXD_TLV_ID_RANGING_FLAGS,		"ranging flags" },
+	{ WL_PROXD_TLV_ID_RANGING_FLAGS_MASK,		"ranging flags mask" },
+	/* output - 512 + x */
+	{ WL_PROXD_TLV_ID_STATUS,			"status" },
+	{ WL_PROXD_TLV_ID_COUNTERS,			"counters" },
+	{ WL_PROXD_TLV_ID_INFO,				"info" },
+	{ WL_PROXD_TLV_ID_RTT_RESULT,			"rtt result" },
+	{ WL_PROXD_TLV_ID_AOA_RESULT,			"aoa result" },
+	{ WL_PROXD_TLV_ID_SESSION_INFO,			"session info" },
+	{ WL_PROXD_TLV_ID_SESSION_STATUS,		"session status" },
+	{ WL_PROXD_TLV_ID_SESSION_ID_LIST,		"session ids" },
+	/* debug tlvs can be added starting 1024 */
+	{ WL_PROXD_TLV_ID_DEBUG_MASK,			"debug mask" },
+	{ WL_PROXD_TLV_ID_COLLECT,			"collect" },
+	{ WL_PROXD_TLV_ID_STRBUF,			"result" },
+	{ WL_PROXD_TLV_ID_COLLECT_DATA,			"collect-data" },
+	{ WL_PROXD_TLV_ID_RI_RR,			"ri_rr" },
+	{ WL_PROXD_TLV_ID_COLLECT_CHAN_DATA,		"chan est"}
+};
+
+static const ftm_strmap_entry_t ftm_event_type_loginfo[] = {
+	/* wl_proxd_event_type_t,			text-string */
+	{ WL_PROXD_EVENT_NONE,				"none" },
+	{ WL_PROXD_EVENT_SESSION_CREATE,		"session create" },
+	{ WL_PROXD_EVENT_SESSION_START,			"session start" },
+	{ WL_PROXD_EVENT_FTM_REQ,			"FTM req" },
+	{ WL_PROXD_EVENT_BURST_START,			"burst start" },
+	{ WL_PROXD_EVENT_BURST_END,			"burst end" },
+	{ WL_PROXD_EVENT_SESSION_END,			"session end" },
+	{ WL_PROXD_EVENT_SESSION_RESTART,		"session restart" },
+	{ WL_PROXD_EVENT_BURST_RESCHED,			"burst rescheduled" },
+	{ WL_PROXD_EVENT_SESSION_DESTROY,		"session destroy" },
+	{ WL_PROXD_EVENT_RANGE_REQ,			"range request" },
+	{ WL_PROXD_EVENT_FTM_FRAME,			"FTM frame" },
+	{ WL_PROXD_EVENT_DELAY,				"delay" },
+	{ WL_PROXD_EVENT_VS_INITIATOR_RPT,		"initiator-report " }, /* rx */
+	{ WL_PROXD_EVENT_RANGING,			"ranging " },
+	{ WL_PROXD_EVENT_COLLECT,			"collect" },
+};
+
+/*
+* session-state --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_session_state_value_loginfo[] = {
+	/* wl_proxd_session_state_t,			text string */
+	{ WL_PROXD_SESSION_STATE_CREATED,		"created" },
+	{ WL_PROXD_SESSION_STATE_CONFIGURED,		"configured" },
+	{ WL_PROXD_SESSION_STATE_STARTED,		"started" },
+	{ WL_PROXD_SESSION_STATE_DELAY,			"delay" },
+	{ WL_PROXD_SESSION_STATE_USER_WAIT,		"user-wait" },
+	{ WL_PROXD_SESSION_STATE_SCHED_WAIT,		"sched-wait" },
+	{ WL_PROXD_SESSION_STATE_BURST,			"burst" },
+	{ WL_PROXD_SESSION_STATE_STOPPING,		"stopping" },
+	{ WL_PROXD_SESSION_STATE_ENDED,			"ended" },
+	{ WL_PROXD_SESSION_STATE_DESTROYING,		"destroying" },
+	{ WL_PROXD_SESSION_STATE_NONE,			"none" }
+};
+
+/*
+* ranging-state --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_ranging_state_value_loginfo [] = {
+	/* wl_proxd_ranging_state_t,			text string */
+	{ WL_PROXD_RANGING_STATE_NONE,			"none" },
+	{ WL_PROXD_RANGING_STATE_NOTSTARTED,		"notstarted" },
+	{ WL_PROXD_RANGING_STATE_INPROGRESS,		"inprogress" },
+	{ WL_PROXD_RANGING_STATE_DONE,			"done" },
+};
+
+/*
+* status --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_status_value_loginfo[] = {
+	/* wl_proxd_status_t,			text-string */
+	{ WL_PROXD_E_OVERRIDDEN,		"overridden" },
+	{ WL_PROXD_E_ASAP_FAILED,		"ASAP failed" },
+	{ WL_PROXD_E_NOTSTARTED,		"not started" },
+	{ WL_PROXD_E_INVALIDMEAS,		"invalid measurement" },
+	{ WL_PROXD_E_INCAPABLE,			"incapable" },
+	{ WL_PROXD_E_MISMATCH,			"mismatch"},
+	{ WL_PROXD_E_DUP_SESSION,		"dup session" },
+	{ WL_PROXD_E_REMOTE_FAIL,		"remote fail" },
+	{ WL_PROXD_E_REMOTE_INCAPABLE,		"remote incapable" },
+	{ WL_PROXD_E_SCHED_FAIL,		"sched failure" },
+	{ WL_PROXD_E_PROTO,			"protocol error" },
+	{ WL_PROXD_E_EXPIRED,			"expired" },
+	{ WL_PROXD_E_TIMEOUT,			"timeout" },
+	{ WL_PROXD_E_NOACK,			"no ack" },
+	{ WL_PROXD_E_DEFERRED,			"deferred" },
+	{ WL_PROXD_E_INVALID_SID,		"invalid session id" },
+	{ WL_PROXD_E_REMOTE_CANCEL,		"remote cancel" },
+	{ WL_PROXD_E_CANCELED,			"canceled" },
+	{ WL_PROXD_E_INVALID_SESSION,		"invalid session" },
+	{ WL_PROXD_E_BAD_STATE,			"bad state" },
+	{ WL_PROXD_E_ERROR,			"error" },
+	{ WL_PROXD_E_OK,			"OK" }
+};
+
+/*
+* time interval unit --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_tmu_value_loginfo[] = {
+	/* wl_proxd_tmu_t,			text-string */
+	{ WL_PROXD_TMU_TU,			"TU" },
+	{ WL_PROXD_TMU_SEC,			"sec" },
+	{ WL_PROXD_TMU_MILLI_SEC,		"ms" },
+	{ WL_PROXD_TMU_MICRO_SEC,		"us" },
+	{ WL_PROXD_TMU_NANO_SEC,		"ns" },
+	{ WL_PROXD_TMU_PICO_SEC,		"ps" }
+};
+
+#define RSPEC_BW(rspec)         ((rspec) & WL_RSPEC_BW_MASK)
+#define RSPEC_IS20MHZ(rspec)	(RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ)
+#define RSPEC_IS40MHZ(rspec)	(RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ)
+#define RSPEC_IS80MHZ(rspec)    (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ)
+#define RSPEC_IS160MHZ(rspec)   (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ)
+
+#define IS_MCS(rspec)     	(((rspec) & WL_RSPEC_ENCODING_MASK) != WL_RSPEC_ENCODE_RATE)
+#define IS_STBC(rspec)     	(((((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) ||	\
+	(((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)) &&	\
+	(((rspec) & WL_RSPEC_STBC) == WL_RSPEC_STBC))
+#define RSPEC_ISSGI(rspec)      (((rspec) & WL_RSPEC_SGI) != 0)
+#define RSPEC_ISLDPC(rspec)     (((rspec) & WL_RSPEC_LDPC) != 0)
+#define RSPEC_ISSTBC(rspec)     (((rspec) & WL_RSPEC_STBC) != 0)
+#define RSPEC_ISTXBF(rspec)     (((rspec) & WL_RSPEC_TXBF) != 0)
+#define RSPEC_ISVHT(rspec)    	(((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
+#define RSPEC_ISHT(rspec)	(((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT)
+#define RSPEC_ISLEGACY(rspec)   (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE)
+#define RSPEC2RATE(rspec)	(RSPEC_ISLEGACY(rspec) ? \
+				 ((rspec) & RSPEC_RATE_MASK) : rate_rspec2rate(rspec))
+/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */
+#define RSPEC2KBPS(rspec)	rate_rspec2rate(rspec)
+
+struct ieee_80211_mcs_rate_info {
+	uint8 constellation_bits;
+	uint8 coding_q;
+	uint8 coding_d;
+};
+
+static const struct ieee_80211_mcs_rate_info wl_mcs_info[] = {
+	{ 1, 1, 2 }, /* MCS  0: MOD: BPSK,   CR 1/2 */
+	{ 2, 1, 2 }, /* MCS  1: MOD: QPSK,   CR 1/2 */
+	{ 2, 3, 4 }, /* MCS  2: MOD: QPSK,   CR 3/4 */
+	{ 4, 1, 2 }, /* MCS  3: MOD: 16QAM,  CR 1/2 */
+	{ 4, 3, 4 }, /* MCS  4: MOD: 16QAM,  CR 3/4 */
+	{ 6, 2, 3 }, /* MCS  5: MOD: 64QAM,  CR 2/3 */
+	{ 6, 3, 4 }, /* MCS  6: MOD: 64QAM,  CR 3/4 */
+	{ 6, 5, 6 }, /* MCS  7: MOD: 64QAM,  CR 5/6 */
+	{ 8, 3, 4 }, /* MCS  8: MOD: 256QAM, CR 3/4 */
+	{ 8, 5, 6 }  /* MCS  9: MOD: 256QAM, CR 5/6 */
+};
+
+/**
+ * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi combination.
+ *     'mcs' : a *single* spatial stream MCS (11n or 11ac)
+ */
+uint
+rate_mcs2rate(uint mcs, uint nss, uint bw, int sgi)
+{
+	const int ksps = 250; /* kilo symbols per sec, 4 us sym */
+	const int Nsd_20MHz = 52;
+	const int Nsd_40MHz = 108;
+	const int Nsd_80MHz = 234;
+	const int Nsd_160MHz = 468;
+	uint rate;
+
+	if (mcs == 32) {
+		/* just return fixed values for mcs32 instead of trying to parametrize */
+		rate = (sgi == 0) ? 6000 : 6778;
+	} else if (mcs <= 9) {
+		/* This calculation works for 11n HT and 11ac VHT if the HT mcs values
+		 * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8.
+		 * That is, HT MCS 23 is a base MCS = 7, Nss = 3
+		 */
+
+		/* find the number of complex numbers per symbol */
+		if (RSPEC_IS20MHZ(bw)) {
+			rate = Nsd_20MHz;
+		} else if (RSPEC_IS40MHZ(bw)) {
+			rate = Nsd_40MHz;
+		} else if (bw == WL_RSPEC_BW_80MHZ) {
+			rate = Nsd_80MHz;
+		} else if (bw == WL_RSPEC_BW_160MHZ) {
+			rate = Nsd_160MHz;
+		} else {
+			rate = 0;
+		}
+
+		/* multiply by bits per number from the constellation in use */
+		rate = rate * wl_mcs_info[mcs].constellation_bits;
+
+		/* adjust for the number of spatial streams */
+		rate = rate * nss;
+
+		/* adjust for the coding rate given as a quotient and divisor */
+		rate = (rate * wl_mcs_info[mcs].coding_q) / wl_mcs_info[mcs].coding_d;
+
+		/* multiply by Kilo symbols per sec to get Kbps */
+		rate = rate * ksps;
+
+		/* adjust the symbols per sec for SGI
+		 * symbol duration is 4 us without SGI, and 3.6 us with SGI,
+		 * so ratio is 10 / 9
+		 */
+		if (sgi) {
+			/* add 4 for rounding of division by 9 */
+			rate = ((rate * 10) + 4) / 9;
+		}
+	} else {
+		rate = 0;
+	}
+
+	return rate;
+} /* wlc_rate_mcs2rate */
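+/*
+ * Worked example for the calculation above: HT MCS 7 on a 40 MHz channel
+ * with one spatial stream and no SGI uses 108 data subcarriers, 64QAM
+ * (6 bits per subcarrier) and coding rate 5/6, so
+ * rate = 108 * 6 * 1 * 5/6 * 250 ksps = 135000 Kbps (135 Mbps);
+ * with SGI this becomes 135000 * 10 / 9 = 150000 Kbps (150 Mbps).
+ */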
+
+/** take a well formed ratespec_t arg and return phy rate in [Kbps] units */
+int
+rate_rspec2rate(uint32 rspec)
+{
+	int rate = -1;
+
+	if (RSPEC_ISLEGACY(rspec)) {
+		rate = 500 * (rspec & WL_RSPEC_RATE_MASK);
+	} else if (RSPEC_ISHT(rspec)) {
+		uint mcs = (rspec & WL_RSPEC_RATE_MASK);
+
+		if (mcs == 32) {
+			rate = rate_mcs2rate(mcs, 1, WL_RSPEC_BW_40MHZ, RSPEC_ISSGI(rspec));
+		} else {
+			uint nss = 1 + (mcs / 8);
+			mcs = mcs % 8;
+			rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
+		}
+	} else if (RSPEC_ISVHT(rspec)) {
+		uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK);
+		uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
+
+		ASSERT(mcs <= 9);
+		ASSERT(nss <= 8);
+
+		rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
+	} else {
+		ASSERT(0);
+	}
+
+	return (rate == 0) ? -1 : rate;
+}
+
+char resp_buf[WLC_IOCTL_SMLEN];
+
+static uint64
+ftm_intvl2nsec(const wl_proxd_intvl_t *intvl)
+{
+	uint64 ret;
+	ret = intvl->intvl;
+	switch (intvl->tmu) {
+	case WL_PROXD_TMU_TU:			ret = FTM_TU2MICRO(ret) * 1000; break;
+	case WL_PROXD_TMU_SEC:			ret *= 1000000000; break;
+	case WL_PROXD_TMU_MILLI_SEC:	ret *= 1000000; break;
+	case WL_PROXD_TMU_MICRO_SEC:	ret *= 1000; break;
+	case WL_PROXD_TMU_PICO_SEC:		ret = intvl->intvl / 1000; break;
+	case WL_PROXD_TMU_NANO_SEC:		/* fall through */
+	default:						break;
+	}
+	return ret;
+}
+uint64
+ftm_intvl2usec(const wl_proxd_intvl_t *intvl)
+{
+	uint64 ret;
+	ret = intvl->intvl;
+	switch (intvl->tmu) {
+	case WL_PROXD_TMU_TU:			ret = FTM_TU2MICRO(ret); break;
+	case WL_PROXD_TMU_SEC:			ret *= 1000000; break;
+	case WL_PROXD_TMU_NANO_SEC:		ret = intvl->intvl / 1000; break;
+	case WL_PROXD_TMU_PICO_SEC:		ret = intvl->intvl / 1000000; break;
+	case WL_PROXD_TMU_MILLI_SEC:	ret *= 1000; break;
+	case WL_PROXD_TMU_MICRO_SEC:	/* fall through */
+	default:						break;
+	}
+	return ret;
+}
+
+/*
+* lookup 'id' (as a key) from a fw status to host map table
+* if found, return the corresponding reason code
+*/
+
+static rtt_reason_t
+ftm_get_statusmap_info(wl_proxd_status_t id, const ftm_status_map_host_entry_t *p_table,
+	uint32 num_entries)
+{
+	int i;
+	const ftm_status_map_host_entry_t *p_entry;
+	/* scan thru the table till end */
+	p_entry = p_table;
+	for (i = 0; i < (int) num_entries; i++)
+	{
+		if (p_entry->proxd_status == id) {
+			return p_entry->rtt_reason;
+		}
+		p_entry++;		/* next entry */
+	}
+	return RTT_REASON_FAILURE; /* not found */
+}
+/*
+* lookup 'id' (as a key) from a table
+* if found, return the entry pointer, otherwise return NULL
+*/
+static const ftm_strmap_entry_t*
+ftm_get_strmap_info(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries)
+{
+	int i;
+	const ftm_strmap_entry_t *p_entry;
+
+	/* scan thru the table till end */
+	p_entry = p_table;
+	for (i = 0; i < (int) num_entries; i++)
+	{
+		if (p_entry->id == id)
+			return p_entry;
+		p_entry++;		/* next entry */
+	}
+	return NULL;			/* not found */
+}
+
+/*
+* map an enum to a text-string for display; this function is called by the following:
+* For debug/trace:
+*     ftm_[cmdid|tlvid]_to_str()
+* For TLV-output log for 'get' commands
+*     ftm_[method|tmu|caps|status|state]_value_to_logstr()
+* Input:
+*     pTable -- point to a 'enum to string' table.
+*/
+static const char *
+ftm_map_id_to_str(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries)
+{
+	const ftm_strmap_entry_t*p_entry = ftm_get_strmap_info(id, p_table, num_entries);
+	if (p_entry)
+		return (p_entry->text);
+
+	return "invalid";
+}
+
+
+#ifdef RTT_DEBUG
+
+/* define entry, e.g. { WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */
+#define DEF_STRMAP_ENTRY(id) { (id), #id }
+
+/* ftm cmd-id mapping */
+static const ftm_strmap_entry_t ftm_cmdid_map[] = {
+	/* {wl_proxd_cmd_t(WL_PROXD_CMD_xxx), "WL_PROXD_CMD_xxx" }, */
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_NONE),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_VERSION),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_ENABLE),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_DISABLE),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_CONFIG),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_SESSION),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_BURST_REQUEST),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_SESSION),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_DELETE_SESSION),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RESULT),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_INFO),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_STATUS),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_SESSIONS),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_COUNTERS),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_CLEAR_COUNTERS),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_COLLECT),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_TUNE),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_DUMP),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_RANGING),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_RANGING),
+	DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RANGING_INFO),
+};
+
+/*
+* map a ftm cmd-id to a text-string for display
+*/
+static const char *
+ftm_cmdid_to_str(uint16 cmdid)
+{
+	return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map));
+}
+#endif /* RTT_DEBUG */
+
+
+/*
+* convert BCME_xxx error codes into related error strings
+* note, bcmerrorstr() defined in bcmutils is for BCMDRIVER only,
+*       this duplicate copy is for WL access and may need to be cleaned up later
+*/
+static const char *ftm_bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+static const char *
+ftm_status_value_to_logstr(wl_proxd_status_t status)
+{
+	static char ftm_msgbuf_status_undef[32];
+	const ftm_strmap_entry_t *p_loginfo;
+	int bcmerror;
+
+	/* check if within BCME_xxx error range */
+	bcmerror = (int) status;
+	if (VALID_BCMERROR(bcmerror))
+		return ftm_bcmerrorstrtable[-bcmerror];
+
+	/* otherwise, look for 'proxd ftm status' range */
+	p_loginfo = ftm_get_strmap_info((int32) status,
+		&ftm_status_value_loginfo[0], ARRAYSIZE(ftm_status_value_loginfo));
+	if (p_loginfo)
+		return p_loginfo->text;
+
+	/* report for 'out of range' FTM-status error code */
+	memset(ftm_msgbuf_status_undef, 0, sizeof(ftm_msgbuf_status_undef));
+	snprintf(ftm_msgbuf_status_undef, sizeof(ftm_msgbuf_status_undef),
+		"Undefined status %d", status);
+	return &ftm_msgbuf_status_undef[0];
+}
+
+static const char *
+ftm_tmu_value_to_logstr(wl_proxd_tmu_t tmu)
+{
+	return ftm_map_id_to_str((int32)tmu,
+		&ftm_tmu_value_loginfo[0], ARRAYSIZE(ftm_tmu_value_loginfo));
+}
+
+static const ftm_strmap_entry_t*
+ftm_get_event_type_loginfo(wl_proxd_event_type_t	event_type)
+{
+	/* look up 'event-type' from a predefined table  */
+	return ftm_get_strmap_info((int32) event_type,
+		ftm_event_type_loginfo, ARRAYSIZE(ftm_event_type_loginfo));
+}
+
+static const char *
+ftm_session_state_value_to_logstr(wl_proxd_session_state_t state)
+{
+	return ftm_map_id_to_str((int32)state, &ftm_session_state_value_loginfo[0],
+		ARRAYSIZE(ftm_session_state_value_loginfo));
+}
+
+
+#ifdef WL_CFG80211
+/*
+* send 'proxd' iovar for all ftm get-related commands
+*/
+static int
+rtt_do_get_ioctl(dhd_pub_t *dhd, wl_proxd_iov_t *p_proxd_iov, uint16 proxd_iovsize,
+		ftm_subcmd_info_t *p_subcmd_info)
+{
+
+	wl_proxd_iov_t *p_iovresp = (wl_proxd_iov_t *)resp_buf;
+	int status;
+	int tlvs_len;
+	/*  send getbuf proxd iovar */
+	status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov,
+			proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN);
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
+			__FUNCTION__, p_subcmd_info->cmdid, status));
+		return status;
+	}
+	if (p_subcmd_info->cmdid == WL_PROXD_CMD_GET_VERSION) {
+		p_subcmd_info->version = ltoh16(p_iovresp->version);
+		DHD_RTT(("ftm version: 0x%x\n", ltoh16(p_iovresp->version)));
+		goto exit;
+	}
+
+	tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE;
+	if (tlvs_len < 0) {
+		DHD_ERROR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
+			__FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE));
+		tlvs_len = 0;
+	}
+
+	if (tlvs_len > 0 && p_subcmd_info->handler) {
+		/* unpack TLVs and invoke the cbfn for processing */
+		status = bcm_unpack_xtlv_buf(p_proxd_iov, (uint8 *)p_iovresp->tlvs,
+				tlvs_len, BCM_XTLV_OPTION_ALIGN32, p_subcmd_info->handler);
+	}
+exit:
+	return status;
+}
+
+
+static wl_proxd_iov_t *
+rtt_alloc_getset_buf(wl_proxd_method_t method, wl_proxd_session_id_t session_id,
+	wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize)
+{
+	uint16 proxd_iovsize;
+	uint16 kflags;
+	wl_proxd_tlv_t *p_tlv;
+	wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL;
+
+	*p_out_bufsize = 0;	/* init */
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	/* calculate the whole buffer size, including one reserve-tlv entry in the header */
+	proxd_iovsize = sizeof(wl_proxd_iov_t) + tlvs_bufsize;
+
+	p_proxd_iov = kzalloc(proxd_iovsize, kflags);
+	if (p_proxd_iov == NULL) {
+		DHD_ERROR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
+		return NULL;
+	}
+
+	/* setup proxd-FTM-method iovar header */
+	p_proxd_iov->version = htol16(WL_PROXD_API_VERSION);
+	p_proxd_iov->len = htol16(proxd_iovsize); /* caller may adjust it based on #of TLVs */
+	p_proxd_iov->cmd = htol16(cmdid);
+	p_proxd_iov->method = htol16(method);
+	p_proxd_iov->sid = htol16(session_id);
+
+	/* initialize the reserved/dummy-TLV in iovar header */
+	p_tlv = p_proxd_iov->tlvs;
+	p_tlv->id = htol16(WL_PROXD_TLV_ID_NONE);
+	p_tlv->len = htol16(0);
+
+	*p_out_bufsize = proxd_iovsize;	/* for caller's reference */
+
+	return p_proxd_iov;
+}
+
+
+static int
+dhd_rtt_common_get_handler(dhd_pub_t *dhd, ftm_subcmd_info_t *p_subcmd_info,
+		wl_proxd_method_t method,
+		wl_proxd_session_id_t session_id)
+{
+	int status = BCME_OK;
+	uint16 proxd_iovsize = 0;
+	wl_proxd_iov_t *p_proxd_iov;
+#ifdef RTT_DEBUG
+	DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
+		__FUNCTION__, method, session_id, p_subcmd_info->cmdid,
+		ftm_cmdid_to_str(p_subcmd_info->cmdid)));
+#endif
+	/* alloc mem for ioctl header + reserved 0 bufsize for tlvs (initialize to zero) */
+	p_proxd_iov = rtt_alloc_getset_buf(method, session_id, p_subcmd_info->cmdid,
+		0, &proxd_iovsize);
+
+	if (p_proxd_iov == NULL)
+		return BCME_NOMEM;
+
+	status = rtt_do_get_ioctl(dhd, p_proxd_iov, proxd_iovsize, p_subcmd_info);
+
+	if (status != BCME_OK) {
+		DHD_RTT(("%s failed: status=%d\n", __FUNCTION__, status));
+	}
+	kfree(p_proxd_iov);
+	return status;
+}
+
+/*
+* common handler for set-related proxd method commands which require no TLV as input
+*   wl proxd ftm [session-id] <set-subcmd>
+* e.g.
+*   wl proxd ftm enable -- to enable ftm
+*   wl proxd ftm disable -- to disable ftm
+*   wl proxd ftm <session-id> start -- to start a specified session
+*   wl proxd ftm <session-id> stop  -- to cancel a specified session;
+*                                    state is maintained till the session is deleted.
+*   wl proxd ftm <session-id> delete -- to delete a specified session
+*   wl proxd ftm [<session-id>] clear-counters -- to clear counters
+*   wl proxd ftm <session-id> burst-request -- on initiator: to send burst request;
+*                                              on target: send FTM frame
+*   wl proxd ftm <session-id> collect
+*   wl proxd ftm tune     (TBD)
+*/
+static int
+dhd_rtt_common_set_handler(dhd_pub_t *dhd, const ftm_subcmd_info_t *p_subcmd_info,
+	wl_proxd_method_t method, wl_proxd_session_id_t session_id)
+{
+	uint16 proxd_iovsize;
+	wl_proxd_iov_t *p_proxd_iov;
+	int ret;
+
+#ifdef RTT_DEBUG
+	DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
+		__FUNCTION__, method, session_id, p_subcmd_info->cmdid,
+		ftm_cmdid_to_str(p_subcmd_info->cmdid)));
+#endif
+
+	/* allocate and initialize a temp buffer for 'set proxd' iovar */
+	proxd_iovsize = 0;
+	p_proxd_iov = rtt_alloc_getset_buf(method, session_id, p_subcmd_info->cmdid,
+							0, &proxd_iovsize);		/* no TLV */
+	if (p_proxd_iov == NULL)
+		return BCME_NOMEM;
+
+	/* no TLV to pack, simply issue a set-proxd iovar */
+	ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, proxd_iovsize, NULL, 0, TRUE);
+#ifdef RTT_DEBUG
+	if (ret != BCME_OK) {
+		DHD_RTT(("error: IOVAR failed, status=%d\n", ret));
+	}
+#endif
+	/* clean up */
+	kfree(p_proxd_iov);
+
+	return ret;
+}
+#endif /* WL_CFG80211 */
+
+static int
+rtt_unpack_xtlv_cbfn(void *ctx, uint8 *p_data, uint16 tlvid, uint16 len)
+{
+	int ret = BCME_OK;
+	int i;
+	wl_proxd_ftm_session_status_t *p_data_info = NULL;
+	wl_proxd_collect_event_data_t *p_collect_data = NULL;
+	uint32 chan_data_entry = 0;
+
+	switch (tlvid) {
+	case WL_PROXD_TLV_ID_RTT_RESULT:
+		ret = dhd_rtt_convert_results_to_host((rtt_report_t *)ctx,
+				p_data, tlvid, len);
+		break;
+	case WL_PROXD_TLV_ID_SESSION_STATUS:
+		DHD_RTT(("WL_PROXD_TLV_ID_SESSION_STATUS\n"));
+		memcpy(ctx, p_data, sizeof(wl_proxd_ftm_session_status_t));
+		p_data_info = (wl_proxd_ftm_session_status_t *)ctx;
+		p_data_info->sid = ltoh16_ua(&p_data_info->sid);
+		p_data_info->state = ltoh16_ua(&p_data_info->state);
+		p_data_info->status = ltoh32_ua(&p_data_info->status);
+		p_data_info->burst_num = ltoh16_ua(&p_data_info->burst_num);
+		DHD_RTT(("\tsid=%u, state=%d, status=%d, burst_num=%u\n",
+			p_data_info->sid, p_data_info->state,
+			p_data_info->status, p_data_info->burst_num));
+
+		break;
+	case WL_PROXD_TLV_ID_COLLECT_DATA:
+		DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_DATA\n"));
+		memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_t));
+		p_collect_data = (wl_proxd_collect_event_data_t *)ctx;
+		DHD_RTT(("\tH_RX\n"));
+		for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+			p_collect_data->H_RX[i] = ltoh32_ua(&p_collect_data->H_RX[i]);
+			DHD_RTT(("\t%u\n", p_collect_data->H_RX[i]));
+		}
+		DHD_RTT(("\n"));
+		DHD_RTT(("\tH_LB\n"));
+		for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+			p_collect_data->H_LB[i] = ltoh32_ua(&p_collect_data->H_LB[i]);
+			DHD_RTT(("\t%u\n", p_collect_data->H_LB[i]));
+		}
+		DHD_RTT(("\n"));
+		DHD_RTT(("\tri_rr\n"));
+		for (i = 0; i < FTM_TPK_RI_RR_LEN; i++) {
+			DHD_RTT(("\t%u\n", p_collect_data->ri_rr[i]));
+		}
+		p_collect_data->phy_err_mask = ltoh32_ua(&p_collect_data->phy_err_mask);
+		DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data->phy_err_mask));
+		break;
+	case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA:
+		DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n"));
+		DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32))));
+		for (i = 0; i < (len/sizeof(chan_data_entry)); i++) {
+			uint32 *p = (uint32*)p_data;
+			chan_data_entry = ltoh32_ua(p + i);
+			DHD_RTT(("\t%u\n", chan_data_entry));
+		}
+		break;
+	default:
+		DHD_ERROR(("> Unsupported TLV ID %d\n", tlvid));
+		ret = BCME_ERROR;
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef WL_CFG80211
+static int
+rtt_handle_config_options(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv,
+	uint16 *p_buf_space_left, ftm_config_options_info_t *ftm_configs, int ftm_cfg_cnt)
+{
+	int ret = BCME_OK;
+	int cfg_idx = 0;
+	uint32 flags = WL_PROXD_FLAG_NONE;
+	uint32 flags_mask = WL_PROXD_FLAG_NONE;
+	uint32 new_mask;		/* cmdline input */
+	ftm_config_options_info_t *p_option_info;
+	uint16 type = (session_id == WL_PROXD_SESSION_ID_GLOBAL) ?
+			WL_PROXD_TLV_ID_FLAGS_MASK : WL_PROXD_TLV_ID_SESSION_FLAGS_MASK;
+	for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) {
+		p_option_info = (ftm_configs + cfg_idx);
+		if (p_option_info != NULL) {
+			new_mask = p_option_info->flags;
+			/* update flags mask */
+			flags_mask |= new_mask;
+			if (p_option_info->enable) {
+				flags |= new_mask;	/* set the bit on */
+			} else {
+				flags &= ~new_mask;	/* set the bit off */
+			}
+		}
+	}
+	flags = htol32(flags);
+	flags_mask = htol32(flags_mask);
+	/* setup flags_mask TLV */
+	ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
+		type, sizeof(uint32), &flags_mask, BCM_XTLV_OPTION_ALIGN32);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s : bcm_pack_xtlv_entry() for mask flags failed, status=%d\n",
+			__FUNCTION__, ret));
+		goto exit;
+	}
+
+	type = (session_id == WL_PROXD_SESSION_ID_GLOBAL)?
+		WL_PROXD_TLV_ID_FLAGS : WL_PROXD_TLV_ID_SESSION_FLAGS;
+	/* setup flags TLV */
+	ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
+			type, sizeof(uint32), &flags, BCM_XTLV_OPTION_ALIGN32);
+	if (ret != BCME_OK) {
+#ifdef RTT_DEBUG
+		DHD_RTT(("%s: bcm_pack_xtlv_entry() for flags failed, status=%d\n",
+			__FUNCTION__, ret));
+#endif
+	}
+exit:
+	return ret;
+}
+
+static int
+rtt_handle_config_general(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv,
+	uint16 *p_buf_space_left, ftm_config_param_info_t *ftm_configs, int ftm_cfg_cnt)
+{
+	int ret = BCME_OK;
+	int cfg_idx = 0;
+	uint32 chanspec;
+	ftm_config_param_info_t *p_config_param_info;
+	void		*p_src_data;
+	uint16	src_data_size;	/* size of data pointed by p_src_data as 'source' */
+	for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) {
+		p_config_param_info = (ftm_configs + cfg_idx);
+		if (p_config_param_info != NULL) {
+			switch (p_config_param_info->tlvid)	{
+			case WL_PROXD_TLV_ID_BSS_INDEX:
+			case WL_PROXD_TLV_ID_FTM_RETRIES:
+			case WL_PROXD_TLV_ID_FTM_REQ_RETRIES:
+				p_src_data = &p_config_param_info->data8;
+				src_data_size = sizeof(uint8);
+				break;
+			case WL_PROXD_TLV_ID_BURST_NUM_FTM: /* uint16 */
+			case WL_PROXD_TLV_ID_NUM_BURST:
+			case WL_PROXD_TLV_ID_RX_MAX_BURST:
+				p_src_data = &p_config_param_info->data16;
+				src_data_size = sizeof(uint16);
+				break;
+			case WL_PROXD_TLV_ID_TX_POWER:		/* uint32 */
+			case WL_PROXD_TLV_ID_RATESPEC:
+			case WL_PROXD_TLV_ID_EVENT_MASK: /* wl_proxd_event_mask_t/uint32 */
+			case WL_PROXD_TLV_ID_DEBUG_MASK:
+				p_src_data = &p_config_param_info->data32;
+				src_data_size = sizeof(uint32);
+				break;
+			case WL_PROXD_TLV_ID_CHANSPEC:		/* chanspec_t --> 32bit */
+				chanspec = p_config_param_info->chanspec;
+				p_src_data = (void *) &chanspec;
+				src_data_size = sizeof(uint32);
+				break;
+			case WL_PROXD_TLV_ID_BSSID: /* mac address */
+			case WL_PROXD_TLV_ID_PEER_MAC:
+				p_src_data = &p_config_param_info->mac_addr;
+				src_data_size = sizeof(struct ether_addr);
+				break;
+			case WL_PROXD_TLV_ID_BURST_DURATION:	/* wl_proxd_intvl_t */
+			case WL_PROXD_TLV_ID_BURST_PERIOD:
+			case WL_PROXD_TLV_ID_BURST_FTM_SEP:
+			case WL_PROXD_TLV_ID_BURST_TIMEOUT:
+			case WL_PROXD_TLV_ID_INIT_DELAY:
+				p_src_data = &p_config_param_info->data_intvl;
+				src_data_size = sizeof(wl_proxd_intvl_t);
+				break;
+			default:
+				ret = BCME_BADARG;
+				break;
+			}
+			if (ret != BCME_OK) {
+				DHD_ERROR(("%s bad TLV ID : %d\n",
+					__FUNCTION__, p_config_param_info->tlvid));
+				break;
+			}
+
+			ret = bcm_pack_xtlv_entry((uint8 **) p_tlv, p_buf_space_left,
+				p_config_param_info->tlvid, src_data_size, p_src_data,
+				BCM_XTLV_OPTION_ALIGN32);
+			if (ret != BCME_OK) {
+				DHD_ERROR(("%s: bcm_pack_xtlv_entry() failed,"
+					" status=%d\n", __FUNCTION__, ret));
+				break;
+			}
+
+		}
+	}
+	return ret;
+}
+
+static int
+dhd_rtt_ftm_enable(dhd_pub_t *dhd, bool enable)
+{
+	ftm_subcmd_info_t subcmd_info;
+	subcmd_info.name = (enable)? "enable" : "disable";
+	subcmd_info.cmdid = (enable)? WL_PROXD_CMD_ENABLE: WL_PROXD_CMD_DISABLE;
+	subcmd_info.handler = NULL;
+	return dhd_rtt_common_set_handler(dhd, &subcmd_info,
+			WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL);
+}
+
+static int
+dhd_rtt_start_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, bool start)
+{
+	ftm_subcmd_info_t subcmd_info;
+	subcmd_info.name = (start)? "start session" : "stop session";
+	subcmd_info.cmdid = (start)? WL_PROXD_CMD_START_SESSION: WL_PROXD_CMD_STOP_SESSION;
+	subcmd_info.handler = NULL;
+	return dhd_rtt_common_set_handler(dhd, &subcmd_info,
+			WL_PROXD_METHOD_FTM, session_id);
+}
+
+static int
+dhd_rtt_delete_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id)
+{
+	ftm_subcmd_info_t subcmd_info;
+	subcmd_info.name = "delete session";
+	subcmd_info.cmdid = WL_PROXD_CMD_DELETE_SESSION;
+	subcmd_info.handler = NULL;
+	return dhd_rtt_common_set_handler(dhd, &subcmd_info,
+			WL_PROXD_METHOD_FTM, session_id);
+}
+
+static int
+dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id,
+	ftm_config_category_t category, void *ftm_configs, int ftm_cfg_cnt)
+{
+	ftm_subcmd_info_t subcmd_info;
+	wl_proxd_tlv_t *p_tlv;
+	/* alloc mem for ioctl header + reserved 0 bufsize for tlvs (initialize to zero) */
+	wl_proxd_iov_t *p_proxd_iov;
+	uint16 proxd_iovsize = 0;
+	uint16 bufsize;
+	uint16 buf_space_left;
+	uint16 all_tlvsize;
+	int ret = BCME_OK;
+
+	subcmd_info.name = "config";
+	subcmd_info.cmdid = WL_PROXD_CMD_CONFIG;
+
+	p_proxd_iov = rtt_alloc_getset_buf(WL_PROXD_METHOD_FTM, session_id, subcmd_info.cmdid,
+		FTM_IOC_BUFSZ, &proxd_iovsize);
+
+	if (p_proxd_iov == NULL) {
+		DHD_ERROR(("%s : failed to allocate the iovar (size :%d)\n",
+			__FUNCTION__, FTM_IOC_BUFSZ));
+		return BCME_NOMEM;
+	}
+	/* setup TLVs */
+	bufsize = proxd_iovsize - WL_PROXD_IOV_HDR_SIZE; /* adjust available size for TLVs */
+	p_tlv = &p_proxd_iov->tlvs[0];
+	/* TLV buffer starts with a full size, will decrement for each packed TLV */
+	buf_space_left = bufsize;
+	if (category == FTM_CONFIG_CAT_OPTIONS) {
+		ret = rtt_handle_config_options(session_id, &p_tlv, &buf_space_left,
+				(ftm_config_options_info_t *)ftm_configs, ftm_cfg_cnt);
+	} else if (category == FTM_CONFIG_CAT_GENERAL) {
+		ret = rtt_handle_config_general(session_id, &p_tlv, &buf_space_left,
+				(ftm_config_param_info_t *)ftm_configs, ftm_cfg_cnt);
+	}
+	if (ret == BCME_OK) {
+		/* update the iov header, set len to include all TLVs + header */
+		all_tlvsize = (bufsize - buf_space_left);
+		p_proxd_iov->len = htol16(all_tlvsize + WL_PROXD_IOV_HDR_SIZE);
+		ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov,
+				all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s : failed to set config\n", __FUNCTION__));
+		}
+	}
+	/* clean up */
+	kfree(p_proxd_iov);
+	return ret;
+}
+
+static int
+dhd_rtt_get_version(dhd_pub_t *dhd, int *out_version)
+{
+	int ret;
+	ftm_subcmd_info_t subcmd_info;
+	subcmd_info.name = "ver";
+	subcmd_info.cmdid = WL_PROXD_CMD_GET_VERSION;
+	subcmd_info.handler = NULL;
+	ret = dhd_rtt_common_get_handler(dhd, &subcmd_info,
+			WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL);
+	*out_version = (ret == BCME_OK) ? subcmd_info.version : 0;
+	return ret;
+}
+#endif /* WL_CFG80211 */
+
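+/*
+ * Convert a HAL wifi_channel_info_t (center frequency and width) into the
+ * chanspec encoding used by the firmware.
+ */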
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel)
+{
+	int bw;
+	chanspec_t chanspec = 0;
+	uint8 center_chan;
+	uint8 primary_chan;
+	/* set width to 20 MHz for the 2.4 GHz band */
+	if (channel.center_freq >= 2400 && channel.center_freq <= 2500) {
+		channel.width = WIFI_CHAN_WIDTH_20;
+	}
+	switch (channel.width) {
+	case WIFI_CHAN_WIDTH_20:
+		bw = WL_CHANSPEC_BW_20;
+		primary_chan = wf_mhz2channel(channel.center_freq, 0);
+		chanspec = wf_channel2chspec(primary_chan, bw);
+		break;
+	case WIFI_CHAN_WIDTH_40:
+		bw = WL_CHANSPEC_BW_40;
+		primary_chan = wf_mhz2channel(channel.center_freq, 0);
+		chanspec = wf_channel2chspec(primary_chan, bw);
+		break;
+	case WIFI_CHAN_WIDTH_80:
+		bw = WL_CHANSPEC_BW_80;
+		primary_chan = wf_mhz2channel(channel.center_freq, 0);
+		center_chan = wf_mhz2channel(channel.center_freq0, 0);
+		chanspec = wf_chspec_80(center_chan, primary_chan);
+		break;
+	default:
+		DHD_ERROR(("doesn't support this bandwidth : %d", channel.width));
+		bw = -1;
+		break;
+	}
+	return chanspec;
+}
+
+int
+dhd_rtt_idx_to_burst_duration(uint idx)
+{
+	if (idx >= ARRAY_SIZE(burst_duration_idx)) {
+		return -1;
+	}
+	return burst_duration_idx[idx];
+}
+
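+/*
+ * Copy the caller's target list into the driver RTT state and schedule the
+ * RTT work item to start measuring from the first enabled target.
+ */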
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params)
+{
+	int err = BCME_OK;
+	int idx;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(params, "params is NULL", err);
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) {
+		DHD_ERROR(("doesn't support RTT \n"));
+		return BCME_ERROR;
+	}
+	if (rtt_status->status != RTT_STOPPED) {
+		DHD_ERROR(("rtt is already started\n"));
+		return BCME_BUSY;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+
+	memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+	rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt;
+	memcpy(rtt_status->rtt_config.target_info,
+		params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt));
+	rtt_status->status = RTT_STARTED;
+	/* start to measure RTT from first device */
+	/* find next target to trigger RTT */
+	for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+		/* skip the disabled device */
+		if (rtt_status->rtt_config.target_info[idx].disable) {
+			continue;
+		} else {
+			/* set the idx to cur_idx */
+			rtt_status->cur_idx = idx;
+			break;
+		}
+	}
+	if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+		DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx));
+		schedule_work(&rtt_status->work);
+	}
+	return err;
+}
+
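+/*
+ * Disable RTT for the given peers; when a full cancel is pending, flush the
+ * cached results, notify waiters and shut down the FTM session.
+ */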
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
+{
+	int err = BCME_OK;
+#ifdef WL_CFG80211
+	int i = 0, j = 0;
+	rtt_status_info_t *rtt_status;
+	rtt_results_header_t *entry, *next;
+	rtt_result_t *rtt_result, *next2;
+	struct rtt_noti_callback *iter;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	if (rtt_status->status == RTT_STOPPED) {
+		DHD_ERROR(("rtt is not started\n"));
+		return BCME_OK;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	mutex_lock(&rtt_status->rtt_mutex);
+	for (i = 0; i < mac_cnt; i++) {
+		for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
+			if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
+				ETHER_ADDR_LEN)) {
+				rtt_status->rtt_config.target_info[j].disable = TRUE;
+			}
+		}
+	}
+	if (rtt_status->all_cancel) {
+		/* cancel all of request */
+		rtt_status->status = RTT_STOPPED;
+		DHD_RTT(("current RTT process is cancelled\n"));
+		/* remove the rtt results in cache */
+		if (!list_empty(&rtt_status->rtt_results_cache)) {
+			/* Iterate rtt_results_header list */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry_safe(entry, next,
+				&rtt_status->rtt_results_cache, list) {
+				list_del(&entry->list);
+				/* Iterate rtt_result list */
+				list_for_each_entry_safe(rtt_result, next2,
+					&entry->result_list, list) {
+					list_del(&rtt_result->list);
+					kfree(rtt_result);
+				}
+				kfree(entry);
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		}
+		/* send the rtt complete event to wake up the user process */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+			iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		/* reinitialize the HEAD */
+		INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+		/* clear information for rtt_config */
+		rtt_status->rtt_config.rtt_target_cnt = 0;
+		memset(rtt_status->rtt_config.target_info, 0,
+			TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+		rtt_status->cur_idx = 0;
+		dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+		dhd_rtt_ftm_enable(dhd, FALSE);
+	}
+	mutex_unlock(&rtt_status->rtt_mutex);
+#endif /* WL_CFG80211 */
+	return err;
+}
+
+
+#ifdef WL_CFG80211
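+/*
+ * Configure and start an FTM session for the current target. Runs from the
+ * RTT work item with power management temporarily forced off.
+ */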
+static int
+dhd_rtt_start(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	char chanbuf[CHANSPEC_STR_LEN];
+	int ftm_cfg_cnt = 0;
+	int ftm_param_cnt = 0;
+	uint32 rspec = 0;
+	ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
+	ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+	rtt_target_info_t *rtt_target;
+	rtt_status_info_t *rtt_status;
+	int pm = PM_OFF;
+	struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+
+	DHD_RTT(("Enter %s\n", __FUNCTION__));
+	if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
+		err = BCME_RANGE;
+		DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx));
+		if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+			DHD_ERROR(("STA is set as Target/Responder \n"));
+			return BCME_ERROR;
+		}
+		goto exit;
+	}
+	if (RTT_IS_STOPPED(rtt_status)) {
+		DHD_RTT(("RTT is stopped\n"));
+		goto exit;
+	}
+	err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
+	if (err) {
+		DHD_ERROR(("Failed to get the PM value\n"));
+	} else {
+		err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+		if (err) {
+			DHD_ERROR(("Failed to set the PM\n"));
+			rtt_status->pm_restore = FALSE;
+		} else {
+			rtt_status->pm_restore = TRUE;
+		}
+	}
+
+	mutex_lock(&rtt_status->rtt_mutex);
+	/* Get a target information */
+	rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+	mutex_unlock(&rtt_status->rtt_mutex);
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	if (!RTT_IS_ENABLED(rtt_status)) {
+		/* enable ftm */
+		err = dhd_rtt_ftm_enable(dhd, TRUE);
+		if (err) {
+			DHD_ERROR(("failed to enable FTM (%d)\n", err));
+			goto exit;
+		}
+	}
+
+	/* delete the default FTM session, if one exists */
+	err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
+	if (err < 0 && err != BCME_NOTFOUND) {
+		DHD_ERROR(("failed to delete session of FTM (%d)\n", err));
+		goto exit;
+	}
+	rtt_status->status = RTT_ENABLED;
+	memset(ftm_configs, 0, sizeof(ftm_configs));
+	memset(ftm_params, 0, sizeof(ftm_params));
+
+	/* configure the session 1 as initiator */
+	ftm_configs[ftm_cfg_cnt].enable = TRUE;
+	ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_INITIATOR;
+	dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
+		ftm_configs, ftm_cfg_cnt);
+	/* target's mac address */
+	if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) {
+		ftm_params[ftm_param_cnt].mac_addr = rtt_target->addr;
+		ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_PEER_MAC;
+		bcm_ether_ntoa(&rtt_target->addr, eabuf);
+		DHD_RTT((">\t target %s\n", eabuf));
+	}
+	/* target's chanspec */
+	if (rtt_target->chanspec) {
+		ftm_params[ftm_param_cnt].chanspec = htol32((uint32)rtt_target->chanspec);
+		ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CHANSPEC;
+		wf_chspec_ntoa(rtt_target->chanspec, chanbuf);
+		DHD_RTT((">\t chanspec : %s\n", chanbuf));
+	}
+	/* num-burst */
+	if (rtt_target->num_burst) {
+		ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_burst);
+		ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_NUM_BURST;
+		DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst));
+	}
+	/* number of frame per burst */
+	if (rtt_target->num_frames_per_burst == 0) {
+		rtt_target->num_frames_per_burst =
+			CHSPEC_IS20(rtt_target->chanspec) ? FTM_DEFAULT_CNT_20M :
+			CHSPEC_IS40(rtt_target->chanspec) ? FTM_DEFAULT_CNT_40M :
+			FTM_DEFAULT_CNT_80M;
+	}
+	ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_frames_per_burst);
+	ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_NUM_FTM;
+	DHD_RTT((">\t number of frame per burst : %d\n", rtt_target->num_frames_per_burst));
+	/* FTM retry count */
+	if (rtt_target->num_retries_per_ftm) {
+		ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm;
+		ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_FTM_RETRIES;
+		DHD_RTT((">\t retry count of FTM  : %d\n", rtt_target->num_retries_per_ftm));
+	}
+	/* FTM Request retry count */
+	if (rtt_target->num_retries_per_ftmr) {
+		ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftmr;
+		ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_FTM_REQ_RETRIES;
+		DHD_RTT((">\t retry count of FTM Req : %d\n", rtt_target->num_retries_per_ftmr));
+	}
+	/* burst-period */
+	if (rtt_target->burst_period) {
+		ftm_params[ftm_param_cnt].data_intvl.intvl =
+			htol32(rtt_target->burst_period); /* ms */
+		ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
+		ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD;
+		DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period));
+	}
+	/* burst-duration */
+	if (rtt_target->burst_duration) {
+		ftm_params[ftm_param_cnt].data_intvl.intvl =
+			htol32(rtt_target->burst_duration); /* ms */
+		ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
+		ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_DURATION;
+		DHD_RTT((">\t burst duration : %d ms\n",
+			rtt_target->burst_duration));
+	}
+	if (rtt_target->bw && rtt_target->preamble) {
+		bool use_default = FALSE;
+		int nss;
+		int mcs;
+		switch (rtt_target->preamble) {
+		case RTT_PREAMBLE_LEGACY:
+			rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
+			rspec |= WL_RATE_6M;
+			break;
+		case RTT_PREAMBLE_HT:
+			rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
+			mcs = 0; /* default MCS 0 */
+			rspec |= mcs;
+			break;
+		case RTT_PREAMBLE_VHT:
+			rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
+			mcs = 0; /* default MCS 0 */
+			nss = 1; /* default Nss = 1  */
+			rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
+			break;
+		default:
+			DHD_RTT(("doesn't support this preamble : %d\n", rtt_target->preamble));
+			use_default = TRUE;
+			break;
+		}
+		switch (rtt_target->bw) {
+		case RTT_BW_20:
+			rspec |= WL_RSPEC_BW_20MHZ;
+			break;
+		case RTT_BW_40:
+			rspec |= WL_RSPEC_BW_40MHZ;
+			break;
+		case RTT_BW_80:
+			rspec |= WL_RSPEC_BW_80MHZ;
+			break;
+		default:
+			DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw));
+			use_default = TRUE;
+			break;
+		}
+		if (!use_default) {
+			ftm_params[ftm_param_cnt].data32 = htol32(rspec);
+			ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_RATESPEC;
+			DHD_RTT((">\t ratespec : %d\n", rspec));
+		}
+
+	}
+	dhd_set_rand_mac_oui(dhd);
+	dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL,
+		ftm_params, ftm_param_cnt);
+
+	err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE);
+	if (err) {
+		DHD_ERROR(("failed to start session of FTM : error %d\n", err));
+	}
+exit:
+	if (err) {
+		DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__));
+		rtt_status->status = RTT_STOPPED;
+		/* disable FTM */
+		dhd_rtt_ftm_enable(dhd, FALSE);
+		if (rtt_status->pm_restore) {
+			DHD_ERROR(("pm_restore =%d func =%s \n",
+				rtt_status->pm_restore, __FUNCTION__));
+			pm = PM_FAST;
+			err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+			if (err) {
+				DHD_ERROR(("Failed to set PM \n"));
+			} else {
+				rtt_status->pm_restore = FALSE;
+			}
+		}
+	}
+	return err;
+}
+#endif /* WL_CFG80211 */
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+	int err = BCME_OK;
+	struct rtt_noti_callback *cb = NULL, *iter;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+		if (iter->noti_fn == noti_fn) {
+			goto exit;
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC);
+	if (!cb) {
+		err = -ENOMEM;
+		goto exit;
+	}
+	cb->noti_fn = noti_fn;
+	cb->ctx = ctx;
+	list_add(&cb->list, &rtt_status->noti_fn_list);
+exit:
+	spin_unlock_bh(&noti_list_lock);
+	return err;
+}
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn)
+{
+	int err = BCME_OK;
+	struct rtt_noti_callback *cb = NULL, *iter;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+		if (iter->noti_fn == noti_fn) {
+			cb = iter;
+			list_del(&cb->list);
+			break;
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	spin_unlock_bh(&noti_list_lock);
+	if (cb) {
+		kfree(cb);
+	}
+	return err;
+}
+
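+/* Translate a firmware ratespec into the HAL wifi_rate_t representation. */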
+static wifi_rate_t
+dhd_rtt_convert_rate_to_host(uint32 rspec)
+{
+	wifi_rate_t host_rate;
+	memset(&host_rate, 0, sizeof(wifi_rate_t));
+	if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE) {
+		host_rate.preamble = 0;
+	} else if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) {
+		host_rate.preamble = 2;
+		host_rate.rateMcsIdx = rspec & WL_RSPEC_RATE_MASK;
+	} else if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) {
+		host_rate.preamble = 3;
+		host_rate.rateMcsIdx = rspec & WL_RSPEC_VHT_MCS_MASK;
+		host_rate.nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
+	}
+	host_rate.bw = (rspec & WL_RSPEC_BW_MASK) - 1;
+	host_rate.bitrate = rate_rspec2rate(rspec) / 100; /* 100kbps */
+	DHD_RTT(("bit rate : %d\n", host_rate.bitrate));
+	return host_rate;
+}
+
+
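+/* Unpack a firmware proxd RTT result into the HAL rtt_report_t layout. */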
+static int
+dhd_rtt_convert_results_to_host(rtt_report_t *rtt_report, uint8 *p_data, uint16 tlvid, uint16 len)
+{
+	int err = BCME_OK;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	wl_proxd_rtt_result_t *p_data_info;
+	wl_proxd_result_flags_t flags;
+	wl_proxd_session_state_t session_state;
+	wl_proxd_status_t proxd_status;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	struct timespec64 ts;
+#endif /* LINUX_VER >= 2.6.39 */
+	uint32 ratespec;
+	uint32 avg_dist;
+	wl_proxd_rtt_sample_t *p_sample;
+	wl_proxd_intvl_t rtt;
+	wl_proxd_intvl_t p_time;
+
+	NULL_CHECK(rtt_report, "rtt_report is NULL", err);
+	NULL_CHECK(p_data, "p_data is NULL", err);
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	p_data_info = (wl_proxd_rtt_result_t *) p_data;
+	/* unpack and format 'flags' for display */
+	flags = ltoh16_ua(&p_data_info->flags);
+
+	/* session state and status */
+	session_state = ltoh16_ua(&p_data_info->state);
+	proxd_status = ltoh32_ua(&p_data_info->status);
+	bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
+	ftm_session_state_value_to_logstr(session_state);
+	ftm_status_value_to_logstr(proxd_status);
+	DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
+		eabuf,
+		session_state,
+		ftm_session_state_value_to_logstr(session_state),
+		proxd_status,
+		ftm_status_value_to_logstr(proxd_status)));
+
+	/* show avg_dist (1/256m units), burst_num */
+	avg_dist = ltoh32_ua(&p_data_info->avg_dist);
+	if (avg_dist == 0xffffffff) {	/* report 'failure' case */
+		DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
+		ltoh16_ua(&p_data_info->burst_num),
+		p_data_info->num_valid_rtt)); /* in a session */
+		avg_dist = FTM_INVALID;
+	}
+	else {
+		DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n",
+			avg_dist >> 8, /* 1/256m units */
+			((avg_dist & 0xff) * 625) >> 4,
+			ltoh16_ua(&p_data_info->burst_num),
+			p_data_info->num_valid_rtt,
+			p_data_info->num_ftm)); /* in a session */
+	}
+	/* show 'avg_rtt' sample */
+	p_sample = &p_data_info->avg_rtt;
+	ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu));
+	DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d ratespec=0x%08x\n",
+		(int16) ltoh16_ua(&p_sample->rssi),
+		ltoh32_ua(&p_sample->rtt.intvl),
+		ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
+		ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
+		ltoh32_ua(&p_sample->ratespec)));
+
+	/* set peer address */
+	rtt_report->addr = p_data_info->peer;
+	/* burst num */
+	rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
+	/* success num */
+	rtt_report->success_num = p_data_info->num_valid_rtt;
+	/* actual number of FTM supported by peer */
+	rtt_report->num_per_burst_peer = p_data_info->num_ftm;
+	rtt_report->negotiated_burst_num = p_data_info->num_ftm;
+	/* status */
+	rtt_report->status = ftm_get_statusmap_info(proxd_status,
+			&ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
+
+	/* rssi (0.5db) */
+	rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_data_info->avg_rtt.rssi)) * 2;
+
+	/* rx rate */
+	ratespec = ltoh32_ua(&p_data_info->avg_rtt.ratespec);
+	rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
+	/* tx rate */
+	if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
+		rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
+	} else {
+		rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
+	}
+	/* rtt_sd */
+	rtt.tmu = ltoh16_ua(&p_data_info->avg_rtt.rtt.tmu);
+	rtt.intvl = ltoh32_ua(&p_data_info->avg_rtt.rtt.intvl);
+	rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
+	rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
+	DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
+	DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
+
+	/* average distance */
+	if (avg_dist != FTM_INVALID) {
+		rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
+		rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
+	} else {
+		rtt_report->distance = FTM_INVALID;
+	}
+	/* time stamp */
+	/* get the time elapsed from boot time */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	ktime_get_boottime_ts64(&ts);
+	rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
+#endif /* LINUX_VER >= 2.6.39 */
+
+	if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
+		/* retry time  after failure */
+		p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+		p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+		rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* in seconds */
+		DHD_RTT((">\tretry_after: %d%s\n",
+			ltoh32_ua(&p_data_info->u.retry_after.intvl),
+			ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
+	} else {
+		/* burst duration */
+		p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+		p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+		rtt_report->burst_duration =  FTM_INTVL2MSEC(&p_time); /* s -> ms */
+		DHD_RTT((">\tburst_duration: %d%s\n",
+			ltoh32_ua(&p_data_info->u.burst_duration.intvl),
+			ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
+		DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
+	}
+	return err;
+}
+
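+/*
+ * WLC_E_PROXD event handler: caches per-burst results and, on session end,
+ * either advances to the next target or reports completion to listeners.
+ */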
+int
+dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+	int ret = BCME_OK;
+	int tlvs_len;
+	uint16 version;
+	wl_proxd_event_t *p_event;
+	wl_proxd_event_type_t event_type;
+	wl_proxd_ftm_session_status_t session_status;
+	wl_proxd_collect_event_data_t *collect_event_data;
+	const ftm_strmap_entry_t *p_loginfo;
+	rtt_result_t *rtt_result;
+	gfp_t kflags;
+#ifdef WL_CFG80211
+	int idx;
+	struct rtt_noti_callback *iter;
+	bool is_new = TRUE;
+	rtt_status_info_t *rtt_status;
+	rtt_result_t *next2;
+	rtt_results_header_t *next = NULL;
+	rtt_target_info_t *rtt_target_info;
+	rtt_results_header_t *entry, *rtt_results_header = NULL;
+#endif /* WL_CFG80211 */
+
+	DHD_RTT(("Enter %s \n", __FUNCTION__));
+	NULL_CHECK(dhd, "dhd is NULL", ret);
+
+#ifdef WL_CFG80211
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+
+	if (RTT_IS_STOPPED(rtt_status)) {
+		/* Ignore the Proxd event */
+		DHD_RTT((" event handler rtt is stopped \n"));
+		if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+			DHD_RTT(("Device is target/Responder. Recv the event. \n"));
+		} else {
+			return ret;
+		}
+	}
+#endif /* WL_CFG80211 */
+	if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) {
+		DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__,
+			ntoh32_ua((void *)&event->datalen)));
+		return -EINVAL;
+	}
+	event_type = ntoh32_ua((void *)&event->event_type);
+	if (event_type != WLC_E_PROXD) {
+		DHD_ERROR((" failed event \n"));
+		return -EINVAL;
+	}
+
+	if (!event_data) {
+		DHD_ERROR(("%s: event_data:NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+	p_event = (wl_proxd_event_t *) event_data;
+	version = ltoh16(p_event->version);
+	if (version < WL_PROXD_API_VERSION) {
+		DHD_ERROR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
+			version, WL_PROXD_API_VERSION));
+		return ret;
+	}
+#ifdef WL_CFG80211
+	if (!in_atomic()) {
+		mutex_lock(&rtt_status->rtt_mutex);
+	}
+#endif /* WL_CFG80211 */
+	event_type = (wl_proxd_event_type_t) ltoh16(p_event->type);
+
+	kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL;
+
+	DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n",
+		p_event->type, ntoh16(p_event->type), ltoh16(p_event->type)));
+	p_loginfo = ftm_get_event_type_loginfo(event_type);
+	if (p_loginfo == NULL) {
+		DHD_ERROR(("receive an invalid FTM event %d\n", event_type));
+		ret = -EINVAL;
+		goto exit;	/* ignore this event */
+	}
+	/* get TLVs len, skip over event header */
+	if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) {
+		DHD_ERROR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
+		ret = -EINVAL;
+		goto exit;
+	}
+	tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs);
+	DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n",
+		p_loginfo->text,
+		version,
+		ltoh16(p_event->len),
+		ltoh16(p_event->method),
+		ltoh16(p_event->sid),
+		tlvs_len));
+#ifdef WL_CFG80211
+	rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	/* find a rtt_report_header for this mac address */
+	list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
+		 if (!memcmp(&entry->peer_mac, &event->addr, ETHER_ADDR_LEN))  {
+			/* found a rtt_report_header for peer_mac in the list */
+			is_new = FALSE;
+			rtt_results_header = entry;
+			break;
+		 }
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#endif /* WL_CFG80211 */
+	switch (event_type) {
+	case WL_PROXD_EVENT_SESSION_CREATE:
+		DHD_RTT(("WL_PROXD_EVENT_SESSION_CREATE\n"));
+		break;
+	case WL_PROXD_EVENT_SESSION_START:
+		DHD_RTT(("WL_PROXD_EVENT_SESSION_START\n"));
+		break;
+	case WL_PROXD_EVENT_BURST_START:
+		DHD_RTT(("WL_PROXD_EVENT_BURST_START\n"));
+		break;
+	case WL_PROXD_EVENT_BURST_END:
+		DHD_RTT(("WL_PROXD_EVENT_BURST_END\n"));
+#ifdef WL_CFG80211
+		if (is_new) {
+			/* allocate new header for rtt_results */
+			rtt_results_header = kzalloc(sizeof(rtt_results_header_t), kflags);
+			if (!rtt_results_header) {
+				ret = -ENOMEM;
+				goto exit;
+			}
+			/* Initialize the head of list for rtt result */
+			INIT_LIST_HEAD(&rtt_results_header->result_list);
+			rtt_results_header->peer_mac = event->addr;
+			list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+		}
+#endif /* WL_CFG80211 */
+		if (tlvs_len > 0) {
+			/* allocate rtt_results for new results */
+			rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
+			if (!rtt_result) {
+				ret = -ENOMEM;
+				goto exit;
+			}
+			/* unpack TLVs and invokes the cbfn to print the event content TLVs */
+			ret = bcm_unpack_xtlv_buf((void *) &(rtt_result->report),
+				(uint8 *)&p_event->tlvs[0], tlvs_len,
+				BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
+			if (ret != BCME_OK) {
+				DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
+					__FUNCTION__));
+				goto exit;
+			}
+#ifdef WL_CFG80211
+			/* fill out the results from the configuration param */
+			rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
+			rtt_result->report.type = RTT_TWO_WAY;
+			DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+			rtt_result->report_len = RTT_REPORT_SIZE;
+
+			list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+			rtt_results_header->result_cnt++;
+			rtt_results_header->result_tot_len += rtt_result->report_len;
+#endif /* WL_CFG80211 */
+		}
+		break;
+	case WL_PROXD_EVENT_SESSION_END:
+			DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
+#ifdef WL_CFG80211
+		if (!RTT_IS_ENABLED(rtt_status)) {
+			DHD_RTT(("Ignore the session end evt\n"));
+			goto exit;
+		}
+#endif /* WL_CFG80211 */
+		if (tlvs_len > 0) {
+			/* unpack TLVs and invokes the cbfn to print the event content TLVs */
+			ret = bcm_unpack_xtlv_buf((void *) &session_status,
+				(uint8 *)&p_event->tlvs[0], tlvs_len,
+				BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
+			if (ret != BCME_OK) {
+				DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		}
+#ifdef WL_CFG80211
+		/* In case of no result for the peer device, make fake result for error case */
+		if (is_new) {
+			/* allocate new header for rtt_results */
+			rtt_results_header = kzalloc(sizeof(rtt_results_header_t), GFP_KERNEL);
+			if (!rtt_results_header) {
+				ret = -ENOMEM;
+				goto exit;
+			}
+			/* Initialize the head of list for rtt result */
+			INIT_LIST_HEAD(&rtt_results_header->result_list);
+			rtt_results_header->peer_mac = event->addr;
+			list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+
+			/* allocate rtt_results for new results */
+			rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
+			if (!rtt_result) {
+				ret = -ENOMEM;
+				kfree(rtt_results_header);
+				goto exit;
+			}
+			/* fill out the results from the configuration param */
+			rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
+			rtt_result->report.type = RTT_TWO_WAY;
+			DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+			rtt_result->report_len = RTT_REPORT_SIZE;
+			rtt_result->report.status = RTT_REASON_FAIL_NO_RSP;
+			rtt_result->report.addr = rtt_target_info->addr;
+			rtt_result->report.distance = FTM_INVALID;
+			list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+			rtt_results_header->result_cnt++;
+			rtt_results_header->result_tot_len += rtt_result->report_len;
+		}
+		/* find next target to trigger RTT */
+		for (idx = (rtt_status->cur_idx + 1);
+			idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+			/* skip the disabled device */
+			if (rtt_status->rtt_config.target_info[idx].disable) {
+				continue;
+			} else {
+				/* set the idx to cur_idx */
+				rtt_status->cur_idx = idx;
+				break;
+			}
+		}
+		if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+			/* restart to measure RTT from next device */
+			DHD_ERROR(("restart to measure rtt\n"));
+			schedule_work(&rtt_status->work);
+		} else {
+			DHD_RTT(("RTT_STOPPED\n"));
+			rtt_status->status = RTT_STOPPED;
+			/* to turn on mpc mode */
+			schedule_work(&rtt_status->work);
+			/* notify the completed information to others */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+				iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+			}
+			/* remove the rtt results in cache */
+			if (!list_empty(&rtt_status->rtt_results_cache)) {
+				/* Iterate rtt_results_header list */
+				list_for_each_entry_safe(entry, next,
+					&rtt_status->rtt_results_cache, list) {
+					list_del(&entry->list);
+					/* Iterate rtt_result list */
+					list_for_each_entry_safe(rtt_result, next2,
+						&entry->result_list, list) {
+						list_del(&rtt_result->list);
+						kfree(rtt_result);
+					}
+					kfree(entry);
+				}
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+			/* reinitialize the HEAD */
+			INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+			/* clear information for rtt_config */
+			rtt_status->rtt_config.rtt_target_cnt = 0;
+			memset(rtt_status->rtt_config.target_info, 0,
+				TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+			rtt_status->cur_idx = 0;
+		}
+#endif /* WL_CFG80211 */
+		break;
+	case WL_PROXD_EVENT_SESSION_RESTART:
+		DHD_RTT(("WL_PROXD_EVENT_SESSION_RESTART\n"));
+		break;
+	case WL_PROXD_EVENT_BURST_RESCHED:
+		DHD_RTT(("WL_PROXD_EVENT_BURST_RESCHED\n"));
+		break;
+	case WL_PROXD_EVENT_SESSION_DESTROY:
+		DHD_RTT(("WL_PROXD_EVENT_SESSION_DESTROY\n"));
+		break;
+	case WL_PROXD_EVENT_FTM_FRAME:
+		DHD_RTT(("WL_PROXD_EVENT_FTM_FRAME\n"));
+		break;
+	case WL_PROXD_EVENT_DELAY:
+		DHD_RTT(("WL_PROXD_EVENT_DELAY\n"));
+		break;
+	case WL_PROXD_EVENT_VS_INITIATOR_RPT:
+		DHD_RTT(("WL_PROXD_EVENT_VS_INITIATOR_RPT\n "));
+		break;
+	case WL_PROXD_EVENT_RANGING:
+		DHD_RTT(("WL_PROXD_EVENT_RANGING\n"));
+		break;
+	case WL_PROXD_EVENT_COLLECT:
+		DHD_RTT(("WL_PROXD_EVENT_COLLECT\n"));
+		if (tlvs_len > 0) {
+			collect_event_data = kzalloc(sizeof(wl_proxd_collect_event_data_t), kflags);
+			if (!collect_event_data) {
+				ret = -ENOMEM;
+				goto exit;
+			}
+			/* unpack TLVs and invokes the cbfn to print the event content TLVs */
+			ret = bcm_unpack_xtlv_buf((void *) collect_event_data,
+				(uint8 *)&p_event->tlvs[0], tlvs_len,
+				BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
+			kfree(collect_event_data);
+			if (ret != BCME_OK) {
+				DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		}
+		break;
+
+
+	default:
+		DHD_ERROR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
+		break;
+	}
+exit:
+#ifdef WL_CFG80211
+	if (!in_atomic()) {
+		mutex_unlock(&rtt_status->rtt_mutex);
+	}
+#endif /* WL_CFG80211 */
+
+	return ret;
+}
+
+#ifdef WL_CFG80211
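+/* RTT work item: kicks off the measurement for the currently selected target. */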
+static void
+dhd_rtt_work(struct work_struct *work)
+{
+	rtt_status_info_t *rtt_status;
+	dhd_pub_t *dhd;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	rtt_status = container_of(work, rtt_status_info_t, work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	if (rtt_status == NULL) {
+		DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__));
+		return;
+	}
+	dhd = rtt_status->dhd;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+	(void) dhd_rtt_start(dhd);
+}
+#endif /* WL_CFG80211 */
+
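+/* Report the RTT capabilities discovered at init (protocol, preamble, bandwidth). */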
+int
+dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa)
+{
+	rtt_status_info_t *rtt_status;
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	NULL_CHECK(capa, "capa is NULL", err);
+	bzero(capa, sizeof(rtt_capabilities_t));
+
+	/* set rtt capabilities */
+	if (rtt_status->rtt_capa.proto & RTT_CAP_ONE_WAY)
+		capa->rtt_one_sided_supported = 1;
+	if (rtt_status->rtt_capa.proto & RTT_CAP_FTM_WAY)
+		capa->rtt_ftm_supported = 1;
+
+	if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCI)
+		capa->lci_support = 1;
+	if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCR)
+		capa->lcr_support = 1;
+	if (rtt_status->rtt_capa.feature & RTT_FEATURE_PREAMBLE)
+		capa->preamble_support = 1;
+	if (rtt_status->rtt_capa.feature & RTT_FEATURE_BW)
+		capa->bw_support = 1;
+
+	/* bit mask */
+	capa->preamble_support = rtt_status->rtt_capa.preamble;
+	capa->bw_support = rtt_status->rtt_capa.bw;
+
+	return err;
+}
+
+#ifdef WL_CFG80211
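+/* Read the current chanspec and convert it into HAL channel info for the responder. */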
+int
+dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info)
+{
+	u32 chanspec = 0;
+	int err = BCME_OK;
+	chanspec_t c = 0;
+	u32 channel;
+	struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+
+	if ((err = wldev_iovar_getint(dev, "chanspec",
+		(s32 *)&chanspec)) == BCME_OK) {
+		c = (chanspec_t)dtoh32(chanspec);
+		c = wl_chspec_driver_to_host(c);
+		channel  = wf_chspec_ctlchan(c);
+		DHD_RTT((" control channel is %d \n", channel));
+		if (CHSPEC_IS20(c)) {
+			channel_info->width = WIFI_CHAN_WIDTH_20;
+			DHD_RTT((" band is 20 \n"));
+		} else if (CHSPEC_IS40(c)) {
+			channel_info->width = WIFI_CHAN_WIDTH_40;
+			DHD_RTT(("band is 40 \n"));
+		} else {
+			channel_info->width = WIFI_CHAN_WIDTH_80;
+			DHD_RTT(("band is 80 \n"));
+		}
+		if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
+			(channel <= CH_MAX_2G_CHANNEL)) {
+			channel_info->center_freq =
+				ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+		} else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
+			channel_info->center_freq =
+				ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+		}
+		if ((channel_info->width == WIFI_CHAN_WIDTH_80) ||
+			(channel_info->width == WIFI_CHAN_WIDTH_40)) {
+			channel = CHSPEC_CHANNEL(c);
+			channel_info->center_freq0 =
+				ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+		}
+	} else {
+		DHD_ERROR(("Failed to get the chanspec \n"));
+	}
+	return err;
+}
+
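+/*
+ * Put the device into FTM responder (target) mode, setting the chanspec when
+ * not associated and forcing power management off for the duration.
+ */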
+int
+dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info)
+{
+	int err = BCME_OK;
+	char chanbuf[CHANSPEC_STR_LEN];
+	int pm = PM_OFF;
+	int ftm_cfg_cnt = 0;
+	chanspec_t chanspec;
+	wifi_channel_info_t channel;
+	struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+	ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
+	ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+	rtt_status_info_t *rtt_status;
+
+	memset(&channel, 0, sizeof(channel));
+	BCM_REFERENCE(chanbuf);
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	if (RTT_IS_STOPPED(rtt_status)) {
+		DHD_RTT(("STA responder/Target. \n"));
+	}
+	DHD_RTT(("Enter %s \n", __FUNCTION__));
+	if (!dhd_is_associated(dhd, 0, NULL)) {
+		if (channel_info) {
+			channel.width = channel_info->width;
+			channel.center_freq = channel_info->center_freq;
+			channel.center_freq0 = channel_info->center_freq;
+		}
+		else {
+			channel.width = WIFI_CHAN_WIDTH_80;
+			channel.center_freq = DEFAULT_FTM_FREQ;
+			channel.center_freq0 = DEFAULT_FTM_CNTR_FREQ0;
+		}
+		chanspec =  dhd_rtt_convert_to_chspec(channel);
+		DHD_RTT(("chanspec/channel set as %s for rtt.\n",
+			wf_chspec_ntoa(chanspec, chanbuf)));
+		err = wldev_iovar_setint(dev, "chanspec", chanspec);
+		if (err) {
+			DHD_ERROR(("Failed to set the chanspec \n"));
+		}
+	}
+	err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
+	DHD_RTT(("Current PM value read %d\n", rtt_status->pm));
+	if (err) {
+		DHD_ERROR(("Failed to get the PM value \n"));
+	} else {
+		err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+		if (err) {
+			DHD_ERROR(("Failed to set the PM \n"));
+			rtt_status->pm_restore = FALSE;
+		} else {
+			rtt_status->pm_restore = TRUE;
+		}
+	}
+	if (!RTT_IS_ENABLED(rtt_status)) {
+		err = dhd_rtt_ftm_enable(dhd, TRUE);
+		if (err) {
+			DHD_ERROR(("Failed to enable FTM (%d)\n", err));
+			goto exit;
+		}
+		DHD_RTT(("FTM enabled \n"));
+	}
+	rtt_status->status = RTT_ENABLED;
+	DHD_RTT(("Responder enabled \n"));
+	memset(ftm_configs, 0, sizeof(ftm_configs));
+	memset(ftm_params, 0, sizeof(ftm_params));
+	ftm_configs[ftm_cfg_cnt].enable = TRUE;
+	ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_TARGET;
+	rtt_status->flags = WL_PROXD_SESSION_FLAG_TARGET;
+	DHD_RTT(("Set the device as responder \n"));
+	err = dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
+		ftm_configs, ftm_cfg_cnt);
+exit:
+	if (err) {
+		rtt_status->status = RTT_STOPPED;
+		DHD_ERROR(("rtt is stopped  %s \n", __FUNCTION__));
+		dhd_rtt_ftm_enable(dhd, FALSE);
+		DHD_RTT(("restoring the PM value \n"));
+		if (rtt_status->pm_restore) {
+			pm = PM_FAST;
+			err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+			if (err) {
+				DHD_ERROR(("Failed to restore PM \n"));
+			} else {
+				rtt_status->pm_restore = FALSE;
+			}
+		}
+	}
+	return err;
+}
+
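+/* Leave responder mode: disable FTM and restore the saved power-management mode. */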
+int
+dhd_rtt_cancel_responder(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	rtt_status_info_t *rtt_status;
+	int pm = 0;
+	struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	DHD_RTT(("Enter %s \n", __FUNCTION__));
+	err = dhd_rtt_ftm_enable(dhd, FALSE);
+	if (err) {
+		DHD_ERROR(("failed to disable FTM (%d)\n", err));
+	}
+	rtt_status->status = RTT_STOPPED;
+	if (rtt_status->pm_restore) {
+		pm = PM_FAST;
+		DHD_RTT(("pm_restore =%d \n", rtt_status->pm_restore));
+		err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+		if (err) {
+			DHD_ERROR(("Failed to restore PM \n"));
+		} else {
+			rtt_status->pm_restore = FALSE;
+		}
+	}
+	return err;
+}
+#endif /* WL_CFG80211 */
+
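+/*
+ * Allocate the RTT state, query the firmware proxd version and populate the
+ * capability flags when FTM is supported.
+ */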
+int
+dhd_rtt_init(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+#ifdef WL_CFG80211
+	int ret;
+	int32 drv_up = 1;
+	int32 version;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	dhd->rtt_supported = FALSE;
+	if (dhd->rtt_state) {
+		return err;
+	}
+	dhd->rtt_state = kzalloc(sizeof(rtt_status_info_t), GFP_KERNEL);
+	if (dhd->rtt_state == NULL) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("%s : failed to create rtt_state\n", __FUNCTION__));
+		return err;
+	}
+	bzero(dhd->rtt_state, sizeof(rtt_status_info_t));
+	rtt_status = GET_RTTSTATE(dhd);
+	rtt_status->rtt_config.target_info =
+			kzalloc(TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), GFP_KERNEL);
+	if (rtt_status->rtt_config.target_info == NULL) {
+		DHD_ERROR(("%s failed to allocate the target info for %d\n",
+			__FUNCTION__, RTT_MAX_TARGET_CNT));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+	rtt_status->dhd = dhd;
+	/* need to do WLC_UP  */
+	dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&drv_up, sizeof(int32), TRUE, 0);
+
+	ret = dhd_rtt_get_version(dhd, &version);
+	if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) {
+		DHD_ERROR(("%s : FTM is supported\n", __FUNCTION__));
+		dhd->rtt_supported = TRUE;
+		/* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */
+		rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY;
+
+		/* indicate to set tx rate */
+		rtt_status->rtt_capa.feature |= RTT_FEATURE_LCI;
+		rtt_status->rtt_capa.feature |= RTT_FEATURE_LCR;
+		rtt_status->rtt_capa.feature |= RTT_FEATURE_PREAMBLE;
+		rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_VHT;
+		rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_HT;
+
+		/* indicate to set bandwidth */
+		rtt_status->rtt_capa.feature |= RTT_FEATURE_BW;
+		rtt_status->rtt_capa.bw |= RTT_BW_20;
+		rtt_status->rtt_capa.bw |= RTT_BW_40;
+		rtt_status->rtt_capa.bw |= RTT_BW_80;
+	} else {
+		if ((ret != BCME_OK) || (version == 0)) {
+			DHD_ERROR(("%s : FTM is not supported\n", __FUNCTION__));
+		} else {
+			DHD_ERROR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
+				__FUNCTION__, WL_PROXD_API_VERSION, version));
+		}
+	}
+	/* cancel all of RTT request once we got the cancel request */
+	rtt_status->all_cancel = TRUE;
+	mutex_init(&rtt_status->rtt_mutex);
+	INIT_LIST_HEAD(&rtt_status->noti_fn_list);
+	INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+	INIT_WORK(&rtt_status->work, dhd_rtt_work);
+exit:
+	if (err < 0) {
+		kfree(rtt_status->rtt_config.target_info);
+		kfree(dhd->rtt_state);
+		dhd->rtt_state = NULL;
+	}
+#endif /* WL_CFG80211 */
+	return err;
+}
+
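+/* Tear down the RTT state: drop callbacks and cached results, then free buffers. */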
+int
+dhd_rtt_deinit(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+#ifdef WL_CFG80211
+	rtt_status_info_t *rtt_status;
+	rtt_results_header_t *rtt_header, *next;
+	rtt_result_t *rtt_result, *next2;
+	struct rtt_noti_callback *iter, *iter2;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	rtt_status->status = RTT_STOPPED;
+	DHD_RTT(("rtt is stopped %s \n", __FUNCTION__));
+	/* clear evt callback list */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+	if (!list_empty(&rtt_status->noti_fn_list)) {
+		list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) {
+			list_del(&iter->list);
+			kfree(iter);
+		}
+	}
+	/* remove the rtt results */
+	if (!list_empty(&rtt_status->rtt_results_cache)) {
+		list_for_each_entry_safe(rtt_header, next, &rtt_status->rtt_results_cache, list) {
+			list_del(&rtt_header->list);
+			list_for_each_entry_safe(rtt_result, next2,
+				&rtt_header->result_list, list) {
+				list_del(&rtt_result->list);
+				kfree(rtt_result);
+			}
+			kfree(rtt_header);
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	kfree(rtt_status->rtt_config.target_info);
+	kfree(dhd->rtt_state);
+	dhd->rtt_state = NULL;
+#endif /* WL_CFG80211 */
+	return err;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.h b/drivers/net/wireless/bcmdhd/dhd_rtt.h
new file mode 100644
index 0000000..b3ca820
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_rtt.h
@@ -0,0 +1,387 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+#ifndef __DHD_RTT_H__
+#define __DHD_RTT_H__
+
+#include "dngl_stats.h"
+
+#define RTT_MAX_TARGET_CNT 50
+#define RTT_MAX_FRAME_CNT 25
+#define RTT_MAX_RETRY_CNT 10
+#define DEFAULT_FTM_CNT 6
+#define DEFAULT_RETRY_CNT 6
+#define DEFAULT_FTM_FREQ 5180
+#define DEFAULT_FTM_CNTR_FREQ0 5210
+
+#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count)
+
+#define TARGET_TYPE(target) (target->type)
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+/* DSSS, CCK and 802.11n rates in [500kbps] units */
+#define WL_MAXRATE	108	/* in 500kbps units */
+#define WL_RATE_1M	2	/* in 500kbps units */
+#define WL_RATE_2M	4	/* in 500kbps units */
+#define WL_RATE_5M5	11	/* in 500kbps units */
+#define WL_RATE_11M	22	/* in 500kbps units */
+#define WL_RATE_6M	12	/* in 500kbps units */
+#define WL_RATE_9M	18	/* in 500kbps units */
+#define WL_RATE_12M	24	/* in 500kbps units */
+#define WL_RATE_18M	36	/* in 500kbps units */
+#define WL_RATE_24M	48	/* in 500kbps units */
+#define WL_RATE_36M	72	/* in 500kbps units */
+#define WL_RATE_48M	96	/* in 500kbps units */
+#define WL_RATE_54M	108	/* in 500kbps units */
+#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
+
+enum rtt_role {
+	RTT_INITIATOR = 0,
+	RTT_TARGET = 1
+};
+enum rtt_status {
+	RTT_STOPPED = 0,
+	RTT_STARTED = 1,
+	RTT_ENABLED = 2
+};
+typedef int64_t wifi_timestamp; /* In microseconds (us) */
+typedef int64_t wifi_timespan;
+typedef int32 wifi_rssi_rtt;
+
+typedef enum {
+	RTT_INVALID,
+	RTT_ONE_WAY,
+	RTT_TWO_WAY,
+	RTT_AUTO
+} rtt_type_t;
+
+typedef enum {
+	RTT_PEER_STA,
+	RTT_PEER_AP,
+	RTT_PEER_P2P,
+	RTT_PEER_NAN,
+	RTT_PEER_INVALID
+} rtt_peer_type_t;
+
+typedef enum rtt_reason {
+	RTT_REASON_SUCCESS,
+	RTT_REASON_FAILURE,
+	RTT_REASON_FAIL_NO_RSP,
+	RTT_REASON_FAIL_INVALID_TS, /* Invalid timestamp */
+	RTT_REASON_FAIL_PROTOCOL, /* 11mc protocol failed */
+	RTT_REASON_FAIL_REJECTED,
+	RTT_REASON_FAIL_NOT_SCHEDULED_YET,
+	RTT_REASON_FAIL_SCHEDULE, /* schedule failed */
+	RTT_REASON_FAIL_TM_TIMEOUT,
+	RTT_REASON_FAIL_AP_ON_DIFF_CHANNEL,
+	RTT_REASON_FAIL_NO_CAPABILITY,
+	RTT_REASON_FAIL_BUSY_TRY_LATER,
+	RTT_REASON_ABORTED
+} rtt_reason_t;
+
+enum {
+	RTT_CAP_ONE_WAY	 = BIT(0),
+	/* IEEE802.11mc */
+	RTT_CAP_FTM_WAY  = BIT(1)
+};
+
+enum {
+	RTT_FEATURE_LCI = BIT(0),
+	RTT_FEATURE_LCR = BIT(1),
+	RTT_FEATURE_PREAMBLE = BIT(2),
+	RTT_FEATURE_BW = BIT(3)
+};
+
+enum {
+	RTT_PREAMBLE_LEGACY = BIT(0),
+	RTT_PREAMBLE_HT = BIT(1),
+	RTT_PREAMBLE_VHT = BIT(2)
+};
+
+
+enum {
+	RTT_BW_5 = BIT(0),
+	RTT_BW_10 = BIT(1),
+	RTT_BW_20 = BIT(2),
+	RTT_BW_40 = BIT(3),
+	RTT_BW_80 = BIT(4),
+	RTT_BW_160 = BIT(5)
+};
+#define FTM_MAX_NUM_BURST_EXP	14
+#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY)
+#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY)
+#define HAS_RTT_CAP(cap) (HAS_ONEWAY_CAP(cap) || HAS_11MC_CAP(cap))
+
+typedef struct wifi_channel_info {
+	wifi_channel_width_t width;
+	wifi_channel center_freq; /* primary 20 MHz channel */
+	wifi_channel center_freq0; /* center freq (MHz) first segment */
+	wifi_channel center_freq1; /* center freq (MHz) second segment valid for 80 + 80 */
+} wifi_channel_info_t;
+
+typedef struct wifi_rate {
+	uint32 preamble :3; /* 0: OFDM, 1: CCK, 2 : HT, 3: VHT, 4..7 reserved */
+	uint32 nss		:2; /* 1 : 1x1, 2: 2x2, 3: 3x3, 4: 4x4 */
+	uint32 bw		:3; /* 0: 20Mhz, 1: 40Mhz, 2: 80Mhz, 3: 160Mhz */
+	/* OFDM/CCK rate code would be as per IEEE std in the unit of 0.5 mb
+	* HT/VHT it would be mcs index
+	*/
+	uint32 rateMcsIdx :8;
+	uint32 reserved :16; /* reserved */
+	uint32 bitrate;	/* unit of 100 Kbps */
+} wifi_rate_t;
+
+typedef struct rtt_target_info {
+	struct ether_addr addr;
+	rtt_type_t type; /* rtt_type */
+	rtt_peer_type_t peer; /* peer type */
+	wifi_channel_info_t channel; /* channel information */
+	chanspec_t chanspec; /* chanspec for channel */
+	bool	disable; /* disable for RTT measurement */
+	/*
+	* Time interval between bursts (units: 100 ms).
+	* Applies to 1-sided and 2-sided RTT multi-burst requests.
+	* Range: 0-31, 0: no preference by initiator (2-sided RTT)
+	*/
+	uint32	burst_period;
+	/*
+	* Total number of RTT bursts to be executed. It will be
+	* specified in the same way as the parameter "Number of
+	* Burst Exponent" found in the FTM frame format. It
+	* applies to both: 1-sided RTT and 2-sided RTT. Valid
+	* values are 0 to 15 as defined in 802.11mc std.
+	* 0 means single shot
+	* The implication of this parameter on the maximum
+	* number of RTT results is the following:
+	* for 1-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst)
+	* for 2-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst - 1)
+	*/
+	uint16 num_burst;
+	/*
+	* num of frames per burst.
+	* Minimum value = 1, Maximum value = 31
+	* For 2-sided this equals the number of FTM frames
+	* to be attempted in a single burst. This also
+	* equals the number of FTM frames that the
+	* initiator will request that the responder send
+	* in a single frame
+	*/
+	uint32 num_frames_per_burst;
+	/* num of frames in each RTT burst:
+	 * for 1-sided RTT, measurement result num = frame number
+	 * for 2-sided RTT, measurement result num = frame number - 1
+	 */
+	uint32 num_retries_per_ftm; /* retry count for RTT measurement frames */
+	/* the following fields are only valid for 2-sided RTT */
+	uint32 num_retries_per_ftmr;
+	uint8  LCI_request;
+	uint8  LCR_request;
+	/*
+	* Applies to 1-sided and 2-sided RTT. Valid values will
+	* be 2-11 and 15 as specified by the 802.11mc std for
+	* the FTM parameter burst duration. In a multi-burst
+	* request, if the responder overrides with a larger value,
+	* the initiator will return failure. In a single-burst
+	* request, if the responder overrides with a larger value,
+	* the initiator will send TMR_STOP to terminate RTT
+	* at the end of the burst_duration it requested.
+	*/
+	uint32 burst_duration;
+	uint8  preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */
+	uint8  bw;  /* 5, 10, 20, 40, 80, 160 */
+} rtt_target_info_t;
+
+typedef struct rtt_config_params {
+	int8 rtt_target_cnt;
+	rtt_target_info_t *target_info;
+} rtt_config_params_t;
+
+typedef struct rtt_status_info {
+	dhd_pub_t *dhd;
+	int8 status;   /* current status for the current entry */
+	int8 txchain; /* current device tx chain */
+	int8 mpc; /* indicate we change mpc mode */
+	int pm; /* to save current value of pm */
+	int8 pm_restore; /* flag to reset the old value of pm */
+	int8 cur_idx; /* current entry to do RTT */
+	bool all_cancel; /* cancel all requests once we get a cancel request */
+	uint32 flags; /* indicate whether device is configured as initiator or target */
+	struct capability {
+		int32 proto     :8;
+		int32 feature   :8;
+		int32 preamble  :8;
+		int32 bw        :8;
+	} rtt_capa; /* rtt capability */
+	struct mutex rtt_mutex;
+	rtt_config_params_t rtt_config;
+	struct work_struct work;
+	struct list_head noti_fn_list;
+	struct list_head rtt_results_cache; /* store results for RTT */
+} rtt_status_info_t;
+
+typedef struct rtt_report {
+	struct ether_addr addr;
+	unsigned int burst_num; /* # of burst inside a multi-burst request */
+	unsigned int ftm_num; /* total RTT measurement frames attempted */
+	unsigned int success_num; /* total successful RTT measurement frames */
+	uint8  num_per_burst_peer; /* max number of FTMs per burst that the peer supports */
+	rtt_reason_t status; /* ranging status */
+	/* in s, 11mc only, only for RTT_REASON_FAIL_BUSY_TRY_LATER, 1- 31s */
+	uint8 retry_after_duration;
+	rtt_type_t type; /* rtt type */
+	wifi_rssi_rtt  rssi; /* average rssi in 0.5 dB steps e.g. 143 implies -71.5 dB */
+	wifi_rssi_rtt  rssi_spread; /* rssi spread in 0.5 dB steps e.g. 5 implies a 2.5 dB spread */
+	/*
+	* 1-sided RTT: TX rate of RTT frame.
+	* 2-sided RTT: TX rate of initiator's Ack in response to FTM frame.
+	*/
+	wifi_rate_t tx_rate;
+	/*
+	* 1-sided RTT: TX rate of Ack from other side.
+	* 2-sided RTT: TX rate of FTM frame coming from responder.
+	*/
+	wifi_rate_t rx_rate;
+	wifi_timespan rtt;	/*  round trip time in 0.1 nanoseconds */
+	wifi_timespan rtt_sd;	/* rtt standard deviation in 0.1 nanoseconds */
+	wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */
+	int distance; /* distance in cm (optional) */
+	int distance_sd; /* standard deviation in cm (optional) */
+	int distance_spread; /* difference between max and min distance recorded (optional) */
+	wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */
+	int burst_duration; /* in ms, how long the FW takes to finish one burst measurement */
+	int negotiated_burst_num; /* Number of bursts allowed by the responder */
+	bcm_tlv_t *LCI; /* LCI Report */
+	bcm_tlv_t *LCR; /* Location Civic Report */
+} rtt_report_t;
+#define RTT_REPORT_SIZE (sizeof(rtt_report_t))
+
+/* rtt_results_header to maintain rtt result list per mac address */
+typedef struct rtt_results_header {
+	struct ether_addr peer_mac;
+	uint32 result_cnt;
+	uint32 result_tot_len; /* sum of report_len of rtt_result */
+	struct list_head list;
+	struct list_head result_list;
+} rtt_results_header_t;
+
+/* rtt_result to link all of rtt_report */
+typedef struct rtt_result {
+	struct list_head list;
+	struct rtt_report report;
+	int32 report_len; /* total length of rtt_report */
+} rtt_result_t;
+
+/* RTT Capabilities */
+typedef struct rtt_capabilities {
+	uint8 rtt_one_sided_supported;  /* if 1-sided rtt data collection is supported */
+	uint8 rtt_ftm_supported;        /* if ftm rtt data collection is supported */
+	uint8 lci_support;		/* location configuration information */
+	uint8 lcr_support;		/* Civic Location */
+	uint8 preamble_support;         /* bit mask indicate what preamble is supported */
+	uint8 bw_support;               /* bit mask indicate what BW is supported */
+} rtt_capabilities_t;
+
+
+/* RTT responder information */
+typedef struct wifi_rtt_responder {
+	wifi_channel_info channel;   /* channel of responder */
+	uint8 preamble;             /* preamble supported by responder */
+} wifi_rtt_responder_t;
+
+typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data);
+/* Linux wrapper to call common dhd_rtt_set_cfg */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf);
+
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt);
+
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx,
+	dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa);
+
+#ifdef WL_CFG80211
+int
+dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info);
+#endif /* WL_CFG80211 */
+
+int
+dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info);
+
+int
+dhd_dev_rtt_cancel_responder(struct net_device *dev);
+/* export to upper layer */
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel);
+
+int
+dhd_rtt_idx_to_burst_duration(uint idx);
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
+
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
+
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+
+int
+dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa);
+
+int
+dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info);
+
+int
+dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info);
+
+int
+dhd_rtt_cancel_responder(dhd_pub_t *dhd);
+
+int
+dhd_rtt_init(dhd_pub_t *dhd);
+
+int
+dhd_rtt_deinit(dhd_pub_t *dhd);
+#endif /* __DHD_RTT_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c
new file mode 100644
index 0000000..137fe82
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -0,0 +1,10065 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_sdio.c 705650 2017-06-19 03:00:50Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <ethernet.h>
+#include <802.1d.h>
+#include <802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+#include <dhd_config.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef BT_OVER_SDIO
+#include <dhd_bt_interface.h>
+#endif /* BT_OVER_SDIO */
+
+bool dhd_mp_halting(dhd_pub_t *dhdp);
+extern void bcmsdh_waitfor_iodrain(void *sdh);
+extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
+extern bool  bcmsdh_fatal_error(void *sdh);
+static int dhdsdio_suspend(void *context);
+static int dhdsdio_resume(void *context);
+
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME         "mem_dump"
+#endif
+
+#define QLEN		(1024) /* bulk rx and tx queue lengths */
+#define FCHI		(QLEN - 10)
+#define FCLOW		(FCHI / 2)
+#define PRIOMASK	7
+
+#define F0_BLOCK_SIZE 32
+#define TXRETRIES	2	/* # of retries for tx frames */
+#define READ_FRM_CNT_RETRIES	3
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND	50	/* Default for max rx frames in one scheduling */
+#endif
+
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND	20	/* Default for max tx frames in one scheduling */
+#endif
+
+#define DHD_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_MEMBLOCK  (32 * 1024)	/* Maximum block size used for downloading of dongle image */
+
+#define MAX_DATA_BUF	(64 * 1024)	/* Must be large enough to hold biggest possible glom */
+#define MAX_MEM_BUF	4096
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD   32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN	(SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_HDRLEN_TXGLOM	(SDPCM_HDRLEN + SDPCM_HWEXT_LEN)
+#define MAX_TX_PKTCHAIN_CNT	SDPCM_MAXGLOM_SIZE
+
+#ifdef SDTEST
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ	32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ	2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY	3000
+
+/* Maximum usec to wait for HTAVAIL to come up */
+#define DHD_WAIT_HTAVAIL	10000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* hooks for limiting threshold custom tx num in rx processing */
+#define DEFAULT_TXINRX_THRES    0
+#ifndef CUSTOM_TXINRX_THRES
+#define CUSTOM_TXINRX_THRES     DEFAULT_TXINRX_THRES
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free applicable unconditionally for sdio and sdspi.  Conditional if
+ * bufpool was present for gspi bus.
+ */
+#define PKTFREE2()		if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+
+#ifdef PKT_STATICS
+pkt_statics_t tx_statics = {0};
+#endif
+
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW
+extern unsigned int system_hw_rev;
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */
+
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192
+#define CONSOLE_BUFFER_MAX	2024
+typedef struct dhd_console {
+	uint		count;			/* Poll interval msec counter */
+	uint		log_addr;		/* Log struct address (fixed) */
+	hnd_log_t	log;			/* Log struct (host copy) */
+	uint		bufsize;		/* Size of log buffer */
+	uint8		*buf;			/* Log buffer (host copy) */
+	uint		last;			/* Last buffer read index */
+} dhd_console_t;
+
+#define	REMAP_ENAB(bus)			((bus)->remap)
+#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#define	KSO_ENAB(bus)			((bus)->kso)
+#define	SR_ENAB(bus)			((bus)->_srenab)
+#define	SLPAUTO_ENAB(bus)		((SR_ENAB(bus)) && ((bus)->_slpauto))
+
+#define	MIN_RSRC_SR			0x3
+#define	CORE_CAPEXT_ADDR_OFFSET		(0x64c)
+#define	CORE_CAPEXT_SR_SUPPORTED_MASK	(1 << 1)
+#define RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define RCTL_LOGIC_DISABLE_MASK		(1 << 27)
+
+#define	OOB_WAKEUP_ENAB(bus)		((bus)->_oobwakeup)
+#define	GPIO_DEV_SRSTATE		16	/* Host gpio16 mapped to device gpio0 SR state */
+#define	GPIO_DEV_SRSTATE_TIMEOUT	320000	/* 320ms */
+#define	GPIO_DEV_WAKEUP			17	/* Host gpio17 mapped to device gpio1 wakeup */
+#define	CC_CHIPCTRL2_GPIO1_WAKEUP	(1  << 0)
+#define	CC_CHIPCTRL3_SR_ENG_ENABLE	(1  << 2)
+#define OVERFLOW_BLKSZ512_WM		96
+#define OVERFLOW_BLKSZ512_MES		80
+
+#define CC_PMUCC3	(0x3)
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Ucode host download related macros */
+#define UCODE_DOWNLOAD_REQUEST  0xCAFECAFE
+#define UCODE_DOWNLOAD_COMPLETE 0xABCDABCD
+#endif /* DHD_UCODE_DOWNLOAD */
+
+#if defined(BT_OVER_SDIO)
+#define BTMEM_OFFSET			0x19000000
+/* BIT0 => WLAN Power UP and BIT1=> WLAN Wake */
+#define BT2WLAN_PWRUP_WAKE		0x03
+#define BT2WLAN_PWRUP_ADDR		0x640894	/* This address is specific to 43012B0 */
+
+#define BTFW_MAX_STR_LEN		600
+#define BTFW_DOWNLOAD_BLK_SIZE		(BTFW_MAX_STR_LEN/2 + 8)
+
+#define BTFW_ADDR_MODE_UNKNOWN		0
+#define BTFW_ADDR_MODE_EXTENDED		1
+#define BTFW_ADDR_MODE_SEGMENT		2
+#define BTFW_ADDR_MODE_LINEAR32		3
+
+#define BTFW_HEX_LINE_TYPE_DATA				0
+#define BTFW_HEX_LINE_TYPE_END_OF_DATA			1
+#define BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS	2
+#define BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS		4
+#define BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS	5
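+
+/* Illustrative note (assumption, not from the original sources): these line
+ * types appear to mirror Intel-HEX style records in the BT firmware file.
+ * A data record such as
+ *
+ *	:10 0100 00 214601360121470136007EFE09D21901 40
+ *	 len addr type=DATA        16 data bytes     checksum
+ *
+ * would be parsed by read_more_btbytes() into dest_addr and data_bytes, with
+ * the EXTENDED/SEGMENT/ABSOLUTE types updating the current address mode.
+ */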
+
+#endif /* defined (BT_OVER_SDIO) */
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+
+	bcmsdh_info_t	*sdh;			/* Handle for BCMSDH calls */
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+
+	sdpcmd_regs_t	*regs;			/* Registers for SDIO core */
+	uint		sdpcmrev;		/* SDIO core revision */
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Original size of RAM in SOCRAM (bytes) */
+	uint32		srmemsize;		/* Size of SRMEM */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		bus_num;		/* bus number */
+	uint32		slot_num;		/* slot ID */
+	uint32		hostintmask;	/* Copy of Host Interrupt Mask */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+
+	uint		blocksize;		/* Block size of SDIO transfers */
+	uint		roundup;		/* Max roundup limit */
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+	uint8		flowcontrol;		/* per prio flow control bitmask */
+	uint8		tx_seq;			/* Transmit sequence number (next) */
+	uint8		tx_max;			/* Maximum transmit sequence allowed */
+
+	uint8		hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+	uint8		*rxhdr;			/* Header of current rx frame (in hdrbuf) */
+	uint16		nextlen;		/* Next Read Len from last header */
+	uint8		rx_seq;			/* Receive sequence number (expected) */
+	bool		rxskip;			/* Skip receive (awaiting NAK ACK) */
+
+	void		*glomd;			/* Packet containing glomming descriptor */
+	void		*glom;			/* Packet chain for glommed superframe */
+	uint		glomerr;		/* Glom packet read errors */
+
+	uint8		*rxbuf;			/* Buffer for receiving control packets */
+	uint		rxblen;			/* Allocated length of rxbuf */
+	uint8		*rxctl;			/* Aligned pointer into rxbuf */
+	uint8		*databuf;		/* Buffer for receiving big glom packet */
+	uint8		*dataptr;		/* Aligned pointer into databuf */
+	uint		rxlen;			/* Length of valid data in buffer */
+
+	uint8		sdpcm_ver;		/* Bus protocol reported by dongle */
+
+	bool		intr;			/* Use interrupts */
+	bool		poll;			/* Use polling */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint 		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+	uint		spurious;		/* Count of spurious interrupts */
+	uint		pollrate;		/* Ticks between device polls */
+	uint		polltick;		/* Tick counter */
+	uint		pollcnt;		/* Count of active polls */
+
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+
+	uint		regfails;		/* Count of R_REG/W_REG failures */
+
+	uint		clkstate;		/* State of sd and backplane clock(s) */
+	bool		activity;		/* Activity flag for clock down */
+	int32		idletime;		/* Control for activity timeout */
+	int32		idlecount;		/* Activity timeout counter */
+	int32		idleclock;		/* How to set bus driver when idle */
+	int32		sd_divisor;		/* Speed control to bus driver */
+	int32		sd_mode;		/* Mode control to bus driver */
+	int32		sd_rxchain;		/* If bcmsdh api accepts PKT chains */
+	bool		use_rxchain;		/* If dhd should use PKT chains */
+	bool		sleeping;		/* Is SDIO bus sleeping? */
+#if defined(SUPPORT_P2P_GO_PS)
+	wait_queue_head_t bus_sleep;
+#endif /* SUPPORT_P2P_GO_PS */
+	bool		ctrl_wait;
+	wait_queue_head_t ctrl_tx_wait;
+	uint		rxflow_mode;		/* Rx flow control mode */
+	bool		rxflow;			/* Is rx flow control on */
+	uint		prev_rxlim_hit;		/* Is prev rx limit exceeded (per dpc schedule) */
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happens in rxbuf or lb-pool */
+	bool		usebufpool;
+	int32		txinrx_thres;	/* num of in-queued pkts */
+	int32		dotxinrx;	/* tx first in dhdsdio_readframes */
+#ifdef SDTEST
+	/* external loopback */
+	bool		ext_loop;
+	uint8		loopid;
+
+	/* pktgen configuration */
+	uint		pktgen_freq;		/* Ticks between bursts */
+	uint		pktgen_count;		/* Packets to send each burst */
+	uint		pktgen_print;		/* Bursts between count displays */
+	uint		pktgen_total;		/* Stop after this many */
+	uint		pktgen_minlen;		/* Minimum packet data len */
+	uint		pktgen_maxlen;		/* Maximum packet data len */
+	uint		pktgen_mode;		/* Configured mode: tx, rx, or echo */
+	uint		pktgen_stop;		/* Number of tx failures causing stop */
+
+	/* active pktgen fields */
+	uint		pktgen_tick;		/* Tick counter for bursts */
+	uint		pktgen_ptick;		/* Burst counter for printing */
+	uint		pktgen_sent;		/* Number of test packets generated */
+	uint		pktgen_rcvd;		/* Number of test packets received */
+	uint		pktgen_prev_time;	/* Time at which previous stats were printed */
+	uint		pktgen_prev_sent;	/* Number of test packets generated when
+						 * previous stats were printed
+						 */
+	uint		pktgen_prev_rcvd;	/* Number of test packets received when
+						 * previous stats were printed
+						 */
+	uint		pktgen_fail;		/* Number of failed send attempts */
+	uint16		pktgen_len;		/* Length of next packet to send */
+#define PKTGEN_RCV_IDLE     (0)
+#define PKTGEN_RCV_ONGOING  (1)
+	uint16		pktgen_rcv_state;		/* receive state */
+	uint		pktgen_rcvd_rcvsession;	/* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+	/* Some additional counters */
+	uint		tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint		fcqueued;		/* Tx packets that got queued */
+	uint		rxrtx;			/* Count of rtx requests (NAK to dongle) */
+	uint		rx_toolong;		/* Receive frames too long to receive */
+	uint		rxc_errors;		/* SDIO errors when reading control frames */
+	uint		rx_hdrfail;		/* SDIO errors on header reads */
+	uint		rx_badhdr;		/* Bad received headers (roosync?) */
+	uint		rx_badseq;		/* Mismatched rx sequence number */
+	uint		fc_rcvd;		/* Number of flow-control events received */
+	uint		fc_xoff;		/* Number which turned on flow-control */
+	uint		fc_xon;			/* Number which turned off flow-control */
+	uint		rxglomfail;		/* Failed deglom attempts */
+	uint		rxglomframes;		/* Number of glom frames (superframes) */
+	uint		rxglompkts;		/* Number of packets from glom frames */
+	uint		f2rxhdrs;		/* Number of header reads */
+	uint		f2rxdata;		/* Number of frame data reads */
+	uint		f2txdata;		/* Number of f2 frame writes */
+	uint		f1regdata;		/* Number of f1 register accesses */
+	wake_counts_t	wake_counts;		/* Wake up counter */
+#ifdef DHDENABLE_TAILPAD
+	uint		tx_tailpad_chain;	/* Number of tail padding by chaining pad_pkt */
+	uint		tx_tailpad_pktget;	/* Number of tail padding by new PKTGET */
+#endif /* DHDENABLE_TAILPAD */
+	uint8		*ctrl_frame_buf;
+	uint32		ctrl_frame_len;
+	bool		ctrl_frame_stat;
+	uint32		rxint_mode;	/* rx interrupt mode */
+	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+					 * Available with socram rev 16
+					 * Remap region not DMA-able
+					 */
+	bool		kso;
+	bool		_slpauto;
+	bool		_oobwakeup;
+	bool		_srenab;
+	bool        readframes;
+	bool        reqbussleep;
+	uint32		resetinstr;
+	uint32		dongle_ram_base;
+
+	void		*glom_pkt_arr[SDPCM_MAXGLOM_SIZE];	/* Array of pkts for glomming */
+	uint32		txglom_cnt;	/* Number of pkts in the glom array */
+	uint32		txglom_total_len;	/* Total length of pkts in glom array */
+	bool		txglom_enable;	/* Flag to indicate whether tx glom is enabled/disabled */
+	uint32		txglomsize;	/* Glom size limitation */
+#ifdef DHDENABLE_TAILPAD
+	void		*pad_pkt;
+#endif /* DHDENABLE_TAILPAD */
+	uint32		dongle_trap_addr; /* device trap addr location in device memory */
+#if defined(BT_OVER_SDIO)
+	char		*btfw_path;	/* module_param: path to BT firmware image */
+	uint32		bt_use_count; /* Counter that tracks whether BT is using the bus */
+#endif /* defined (BT_OVER_SDIO) */
+	uint		txglomframes;	/* Number of tx glom frames (superframes) */
+	uint		txglompkts;		/* Number of packets from tx glom frames */
+	uint8		*membuf;		/* Buffer for dhdsdio_membytes */
+} dhd_bus_t;
+
+
+/*
+ * Whenever the DHD_IDLE_IMMEDIATE condition is handled, we also have to check
+ * whether BT is active.  Instead of adding #ifdef code in all those places, a
+ * single macro check is added to the if condition that tests for
+ * DHD_IDLE_IMMEDIATE.  In non-BT-over-SDIO builds this macro always evaluates
+ * to TRUE.  In builds where BT_OVER_SDIO is enabled, it expands to a check of
+ * bt_use_count: it evaluates to 1 when bt_use_count is 0 (no other active
+ * users), and to 0 when bt_use_count is non-zero, thereby preventing the
+ * caller from executing the sleep calls.
+ */
+#ifdef BT_OVER_SDIO
+#define NO_OTHER_ACTIVE_BUS_USER(bus)		(bus->bt_use_count == 0)
+#else
+#define NO_OTHER_ACTIVE_BUS_USER(bus)		(1)
+#endif /* BT_OVER_SDIO */
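+
+/* Illustrative usage (assumed, not quoted from this driver): the idle path
+ * typically gates the immediate-sleep transition on both the idle setting and
+ * this macro, e.g.
+ *
+ *	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && NO_OTHER_ACTIVE_BUS_USER(bus))
+ *		dhdsdio_bussleep(bus, TRUE);
+ *
+ * so a build without BT_OVER_SDIO compiles down to the plain idle check.
+ */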
+
+/* clkstate */
+#define CLK_NONE	0
+#define CLK_SDONLY	1
+#define CLK_PENDING	2	/* Not used yet */
+#define CLK_AVAIL	3
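+
+/* Illustrative note (not part of the original sources): dhdsdio_clkctl() below
+ * moves between these states roughly as
+ *
+ *	CLK_NONE  --(sd clock on)-->  CLK_SDONLY  --(HT avail)-->    CLK_AVAIL
+ *	CLK_AVAIL --(HT released)-->  CLK_SDONLY  --(sd clock off)-->  CLK_NONE
+ *
+ * with CLK_PENDING used only on the interrupt-driven wait for HT availability.
+ */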
+
+#define DHD_NOPMU(dhd)	(FALSE)
+
+#if defined(BCMSDIOH_STD)
+#define BLK_64_MAXTXGLOM 20
+#endif /* BCMSDIOH_STD */
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+extern uint sd_f1_blocksize;
+
+
+#if defined(BT_OVER_SDIO)
+extern dhd_pub_t	*g_dhd_pub;
+#endif /* (BT_OVER_SDIO) */
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+int dhd_enableOOB(dhd_pub_t *dhd, bool sleep);
+
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_RAMSIZE (128 * 1024)
+int dhd_dongle_ramsize;
+
+uint dhd_doflow = TRUE;
+uint dhd_dpcpoll = FALSE;
+
+module_param(dhd_doflow, uint, 0644);
+module_param(dhd_dpcpoll, uint, 0644);
+
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static uint watermark = 8;
+static uint mesbusyctrl = 0;
+static const uint firstread = DHD_FIRSTREAD;
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+#define ALIGNMENT  4
+
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align)					\
+	do {								\
+		uintptr datalign;						\
+		datalign = (uintptr)PKTDATA((osh), (p));		\
+		datalign = ROUNDUP(datalign, (align)) - datalign;	\
+		ASSERT(datalign < (align));				\
+		ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign));	\
+		if (datalign)						\
+			PKTPULL((osh), (p), (uint)datalign);			\
+		PKTSETLEN((osh), (p), (len));				\
+	} while (0)
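+
+/* Worked example (illustrative only): with align = 4 and a packet whose data
+ * pointer happens to sit at 0x1002, ROUNDUP(0x1002, 4) - 0x1002 = 2, so
+ * PKTALIGN pulls 2 bytes to move the data pointer to 0x1004 and then trims the
+ * packet to exactly 'len' bytes; the ASSERTs require the packet to be long
+ * enough to absorb that adjustment.
+ */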
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+#if defined(BCMSDIOH_TXGLOM_EXT)
+bool
+dhdsdio_is_dataok(dhd_bus_t *bus)
+{
+	return (((uint8)(bus->tx_max - bus->tx_seq) - bus->dhd->conf->tx_max_offset > 1) &&
+		(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0));
+}
+
+uint8
+dhdsdio_get_databufcnt(dhd_bus_t *bus)
+{
+	return ((uint8)(bus->tx_max - bus->tx_seq) - 1 - bus->dhd->conf->tx_max_offset);
+}
+#endif
+
+/* To check if there's window offered */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+#define DATAOK(bus) dhdsdio_is_dataok(bus)
+#else
+#define DATAOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+#endif
+
+/* To check if there's window offered for ctrl frame */
+#define TXCTLOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Number of pkts available in dongle for data RX */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+#define DATABUFCNT(bus) dhdsdio_get_databufcnt(bus)
+#else
+#define DATABUFCNT(bus) \
+	((uint8)(bus->tx_max - bus->tx_seq) - 1)
+#endif
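+
+/* Worked example (illustrative only, using the plain non-BCMSDIOH_TXGLOM_EXT
+ * forms above): the window macros rely on modulo-256 arithmetic so the credit
+ * count stays correct across sequence-number wrap.  With tx_max = 0x02 and
+ * tx_seq = 0xfe:
+ *
+ *	(uint8)(0x02 - 0xfe) == 0x04	-> a window of four frames is open
+ *	DATAOK()    : 0x04 > 1  and (0x04 & 0x80) == 0	-> data window open
+ *	TXCTLOK()   : 0x04 != 0 and (0x04 & 0x80) == 0	-> ctrl window open
+ *	DATABUFCNT(): 0x04 - 1 == 3	-> three packet buffers left in the dongle
+ *
+ * The 0x80 test rejects the case where tx_seq has run ahead of tx_max, which
+ * would otherwise look like a huge positive window.
+ */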
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		regvar = R_REG(bus->dhd->osh, regaddr); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED " #regvar " READ, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+			regvar = 0; \
+		} \
+	} \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		W_REG(bus->dhd->osh, regaddr, regval); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) \
+			DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+	} \
+} while (0)
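+
+/* Illustrative usage (assumed, not quoted from this file): with the required
+ * local 'dhd_bus_t *bus' in scope, a retried read/write of a core register
+ * looks like
+ *
+ *	uint retries;
+ *	uint32 mbdata;
+ *	R_SDREG(mbdata, &bus->regs->tosbmailboxdata, retries);
+ *	W_SDREG(mbdata, &bus->regs->tosbmailboxdata, retries);
+ *
+ * Each access is retried up to 'retry_limit' times while bcmsdh_regfail()
+ * reports a failure, and bus->regfails is bumped for every retry taken.
+ */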
+
+#define BUS_WAKE(bus) \
+	do { \
+		bus->idlecount = 0; \
+		if ((bus)->sleeping) \
+			dhdsdio_bussleep((bus), FALSE); \
+	} while (0);
+
+/*
+ * pktavail interrupts from dongle to host can be managed in three different
+ * ways whenever there is a packet available in the dongle to transmit to host.
+ *
+ * Mode 0:	Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1:	(sdiod core rev >= 4)
+ *		Device sets a new bit in the intstatus whenever there is a packet
+ *		available in fifo.  Host can't clear this specific status bit until all the
+ *		packets are read from the FIFO.  No need to ack dongle intstatus.
+ * Mode 2:	(sdiod core rev >= 4)
+ *		Device sets a bit in the intstatus, and host acks this by writing
+ *		one to this bit.  Dongle won't generate any more packet interrupts
+ *		until the host has read all the packets from the dongle and a
+ *		subsequent read returns zero, indicating that no more packets remain.
+ *		No need to disable host ints.  Need to ack the intstatus.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT		0	/* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0	1	/* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1	2	/* from sdiod rev 4 */
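+
+/* Illustrative sketch (assumed, based only on the comment above): a receive
+ * path would typically act on the configured mode as
+ *
+ *	if (bus->rxint_mode == SDIO_DEVICE_HMB_RXINT)
+ *		-> wait for the mailbox frame indication (I_HMB_FRAME_IND)
+ *	else if (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0)
+ *		-> drain the FIFO; the status bit clears once nothing is left,
+ *		   so no explicit ack of intstatus is written
+ *	else (SDIO_DEVICE_RXDATAINT_MODE_1)
+ *		-> drain the FIFO, then ack by writing the I_XMTDATA_AVAIL bit
+ *		   back to intstatus
+ */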
+
+
+#define FRAME_AVAIL_MASK(bus) 	\
+	((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
+
+#define DHD_BUS			SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus)	((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
+#endif
+
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+#ifdef DHD_DEBUG
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+#if defined(DHD_FW_COREDUMP)
+static int dhdsdio_mem_dump(dhd_bus_t *bus);
+#endif /* DHD_FW_COREDUMP */
+static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+                                 void * regsva, uint16  devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+	bool reset_flag);
+
+static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry);
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt);
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+	, int first_frame
+#endif
+);
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt);
+
+static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+#ifdef DHD_UCODE_DOWNLOAD
+static int dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path);
+#endif /* DHD_UCODE_DOWNLOAD */
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
+static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
+static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
+static bool dhdsdio_dpc(dhd_bus_t *bus);
+static int dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len);
+static int dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode);
+static int dhdsdio_sdclk(dhd_bus_t *bus, bool on);
+static void dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp);
+static void dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp);
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT
+int dhd_get_system_rev(void);
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+#if defined(BT_OVER_SDIO)
+static int extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value);
+static int read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode,
+	uint16 * hi_addr, uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes);
+static int dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_btfw(struct dhd_bus *bus);
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef DHD_ULP
+#include <dhd_ulp.h>
+static int dhd_bus_ulp_reinit_fw(dhd_bus_t *bus);
+#endif /* DHD_ULP */
+
+#ifdef DHD_WAKE_STATUS
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh);
+int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag);
+#endif /* DHD_WAKE_STATUS */
+
+static void
+dhdsdio_tune_fifoparam(struct dhd_bus *bus)
+{
+	int err;
+	uint8 devctl, wm, mes;
+
+	if (bus->sih->buscorerev >= 15) {
+		/* See .ppt in PR for these recommended values */
+		if (bus->blocksize == 512) {
+			wm = OVERFLOW_BLKSZ512_WM;
+			mes = OVERFLOW_BLKSZ512_MES;
+		} else {
+			mes = bus->blocksize/4;
+			wm = bus->blocksize/4;
+		}
+
+		watermark = wm;
+		mesbusyctrl = mes;
+	} else {
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than the minimum required version\n",
+			bus->sih->buscorerev));
+		return;
+	}
+
+	/* Update watermark */
+	if (wm > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+
+		devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+	}
+
+	/* Update MES */
+	if (mes > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			(mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+	}
+
+	DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
+}
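+
+/* Worked example (illustrative only): with a 512-byte F2 block size the
+ * overflow WAR above programs watermark = OVERFLOW_BLKSZ512_WM (96) and
+ * mesbusyctrl = OVERFLOW_BLKSZ512_MES (80); for any other block size, e.g.
+ * 256 bytes, both fall back to blocksize/4 = 64.
+ */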
+
+static void
+dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_RAMSIZE;
+	/* Restrict the ramsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_ramsize, min_size));
+	if ((dhd_dongle_ramsize > min_size) &&
+		(dhd_dongle_ramsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_ramsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
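+
+/* Illustrative note (not from the original sources): bits 8..31 of the 32-bit
+ * backplane address are split across the ADDRLOW/MID/HIGH window registers,
+ * e.g. for address 0x18001000 the bytes written are (0x18001000 >> 8),
+ * (0x18001000 >> 16) and (0x18001000 >> 24), each masked to the bits the
+ * corresponding register actually implements; subsequent function-1 data
+ * accesses are then made relative to this window.
+ */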
+
+
+#ifdef USE_OOB_GPIO1
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+	uint32 val, addr, data;
+
+	bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	/* Set device for gpio1 wakeup */
+	bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+	val = bcmsdh_reg_read(bus->sdh, data, 4);
+	val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+	bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+	bus->_oobwakeup = TRUE;
+
+	return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+/*
+ * Query if FW is in SR mode
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+	bool cap = FALSE;
+	uint32  core_capext, addr, data;
+
+	if (bus->sih->chip == BCM43430_CHIP_ID ||
+		bus->sih->chip == BCM43018_CHIP_ID) {
+		/* check if fw initialized sr engine */
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, sr_control1);
+		if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0)
+			cap = TRUE;
+
+		return cap;
+	}
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+			addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+			data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+			bcmsdh_reg_write(bus->sdh, addr, 4, 3);
+			core_capext = bcmsdh_reg_read(bus->sdh, data, 4);
+	} else if ((bus->sih->chip == BCM4330_CHIP_ID) ||
+		(bus->sih->chip == BCM43362_CHIP_ID) ||
+		(BCM4347_CHIP(bus->sih->chip))) {
+			core_capext = FALSE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		BCM4345_CHIP(bus->sih->chip) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM43569_CHIP_ID) ||
+		(bus->sih->chip == BCM4371_CHIP_ID) ||
+		(BCM4349_CHIP(bus->sih->chip))		||
+		(bus->sih->chip == BCM4350_CHIP_ID) ||
+		(bus->sih->chip == BCM43012_CHIP_ID) ||
+		(bus->sih->chip == BCM4362_CHIP_ID)) {
+		core_capext = TRUE;
+	} else {
+		core_capext = bcmsdh_reg_read(bus->sdh,
+			si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, core_cap_ext)),
+			4);
+		core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
+	}
+	if (!(core_capext))
+		return FALSE;
+
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+		/* FIX: Should change to query SR control register instead */
+		cap = TRUE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		BCM4345_CHIP(bus->sih->chip) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM43569_CHIP_ID) ||
+		(bus->sih->chip == BCM4371_CHIP_ID) ||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		uint32 enabval = 0;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+		data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+		bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
+		enabval = bcmsdh_reg_read(bus->sdh, data, 4);
+
+		if ((bus->sih->chip == BCM4350_CHIP_ID) ||
+			BCM4345_CHIP(bus->sih->chip) ||
+			(bus->sih->chip == BCM4354_CHIP_ID) ||
+			(bus->sih->chip == BCM4358_CHIP_ID) ||
+			(bus->sih->chip == BCM43569_CHIP_ID) ||
+			(bus->sih->chip == BCM4371_CHIP_ID))
+			enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
+
+		if (enabval)
+			cap = TRUE;
+	} else {
+		data = bcmsdh_reg_read(bus->sdh,
+			si_get_pmu_reg_addr(bus->sih, OFFSETOF(chipcregs_t, retention_ctl)),
+			4);
+		if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0)
+			cap = TRUE;
+	}
+
+	return cap;
+}
+
+static int
+dhdsdio_srwar_init(dhd_bus_t *bus)
+{
+	bcmsdh_gpio_init(bus->sdh);
+
+#ifdef USE_OOB_GPIO1
+	dhdsdio_oobwakeup_init(bus);
+#endif
+
+
+	return 0;
+}
+
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
+		dhdsdio_srwar_init(bus);
+
+
+	if (bus->sih->chip == BCM43012_CHIP_ID) {
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+		val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+			1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT, &err);
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	} else {
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+		val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+			1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+		val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	}
+
+#ifdef USE_CMD14
+	/* Add CMD14 Support */
+	dhdsdio_devcap_set(bus,
+		(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+#endif /* USE_CMD14 */
+
+	if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43018_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM4339_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43012_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM4362_CHIP_ID)
+		dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC);
+
+	if (bus->sih->chip == BCM43012_CHIP_ID) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ, &err);
+	} else {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+	}
+	bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+	bus->_srenab = TRUE;
+
+	return 0;
+}
+
+/*
+ * FIX: Be sure KSO bit is enabled
+ * Currently, it's defaulting to 0 which should be 1.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	/* set flag */
+	bus->kso = TRUE;
+
+	/*
+	 * Enable KeepSdioOn (KSO) bit for normal operation
+	 * Default is 0 (4334A0) so set it. Fixed in B0.
+	 */
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+		if (err)
+			DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+	}
+
+	return 0;
+}
+
+#define KSO_DBG(x)
+#define KSO_WAIT_US 50
+#define KSO_WAIT_MS 1
+#define KSO_SLEEP_RETRY_COUNT 20
+#define KSO_WAKE_RETRY_COUNT 100
+#define ERROR_BCME_NODEVICE_MAX 1
+
+#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#ifndef CUSTOM_MAX_KSO_ATTEMPTS
+#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS
+#endif
+
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+	uint8 wr_val = 0, rd_val, cmp_val, bmask;
+	int err = 0;
+	int try_cnt = 0;
+
+	KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
+
+	wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+
+
+	/* In the case of the 43012 chip, the chip could go down immediately after
+	 * the KSO bit is cleared, so further reads of the KSO register could fail.
+	 * Bail out right after clearing the KSO bit to avoid polling it.
+	 */
+	if ((!on) && (bus->sih->chip == BCM43012_CHIP_ID)) {
+		return err;
+	}
+
+	if (on) {
+		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
+		bmask = cmp_val;
+
+		OSL_SLEEP(3);
+
+	} else {
+		/* Put device to sleep, turn off  KSO  */
+		cmp_val = 0;
+		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
+	}
+
+	do {
+		rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+		if (((rd_val & bmask) == cmp_val) && !err)
+			break;
+
+		KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err));
+
+		if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) {
+			OSL_SLEEP(KSO_WAIT_MS);
+		} else
+			OSL_DELAY(KSO_WAIT_US);
+
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+	} while (try_cnt++ < CUSTOM_MAX_KSO_ATTEMPTS);
+
+
+	if (try_cnt > 2)
+		KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+
+	if (try_cnt > CUSTOM_MAX_KSO_ATTEMPTS)  {
+		DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+	}
+
+	return err;
+}
+
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0;
+
+	if (on == FALSE) {
+
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+		dhdsdio_clk_kso_enab(bus, FALSE);
+	} else {
+		DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		dhdsdio_clk_kso_enab(bus, TRUE);
+
+		DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+			dhdsdio_sleepcsr_get(bus)));
+	}
+
+	bus->kso = on;
+	BCM_REFERENCE(err);
+
+	return 0;
+}
+
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+	int err = 0;
+	uint8 val = 0;
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+	if (err)
+		DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+	return val;
+}
+
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+	return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+	int err = 0;
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+	if (err)
+		DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+	return 0;
+}
+
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0, retry;
+	uint8 val;
+
+	retry = 0;
+	if (on == TRUE) {
+		/* Enter Sleep */
+
+		/* Be sure we request clk before going to sleep
+		 * so we can wake-up with clk request already set
+		 * else device can go back to sleep immediately
+		 */
+		if (!SLPAUTO_ENAB(bus))
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		else {
+			val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if ((val & SBSDIO_CSR_MASK) == 0) {
+				DHD_ERROR(("%s: No clock before entering sleep:0x%x\n",
+					__FUNCTION__, val));
+
+				/* Reset clock request */
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					SBSDIO_ALP_AVAIL_REQ, &err);
+				DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+					bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+			}
+		}
+
+		DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+		if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) {
+			if (sd1idle) {
+				/* Change to SD1 mode */
+				dhdsdio_set_sdmode(bus, 1);
+			}
+		}
+
+		err = dhdsdio_clk_kso_enab(bus, FALSE);
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE);  /* GPIO_1 is off */
+		}
+#endif /* USE_CMD14 */
+
+		if ((SLPAUTO_ENAB(bus)) && (bus->idleclock != DHD_IDLE_ACTIVE)) {
+			DHD_TRACE(("%s: Turnoff SD clk\n", __FUNCTION__));
+			/* Now remove the SD clock */
+			err = dhdsdio_sdclk(bus, FALSE);
+		}
+	} else {
+		/* Exit Sleep */
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
+				GPIO_DEV_SRSTATE_TIMEOUT);
+
+			if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
+				DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
+			}
+		}
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, FALSE);
+		if (SLPAUTO_ENAB(bus) && (err != 0)) {
+			OSL_DELAY(10000);
+			DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+			/* Toggle sleep to resync with host and device */
+			err = bcmsdh_sleep(bus->sdh, TRUE);
+			OSL_DELAY(10000);
+			err = bcmsdh_sleep(bus->sdh, FALSE);
+
+			if (err) {
+				OSL_DELAY(10000);
+				DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+				/* Toggle sleep to resync with host and device */
+				err = bcmsdh_sleep(bus->sdh, TRUE);
+				OSL_DELAY(10000);
+				err = bcmsdh_sleep(bus->sdh, FALSE);
+				if (err) {
+					DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+					DHD_ERROR(("%s: FATAL: Device non-response!\n",
+						__FUNCTION__));
+					err = 0;
+				}
+			}
+		}
+#else
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE);  /* GPIO_1 is on */
+		}
+		do {
+			err = dhdsdio_clk_kso_enab(bus, TRUE);
+			if (err)
+				OSL_SLEEP(10);
+		} while ((err != 0) && (++retry < 3));
+
+		if (err != 0) {
+			DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
+#ifndef BT_OVER_SDIO
+			err = 0; /* continue anyway */
+#endif /* BT_OVER_SDIO */
+		}
+
+		if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) {
+			dhdsdio_set_sdmode(bus, bus->sd_mode);
+		}
+#endif /* !USE_CMD14 */
+
+		if (err == 0) {
+			uint8 csr;
+
+			/* Wait for device ready during transition to wake-up */
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = dhdsdio_sleepcsr_get(bus)) &
+				SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
+				(SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000));
+
+			DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
+
+			if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
+				DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
+				(SBSDIO_HT_AVAIL)), (DHD_WAIT_HTAVAIL));
+
+			DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr));
+			if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) {
+				DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+		}
+	}
+
+	/* Update if successful */
+	if (err == 0)
+		bus->kso = on ? FALSE : TRUE;
+	else {
+		DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n",
+			__FUNCTION__, bus->kso, on, err));
+		if (!on && retry > 2)
+			bus->kso = FALSE;
+	}
+
+	return err;
+}
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+#define HT_AVAIL_ERROR_MAX 10
+	static int ht_avail_error = 0;
+	int err;
+	uint8 clkctl, clkreq, devctl;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	clkctl = 0;
+	sdh = bus->sdh;
+
+
+	if (!KSO_ENAB(bus))
+		return BCME_OK;
+
+	if (SLPAUTO_ENAB(bus)) {
+		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+		return BCME_OK;
+	}
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		if (err) {
+			ht_avail_error++;
+			if (ht_avail_error < HT_AVAIL_ERROR_MAX) {
+				DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
+				bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR;
+				dhd_os_send_hang_message(bus->dhd);
+			}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+			return BCME_ERROR;
+		} else {
+			ht_avail_error = 0;
+		}
+
+
+		/* Check current status */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+#if !defined(OOB_INTR_ONLY)
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			DHD_INFO(("CLKCTL: set PENDING\n"));
+			bus->clkstate = CLK_PENDING;
+			return BCME_OK;
+		} else
+#endif /* !defined (OOB_INTR_ONLY) */
+		{
+			if (bus->clkstate == CLK_PENDING) {
+				/* Cancel CA-only interrupt filter */
+				devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+				devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			}
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+			                                    SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+			          !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+		}
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+			           __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+			return BCME_ERROR;
+		}
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+		if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+			if (!SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+			}
+#endif /* !defined(BCMLXSDMMC) */
+		} else {
+			if (SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+			}
+		}
+#endif /* defined (DHD_DEBUG) */
+
+		bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+		bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+	} else {
+		clkreq = 0;
+
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		if (!SR_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+			DHD_INFO(("CLKCTL: turned OFF\n"));
+			if (err) {
+				DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+	}
+	return BCME_OK;
+}
+
+/* Change SD1/SD4 bus mode */
+static int
+dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode)
+{
+	int err;
+
+	err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+		&sd_mode, sizeof(sd_mode), TRUE);
+	if (err) {
+		DHD_ERROR(("%s: error changing sd_mode: %d\n",
+			__FUNCTION__, err));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+	int err;
+	int32 iovalue;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (on) {
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			/* Turn on clock and restore mode */
+			iovalue = 1;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Restore clock speed */
+			iovalue = bus->sd_divisor;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_SDONLY;
+	} else {
+		/* Stop or slow the SD clock itself */
+		if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+			DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+			           __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+			return BCME_ERROR;
+		}
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			iovalue = 0;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Set divisor to idle value */
+			iovalue = bus->idleclock;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_NONE;
+	}
+
+	return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+	int ret = BCME_OK;
+#ifdef DHD_DEBUG
+	uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Early exit if we're already there */
+	if (bus->clkstate == target) {
+		if (target == CLK_AVAIL) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		return ret;
+	}
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			dhdsdio_sdclk(bus, TRUE);
+		/* Now request HT Avail on the backplane */
+		ret = dhdsdio_htclk(bus, TRUE, pendok);
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		break;
+
+	case CLK_SDONLY:
+
+#ifdef BT_OVER_SDIO
+		/*
+		 * If the request is to switch off the backplane clock,
+		 * confirm that BT is inactive before doing so.  If this call
+		 * came from a non-watchdog context, the watchdog will anyway
+		 * switch the clock off again once there is nothing left to do
+		 * and BT has finished using the bus.
+		 */
+		if (bus->bt_use_count != 0) {
+			DHD_INFO(("%s(): Req CLK_SDONLY, BT is active %d not switching off \r\n",
+				__FUNCTION__, bus->bt_use_count));
+			ret = BCME_OK;
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			break;
+		}
+
+		DHD_INFO(("%s(): Request CLK_SDONLY, BT is NOT active, switching off \r\n",
+			__FUNCTION__));
+#endif /* BT_OVER_SDIO */
+
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			ret = dhdsdio_sdclk(bus, TRUE);
+		else if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		else
+			DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+			           bus->clkstate, target));
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		}
+		break;
+
+	case CLK_NONE:
+
+#ifdef BT_OVER_SDIO
+		/*
+		 * If the request is to switch off the backplane clock,
+		 * confirm that BT is inactive before doing so.  If this call
+		 * came from a non-watchdog context, the watchdog will anyway
+		 * switch the clock off again once there is nothing left to do
+		 * and BT has finished using the bus.
+		 */
+		if (bus->bt_use_count != 0) {
+			DHD_INFO(("%s(): Request CLK_NONE BT is active %d not switching off \r\n",
+				__FUNCTION__, bus->bt_use_count));
+			ret = BCME_OK;
+			break;
+		}
+
+		DHD_INFO(("%s(): Request CLK_NONE, BT is NOT active, switching off \r\n",
+			__FUNCTION__));
+#endif /* BT_OVER_SDIO */
+
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		/* Now remove the SD clock */
+		ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+		if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+		if (bus->poll == 0)
+			dhd_os_wd_timer(bus->dhd, 0);
+		break;
+	}
+#ifdef DHD_DEBUG
+	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+	return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+	int err = 0;
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+	         (sleep ? "SLEEP" : "WAKE"),
+	          (bus->sleeping ? "SLEEP" : "WAKE")));
+
+	if (bus->dhd->hang_was_sent)
+		return BCME_ERROR;
+
+	/* Done if we're already in the requested state */
+	if (sleep == bus->sleeping)
+		return BCME_OK;
+
+	/* Going to sleep: set the alarm and turn off the lights... */
+	if (sleep) {
+		/* Don't sleep if something is pending */
+#ifdef DHD_USE_IDLECOUNT
+		if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq) || bus->readframes ||
+			bus->ctrl_frame_stat)
+#else
+		if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+#endif /* DHD_USE_IDLECOUNT */
+			return BCME_BUSY;
+
+#ifdef BT_OVER_SDIO
+		/*
+		 * This hook is placed on the following assumption: the WLAN
+		 * driver attempts to go to sleep either from an active context
+		 * or from the watchdog context.  If at that moment BT is still
+		 * actively using the bus, BCME_BUSY is returned here and
+		 * bus->sleeping is left unchanged, so the caller can schedule
+		 * the watchdog again, which will retry the sleep later.
+		 *
+		 * If BT is the only (and therefore the last) user, the clock is
+		 * not switched off immediately; WLAN decides when to sleep,
+		 * i.e. from the watchdog.  If the watchdog then attempts to
+		 * switch off the clock while another WLAN context is active,
+		 * the two are in any case serialized by the sdlock.
+		 */
+		if (bus->bt_use_count != 0) {
+			DHD_INFO(("%s(): Cannot sleep BT is active \r\n", __FUNCTION__));
+			return BCME_BUSY;
+		}
+#endif /* BT_OVER_SDIO */
+
+
+		if (!SLPAUTO_ENAB(bus)) {
+			/* Disable SDIO interrupts (no longer interested) */
+			bcmsdh_intr_disable(bus->sdh);
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Tell device to start using OOB wakeup */
+			W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+			/* Turn off our contribution to the HT clock request */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+			/* Isolate the bus */
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+					SBSDIO_DEVCTL_PADS_ISO, NULL);
+		} else {
+			/* Leave interrupts enabled since device can exit sleep and
+			 * interrupt host
+			 */
+			err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+		}
+
+		/* Change state */
+		bus->sleeping = TRUE;
+#if defined(SUPPORT_P2P_GO_PS)
+		wake_up(&bus->bus_sleep);
+#endif /* SUPPORT_P2P_GO_PS */
+	} else {
+		/* Waking up: bus power up is ok, set local state */
+
+		if (!SLPAUTO_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+			/* Force pad isolation off if possible (in case power never toggled) */
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Send misc interrupt to indicate OOB not needed */
+			W_SDREG(0, &regs->tosbmailboxdata, retries);
+			if (retries <= retry_limit)
+				W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+			/* Make sure we have SD bus access */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			/* Enable interrupts again */
+			if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+				bus->intdis = FALSE;
+				bcmsdh_intr_enable(bus->sdh);
+			}
+		} else {
+			err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+#ifdef BT_OVER_SDIO
+			if (err < 0) {
+				struct net_device *net = NULL;
+				dhd_pub_t *dhd = bus->dhd;
+				net = dhd_idx2net(dhd, 0);
+				if (net != NULL) {
+					DHD_ERROR(("<<<<<< WIFI HANG by KSO Enabled failure\n"));
+					dhd_os_sdunlock(dhd);
+					net_os_send_hang_message(net);
+					dhd_os_sdlock(dhd);
+				} else {
+					DHD_ERROR(("<<<<< WIFI HANG Fail because net is NULL\n"));
+				}
+			}
+#endif /* BT_OVER_SDIO */
+		}
+
+		if (err == 0) {
+			/* Change state */
+			bus->sleeping = FALSE;
+		}
+	}
+
+	return err;
+}
+
+#ifdef BT_OVER_SDIO
+/*
+ * Call this function to Get the Clock running.
+ * Assumes that the caller holds the sdlock.
+ * bus - Pointer to the dhd_bus handle
+ * can_wait - TRUE if the caller can wait until the clock becomes ready
+ *            FALSE if the caller cannot wait
+ */
+int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait)
+{
+	int ret = BCME_ERROR;
+
+	BCM_REFERENCE(owner);
+
+	bus->bt_use_count++;
+
+	/*
+	 * We can call BUS_WAKE and clkctl multiple times; both are stateful,
+	 * and if the clock is already ON no new configuration is done
+	 */
+
+	/* Wake up the Dongle FW from SR */
+	BUS_WAKE(bus);
+
+	/*
+	 * Make sure back plane ht clk is on
+	 * CLK_AVAIL - Turn On both SD & HT clock
+	 */
+	ret = dhdsdio_clkctl(bus, CLK_AVAIL, can_wait);
+
+	DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__,
+		bus->bt_use_count));
+	return ret;
+}
+
+/*
+ * Call this function to relinquish the Clock.
+ * Assumes that the caller holds the sdlock.
+ * bus - Pointer to the dhd_bus handle
+ * can_wait - TRUE if the caller can wait until the clock becomes ready
+ *            FALSE if the caller cannot wait
+ */
+int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait)
+{
+	int ret = BCME_ERROR;
+
+	BCM_REFERENCE(owner);
+	BCM_REFERENCE(can_wait);
+
+	if (bus->bt_use_count == 0) {
+		DHD_ERROR(("%s(): Clocks are already turned off \r\n",
+			__FUNCTION__));
+		return ret;
+	}
+
+	bus->bt_use_count--;
+
+	/*
+	 * When the SDIO bus is shared between BT & WLAN, the clock is turned off
+	 * once the last user has relinquished it. We consider WLAN the bus master
+	 * (even when it is not active): even with WLAN off, the DHD watchdog keeps
+	 * running, so the bus watchdog is the context that puts the bus to sleep.
+	 * Refer to the dhd_bus_watchdog function.
+	 */
+
+	ret = BCME_OK;
+	DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__,
+		bus->bt_use_count));
+	return ret;
+}
+
+void dhdsdio_reset_bt_use_count(struct dhd_bus *bus)
+{
+	/* reset bt use count */
+	bus->bt_use_count = 0;
+}
+#endif /* BT_OVER_SDIO */
+
+int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size)
+{
+	int func_blk_size = function_num;
+	int bcmerr = 0;
+	int result;
+
+	bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size,
+		sizeof(int), &result, sizeof(int), IOV_GET);
+
+	if (bcmerr != BCME_OK) {
+		DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num));
+		return BCME_ERROR;
+	}
+
+	if (result != block_size) {
+		DHD_ERROR(("%s: F%d Block size set from %d to %d\n",
+			__FUNCTION__, function_num, result, block_size));
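+		/* the sd_blocksize set iovar takes the function number in the upper 16 bits
+		 * and the new block size in the lower 16 bits
+		 */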
+		func_blk_size = function_num << 16 | block_size;
+		bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
+			0, &func_blk_size, sizeof(int32), IOV_SET);
+		if (bcmerr != BCME_OK) {
+			DHD_ERROR(("%s: Set F%d Block size error\n", __FUNCTION__, function_num));
+			return BCME_ERROR;
+		}
+	}
+
+	return BCME_OK;
+}
+
+#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+	bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (enable == TRUE) {
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+	} else {
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+	}
+
+	/* Turn off our contribution to the HT clock request */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* HW_OOB || FORCE_WOWLAN */
+}
+#endif /* OOB_INTR_ONLY || FORCE_WOWLAN */
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+	int ret = BCME_ERROR;
+	osl_t *osh;
+	uint datalen, prec;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	osh = bus->dhd->osh;
+	datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+	/* Push the test header if doing loopback */
+	if (bus->ext_loop) {
+		uint8* data;
+		PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+		data = PKTDATA(osh, pkt);
+		*data++ = SDPCM_TEST_ECHOREQ;
+		*data++ = (uint8)bus->loopid++;
+		*data++ = (datalen >> 0);
+		*data++ = (datalen >> 8);
+		datalen += SDPCM_TEST_HDRLEN;
+	}
+#else /* SDTEST */
+	BCM_REFERENCE(datalen);
+#endif /* SDTEST */
+
+	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+	/* move from dhdsdio_sendfromq(), try to orphan skb early */
+	if (bus->dhd->conf->orphan_move)
+		PKTORPHAN(pkt, bus->dhd->conf->tsq);
+
+	/* Check for existing queue, current flow-control, pending event, or pending clock */
+	if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+	    (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+	    (bus->clkstate != CLK_AVAIL)) {
+		bool deq_ret;
+		int pkq_len = 0;
+
+		DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq)));
+		bus->fcqueued++;
+
+		/* Priority based enq */
+		dhd_os_sdlock_txq(bus->dhd);
+		deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (!deq_ret) {
+#ifdef PROP_TXSTATUS
+			if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
+#endif /* PROP_TXSTATUS */
+			{
+#ifdef DHDTCPACK_SUPPRESS
+				if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+					DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
+						__FUNCTION__, __LINE__));
+					dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+				}
+#endif /* DHDTCPACK_SUPPRESS */
+				dhd_txcomplete(bus->dhd, pkt, FALSE);
+				PKTFREE(osh, pkt, TRUE);
+			}
+			ret = BCME_NORESOURCE;
+		} else
+			ret = BCME_OK;
+
+		if (dhd_doflow) {
+			dhd_os_sdlock_txq(bus->dhd);
+			pkq_len = pktq_len(&bus->txq);
+			dhd_os_sdunlock_txq(bus->dhd);
+		}
+		if (dhd_doflow && pkq_len >= FCHI) {
+			bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+			wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
+				WLFC_UNSUPPORTED);
+#endif
+			if (!wlfc_enabled && dhd_doflow) {
+				dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+			}
+		}
+
+#ifdef DHD_DEBUG
+		dhd_os_sdlock_txq(bus->dhd);
+		if (pktq_plen(&bus->txq, prec) > qcount[prec])
+			qcount[prec] = pktq_plen(&bus->txq, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+#endif
+
+		/* Schedule DPC if needed to send queued packet(s) */
+		if (dhd_deferred_tx && !bus->dpc_sched) {
+			if (bus->dhd->conf->deferred_tx_len) {
+				if (dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
+					bus->dpc_sched = TRUE;
+					dhd_sched_dpc(bus->dhd);
+				}
+				if (pktq_len(&bus->txq) >= bus->dhd->conf->deferred_tx_len &&
+						dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
+					bus->dpc_sched = TRUE;
+					dhd_sched_dpc(bus->dhd);
+				}
+			} else {
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+			}
+		}
+	} else {
+		int chan = SDPCM_DATA_CHANNEL;
+
+#ifdef SDTEST
+		chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
+#endif
+		/* Lock: we're about to use shared data/code (and SDIO) */
+		dhd_os_sdlock(bus->dhd);
+
+		/* Otherwise, send it now */
+		BUS_WAKE(bus);
+		/* Make sure back plane ht clk is on, no pending allowed */
+		dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+		ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+
+		if (ret != BCME_OK)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+				NO_OTHER_ACTIVE_BUS_USER(bus)) {
+			bus->activity = FALSE;
+			dhdsdio_bussleep(bus, TRUE);
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+	}
+
+	return ret;
+}
+
+/* Align the packet data pointer and packet length to an n-byte boundary and process the
+ * packet headers; a new packet may be allocated if there is not enough head and/or tail
+ * room for padding. The caller is responsible for updating the glom size in the head
+ * packet (when glom is used).
+ *
+ * pad_pkt_len: returns the length of extra padding needed from the padding packet; this
+ * parameter is used in tx glom mode only
+ *
+ * new_pkt: out, pointer to a new packet allocated due to insufficient head room for
+ * alignment padding, NULL if not needed; the caller is responsible for freeing it
+ *
+ * return: positive value - length of the packet, including head and tail padding
+ *         negative value - error
+ */
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+	, int first_frame
+#endif
+)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int pkt_len;
+	int modulo;
+	int head_padding;
+	int tail_padding = 0;
+	uint32 swheader;
+	uint32 swhdr_offset;
+	bool alloc_new_pkt = FALSE;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+#ifdef PKT_STATICS
+	uint16 len;
+#endif
+
+	*new_pkt = NULL;
+	osh = bus->dhd->osh;
+
+#ifdef DHDTCPACK_SUPPRESS
+	if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+		DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+			__FUNCTION__, __LINE__));
+		dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Add space for the SDPCM hardware/software headers */
+	PKTPUSH(osh, pkt, sdpcm_hdrlen);
+	ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+	frame = (uint8*)PKTDATA(osh, pkt);
+	if (PKTLEN(osh, pkt) >= 100) {
+		htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12);
+		if (htsf_ts->magic == HTSFMAGIC) {
+			htsf_ts->c20 = get_cycles();
+			htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+#ifdef PKT_STATICS
+	len = (uint16)PKTLEN(osh, pkt);
+	switch (chan) {
+		case SDPCM_CONTROL_CHANNEL:
+			tx_statics.ctrl_count++;
+			tx_statics.ctrl_size += len;
+			break;
+		case SDPCM_DATA_CHANNEL:
+			tx_statics.data_count++;
+			tx_statics.data_size += len;
+			break;
+		case SDPCM_GLOM_CHANNEL:
+			tx_statics.glom_count++;
+			tx_statics.glom_size += len;
+			break;
+		case SDPCM_EVENT_CHANNEL:
+			tx_statics.event_count++;
+			tx_statics.event_size += len;
+			break;
+		case SDPCM_TEST_CHANNEL:
+			tx_statics.test_count++;
+			tx_statics.test_size += len;
+			break;
+
+		default:
+			break;
+	}
+#endif /* PKT_STATICS */
+#ifdef DHD_DEBUG
+	if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
+		tx_packets[PKTPRIO(pkt)]++;
+#endif /* DHD_DEBUG */
+
+	/* align the data pointer, allocate a new packet if there is not enough space (new
+	 * packet data pointer will be aligned thus no padding will be needed)
+	 */
+	head_padding = (uintptr)frame % DHD_SDALIGN;
+	if (PKTHEADROOM(osh, pkt) < head_padding) {
+		head_padding = 0;
+		alloc_new_pkt = TRUE;
+	} else {
+		uint cur_chain_total_len;
+		int chain_tail_padding = 0;
+
+		/* All packets need to be aligned by DHD_SDALIGN */
+		modulo = (pkt_len + head_padding) % DHD_SDALIGN;
+		tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+
+		/* Total pkt chain length needs to be aligned by block size,
+		 * unless it is a single pkt chain with total length less than one block size,
+		 * which we prefer sending by byte mode.
+		 *
+		 * Do the chain alignment here if
+		 * 1. This is the last pkt of the chain of multiple pkts or a single pkt.
+		 * 2-1. This chain is of multiple pkts, or
+		 * 2-2. This is a single pkt whose size is longer than one block size.
+		 */
+		cur_chain_total_len = prev_chain_total_len +
+			(head_padding + pkt_len + tail_padding);
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_chain_total_len % bus->blocksize;
+			chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		}
+
+#ifdef DHDENABLE_TAILPAD
+		if (PKTTAILROOM(osh, pkt) < tail_padding) {
+			/* We don't have tail room to align by DHD_SDALIGN */
+			alloc_new_pkt = TRUE;
+			bus->tx_tailpad_pktget++;
+		} else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
+			/* We have tail room for tail_padding of this pkt itself, but not for
+			 * total pkt chain alignment by block size.
+			 * Use the padding packet to avoid memory copy if applicable,
+			 * otherwise, just allocate a new pkt.
+			 */
+			if (bus->pad_pkt) {
+				*pad_pkt_len = chain_tail_padding;
+				bus->tx_tailpad_chain++;
+			} else {
+				alloc_new_pkt = TRUE;
+				bus->tx_tailpad_pktget++;
+			}
+		} else
+		/* This last pkt's tailroom is sufficient to hold both tail_padding
+		 * of the pkt itself and chain_tail_padding of total pkt chain
+		 */
+#endif /* DHDENABLE_TAILPAD */
+		tail_padding += chain_tail_padding;
+	}
+
+	DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
+		__FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));
+
+	if (alloc_new_pkt) {
+		void *tmp_pkt;
+		int newpkt_size;
+		int cur_total_len;
+
+		ASSERT(*pad_pkt_len == 0);
+
+		DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));
+
+		/* head pointer is aligned now, no padding needed */
+		head_padding = 0;
+
+		/* update the tail padding, as it depends on the head padding: since a new packet
+		 * is allocated, head padding is no longer needed and the packet length has changed
+		 */
+
+		cur_total_len = prev_chain_total_len + pkt_len;
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_total_len % bus->blocksize;
+			tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		} else {
+			modulo = pkt_len % DHD_SDALIGN;
+			tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+		}
+
+		newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
+		bus->dhd->tx_realloc++;
+		tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
+		if (tmp_pkt == NULL) {
+			DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
+			return BCME_NOMEM;
+		}
+		PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
+		bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
+		*new_pkt = tmp_pkt;
+		pkt = tmp_pkt;
+	}
+
+	if (head_padding)
+		PKTPUSH(osh, pkt, head_padding);
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	bzero(frame, head_padding + sdpcm_hdrlen);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+	/* the header has the following format
+	 * 4-byte HW frame tag: length, ~length (for glom this is the total length)
+	 *
+	 * 8-byte HW extension flags (glom mode only), as follows:
+	 *			2-byte packet length, excluding HW tag and padding
+	 *			2-byte frame channel and frame flags (e.g. next frame following)
+	 *			2-byte header length
+	 *			2-byte tail padding size
+	 *
+	 * 8-byte SW frame tags, as follows:
+	 *			4-byte flags: host tx seq, channel, data offset
+	 *			4-byte flags: TBD
+	 */
+
+	swhdr_offset = SDPCM_FRAMETAG_LEN;
+
+	/* hardware frame tag:
+	 *
+	 * in tx-glom mode, the dongle only checks the hardware frame tag in the first
+	 * packet and sees it as the total length of the glom (including tail padding);
+	 * for each packet in the glom, the packet length needs to be updated (see
+	 * PKTSETLEN below)
+	 *
+	 * in non tx-glom mode, PKTLEN still needs to include the tail padding, as it is
+	 * referred to in sdioh_request_buffer(). The tail length will be excluded in
+	 * dhdsdio_txpkt_postprocess().
+	 */
+#if defined(BCMSDIOH_TXGLOM_EXT)
+	if (bus->dhd->conf->txglom_bucket_size)
+		tail_padding = 0;
+#endif
+	*(uint16*)frame = (uint16)htol16(pkt_len);
+	*(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
+	pkt_len += tail_padding;
+
+	/* hardware extension flags */
+	if (bus->txglom_enable) {
+		uint32 hwheader1;
+		uint32 hwheader2;
+#ifdef BCMSDIOH_TXGLOM_EXT
+		uint32 act_len = pkt_len - tail_padding;
+		uint32 real_pad = 0;
+		if (bus->dhd->conf->txglom_ext && !last_chained_pkt) {
+			tail_padding = 0;
+			if (first_frame == 0) {
+				// first pkt, add pad to bucket size - recv offset
+				pkt_len = bus->dhd->conf->txglom_bucket_size - TXGLOM_RECV_OFFSET;
+			} else {
+				// add pad to bucket size
+				pkt_len = bus->dhd->conf->txglom_bucket_size;
+			}
+			swhdr_offset += SDPCM_HWEXT_LEN;
+			hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (last_chained_pkt << 24);
+			hwheader2 = (pkt_len - act_len) << 16;
+			htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+			htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+			real_pad = pkt_len - act_len;
+
+			if (PKTTAILROOM(osh, pkt) < real_pad) {
+				DHD_INFO(("%s : insufficient tailroom %d for %d real_pad\n",
+					__FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
+				if (PKTPADTAILROOM(osh, pkt, real_pad)) {
+					DHD_ERROR(("CHK1: padding error size %d\n", real_pad));
+				} else
+					frame = (uint8 *)PKTDATA(osh, pkt);
+			}
+		} else
+#endif
+		{
+			swhdr_offset += SDPCM_HWEXT_LEN;
+			hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+				(last_chained_pkt << 24);
+			hwheader2 = (tail_padding) << 16;
+			htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+			htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+		}
+	}
+	PKTSETLEN((osh), (pkt), (pkt_len));
+
+	/* software frame tags */
+	swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		| (txseq % SDPCM_SEQUENCE_WRAP) |
+		(((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + swhdr_offset);
+	htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader));
+
+	return pkt_len;
+}
+
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int data_offset;
+	int tail_padding;
+	int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);
+
+	(void)osh;
+	osh = bus->dhd->osh;
+
+	/* restore the pkt buffer pointer, but keep the header pushed by dhd_prot_hdrpush */
+	frame = (uint8*)PKTDATA(osh, pkt);
+
+	DHD_INFO(("%s PKTLEN before postprocess %d",
+		__FUNCTION__, PKTLEN(osh, pkt)));
+
+	/* PKTLEN still includes tail_padding, so exclude it.
+	 * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+	 */
+	if (bus->txglom_enable) {
+		/* txglom pkts have tail_padding length in HW ext header */
+		tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+		PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+		DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+			tail_padding, PKTLEN(osh, pkt)));
+	} else {
+		/* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+		 * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+		 * have the field for the total length of the chain.
+		 */
+		PKTSETLEN(osh, pkt, *(uint16*)frame);
+		DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+			*(uint16*)frame, PKTLEN(osh, pkt)));
+	}
+
+	data_offset = ltoh32_ua(frame + swhdr_offset);
+	data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+	/* Get rid of sdpcm header + head_padding */
+	PKTPULL(osh, pkt, data_offset);
+
+	DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+		__FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+	return BCME_OK;
+}
+
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
+{
+	int i;
+	int ret = 0;
+	osl_t *osh;
+	bcmsdh_info_t *sdh;
+	void *pkt = NULL;
+	void *pkt_chain;
+	int total_len = 0;
+	void *head_pkt = NULL;
+	void *prev_pkt = NULL;
+	int pad_pkt_len = 0;
+	int new_pkt_num = 0;
+	void *new_pkts[MAX_TX_PKTCHAIN_CNT];
+	bool wlfc_enabled = FALSE;
+
+	if (bus->dhd->dongle_reset)
+		return BCME_NOTREADY;
+
+	if (num_pkt <= 0)
+		return BCME_BADARG;
+
+	sdh = bus->sdh;
+	osh = bus->dhd->osh;
+	/* init new_pkts[0] to keep the compiler happy; not strictly necessary as we check new_pkt_num */
+	new_pkts[0] = NULL;
+
+	for (i = 0; i < num_pkt; i++) {
+		int pkt_len;
+		bool last_pkt;
+		void *new_pkt = NULL;
+
+		pkt = pkts[i];
+		ASSERT(pkt);
+		last_pkt = (i == num_pkt - 1);
+		pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
+			total_len, last_pkt, &pad_pkt_len, &new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+			, i
+#endif
+		);
+		if (pkt_len <= 0)
+			goto done;
+		if (new_pkt) {
+			pkt = new_pkt;
+			new_pkts[new_pkt_num++] = new_pkt;
+		}
+		total_len += pkt_len;
+
+		PKTSETNEXT(osh, pkt, NULL);
+		/* insert the packet into the list */
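+		/* the first packet becomes the head; each later packet is linked after the previous one */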
+		head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
+		prev_pkt = pkt;
+
+	}
+
+	/* Update the HW frame tag (total length) in the first pkt of the glom */
+	if (bus->txglom_enable) {
+		uint8 *frame;
+
+		total_len += pad_pkt_len;
+		frame = (uint8*)PKTDATA(osh, head_pkt);
+		*(uint16*)frame = (uint16)htol16(total_len);
+		*(((uint16*)frame) + 1) = (uint16)htol16(~total_len);
+
+	}
+
+#ifdef DHDENABLE_TAILPAD
+	/* if a padding packet is needed, insert it at the end of the linked list */
+	if (pad_pkt_len) {
+		PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
+		PKTSETNEXT(osh, pkt, bus->pad_pkt);
+	}
+#endif /* DHDENABLE_TAILPAD */
+
+	/* dhd_bcmsdh_send_buf ignores the buffer pointer if the packet
+	 * parameter is not NULL; for a non packet chain we pass a NULL pkt pointer
+	 * so it will take the aligned length and buffer pointer.
+	 */
+	pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;
+
+	/* if a padding packet was needed, remove it from the linked list as it is not a data pkt */
+	if (pad_pkt_len && pkt)
+		PKTSETNEXT(osh, pkt, NULL);
+
+done:
+	pkt = head_pkt;
+	while (pkt) {
+		void *pkt_next = PKTNEXT(osh, pkt);
+		PKTSETNEXT(osh, pkt, NULL);
+		dhdsdio_txpkt_postprocess(bus, pkt);
+		pkt = pkt_next;
+	}
+
+	/* new packets might be allocated due to insufficient room for padding, but we
+	 * still have to indicate the original packets to the upper layer
+	 */
+	for (i = 0; i < num_pkt; i++) {
+		pkt = pkts[i];
+		wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
+			wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
+				WLFC_UNSUPPORTED);
+		}
+#endif /* PROP_TXSTATUS */
+		if (!wlfc_enabled) {
+			PKTSETNEXT(osh, pkt, NULL);
+			dhd_txcomplete(bus->dhd, pkt, ret != 0);
+			if (free_pkt)
+				PKTFREE(osh, pkt, TRUE);
+		}
+	}
+
+	for (i = 0; i < new_pkt_num; i++)
+		PKTFREE(osh, new_pkts[i], TRUE);
+
+	return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+	uint cnt = 0;
+	uint8 tx_prec_map;
+	uint16 txpktqlen = 0;
+	uint32 intstatus = 0;
+	uint retries = 0;
+	osl_t *osh;
+	uint datalen = 0;
+	dhd_pub_t *dhd = bus->dhd;
+	sdpcmd_regs_t *regs = bus->regs;
+#ifdef DHD_LOSSLESS_ROAMING
+	uint8 *pktdata;
+	struct ether_header *eh;
+#endif /* DHD_LOSSLESS_ROAMING */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	osh = dhd->osh;
+	tx_prec_map = ~bus->flowcontrol;
+#ifdef DHD_LOSSLESS_ROAMING
+	tx_prec_map &= dhd->dequeue_prec_map;
+#endif /* DHD_LOSSLESS_ROAMING */
+	for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
+		int i;
+		int num_pkt = 1;
+		void *pkts[MAX_TX_PKTCHAIN_CNT];
+		int prec_out;
+
+		dhd_os_sdlock_txq(bus->dhd);
+		if (bus->txglom_enable) {
+			uint32 glomlimit = (uint32)bus->txglomsize;
+#if defined(BCMSDIOH_STD)
+			if (bus->blocksize == 64) {
+				glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM);
+			}
+#endif /* BCMSDIOH_STD */
+			num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit);
+			num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
+		}
+		num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
+		for (i = 0; i < num_pkt; i++) {
+			pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
+			if (!pkts[i]) {
+				DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
+					__FUNCTION__));
+				ASSERT(0);
+				break;
+			}
+#ifdef DHD_LOSSLESS_ROAMING
+			pktdata = (uint8 *)PKTDATA(osh, pkts[i]);
+#ifdef BDC
+			/* Skip BDC header */
+			pktdata += BDC_HEADER_LEN + ((struct bdc_header *)pktdata)->dataOffset;
+#endif
+			eh = (struct ether_header *)pktdata;
+			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+				uint8 prio = (uint8)PKTPRIO(pkts[i]);
+
+				/* Restore to original priority for 802.1X packet */
+				if (prio == PRIO_8021D_NC) {
+					PKTSETPRIO(pkts[i], dhd->prio_8021x);
+				}
+			}
+#endif /* DHD_LOSSLESS_ROAMING */
+			if (!bus->dhd->conf->orphan_move)
+				PKTORPHAN(pkts[i], bus->dhd->conf->tsq);
+			datalen += PKTLEN(osh, pkts[i]);
+		}
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (i == 0)
+			break;
+		if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
+			dhd->tx_errors++;
+		else {
+			dhd->dstats.tx_bytes += datalen;
+			bus->txglomframes++;
+			bus->txglompkts += num_pkt;
+		}
+		cnt += i;
+#ifdef PKT_STATICS
+		if (num_pkt) {
+			tx_statics.glom_cnt[num_pkt-1]++;
+			if (num_pkt > tx_statics.glom_max)
+				tx_statics.glom_max = num_pkt;
+		}
+#endif
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr && cnt)
+		{
+			/* Check device status, signal pending interrupt */
+			R_SDREG(intstatus, &regs->intstatus, retries);
+			bus->f2txdata++;
+			if (bcmsdh_regfail(bus->sdh))
+				break;
+			if (intstatus & bus->hostintmask)
+				bus->ipend = TRUE;
+		}
+
+	}
+
+	if (dhd_doflow) {
+		dhd_os_sdlock_txq(bus->dhd);
+		txpktqlen = pktq_len(&bus->txq);
+		dhd_os_sdunlock_txq(bus->dhd);
+	}
+
+	/* Do flow-control if needed */
+	if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
+		bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
+#endif
+		if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
+			dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+		}
+	}
+
+	return cnt;
+}
+
+static void
+dhdsdio_sendpendctl(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	int ret;
+	uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
+
+	if (bus->txglom_enable)
+		frame_seq += SDPCM_HWEXT_LEN;
+
+	if (*frame_seq != bus->tx_seq) {
+		DHD_INFO(("%s IOCTL frame seq lag detected!"
+			" frm_seq:%d != bus->tx_seq:%d, corrected\n",
+			__FUNCTION__, *frame_seq, bus->tx_seq));
+		*frame_seq = bus->tx_seq;
+	}
+
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+		NULL, NULL, NULL, 1);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+	bus->ctrl_frame_stat = FALSE;
+	dhd_wait_event_wakeup(bus->dhd);
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	static int err_nodevice = 0;
+	uint8 *frame;
+	uint16 len;
+	uint32 swheader;
+	uint8 doff = 0;
+	int ret = -1;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Back the pointer to make room for the bus header */
+	frame = msg - sdpcm_hdrlen;
+	len = (msglen += sdpcm_hdrlen);
+
+	/* Add alignment padding (optional for ctl frames) */
+	if (dhd_alignctl) {
+		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+			frame -= doff;
+			len += doff;
+			msglen += doff;
+			bzero(frame, doff + sdpcm_hdrlen);
+		}
+		ASSERT(doff < DHD_SDALIGN);
+	}
+	doff += sdpcm_hdrlen;
+
+	/* Round send length to next SDIO block */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+			len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (len & (ALIGNMENT - 1)))
+		len = ROUNDUP(len, ALIGNMENT);
+
+	ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+	/* Need to lock here to protect txseq and SDIO tx calls */
+	dhd_os_sdlock(bus->dhd);
+	if (bus->dhd->conf->txctl_tmo_fix > 0 && !TXCTLOK(bus)) {
+		bus->ctrl_wait = TRUE;
+		dhd_os_sdunlock(bus->dhd);
+		wait_event_interruptible_timeout(bus->ctrl_tx_wait, TXCTLOK(bus),
+			msecs_to_jiffies(bus->dhd->conf->txctl_tmo_fix));
+		dhd_os_sdlock(bus->dhd);
+		bus->ctrl_wait = FALSE;
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	*(uint16*)frame = htol16((uint16)msglen);
+	*(((uint16*)frame) + 1) = htol16(~msglen);
+
+	if (bus->txglom_enable) {
+		uint32 hwheader1, hwheader2;
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+				| bus->tx_seq
+				| ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN
+			+ SDPCM_HWEXT_LEN + sizeof(swheader));
+
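+		/* HW extension header: length after the frame tag in the low bits, with
+		 * bit 24 set to mark this as the last (only) frame of the glom (matching
+		 * the last_chained_pkt flag used in dhdsdio_txpkt_preprocess); the second
+		 * word carries the round-up padding length in its upper 16 bits.
+		 */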
+		hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
+		hwheader2 = (len - (msglen)) << 16;
+		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+
+		*(uint16*)frame = htol16(len);
+		*(((uint16*)frame) + 1) = htol16(~(len));
+	} else {
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		        | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+	}
+
+#ifdef DHD_ULP
+	dhd_ulp_set_path(bus->dhd, DHD_ULP_TX_CTRL);
+
+	if (!TXCTLOK(bus) || !dhd_ulp_f2_ready(bus->dhd, bus->sdh))
+#else
+	if (!TXCTLOK(bus))
+#endif
+	{
+		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+			__FUNCTION__, bus->tx_max, bus->tx_seq));
+		bus->ctrl_frame_stat = TRUE;
+		/* Send from dpc */
+		bus->ctrl_frame_buf = frame;
+		bus->ctrl_frame_len = len;
+
+		if (!bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+		if (bus->ctrl_frame_stat) {
+			dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+		}
+
+		if (bus->ctrl_frame_stat == FALSE) {
+			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+			ret = 0;
+		} else {
+			bus->dhd->txcnt_timeout++;
+			if (!bus->dhd->hang_was_sent) {
+#ifdef CUSTOMER_HW4_DEBUG
+				uint32 status, retry = 0;
+				R_SDREG(status, &bus->regs->intstatus, retry);
+				DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n",
+					__FUNCTION__, status));
+				DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n",
+					__FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate));
+#endif /* CUSTOMER_HW4_DEBUG */
+				DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+					__FUNCTION__, bus->dhd->txcnt_timeout));
+			}
+#ifdef DHD_FW_COREDUMP
+			/* Collect socram dump */
+			if (bus->dhd->memdump_enabled) {
+				/* collect core dump */
+				bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_TX;
+				dhd_os_sdunlock(bus->dhd);
+				dhd_bus_mem_dump(bus->dhd);
+				dhd_os_sdlock(bus->dhd);
+			}
+#endif /* DHD_FW_COREDUMP */
+			ret = -1;
+			bus->ctrl_frame_stat = FALSE;
+			goto done;
+		}
+	}
+
+	bus->dhd->txcnt_timeout = 0;
+	bus->ctrl_frame_stat = TRUE;
+
+	if (ret == -1) {
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+			prhex("Tx Frame", frame, len);
+		} else if (DHD_HDRS_ON()) {
+			prhex("TxHdr", frame, MIN(len, 16));
+		}
+#endif
+#ifdef PKT_STATICS
+		tx_statics.ctrl_count++;
+		tx_statics.ctrl_size += len;
+#endif
+		ret = dhd_bcmsdh_send_buffer(bus, frame, len);
+	}
+	bus->ctrl_frame_stat = FALSE;
+#ifdef DHD_ULP
+	dhd_ulp_enable_cached_sbwad(bus->dhd, bus->sdh);
+#endif /* DHD_ULP */
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+		NO_OTHER_ACTIVE_BUS_USER(bus)) {
+		bus->activity = FALSE;
+		dhdsdio_bussleep(bus, TRUE);
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	if (ret)
+		bus->dhd->tx_ctlerrs++;
+	else
+		bus->dhd->tx_ctlpkts++;
+
+	if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) {
+		return -ETIMEDOUT;
+	}
+
+	if (ret == BCME_NODEVICE)
+		err_nodevice++;
+	else
+		err_nodevice = 0;
+
+	return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? -ETIMEDOUT : -EIO : 0;
+}
+
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	static uint cnt = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, false);
+
+	dhd_os_sdlock(bus->dhd);
+	rxlen = bus->rxlen;
+	bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+	bus->rxlen = 0;
+	dhd_os_sdunlock(bus->dhd);
+
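+	/* If the wait timed out with no response, optionally reschedule the DPC (up to
+	 * ctrl_resched times) when the dongle still shows a pending mailbox interrupt or
+	 * available packets, then wait for the control frame once more.
+	 */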
+	if (bus->dhd->conf->ctrl_resched > 0 && !rxlen && timeleft == 0) {
+		cnt++;
+		if (cnt <= bus->dhd->conf->ctrl_resched) {
+			uint32 status, retry = 0;
+			R_SDREG(status, &bus->regs->intstatus, retry);
+			if ((status & I_HMB_HOST_INT) || PKT_AVAILABLE(bus, status)) {
+				DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, status=0x%x\n",
+					__FUNCTION__, cnt, status));
+				bus->ipend = TRUE;
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+
+				/* Wait until control frame is available */
+				timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, true);
+
+				dhd_os_sdlock(bus->dhd);
+				rxlen = bus->rxlen;
+				bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+				bus->rxlen = 0;
+				dhd_os_sdunlock(bus->dhd);
+			}
+		}
+	} else {
+		cnt = 0;
+	}
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+			__FUNCTION__, rxlen, msglen));
+	} else {
+		if (timeleft == 0) {
+#ifdef DHD_DEBUG
+			uint32 status, retry = 0;
+			R_SDREG(status, &bus->regs->intstatus, retry);
+			DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n",
+				__FUNCTION__, status));
+#else
+			DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#endif /* DHD_DEBUG */
+			if (!bus->dhd->dongle_trap_occured) {
+#ifdef DHD_FW_COREDUMP
+				bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
+#endif /* DHD_FW_COREDUMP */
+				dhd_os_sdlock(bus->dhd);
+				dhdsdio_checkdied(bus, NULL, 0);
+				dhd_os_sdunlock(bus->dhd);
+			}
+		} else {
+			DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+			if (!bus->dhd->dongle_trap_occured) {
+#ifdef DHD_FW_COREDUMP
+				bus->dhd->memdump_type = DUMP_TYPE_RESUMED_UNKNOWN;
+#endif /* DHD_FW_COREDUMP */
+				dhd_os_sdlock(bus->dhd);
+				dhdsdio_checkdied(bus, NULL, 0);
+				dhd_os_sdunlock(bus->dhd);
+			}
+		}
+#ifdef DHD_FW_COREDUMP
+		/* Dump the ram image */
+		if (bus->dhd->memdump_enabled && !bus->dhd->dongle_trap_occured)
+			dhdsdio_mem_dump(bus);
+#endif /* DHD_FW_COREDUMP */
+	}
+	if (timeleft == 0) {
+		if (rxlen == 0)
+			bus->dhd->rxcnt_timeout++;
+		DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__,
+			bus->dhd->rxcnt_timeout, rxlen));
+#ifdef DHD_FW_COREDUMP
+		/* collect socram dump */
+		if (bus->dhd->memdump_enabled) {
+			bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_RX;
+			dhd_bus_mem_dump(bus->dhd);
+		}
+#endif /* DHD_FW_COREDUMP */
+	} else {
+		bus->dhd->rxcnt_timeout = 0;
+	}
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
+		return -ETIMEDOUT;
+	}
+
+
+	if (bus->dhd->dongle_trap_occured)
+		return -EREMOTEIO;
+
+	return rxlen ? (int)rxlen : -EIO;
+}
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_POLLRATE,
+	IOV_SDREG,
+	IOV_SBREG,
+	IOV_SDCIS,
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+#ifdef DHD_DEBUG
+	IOV_CHECKDIED,
+	IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_SOCRAM_STATE,
+	IOV_FORCEEVEN,
+	IOV_SDIOD_DRIVE,
+	IOV_READAHEAD,
+	IOV_SDRXCHAIN,
+	IOV_ALIGNCTL,
+	IOV_SDALIGN,
+	IOV_DEVRESET,
+	IOV_CPU,
+#if defined(USE_SDIOFIFO_IOVAR)
+	IOV_WATERMARK,
+	IOV_MESBUSYCTRL,
+#endif /* USE_SDIOFIFO_IOVAR */
+#ifdef SDTEST
+	IOV_PKTGEN,
+	IOV_EXTLOOP,
+#endif /* SDTEST */
+	IOV_SPROM,
+	IOV_TXBOUND,
+	IOV_RXBOUND,
+	IOV_TXMINMAX,
+	IOV_IDLETIME,
+	IOV_IDLECLOCK,
+	IOV_SD1IDLE,
+	IOV_SLEEP,
+	IOV_DONGLEISOLATION,
+	IOV_KSO,
+	IOV_DEVSLEEP,
+	IOV_DEVCAP,
+	IOV_VARS,
+#ifdef SOFTAP
+	IOV_FWPATH,
+#endif
+	IOV_TXGLOMSIZE,
+	IOV_TXGLOMMODE,
+	IOV_HANGREPORT,
+	IOV_TXINRX_THRES,
+	IOV_SDIO_SUSPEND
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+	{"intr",	IOV_INTR,	0, 0,	IOVT_BOOL,	0 },
+	{"sleep",	IOV_SLEEP,	0, 0,	IOVT_BOOL,	0 },
+	{"pollrate",	IOV_POLLRATE,	0, 0,	IOVT_UINT32,	0 },
+	{"idletime",	IOV_IDLETIME,	0, 0,	IOVT_INT32,	0 },
+	{"idleclock",	IOV_IDLECLOCK,	0, 0,	IOVT_INT32,	0 },
+	{"sd1idle",	IOV_SD1IDLE,	0, 0,	IOVT_BOOL,	0 },
+	{"ramsize",	IOV_RAMSIZE,	0, 0,	IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0, 0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0, 0,	IOVT_BOOL,	0 },
+	{"socram_state",	IOV_SOCRAM_STATE,	0, 0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0, 0,	IOVT_BUFFER,	0 },
+	{"sdiod_drive",	IOV_SDIOD_DRIVE, 0, 0,	IOVT_UINT32,	0 },
+	{"readahead",	IOV_READAHEAD,	0, 0,	IOVT_BOOL,	0 },
+	{"sdrxchain",	IOV_SDRXCHAIN,	0, 0,	IOVT_BOOL,	0 },
+	{"alignctl",	IOV_ALIGNCTL,	0, 0,	IOVT_BOOL,	0 },
+	{"sdalign",	IOV_SDALIGN,	0, 0,	IOVT_BOOL,	0 },
+	{"devreset",	IOV_DEVRESET,	0, 0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"sdreg",	IOV_SDREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sbreg",	IOV_SBREG,	0, 0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_cis",	IOV_SDCIS,	0, 0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"forcealign",	IOV_FORCEEVEN,	0, 0,	IOVT_BOOL,	0 },
+	{"txbound",	IOV_TXBOUND,	0, 0,	IOVT_UINT32,	0 },
+	{"rxbound",	IOV_RXBOUND,	0, 0,	IOVT_UINT32,	0 },
+	{"txminmax",	IOV_TXMINMAX,	0, 0,	IOVT_UINT32,	0 },
+	{"cpu",		IOV_CPU,	0, 0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"checkdied",	IOV_CHECKDIED,	0, 0,	IOVT_BUFFER,	0 },
+	{"serial",	IOV_SERIALCONS,	0, 0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG  */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+	{"extloop",	IOV_EXTLOOP,	0, 0,	IOVT_BOOL,	0 },
+	{"pktgen",	IOV_PKTGEN,	0, 0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+#if defined(USE_SDIOFIFO_IOVAR)
+	{"watermark",	IOV_WATERMARK,	0, 0,	IOVT_UINT32,	0 },
+	{"mesbusyctrl",	IOV_MESBUSYCTRL,	0, 0,	IOVT_UINT32,	0 },
+#endif /* USE_SDIOFIFO_IOVAR */
+	{"devcap", IOV_DEVCAP,	0, 0,	IOVT_UINT32,	0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0, 0,	IOVT_UINT32,	0 },
+	{"kso",	IOV_KSO,	0, 0,	IOVT_UINT32,	0 },
+	{"devsleep", IOV_DEVSLEEP,	0, 0,	IOVT_UINT32,	0 },
+#ifdef SOFTAP
+	{"fwpath", IOV_FWPATH, 0, 0, IOVT_BUFFER, 0 },
+#endif
+	{"txglomsize", IOV_TXGLOMSIZE, 0, 0, IOVT_UINT32, 0 },
+	{"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
+	{"txinrx_thres", IOV_TXINRX_THRES, 0, 0, IOVT_INT32, 0 },
+	{"sdio_suspend", IOV_SDIO_SUSPEND, 0, 0, IOVT_UINT32, 0 },
+	{NULL, 0, 0, 0, 0, 0 }
+};
+
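+/* Print "<desc> num/div" as a fixed-point ratio with two decimal places
+ * (e.g. ", pkts/int 3.25"); prints "<desc> N/A" when div is zero.
+ */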
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+	uint q1, q2;
+
+	if (!div) {
+		bcm_bprintf(strbuf, "%s N/A", desc);
+	} else {
+		q1 = num / div;
+		q2 = (100 * (num - (q1 * div))) / div;
+		bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+	}
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_bus_t *bus = dhdp->bus;
+#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKE_EVENT_STATUS)
+	int i;
+#endif
+
+	bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+	bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+	            bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+	bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n",
+	            bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+	            bus->rxlen, bus->rx_seq);
+	bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n",
+	            bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+
+#ifdef DHD_WAKE_STATUS
+	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
+		bcmsdh_get_total_wake(bus->sdh), bus->wake_counts.rxwake,
+		bus->wake_counts.rcwake);
+#ifdef DHD_WAKE_RX_STATUS
+	bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
+		bus->wake_counts.rx_ucast, bus->wake_counts.rx_mcast,
+		bus->wake_counts.rx_bcast, bus->wake_counts.rx_arp);
+	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
+		bus->wake_counts.rx_multi_ipv4, bus->wake_counts.rx_multi_ipv6,
+		bus->wake_counts.rx_icmpv6, bus->wake_counts.rx_multi_other);
+	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
+		bus->wake_counts.rx_icmpv6_ra, bus->wake_counts.rx_icmpv6_na,
+		bus->wake_counts.rx_icmpv6_ns);
+#endif /* DHD_WAKE_RX_STATUS */
+#ifdef DHD_WAKE_EVENT_STATUS
+	for (i = 0; i < WLC_E_LAST; i++)
+		if (bus->wake_counts.rc_event[i] != 0)
+			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(i),
+				bus->wake_counts.rc_event[i]);
+	bcm_bprintf(strbuf, "\n");
+#endif /* DHD_WAKE_EVENT_STATUS */
+#endif /* DHD_WAKE_STATUS */
+
+	bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n",
+	            bus->pollrate, bus->pollcnt, bus->regfails);
+
+	bcm_bprintf(strbuf, "\nAdditional counters:\n");
+#ifdef DHDENABLE_TAILPAD
+	bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n",
+	            bus->tx_tailpad_chain, bus->tx_tailpad_pktget);
+#endif /* DHDENABLE_TAILPAD */
+	bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n",
+	            bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+	            bus->rxc_errors);
+	bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n",
+	            bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+	bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n",
+	            bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+	bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n",
+	            bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+	bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n",
+	            (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+	            bus->f2txdata, bus->f1regdata);
+	{
+		dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+		             bus->dhd->rx_packets);
+		dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+		             (bus->f2txdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+		bcm_bprintf(strbuf, "\n\n");
+	}
+
+#ifdef SDTEST
+	if (bus->pktgen_count) {
+		bcm_bprintf(strbuf, "pktgen config and count:\n");
+		bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n",
+		            bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+		            bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+		bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n",
+		            bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+	}
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+	bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+	            bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+	bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+	bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+	            bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+	dhd_dump_pct(strbuf, "Tx: glom pct", (100 * bus->txglompkts), bus->dhd->tx_packets);
+	dhd_dump_pct(strbuf, ", pkts/glom", bus->txglompkts, bus->txglomframes);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "txglomframes %u, txglompkts %u\n", bus->txglomframes, bus->txglompkts);
+	bcm_bprintf(strbuf, "\n");
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+	bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+	bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+#ifdef DHDENABLE_TAILPAD
+	bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0;
+#endif /* DHDENABLE_TAILPAD */
+	bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+	bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+	bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+	bus->txglomframes = bus->txglompkts = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+
+	pktgen.version = DHD_PKTGEN_VERSION;
+	pktgen.freq = bus->pktgen_freq;
+	pktgen.count = bus->pktgen_count;
+	pktgen.print = bus->pktgen_print;
+	pktgen.total = bus->pktgen_total;
+	pktgen.minlen = bus->pktgen_minlen;
+	pktgen.maxlen = bus->pktgen_maxlen;
+	pktgen.numsent = bus->pktgen_sent;
+	pktgen.numrcvd = bus->pktgen_rcvd;
+	pktgen.numfail = bus->pktgen_fail;
+	pktgen.mode = bus->pktgen_mode;
+	pktgen.stop = bus->pktgen_stop;
+
+	bcopy(&pktgen, arg, sizeof(pktgen));
+
+	return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+	uint oldcnt, oldmode;
+
+	bcopy(arg, &pktgen, sizeof(pktgen));
+	if (pktgen.version != DHD_PKTGEN_VERSION)
+		return BCME_BADARG;
+
+	oldcnt = bus->pktgen_count;
+	oldmode = bus->pktgen_mode;
+
+	bus->pktgen_freq = pktgen.freq;
+	bus->pktgen_count = pktgen.count;
+	bus->pktgen_print = pktgen.print;
+	bus->pktgen_total = pktgen.total;
+	bus->pktgen_minlen = pktgen.minlen;
+	bus->pktgen_maxlen = pktgen.maxlen;
+	bus->pktgen_mode = pktgen.mode;
+	bus->pktgen_stop = pktgen.stop;
+
+	bus->pktgen_tick = bus->pktgen_ptick = 0;
+	bus->pktgen_prev_time = jiffies;
+	bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+	bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+	/* Clear counts for a new pktgen (mode change, or was stopped) */
+	if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) {
+		bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0;
+		bus->pktgen_prev_rcvd = bus->pktgen_fail = 0;
+	}
+
+	return 0;
+}
+#endif /* SDTEST */
+
+static void
+dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
+{
+	uint8 enable, protect, remap;
+
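+	/* read the current socdevram settings, then write them back with the remap
+	 * flag forced to the requested value
+	 */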
+	si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+	remap = val ? TRUE : FALSE;
+	si_socdevram(bus->sih, TRUE, &enable, &protect, &remap);
+}
+
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint32 sdaddr;
+	uint dsize;
+	uint8 *pdata;
+
+	/* In remap mode, adjust address beyond socram and redirect
+	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+	 * is not backplane accessible
+	 */
+	if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
+		address -= bus->orig_ramsize;
+		address += SOCDEVRAM_BP_ADDR;
+	}
+
+	/* Determine initial transfer parameters */
+	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+	else
+		dsize = size;
+
+	/* Set the backplane window to include the start address */
+	if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+		DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+		goto xfer_done;
+	}
+
+	/* Do the transfer(s) */
+	while (size) {
+		DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+		          __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+		          (address & SBSDIO_SBWINDOW_MASK)));
+		if (dsize <= MAX_MEM_BUF) {
+			pdata = bus->membuf;
+			if (write)
+				memcpy(bus->membuf, data, dsize);
+		} else {
+			pdata = data;
+		}
+		if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, pdata, dsize))) {
+			DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+			break;
+		}
+		if (dsize <= MAX_MEM_BUF && !write)
+			memcpy(data, bus->membuf, dsize);
+
+		/* Adjust for next transfer (if any) */
+		if ((size -= dsize)) {
+			data += dsize;
+			address += dsize;
+			if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+				DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+				break;
+			}
+			sdaddr = 0;
+			dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+		}
+
+	}
+
+xfer_done:
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+		DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+			bcmsdh_cur_sbwad(bus->sdh)));
+	}
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+	uint32 addr;
+	int rv, i;
+	uint32 shaddr = 0;
+
+	if (bus->sih == NULL) {
+		if (bus->dhd && bus->dhd->dongle_reset) {
+			DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__));
+			return BCME_NOTREADY;
+		} else {
+			ASSERT(bus->dhd);
+			ASSERT(bus->sih);
+			DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+	}
+	if ((CHIPID(bus->sih->chip) == BCM43430_CHIP_ID ||
+		CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) && !dhdsdio_sr_cap(bus))
+		bus->srmemsize = 0;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	i = 0;
+	do {
+		/* Read last word in memory to determine address of sdpcm_shared structure */
+		if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
+			return rv;
+
+		addr = ltoh32(addr);
+
+		DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+		/*
+		 * Check if addr is valid.
+		 * NVRAM length at the end of memory should have been overwritten.
+		 */
+		if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+			if ((bus->srmemsize > 0) && (i++ == 0)) {
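+				/* first failure: retry once, just below the save-restore memory region */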
+				shaddr -= bus->srmemsize;
+			} else {
+				DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+					__FUNCTION__, addr));
+				return BCME_ERROR;
+			}
+		} else
+			break;
+	} while (i < 2);
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+		return rv;
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+		return BCME_OK;
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+		DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+		           "is different than sdpcm_shared version %d in dongle\n",
+		           __FUNCTION__, SDPCM_SHARED_VERSION,
+		           sh->flags & SDPCM_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX	192
+
+#ifdef DHD_DEBUG
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return 0;
+
+	if (!KSO_ENAB(bus))
+		return 0;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+#ifdef LOG_INTO_TCPDUMP
+			dhd_sendup_log(bus->dhd, line, n);
+#endif /* LOG_INTO_TCPDUMP */
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+#endif /* DHD_DEBUG */
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	char *console_buffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	sdpcm_shared_t l_sdpcm_shared;
+	struct bcmstrbuf strbuf;
+	uint32 console_ptr, console_size, console_index;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, i, addr;
+	int rv;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON())
+		return 0;
+
+	if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout; "data" is NULL.
+		 * Allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdsdio_readshared(bus, &l_sdpcm_shared)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            l_sdpcm_shared.msgtrace_addr, l_sdpcm_shared.console_addr);
+
+	if ((l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (l_sdpcm_shared.assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 l_sdpcm_shared.assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (l_sdpcm_shared.assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                   l_sdpcm_shared.assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ", l_sdpcm_shared.assert_line);
+		}
+
+		if (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+			trap_t *tr = &bus->dhd->last_trap_info;
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+			                                 l_sdpcm_shared.trap_addr,
+			                                 (uint8*)tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bus->dongle_trap_addr = ltoh32(l_sdpcm_shared.trap_addr);
+
+			dhd_bus_dump_trap_info(bus, &strbuf);
+
+			addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+				goto printbuf;
+
+			addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_size, sizeof(console_size))) < 0)
+				goto printbuf;
+
+			addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_index, sizeof(console_index))) < 0)
+				goto printbuf;
+
+			console_ptr = ltoh32(console_ptr);
+			console_size = ltoh32(console_size);
+			console_index = ltoh32(console_index);
+
+			if (console_size > CONSOLE_BUFFER_MAX ||
+				!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+				goto printbuf;
+
+			if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+				(uint8 *)console_buffer, console_size)) < 0)
+				goto printbuf;
+
+			for (i = 0, n = 0; i < console_size; i += n + 1) {
+				for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+					ch = console_buffer[(console_index + i + n) % console_size];
+					if (ch == '\n')
+						break;
+					line[n] = ch;
+				}
+
+
+				if (n > 0) {
+					if (line[n - 1] == '\r')
+						n--;
+					line[n] = 0;
+					/* Don't use DHD_ERROR macro since we print
+					 * a lot of information quickly. The macro
+					 * will truncate a lot of the printfs
+					 */
+
+					if (dhd_msg_level & DHD_ERROR_VAL)
+						printf("CONSOLE: %s\n", line);
+				}
+			}
+		}
+	}
+
+printbuf:
+	if (l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+#if defined(DHD_FW_COREDUMP)
+	if (bus->dhd->memdump_enabled && (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP)) {
+		/* Mem dump to a file on device */
+		bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
+		dhd_os_sdunlock(bus->dhd);
+		dhdsdio_mem_dump(bus);
+		dhd_os_sdlock(bus->dhd);
+	}
+#endif /* #if defined(DHD_FW_COREDUMP) */
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+
+	return bcmerror;
+}
+
+#if defined(DHD_FW_COREDUMP)
+int
+dhd_bus_mem_dump(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	if (dhdp->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s: Bus is suspended, skip mem dump\n", __FUNCTION__));
+		return 0;
+	}
+	return dhdsdio_mem_dump(bus);
+}
+
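+/*
+ * Read the entire dongle RAM over SDIO in MEMBLOCK-sized chunks into a host
+ * buffer and schedule the memdump work item that writes it out.
+ */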
+static int
+dhdsdio_mem_dump(dhd_bus_t *bus)
+{
+	int ret = 0;
+	int size;				/* Full mem size */
+	uint32 start = bus->dongle_ram_base;	/* Start address */
+	uint read_size = 0;			/* Read size of each iteration */
+	uint8 *buf = NULL, *databuf = NULL;
+
+	/* Get full mem size */
+	size = bus->ramsize;
+	buf = dhd_get_fwdump_buf(bus->dhd, size);
+	if (!buf) {
+		DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+		return -1;
+	}
+
+	dhd_os_sdlock(bus->dhd);
+	BUS_WAKE(bus);
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Read mem content */
+	DHD_ERROR(("Dump dongle memory\n"));
+	databuf = buf;
+	while (size)
+	{
+		read_size = MIN(MEMBLOCK, size);
+		if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size)))
+		{
+			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
+			ret = BCME_ERROR;
+			break;
+		}
+		/* Decrement size and increment start address */
+		size -= read_size;
+		start += read_size;
+		databuf += read_size;
+	}
+
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+		NO_OTHER_ACTIVE_BUS_USER(bus)) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	/* schedule a work queue to perform actual memdump. dhd_mem_dump() performs the job */
+	if (!ret) {
+		/* buf (the soc_ram buffer) is freed in dhd_{free,clear} */
+		dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+	}
+
+	return ret;
+}
+#endif /* DHD_FW_COREDUMP */
+
+int
+dhd_socram_dump(dhd_bus_t * bus)
+{
+#if defined(DHD_FW_COREDUMP)
+	return (dhdsdio_mem_dump(bus));
+#else
+	return -1;
+#endif
+}
+
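+/*
+ * Cache the NVRAM variable blob passed from the host in bus->vars; the blob
+ * is written into dongle RAM later by dhdsdio_write_vars().
+ */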
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->up &&
+#ifdef DHD_ULP
+		(DHD_ULP_DISABLED == dhd_ulp_get_ulp_state(bus->dhd)) &&
+#endif /* DHD_ULP */
+		1) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB		(1  << 24)
+#define CC_CHIPCTRL_JTAG_SEL			(1  << 3)
+#define CC_CHIPCTRL_GPIO_SEL				(0x3)
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334	(1  << 28)
+
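+/*
+ * Get or set the dongle serial console enable via the PMU chipcontrol
+ * registers (chip specific); returns the current enable bit, or -1 with
+ * BCME_SDIO_ERROR in *bcmerror on register access failure.
+ */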
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+	int int_val;
+	uint32 addr, data, uart_enab = 0;
+	uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
+	uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+	*bcmerror = 0;
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
+	} else if (bus->sih->chip == BCM4334_CHIP_ID ||
+		bus->sih->chip == BCM43340_CHIP_ID ||
+		bus->sih->chip == BCM43341_CHIP_ID ||
+		bus->sih->chip == BCM43342_CHIP_ID ||
+		0) {
+		if (enable) {
+			/* Moved to PMU chipcontrol 1 from 4330 */
+			int_val &= ~gpio_sel;
+			int_val |= jtag_sel;
+		} else {
+			int_val |= gpio_sel;
+			int_val &= ~jtag_sel;
+		}
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
+	}
+
+	if (!set)
+		return (int_val & uart_enab);
+	if (enable)
+		int_val |= uart_enab;
+	else
+		int_val &= ~uart_enab;
+	bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uint32 chipcontrol;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+		chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+		chipcontrol &= ~jtag_sel;
+		if (enable) {
+			chipcontrol |=  jtag_sel;
+			chipcontrol &= ~gpio_sel;
+		}
+		bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+	}
+
+	return (int_val & uart_enab);
+}
+#endif /* DHD_DEBUG */
+
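+/*
+ * Central handler for bus-level iovars: wake the bus, request the backplane
+ * clock, service the get/set, and drop the clock again on exit if the bus
+ * is configured for immediate idle.
+ */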
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+	/* Some ioctls use the bus */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	/*
+	 * Special handling for keepSdioOn: New SDIO Wake-up Mechanism
+	 */
+	if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
+		dhdsdio_clk_kso_iovar(bus, bool_val);
+		goto exit;
+	} else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
+		{
+			dhdsdio_clk_devsleep_iovar(bus, bool_val);
+			if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
+				DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
+					bus->dpc_sched));
+				if (!bus->dpc_sched) {
+					bus->dpc_sched = TRUE;
+					dhd_sched_dpc(bus->dhd);
+				}
+			}
+		}
+		goto exit;
+	}
+
+	/* Handle sleep stuff before any clock mucking */
+	if (vi->varid == IOV_SLEEP) {
+		if (IOV_ISSET(actionid)) {
+			bcmerror = dhdsdio_bussleep(bus, bool_val);
+		} else {
+			int_val = (int32)bus->sleeping;
+			bcopy(&int_val, arg, val_size);
+		}
+		goto exit;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	if (!bus->dhd->dongle_reset) {
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	}
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_INTR):
+		int_val = (int32)bus->intr;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_INTR):
+		bus->intr = bool_val;
+		bus->intdis = FALSE;
+		if (bus->dhd->up) {
+			if (bus->intr) {
+				DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+				// terence 20141207: enable intdis
+				bus->intdis = TRUE;
+				bcmsdh_intr_enable(bus->sdh);
+			} else {
+				DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+				bcmsdh_intr_disable(bus->sdh);
+			}
+		}
+		break;
+
+	case IOV_GVAL(IOV_POLLRATE):
+		int_val = (int32)bus->pollrate;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POLLRATE):
+		bus->pollrate = (uint)int_val;
+		bus->poll = (bus->pollrate != 0);
+		break;
+
+	case IOV_GVAL(IOV_IDLETIME):
+		int_val = bus->idletime;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLETIME):
+		if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->idletime = int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_IDLECLOCK):
+		int_val = (int32)bus->idleclock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLECLOCK):
+		bus->idleclock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SD1IDLE):
+		int_val = (int32)sd1idle;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SD1IDLE):
+		sd1idle = bool_val;
+		break;
+
+
+
+	case IOV_GVAL(IOV_RAMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_RAMSTART):
+		int_val = (int32)bus->dongle_ram_base;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_SDIOD_DRIVE):
+		int_val = (int32)dhd_sdiod_drive_strength;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDIOD_DRIVE):
+		dhd_sdiod_drive_strength = int_val;
+		si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+		break;
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_SOCRAM_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdsdio_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_GVAL(IOV_READAHEAD):
+		int_val = (int32)dhd_readahead;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_READAHEAD):
+		if (bool_val && !dhd_readahead)
+			bus->nextlen = 0;
+		dhd_readahead = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDRXCHAIN):
+		int_val = (int32)bus->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDRXCHAIN):
+		if (bool_val && !bus->sd_rxchain)
+			bcmerror = BCME_UNSUPPORTED;
+		else
+			bus->use_rxchain = bool_val;
+		break;
+	case IOV_GVAL(IOV_ALIGNCTL):
+		int_val = (int32)dhd_alignctl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_ALIGNCTL):
+		dhd_alignctl = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDALIGN):
+		int_val = DHD_SDALIGN;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_VARS):
+		if (bus->varsz < (uint)len)
+			bcopy(bus->vars, arg, bus->varsz);
+		else
+			bcmerror = BCME_BUFTOOSHORT;
+		break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uintptr addr;
+		uint size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = ((uintptr)bus->regs + sd_ptr->offset);
+		size = sd_ptr->func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uintptr addr;
+		uint size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = ((uintptr)bus->regs + sd_ptr->offset);
+		size = sd_ptr->func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	/* Same as above, but offset is not backplane (not SDIO core) */
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	case IOV_GVAL(IOV_SDCIS):
+	{
+		*(char *)arg = 0;
+
+		bcmstrcat(arg, "\nFunc 0\n");
+		bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 1\n");
+		bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 2\n");
+		bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		break;
+	}
+
+	case IOV_GVAL(IOV_FORCEEVEN):
+		int_val = (int32)forcealign;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FORCEEVEN):
+		forcealign = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_TXBOUND):
+		int_val = (int32)dhd_txbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXBOUND):
+		dhd_txbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_RXBOUND):
+		int_val = (int32)dhd_rxbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RXBOUND):
+		dhd_rxbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_TXMINMAX):
+		int_val = (int32)dhd_txminmax;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXMINMAX):
+		dhd_txminmax = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_SERIALCONS):
+		int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+		if (bcmerror != 0)
+			break;
+
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SERIALCONS):
+		dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+		break;
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+	case IOV_GVAL(IOV_EXTLOOP):
+		int_val = (int32)bus->ext_loop;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_EXTLOOP):
+		bus->ext_loop = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_get(bus, arg);
+		break;
+
+	case IOV_SVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_set(bus, arg);
+		break;
+#endif /* SDTEST */
+
+#if defined(USE_SDIOFIFO_IOVAR)
+	case IOV_GVAL(IOV_WATERMARK):
+		int_val = (int32)watermark;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WATERMARK):
+		watermark = (uint)int_val;
+		watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark;
+		DHD_ERROR(("Setting watermark as 0x%x.\n", watermark));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL);
+		break;
+
+	case IOV_GVAL(IOV_MESBUSYCTRL):
+		int_val = (int32)mesbusyctrl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MESBUSYCTRL):
+		mesbusyctrl = (uint)int_val;
+		mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK)
+			? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl;
+		DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			((uint8)mesbusyctrl | 0x80), NULL);
+		break;
+#endif /* USE_SDIOFIFO_IOVAR */
+
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_SVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+		           __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+		           bus->dhd->busstate));
+
+		ASSERT(bus->dhd->osh);
+		/* ASSERT(bus->cl_devid); */
+
+		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+		break;
+	/*
+	 * softap firmware is updated through module parameter or android private command
+	 */
+
+	case IOV_GVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+		/* Get its status */
+		int_val = (bool) bus->dhd->dongle_reset;
+		bcopy(&int_val, arg, val_size);
+
+		break;
+
+	case IOV_GVAL(IOV_KSO):
+		int_val = dhdsdio_sleepcsr_get(bus);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DEVCAP):
+		int_val = dhdsdio_devcap_get(bus);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DEVCAP):
+		dhdsdio_devcap_set(bus, (uint8) int_val);
+		break;
+	case IOV_GVAL(IOV_TXGLOMSIZE):
+		int_val = (int32)bus->txglomsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXGLOMSIZE):
+		if (int_val > SDPCM_MAXGLOM_SIZE) {
+			bcmerror = BCME_ERROR;
+		} else {
+			bus->txglomsize = (uint)int_val;
+		}
+		break;
+	case IOV_SVAL(IOV_HANGREPORT):
+		bus->dhd->hang_report = bool_val;
+		DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
+		break;
+
+	case IOV_GVAL(IOV_HANGREPORT):
+		int_val = (int32)bus->dhd->hang_report;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_TXINRX_THRES):
+		int_val = bus->txinrx_thres;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_TXINRX_THRES):
+		if (int_val < 0) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->txinrx_thres = int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_SDIO_SUSPEND):
+		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDIO_SUSPEND):
+		if (bool_val) { /* Suspend */
+			dhdsdio_suspend(bus);
+		}
+		else { /* Resume */
+			dhdsdio_resume(bus);
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+		NO_OTHER_ACTIVE_BUS_USER(bus)) {
+		bus->activity = FALSE;
+		dhdsdio_bussleep(bus, TRUE);
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return bcmerror;
+}
+
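+/*
+ * Write the cached NVRAM variables to the top of dongle RAM and place the
+ * length token (size in words in the lower 16 bits, its one's complement in
+ * the upper 16 bits) in the last word.
+ */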
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize, phys_size;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	// terence 20150412: fix for nvram failed to download
+	if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
+			bus->dhd->conf->chip == BCM43341_CHIP_ID) {
+		varsize = varsize ? ROUNDUP(varsize, 64) : 0;
+		varaddr = (bus->ramsize - 64) - varsize;
+	}
+
+	varaddr += bus->dongle_ram_base;
+
+	if (bus->vars) {
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+			if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+				DHD_ERROR(("PR85623WAR in place\n"));
+				varsize += 4;
+				varaddr -= 4;
+			}
+		}
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+
+		/* Write the vars list */
+		bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, varsize, varaddr));
+			return bcmerror;
+		}
+
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray) {
+			MFREE(bus->dhd->osh, vbuffer, varsize);
+			return BCME_NOMEM;
+		}
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+		/* Compare the org NVRAM with the one read from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			__FUNCTION__));
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+	phys_size += bus->dongle_ram_base;
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		phys_size, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((phys_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		varsizew = htol32(varsizew);
+	}
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
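+/*
+ * enter=TRUE: halt the ARM (or reset SOCRAM) so firmware can be downloaded.
+ * enter=FALSE: write the NVRAM vars, clear pending interrupts and release
+ * the ARM so the downloaded image boots. Always returns to the SDIOD core.
+ */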
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+	uint retries;
+	int bcmerror = 0;
+	int foundcr4 = 0;
+
+	if (!bus->sih)
+		return BCME_ERROR;
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		bus->alp_only = TRUE;
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+				foundcr4 = 1;
+			} else {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		}
+
+		if (!foundcr4) {
+			si_core_disable(bus->sih, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			si_core_reset(bus->sih, 0, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				DHD_ERROR(("%s: Failure trying to reset SOCRAM core?\n",
+				           __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			/* Disable remap for download */
+			if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, FALSE);
+
+			if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID ||
+				CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) {
+				/* Disabling Remap for SRAM_3 */
+				si_socram_set_bankpda(bus->sih, 0x3, 0x0);
+			}
+
+			/* Clear the top bit of memory */
+			if (bus->ramsize) {
+				uint32 zeros = 0;
+				if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4,
+				                     (uint8*)&zeros, 4) < 0) {
+					bcmerror = BCME_SDIO_ERROR;
+					goto fail;
+				}
+			}
+		} else {
+			/* For CR4,
+			 * Halt ARM
+			 * Remove ARM reset
+			 * Read RAM base address [0x18_0000]
+			 * [next] Download firmware
+			 * [done at else] Populate the reset vector
+			 * [done at else] Remove ARM halt
+			 */
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+		}
+	} else {
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if (!si_iscoreup(bus->sih)) {
+				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			/* Enable remap before ARM reset but after vars.
+			 * No backplane access in remap mode
+			 */
+			if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, TRUE);
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		} else {
+			/* CR4 has no SOCRAM, but has TCMs */
+			/* write vars */
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			/* write address 0 with reset instruction */
+			bcmerror = dhdsdio_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+			if (bcmerror == BCME_OK) {
+				uint32 tmp;
+
+				/* verify write */
+				bcmerror = dhdsdio_membytes(bus, FALSE, 0,
+				                            (uint8 *)&tmp, sizeof(tmp));
+
+				if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
+					DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
+					          __FUNCTION__, bus->resetinstr));
+					DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
+					          __FUNCTION__, tmp));
+					bcmerror = BCME_SDIO_ERROR;
+					goto fail;
+				}
+			}
+
+			/* now remove reset and halt and continue to run CR4 */
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to SDIOD core */
+	if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+		si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+	return bcmerror;
+}
+
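+/*
+ * Look the iovar up in the local dhdsdio table and dispatch it; if it is not
+ * found, pass it down to the bcmsdh layer and track sd_divisor/sd_mode/
+ * sd_blocksize changes that affect bus configuration.
+ */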
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+		dhd_os_sdlock(bus->dhd);
+
+		BUS_WAKE(bus);
+
+		/* Turn on clock in case SD command needs backplane */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+		/* Check for bus configuration changes of interest */
+
+		/* If it was divisor change, read the new one */
+		if (set && strcmp(name, "sd_divisor") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_divisor = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_divisor));
+			}
+		}
+		/* If it was a mode change, read the new one */
+		if (set && strcmp(name, "sd_mode") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_mode = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_mode));
+			}
+		}
+		/* Similar check for blocksize change */
+		if (set && strcmp(name, "sd_blocksize") == 0) {
+			int32 fnum = 2;
+			if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+			                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+				dhdsdio_tune_fifoparam(bus);
+			}
+		}
+		bus->roundup = MIN(max_roundup, bus->blocksize);
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+			NO_OTHER_ACTIVE_BUS_USER(bus)) {
+			bus->activity = FALSE;
+			dhdsdio_bussleep(bus, TRUE);
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
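+/*
+ * Stop the bus: mask and clear chip-level interrupts, disable SDIO function 2,
+ * flush queued tx packets and glom state, reset sequence numbers and mark
+ * the bus down.
+ */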
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;
+	int err;
+	bool wlfc_enabled = FALSE;
+	unsigned long flags;
+
+	if (!bus->dhd)
+		return;
+
+	osh = bus->dhd->osh;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitlockfree(bus->sdh);
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
+		/* If the firmware has already hung, disable any interrupts */
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->hostintmask = 0;
+		bcmsdh_intr_disable(bus->sdh);
+	} else {
+
+		BUS_WAKE(bus);
+
+		if (KSO_ENAB(bus)) {
+
+			/* Enable clock for device interrupts */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Disable and clear interrupts at the chip level also */
+			W_SDREG(0, &bus->regs->hostintmask, retries);
+			local_hostintmask = bus->hostintmask;
+			bus->hostintmask = 0;
+
+			/* Force clocks on backplane to be sure F2 interrupt propagates */
+			saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if (!err) {
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				                 (saveclk | SBSDIO_FORCE_HT), &err);
+			}
+			if (err) {
+				DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
+				            __FUNCTION__, err));
+			}
+
+			/* Turn off the bus (F2), free any pending packets */
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN,
+				SDIO_FUNC_ENABLE_1, NULL);
+
+			/* Clear any pending interrupts now that F2 is disabled */
+			W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+		}
+
+		/* Turn off the backplane clock (only) */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+		/* Change our idea of bus state */
+		DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+	}
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl to prevent access to a flushed packet
+		 * when a new packet arrives from the network stack.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		dhd_os_sdlock_txq(bus->dhd);
+		/* Clear the data packet queues */
+		pktq_flush(osh, &bus->txq, TRUE);
+		dhd_os_sdunlock_txq(bus->dhd);
+	}
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	bus->tx_max = 4;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
+#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
+extern uint sd_txglom;
+#endif
+void
+dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
+{
+	/* can't enable host txglom by default, some platforms have no
+	 * (or crappy) ADMA support and txglom will cause kernel assertions (e.g.
+	 * panda board)
+	 */
+	dhd_bus_t *bus = dhdp->bus;
+#ifdef BCMSDIOH_TXGLOM
+	uint32 rxglom;
+	int32 ret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BCMSDIOH_STD
+	if (enable)
+		enable = sd_txglom;
+#endif /* BCMSDIOH_STD */
+
+	if (enable) {
+		rxglom = 1;
+		ret = dhd_iovar(dhdp, 0, "bus:rxglom", (char *)&rxglom, sizeof(rxglom), NULL, 0,
+				TRUE);
+		if (ret >= 0)
+			bus->txglom_enable = TRUE;
+		else {
+#ifdef BCMSDIOH_STD
+			sd_txglom = 0;
+#endif /* BCMSDIOH_STD */
+			bus->txglom_enable = FALSE;
+		}
+	} else
+#endif /* BCMSDIOH_TXGLOM */
+		bus->txglom_enable = FALSE;
+	printf("%s: enable %d\n",  __FUNCTION__, bus->txglom_enable);
+	dhd_conf_set_txglom_params(bus->dhd, bus->txglom_enable);
+	bcmsdh_set_mode(bus->sdh, bus->dhd->conf->txglom_mode);
+}
+
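+/*
+ * Bring the bus up after firmware download: force the backplane clock,
+ * enable SDIO function 2 and wait for IOR2, program the host interrupt mask
+ * and move the bus to the DATA state (F2 is disabled again on failure).
+ */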
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = 0;
+	uint8 saveclk;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	if (bus->sih->chip == BCM43362_CHIP_ID) {
+		printf("%s: delay 100ms for BCM43362\n", __FUNCTION__);
+		OSL_DELAY(100000); // terence 20131209: delay for 43362
+	}
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (bus->clkstate != CLK_AVAIL) {
+		DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (!err) {
+		if (bus->sih->chip == BCM43012_CHIP_ID) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				(saveclk | SBSDIO_HT_AVAIL_REQ), &err);
+		} else {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				(saveclk | SBSDIO_FORCE_HT), &err);
+		}
+	}
+
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+	        &bus->regs->tosbmailboxdata, retries);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+	/* Give the dongle some time to do its thing and set IOR2 */
+	dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+	ready = 0;
+	while (ready != enable && !dhd_timeout_expired(&tmo))
+	        ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+	DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+	          __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (ready == enable) {
+		/* Make sure we're talking to the core. */
+		if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+			bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+		ASSERT(bus->regs != NULL);
+
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		/* corerev 4 could use the newer interrupt logic to detect the frames */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+			(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+			bus->hostintmask &= ~I_HMB_FRAME_IND;
+			bus->hostintmask |= I_XMTDATA_AVAIL;
+		}
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+		if (bus->sih->buscorerev < 15) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
+				(uint8)watermark, &err);
+		}
+
+		/* Set bus state according to enable result */
+		dhdp->busstate = DHD_BUS_DATA;
+
+		/* Need to set fn2 block size to match fn1 block size.
+		 * Requests to fn2 go through fn1.
+		 * faltwig has this code conditioned with #if !BCMSPI_ANDROID.
+		 * It would be cleaner to use the ->sdh->block_sz[fno] instead of
+		 * 64, but this layer has no access to sdh types.
+		 */
+
+		/* bcmsdh_intr_unmask(bus->sdh); */
+
+		bus->intdis = FALSE;
+		if (bus->intr) {
+			DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			bcmsdh_intr_enable(bus->sdh);
+		} else {
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+		}
+
+	} else {
+		/* Disable F2 again */
+		enable = SDIO_FUNC_ENABLE_1;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+	}
+
+	if (dhdsdio_sr_cap(bus)) {
+		dhdsdio_sr_init(bus);
+		/* Mask the chip active interrupt permanently */
+		bus->hostintmask &= ~I_CHIPACTIVE;
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+		DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n",
+		__FUNCTION__, bus->hostintmask));
+	} else {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+	}
+
+	/* If we didn't come up, turn off backplane clock */
+	if (dhdp->busstate != DHD_BUS_DATA)
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+
+	return ret;
+}
+
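+/*
+ * Recover from an rx error: optionally abort function 2, terminate the
+ * pending frame, poll the frame byte counters until the FIFO drains, and
+ * optionally NAK so the dongle retransmits.
+ */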
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+	uint16 lastrbc;
+	uint8 hi, lo;
+	int err;
+
+	DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+	           (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return;
+	}
+
+	if (abort) {
+		bcmsdh_abort(sdh, SDIO_FUNC_2);
+	}
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+	if (err) {
+		DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+		lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		if (err) {
+			DHD_ERROR(("%s: SBSDIO_FUNC1_RFRAMEBCLO cmd err\n", __FUNCTION__));
+			goto fail;
+		}
+
+		bus->f1regdata += 2;
+
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+			           __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries) {
+		DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+	} else {
+		DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+	}
+
+	if (rtx) {
+		bus->rxrtx++;
+		W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+		bus->f1regdata++;
+		if (retries <= retry_limit) {
+			bus->rxskip = TRUE;
+		}
+	}
+
+	/* Clear partial in any case */
+	bus->nextlen = 0;
+
+fail:
+	/* If we can't reach the device, signal failure */
+	if (err || bcmsdh_regfail(sdh))
+		bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
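+/*
+ * Receive the remainder of a control frame into the (optionally aligned)
+ * rxctl buffer, then point rxctl/rxlen at the payload and wake any ioctl
+ * waiters.
+ */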
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint rdlen, pad;
+
+	int sdret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Control data already received in aligned rxctl */
+	if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+		goto gotpkt;
+
+	ASSERT(bus->rxbuf);
+	/* Set rxctl for frame (w/optional alignment) */
+	bus->rxctl = bus->rxbuf;
+	if (dhd_alignctl) {
+		bus->rxctl += firstread;
+		if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+			bus->rxctl += (DHD_SDALIGN - pad);
+		bus->rxctl -= firstread;
+	}
+	ASSERT(bus->rxctl >= bus->rxbuf);
+
+	/* Copy the already-read portion over */
+	bcopy(hdr, bus->rxctl, firstread);
+	if (len <= firstread)
+		goto gotpkt;
+
+	/* Copy the full data pkt in gSPI case and process ioctl. */
+	if (bus->bus == SPI_BUS) {
+		bcopy(hdr, bus->rxctl, len);
+		goto gotpkt;
+	}
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - firstread;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->dhd->maxctl))
+			rdlen += pad;
+	} else if (rdlen % DHD_SDALIGN) {
+		rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (rdlen & (ALIGNMENT - 1)))
+		rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + firstread) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+		           __FUNCTION__, rdlen, bus->dhd->maxctl));
+		bus->dhd->rx_errors++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	if ((len - doff) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+		           __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+		bus->dhd->rx_errors++; bus->rx_toolong++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+
+	/* Read remainder of frame body into the rxctl buffer */
+	sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+	                            (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+	bus->f2rxdata++;
+	ASSERT(sdret != BCME_PENDING);
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+		bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+		dhdsdio_rxfail(bus, TRUE, TRUE);
+		goto done;
+	}
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+	if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+		prhex("RxCtrl", bus->rxctl, len);
+	}
+#endif
+
+	/* Point to valid data and indicate its length */
+	bus->rxctl += doff;
+	bus->rxlen = len - doff;
+
+done:
+	/* Awake any waiters */
+	dhd_os_ioctl_resp_wake(bus->dhd);
+}
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count);
+
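+/*
+ * Handle glomming: build the packet chain described by a glom descriptor,
+ * read the superframe (chained or via the bounce buffer), validate the
+ * superframe and subframe headers, and forward the packets to the network
+ * stack; returns the number of packets consumed.
+ */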
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+	uint16 dlen, totlen;
+	uint8 *dptr, num = 0;
+
+	uint16 sublen, check;
+	void *pfirst, *plast, *pnext;
+	void * list_tail[DHD_MAX_IFS] = { NULL };
+	void * list_head[DHD_MAX_IFS] = { NULL };
+	uint8 idx;
+	osl_t *osh = bus->dhd->osh;
+
+	int errcode;
+	uint8 chan, seq, doff, sfdoff;
+	uint8 txmax;
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+
+	int ifidx = 0;
+	bool usechain = bus->use_rxchain;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		dhd_os_sdlock_rxq(bus->dhd);
+
+		pfirst = plast = pnext = NULL;
+		dlen = (uint16)PKTLEN(osh, bus->glomd);
+		dptr = PKTDATA(osh, bus->glomd);
+		if (!dlen || (dlen & 1)) {
+			DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+			           __FUNCTION__, dlen));
+			dlen = 0;
+		}
+
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = ltoh16_ua(dptr);
+			dlen -= sizeof(uint16);
+			dptr += sizeof(uint16);
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+				           __FUNCTION__, num, sublen));
+				pnext = NULL;
+				break;
+			}
+			if (sublen % DHD_SDALIGN) {
+				DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+				           __FUNCTION__, sublen, DHD_SDALIGN));
+				usechain = FALSE;
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total is a block multiple */
+			if (!dlen) {
+				sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+				totlen = ROUNDUP(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				           __FUNCTION__, num, sublen));
+				break;
+			}
+			ASSERT(!PKTLINK(pnext));
+			if (!pfirst) {
+				ASSERT(!plast);
+				pfirst = plast = pnext;
+			} else {
+				ASSERT(plast);
+				PKTSETNEXT(osh, plast, pnext);
+				plast = pnext;
+			}
+
+			/* Adhere to start alignment requirements */
+			PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+		}
+
+		/* If all allocations succeeded, save packet chain in bus structure */
+		if (pnext) {
+			DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+			          __FUNCTION__, totlen, num));
+			if (DHD_GLOM_ON() && bus->nextlen) {
+				if (totlen != bus->nextlen) {
+					DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+					          "rxseq %d\n", __FUNCTION__, bus->nextlen,
+					          totlen, rxseq));
+				}
+			}
+			bus->glom = pfirst;
+			pfirst = pnext = NULL;
+		} else {
+			if (pfirst)
+				PKTFREE(osh, pfirst, FALSE);
+			bus->glom = NULL;
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		PKTFREE(osh, bus->glomd, FALSE);
+		bus->glomd = NULL;
+		bus->nextlen = 0;
+
+		dhd_os_sdunlock_rxq(bus->dhd);
+	}
+
+	/* Ok -- either we just generated a packet chain, or had one from before */
+	if (bus->glom) {
+		if (DHD_GLOM_ON()) {
+			DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+			for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
+				          pnext, (uint8*)PKTDATA(osh, pnext),
+				          PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+			}
+		}
+
+		pfirst = bus->glom;
+		dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  Configurable iovar to
+		 * read directly into the chained packet, or allocate a large
+		 * packet and copy into the chain.
+		 */
+		if (usechain) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+			                              dlen, pfirst, NULL, NULL);
+		} else if (bus->dataptr) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, bus->dataptr,
+			                              dlen, NULL, NULL, NULL);
+			sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+			if (sublen != dlen) {
+				DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+				           __FUNCTION__, dlen, sublen));
+				errcode = -1;
+			}
+			pnext = NULL;
+			BCM_REFERENCE(pnext);
+		} else {
+			DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+			errcode = -1;
+		}
+		bus->f2rxdata++;
+		ASSERT(errcode != BCME_PENDING);
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+			           __FUNCTION__, dlen, errcode));
+			bus->dhd->rx_errors++;
+
+			if (bus->glomerr++ < 3) {
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			return 0;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_GLOM_ON()) {
+			prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+			      MIN(PKTLEN(osh, pfirst), 48));
+		}
+#endif
+
+
+		/* Validate the superframe header */
+		dptr = (uint8 *)PKTDATA(osh, pfirst);
+		sublen = ltoh16_ua(dptr);
+		check = ltoh16_ua(dptr + sizeof(uint16));
+
+		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+		bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+		errcode = 0;
+		if ((uint16)~(sublen^check)) {
+			DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, sublen, check));
+			errcode = -1;
+		} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+			DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+			           __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+			errcode = -1;
+		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+			DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+			           SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+			errcode = -1;
+		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+			DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+			errcode = -1;
+		} else if ((doff < SDPCM_HDRLEN) ||
+		           (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+				__FUNCTION__, doff, sublen, PKTLEN(osh, pfirst),
+				SDPCM_HDRLEN));
+			errcode = -1;
+		}
+
+		/* Check sequence number of superframe SW header */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+			          __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Remove superframe header, remember offset */
+		PKTPULL(osh, pfirst, doff);
+		sfdoff = doff;
+
+		/* Validate all the subframe headers */
+		for (num = 0, pnext = pfirst; pnext && !errcode;
+		     num++, pnext = PKTNEXT(osh, pnext)) {
+			dptr = (uint8 *)PKTDATA(osh, pnext);
+			dlen = (uint16)PKTLEN(osh, pnext);
+			sublen = ltoh16_ua(dptr);
+			check = ltoh16_ua(dptr + sizeof(uint16));
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				prhex("subframe", dptr, 32);
+			}
+#endif
+
+			if ((uint16)~(sublen^check)) {
+				DHD_ERROR(("%s (subframe %d): HW hdr error: "
+				           "len/check 0x%04x/0x%04x\n",
+				           __FUNCTION__, num, sublen, check));
+				errcode = -1;
+			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+				DHD_ERROR(("%s (subframe %d): length mismatch: "
+				           "len 0x%04x, expect 0x%04x\n",
+				           __FUNCTION__, num, sublen, dlen));
+				errcode = -1;
+			} else if ((chan != SDPCM_DATA_CHANNEL) &&
+			           (chan != SDPCM_EVENT_CHANNEL)) {
+				DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+				           __FUNCTION__, num, chan));
+				errcode = -1;
+			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+				DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+				           __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+				errcode = -1;
+			}
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request a couple retries */
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				PKTPUSH(osh, pfirst, sfdoff);
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			bus->nextlen = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+		bus->glom = NULL;
+		plast = NULL;
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+			pnext = PKTNEXT(osh, pfirst);
+			PKTSETNEXT(osh, pfirst, NULL);
+
+			dptr = (uint8 *)PKTDATA(osh, pfirst);
+			sublen = ltoh16_ua(dptr);
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+			          __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+			          PKTLEN(osh, pfirst), sublen, chan, seq));
+
+			ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+			if (rxseq != seq) {
+				DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Subframe Data", dptr, dlen);
+			}
+#endif
+
+			PKTSETLEN(osh, pfirst, sublen);
+			PKTPULL(osh, pfirst, doff);
+
+			reorder_info_len = sizeof(reorder_info_buf);
+
+			if (PKTLEN(osh, pfirst) == 0) {
+				PKTFREE(bus->dhd->osh, pfirst, FALSE);
+				continue;
+			} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
+				&reorder_info_len) != 0) {
+				DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+				bus->dhd->rx_errors++;
+				PKTFREE(osh, pfirst, FALSE);
+				continue;
+			}
+			if (reorder_info_len) {
+				uint32 free_buf_count;
+				void *ppfirst;
+
+				ppfirst = pfirst;
+				/* Reordering info from the firmware */
+				dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
+					reorder_info_len, &ppfirst, &free_buf_count);
+
+				if (free_buf_count == 0) {
+					continue;
+				} else {
+					void *temp;
+
+					/*  go to the end of the chain and attach the pnext there */
+					temp = ppfirst;
+					while (PKTNEXT(osh, temp) != NULL) {
+						temp = PKTNEXT(osh, temp);
+					}
+					pfirst = temp;
+					if (list_tail[ifidx] == NULL)
+						list_head[ifidx] = ppfirst;
+					else
+						PKTSETNEXT(osh, list_tail[ifidx], ppfirst);
+					list_tail[ifidx] = pfirst;
+				}
+
+				num += (uint8)free_buf_count;
+			} else {
+				/* this packet will go up, link back into chain and count it */
+
+				if (list_tail[ifidx] == NULL) {
+					list_head[ifidx] = list_tail[ifidx] = pfirst;
+				} else {
+					PKTSETNEXT(osh, list_tail[ifidx], pfirst);
+					list_tail[ifidx] = pfirst;
+				}
+				num++;
+			}
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+				          __FUNCTION__, num, pfirst,
+				          PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+				          PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+				prhex("", (uint8 *)PKTDATA(osh, pfirst),
+				      MIN(PKTLEN(osh, pfirst), 32));
+			}
+#endif /* DHD_DEBUG */
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (list_head[idx]) {
+				void *temp;
+				uint8 cnt = 0;
+				temp = list_head[idx];
+				do {
+					temp = PKTNEXT(osh, temp);
+					cnt++;
+				} while (temp);
+				if (cnt) {
+					dhd_os_sdunlock(bus->dhd);
+					dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0);
+					dhd_os_sdlock(bus->dhd);
+#if defined(SDIO_ISR_THREAD)
+					/* terence 20150615: fix for the errors below, caused by bussleep
+					 * in the watchdog after dhd_os_sdunlock here, so call BUS_WAKE
+					 * to wake up the bus again:
+					 * dhd_bcmsdh_recv_buf: Device asleep
+					 * dhdsdio_readframes: RXHEADER FAILED: -40
+					 * dhdsdio_rxfail: abort command, terminate frame, send NAK
+					 */
+					BUS_WAKE(bus);
+#endif
+				}
+			}
+		}
+		bus->rxglomframes++;
+		bus->rxglompkts += num;
+	}
+	return num;
+}
+
+
+/* Read available frames; return the number read and set *finished when no more frames remain */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+	osl_t *osh = bus->dhd->osh;
+	bcmsdh_info_t *sdh = bus->sdh;
+
+	uint16 len, check;	/* Extracted hardware header fields */
+	uint8 chan, seq, doff;	/* Extracted software header fields */
+	uint8 fcbits;		/* Extracted fcbits from software header */
+	uint8 delta;
+
+	void *pkt;	/* Packet for event or data frames */
+	uint16 pad;	/* Number of pad bytes to read */
+	uint16 rdlen;	/* Total number of bytes to read */
+	uint8 rxseq;	/* Next sequence number to expect */
+	uint rxleft = 0;	/* Remaining number of frames allowed */
+	int sdret;	/* Return code from bcmsdh calls */
+	uint8 txmax;	/* Maximum tx sequence offered */
+	bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+	uint8 *rxbuf;
+	int ifidx = 0;
+	uint rxcount = 0; /* Total frames read */
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+	uint pkt_count;
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+	bool sdtest = FALSE;	/* To limit message spew from test mode */
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	bus->readframes = TRUE;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
+		bus->readframes = FALSE;
+		return 0;
+	}
+
+	ASSERT(maxframes);
+
+#ifdef SDTEST
+	/* Allow pktgen to override maxframes */
+	if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+		maxframes = bus->pktgen_count;
+		sdtest = TRUE;
+	}
+#endif
+
+	/* Not finished unless we encounter no more frames indication */
+	*finished = FALSE;
+
+
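+	/* Read frames until the dongle reports no more data (all-zero header),
+	 * the frame budget is exhausted, an rx NAK is outstanding (rxskip),
+	 * or the bus goes down.
+	 */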
+	for (rxseq = bus->rx_seq, rxleft = maxframes;
+	     !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+	     rxseq++, rxleft--) {
+#ifdef DHDTCPACK_SUP_DBG
+		if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) {
+			if (bus->dotxinrx == FALSE)
+				DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n",
+					__FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode));
+		}
+#ifdef DEBUG_COUNTER
+		else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) {
+			tack_tbl.cnt[bus->dotxinrx ? 6 : 7]++;
+		}
+#endif /* DEBUG_COUNTER */
+#endif /* DHDTCPACK_SUP_DBG */
+		/* tx more to improve rx performance */
+		if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+			dhdsdio_sendpendctl(bus);
+		} else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) &&
+			!bus->fcstate && DATAOK(bus) &&
+			(pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) {
+			dhdsdio_sendfromq(bus, dhd_txbound);
+#ifdef DHDTCPACK_SUPPRESS
+			/* In TCPACK_SUP_DELAYTX mode, do txinrx only if
+			 * 1. Any DATA packet to TX
+			 * 2. TCPACK to TCPDATA PSH packets.
+			 * in bus txq.
+			 */
+			bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ?
+				FALSE : TRUE;
+#endif
+		}
+
+		/* Handle glomming separately */
+		if (bus->glom || bus->glomd) {
+			uint8 cnt;
+			DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+			          __FUNCTION__, bus->glomd, bus->glom));
+			cnt = dhdsdio_rxglom(bus, rxseq);
+			DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+			rxseq += cnt - 1;
+			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+			continue;
+		}
+
+		/* Try doing single read if we can */
+		if (dhd_readahead && bus->nextlen) {
+			uint16 nextlen = bus->nextlen;
+			bus->nextlen = 0;
+
+			if (bus->bus == SPI_BUS) {
+				rdlen = len = nextlen;
+			} else {
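+				/* The SW header's readahead length is in 16-byte units,
+				 * so shift to get the byte count.
+				 */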
+				rdlen = len = nextlen << 4;
+
+				/* Pad read to blocksize for efficiency */
+				if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+					pad = bus->blocksize - (rdlen % bus->blocksize);
+					if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+						((rdlen + pad + firstread) < MAX_RX_DATASZ))
+						rdlen += pad;
+				} else if (rdlen % DHD_SDALIGN) {
+					rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+				}
+			}
+
+			/* We use bus->rxctl buffer in WinXP for initial control pkt receives.
+			 * Later we use buffer-poll for data as well as control packets.
+			 * This is required because dhd receives full frame in gSPI unlike SDIO.
+			 * After the frame is received we have to distinguish whether it is data
+			 * or non-data frame.
+			 */
+			/* Allocate a packet buffer */
+			dhd_os_sdlock_rxq(bus->dhd);
+			if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+				if (bus->bus == SPI_BUS) {
+					bus->usebufpool = FALSE;
+					bus->rxctl = bus->rxbuf;
+					if (dhd_alignctl) {
+						bus->rxctl += firstread;
+						if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+							bus->rxctl += (DHD_SDALIGN - pad);
+						bus->rxctl -= firstread;
+					}
+					ASSERT(bus->rxctl >= bus->rxbuf);
+					rxbuf = bus->rxctl;
+					/* Read the entire frame */
+					sdret = dhd_bcmsdh_recv_buf(bus,
+					                            bcmsdh_cur_sbwad(sdh),
+					                            SDIO_FUNC_2,
+					                            F2SYNC, rxbuf, rdlen,
+					                            NULL, NULL, NULL);
+					bus->f2rxdata++;
+					ASSERT(sdret != BCME_PENDING);
+
+
+					/* Control frame failures need retransmission */
+					if (sdret < 0) {
+						DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+						   __FUNCTION__, rdlen, sdret));
+						/* dhd.rx_ctlerrs is higher level */
+						bus->rxc_errors++;
+						dhd_os_sdunlock_rxq(bus->dhd);
+						dhdsdio_rxfail(bus, TRUE,
+						    (bus->bus == SPI_BUS) ? FALSE : TRUE);
+						continue;
+					}
+				} else {
+					/* Give up on data, request rtx of events */
+					DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+					           "expected rxseq %d\n",
+					           __FUNCTION__, len, rdlen, rxseq));
+					/* Just go try again w/normal header read */
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			} else {
+				if (bus->bus == SPI_BUS)
+					bus->usebufpool = TRUE;
+
+				ASSERT(!PKTLINK(pkt));
+				PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+				rxbuf = (uint8 *)PKTDATA(osh, pkt);
+				/* Read the entire frame */
+				sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+				                            SDIO_FUNC_2,
+				                            F2SYNC, rxbuf, rdlen,
+				                            pkt, NULL, NULL);
+				bus->f2rxdata++;
+				ASSERT(sdret != BCME_PENDING);
+
+				if (sdret < 0) {
+					DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+					   __FUNCTION__, rdlen, sdret));
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+					bus->dhd->rx_errors++;
+					dhd_os_sdunlock_rxq(bus->dhd);
+					/* Force retry w/normal header read.  Don't attempt NAK for
+					 * gSPI
+					 */
+					dhdsdio_rxfail(bus, TRUE,
+					      (bus->bus == SPI_BUS) ? FALSE : TRUE);
+					continue;
+				}
+			}
+			dhd_os_sdunlock_rxq(bus->dhd);
+
+			/* Now check the header */
+			bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+			/* Extract hardware header fields */
+			len = ltoh16_ua(bus->rxhdr);
+			check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+			/* All zeros means readahead info was bad */
+			if (!(len|check)) {
+				DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+				           __FUNCTION__));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate check bytes */
+			if ((uint16)~(len^check)) {
+				DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+				           " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+				           len, check));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rx_badhdr++;
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate frame length */
+			if (len < SDPCM_HDRLEN) {
+				DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+				           __FUNCTION__, len));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Check for consistency with readahead info */
+			len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+			if (len_consistent) {
+				/* Mismatch, force retry w/normal header (may be >4K) */
+				DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+				           "expected rxseq %d\n",
+				           __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+
+			/* Extract software header fields */
+			chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			bus->nextlen =
+			         bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+			if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+				DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+				          " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+				          seq));
+				bus->nextlen = 0;
+			}
+
+			bus->dhd->rx_readahead_cnt++;
+			/* Handle Flow Control */
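+			/* Each set bit in fcbits flow-controls one precedence level;
+			 * newly set bits count as XOFF events, newly cleared bits as XON.
+			 */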
+			fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			delta = 0;
+			if (~bus->flowcontrol & fcbits) {
+				bus->fc_xoff++;
+				delta = 1;
+			}
+			if (bus->flowcontrol & ~fcbits) {
+				bus->fc_xon++;
+				delta = 1;
+			}
+
+			if (delta) {
+				bus->fc_rcvd++;
+				bus->flowcontrol = fcbits;
+			}
+
+			/* Check and update sequence number */
+			if (rxseq != seq) {
+				DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+			/* Check window for sanity */
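+			/* txmax is the highest tx sequence the dongle will accept; a window
+			 * of more than 0x70 ahead of tx_seq is implausible, so keep the
+			 * previous limit rather than trusting a likely-corrupt header.
+			 */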
+			if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+				DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+					__FUNCTION__, txmax, bus->tx_seq));
+				txmax = bus->tx_max;
+			}
+			bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Data", rxbuf, len);
+			} else if (DHD_HDRS_ON()) {
+				prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+			}
+#endif
+
+			if (chan == SDPCM_CONTROL_CHANNEL) {
+				if (bus->bus == SPI_BUS) {
+					dhdsdio_read_control(bus, rxbuf, len, doff);
+					if (bus->usebufpool) {
+						dhd_os_sdlock_rxq(bus->dhd);
+						PKTFREE(bus->dhd->osh, pkt, FALSE);
+						dhd_os_sdunlock_rxq(bus->dhd);
+					}
+					continue;
+				} else {
+					DHD_ERROR(("%s (nextlen): readahead on control"
+					           " packet %d?\n", __FUNCTION__, seq));
+					/* Force retry w/normal header read */
+					bus->nextlen = 0;
+					dhdsdio_rxfail(bus, FALSE, TRUE);
+					dhd_os_sdlock_rxq(bus->dhd);
+					PKTFREE2();
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			}
+
+			if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+				DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+				           "rx pktbuf's or not yet malloced.\n", len, chan));
+				continue;
+			}
+
+			/* Validate data offset */
+			if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+				DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+				           __FUNCTION__, doff, len, SDPCM_HDRLEN));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				ASSERT(0);
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				continue;
+			}
+
+			/* All done with this one -- now deliver the packet */
+			goto deliver;
+		}
+		/* gSPI frames should not be handled in fractions */
+		if (bus->bus == SPI_BUS) {
+			break;
+		}
+
+		/* Read frame header (hardware and software) */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            bus->rxhdr, firstread, NULL, NULL, NULL);
+		bus->f2rxhdrs++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+			bus->rx_hdrfail++;
+			dhdsdio_rxfail(bus, TRUE, TRUE);
+			continue;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+			prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Extract hardware header fields */
+		len = ltoh16_ua(bus->rxhdr);
+		check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+		/* All zeros means no more frames */
+		if (!(len|check)) {
+			*finished = TRUE;
+			break;
+		}
+
+		/* Validate check bytes */
+		if ((uint16)~(len^check)) {
+			DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, len, check));
+			bus->rx_badhdr++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Validate frame length */
+		if (len < SDPCM_HDRLEN) {
+			DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+			continue;
+		}
+
+		/* Extract software header fields */
+		chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		/* Validate data offset */
+		if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+			DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+			           __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+			bus->rx_badhdr++;
+			ASSERT(0);
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Save the readahead length if there is one */
+		bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+
+		/* Handle Flow Control */
+		fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		delta = 0;
+		if (~bus->flowcontrol & fcbits) {
+			bus->fc_xoff++;
+			delta = 1;
+		}
+		if (bus->flowcontrol & ~fcbits) {
+			bus->fc_xon++;
+			delta = 1;
+		}
+
+		if (delta) {
+			bus->fc_rcvd++;
+			bus->flowcontrol = fcbits;
+		}
+
+		/* Check and update sequence number */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Call a separate function for control frames */
+		if (chan == SDPCM_CONTROL_CHANNEL) {
+			dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+			continue;
+		}
+
+		ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+		       (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+		/* Length to read */
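+		/* The first 'firstread' bytes (the frame header) were already fetched,
+		 * so only the remainder of the frame needs to be read.
+		 */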
+		rdlen = (len > firstread) ? (len - firstread) : 0;
+
+		/* May pad read to blocksize for efficiency */
+		if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+			pad = bus->blocksize - (rdlen % bus->blocksize);
+			if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+			    ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+				rdlen += pad;
+		} else if (rdlen % DHD_SDALIGN) {
+			rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+		}
+
+		/* Satisfy length-alignment requirements */
+		if (forcealign && (rdlen & (ALIGNMENT - 1)))
+			rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+		if ((rdlen + firstread) > MAX_RX_DATASZ) {
+			/* Too long -- skip this frame */
+			DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+			bus->dhd->rx_errors++; bus->rx_toolong++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+			/* Give up on data, request rtx of events */
+			DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+			           __FUNCTION__, rdlen, chan));
+			bus->dhd->rx_dropped++;
+			dhd_os_sdunlock_rxq(bus->dhd);
+			dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+			continue;
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		ASSERT(!PKTLINK(pkt));
+
+		/* Leave room for what we already read, and align remainder */
+		ASSERT(firstread < (PKTLEN(osh, pkt)));
+		PKTPULL(osh, pkt, firstread);
+		PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+		/* Read the remaining frame data */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+		bus->f2rxdata++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+			           ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+			            ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+			continue;
+		}
+
+		/* Copy the already-read portion */
+		PKTPUSH(osh, pkt, firstread);
+		bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			prhex("Rx Data", PKTDATA(osh, pkt), len);
+		}
+#endif
+
+deliver:
+		/* Save superframe descriptor and allocate packet frame */
+		if (chan == SDPCM_GLOM_CHANNEL) {
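+			/* A glom descriptor lists the lengths of the subframes in the
+			 * upcoming superframe; it is saved in bus->glomd so dhdsdio_rxglom()
+			 * can split the superframe when it arrives.
+			 */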
+			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+				DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+				          __FUNCTION__, len));
+#ifdef DHD_DEBUG
+				if (DHD_GLOM_ON()) {
+					prhex("Glom Data", PKTDATA(osh, pkt), len);
+				}
+#endif
+				PKTSETLEN(osh, pkt, len);
+				ASSERT(doff == SDPCM_HDRLEN);
+				PKTPULL(osh, pkt, SDPCM_HDRLEN);
+				bus->glomd = pkt;
+			} else {
+				DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+			}
+			continue;
+		}
+
+		/* Fill in packet len and prio, deliver upward */
+		PKTSETLEN(osh, pkt, len);
+		PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+		/* Test channel packets are processed separately */
+		if (chan == SDPCM_TEST_CHANNEL) {
+			dhdsdio_testrcv(bus, pkt, seq);
+			continue;
+		}
+#endif /* SDTEST */
+
+		if (PKTLEN(osh, pkt) == 0) {
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			continue;
+		} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
+			&reorder_info_len) != 0) {
+			DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			continue;
+		}
+
+		if (reorder_info_len) {
+			/* Reordering info from the firmware */
+			dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
+				&pkt, &pkt_count);
+			if (pkt_count == 0)
+				continue;
+		} else {
+			pkt_count = 1;
+		}
+
+		/* Unlock during rx call */
+		dhd_os_sdunlock(bus->dhd);
+		dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+		dhd_os_sdlock(bus->dhd);
+#if defined(SDIO_ISR_THREAD)
+		/* terence 20150615: fix for the errors below, caused by bussleep
+		 * in the watchdog after dhd_os_sdunlock here, so call BUS_WAKE
+		 * to wake up the bus again:
+		 * dhd_bcmsdh_recv_buf: Device asleep
+		 * dhdsdio_readframes: RXHEADER FAILED: -40
+		 * dhdsdio_rxfail: abort command, terminate frame, send NAK
+		 */
+		BUS_WAKE(bus);
+#endif
+	}
+	rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+	/* Message if we hit the limit */
+	if (!rxleft && !sdtest)
+		DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+	else
+#endif /* DHD_DEBUG */
+	DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+	/* Back off rxseq if awaiting rtx, update rx_seq */
+	if (bus->rxskip)
+		rxseq--;
+	bus->rx_seq = rxseq;
+
+	if (bus->reqbussleep)
+	{
+		dhdsdio_bussleep(bus, TRUE);
+		bus->reqbussleep = FALSE;
+	}
+	bus->readframes = FALSE;
+
+	return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus, uint32 *hmbd)
+{
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus = 0;
+	uint32 hmb_data;
+	uint8 fcbits;
+	uint retries = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Read mailbox data and ack that we did so */
+	R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+	if (retries <= retry_limit)
+		W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+	bus->f1regdata += 2;
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+		if (!bus->rxskip) {
+			DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+		}
+		bus->rxskip = FALSE;
+		intstatus |= FRAME_AVAIL_MASK(bus);
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+			           bus->sdpcm_ver, SDPCM_PROT_VERSION));
+		else
+			DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+		/* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		    (bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+			uint32 val;
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+			val &= ~CC_XMTDATAAVAIL_MODE;
+			val |= CC_XMTDATAAVAIL_CTRL;
+			W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+		}
+
+#ifdef DHD_DEBUG
+		/* Retrieve console state address now that firmware should have updated it */
+		{
+			sdpcm_shared_t shared;
+			if (dhdsdio_readshared(bus, &shared) == 0)
+				bus->console_addr = shared.console_addr;
+		}
+#endif /* DHD_DEBUG */
+	}
+
+	/*
+	 * Flow Control has been moved into the RX headers and this out of band
+	 * method isn't used any more.  Leave this here to possibly remain backward
+	 * compatible with older dongles.
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->fc_xoff++;
+		if (bus->flowcontrol & ~fcbits)
+			bus->fc_xon++;
+
+		bus->fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+	/* At least print a message if FW halted */
+	if (hmb_data & HMB_DATA_FWHALT) {
+		DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
+		dhdsdio_checkdied(bus, NULL, 0);
+		bus->dhd->busstate = DHD_BUS_DOWN;
+	}
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+	                 HMB_DATA_FWHALT |
+	                 HMB_DATA_NAKHANDLED |
+	                 HMB_DATA_FC |
+	                 HMB_DATA_FWREADY |
+	                 HMB_DATA_FCDATA_MASK |
+	                 HMB_DATA_VERSION_MASK)) {
+		DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+	}
+
+	if (hmbd) {
+		*hmbd = hmb_data;
+	}
+
+	return intstatus;
+}
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus, newstatus = 0;
+	uint retries = 0;
+	uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+	uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+	uint framecnt = 0;		  /* Temporary counter of tx/rx frames */
+	bool rxdone = TRUE;		  /* Flag for no more read data */
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+	unsigned long flags;
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+	bool is_resched_by_readframe = FALSE;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_sdlock(bus->dhd);
+	DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+		dhd_os_sdunlock(bus->dhd);
+		return 0;
+	}
+
+	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
+	DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+	/* Start with leftover status bits */
+	intstatus = bus->intstatus;
+
+	if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/* If waiting for HTAVAIL, check status */
+	if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+		int err;
+		uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+		/* Check for inconsistent device control */
+		devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		} else {
+			ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+		}
+#endif /* DHD_DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+		if (SBSDIO_HTAV(clkctl)) {
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			if (err) {
+				DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			bus->clkstate = CLK_AVAIL;
+		} else {
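+			/* The HW header check word is the one's complement of the length,
+			 * so (sublen ^ check) must be all ones for an intact header.
+			 */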
+			goto clkwait;
+		}
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+	if (bus->clkstate != CLK_AVAIL)
+		goto clkwait;
+
+	/* Pending interrupt indicates new device status */
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+#if defined(BT_OVER_SDIO)
+		bcmsdh_btsdio_process_f3_intr();
+#endif /* defined (BT_OVER_SDIO) */
+
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata++;
+		if (bcmsdh_regfail(bus->sdh))
+			newstatus = 0;
+		newstatus &= bus->hostintmask;
+		bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+		if (newstatus) {
+			bus->f1regdata++;
+			if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+				(newstatus == I_XMTDATA_AVAIL)) {
+			} else
+				W_SDREG(newstatus, &regs->intstatus, retries);
+		}
+	}
+
+	/* Merge new bits with previous */
+	intstatus |= newstatus;
+	bus->intstatus = 0;
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata += 2;
+		bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		uint32 hmbdata = 0;
+
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus, &hmbdata);
+
+#ifdef DHD_ULP
+		/* ULP prototyping. Redownload fw on oob interrupt */
+
+		/* all the writes after this point CAN use cached sbwad value */
+		bcmsdh_force_sbwad_calc(bus->sdh, FALSE);
+
+		if (dhd_ulp_pre_redownload_check(bus->dhd, bus->sdh, hmbdata)) {
+			if (dhd_bus_ulp_reinit_fw(bus) < 0) {
+				DHD_ERROR(("%s:%d FW redownload failed\n",
+					__FUNCTION__, __LINE__));
+				goto exit;
+			}
+		}
+#endif
+
+	}
+
+	/* Just being here means nothing more to do for chipactive */
+	if (intstatus & I_CHIPACTIVE) {
+		/* ASSERT(bus->clkstate == CLK_AVAIL); */
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus, NULL);
+	}
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		DHD_ERROR(("Dongle reports SBINT\n"));
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	if (intstatus & I_CHIPACTIVE) {
+		DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	if (intstatus & I_HMB_FC_STATE) {
+		DHD_INFO(("Dongle reports HMB_FC_STATE\n"));
+		intstatus &= ~I_HMB_FC_STATE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
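+	/* rxskip is set after an rx failure where a NAK was sent; frames are
+	 * ignored until the dongle acknowledges via the NAKHANDLED mailbox bit.
+	 */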
+	if (bus->rxskip) {
+		intstatus &= ~FRAME_AVAIL_MASK(bus);
+	}
+
+	/* On frame indication, read available frames */
+	if (PKT_AVAILABLE(bus, intstatus)) {
+
+		framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+		if (rxdone || bus->rxskip)
+			intstatus  &= ~FRAME_AVAIL_MASK(bus);
+		rxlimit -= MIN(framecnt, rxlimit);
+	}
+
+	/* Keep still-pending events for next scheduling */
+	bus->intstatus = intstatus;
+
+clkwait:
+	/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+	 * or clock availability.  (Allows tx loop to check ipend if desired.)
+	 * (Unless register access seems hosed, as we may not be able to ACK...)
+	 */
+	if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) &&
+			!(bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) {
+		DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+		          __FUNCTION__, rxdone, framecnt));
+		bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+		bcmsdh_intr_enable(sdh);
+	}
+
+#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
+	/* In case of SW-OOB (edge trigger),
+	 * check the interrupt status in the dongle again after enabling the irq on the host,
+	 * and reschedule the dpc if an interrupt is pending in the dongle.
+	 * There is a chance of missing an OOB interrupt while the irq is disabled on the host.
+	 * No need to do this with HW-OOB (level trigger).
+	 */
+	R_SDREG(newstatus, &regs->intstatus, retries);
+	if (bcmsdh_regfail(bus->sdh))
+		newstatus = 0;
+	if (newstatus & bus->hostintmask) {
+		bus->ipend = TRUE;
+		resched = TRUE;
+	}
+#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
+#endif
+
+	if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
+		dhdsdio_sendpendctl(bus);
+
+	/* Send queued frames (limit 1 if rx may still be pending) */
+	else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+	    pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+
+#ifdef DHD_ULP
+		if (dhd_ulp_f2_ready(bus->dhd, bus->sdh)) {
+#endif /* DHD_ULP */
+			if (bus->dhd->conf->dhd_txminmax < 0)
+				framecnt = rxdone ? txlimit : MIN(txlimit, DATABUFCNT(bus));
+			else
+				framecnt = rxdone ? txlimit : MIN(txlimit, bus->dhd->conf->dhd_txminmax);
+			framecnt = dhdsdio_sendfromq(bus, framecnt);
+			txlimit -= framecnt;
+#ifdef DHD_ULP
+		} else {
+			/* In other transient states like DHD_ULP_, after the states
+			 * DHD_ULP_F2ENAB_CLEARING and DHD_ULP_F2ENAB_SETTING,
+			 * the dpc is scheduled after steady-state and dhdsdio_sendfromq()
+			 * will execute again.
+			 */
+		}
+#endif /* DHD_ULP */
+	}
+	/* Resched the DPC if ctrl cmd is pending on bus credit */
+	if (bus->ctrl_frame_stat) {
+		if (bus->dhd->conf->txctl_tmo_fix) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (!kthread_should_stop())
+				schedule_timeout(1);
+			set_current_state(TASK_RUNNING);
+		}
+		resched = TRUE;
+	}
+
+	/* Resched if events or tx frames are pending, else await next interrupt */
+	/* On failed register access, all bets are off: no resched or interrupts */
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+		if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
+			SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+			/* Bus failed because of KSO */
+			DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
+			bus->kso = FALSE;
+		} else {
+			DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
+				__FUNCTION__));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			bus->intstatus = 0;
+		}
+	} else if (bus->clkstate == CLK_PENDING) {
+		/* Awaiting I_CHIPACTIVE; don't resched */
+	} else if (bus->intstatus || bus->ipend ||
+	           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+			PKT_AVAILABLE(bus, bus->intstatus)) {  /* Read multiple frames */
+		resched = TRUE;
+	}
+
+	bus->dpc_sched = resched;
+
+	/* If we're done for now, turn off clock request. */
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING) &&
+		NO_OTHER_ACTIVE_BUS_USER(bus)) {
+		bus->activity = FALSE;
+		dhdsdio_bussleep(bus, TRUE);
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+exit:
+
+	if (!resched) {
+		/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+		 * or clock availability.  (Allows tx loop to check ipend if desired.)
+		 * (Unless register access seems hosed, as we may not be able to ACK...)
+		 */
+		if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh) &&
+				(bus->dhd->conf->oob_enabled_later && !bus->ctrl_frame_stat)) {
+			DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+					  __FUNCTION__, rxdone, framecnt));
+			bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+			bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+			bcmsdh_intr_enable(sdh);
+		}
+		if (dhd_dpcpoll) {
+			if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) {
+				resched = TRUE;
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+				is_resched_by_readframe = TRUE;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+			}
+		}
+	}
+
+	if (bus->ctrl_wait && TXCTLOK(bus))
+		wake_up_interruptible(&bus->ctrl_tx_wait);
+	dhd_os_sdunlock(bus->dhd);
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+	if (bus->dhd->dhd_bug_on) {
+		DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x"
+			" ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n",
+			__FUNCTION__, resched, bus->ctrl_frame_stat,
+			bus->intstatus, bus->ipend,
+			pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe));
+
+		bus->dhd->dhd_bug_on = FALSE;
+	}
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+
+	DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched;
+
+	/* Call the DPC directly. */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	resched = dhdsdio_dpc(bus);
+
+	return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus) {
+		DHD_ERROR(("%s: bus is a null pointer, exit\n", __FUNCTION__));
+		return;
+	}
+	sdh = bus->sdh;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is down, nothing to do\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Count the interrupt call */
+	bus->intrcount++;
+	bus->ipend = TRUE;
+
+	/* Shouldn't get this interrupt if we're sleeping? */
+	if (!SLPAUTO_ENAB(bus)) {
+		if (bus->sleeping) {
+			DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+			return;
+		} else if (!KSO_ENAB(bus)) {
+			DHD_ERROR(("ISR in devsleep 1\n"));
+		}
+	}
+
+	/* Disable additional interrupts (is this needed now)? */
+	if (bus->intr) {
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	} else {
+		DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+	}
+
+	bcmsdh_intr_disable(sdh);
+	bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	/* terence 20150209: dpc should be scheduled again if dpc_sched is TRUE or dhd_bus_txdata
+	 * cannot schedule anymore because dpc_sched is TRUE now.
+	 */
+	if (dhdsdio_dpc(bus)) {
+		bus->dpc_sched = TRUE;
+		dhd_sched_dpc(bus->dhd);
+	}
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+	bus->dpc_sched = TRUE;
+	dhd_sched_dpc(bus->dhd);
+#endif /* defined(SDIO_ISR_THREAD) */
+
+}
+
+#ifdef PKT_STATICS
+void dhdsdio_txpktstatics(void)
+{
+	uint i, total = 0;
+
+	printf("%s: TYPE EVENT: %d pkts (size=%d) transferred\n",
+		__FUNCTION__, tx_statics.event_count, tx_statics.event_size);
+	printf("%s: TYPE CTRL:  %d pkts (size=%d) transferred\n",
+		__FUNCTION__, tx_statics.ctrl_count, tx_statics.ctrl_size);
+	printf("%s: TYPE DATA:  %d pkts (size=%d) transferred\n",
+		__FUNCTION__, tx_statics.data_count, tx_statics.data_size);
+	printf("%s: Glom size distribution:\n", __FUNCTION__);
+	for (i = 0; i < tx_statics.glom_max; i++) {
+		total += tx_statics.glom_cnt[i];
+	}
+	for (i = 0; i < tx_statics.glom_max; i++) {
+		printf("%02d: %d", i + 1, tx_statics.glom_cnt[i]);
+		if ((i + 1) % 8)
+			printf(", ");
+		else
+			printf("\n");
+	}
+	printf("\n");
+	for (i = 0; i < tx_statics.glom_max; i++) {
+		printf("%02d:%3d%%", i + 1, (tx_statics.glom_cnt[i] * 100) / total);
+		if ((i + 1) % 8)
+			printf(", ");
+		else
+			printf("\n");
+	}
+	printf("\n");
+	printf("%s: data/glom=%d, glom_max=%d\n",
+		__FUNCTION__, tx_statics.data_count/total, tx_statics.glom_max);
+	printf("%s: TYPE RX GLOM: %d pkts (size=%d) transferred\n",
+		__FUNCTION__, tx_statics.glom_count, tx_statics.glom_size);
+	printf("%s: TYPE TEST: %d pkts (size=%d) transferred\n\n\n",
+		__FUNCTION__, tx_statics.test_count, tx_statics.test_size);
+}
+#endif
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+	/* Default to specified length, or full range */
+	if (dhd_pktgen_len) {
+		bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+		bus->pktgen_minlen = bus->pktgen_maxlen;
+	} else {
+		bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+		bus->pktgen_minlen = 0;
+	}
+	bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+	/* Default to per-watchdog burst with 10s print time */
+	bus->pktgen_freq = 1;
+	bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0;
+	bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+	/* Default to echo mode */
+	bus->pktgen_mode = DHD_PKTGEN_ECHO;
+	bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+	void *pkt;
+	uint8 *data;
+	uint pktcount;
+	uint fillbyte;
+	osl_t *osh = bus->dhd->osh;
+	uint16 len;
+	ulong time_lapse;
+	uint sent_pkts;
+	uint rcvd_pkts;
+
+	/* Display current count if appropriate */
+	if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+		bus->pktgen_ptick = 0;
+		printf("%s: send attempts %d, rcvd %d, errors %d\n",
+		       __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+
+		/* Print throughput stats only for constant length packet runs */
+		if (bus->pktgen_minlen == bus->pktgen_maxlen) {
+			time_lapse = jiffies - bus->pktgen_prev_time;
+			bus->pktgen_prev_time = jiffies;
+			sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
+			bus->pktgen_prev_sent = bus->pktgen_sent;
+			rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
+			bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
+
+			printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
+			  __FUNCTION__,
+			  (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
+			  (rcvd_pkts * bus->pktgen_len  / jiffies_to_msecs(time_lapse)) * 8);
+		}
+	}
+
+	/* For recv mode, just make sure dongle has started sending */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+			dhdsdio_sdtest_set(bus, bus->pktgen_total);
+		}
+		return;
+	}
+
+	/* Otherwise, generate or request the specified number of packets */
+	for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+		/* Stop if total has been reached */
+		if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			break;
+		}
+
+		/* Allocate an appropriate-sized packet */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			len = SDPCM_TEST_PKT_CNT_FLD_LEN;
+		} else {
+			len = bus->pktgen_len;
+		}
+		if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {
+			DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+			break;
+		}
+		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+		data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+		/* Write test header cmd and extra based on mode */
+		switch (bus->pktgen_mode) {
+		case DHD_PKTGEN_ECHO:
+			*data++ = SDPCM_TEST_ECHOREQ;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_SEND:
+			*data++ = SDPCM_TEST_DISCARD;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_RXBURST:
+			*data++ = SDPCM_TEST_BURST;
+			*data++ = (uint8)bus->pktgen_count; /* Just for backward compatibility */
+			break;
+
+		default:
+			DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+			PKTFREE(osh, pkt, TRUE);
+			bus->pktgen_count = 0;
+			return;
+		}
+
+		/* Write test header length field */
+		*data++ = (bus->pktgen_len >> 0);
+		*data++ = (bus->pktgen_len >> 8);
+
+		/* Write the frame count in a 4-byte field adjacent to the SDPCM test header
+		 * for burst mode
+		 */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			*data++ = (uint8)(bus->pktgen_count >> 0);
+			*data++ = (uint8)(bus->pktgen_count >> 8);
+			*data++ = (uint8)(bus->pktgen_count >> 16);
+			*data++ = (uint8)(bus->pktgen_count >> 24);
+		} else {
+
+			/* Then fill in the remainder -- N/A for burst */
+			for (fillbyte = 0; fillbyte < len; fillbyte++)
+				*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+			prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Send it */
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
+			bus->pktgen_fail++;
+			if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+				bus->pktgen_count = 0;
+		}
+		bus->pktgen_sent++;
+
+		/* Bump length if not fixed, wrap at max */
+		if (++bus->pktgen_len > bus->pktgen_maxlen)
+			bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+		/* Special case for burst mode: just send one request! */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+			break;
+	}
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint count)
+{
+	void *pkt;
+	uint8 *data;
+	osl_t *osh = bus->dhd->osh;
+
+	/* Allocate the packet */
+	if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) {
+		DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+		return;
+	}
+	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN);
+	data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+	/* Fill in the test header */
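+	/* Layout: cmd byte, start/stop flag, 16-bit max length (LE),
+	 * 32-bit frame count (LE).
+	 */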
+	*data++ = SDPCM_TEST_SEND;
+	*data++ = (count > 0)?TRUE:FALSE;
+	*data++ = (bus->pktgen_maxlen >> 0);
+	*data++ = (bus->pktgen_maxlen >> 8);
+	*data++ = (uint8)(count >> 0);
+	*data++ = (uint8)(count >> 8);
+	*data++ = (uint8)(count >> 16);
+	*data++ = (uint8)(count >> 24);
+
+	/* Send it */
+	if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK)
+		bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint8 *data;
+	uint pktlen;
+
+	uint8 cmd;
+	uint8 extra;
+	uint16 len;
+	uint16 offset;
+
+	/* Check for min length */
+	if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_testrcv: toss runt frame, pktlen %d\n", pktlen));
+		PKTFREE(osh, pkt, FALSE);
+		return;
+	}
+
+	/* Extract header fields */
+	data = PKTDATA(osh, pkt);
+	cmd = *data++;
+	extra = *data++;
+	len = *data++; len += *data++ << 8;
+	DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
+	/* Check length for relevant commands */
+	if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+		if (pktlen != len + SDPCM_TEST_HDRLEN) {
+			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+			           " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+			PKTFREE(osh, pkt, FALSE);
+			return;
+		}
+	}
+
+	/* Process as per command */
+	switch (cmd) {
+	case SDPCM_TEST_ECHOREQ:
+		/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+		*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) {
+			bus->pktgen_sent++;
+		} else {
+			bus->pktgen_fail++;
+			PKTFREE(osh, pkt, FALSE);
+		}
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_ECHORSP:
+		if (bus->ext_loop) {
+			PKTFREE(osh, pkt, FALSE);
+			bus->pktgen_rcvd++;
+			break;
+		}
+
+		for (offset = 0; offset < len; offset++, data++) {
+			if (*data != SDPCM_TEST_FILL(offset, extra)) {
+				DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+				           "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+				           offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+				break;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_DISCARD:
+		{
+			int i = 0;
+			uint8 *prn = data;
+			uint8 testval = extra;
+			for (i = 0; i < len; i++) {
+				if (*prn != testval) {
+					DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+						i, bus->pktgen_rcvd_rcvsession, testval, *prn));
+					prn++; testval++;
+				}
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_BURST:
+	case SDPCM_TEST_SEND:
+	default:
+		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+		          " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+		PKTFREE(osh, pkt, FALSE);
+		break;
+	}
+
+	/* For recv mode, stop at limit (and tell dongle to stop sending) */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcvd_rcvsession++;
+
+			if (bus->pktgen_total &&
+				(bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+				bus->pktgen_count = 0;
+				DHD_ERROR(("Pktgen:rcv test complete!\n"));
+				bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+				dhdsdio_sdtest_set(bus, FALSE);
+				bus->pktgen_rcvd_rcvsession = 0;
+			}
+		}
+	}
+}
+#endif /* SDTEST */
+
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+	int err = 0;
+
+#if defined(OOB_INTR_ONLY)
+	err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
+#endif
+	return err;
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
+#endif
+}
+
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
+#endif
+}
+
+void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh);
+}
+
+void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_relax(dhdpub->bus->sdh);
+}
+
+bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub)
+{
+	bool enabled = FALSE;
+
+	enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh);
+	return enabled;
+}
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus;
+	unsigned long flags;
+
+	DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+	bus = dhdp->bus;
+
+	if (bus->dhd->dongle_reset)
+		return FALSE;
+
+	if (bus->dhd->hang_was_sent) {
+		dhd_os_wd_timer(bus->dhd, 0);
+		return FALSE;
+	}
+
+	/* Ignore the timer if simulating bus down */
+	if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+		return FALSE;
+
+	DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) ||
+			DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+		DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+		return FALSE;
+	}
+	DHD_BUS_BUSY_SET_IN_WD(dhdp);
+	DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+	dhd_os_sdlock(bus->dhd);
+
+	/* Poll period: check device if appropriate. */
+	// terence 20160615: remove !SLPAUTO_ENAB(bus) to fix being unable to poll if SR is supported
+	if (1 && (bus->poll && (++bus->polltick >= bus->pollrate))) {
+		uint32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+			if (!bus->dpc_sched) {
+				uint8 devpend;
+				devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+				                          SDIOD_CCCR_INTPEND, NULL);
+				intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and schedule the DPC */
+			if (intstatus) {
+				bus->pollcnt++;
+				bus->ipend = TRUE;
+				if (bus->intr) {
+					bcmsdh_intr_disable(bus->sdh);
+				}
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->lastintrs = bus->intrcount;
+	}
+
+	if ((!bus->dpc_sched) && pktq_len(&bus->txq)) {
+		bus->dpc_sched = TRUE;
+		dhd_sched_dpc(bus->dhd);
+	}
+
+#ifdef DHD_DEBUG
+	/* Poll for console output periodically */
+	if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (SLPAUTO_ENAB(bus))
+				dhdsdio_bussleep(bus, FALSE);
+			else
+				dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+			if (dhdsdio_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	/* Generate packets if configured */
+	if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+		/* Make sure backplane clock is on */
+		if (SLPAUTO_ENAB(bus))
+			dhdsdio_bussleep(bus, FALSE);
+		else
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		bus->pktgen_tick = 0;
+		dhdsdio_pktgen(bus);
+	}
+#endif
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+#ifdef DHD_USE_IDLECOUNT
+	if (bus->activity)
+		bus->activity = FALSE;
+	else {
+		bus->idlecount++;
+
+		/*
+		 * If the condition to switch off the clock is reached, and
+		 * BT is inactive (in case of a BT_OVER_SDIO build), turn off the clk.
+		 *
+		 * Consider the following case, DHD is configured with
+		 * 1) idletime == DHD_IDLE_IMMEDIATE
+		 * 2) BT is the last user of the clock
+		 * We cannot disable the clock from __dhdsdio_clk_disable
+		 * since WLAN might be using it. If WLAN is active then
+		 * from the respective function/context after doing the job
+		 * the clk is turned off.
+		 * But if WLAN is actually inactive then the watchdog should
+		 * disable the clock. So the condition check below should be
+		 * bus->idletime != 0 instead of idletime == 0
+		 */
+		if ((bus->idletime != 0) && (bus->idlecount >= bus->idletime) &&
+			NO_OTHER_ACTIVE_BUS_USER(bus)) {
+			DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__));
+			if (!bus->poll && SLPAUTO_ENAB(bus)) {
+				if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY)
+					dhd_os_wd_timer(bus->dhd, 0);
+			} else
+				dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+			bus->idlecount = 0;
+		}
+	}
+#else
+	if ((bus->idletime != 0) && (bus->clkstate == CLK_AVAIL) &&
+		NO_OTHER_ACTIVE_BUS_USER(bus)) {
+		if (++bus->idlecount >= bus->idletime) {
+			bus->idlecount = 0;
+			if (bus->activity) {
+				bus->activity = FALSE;
+				if (!bus->poll && SLPAUTO_ENAB(bus)) {
+					if (!bus->readframes)
+						dhdsdio_bussleep(bus, TRUE);
+					else
+						bus->reqbussleep = TRUE;
+				} else {
+					dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+				}
+			}
+		}
+	}
+#endif /* DHD_USE_IDLECOUNT */
+
+	dhd_os_sdunlock(bus->dhd);
+
+	DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+	DHD_BUS_BUSY_CLEAR_IN_WD(dhdp);
+	dhd_os_busbusy_wake(dhdp);
+	DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+	return bus->ipend;
+}
+
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	uint32 addr, val;
+	int rv;
+	void *pkt;
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Exclusive bus access */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	BUS_WAKE(bus);
+	/* No pend allowed since txpkt is called later, ht clk has to be on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	if (!DATAOK(bus)) {
+		rv = BCME_NOTREADY;
+		goto done;
+	}
+
+	/* Bump dongle by sending an empty packet on the event channel.
+	 * sdpcm_sendup (RX) checks for virtual console input.
+	 */
+	if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+		rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE);
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
+		NO_OTHER_ACTIVE_BUS_USER(bus)) {
+		bus->activity = FALSE;
+		dhdsdio_bussleep(bus, TRUE);
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rv;
+}
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+	uint byte, tag, tdata;
+	DHD_INFO(("Function %d CIS:\n", fn));
+
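+	/* Walk the CIS tuple chain: each tuple is a tag byte followed by a length
+	 * byte and that many data bytes; tag 0xff terminates the chain and a null
+	 * tag (0x00) carries no length byte.
+	 */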
+	for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+		if ((byte % 16) == 0)
+			DHD_INFO(("    "));
+		DHD_INFO(("%02x ", cis[byte]));
+		if ((byte % 16) == 15)
+			DHD_INFO(("\n"));
+		if (!tdata--) {
+			tag = cis[byte];
+			if (tag == 0xff)
+				break;
+			else if (!tag)
+				tdata = 0;
+			else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+				tdata = cis[byte + 1] + 1;
+			else
+				DHD_INFO(("]"));
+		}
+	}
+	if ((byte % 16) != 15)
+		DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+	if (chipid == BCM4336_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4330_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43237_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43362_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4314_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43242_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43340_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43341_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43143_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43342_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4334_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43239_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4324_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4335_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4339_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43349_CHIP_ID)
+		return TRUE;
+	if (BCM4345_CHIP(chipid))
+		return TRUE;
+	if (chipid == BCM4350_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4354_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4358_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43569_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4371_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43430_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43018_CHIP_ID)
+		return TRUE;
+	if (BCM4349_CHIP(chipid))
+		return TRUE;
+	if (BCM4347_CHIP(chipid))
+		return TRUE;
+	if (chipid == BCM4364_CHIP_ID)
+		return TRUE;
+
+	if (chipid == BCM43012_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4362_CHIP_ID)
+		return TRUE;
+
+	return FALSE;
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+	uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+	int ret;
+	dhd_bus_t *bus;
+#ifdef GET_OTP_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif
+
+	DHD_MUTEX_LOCK();
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_txbound = DHD_TXBOUND;
+	dhd_rxbound = DHD_RXBOUND;
+	dhd_alignctl = TRUE;
+	sd1idle = TRUE;
+	dhd_readahead = TRUE;
+	retrydata = FALSE;
+
+#ifdef DISABLE_FLOW_CONTROL
+	dhd_doflow = FALSE;
+#endif /* DISABLE_FLOW_CONTROL */
+	dhd_dongle_ramsize = 0;
+	dhd_txminmax = DHD_TXMINMAX;
+
+	forcealign = TRUE;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+	/* We make assumptions about address window mappings */
+	ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+	/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+	 * means early parse could fail, so here we should get either an ID
+	 * we recognize OR (-1) indicating we must request power first.
+	 */
+	/* Check the Vendor ID */
+	switch (venid) {
+		case 0x0000:
+		case VENDOR_BROADCOM:
+			break;
+		default:
+			DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+			           __FUNCTION__, venid));
+			goto forcereturn;
+	}
+
+	/* Check the Device ID and make sure it's one that we support */
+	switch (devid) {
+		case 0:
+			DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+			          __FUNCTION__));
+			break;
+
+		default:
+			DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+			           __FUNCTION__, venid, devid));
+			goto forcereturn;
+	}
+
+	if (osh == NULL) {
+		DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__));
+		goto forcereturn;
+	}
+
+	/* Allocate private bus interface state */
+	if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+		DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	bzero(bus, sizeof(dhd_bus_t));
+	bus->sdh = sdh;
+	bus->cl_devid = (uint16)devid;
+	bus->bus = DHD_BUS;
+	bus->bus_num = bus_no;
+	bus->slot_num = slot;
+	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+	bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+#ifdef BT_OVER_SDIO
+	bus->bt_use_count = 0;
+#endif
+
+#if defined(SUPPORT_P2P_GO_PS)
+	init_waitqueue_head(&bus->bus_sleep);
+#endif /* SUPPORT_P2P_GO_PS */
+	init_waitqueue_head(&bus->ctrl_tx_wait);
+
+	/* attempt to attach to the dongle */
+	if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+		DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Attach to the dhd/OS/network interface */
+	if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+#if defined(BT_OVER_SDIO)
+	g_dhd_pub = bus->dhd;
+	DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
+#endif /* defined (BT_OVER_SDIO) */
+
+	/* Allocate buffers */
+	if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+		bcmsdh_intr_disable(sdh);
+		if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+			DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+			           __FUNCTION__, ret));
+			goto fail;
+		}
+		DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+	} else {
+		DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+		           __FUNCTION__));
+	}
+
+	DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+	/* if firmware path present try to download and bring up bus */
+	bus->dhd->hang_report  = TRUE;
+#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd
+	if (dhd_download_fw_on_driverload) {
+		if ((ret = dhd_bus_start(bus->dhd)) != 0) {
+			DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+				goto fail;
+		}
+	}
+	else {
+		/* Set random MAC address during boot time */
+		get_random_bytes(&bus->dhd->mac.octet[3], 3);
+		/* Adding BRCM OUI */
+		bus->dhd->mac.octet[0] = 0;
+		bus->dhd->mac.octet[1] = 0x90;
+		bus->dhd->mac.octet[2] = 0x4C;
+	}
+#endif
+#if defined(BT_OVER_SDIO)
+	/* At this point the regulators are turned on and the SDIO bus is conditionally
+	 * started based on the dhd_download_fw_on_driverload check, so increase the bus
+	 * user count. This count is only decremented inside dhd_register_if() when
+	 * dhd_download_fw_on_driverload is false, i.e. FW download during insmod is not
+	 * needed; otherwise it is left as is, so that WLAN always holds the bus until
+	 * rmmod is done.
+	 */
+	dhdsdio_bus_usr_cnt_inc(bus->dhd);
+#endif /* BT_OVER_SDIO */
+
+#ifdef GET_OTP_MAC_ENABLE
+	if (dhd_conf_get_mac(bus->dhd, sdh, ea_addr.octet)) {
+		DHD_TRACE(("%s: Can not read MAC address\n", __FUNCTION__));
+	} else
+		memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+#endif /* GET_OTP_MAC_ENABLE */
+
+	/* Ok, have the per-port tell the stack we're open for business */
+	if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
+		DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef BCMHOST_XTAL_PU_TIME_MOD
+	bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11);
+#ifdef BCM4330_CHIP
+	bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x0000F801);
+#else
+	bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001);
+#endif /* BCM4330_CHIP */
+#endif /* BCMHOST_XTAL_PU_TIME_MOD */
+
+#if defined(MULTIPLE_SUPPLICANT)
+	wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
+#endif /* MULTIPLE_SUPPLICANT */
+	DHD_MUTEX_UNLOCK();
+
+	return bus;
+
+fail:
+	dhdsdio_release(bus, osh);
+
+forcereturn:
+	DHD_MUTEX_UNLOCK();
+
+	return NULL;
+}
+
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+                     uint16 devid)
+{
+	uint8 clkctl = 0;
+	uint fn, numfn;
+	uint8 *cis[SDIOD_MAX_IOFUNCS];
+	int err = 0;
+
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+		DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+	}
+
+#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG)
+	DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+		bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */
+
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+
+
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+	if (!err)
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+		DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+		           err, DHD_INIT_CLKCTL1, clkctl));
+		goto fail;
+	}
+	numfn = bcmsdh_query_iofnum(sdh);
+	ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+	/* Make sure ALP is available before trying to read CIS */
+	SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+	                                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+	          !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+	/* Now request ALP be put on the bus */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+	                 DHD_INIT_CLKCTL2, &err);
+	OSL_DELAY(200);
+
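+	/* With INFO messages enabled, read and dump the CIS of each I/O function
+	 * (F0..numfn) for debugging.
+	 */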
+	if (DHD_INFO_ON()) {
+		for (fn = 0; fn <= numfn; fn++) {
+			if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+				break;
+			}
+			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+			if ((err = bcmsdh_cis_read(sdh, fn, cis[fn],
+			                                 SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+				MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+				break;
+			}
+#if 0
+			/* Read the F1, F2 and F3 max block size values from the CIS and write
+			 * them into the F1, F2 and F3 block size registers. There is no max
+			 * block size value for F0 in the CIS, so F0 keeps the default block
+			 * size of 32 (set earlier via iovar). The IOVAR takes only one
+			 * argument, so the function number is passed along with the value
+			 * (fn << 16).
+			 */
+			if (!fn)
+				value = F0_BLOCK_SIZE;
+			else
+				value = (cis[fn][25]<<8) | cis[fn][24] | (fn<<16);
+			printf("%s: fn=%d, value=%d\n", __FUNCTION__, fn, value);
+			if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &value,
+				sizeof(value), TRUE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__,
+					"sd_blocksize"));
+			}
+#endif
+#ifdef DHD_DEBUG
+			if (DHD_INFO_ON()) {
+				dhd_dump_cis(fn, cis[fn]);
+			}
+#endif /* DHD_DEBUG */
+		}
+		while (fn-- > 0) {
+			ASSERT(cis[fn]);
+			MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+		}
+	}
+#if 0
+	if (dhd_conf_set_blksize(sdh)) {
+		bus->blocksize = 0;
+	}
+#endif
+	if (err) {
+		DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+		goto fail;
+	}
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n",
+		bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
+#endif /* DHD_DEBUG */
+
+
+	bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+	if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+		DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+		           __FUNCTION__, bus->sih->chip));
+		goto fail;
+	}
+
+	if (bus->sih->buscorerev >= 12)
+		dhdsdio_clk_kso_init(bus);
+	else
+		bus->kso = TRUE;
+
+	if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) {
+	}
+
+	si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+	/* Get info on the ARM and SOCRAM cores... */
+	if (!DHD_NOPMU(bus)) {
+		if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+			bus->armrev = si_corerev(bus->sih);
+		} else {
+			DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+		} else {
+			/* cr4 has a different way to find the RAM size from TCM's */
+			if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+			/* also populate base address */
+			switch ((uint16)bus->sih->chip) {
+			case BCM4335_CHIP_ID:
+			case BCM4339_CHIP_ID:
+			case BCM43349_CHIP_ID:
+				bus->dongle_ram_base = CR4_4335_RAM_BASE;
+				break;
+			case BCM4350_CHIP_ID:
+			case BCM4354_CHIP_ID:
+			case BCM4358_CHIP_ID:
+			case BCM43569_CHIP_ID:
+			case BCM4371_CHIP_ID:
+				bus->dongle_ram_base = CR4_4350_RAM_BASE;
+				break;
+			case BCM4360_CHIP_ID:
+				bus->dongle_ram_base = CR4_4360_RAM_BASE;
+				break;
+			CASE_BCM4345_CHIP:
+				bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* from 4345C0 */
+					? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
+				break;
+			case BCM4349_CHIP_GRPID:
+				/* RAM base changed from 4349c0 (revid=9) onwards */
+				bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
+					CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9);
+				break;
+			case BCM4364_CHIP_ID:
+				bus->dongle_ram_base = CR4_4364_RAM_BASE;
+				break;
+			case BCM4347_CHIP_GRPID:
+				bus->dongle_ram_base = CR4_4347_RAM_BASE;
+				break;
+			case BCM4362_CHIP_ID:
+				bus->dongle_ram_base = CR4_4362_RAM_BASE;
+				break;
+			default:
+				bus->dongle_ram_base = 0;
+				DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+				           __FUNCTION__, bus->dongle_ram_base));
+			}
+		}
+		bus->ramsize = bus->orig_ramsize;
+		if (dhd_dongle_ramsize)
+			dhd_dongle_setramsize(bus, dhd_dongle_ramsize);
+
+		DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+		           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+		bus->srmemsize = si_socram_srmem_size(bus->sih);
+	}
+
+	/* ...but normally deal with the SDPCMDEV core */
+	if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+	    !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+		DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->sdpcmrev = si_corerev(bus->sih);
+
+	/* Set core control so an SDIO reset does a backplane reset */
+	OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+	bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
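+	/* For SDIOD core rev >= 4 in RXDATAINT mode 1, route the transmit-data-available
+	 * indication through the alternate control bit in corecontrol.
+	 */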
+	if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		(bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1))
+	{
+		uint32 val;
+
+		val = R_REG(osh, &bus->regs->corecontrol);
+		val &= ~CC_XMTDATAAVAIL_MODE;
+		val |= CC_XMTDATAAVAIL_CTRL;
+		W_REG(osh, &bus->regs->corecontrol, val);
+	}
+
+
+	pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	/* Setting default Glom size */
+	bus->txglomsize = SDPCM_DEFGLOM_SIZE;
+
+	return TRUE;
+
+fail:
+	if (bus->sih != NULL) {
+		si_detach(bus->sih);
+		bus->sih = NULL;
+	}
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
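+	/* Size the control receive buffer for the largest control frame plus the SDPCM
+	 * header, rounded up for bus alignment.
+	 */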
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already allocated above */
+		if (bus->rxblen)
+			DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
+		goto fail;
+	}
+	/* Allocate buffer to membuf */
+	bus->membuf = MALLOC(osh, MAX_MEM_BUF);
+	if (bus->membuf == NULL) {
+		DHD_ERROR(("%s: MALLOC of %d-byte membuf failed\n",
+			__FUNCTION__, MAX_MEM_BUF));
+		if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+			MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+			bus->databuf = NULL;
+		}
+		/* release rxbuf which was already allocated above */
+		if (bus->rxblen)
+			DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
+		goto fail;
+	}
+	memset(bus->membuf, 0, MAX_MEM_BUF);
+
+	/* Align the buffer */
+	if ((uintptr)bus->databuf % DHD_SDALIGN)
+		bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+	else
+		bus->dataptr = bus->databuf;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	int32 fnum;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->_srenab = FALSE;
+
+#ifdef SDTEST
+	dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+	/* set PMU minimum resource mask to default */
+	dhd_bus_set_default_min_res_mask(bus);
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	bus->sleeping = FALSE;
+	bus->rxflow = FALSE;
+	bus->prev_rxlim_hit = 0;
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = (int32)dhd_idletime;
+	bus->idleclock = DHD_IDLE_ACTIVE;
+
+	/* Query the SD clock speed */
+	if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+	                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+		bus->sd_divisor = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_divisor", bus->sd_divisor));
+	}
+
+	/* Query the SD bus mode */
+	if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+	                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+		bus->sd_mode = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_mode", bus->sd_mode));
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	fnum = 2;
+	if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+	                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+		bus->blocksize = 0;
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+		dhdsdio_tune_fifoparam(bus);
+	}
+	bus->roundup = MIN(max_roundup, bus->blocksize);
+
+#ifdef DHDENABLE_TAILPAD
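+	/* Pre-allocate a block-size packet used to tail-pad transmit frames up to the
+	 * SDIO block size.
+	 */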
+	if (bus->pad_pkt)
+		PKTFREE(osh, bus->pad_pkt, FALSE);
+	bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE);
+	if (bus->pad_pkt == NULL)
+		DHD_ERROR(("failed to allocate padding packet\n"));
+	else {
+		int alignment_offset = 0;
+		uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt);
+		if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN)))
+			PKTPUSH(osh, bus->pad_pkt, alignment_offset);
+		PKTSETNEXT(osh, bus->pad_pkt, NULL);
+	}
+#endif /* DHDENABLE_TAILPAD */
+
+	/* Query if bus module supports packet chaining, default to use if supported */
+	if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+	                    &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+		bus->sd_rxchain = FALSE;
+	} else {
+		DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+		          __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+	}
+	bus->use_rxchain = (bool)bus->sd_rxchain;
+	bus->txinrx_thres = CUSTOM_TXINRX_THRES;
+	/* TX first in dhdsdio_readframes() */
+	bus->dotxinrx = TRUE;
+
+#ifdef PKT_STATICS
+	memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t));
+#endif
+
+	return TRUE;
+}
+
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *pfw_path, char *pnv_path,
+                          char *pclm_path, char *pconf_path)
+{
+	int ret;
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+	bus->dhd->clm_path = pclm_path;
+	bus->dhd->conf_path = pconf_path;
+
+	ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+	return ret;
+}
+
+void
+dhd_set_path_params(struct dhd_bus *bus)
+{
+	/* External conf takes precedence if specified */
+	dhd_conf_preinit(bus->dhd);
+
+	if (bus->dhd->conf_path[0] == '\0') {
+		dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
+	}
+	if (bus->dhd->clm_path[0] == '\0') {
+		dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
+	}
+#ifdef CONFIG_PATH_AUTO_SELECT
+	dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
+#endif
+
+	dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
+
+	dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
+	dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
+	dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+
+	dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->fw_path);
+	dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->nv_path);
+
+	printf("Final fw_path=%s\n", bus->fw_path);
+	printf("Final nv_path=%s\n", bus->nv_path);
+	printf("Final clm_path=%s\n", bus->dhd->clm_path);
+	printf("Final conf_path=%s\n", bus->dhd->conf_path);
+
+}
+
+void
+dhd_set_bus_params(struct dhd_bus *bus)
+{
+	if (bus->dhd->conf->dhd_poll >= 0) {
+		bus->poll = bus->dhd->conf->dhd_poll;
+		if (!bus->pollrate)
+			bus->pollrate = 1;
+		printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
+	}
+	if (bus->dhd->conf->use_rxchain >= 0) {
+		bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain;
+	}
+	if (bus->dhd->conf->txinrx_thres >= 0) {
+		bus->txinrx_thres = bus->dhd->conf->txinrx_thres;
+	}
+	if (bus->dhd->conf->txglomsize >= 0) {
+		bus->txglomsize = bus->dhd->conf->txglomsize;
+	}
+}
+
+static int
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	int ret;
+
+
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+	dhd_set_blob_support(bus->dhd, bus->fw_path);
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+
+	DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
+		__FUNCTION__, bus->fw_path, bus->nv_path));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	/* Download the firmware */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	dhd_set_path_params(bus);
+	dhd_set_bus_params(bus);
+
+	ret = _dhdsdio_download_firmware(bus);
+
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+	bool dongle_isolation = FALSE;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhd_detach(bus->dhd);
+		}
+
+		/* De-register interrupt handler */
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_intr_dereg(bus->sdh);
+
+		if (bus->dhd) {
+			dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+#ifdef DHDENABLE_TAILPAD
+		if (bus->pad_pkt)
+			PKTFREE(osh, bus->pad_pkt, FALSE);
+#endif /* DHDENABLE_TAILPAD */
+
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->rxbuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+		bus->rxctl = bus->rxbuf = NULL;
+		bus->rxlen = 0;
+	}
+
+	if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+		bus->databuf = NULL;
+	}
+
+	if (bus->membuf) {
+		MFREE(osh, bus->membuf, MAX_MEM_BUF);
+		bus->membuf = NULL;
+	}
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+		return;
+
+	if (bus->sih) {
+		/* On Win10, the system can BSOD when "sysprep" is used to create an OS image;
+		 * skipping this step avoids the BSOD.
+		 */
+#if !defined(BCMLXSDMMC)
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		}
+		if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
+			si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+		si_detach(bus->sih);
+		bus->sih = NULL;
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_MUTEX_LOCK();
+	if (bus) {
+		ASSERT(bus->dhd);
+		/* Advertise bus remove during rmmod */
+		dhdsdio_advertise_bus_remove(bus->dhd);
+		dhdsdio_release(bus, bus->dhd->osh);
+	}
+	DHD_MUTEX_UNLOCK();
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static int
+dhdsdio_suspend(void *context)
+{
+	int ret = 0;
+#ifdef SUPPORT_P2P_GO_PS
+	int wait_time = 0;
+#endif /* SUPPORT_P2P_GO_PS */
+
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+	unsigned long flags;
+
+	DHD_ERROR(("%s Enter\n", __FUNCTION__));
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("bus not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+
+	if (bus->dhd->up == FALSE) {
+		return BCME_OK;
+	}
+
+	DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
+		DHD_ERROR(("bus is not in a ready state to suspend\n"));
+		DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+		return BCME_ERROR;
+	}
+	DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+	if (bus->dhd->dongle_reset) {
+		DHD_ERROR(("Dongle is in reset state.\n"));
+		return -EIO;
+	}
+
+	DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	bus->dhd->busstate = DHD_BUS_SUSPEND;
+	if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
+		DHD_ERROR(("Tx request has not completed\n"));
+		bus->dhd->busstate = DHD_BUS_DATA;
+		DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+		return -EBUSY;
+	}
+	DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+	DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
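+	/* Check wakelocks and, when sleep-auto is enabled, give the bus up to 'idletime'
+	 * watchdog periods to enter sleep; a held wakelock or a bus that stays awake
+	 * vetoes the suspend.
+	 */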
+#ifdef SUPPORT_P2P_GO_PS
+	if (bus->idletime > 0) {
+		wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms);
+	}
+#endif /* SUPPORT_P2P_GO_PS */
+	ret = dhd_os_check_wakelock(bus->dhd);
+#ifdef SUPPORT_P2P_GO_PS
+	// terence 20141124: fix for suspend issue
+	if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) {
+		if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) {
+			if (!bus->sleeping) {
+				ret = 1;
+			}
+		}
+	}
+#endif /* SUPPORT_P2P_GO_PS */
+
+	DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	if (ret) {
+		bus->dhd->busstate = DHD_BUS_DATA;
+	}
+	DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+	return ret;
+}
+
+static int
+dhdsdio_resume(void *context)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+	ulong flags;
+
+	DHD_ERROR(("%s Enter\n", __FUNCTION__));
+
+	if (bus->dhd->up == FALSE) {
+		return BCME_OK;
+	}
+
+	DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+	DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+#if defined(OOB_INTR_ONLY)
+	if (dhd_os_check_if_up(bus->dhd))
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif 
+
+	DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+	DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+	bus->dhd->busstate = DHD_BUS_DATA;
+	dhd_os_busbusy_wake(bus->dhd);
+	DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return 0;
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+	dhdsdio_probe,
+	dhdsdio_disconnect,
+	dhdsdio_suspend,
+	dhdsdio_resume
+};
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_unregister();
+}
+
+#if defined(BCMLXSDMMC)
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+int dhd_bus_reg_sdio_notify(void* semaphore)
+{
+	return bcmsdh_reg_sdio_notify(semaphore);
+}
+
+void dhd_bus_unreg_sdio_notify(void)
+{
+	bcmsdh_unreg_sdio_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *ularray = NULL;
+
+	DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+	/* Download image */
+	while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written at address 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)dlarray));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), MEMBLOCK);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+	if (offset < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+			goto err;
+		}
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		if (!(ularray = MALLOC(bus->dhd->osh, bus->ramsize))) {
+			bcmerror = BCME_NOMEM;
+			goto err;
+		}
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	if (ularray)
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+	uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
+	uint memblock_size = MEMBLOCK;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	unsigned long initial_jiffies = 0;
+	uint firmware_sz = 0;
+#endif
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL) {
+		printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
+		goto err;
+	}
+
+	/* Update the dongle image download block size depending on the F1 block size */
+	if (sd_f1_blocksize == 512)
+		memblock_size = MAX_MEMBLOCK;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+			memblock_size));
+		goto err;
+	}
+	if (dhd_msg_level & DHD_TRACE_VAL) {
+		memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+		if (memptr_tmp == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+			goto err;
+		}
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	initial_jiffies = jiffies;
+#endif
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) {
+		// terence 20150412: fix for firmware download failure
+		if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
+				bus->dhd->conf->chip == BCM43341_CHIP_ID) {
+			if (len % 64 != 0) {
+				memset(memptr+len, 0, 64 - len%64);
+				len += (64 - len%64);
+			}
+		}
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written at address 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, memblock_size, offset));
+			goto err;
+		}
+
+		if (dhd_msg_level & DHD_TRACE_VAL) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, memptr_tmp, len);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+				        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+			if (memcmp(memptr_tmp, memptr, len)) {
+				DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
+				goto err;
+			} else
+				DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+		}
+
+		offset += memblock_size;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+		firmware_sz += len;
+#endif
+	}
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	DHD_ERROR(("Firmware download time for %u bytes: %u ms\n",
+			firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies)));
+#endif
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN);
+	if (dhd_msg_level & DHD_TRACE_VAL) {
+		if (memptr_tmp)
+			MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
+	}
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+#ifdef DHD_UCODE_DOWNLOAD
+/* Currently supported only for the chips in which ucode RAM is AXI addressable */
+static uint32
+dhdsdio_ucode_base(struct dhd_bus *bus)
+{
+	uint32 ucode_base = 0;
+
+	switch ((uint16)bus->sih->chip) {
+	case BCM43012_CHIP_ID:
+		ucode_base = 0xE8020000;
+		break;
+	default:
+		DHD_ERROR(("%s: Unsupported!\n", __func__));
+		break;
+	}
+
+	return ucode_base;
+}
+
+static int
+dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	uint32 ucode_base;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+	uint memblock_size = MEMBLOCK;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	unsigned long initial_jiffies = 0;
+	uint firmware_sz = 0;
+#endif
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, ucode_path));
+
+	ucode_base = dhdsdio_ucode_base(bus);
+
+	image = dhd_os_open_image(ucode_path);
+	if (image == NULL)
+		goto err;
+
+	/* Update the dongle image download block size depending on the F1 block size */
+	if (sd_f1_blocksize == 512)
+		memblock_size = MAX_MEMBLOCK;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+			memblock_size));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	initial_jiffies = jiffies;
+#endif
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, (ucode_base + offset), memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, memblock_size, offset));
+			goto err;
+		}
+
+		offset += memblock_size;
+#ifdef DHD_DEBUG_DOWNLOADTIME
+		firmware_sz += len;
+#endif
+	}
+
+#ifdef DHD_DEBUG_DOWNLOADTIME
+	DHD_ERROR(("ucode download time for %u bytes: %u ms\n",
+			firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies)));
+#endif
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+void
+dhd_bus_ucode_download(struct dhd_bus *bus)
+{
+	uint32 shaddr = 0, shdata = 0;
+
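+	/* The dongle requests a ucode download by writing a magic value into the last
+	 * word of RAM; read it, download the ucode image if requested, then write back
+	 * the completion value.
+	 */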
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&shdata, 4);
+
+	DHD_TRACE(("%s: shdata:[0x%08x :0x%08x]\n", __func__, shaddr, shdata));
+
+	if (shdata == UCODE_DOWNLOAD_REQUEST)
+	{
+		DHD_ERROR(("%s: Received ucode download request!\n", __func__));
+
+		/* Download the ucode */
+		if (!dhd_get_ucode_path(bus->dhd)) {
+			DHD_ERROR(("%s: bus->uc_path not set!\n", __func__));
+			return;
+		}
+		dhdsdio_download_ucode_file(bus, dhd_get_ucode_path(bus->dhd));
+
+		DHD_ERROR(("%s: Ucode downloaded successfully!\n", __func__));
+
+		shdata = UCODE_DOWNLOAD_COMPLETE;
+		dhdsdio_membytes(bus, TRUE, shaddr, (uint8 *)&shdata, 4);
+	}
+}
+
+#endif /* DHD_UCODE_DOWNLOAD */
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+
+	/* Open the NVRAM file when a path is given; otherwise NVRAM is fetched from UEFI */
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL) {
+			printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
+			goto err;
+		}
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Get NVRAM from the image, or from UEFI when image == NULL */
+	len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+
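+	/* process_nvram_vars() converts the text into a NUL-separated variable list;
+	 * pad the length to a 4-byte boundary and add a terminating NUL before download.
+	 */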
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+		len = process_nvram_vars(bufp, len);
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	} else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_SDIO_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdsdio_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		} else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdsdio_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		} else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External nvram takes precedence if specified */
+	if (dhdsdio_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdsdio_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	int status;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete_fn, handle);
+
+	return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle, int max_retry)
+{
+	int ret;
+	int i = 0;
+	int retries = 0;
+	bcmsdh_info_t *sdh;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	sdh = bus->sdh;
+	do {
+		ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes,
+			pkt, complete_fn, handle);
+
+		bus->f2txdata++;
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret == BCME_NODEVICE) {
+			DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+		} else if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
+				__FUNCTION__, ret));
+			bus->tx_sderrs++;
+			bus->f1regdata++;
+			bus->dhd->tx_errors++;
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+				SFC_WF_TERM, NULL);
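+			/* Poll the write-frame byte count registers until they read zero,
+			 * i.e. the aborted frame has been flushed.
+			 */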
+			for (i = 0; i < READ_FRM_CNT_RETRIES; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI,
+					NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO,
+					NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+		}
+	} while ((ret < 0) && retrydata && ++retries < max_retry);
+
+	return ret;
+}
+
+uint8
+dhd_bus_is_ioready(struct dhd_bus *bus)
+{
+	uint8 enable;
+	bcmsdh_info_t *sdh;
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+	sdh = bus->sdh;
+	return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL));
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+const void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+	return (const void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+	return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+}
+
+void
+dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val)
+{
+	bus->dotxinrx = val;
+}
+
+/*
+ * dhdsdio_advertise_bus_cleanup advertises to the other bus user contexts (Tx, Rx,
+ * IOVAR, WD, etc.) that cleanup is in progress and waits for them to exit gracefully.
+ * Before marking the bus busy, every bus user checks whether busstate is DHD_BUS_DOWN
+ * or DHD_BUS_DOWN_IN_PROGRESS and, if so, exits without marking dhd_bus_busy_state
+ * as BUSY.
+ */
+static void
+dhdsdio_advertise_bus_cleanup(dhd_pub_t	 *dhdp)
+{
+	unsigned long flags;
+	int timeleft;
+
+	DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+	dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+	DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+	if ((timeleft == 0) || (timeleft == 1)) {
+		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+				__FUNCTION__, dhdp->dhd_bus_busy_state));
+		ASSERT(0);
+	}
+
+	return;
+}
+
+static void
+dhdsdio_advertise_bus_remove(dhd_pub_t	 *dhdp)
+{
+	unsigned long flags;
+	int timeleft;
+
+	DHD_LINUX_GENERAL_LOCK(dhdp, flags);
+	dhdp->busstate = DHD_BUS_REMOVE;
+	DHD_LINUX_GENERAL_UNLOCK(dhdp, flags);
+
+	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+	if ((timeleft == 0) || (timeleft == 1)) {
+		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+				__FUNCTION__, dhdp->dhd_bus_busy_state));
+		ASSERT(0);
+	}
+
+	return;
+}
+
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	dhd_bus_t *bus;
+	unsigned long flags;
+
+	bus = dhdp->bus;
+
+	if (flag == TRUE) {
+		if (!bus->dhd->dongle_reset) {
+			DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+			dhdsdio_advertise_bus_cleanup(bus->dhd);
+			dhd_os_sdlock(dhdp);
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop comes before ifconfig down */
+			dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			/* Expect app to have torn down any connection before calling */
+			/* Stop the bus, disable F2 */
+			dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+			/* Clean up any pending IRQ */
+			dhd_enable_oob_intr(bus, FALSE);
+			bcmsdh_oob_intr_set(bus->sdh, FALSE);
+			bcmsdh_oob_intr_unregister(bus->sdh);
+#endif 
+
+			/* Clean tx/rx buffer pointers, detach from the dongle */
+			dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+			bus->dhd->dongle_reset = TRUE;
+			bus->dhd->up = FALSE;
+			dhd_txglom_enable(dhdp, FALSE);
+			dhd_os_sdunlock(dhdp);
+
+			DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+
+			printf("%s:  WLAN OFF DONE\n", __FUNCTION__);
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_SDIO_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+
+		printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__);
+
+		if (bus->dhd->dongle_reset) {
+			/* Turn on WLAN */
+			dhd_os_sdlock(dhdp);
+			/* Reset SD client */
+			bcmsdh_reset(bus->sdh);
+
+			/* Attempt to re-attach & download */
+			if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+				(uint32 *)SI_ENUM_BASE,
+				bus->cl_devid)) {
+
+				DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
+				bus->dhd->busstate = DHD_BUS_DOWN;
+				DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
+				/* Attempt to download binary to the dongle */
+				if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+				    dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
+
+					/* Re-init bus, enable F2 transfer */
+					bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+					if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+						dhd_enable_oob_intr(bus, TRUE);
+						bcmsdh_oob_intr_register(bus->sdh,
+							dhdsdio_isr, bus);
+						bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#elif defined(FORCE_WOWLAN)
+						dhd_enable_oob_intr(bus, TRUE);
+#endif 
+
+						bus->dhd->dongle_reset = FALSE;
+						bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+						/* Restore flow control  */
+						dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif 
+						dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+						DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+					} else {
+						dhd_bus_stop(bus, FALSE);
+						dhdsdio_release_dongle(bus, bus->dhd->osh,
+							TRUE, FALSE);
+					}
+				} else {
+					DHD_ERROR(("%s Failed to download binary to the dongle\n",
+						__FUNCTION__));
+					if (bus->sih != NULL) {
+						si_detach(bus->sih);
+						bus->sih = NULL;
+					}
+					bcmerror = BCME_SDIO_ERROR;
+				}
+			} else
+				bcmerror = BCME_SDIO_ERROR;
+
+			dhd_os_sdunlock(dhdp);
+		} else {
+			printf("%s called when dongle is not in reset\n",
+				__FUNCTION__);
+			printf("Will call dhd_bus_start instead\n");
+			dhd_bus_resume(dhdp, 1);
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+			dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih->chip); // terence 20120615: fix for OOB initial issue
+#endif
+			if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+				DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+					__FUNCTION__, bcmerror));
+		}
+	}
+
+#ifdef PKT_STATICS
+	memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t));
+#endif
+	return bcmerror;
+}
+
+int dhd_bus_suspend(dhd_pub_t *dhdpub)
+{
+	return bcmsdh_stop(dhdpub->bus->sdh);
+}
+
+int dhd_bus_resume(dhd_pub_t *dhdpub, int stage)
+{
+	return bcmsdh_start(dhdpub->bus->sdh, stage);
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	if (bus && bus->sih)
+		return bus->sih->chip;
+	else
+		return 0;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	if (bus && bus->sih)
+		return bus->sih->chiprev;
+	else
+		return 0;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chippkg;
+}
+
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+	*bus_type = bus->bus;
+	*bus_num = bus->bus_num;
+	*slot_num = bus->slot_num;
+	return 0;
+}
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+	return dhdsdio_membytes(bus, set, address, data, size);
+}
+
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
+									char *pclm_path, char *pconf_path)
+{
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+	bus->dhd->clm_path = pclm_path;
+	bus->dhd->conf_path = pconf_path;
+}
+
+int
+dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
+{
+	dhd_bus_t *bus = dhd->bus;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	if (sleep) {
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit) {
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+			return BCME_BUSY;
+		}
+		/* Turn off our contribution to the HT clock request */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	} else {
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+		/* Make sure we have SD bus access */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	}
+	return BCME_OK;
+}
+
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bool wlfc_enabled = FALSE;
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl so that a packet newly arriving from the network
+		 * stack cannot reference a flushed packet.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		/* Clear the data packet queues */
+		pktq_flush(dhdp->osh, &bus->txq, TRUE);
+	}
+}
+
+#ifdef BCMSDIO
+int
+dhd_sr_config(dhd_pub_t *dhd, bool on)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (!bus->_srenab)
+		return -1;
+
+	return dhdsdio_clk_devsleep_iovar(bus, on);
+}
+
+uint16
+dhd_get_chipid(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (bus && bus->sih)
+		return (uint16)bus->sih->chip;
+	else
+		return 0;
+}
+#endif /* BCMSDIO */
+
+#ifdef DEBUGGER
+uint32 dhd_sdio_reg_read(void *h, uint32 addr)
+{
+	uint32 rval;
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	rval = bcmsdh_reg_read(bus->sdh, addr, 4);
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rval;
+}
+
+void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val)
+{
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, val);
+
+	dhd_os_sdunlock(bus->dhd);
+}
+
+#endif /* DEBUGGER */
+
+
+#if defined(BT_OVER_SDIO)
+uint8 dhd_bus_cfg_read(void *h, uint fun_num, uint32 addr, int *err)
+{
+	uint8 intrd;
+	dhd_pub_t *dhdp = (dhd_pub_t *)h;
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	dhd_os_sdlock(bus->dhd);
+
+	intrd = bcmsdh_cfg_read(bus->sdh, fun_num, addr, err);
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return intrd;
+} EXPORT_SYMBOL(dhd_bus_cfg_read);
+
+void dhd_bus_cfg_write(void *h, uint fun_num, uint32 addr, uint8 val, int *err)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)h;
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	dhd_os_sdlock(bus->dhd);
+
+	bcmsdh_cfg_write(bus->sdh, fun_num, addr, val, err);
+
+	dhd_os_sdunlock(bus->dhd);
+
+} EXPORT_SYMBOL(dhd_bus_cfg_write);
+
+static int
+extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value)
+{
+	char field [8];
+
+	strncpy(field, line + start_pos, num_chars);
+	field [num_chars] = '\0';
+
+	return (sscanf (field, "%hX", value) == 1);
+}
+
+static int
+read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, uint16 * hi_addr,
+	uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes)
+{
+	int		str_len;
+	uint16	num_data_bytes, addr, data_pos, type, w, i;
+	uint32	abs_base_addr32 = 0;
+	*num_bytes = 0;
+
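+	/* Parse Intel HEX records from the BT firmware file: <len><addr><type><data...>,
+	 * handling extended segment/linear address records to form the absolute
+	 * destination address, until a data record yields bytes to write.
+	 */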
+	while (!*num_bytes)
+	{
+		str_len = dhd_os_gets_image(bus->dhd, line, BTFW_MAX_STR_LEN, file);
+
+		DHD_TRACE(("%s: Len :0x%x  %s\n", __FUNCTION__, str_len, line));
+
+		if (str_len == 0) {
+			break;
+		} else if (str_len > 9) {
+			extract_hex_field(line, 1, 2, &num_data_bytes);
+			extract_hex_field(line, 3, 4, &addr);
+			extract_hex_field(line, 7, 2, &type);
+
+			data_pos = 9;
+			for (i = 0; i < num_data_bytes; i++) {
+				extract_hex_field(line, data_pos, 2, &w);
+				data_bytes [i] = (uint8)(w & 0x00FF);
+				data_pos += 2;
+			}
+
+			if (type == BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS) {
+				*hi_addr = (data_bytes [0] << 8) | data_bytes [1];
+				*addr_mode = BTFW_ADDR_MODE_EXTENDED;
+			} else if (type == BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS) {
+				*hi_addr = (data_bytes [0] << 8) | data_bytes [1];
+				*addr_mode = BTFW_ADDR_MODE_SEGMENT;
+			} else if (type == BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS) {
+				abs_base_addr32 = (data_bytes [0] << 24) | (data_bytes [1] << 16) |
+					(data_bytes [2] << 8) | data_bytes [3];
+				*addr_mode = BTFW_ADDR_MODE_LINEAR32;
+			} else if (type == BTFW_HEX_LINE_TYPE_DATA) {
+				*dest_addr = addr;
+				if (*addr_mode == BTFW_ADDR_MODE_EXTENDED)
+					*dest_addr += (*hi_addr << 16);
+				else if (*addr_mode == BTFW_ADDR_MODE_SEGMENT)
+					*dest_addr += (*hi_addr << 4);
+				else if (*addr_mode == BTFW_ADDR_MODE_LINEAR32)
+					*dest_addr += abs_base_addr32;
+				*num_bytes = num_data_bytes;
+			}
+		}
+	}
+	return (*num_bytes > 0);
+}
+
+static int
+_dhdsdio_download_btfw(struct dhd_bus *bus)
+{
+	int	bcm_error = -1;
+	void	*image = NULL;
+	uint8	*mem_blk = NULL, *mem_ptr = NULL, *data_ptr = NULL;
+
+
+	uint32 offset_addr = 0, offset_len = 0, bytes_to_write = 0;
+
+	char	*line = NULL;
+	uint32	dest_addr = 0, num_bytes;
+	uint16	hiAddress = 0;
+	uint32	start_addr, start_data, end_addr, end_data, i, index, pad;
+	uint32	bt2wlan_pwrup_adr;
+
+	int	addr_mode		= BTFW_ADDR_MODE_EXTENDED;
+
+	/* Out immediately if no image to download */
+	if ((bus->btfw_path == NULL) || (bus->btfw_path[0] == '\0')) {
+		return 0;
+	}
+
+	image = dhd_os_open_image(bus->btfw_path);
+	if (image == NULL)
+		goto err;
+
+	mem_ptr = mem_blk = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN);
+	if (mem_blk == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+			BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN));
+		goto err;
+	}
+	if ((uint32)(uintptr)mem_blk % DHD_SDALIGN)
+		mem_ptr += (DHD_SDALIGN - ((uint32)(uintptr)mem_blk % DHD_SDALIGN));
+
+	data_ptr = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE - 8);
+	if (data_ptr == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
+			BTFW_DOWNLOAD_BLK_SIZE - 8));
+		goto err;
+	}
+	/* Write to BT register to hold WLAN wake high during BT FW download */
+	bt2wlan_pwrup_adr = BTMEM_OFFSET + BT2WLAN_PWRUP_ADDR;
+	bcmsdh_reg_write(bus->sdh, bt2wlan_pwrup_adr, 4, BT2WLAN_PWRUP_WAKE);
+	/*
+	 * Wait for at least 2 msec for the clock to be ready/available.
+	 */
+	OSL_DELAY(2000);
+
+	line = MALLOC(bus->dhd->osh, BTFW_MAX_STR_LEN);
+	if (line == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+			__FUNCTION__, BTFW_MAX_STR_LEN));
+		goto err;
+	}
+	memset(line, 0, BTFW_MAX_STR_LEN);
+
+	while (read_more_btbytes (bus, image, line, &addr_mode, &hiAddress, &dest_addr,
+		data_ptr, &num_bytes)) {
+
+		DHD_TRACE(("read %d bytes at address %08X\n", num_bytes, dest_addr));
+
+		start_addr = BTMEM_OFFSET + dest_addr;
+		index = 0;
+
+		/* Make sure the start address is 4-byte aligned to avoid alignment issues
+		 * with SD host controllers
+		 */
+		if (!ISALIGNED(start_addr, 4)) {
+			pad = start_addr % 4;
+			start_addr = ROUNDDN(start_addr, 4);
+			start_data = bcmsdh_reg_read(bus->sdh, start_addr, 4);
+			for (i = 0; i < pad; i++, index++) {
+			    mem_ptr[index] = (uint8)((uint8 *)&start_data)[i];
+			}
+		}
+		bcopy(data_ptr, &(mem_ptr[index]), num_bytes);
+		index += num_bytes;
+
+		/* Make sure the length is a multiple of 4 bytes to avoid alignment issues
+		 * with SD host controllers
+		 */
+		end_addr = start_addr + index;
+		if (!ISALIGNED(end_addr, 4)) {
+			end_addr = ROUNDDN(end_addr, 4);
+			end_data = bcmsdh_reg_read(bus->sdh, end_addr, 4);
+			for (i = (index % 4); i < 4; i++, index++) {
+				mem_ptr[index] = (uint8)((uint8 *)&end_data)[i];
+			}
+		}
+
+		offset_addr = start_addr & 0xFFF;
+		offset_len =  offset_addr + index;
+		if (offset_len <= 0x1000) {
+			bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, index);
+			if (bcm_error) {
+				DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcm_error, num_bytes, start_addr));
+				goto err;
+			}
+		} else {
+			bytes_to_write = 0x1000 - offset_addr;
+			bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr,
+				bytes_to_write);
+			if (bcm_error) {
+				DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcm_error, num_bytes, start_addr));
+				goto err;
+			}
+
+			OSL_DELAY(10000);
+
+			bcm_error = dhdsdio_membytes(bus, TRUE, (start_addr + bytes_to_write),
+				(mem_ptr + bytes_to_write), (index - bytes_to_write));
+			if (bcm_error) {
+				DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+					__FUNCTION__, bcm_error, num_bytes, start_addr));
+				goto err;
+			}
+		}
+		memset(line, 0, BTFW_MAX_STR_LEN);
+	}
+
+	bcm_error = 0;
+err:
+	if (mem_blk)
+		MFREE(bus->dhd->osh, mem_blk, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN);
+
+	if (data_ptr)
+		MFREE(bus->dhd->osh, data_ptr, BTFW_DOWNLOAD_BLK_SIZE - 8);
+
+	if (line)
+		MFREE(bus->dhd->osh, line, BTFW_MAX_STR_LEN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcm_error;
+}
+
+static int
+dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	int ret;
+
+	DHD_TRACE(("%s: btfw path=%s\n",
+		__FUNCTION__, bus->btfw_path));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	dhd_os_sdlock(bus->dhd);
+
+	/* Download the firmware */
+	ret = _dhdsdio_download_btfw(bus);
+
+	dhd_os_sdunlock(bus->dhd);
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+
+	return ret;
+}
+
+int
+dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh,
+                          char *pbtfw_path)
+{
+	int ret;
+
+	bus->btfw_path = pbtfw_path;
+
+	ret = dhdsdio_download_btfw(bus, osh, bus->sdh);
+
+	return ret;
+}
+#endif /* defined (BT_OVER_SDIO) */
+
+void
+dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
+{
+	trap_t *tr = &bus->dhd->last_trap_info;
+
+	bcm_bprintf(strbuf,
+		"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+		"lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+		"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
+		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
+		ltoh32(bus->dongle_trap_addr),
+		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
+		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7));
+
+}
+
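+/*
+ * Sends a fully built frame to the dongle over SDIO function 2 and, on success,
+ * advances the bus transmit sequence number modulo SDPCM_SEQUENCE_WRAP.
+ */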
+static int
+dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len)
+{
+	int ret = -1;
+
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(((dhd_bus_t*)bus)->sdh),
+		SDIO_FUNC_2, F2SYNC, frame, len, NULL, NULL, NULL, TXRETRIES);
+
+	if (ret == BCME_OK)
+		((dhd_bus_t*)bus)->tx_seq = (((dhd_bus_t*)bus)->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+	return ret;
+}
+
+/* Function to set the min res mask depending on the chip ID used */
+bool
+dhd_bus_set_default_min_res_mask(struct dhd_bus *bus)
+{
+	if ((bus == NULL) || (bus->sih == NULL)) {
+		DHD_ERROR(("%s(): Invalid Arguments \r\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	switch (bus->sih->chip) {
+	case BCM4339_CHIP_ID:
+		bcmsdh_reg_write(bus->sdh, SI_ENUM_BASE + 0x618, 4, 0x3fcaf377);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__));
+			return FALSE;
+		}
+		break;
+
+	case BCM43012_CHIP_ID:
+		bcmsdh_reg_write(bus->sdh,
+			si_get_pmu_reg_addr(bus->sih, OFFSETOF(pmuregs_t, min_res_mask)),
+			4, DEFAULT_43012_MIN_RES_MASK);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__));
+			return FALSE;
+		}
+		break;
+
+	default:
+		DHD_ERROR(("%s: Unhandled chip id\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
+/* Function to reset PMU registers */
+void
+dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp)
+{
+	struct dhd_bus *bus = dhdp->bus;
+	bcmsdh_reg_write(bus->sdh, si_get_pmu_reg_addr(bus->sih,
+		OFFSETOF(pmuregs_t, swscratch)), 4, 0x0);
+	if (bcmsdh_regfail(bus->sdh)) {
+		DHD_ERROR(("%s:%d Resetting PMU swscratch register failed\n", __FUNCTION__, __LINE__));
+	}
+}
+
+
+#ifdef DHD_ULP
+/* Function to disable console messages on entering ULP mode */
+void
+dhd_bus_ulp_disable_console(dhd_pub_t *dhdp)
+{
+#ifdef DHD_DEBUG
+	DHD_ERROR(("Flushing and disabling console messages\n"));
+
+	/* Save the console print interval */
+	dhd_ulp_save_console_interval(dhdp);
+
+	/* Flush the console buffer before disabling */
+	dhdsdio_readconsole(dhdp->bus);
+	dhd_console_ms = 0;
+#endif /* DHD_DEBUG */
+}
+
+/* Function for redownloading firmware */
+static int
+dhd_bus_ulp_reinit_fw(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+
+	/* After a firmware redownload the tx/rx sequence numbers are reset, so these
+	 * values are reset on the DHD side as well. tx_max is initially set to 4 and
+	 * is later updated by the FW.
+	 */
+	bus->tx_seq = bus->rx_seq = 0;
+	bus->tx_max = 4;
+
+	if (dhd_bus_download_firmware(bus, bus->dhd->osh,
+		bus->fw_path, bus->nv_path) >= 0) {
+
+		/* Re-init bus, enable F2 transfer */
+		bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+		if (bcmerror == BCME_OK) {
+			bus->dhd->up = TRUE;
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+
+			dhd_ulp_set_ulp_state(bus->dhd, DHD_ULP_READY);
+#if defined(OOB_INTR_ONLY)
+			dhd_enable_oob_intr(bus, TRUE);
+			bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+#ifdef DHD_DEBUG
+		/* Re-enable the console messages on FW redownload to default value */
+		dhd_ulp_restore_console_interval(bus->dhd);
+#endif /* DHD_DEBUG */
+		} else {
+			DHD_ERROR(("bus init failed\n"));
+			dhd_bus_stop(bus, FALSE);
+			dhdsdio_release_dongle(bus, bus->dhd->osh,
+				TRUE, FALSE);
+		}
+	} else
+		bcmerror = BCME_SDIO_ERROR;
+
+	return bcmerror;
+}
+#endif /* DHD_ULP */
+
+int
+dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
+{
+	int bcmerror = 0;
+	struct dhd_bus *bus = dhdp->bus;
+
+	if (read) {
+		*data = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+	} else {
+		bcmsdh_reg_write(bus->sdh, addr, size, *data);
+	}
+
+	if (bcmsdh_regfail(bus->sdh))
+		bcmerror = BCME_SDIO_ERROR;
+
+	return bcmerror;
+}
+
+int dhd_get_idletime(dhd_pub_t *dhd)
+{
+	return dhd->bus->idletime;
+}
+
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_bus_get_wakecount(dhd_pub_t *dhd)
+{
+	if (!dhd->bus) {
+		return NULL;
+	}
+	return &dhd->bus->wake_counts;
+}
+int
+dhd_bus_get_bus_wake(dhd_pub_t *dhd)
+{
+	return bcmsdh_set_get_wake(dhd->bus->sdh, 0);
+}
+#endif /* DHD_WAKE_STATUS */
diff --git a/drivers/net/wireless/bcmdhd/dhd_static_buf.c b/drivers/net/wireless/bcmdhd/dhd_static_buf.c
new file mode 100644
index 0000000..c886997
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_static_buf.c
@@ -0,0 +1,535 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/skbuff.h>
+
+#define	DHD_STATIC_VERSION_STR		"1.579.77.41.9"
+
+#define BCMDHD_SDIO
+#define BCMDHD_PCIE
+
+enum dhd_prealloc_index {
+	DHD_PREALLOC_PROT = 0,
+#if defined(BCMDHD_SDIO)
+	DHD_PREALLOC_RXBUF = 1,
+	DHD_PREALLOC_DATABUF = 2,
+#endif
+	DHD_PREALLOC_OSL_BUF = 3,
+	DHD_PREALLOC_SKB_BUF = 4,
+	DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+	DHD_PREALLOC_WIPHY_ESCAN1 = 6,
+	DHD_PREALLOC_DHD_INFO = 7,
+	DHD_PREALLOC_DHD_WLFC_INFO = 8,
+#ifdef BCMDHD_PCIE
+	DHD_PREALLOC_IF_FLOW_LKUP = 9,
+#endif
+	DHD_PREALLOC_MEMDUMP_BUF = 10,
+	DHD_PREALLOC_MEMDUMP_RAM = 11,
+	DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+	DHD_PREALLOC_PKTID_MAP = 13,
+	DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+	DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+	DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+	DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+	DHD_PREALLOC_STAT_REPORT_BUF = 18,
+	DHD_PREALLOC_WL_ESCAN_INFO = 19,
+	DHD_PREALLOC_FW_VERBOSE_RING = 20,
+	DHD_PREALLOC_FW_EVENT_RING = 21,
+	DHD_PREALLOC_DHD_EVENT_RING = 22,
+	DHD_PREALLOC_NAN_EVENT_RING = 23,
+	DHD_PREALLOC_MAX
+};
+
+#define STATIC_BUF_MAX_NUM	20
+#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
+
+#define DHD_PREALLOC_PROT_SIZE	(16 * 1024)
+#define DHD_PREALLOC_RXBUF_SIZE	(24 * 1024)
+#define DHD_PREALLOC_DATABUF_SIZE	(64 * 1024)
+#define DHD_PREALLOC_OSL_BUF_SIZE	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE	(64 * 1024)
+#define DHD_PREALLOC_DHD_INFO_SIZE	(32 * 1024)
+#define DHD_PREALLOC_MEMDUMP_RAM_SIZE	(810 * 1024)
+#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE	(73 * 1024)
+#define DHD_PREALLOC_WL_ESCAN_INFO_SIZE	(66 * 1024)
+#ifdef CONFIG_64BIT
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE	(20 * 1024 * 2)
+#else
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE	(20 * 1024)
+#endif
+#define FW_VERBOSE_RING_SIZE		(64 * 1024)
+#define FW_EVENT_RING_SIZE		(64 * 1024)
+#define DHD_EVENT_RING_SIZE		(64 * 1024)
+#define NAN_EVENT_RING_SIZE		(64 * 1024)
+
+#if defined(CONFIG_64BIT)
+#define WLAN_DHD_INFO_BUF_SIZE	(24 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE	(64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE	(64 * 1024)
+#else
+#define WLAN_DHD_INFO_BUF_SIZE	(16 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE	(24 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE	(20 * 1024)
+#endif /* CONFIG_64BIT */
+#define WLAN_DHD_MEMDUMP_SIZE	(800 * 1024)
+
+#define DHD_SKB_1PAGE_BUFSIZE	(PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE	(PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE	(PAGE_SIZE*4)
+
+#define DHD_SKB_1PAGE_BUF_NUM	8
+#ifdef BCMDHD_PCIE
+#define DHD_SKB_2PAGE_BUF_NUM	64
+#elif defined(BCMDHD_SDIO)
+#define DHD_SKB_2PAGE_BUF_NUM	8
+#endif
+#define DHD_SKB_4PAGE_BUF_NUM	1
+
+/* The number is defined in linux_osl.c
+ * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM
+ * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM
+ */
+#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \
+		(DHD_SKB_2PAGE_BUF_NUM))
+#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM))
+
+void *wlan_static_prot = NULL;
+void *wlan_static_rxbuf = NULL;
+void *wlan_static_databuf = NULL;
+void *wlan_static_osl_buf = NULL;
+void *wlan_static_scan_buf0 = NULL;
+void *wlan_static_scan_buf1 = NULL;
+void *wlan_static_dhd_info_buf = NULL;
+void *wlan_static_dhd_wlfc_info_buf = NULL;
+void *wlan_static_if_flow_lkup = NULL;
+void *wlan_static_dhd_memdump_ram_buf = NULL;
+void *wlan_static_dhd_wlfc_hanger_buf = NULL;
+void *wlan_static_wl_escan_info_buf = NULL;
+void *wlan_static_fw_verbose_ring_buf = NULL;
+void *wlan_static_fw_event_ring_buf = NULL;
+void *wlan_static_dhd_event_ring_buf = NULL;
+void *wlan_static_nan_event_ring_buf = NULL;
+
+static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];
+
+void *dhd_wlan_mem_prealloc(int section, unsigned long size)
+{
+	pr_err("%s: section %d, %ld\n", __func__, section, size);
+	if (section == DHD_PREALLOC_PROT)
+		return wlan_static_prot;
+
+#if defined(BCMDHD_SDIO)
+	if (section == DHD_PREALLOC_RXBUF)
+		return wlan_static_rxbuf;
+
+	if (section == DHD_PREALLOC_DATABUF)
+		return wlan_static_databuf;
+#endif /* BCMDHD_SDIO */
+
+	if (section == DHD_PREALLOC_SKB_BUF)
+		return wlan_static_skb;
+
+	if (section == DHD_PREALLOC_WIPHY_ESCAN0)
+		return wlan_static_scan_buf0;
+
+	if (section == DHD_PREALLOC_WIPHY_ESCAN1)
+		return wlan_static_scan_buf1;
+
+	if (section == DHD_PREALLOC_OSL_BUF) {
+		if (size > DHD_PREALLOC_OSL_BUF_SIZE) {
+			pr_err("request OSL_BUF(%lu) > %ld\n",
+				size, DHD_PREALLOC_OSL_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_osl_buf;
+	}
+
+	if (section == DHD_PREALLOC_DHD_INFO) {
+		if (size > DHD_PREALLOC_DHD_INFO_SIZE) {
+			pr_err("request DHD_INFO size(%lu) > %d\n",
+				size, DHD_PREALLOC_DHD_INFO_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_info_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_WLFC_INFO) {
+		if (size > WLAN_DHD_WLFC_BUF_SIZE) {
+			pr_err("request DHD_WLFC_INFO size(%lu) > %d\n",
+				size, WLAN_DHD_WLFC_BUF_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_wlfc_info_buf;
+	}
+#ifdef BCMDHD_PCIE
+	if (section == DHD_PREALLOC_IF_FLOW_LKUP)  {
+		if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) {
+			pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n",
+				size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_if_flow_lkup;
+	}
+#endif /* BCMDHD_PCIE */
+	if (section == DHD_PREALLOC_MEMDUMP_RAM) {
+		if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) {
+			pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n",
+				size, DHD_PREALLOC_MEMDUMP_RAM_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_dhd_memdump_ram_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_WLFC_HANGER) {
+		if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) {
+			pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n",
+				size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);
+			return NULL;
+		}
+		return wlan_static_dhd_wlfc_hanger_buf;
+	}
+	if (section == DHD_PREALLOC_WL_ESCAN_INFO) {
+		if (size > DHD_PREALLOC_WL_ESCAN_INFO_SIZE) {
+			pr_err("request DHD_PREALLOC_WL_ESCAN_INFO_SIZE(%lu) > %d\n",
+				size, DHD_PREALLOC_WL_ESCAN_INFO_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_wl_escan_info_buf;
+	}
+	if (section == DHD_PREALLOC_FW_VERBOSE_RING) {
+		if (size > FW_VERBOSE_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n",
+				size, FW_VERBOSE_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_fw_verbose_ring_buf;
+	}
+	if (section == DHD_PREALLOC_FW_EVENT_RING) {
+		if (size > FW_EVENT_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n",
+				size, FW_EVENT_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_fw_event_ring_buf;
+	}
+	if (section == DHD_PREALLOC_DHD_EVENT_RING) {
+		if (size > DHD_EVENT_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n",
+				size, DHD_EVENT_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_dhd_event_ring_buf;
+	}
+	if (section == DHD_PREALLOC_NAN_EVENT_RING) {
+		if (size > NAN_EVENT_RING_SIZE) {
+			pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n",
+				size, NAN_EVENT_RING_SIZE);
+			return NULL;
+		}
+
+		return wlan_static_nan_event_ring_buf;
+	}
+	if ((section < 0) || (section > DHD_PREALLOC_MAX))
+		pr_err("request section id(%d) is out of max index %d\n",
+				section, DHD_PREALLOC_MAX);
+
+	pr_err("%s: failed to alloc section %d, size=%ld\n",
+		__func__, section, size);
+
+	return NULL;
+}
+EXPORT_SYMBOL(dhd_wlan_mem_prealloc);
+
+static int dhd_init_wlan_mem(void)
+{
+	int i;
+	int j;
+	printk(KERN_ERR "%s(): %s\n", __func__, DHD_STATIC_VERSION_STR);
+
+	for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
+		if (!wlan_static_skb[i]) {
+			goto err_skb_alloc;
+		}
+		pr_err("%s: section %d skb[%d], size=%ld\n", __func__,
+			DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE);
+	}
+
+	for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
+		wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
+		if (!wlan_static_skb[i]) {
+			goto err_skb_alloc;
+		}
+		pr_err("%s: section %d skb[%d], size=%ld\n", __func__,
+			DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE);
+	}
+
+#if defined(BCMDHD_SDIO)
+	wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
+	if (!wlan_static_skb[i])
+		goto err_skb_alloc;
+	pr_err("%s: section %d skb[%d], size=%ld\n", __func__,
+		DHD_PREALLOC_SKB_BUF, i, DHD_SKB_4PAGE_BUFSIZE);
+#endif /* BCMDHD_SDIO */
+
+	wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL);
+	if (!wlan_static_prot)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE);
+
+#if defined(BCMDHD_SDIO)
+	wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_rxbuf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE);
+
+	wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_databuf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE);
+#endif /* BCMDHD_SDIO */
+
+	wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_osl_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%ld\n", __func__,
+		DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE);
+
+	wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL);
+	if (!wlan_static_scan_buf0)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE);
+
+	wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_info_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE);
+
+	wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_wlfc_info_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE);
+
+#ifdef BCMDHD_PCIE
+	wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL);
+	if (!wlan_static_if_flow_lkup)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_IF_FLOW_LKUP, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);
+#endif /* BCMDHD_PCIE */
+
+	wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_memdump_ram_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_MEMDUMP_RAM, DHD_PREALLOC_MEMDUMP_RAM_SIZE);
+
+	wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_wlfc_hanger_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);
+
+	wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_ESCAN_INFO_SIZE, GFP_KERNEL);
+	if (!wlan_static_wl_escan_info_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_WL_ESCAN_INFO, DHD_PREALLOC_WL_ESCAN_INFO_SIZE);
+
+	wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_fw_verbose_ring_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE);
+
+	wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_fw_event_ring_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_FW_EVENT_RING, FW_EVENT_RING_SIZE);
+
+	wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_dhd_event_ring_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE);
+
+	wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL);
+	if (!wlan_static_nan_event_ring_buf)
+		goto err_mem_alloc;
+	pr_err("%s: section %d, size=%d\n", __func__,
+		DHD_PREALLOC_NAN_EVENT_RING, NAN_EVENT_RING_SIZE);
+
+	return 0;
+
+err_mem_alloc:
+
+	if (wlan_static_prot)
+		kfree(wlan_static_prot);
+
+#if defined(BCMDHD_SDIO)
+	if (wlan_static_rxbuf)
+		kfree(wlan_static_rxbuf);
+
+	if (wlan_static_databuf)
+		kfree(wlan_static_databuf);
+#endif /* BCMDHD_SDIO */
+
+	if (wlan_static_osl_buf)
+		kfree(wlan_static_osl_buf);
+
+	if (wlan_static_scan_buf0)
+		kfree(wlan_static_scan_buf0);
+
+	if (wlan_static_scan_buf1)
+		kfree(wlan_static_scan_buf1);
+
+	if (wlan_static_dhd_info_buf)
+		kfree(wlan_static_dhd_info_buf);
+
+	if (wlan_static_dhd_wlfc_info_buf)
+		kfree(wlan_static_dhd_wlfc_info_buf);
+
+#ifdef BCMDHD_PCIE
+	if (wlan_static_if_flow_lkup)
+		kfree(wlan_static_if_flow_lkup);
+#endif /* BCMDHD_PCIE */
+
+	if (wlan_static_dhd_memdump_ram_buf)
+		kfree(wlan_static_dhd_memdump_ram_buf);
+
+	if (wlan_static_dhd_wlfc_hanger_buf)
+		kfree(wlan_static_dhd_wlfc_hanger_buf);
+
+	if (wlan_static_wl_escan_info_buf)
+		kfree(wlan_static_wl_escan_info_buf);
+
+#ifdef BCMDHD_PCIE
+	if (wlan_static_fw_verbose_ring_buf)
+		kfree(wlan_static_fw_verbose_ring_buf);
+
+	if (wlan_static_fw_event_ring_buf)
+		kfree(wlan_static_fw_event_ring_buf);
+
+	if (wlan_static_dhd_event_ring_buf)
+		kfree(wlan_static_dhd_event_ring_buf);
+
+	if (wlan_static_nan_event_ring_buf)
+		kfree(wlan_static_nan_event_ring_buf);
+#endif /* BCMDHD_PCIE */
+
+	pr_err("%s: Failed to mem_alloc for WLAN\n", __func__);
+
+	i = WLAN_SKB_BUF_NUM;
+
+err_skb_alloc:
+	pr_err("%s: Failed to skb_alloc for WLAN\n", __func__);
+	for (j = 0; j < i; j++)
+		dev_kfree_skb(wlan_static_skb[j]);
+
+	return -ENOMEM;
+}
+
+static int __init
+dhd_static_buf_init(void)
+{
+	dhd_init_wlan_mem();
+
+	return 0;
+}
+
+static void __exit
+dhd_static_buf_exit(void)
+{
+	int i;
+
+	pr_err("%s()\n", __FUNCTION__);
+
+	for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
+		if (wlan_static_skb[i])
+			dev_kfree_skb(wlan_static_skb[i]);
+	}
+
+	for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
+		if (wlan_static_skb[i])
+			dev_kfree_skb(wlan_static_skb[i]);
+	}
+
+#if defined(BCMDHD_SDIO)
+	if (wlan_static_skb[i])
+		dev_kfree_skb(wlan_static_skb[i]);
+#endif /* BCMDHD_SDIO */
+
+	if (wlan_static_prot)
+		kfree(wlan_static_prot);
+
+#if defined(BCMDHD_SDIO)
+	if (wlan_static_rxbuf)
+		kfree(wlan_static_rxbuf);
+
+	if (wlan_static_databuf)
+		kfree(wlan_static_databuf);
+#endif /* BCMDHD_SDIO */
+
+	if (wlan_static_osl_buf)
+		kfree(wlan_static_osl_buf);
+
+	if (wlan_static_scan_buf0)
+		kfree(wlan_static_scan_buf0);
+
+	if (wlan_static_scan_buf1)
+		kfree(wlan_static_scan_buf1);
+
+	if (wlan_static_dhd_info_buf)
+		kfree(wlan_static_dhd_info_buf);
+
+	if (wlan_static_dhd_wlfc_info_buf)
+		kfree(wlan_static_dhd_wlfc_info_buf);
+
+#ifdef BCMDHD_PCIE
+	if (wlan_static_if_flow_lkup)
+		kfree(wlan_static_if_flow_lkup);
+#endif /* BCMDHD_PCIE */
+
+	if (wlan_static_dhd_memdump_ram_buf)
+		kfree(wlan_static_dhd_memdump_ram_buf);
+
+	if (wlan_static_dhd_wlfc_hanger_buf)
+		kfree(wlan_static_dhd_wlfc_hanger_buf);
+
+	if (wlan_static_wl_escan_info_buf)
+		kfree(wlan_static_wl_escan_info_buf);
+
+#ifdef BCMDHD_PCIE
+	if (wlan_static_fw_verbose_ring_buf)
+		kfree(wlan_static_fw_verbose_ring_buf);
+
+	if (wlan_static_fw_event_ring_buf)
+		kfree(wlan_static_fw_event_ring_buf);
+
+	if (wlan_static_dhd_event_ring_buf)
+		kfree(wlan_static_dhd_event_ring_buf);
+
+	if (wlan_static_nan_event_ring_buf)
+		kfree(wlan_static_nan_event_ring_buf);
+#endif
+
+	return;
+}
+
+module_init(dhd_static_buf_init);
+
+module_exit(dhd_static_buf_exit);
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.c b/drivers/net/wireless/bcmdhd/dhd_wlfc.c
new file mode 100644
index 0000000..442af32
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.c
@@ -0,0 +1,4569 @@
+/*
+ * DHD PROP_TXSTATUS Module.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_wlfc.c 679733 2017-01-17 06:40:39Z $
+ *
+ */
+
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <dhd_bus.h>
+
+#include <dhd_dbg.h>
+#include <dhd_config.h>
+
+#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+
+/*
+ * wlfc naming and lock rules:
+ *
+ * 1. Private functions are named _dhd_wlfc_XXX, are declared static and avoid wlfc lock operations.
+ * 2. Public functions are named dhd_wlfc_XXX and take the wlfc lock where needed.
+ * 3. Modules other than Proptxstatus call only the public functions and avoid wlfc lock operations.
+ *
+ */
+
+#if defined(DHD_WLFC_THREAD)
+#define WLFC_THREAD_QUICK_RETRY_WAIT_MS    10      /* 10 msec */
+#define WLFC_THREAD_RETRY_WAIT_MS          10000   /* 10 sec */
+#endif /* defined (DHD_WLFC_THREAD) */
+
+
+#ifdef PROP_TXSTATUS
+
+#define DHD_WLFC_QMON_COMPLETE(entry)
+
+
+/** reordering related */
+
+#if defined(DHD_WLFC_THREAD)
+static void
+_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp)
+{
+	dhdp->wlfc_thread_go = TRUE;
+	wake_up_interruptible(&dhdp->wlfc_wqhead);
+}
+#endif /* DHD_WLFC_THREAD */
+
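+/*
+ * Returns the packet's 8-bit free-running sequence counter adjusted against
+ * 'current_seq': counters that have already wrapped are shifted up by 256 so
+ * that ordering comparisons remain monotonic across the wrap-around.
+ */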
+static uint16
+_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq)
+{
+	uint16 seq;
+
+	if (!p) {
+		return 0xffff;
+	}
+
+	seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	if (seq < current_seq) {
+		/* wrap around */
+		seq += 256;
+	}
+
+	return seq;
+}
+
+/**
+ * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder
+ * suppressed packets.
+ *    @param[in] pq       caller supplied packet queue to enqueue the packet on
+ *    @param[in] prec     precedence of the to-be-queued packet
+ *    @param[in] p        transmit packet to enqueue
+ *    @param[in] qHead    if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order.
+ *    @param[in] current_seq
+ *    @param[in] reOrder  reOrder on odd precedence (=suppress queue)
+ */
+static void
+_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead,
+	uint8 current_seq, bool reOrder)
+{
+	struct pktq_prec *q;
+	uint16 seq, seq2;
+	void *p2, *p2_prev;
+
+	if (!p)
+		return;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);		/* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	PKTSETLINK(p, NULL);
+	if (q->head == NULL) {
+		/* empty queue */
+		q->head = p;
+		q->tail = p;
+	} else {
+		if (reOrder && (prec & 1)) {
+			seq = _dhd_wlfc_adjusted_seq(p, current_seq);
+			p2 = qHead ? q->head : q->tail;
+			seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+			if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) {
+				/* need reorder */
+				p2 = q->head;
+				p2_prev = NULL;
+				seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+				while (seq > seq2) {
+					p2_prev = p2;
+					p2 = PKTLINK(p2);
+					if (!p2) {
+						break;
+					}
+					seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+				}
+
+				if (p2_prev == NULL) {
+					/* insert head */
+					PKTSETLINK(p, q->head);
+					q->head = p;
+				} else if (p2 == NULL) {
+					/* insert tail */
+					PKTSETLINK(p2_prev, p);
+					q->tail = p;
+				} else {
+					/* insert after p2_prev */
+					PKTSETLINK(p, PKTLINK(p2_prev));
+					PKTSETLINK(p2_prev, p);
+				}
+				goto exit;
+			}
+		}
+
+		if (qHead) {
+			PKTSETLINK(p, q->head);
+			q->head = p;
+		} else {
+			PKTSETLINK(q->tail, p);
+			q->tail = p;
+		}
+	}
+
+exit:
+
+	q->len++;
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+} /* _dhd_wlfc_prec_enque */
+
+/**
+ * Create a place to store all packet pointers submitted to the firmware until a status comes back,
+ * suppress or otherwise.
+ *
+ * hang-er: noun, a contrivance on which things are hung, as a hook.
+ */
+/** @deprecated soon */
+static void*
+_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items)
+{
+	int i;
+	wlfc_hanger_t* hanger;
+
+	/* allow only up to a specific size for now */
+	ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+	if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER,
+		WLFC_HANGER_SIZE(max_items))) == NULL) {
+		return NULL;
+	}
+	memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+	hanger->max_items = max_items;
+
+	for (i = 0; i < hanger->max_items; i++) {
+		hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+	}
+	return hanger;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items));
+		return BCME_OK;
+	}
+	return BCME_BADARG;
+}
+
+/** @deprecated soon */
+static uint16
+_dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+	uint32 i;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		i = h->slot_pos + 1;
+		if (i == h->max_items) {
+			i = 0;
+		}
+		while (i != h->slot_pos) {
+			if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) {
+				h->slot_pos = i;
+				return (uint16)i;
+			}
+			i++;
+			if (i == h->max_items)
+				i = 0;
+		}
+		h->failed_slotfind++;
+	}
+	return WLFC_HANGER_MAXITEMS;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	*gen = 0xff;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
+			*gen = h->items[slot_id].gen;
+		}
+		else {
+			DHD_ERROR(("Error: %s():%d item not used\n",
+				__FUNCTION__, __LINE__));
+			rc = BCME_NOTFOUND;
+		}
+
+	} else {
+		rc = BCME_BADARG;
+	}
+
+	return rc;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+			h->items[slot_id].pkt = pkt;
+			h->items[slot_id].pkt_state = 0;
+			h->items[slot_id].pkt_txstatus = 0;
+			h->pushed++;
+		} else {
+			h->failed_to_push++;
+			rc = BCME_NOTFOUND;
+		}
+	} else {
+		rc = BCME_BADARG;
+	}
+
+	return rc;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	*pktout = NULL;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
+			*pktout = h->items[slot_id].pkt;
+			if (remove_from_hanger) {
+				h->items[slot_id].state =
+					WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[slot_id].pkt = NULL;
+				h->items[slot_id].gen = 0xff;
+				h->items[slot_id].identifier = 0;
+				h->popped++;
+			}
+		} else {
+			h->failed_to_pop++;
+			rc = BCME_NOTFOUND;
+		}
+	} else {
+		rc = BCME_BADARG;
+	}
+
+	return rc;
+}
+
+/** @deprecated soon */
+static int
+_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+	if (h) {
+		h->items[slot_id].gen = gen;
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
+		} else {
+			rc = BCME_BADARG;
+		}
+	} else {
+		rc = BCME_BADARG;
+	}
+
+	return rc;
+}
+
+/** remove reference of specific packet in hanger */
+/** @deprecated soon */
+static bool
+_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt)
+{
+	int i;
+
+	if (!h || !pkt) {
+		return FALSE;
+	}
+
+	i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt)));
+
+	if ((i < h->max_items) && (pkt == h->items[i].pkt)) {
+		if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
+			h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+			h->items[i].pkt = NULL;
+			h->items[i].gen = 0xff;
+			h->items[i].identifier = 0;
+			return TRUE;
+		} else {
+			DHD_ERROR(("Error: %s():%d item not suppressed\n",
+				__FUNCTION__, __LINE__));
+		}
+	}
+
+	return FALSE;
+}
+
+/** afq = At Firmware Queue, queue containing packets pending in the dongle */
+static int
+_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p)
+{
+	wlfc_mac_descriptor_t* entry;
+	uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+
+	if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[entry_idx];
+	else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pktq_penq(&entry->afq, prec, p);
+
+	return BCME_OK;
+}
+
+/** afq = At Firmware Queue, queue containing packets pending in the dongle */
+static int
+_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec,
+	void **pktout)
+{
+	wlfc_mac_descriptor_t *entry;
+	struct pktq *pq;
+	struct pktq_prec *q;
+	void *p, *b;
+
+	if (!ctx) {
+		DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout));
+		return BCME_BADARG;
+	}
+
+	if (pktout) {
+		*pktout = NULL;
+	}
+
+	ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+	if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[hslot];
+	else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pq = &entry->afq;
+
+	ASSERT(prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	b = NULL;
+	p = q->head;
+
+	while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)))))
+	{
+		b = p;
+		p = PKTLINK(p);
+	}
+
+	if (p == NULL) {
+		/* none is matched */
+		if (b) {
+			DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+		} else {
+			DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+		}
+
+		return BCME_ERROR;
+	}
+
+	bcm_pkt_validate_chk(p);
+
+	if (!b) {
+		/* head packet is matched */
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		/* middle packet is matched */
+		DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt,
+			WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head)))));
+		ctx->stats.ooo_pkts[prec]++;
+		PKTSETLINK(b, PKTLINK(p));
+		if (PKTLINK(p) == NULL) {
+			q->tail = b;
+		}
+	}
+
+	q->len--;
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	if (pktout) {
+		*pktout = p;
+	}
+
+	return BCME_OK;
+} /* _dhd_wlfc_deque_afq */
+
+/**
+ * Flow control information piggy backs on packets, in the form of one or more TLVs. This function
+ * pushes one or more TLVs onto a packet that is going to be sent towards the dongle.
+ *
+ *     @param[in]     ctx
+ *     @param[in/out] packet
+ *     @param[in]     tim_signal TRUE if parameter 'tim_bmp' is valid
+ *     @param[in]     tim_bmp
+ *     @param[in]     mac_handle
+ *     @param[in]     htodtag
+ *     @param[in]     htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon
+ *                    earlier between host and firmware.
+ *     @param[in]     skip_wlfc_hdr
+ */
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal,
+	uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
+{
+	uint32 wl_pktinfo = 0;
+	uint8* wlh;
+	uint8 dataOffset = 0;
+	uint8 fillers;
+	uint8 tim_signal_len = 0;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	struct bdc_header *h;
+	void *p = *packet;
+
+	if (skip_wlfc_hdr)
+		goto push_bdc_hdr;
+
+	if (tim_signal) {
+		tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+	}
+
+	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+	dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len;
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		dataOffset += WLFC_CTL_VALUE_LEN_SEQ;
+	}
+
+	fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+	dataOffset += fillers;
+
+	PKTPUSH(ctx->osh, p, dataOffset);
+	wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+	wl_pktinfo = htol32(htodtag);
+
+	wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;
+	wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;
+	memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32));
+
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		uint16 wl_seqinfo = htol16(htodseq);
+		wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ;
+		memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo,
+			WLFC_CTL_VALUE_LEN_SEQ);
+	}
+
+	if (tim_signal_len) {
+		wlh[dataOffset - fillers - tim_signal_len ] =
+			WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 1] =
+			WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+		wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+	}
+	if (fillers)
+		memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+push_bdc_hdr:
+	PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+	h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(p))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = dataOffset >> 2;
+	BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+	*packet = p;
+	return BCME_OK;
+} /* _dhd_wlfc_pushheader */
+
+/**
+ * Removes (PULLs) flow control related headers from the caller supplied packet, is invoked eg
+ * when a packet is about to be freed.
+ */
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+	struct bdc_header *h;
+
+	if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+	h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+	/* pull BDC header */
+	PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+
+	if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2)));
+		return BCME_ERROR;
+	}
+
+	/* pull wl-header */
+	PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+	return BCME_OK;
+}
+
+/**
+ * @param[in/out] p packet
+ */
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+	int i;
+	wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+	uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+	uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+	wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p));
+	int iftype = ctx->destination_entries.interfaces[ifid].iftype;
+
+	/* saved one exists, return it */
+	if (entry)
+		return entry;
+
+	/* Multicast destination, STA and P2P clients get the interface entry.
+	 * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
+	 * have their own entry.
+	 */
+	if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
+		iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
+		(ctx->destination_entries.interfaces[ifid].occupied)) {
+			entry = &ctx->destination_entries.interfaces[ifid];
+	}
+
+	if (entry && ETHER_ISMULTI(dstn)) {
+		DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+		return entry;
+	}
+
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (table[i].occupied) {
+			if (table[i].interface_id == ifid) {
+				if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) {
+					entry = &table[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (entry == NULL)
+		entry = &ctx->destination_entries.other;
+
+	DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+
+	return entry;
+} /* _dhd_wlfc_find_table_entry */
+
+/**
+ * In case a packet must be dropped (because eg the queues are full), various tallies have to
+ * be updated. Called from several other functions.
+ *     @param[in] dhdp pointer to public DHD structure
+ *     @param[in] prec precedence of the packet
+ *     @param[in] p    the packet to be dropped
+ *     @param[in] bPktInQ TRUE if packet is part of a queue
+ */
+static int
+_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ)
+{
+	athost_wl_status_info_t* ctx;
+	void *pout = NULL;
+
+	ASSERT(dhdp && p);
+	ASSERT(prec >= 0 && prec <= WLFC_PSQ_PREC_COUNT);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+		/* suppressed queue, need pop from hanger */
+		_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG
+					(PKTTAG(p))), &pout, TRUE);
+		ASSERT(p == pout);
+	}
+
+	if (!(prec & 1)) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* pkt in delayed q, so fake push BDC header for
+		 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+		 */
+		_dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, 0, 0, TRUE);
+
+		/* This packet is about to be freed, so remove it from tcp_ack_info_tbl
+		 * This must be one of...
+		 * 1. A pkt already in delayQ is evicted by another pkt with higher precedence
+		 * in _dhd_wlfc_prec_enq_with_drop()
+		 * 2. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_enque_delayq().
+		 * 3. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_rollback_packet_toq().
+		 */
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+				" Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+	}
+
+	if (bPktInQ) {
+		ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+		ctx->pkt_cnt_per_ac[prec>>1]--;
+		ctx->pkt_cnt_in_psq--;
+	}
+
+	ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+	ctx->stats.pktout++;
+	ctx->stats.drop_pkts[prec]++;
+
+	dhd_txcomplete(dhdp, p, FALSE);
+	PKTFREE(ctx->osh, p, TRUE);
+
+	return 0;
+} /* _dhd_wlfc_prec_drop */
+
+/**
+ * Called when eg the host handed a new packet over to the driver, or when the dongle reported
+ * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit
+ * packet in the host driver to be (re)transmitted at a later opportunity.
+ *     @param[in] dhdp pointer to public DHD structure
+ *     @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence
+ */
+static bool
+_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead,
+	uint8 current_seq)
+{
+	void *p = NULL;
+	int eprec = -1;		/* precedence to evict from */
+	athost_wl_status_info_t* ctx;
+
+	ASSERT(dhdp && pq && pkt);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(pq, prec) && !pktq_full(pq)) {
+		goto exit;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(pq, prec)) {
+		eprec = prec;
+	} else if (pktq_full(pq)) {
+		p = pktq_peek_tail(pq, &eprec);
+		if (!p) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return FALSE;
+		}
+		if ((eprec > prec) || (eprec < 0)) {
+			if (!pktq_pempty(pq, prec)) {
+				eprec = prec;
+			} else {
+				return FALSE;
+			}
+		}
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(pq, eprec));
+		/* Evict all fragmented frames */
+		dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop);
+	}
+
+exit:
+	/* Enqueue */
+	_dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq,
+		WLFC_GET_REORDERSUPP(dhdp->wlfc_mode));
+	ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++;
+	ctx->pkt_cnt_per_ac[prec>>1]++;
+	ctx->pkt_cnt_in_psq++;
+
+	return TRUE;
+} /* _dhd_wlfc_prec_enq_with_drop */
+
+/**
+ * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in
+ * the event that this 'commit' failed.
+ */
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+	void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+	/*
+	 * put the packet back to the head of queue
+	 * - suppressed packet goes back to suppress sub-queue
+	 * - pull out the header, if new or delayed packet
+	 *
+	 * Note: hslot is used only when header removal is done.
+	 */
+	wlfc_mac_descriptor_t* entry;
+	int rc = BCME_OK;
+	int prec, fifo_id;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+	fifo_id = prec << 1;
+	if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED)
+		fifo_id += 1;
+	if (entry != NULL) {
+		/*
+		if this packet did not count against FIFO credit, it must have
+		taken a requested_credit from the firmware (for pspoll etc.)
+		*/
+		if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p)))
+			entry->requested_credit++;
+
+		if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+			/* decrement sequence count */
+			WLFC_DECR_SEQCOUNT(entry, prec);
+			/* remove header first */
+			rc = _dhd_wlfc_pullheader(ctx, p);
+			if (rc != BCME_OK) {
+				DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+				goto exit;
+			}
+		}
+
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE,
+			WLFC_SEQCOUNT(entry, fifo_id>>1))
+			== FALSE) {
+			/* enque failed */
+			DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n",
+				__FUNCTION__, __LINE__, fifo_id));
+			rc = BCME_ERROR;
+		}
+	} else {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		rc = BCME_ERROR;
+	}
+
+exit:
+	if (rc != BCME_OK) {
+		ctx->stats.rollback_failed++;
+		_dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE);
+	} else {
+		ctx->stats.rollback++;
+	}
+
+	return rc;
+} /* _dhd_wlfc_rollback_packet_toq */
+
+/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */
+static bool
+_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid)
+{
+	int prec, ac_traffic = WLFC_NO_TRAFFIC;
+
+	for (prec = 0; prec < AC_COUNT; prec++) {
+		if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) {
+			if (ac_traffic == WLFC_NO_TRAFFIC)
+				ac_traffic = prec + 1;
+			else if (ac_traffic != (prec + 1))
+				ac_traffic = WLFC_MULTI_TRAFFIC;
+		}
+	}
+
+	if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) {
+		/* single AC (BE/BK/VI/VO) in queue */
+		if (ctx->allow_fc) {
+			return TRUE;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (ctx->fc_defer_timestamp == 0) {
+				/* first single ac scenario */
+				ctx->fc_defer_timestamp = curr_t;
+				return FALSE;
+			}
+
+			/* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */
+			delta = curr_t - ctx->fc_defer_timestamp;
+			if (delta >= WLFC_FC_DEFER_PERIOD_MS) {
+				ctx->allow_fc = TRUE;
+			}
+		}
+	} else {
+		/* multiple ACs or BCMC in queue */
+		ctx->allow_fc = FALSE;
+		ctx->fc_defer_timestamp = 0;
+	}
+
+	return ctx->allow_fc;
+} /* _dhd_wlfc_allow_fc */
+
+/**
+ * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on
+ * low/high watermarks.
+ */
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+	dhd_pub_t *dhdp;
+
+	ASSERT(ctx);
+
+	dhdp = (dhd_pub_t *)ctx->dhdp;
+	ASSERT(dhdp);
+
+	if (dhdp->skip_fc && dhdp->skip_fc((void *)dhdp, if_id))
+		return;
+
+	if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id))
+		return;
+
+	if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+		/* start traffic */
+		ctx->hostif_flow_state[if_id] = OFF;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("F"));
+
+		dhd_txflowcontrol(dhdp, if_id, OFF);
+
+		ctx->toggle_host_if = 0;
+	}
+
+	if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+		/* stop traffic */
+		ctx->hostif_flow_state[if_id] = ON;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic   %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("N"));
+
+		dhd_txflowcontrol(dhdp, if_id, ON);
+
+		ctx->host_ifidx = if_id;
+		ctx->toggle_host_if = 1;
+	}
+
+	return;
+} /* _dhd_wlfc_flow_control_check */
+
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 ta_bmp)
+{
+	int rc = BCME_OK;
+	void* p = NULL;
+	int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	if (dhdp->proptxstatus_txoff) {
+		rc = BCME_NORESOURCE;
+		return rc;
+	}
+
+	/* allocate a dummy packet */
+	p = PKTGET(ctx->osh, dummylen, TRUE);
+	if (p) {
+		PKTPULL(ctx->osh, p, dummylen);
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+		_dhd_wlfc_pushheader(ctx, &p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
+		DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+		ctx->stats.signal_only_pkts_sent++;
+#endif
+
+#if defined(BCMPCIE)
+		rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx);
+#else
+		rc = dhd_bus_txdata(dhdp->bus, p);
+#endif
+		if (rc != BCME_OK) {
+			_dhd_wlfc_pullheader(ctx, p);
+			PKTFREE(ctx->osh, p, TRUE);
+		}
+	} else {
+		DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+		           __FUNCTION__, dummylen));
+		rc = BCME_NOMEM;
+		dhdp->tx_pktgetfail++;
+	}
+
+	return rc;
+} /* _dhd_wlfc_send_signalonly_packet */
+
+/**
+ * Called on eg receiving 'mac close' indication from dongle. Updates the per-MAC administration
+ * maintained in caller supplied parameter 'entry'.
+ *
+ *    @param[in/out] entry  administration about a remote MAC entity
+ *    @param[in]     prec   precedence queue for this remote MAC entity
+ *
+ * Return value: TRUE if traffic availability changed
+ */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	int prec)
+{
+	bool rc = FALSE;
+
+	if (entry->state == WLFC_STATE_CLOSE) {
+		if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+			(pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+			/* no packets in both 'normal' and 'suspended' queues */
+			if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp & ~ NBITVAL(prec);
+			}
+		} else {
+			/* packets are queued in host for transmission to dongle */
+			if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp | NBITVAL(prec);
+			}
+		}
+	}
+
+	if (rc) {
+		/* request a TIM update to firmware at the next piggyback opportunity */
+		if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+			entry->send_tim_signal = 1;
+			_dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+			entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+			entry->send_tim_signal = 0;
+		} else {
+			rc = FALSE;
+		}
+	}
+
+	return rc;
+} /* _dhd_wlfc_traffic_pending_check */
+
+/**
+ * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues
+ * the packet to transmit to firmware again at a later opportunity.
+ */
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_NOTFOUND;
+	}
+	/*
+	- suppressed packets go to sub_queue[2*prec + 1] AND
+	- delayed packets go to sub_queue[2*prec + 0] to ensure
+	order of delivery.
+	*/
+	if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE,
+		WLFC_SEQCOUNT(entry, prec))
+		== FALSE) {
+		ctx->stats.delayq_full_error++;
+		/* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+		WLFC_DBGMESG(("s"));
+		return BCME_ERROR;
+	}
+
+	/* A packet has been pushed, update traffic availability bitmap, if applicable */
+	_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+/**
+ * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer
+ * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet.
+ *
+ *     @param[in/out] ctx    driver specific flow control administration
+ *     @param[in/out] entry  The remote MAC entity for which the packet is destined.
+ *     @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet.
+ *     @param[in] header_needed True if packet is 'new' to flow control
+ *     @param[out] slot Handle to container in which the packet was 'parked'
+ */
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot)
+{
+	int rc = BCME_OK;
+	int hslot = WLFC_HANGER_MAXITEMS;
+	bool send_tim_update = FALSE;
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	uint8 free_ctr;
+	int gen = 0xff;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+	void * p = *packet;
+
+	*slot = hslot;
+
+	if (entry == NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, p);
+	}
+
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_ERROR;
+	}
+
+	if (entry->send_tim_signal) {
+		/* sends a traffic indication bitmap to the dongle */
+		send_tim_update = TRUE;
+		entry->send_tim_signal = 0;
+		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+	}
+
+	if (header_needed) {
+		if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			hslot = (uint)(entry - &ctx->destination_entries.nodes[0]);
+		} else {
+			hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+		}
+		gen = entry->generation;
+		free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	} else {
+		if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p));
+		}
+
+		hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+
+		if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) {
+			gen = entry->generation;
+		} else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		} else {
+			_dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen);
+		}
+
+		free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		/* remove old header */
+		_dhd_wlfc_pullheader(ctx, p);
+	}
+
+	if (hslot >= WLFC_HANGER_MAXITEMS) {
+		DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
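+	/* compose the host-to-dongle (htod) tx status tag that will be pushed onto the packet */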
+	WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+	WL_TXSTATUS_SET_HSLOT(htod, hslot);
+	WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+	WL_TXSTATUS_SET_GENERATION(htod, gen);
+	DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+
+	if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+		/*
+		Indicate that this packet is being sent in response to an
+		explicit request from the firmware side.
+		*/
+		WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+	} else {
+		WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+	}
+
+	rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update,
+		entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
+	if (rc == BCME_OK) {
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+
+		if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger);
+			if (header_needed) {
+				/*
+				a new header was created for this packet.
+				push to hanger slot and scrub q. Since bus
+				send succeeded, increment seq number as well.
+				*/
+				rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+				if (rc == BCME_OK) {
+#ifdef PROP_TXSTATUS_DEBUG
+					h->items[hslot].push_time =
+						OSL_SYSUPTIME();
+#endif
+				} else {
+					DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
+						__FUNCTION__, rc));
+				}
+			} else {
+				/* clear hanger state */
+				if (h->items[hslot].pkt != p)
+					DHD_ERROR(("%s() pkt not match: cur %p, hanger pkt %p\n",
+						__FUNCTION__, p, h->items[hslot].pkt));
+				ASSERT(h->items[hslot].pkt == p);
+				bcm_object_feature_set(h->items[hslot].pkt,
+					BCM_OBJECT_FEATURE_PKT_STATE, 0);
+				h->items[hslot].pkt_state = 0;
+				h->items[hslot].pkt_txstatus = 0;
+				h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE;
+			}
+		}
+
+		if ((rc == BCME_OK) && header_needed) {
+			/* increment free running sequence count */
+			WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+		}
+	}
+	*slot = hslot;
+	*packet = p;
+	return rc;
+} /* _dhd_wlfc_pretx_pktprocess */
+
+/**
+ * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote
+ * mac is in the 'open' state, otherwise '0'.
+ */
+static int
+_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, int prec)
+{
+	wlfc_mac_descriptor_t* interfaces = ctx->destination_entries.interfaces;
+
+	if (entry->interface_id >= WLFC_MAX_IFNUM) {
+		ASSERT(&ctx->destination_entries.other == entry);
+		return 1;
+	}
+
+	if (interfaces[entry->interface_id].iftype ==
+		WLC_E_IF_ROLE_P2P_GO) {
+		/* - destination interface is of type P2P GO.
+		For a P2P GO interface, if the destination is OPEN but the interface is
+		CLOSED, do not send traffic. But if the destination is CLOSED while there is
+		destination-specific credit left, send packets, because the firmware keeps
+		the destination-specific requested packets queued.
+		*/
+		if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+			(entry->requested_packet == 0)) {
+			return 0;
+		}
+	}
+
+	/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+	if ((((entry->state == WLFC_STATE_CLOSE) ||
+		(interfaces[entry->interface_id].state == WLFC_STATE_CLOSE)) &&
+		(entry->requested_credit == 0) &&
+		(entry->requested_packet == 0)) ||
+		(!(entry->ac_bitmap & (1 << prec)))) {
+		return 0;
+	}
+
+	return 1;
+} /* _dhd_wlfc_is_destination_open */
+
+/**
+ * Dequeues a suppressed or delayed packet from a queue
+ *    @param[in/out] ctx          Driver specific flow control administration
+ *    @param[in]  prec            Precedence of queue to dequeue from
+ *    @param[out] ac_credit_spent Boolean, returns 0 or 1
+ *    @param[out] needs_hdr       Boolean, returns 0 or 1
+ *    @param[out] entry_out       The remote MAC for which the packet is destined
+ *    @param[in]  only_no_credit  If TRUE, searches only the entries that have an outstanding credit/packet request from the firmware
+ *
+ * Return value: the dequeued packet
+ */
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec,
+	uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out,
+	bool only_no_credit)
+{
+	wlfc_mac_descriptor_t* entry;
+	int total_entries;
+	void* p = NULL;
+	int i;
+	uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
+
+	*entry_out = NULL;
+	/* in most cases a packet will count against FIFO credit */
+	*ac_credit_spent = credit_spent;
+
+	/* search the entries, including nodes as well as interfaces */
+	if (only_no_credit) {
+		total_entries = ctx->requested_entry_count;
+	} else {
+		total_entries = ctx->active_entry_count;
+	}
+
+	for (i = 0; i < total_entries; i++) {
+		if (only_no_credit) {
+			entry = ctx->requested_entry[i];
+		} else {
+			entry = ctx->active_entry_head;
+			/* move head to ensure fair round-robin */
+			ctx->active_entry_head = ctx->active_entry_head->next;
+		}
+		ASSERT(entry);
+
+		if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
+			(entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
+			(!entry->suppressed)) {
+			*ac_credit_spent = credit_spent;
+			if (entry->state == WLFC_STATE_CLOSE) {
+				*ac_credit_spent = 0;
+			}
+
+			/* higher precedence will be picked up first,
+			 * i.e. suppressed packets before delayed ones
+			 */
+			p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));
+			*needs_hdr = 0;
+			if (p == NULL) {
+				/* De-Q from delay Q */
+				p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));
+				*needs_hdr = 1;
+			}
+
+			if (p != NULL) {
+				bcm_pkt_validate_chk(p);
+				/* did the packet come from suppress sub-queue? */
+				if (entry->requested_credit > 0) {
+					entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+					entry->dstncredit_sent_packets++;
+#endif
+				} else if (entry->requested_packet > 0) {
+					entry->requested_packet--;
+					DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+				}
+
+				*entry_out = entry;
+				ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+				ctx->pkt_cnt_per_ac[prec]--;
+				ctx->pkt_cnt_in_psq--;
+				_dhd_wlfc_flow_control_check(ctx, &entry->psq,
+					DHD_PKTTAG_IF(PKTTAG(p)));
+				/*
+				 * A packet has been picked up, update traffic availability bitmap,
+				 * if applicable.
+				 */
+				_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+				return p;
+			}
+		}
+	}
+	return NULL;
+} /* _dhd_wlfc_deque_delayedq */
+
+/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */
+static int
+_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	if (pktbuf != NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, pktbuf);
+		if (entry == NULL) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return BCME_ERROR;
+		}
+
+		/*
+		- suppressed packets go to sub_queue[2*prec + 1] AND
+		- delayed packets go to sub_queue[2*prec + 0] to ensure
+		order of delivery.
+		*/
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1),
+			FALSE, WLFC_SEQCOUNT(entry, prec))
+			== FALSE) {
+			WLFC_DBGMESG(("D"));
+			ctx->stats.delayq_full_error++;
+			return BCME_ERROR;
+		}
+
+		/* A packet has been pushed, update traffic availability bitmap, if applicable */
+		_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	}
+
+	return BCME_OK;
+} /* _dhd_wlfc_enque_delayq */
+
+/** Returns TRUE if caller supplied packet is destined for caller supplied interface */
+static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid)
+{
+	if (!p || !p_ifid)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p))));
+}
+
+/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */
+static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry)
+{
+	if (!p || !entry)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && (entry == DHD_PKTTAG_ENTRY(PKTTAG(p))));
+}
+
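+/** In implied credit mode, returns the FIFO credit consumed by a completed packet, repaying the highest priority lending AC first */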
+static void
+_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt)
+{
+	dhd_pub_t *dhdp;
+	bool credit_return = FALSE;
+
+	if (!wlfc || !pkt) {
+		return;
+	}
+
+	dhdp = (dhd_pub_t *)(wlfc->dhdp);
+	if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) &&
+		DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+		int lender, credit_returned = 0;
+		uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+
+		credit_return = TRUE;
+
+		/* Note that borrower is fifo_id */
+		/* Return credits to highest priority lender first */
+		for (lender = AC_COUNT; lender >= 0; lender--) {
+			if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+				wlfc->FIFO_credit[lender]++;
+				wlfc->credits_borrowed[fifo_id][lender]--;
+				credit_returned = 1;
+				break;
+			}
+		}
+
+		if (!credit_returned) {
+			wlfc->FIFO_credit[fifo_id]++;
+		}
+	}
+
+	BCM_REFERENCE(credit_return);
+#if defined(DHD_WLFC_THREAD)
+	if (credit_return) {
+		_dhd_wlfc_thread_wakeup(dhdp);
+	}
+#endif /* defined(DHD_WLFC_THREAD) */
+}
+
+/** Removes and frees a packet from the hanger. Called during eg tx complete. */
+static void
+_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state,
+	int pkt_txstatus)
+{
+	wlfc_hanger_t* hanger;
+	wlfc_hanger_item_t* item;
+
+	if (!wlfc)
+		return;
+
+	hanger = (wlfc_hanger_t*)wlfc->hanger;
+	if (!hanger)
+		return;
+
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return;
+
+	item = &hanger->items[slot_id];
+
+	if (item->pkt) {
+		item->pkt_state |= pkt_state;
+		if (pkt_txstatus != -1)
+			item->pkt_txstatus = (uint8)pkt_txstatus;
+		bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state);
+		if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) {
+			void *p = NULL;
+			void *pkt = item->pkt;
+			uint8 old_state = item->state;
+			int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE);
+			BCM_REFERENCE(ret);
+			BCM_REFERENCE(pkt);
+			ASSERT((ret == BCME_OK) && p && (pkt == p));
+			if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
+				printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n",
+					pkt, old_state, item->pkt_state);
+			}
+			ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED);
+
+			/* free packet */
+			wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))]
+				[DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+			wlfc->stats.pktout++;
+			dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus);
+			PKTFREE(wlfc->osh, p, TRUE);
+		}
+	} else {
+		/* free slot */
+		if (item->state == WLFC_HANGER_ITEM_STATE_FREE)
+			DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n",
+			    __FUNCTION__, __LINE__, item->pkt_state, pkt_state));
+		item->state = WLFC_HANGER_ITEM_STATE_FREE;
+	}
+} /* _dhd_wlfc_hanger_free_pkt */
+
+/** Called during eg detach() */
+static void
+_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq,
+	bool dir, f_processpkt_t fn, void *arg, q_type_t q_type)
+{
+	int prec;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	ASSERT(dhdp);
+
+	/* Optimize flush, if pktq len = 0, just return.
+	 * pktq len of 0 means pktq's prec q's are all empty.
+	 */
+	if (pq->len == 0) {
+		return;
+	}
+
+	for (prec = 0; prec < pq->num_prec; prec++) {
+		struct pktq_prec *q;
+		void *p, *prev = NULL;
+
+		q = &pq->q[prec];
+		p = q->head;
+		while (p) {
+			bcm_pkt_validate_chk(p);
+			if (fn == NULL || (*fn)(p, arg)) {
+				bool head = (p == q->head);
+				if (head)
+					q->head = PKTLINK(p);
+				else
+					PKTSETLINK(prev, PKTLINK(p));
+				if (q_type == Q_TYPE_PSQ) {
+					if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+						_dhd_wlfc_hanger_remove_reference(ctx->hanger, p);
+					}
+					ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+					ctx->pkt_cnt_per_ac[prec>>1]--;
+					ctx->pkt_cnt_in_psq--;
+					ctx->stats.cleanup_psq_cnt++;
+					if (!(prec & 1)) {
+						/* pkt in delayed q, so fake push BDC header for
+						 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+						 */
+						_dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0,
+							0, 0, TRUE);
+#ifdef DHDTCPACK_SUPPRESS
+						if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+							DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+								" Stop using it\n",
+								__FUNCTION__, __LINE__));
+							dhd_tcpack_suppress_set(dhdp,
+								TCPACK_SUP_OFF);
+						}
+#endif /* DHDTCPACK_SUPPRESS */
+					}
+				} else if (q_type == Q_TYPE_AFQ) {
+					wlfc_mac_descriptor_t* entry =
+						_dhd_wlfc_find_table_entry(ctx, p);
+					if (entry->transit_count)
+						entry->transit_count--;
+					if (entry->suppr_transit_count) {
+						entry->suppr_transit_count--;
+						if (entry->suppressed &&
+							(!entry->onbus_pkts_count) &&
+							(!entry->suppr_transit_count))
+							entry->suppressed = FALSE;
+					}
+					_dhd_wlfc_return_implied_credit(ctx, p);
+					ctx->stats.cleanup_fw_cnt++;
+				}
+				PKTSETLINK(p, NULL);
+				if (dir) {
+					ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+					ctx->stats.pktout++;
+					dhd_txcomplete(dhdp, p, FALSE);
+				}
+				PKTFREE(ctx->osh, p, dir);
+
+				q->len--;
+				pq->len--;
+				p = (head ? q->head : PKTLINK(prev));
+			} else {
+				prev = p;
+				p = PKTLINK(p);
+			}
+		}
+
+		if (q->head == NULL) {
+			ASSERT(q->len == 0);
+			q->tail = NULL;
+		}
+
+	}
+
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+} /* _dhd_wlfc_pktq_flush */
+
+#ifndef BCMDBUS
+/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. */
+static void*
+_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		return NULL;
+
+	bcm_pkt_validate_chk(p);
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+/** !BCMDBUS specific function. Flushes packets matching the caller supplied filter 'fn' from the bus txq, completes and frees them. */
+static void
+_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+	wlfc_mac_descriptor_t* entry;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+#ifdef DHDTCPACK_SUPPRESS
+			if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) {
+				DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+					__FUNCTION__, __LINE__));
+				dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+			}
+#endif /* DHDTCPACK_SUPPRESS */
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode) &&
+			!_dhd_wlfc_hanger_remove_reference(h, pkt)) {
+			DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n",
+				__FUNCTION__, pkt));
+		}
+		if (entry->transit_count)
+			entry->transit_count--;
+		if (entry->suppr_transit_count) {
+			entry->suppr_transit_count--;
+			if (entry->suppressed &&
+				(!entry->onbus_pkts_count) &&
+				(!entry->suppr_transit_count))
+				entry->suppressed = FALSE;
+		}
+		_dhd_wlfc_return_implied_credit(wlfc, pkt);
+		wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--;
+		wlfc->stats.pktout++;
+		wlfc->stats.cleanup_txq_cnt++;
+		dhd_txcomplete(dhd, pkt, FALSE);
+		PKTFREE(wlfc->osh, pkt, TRUE);
+	}
+} /* _dhd_wlfc_cleanup_txq */
+#endif /* !BCMDBUS */
+
+/** called during eg detach */
+void
+_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int i;
+	int total_entries;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+
+	wlfc->stats.cleanup_txq_cnt = 0;
+	wlfc->stats.cleanup_psq_cnt = 0;
+	wlfc->stats.cleanup_fw_cnt = 0;
+
+	/*
+	*  flush sequence should be txq -> psq -> hanger/afq, hanger has to be last one
+	*/
+#ifndef BCMDBUS
+	/* flush bus->txq */
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+#endif /* !BCMDBUS */
+
+	/* flush psq; search all entries, including nodes as well as interfaces */
+	total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+	table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+	for (i = 0; i < total_entries; i++) {
+		if (table[i].occupied) {
+			/* release packets held in PSQ (both delayed and suppressed) */
+			if (table[i].psq.len) {
+				WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n",
+					__FUNCTION__, i, table[i].psq.len));
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE,
+					fn, arg, Q_TYPE_PSQ);
+			}
+
+			/* free packets held in AFQ */
+			if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) {
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE,
+					fn, arg, Q_TYPE_AFQ);
+			}
+
+			if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) {
+				table[i].occupied = 0;
+				if (table[i].transit_count || table[i].suppr_transit_count) {
+					DHD_ERROR(("%s: table[%d] transit(%d), suppr_transit(%d)\n",
+						__FUNCTION__, i,
+						table[i].transit_count,
+						table[i].suppr_transit_count));
+				}
+			}
+		}
+	}
+
+	/*
+		. flush the remaining pkts in the hanger queue, i.e. pkts that are in neither bus->txq nor a psq.
+		. these pkts were already successfully downloaded to the dongle.
+		. a hanger slot cannot be set to free until a txstatus update is received.
+	*/
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		for (i = 0; i < h->max_items; i++) {
+			if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+				(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+				if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
+					h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED;
+				}
+			}
+		}
+	}
+
+	return;
+} /* _dhd_wlfc_cleanup */
+
+/** Called after eg the dongle signalled to the DHD a new remote MAC that it has connected with */
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea,
+	f_processpkt_t fn, void *arg)
+{
+	int rc = BCME_OK;
+
+	if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
+		entry->occupied = 1;
+		entry->state = WLFC_STATE_OPEN;
+		entry->requested_credit = 0;
+		entry->interface_id = ifid;
+		entry->iftype = iftype;
+		entry->ac_bitmap = 0xff; /* update this when handling APSD */
+
+		/* for an interface entry we may not care about the MAC address */
+		if (ea != NULL)
+			memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+
+		if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+			entry->suppressed = FALSE;
+			entry->transit_count = 0;
+			entry->suppr_transit_count = 0;
+			entry->onbus_pkts_count = 0;
+		}
+
+		if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+			dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
+			pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+			_dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
+
+			if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN);
+			}
+
+			if (entry->next == NULL) {
+				/* not linked to anywhere, add to tail */
+				if (ctx->active_entry_head) {
+					entry->prev = ctx->active_entry_head->prev;
+					ctx->active_entry_head->prev->next = entry;
+					ctx->active_entry_head->prev = entry;
+					entry->next = ctx->active_entry_head;
+				} else {
+					ASSERT(ctx->active_entry_count == 0);
+					entry->prev = entry->next = entry;
+					ctx->active_entry_head = entry;
+				}
+				ctx->active_entry_count++;
+			} else {
+				DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__,
+					(int)(entry - &ctx->destination_entries.nodes[0])));
+			}
+		}
+	} else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+		/* When the entry is deleted, the packets that are queued in the entry must be
+		   cleaned up. The cleanup must happen before 'occupied' is set to 0.
+		*/
+		_dhd_wlfc_cleanup(ctx->dhdp, fn, arg);
+		_dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
+
+		entry->occupied = 0;
+		entry->state = WLFC_STATE_CLOSE;
+		memset(&entry->ea[0], 0, ETHER_ADDR_LEN);
+
+		if (entry->next) {
+			/* not floating, remove from Q */
+			if (ctx->active_entry_count <= 1) {
+				/* last item */
+				ctx->active_entry_head = NULL;
+				ctx->active_entry_count = 0;
+			} else {
+				entry->prev->next = entry->next;
+				entry->next->prev = entry->prev;
+				if (entry == ctx->active_entry_head) {
+					ctx->active_entry_head = entry->next;
+				}
+				ctx->active_entry_count--;
+			}
+			entry->next = entry->prev = NULL;
+		} else {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		}
+	}
+	return rc;
+} /* _dhd_wlfc_mac_entry_update */
+
+
+#ifdef LIMIT_BORROW
+
+/** LIMIT_BORROW specific function */
+static int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac,
+	bool bBorrowAll)
+{
+	int lender_ac, borrow_limit = 0;
+	int rc = -1;
+
+	if (ctx == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+
+	/* Borrow from lowest priority available AC (including BC/MC credits) */
+	for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
+		if (!bBorrowAll) {
+			borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO;
+		} else {
+			borrow_limit = 0;
+		}
+
+		if (ctx->FIFO_credit[lender_ac] > borrow_limit) {
+			ctx->credits_borrowed[borrower_ac][lender_ac]++;
+			ctx->FIFO_credit[lender_ac]--;
+			rc = lender_ac;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/** LIMIT_BORROW specific function */
+static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac)
+{
+	if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) ||
+		(borrower_ac < 0) || (borrower_ac > AC_COUNT)) {
+		DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n",
+			__FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac));
+
+		return BCME_BADARG;
+	}
+
+	ctx->credits_borrowed[borrower_ac][lender_ac]--;
+	ctx->FIFO_credit[lender_ac]++;
+
+	return BCME_OK;
+}
+
+#endif /* LIMIT_BORROW */
+
+/**
+ * Called on an interface event (WLC_E_IF) indicated by firmware.
+ *     @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD
+ */
+static int
+_dhd_wlfc_interface_entry_update(void* state,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	wlfc_mac_descriptor_t* entry;
+
+	if (ifid >= WLFC_MAX_IFNUM)
+		return BCME_BADARG;
+
+	entry = &ctx->destination_entries.interfaces[ifid];
+
+	return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea,
+		_dhd_wlfc_ifpkt_fn, &ifid);
+}
+
+/**
+ * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast
+ * specific)
+ */
+static int
+_dhd_wlfc_BCMCCredit_support_update(void* state)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+	ctx->bcmc_credit_supported = TRUE;
+	return BCME_OK;
+}
+
+/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */
+static int
+_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	int i;
+
+	for (i = 0; i <= 4; i++) {
+		if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) {
+			DHD_ERROR(("%s: credit[%d] is not returned, (%d %d)\n",
+				__FUNCTION__, i, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i]));
+		}
+	}
+
+	/* update the AC FIFO credit map */
+	ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]);
+	ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]);
+	ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]);
+	ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]);
+	ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]);
+
+	ctx->Init_FIFO_credit[0] = credits[0];
+	ctx->Init_FIFO_credit[1] = credits[1];
+	ctx->Init_FIFO_credit[2] = credits[2];
+	ctx->Init_FIFO_credit[3] = credits[3];
+	ctx->Init_FIFO_credit[4] = credits[4];
+
+	/* credit for ATIM FIFO is not used yet. */
+	ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0;
+
+	return BCME_OK;
+}
+
+/**
+ * Called during committing of a transmit packet from the OS DHD layer to the next layer towards
+ * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer.
+ *
+ *     @param[in/out] ctx      Driver specific flow control administration
+ *     @param[in] ac           Access Category (QoS) of called supplied packet
+ *     @param[in] commit_info  Contains eg the packet to send
+ *     @param[in] fcommit      Function pointer to transmit function of next software layer
+ *     @param[in] commit_ctx   Opaque context used when calling next layer
+ */
+static int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+    dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+	uint32 hslot;
+	int	rc;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
+	/*
+		if ac_fifo_credit_spent = 0
+
+		This packet will not count against the FIFO credit.
+		To ensure the txstatus corresponding to this packet
+		does not provide an implied credit (default behavior)
+		mark the packet accordingly.
+
+		if ac_fifo_credit_spent = 1
+
+		This is a normal packet and it counts against the FIFO
+		credit count.
+	*/
+	DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+	rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, &commit_info->p,
+	     commit_info->needs_hdr, &hslot);
+
+	if (rc == BCME_OK) {
+		rc = fcommit(commit_ctx, commit_info->p);
+		if (rc == BCME_OK) {
+			uint8 gen = WL_TXSTATUS_GET_GENERATION(
+				DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p)));
+			ctx->stats.pkt2bus++;
+			if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) {
+				ctx->stats.send_pkts[ac]++;
+				WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+			}
+
+			if (gen != commit_info->mac_entry->generation) {
+				/* will be suppressed back by design */
+				if (!commit_info->mac_entry->suppressed) {
+					commit_info->mac_entry->suppressed = TRUE;
+				}
+				commit_info->mac_entry->suppr_transit_count++;
+			}
+			commit_info->mac_entry->transit_count++;
+			commit_info->mac_entry->onbus_pkts_count++;
+		} else if (commit_info->needs_hdr) {
+			if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				void *pout = NULL;
+				/* pop hanger for delayed packet */
+				_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(
+					DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE);
+				ASSERT(commit_info->p == pout);
+			}
+		}
+	} else {
+		ctx->stats.generic_error++;
+	}
+
+	if (rc != BCME_OK) {
+		/*
+		   pretx pkt processing or the bus commit has failed, roll back:
+		   - remove the wl-header for a delayed packet
+		   - save the wl-header for suppressed packets
+		   - reset credit check flag
+		*/
+		_dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot);
+		DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0);
+	}
+
+	return rc;
+} /* _dhd_wlfc_handle_packet_commit */
+
+/** Returns the table index of the remote MAC descriptor matching the caller supplied MAC address, or WLFC_MAC_DESC_ID_INVALID if none is found */
+static uint8
+_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea)
+{
+	wlfc_mac_descriptor_t* table =
+		((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+	uint8 table_index;
+
+	if (ea != NULL) {
+		for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+			if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+				table[table_index].occupied)
+				return table_index;
+		}
+	}
+	return WLFC_MAC_DESC_ID_INVALID;
+}
+
+/**
+ * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the
+ * status of a frame that the dongle attempted to transmit over the wireless medium.
+ */
+static int
+dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt)
+{
+	athost_wl_status_info_t* ctx;
+	wlfc_mac_descriptor_t* entry = NULL;
+	struct pktq *pq;
+	struct pktq_prec *q;
+	void *p, *b;
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd));
+		return BCME_BADARG;
+	}
+	ctx = (athost_wl_status_info_t*)dhd->wlfc_state;
+	if (!ctx) {
+		DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx));
+		return BCME_ERROR;
+	}
+
+	ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+	if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[hslot];
+	else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pq = &entry->psq;
+
+	ASSERT(((prec << 1) + 1) < pq->num_prec);
+
+	q = &pq->q[((prec << 1) + 1)];
+
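+	/* walk the suppressed sub-queue, looking for the packet whose free-running counter matches 'hcnt' */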
+	b = NULL;
+	p = q->head;
+
+	while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) {
+		b = p;
+		p = PKTLINK(p);
+	}
+
+	if (p == NULL) {
+		/* none is matched */
+		if (b) {
+			DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+		} else {
+			DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+		}
+
+		return BCME_ERROR;
+	}
+
+	if (!b) {
+		/* head packet is matched */
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		/* middle packet is matched */
+		PKTSETLINK(b, PKTLINK(p));
+		if (PKTLINK(p) == NULL) {
+			q->tail = b;
+		}
+	}
+
+	q->len--;
+	pq->len--;
+	ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+	ctx->pkt_cnt_per_ac[prec]--;
+
+	PKTSETLINK(p, NULL);
+
+	if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		_dhd_wlfc_enque_afq(ctx, p);
+	} else {
+		_dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+	}
+
+	entry->transit_count++;
+
+	return BCME_OK;
+}
+
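+/**
+ * Handles a (compressed) txstatus report from the dongle covering 'len' consecutive packets.
+ * Depending on the status flag, each packet is freed, re-enqueued as suppressed, or dropped,
+ * and any implied credit is returned.
+ */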
+static int
+_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac)
+{
+	uint8 status_flag_ori, status_flag;
+	uint32 status;
+	int ret = BCME_OK;
+	int remove_from_hanger_ori, remove_from_hanger = 1;
+	void* pktbuf = NULL;
+	uint8 fifo_id = 0, gen = 0, count = 0, hcnt;
+	uint16 hslot;
+	wlfc_mac_descriptor_t* entry = NULL;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	uint16 seq = 0, seq_fromfw = 0, seq_num = 0;
+
+	memcpy(&status, pkt_info, sizeof(uint32));
+	status = ltoh32(status);
+	status_flag = WL_TXSTATUS_GET_FLAGS(status);
+	hcnt = WL_TXSTATUS_GET_FREERUNCTR(status);
+	hslot = WL_TXSTATUS_GET_HSLOT(status);
+	fifo_id = WL_TXSTATUS_GET_FIFO(status);
+	gen = WL_TXSTATUS_GET_GENERATION(status);
+
+	if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+		memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ);
+		seq = ltoh16(seq);
+		seq_fromfw = GET_WL_HAS_ASSIGNED_SEQ(seq);
+		seq_num = WL_SEQ_GET_NUM(seq);
+	}
+
+	wlfc->stats.txstatus_in += len;
+
+	if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+		wlfc->stats.pkt_freed += len;
+	} else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
+		wlfc->stats.pkt_freed += len;
+	} else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+		wlfc->stats.d11_suppress += len;
+		remove_from_hanger = 0;
+	} else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+		wlfc->stats.wl_suppress += len;
+		remove_from_hanger = 0;
+	} else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+		wlfc->stats.wlc_tossed_pkts += len;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
+		wlfc->stats.pkt_freed += len;
+	}
+
+	if (dhd->proptxstatus_txstatus_ignore) {
+		if (!remove_from_hanger) {
+			DHD_ERROR(("suppress txstatus: %d\n", status_flag));
+		}
+		return BCME_OK;
+	}
+
+	status_flag_ori = status_flag;
+	remove_from_hanger_ori = remove_from_hanger;
+
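+	/* a compressed txstatus reports 'len' consecutive statuses; process them one at a time */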
+	while (count < len) {
+		if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
+			dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt);
+		}
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf);
+		} else {
+			status_flag = status_flag_ori;
+			remove_from_hanger = remove_from_hanger_ori;
+			ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE);
+			if (!pktbuf) {
+				_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+					WLFC_HANGER_PKT_STATE_TXSTATUS, -1);
+				goto cont;
+			} else {
+				wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+				if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) {
+					status_flag = WLFC_CTL_PKTFLAG_DISCARD;
+					remove_from_hanger = 1;
+				}
+			}
+		}
+
+		if ((ret != BCME_OK) || !pktbuf) {
+			goto cont;
+		}
+
+		bcm_pkt_validate_chk(pktbuf);
+
+		/* set fifo_id to the correct value because not all firmware versions do that */
+		fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+
+		if (!remove_from_hanger) {
+			/* this packet was suppressed */
+			if (!entry->suppressed || (entry->generation != gen)) {
+				if (!entry->suppressed) {
+					entry->suppr_transit_count = entry->transit_count;
+					if (p_mac) {
+						*p_mac = entry;
+					}
+				} else {
+					DHD_ERROR(("gen(%d), entry->generation(%d)\n",
+						gen, entry->generation));
+				}
+				entry->suppressed = TRUE;
+
+			}
+			entry->generation = gen;
+		}
+
+#ifdef PROP_TXSTATUS_DEBUG
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+		{
+			uint32 new_t = OSL_SYSUPTIME();
+			uint32 old_t;
+			uint32 delta;
+			old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time;
+
+			wlfc->stats.latency_sample_count++;
+			if (new_t > old_t)
+				delta = new_t - old_t;
+			else
+				delta = 0xffffffff + new_t - old_t;
+			wlfc->stats.total_status_latency += delta;
+			wlfc->stats.latency_most_recent = delta;
+
+			wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+			if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+				wlfc->stats.idx_delta = 0;
+		}
+#endif /* PROP_TXSTATUS_DEBUG */
+
+		/* pick up the implicit credit from this packet */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+			_dhd_wlfc_return_implied_credit(wlfc, pktbuf);
+		} else {
+			/*
+			if this packet did not count against FIFO credit, it must have
+			taken a requested_credit from the destination entry (for pspoll etc.)
+			*/
+			if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) {
+				entry->requested_credit++;
+#if defined(DHD_WLFC_THREAD)
+				_dhd_wlfc_thread_wakeup(dhd);
+#endif /* DHD_WLFC_THREAD */
+			}
+#ifdef PROP_TXSTATUS_DEBUG
+			entry->dstncredit_acks++;
+#endif
+		}
+
+		if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+			(status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+			/* save generation bit inside packet */
+			WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen);
+
+			if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+				WL_SEQ_SET_REUSE(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw);
+				WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num);
+			}
+
+			ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+			if (ret != BCME_OK) {
+				/* delay q is full, drop this packet */
+				DHD_WLFC_QMON_COMPLETE(entry);
+				_dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE);
+			} else {
+				if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+					/* Mark suppressed to avoid a double free
+					during wlfc cleanup
+					*/
+					_dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen);
+				}
+			}
+		} else {
+
+			DHD_WLFC_QMON_COMPLETE(entry);
+
+			if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+				_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+					WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE);
+			} else {
+				dhd_txcomplete(dhd, pktbuf, TRUE);
+				wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))]
+					[DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--;
+				wlfc->stats.pktout++;
+				/* free the packet */
+				PKTFREE(wlfc->osh, pktbuf, TRUE);
+			}
+		}
+		/* pkt back from firmware side */
+		if (entry->transit_count)
+			entry->transit_count--;
+		if (entry->suppr_transit_count) {
+			entry->suppr_transit_count--;
+			if (entry->suppressed &&
+				(!entry->onbus_pkts_count) &&
+				(!entry->suppr_transit_count))
+				entry->suppressed = FALSE;
+		}
+
+cont:
+		hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK;
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK;
+		}
+
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) {
+			seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK;
+		}
+
+		count++;
+	}
+
+	return BCME_OK;
+} /* _dhd_wlfc_compressed_txstatus_update */
+
+/**
+ * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle.
+ *    @param[in] credits caller supplied credit that will be added to the host credit.
+ */
+static int
+_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+	int i;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+
+		/* update FIFO credits */
+		if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+		{
+			int lender; /* Note that borrower is i */
+
+			/* Return credits to highest priority lender first */
+			for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+				if (wlfc->credits_borrowed[i][lender] > 0) {
+					if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+						credits[i] -=
+							(uint8)wlfc->credits_borrowed[i][lender];
+						wlfc->FIFO_credit[lender] +=
+						    wlfc->credits_borrowed[i][lender];
+						wlfc->credits_borrowed[i][lender] = 0;
+					} else {
+						wlfc->credits_borrowed[i][lender] -= credits[i];
+						wlfc->FIFO_credit[lender] += credits[i];
+						credits[i] = 0;
+					}
+				}
+			}
+
+			/* If we have more credits left over, these must belong to the AC */
+			if (credits[i] > 0) {
+				wlfc->FIFO_credit[i] += credits[i];
+			}
+
+			if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) {
+				wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i];
+			}
+		}
+	}
+
+#if defined(DHD_WLFC_THREAD)
+	_dhd_wlfc_thread_wakeup(dhd);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+	return BCME_OK;
+} /* _dhd_wlfc_fifocreditback_indicate */
+
+#ifndef BCMDBUS
+/** !BCMDBUS specific function. Removes packets matching 'fn' from the bus txq and fakes a suppression txstatus (and FIFO credit back, if applicable) for each of them. */
+static void
+_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* entry;
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	uint8	results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ];
+	uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0};
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	bool bCreditUpdate = FALSE;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+		if (!entry) {
+			PKTFREE(dhd->osh, pkt, TRUE);
+			continue;
+		}
+		if (entry->onbus_pkts_count > 0) {
+			entry->onbus_pkts_count--;
+		}
+		if (entry->suppressed &&
+				(!entry->onbus_pkts_count) &&
+				(!entry->suppr_transit_count)) {
+			entry->suppressed = FALSE;
+		}
+		/* fake a suppression txstatus */
+		htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt));
+		WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS);
+		WL_TXSTATUS_SET_GENERATION(htod, entry->generation);
+		htod = htol32(htod);
+		memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS);
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt));
+			if (IS_WL_TO_REUSE_SEQ(htodseq)) {
+				SET_WL_HAS_ASSIGNED_SEQ(htodseq);
+				RESET_WL_TO_REUSE_SEQ(htodseq);
+			}
+			htodseq = htol16(htodseq);
+			memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq,
+				WLFC_CTL_VALUE_LEN_SEQ);
+		}
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, pkt);
+		}
+		_dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL);
+
+		/* fake a fifo credit back */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+			credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++;
+			bCreditUpdate = TRUE;
+		}
+	}
+
+	if (bCreditUpdate) {
+		_dhd_wlfc_fifocreditback_indicate(dhd, credits);
+	}
+} /* _dhd_wlfc_suppress_txq */
+#endif /* !BCMDBUS */
+
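+/** Debug helper for WLFC_CTL_TYPE_TRANS_ID TLVs; logs the rx packet sequence number and timestamp */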
+static int
+_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
+{
+	uint32 timestamp;
+
+	(void)dhd;
+
+	bcopy(&value[2], &timestamp, sizeof(uint32));
+	timestamp = ltoh32(timestamp);
+	DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp));
+	return BCME_OK;
+}
+
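+/** Called on receiving a WLFC_CTL_TYPE_RSSI TLV from the dongle; the reported RSSI is currently ignored */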
+static int
+_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+	(void)dhd;
+	(void)rssi;
+	return BCME_OK;
+}
+
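+/** Called on eg receiving a 'request credit' or 'request packet' TLV from the dongle; adds the remote MAC entry to the requested entry list if not already present */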
+static void
+_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i == wlfc->requested_entry_count) {
+		/* no matching entry found */
+		ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1));
+		wlfc->requested_entry[wlfc->requested_entry_count++] = entry;
+	}
+}
+
+/** called on eg receiving 'mac open' event from the dongle. */
+static void
+_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i < wlfc->requested_entry_count) {
+		/* found */
+		ASSERT(wlfc->requested_entry_count > 0);
+		wlfc->requested_entry_count--;
+		if (i != wlfc->requested_entry_count) {
+			wlfc->requested_entry[i] =
+				wlfc->requested_entry[wlfc->requested_entry_count];
+		}
+		wlfc->requested_entry[wlfc->requested_entry_count] = NULL;
+	}
+}
+
+/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */
+static int
+_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	int rc;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 existing_index;
+	uint8 table_index;
+	uint8 ifid;
+	uint8* ea;
+
+	WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+		__FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+		((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+		WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+	table = wlfc->destination_entries.nodes;
+	table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+	ifid = value[1];
+	ea = &value[2];
+
+	_dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]);
+	if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+		existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+		if ((existing_index != WLFC_MAC_DESC_ID_INVALID) &&
+			(existing_index != table_index) && table[existing_index].occupied) {
+			/*
+			there is an existing different entry, free the old one
+			and move it to new index if necessary.
+			*/
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index],
+				eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id,
+				table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn,
+				&table[existing_index]);
+		}
+
+		if (!table[table_index].occupied) {
+			/* this new MAC entry does not exist, create one */
+			table[table_index].mac_handle = value[0];
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+				eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+				wlfc->destination_entries.interfaces[ifid].iftype,
+				ea, NULL, NULL);
+		} else {
+			/* the space should have been empty, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+
+	if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+		if (table[table_index].occupied) {
+				rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+					eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+					wlfc->destination_entries.interfaces[ifid].iftype,
+					ea, _dhd_wlfc_entrypkt_fn, &table[table_index]);
+		} else {
+			/* the space should have been occupied, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+	BCM_REFERENCE(rc);
+	return BCME_OK;
+} /* _dhd_wlfc_mac_table_update */
+
+/** Called on a 'mac open' or 'mac close' event indicated by the dongle */
+static int
+_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc; /* a table maps from mac handle to mac descriptor */
+	uint8 mac_handle = value[0];
+	int i;
+
+	table = wlfc->destination_entries.nodes;
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+			desc->state = WLFC_STATE_OPEN;
+			desc->ac_bitmap = 0xff;
+			DHD_WLFC_CTRINC_MAC_OPEN(desc);
+			desc->requested_credit = 0;
+			desc->requested_packet = 0;
+			_dhd_wlfc_remove_requested_entry(wlfc, desc);
+		} else {
+			desc->state = WLFC_STATE_CLOSE;
+			DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+			/* Indicate to firmware if there is any traffic pending. */
+			for (i = 0; i < AC_COUNT; i++) {
+				_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+			}
+		}
+	} else {
+		wlfc->stats.psmode_update_failed++;
+	}
+
+	return BCME_OK;
+} /* _dhd_wlfc_psmode_update */
+
+/** called upon receiving 'interface open' or 'interface close' event from the dongle */
+static int
+_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 if_id = value[0];
+
+	if (if_id < WLFC_MAX_IFNUM) {
+		table = wlfc->destination_entries.interfaces;
+		if (table[if_id].occupied) {
+			if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+				table[if_id].state = WLFC_STATE_OPEN;
+				/* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+			} else {
+				table[if_id].state = WLFC_STATE_CLOSE;
+				/* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+			}
+			return BCME_OK;
+		}
+	}
+	wlfc->stats.interface_update_failed++;
+
+	return BCME_OK;
+}
+
+/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */
+static int
+_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 credit;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	credit = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_credit = credit;
+
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+#if defined(DHD_WLFC_THREAD)
+		if (credit) {
+			_dhd_wlfc_thread_wakeup(dhd);
+		}
+#endif /* DHD_WLFC_THREAD */
+	} else {
+		wlfc->stats.credit_request_failed++;
+	}
+
+	return BCME_OK;
+}
+
+/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */
+static int
+_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 packet_count;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	packet_count = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_packet = packet_count;
+
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+#if defined(DHD_WLFC_THREAD)
+		if (packet_count) {
+			_dhd_wlfc_thread_wakeup(dhd);
+		}
+#endif /* DHD_WLFC_THREAD */
+	} else {
+		wlfc->stats.packet_request_failed++;
+	}
+
+	return BCME_OK;
+}
+
+/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */
+static void
+_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+{
+	if (info_len) {
+		/* Check the copy length to avoid buffer overrun. If the length exceeds
+		*  WLHOST_REORDERDATA_TOTLEN, return failure instead of sending an incomplete
+		*  result of length WLHOST_REORDERDATA_TOTLEN
+		*/
+		if ((info_buf) && (len <= WLHOST_REORDERDATA_TOTLEN)) {
+			bcopy(val, info_buf, len);
+			*info_len = len;
+		} else {
+			*info_len = 0;
+		}
+	}
+}
+
+/*
+ * public functions
+ */
+
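+/** Returns TRUE if proptxstatus flow control is enabled and currently active */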
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd)
+{
+	bool rc = TRUE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rc =  FALSE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
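+/** Enables proptxstatus flow control: allocates and initializes the wlfc state and, when AFQ mode is not used, the hanger. No-op if wlfc is disabled or already enabled. */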
+int dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+	int i, rc = BCME_OK;
+	athost_wl_status_info_t* wlfc;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_enabled || dhd->wlfc_state) {
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* allocate space to track txstatus propagated from firmware */
+	dhd->wlfc_state = DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_INFO,
+		sizeof(athost_wl_status_info_t));
+	if (dhd->wlfc_state == NULL) {
+		rc = BCME_NOMEM;
+		goto exit;
+	}
+
+	/* initialize state space */
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+	/* remember osh & dhdp */
+	wlfc->osh = dhd->osh;
+	wlfc->dhdp = dhd;
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS);
+		if (wlfc->hanger == NULL) {
+			DHD_OS_PREFREE(dhd, dhd->wlfc_state,
+				sizeof(athost_wl_status_info_t));
+			dhd->wlfc_state = NULL;
+			rc = BCME_NOMEM;
+			goto exit;
+		}
+	}
+
+	dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+	/* default to checking rx packets */
+	dhd->wlfc_rxpkt_chk = TRUE;
+	if (dhd->op_mode & DHD_FLAG_IBSS_MODE) {
+		dhd->wlfc_rxpkt_chk = FALSE;
+	}
+
+	/* initialize all interfaces to accept traffic */
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		wlfc->hostif_flow_state[i] = OFF;
+	}
+
+	_dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other,
+		eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL);
+
+	wlfc->allow_credit_borrow = 0;
+	wlfc->single_ac = 0;
+	wlfc->single_ac_timestamp = 0;
+
+exit:
+	DHD_ERROR(("%s: ret=%d\n", __FUNCTION__, rc));
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+} /* dhd_wlfc_enable */
+
+#ifdef SUPPORT_P2P_GO_PS
+
+/**
+ * Called when the host platform enters a lower power mode, eg right before a system hibernate.
+ * SUPPORT_P2P_GO_PS specific function.
+ */
+int
+dhd_wlfc_suspend(dhd_pub_t *dhd)
+{
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0))
+		return -1;
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0)
+		return 0;
+	tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0))
+		return -1;
+
+	return 0;
+}
+
+/**
+ * Called when the host platform resumes from a power management operation, eg resume after a
+ * system hibernate. SUPPORT_P2P_GO_PS specific function.
+ */
+int
+dhd_wlfc_resume(dhd_pub_t *dhd)
+{
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0))
+		return -1;
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) ==
+		(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS))
+		return 0;
+	tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0))
+		return -1;
+
+	return 0;
+}
+
+#endif /* SUPPORT_P2P_GO_PS */
+
+/** A flow control header was received from firmware, containing one or more TLVs */
+int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
+	uint *reorder_info_len)
+{
+	uint8 type, len;
+	uint8* value;
+	uint8* tmpbuf;
+	uint16 remainder = (uint16)tlv_hdr_len;
+	uint16 processed = 0;
+	athost_wl_status_info_t* wlfc = NULL;
+	void* entry;
+
+	if ((dhd == NULL) || (pktbuf == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) {
+		if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+			dhd_os_wlfc_unblock(dhd);
+			return WLFC_UNSUPPORTED;
+		}
+		wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	}
+
+	tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+
+	if (remainder) {
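+		/* walk the TLV records: [type, length, value], with single-byte filler records in between */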
+		while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+			type = tmpbuf[processed];
+			if (type == WLFC_CTL_TYPE_FILLER) {
+				remainder -= 1;
+				processed += 1;
+				continue;
+			}
+
+			len  = tmpbuf[processed + 1];
+			value = &tmpbuf[processed + 2];
+
+			if (remainder < (2 + len))
+				break;
+
+			remainder -= 2 + len;
+			processed += 2 + len;
+			entry = NULL;
+
+			DHD_INFO(("%s():%d type %d remainder %d processed %d\n",
+				__FUNCTION__, __LINE__, type, remainder, processed));
+
+			if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+				_dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+					reorder_info_len);
+
+			if (wlfc == NULL) {
+				ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER);
+
+				if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS &&
+					type != WLFC_CTL_TYPE_TRANS_ID)
+					DHD_INFO(("%s():%d dhd->wlfc_state is still NULL!"
+					" type %d remainder %d processed %d\n",
+					__FUNCTION__, __LINE__, type, remainder, processed));
+				continue;
+			}
+
+			if (type == WLFC_CTL_TYPE_TXSTATUS) {
+				_dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry);
+			} else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
+				uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS;
+
+				if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+					compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ;
+				}
+				_dhd_wlfc_compressed_txstatus_update(dhd, value,
+					value[compcnt_offset], &entry);
+			} else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) {
+				_dhd_wlfc_fifocreditback_indicate(dhd, value);
+			} else if (type == WLFC_CTL_TYPE_RSSI) {
+				_dhd_wlfc_rssi_indicate(dhd, value);
+			} else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) {
+				_dhd_wlfc_credit_request(dhd, value);
+			} else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) {
+				_dhd_wlfc_packet_request(dhd, value);
+			} else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+				(type == WLFC_CTL_TYPE_MAC_CLOSE)) {
+				_dhd_wlfc_psmode_update(dhd, value, type);
+			} else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+				(type == WLFC_CTL_TYPE_MACDESC_DEL)) {
+				_dhd_wlfc_mac_table_update(dhd, value, type);
+			} else if (type == WLFC_CTL_TYPE_TRANS_ID) {
+				_dhd_wlfc_dbg_senum_check(dhd, value);
+			} else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+				(type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+				_dhd_wlfc_interface_update(dhd, value, type);
+			}
+
+#ifndef BCMDBUS
+			if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) {
+				/* suppress all packets for this mac entry from bus->txq */
+				_dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry);
+			}
+#endif /* !BCMDBUS */
+		} /* while */
+
+		if (remainder != 0 && wlfc) {
+			/* trouble..., something is not right */
+			wlfc->stats.tlv_parse_failed++;
+		}
+	} /* if */
+
+	if (wlfc)
+		wlfc->stats.dhd_hdrpulls++;
+
+	dhd_os_wlfc_unblock(dhd);
+	return BCME_OK;
+}
+
+KERNEL_THREAD_RETURN_TYPE
+dhd_wlfc_transfer_packets(void *data)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)data;
+	int ac, single_ac = 0, rc = BCME_OK;
+	dhd_wlfc_commit_info_t  commit_info;
+	athost_wl_status_info_t* ctx;
+	int bus_retry_count = 0;
+	int pkt_send = 0;
+	int pkt_send_per_ac = 0;
+
+	uint8 tx_map = 0; /* packets (sent + in queue), bitmask for 4 ACs + BC/MC */
+	uint8 rx_map = 0; /* received packets, bitmask for 4 ACs + BC/MC */
+	uint8 packets_map = 0; /* packets in queue, bitmask for 4 ACs + BC/MC */
+	bool no_credit = FALSE;
+
+	int lender;
+
+#if defined(DHD_WLFC_THREAD)
+	/* wait till someone wakes me up; will be changed at run time */
+	int wait_msec = msecs_to_jiffies(0xFFFFFFFF);
+#endif /* defined(DHD_WLFC_THREAD) */
+
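+	/*
+	 * When DHD_WLFC_THREAD is defined this function runs as a kernel thread:
+	 * it sleeps on dhdp->wlfc_wqhead until dhdp->wlfc_thread_go is set (or the
+	 * timeout below expires) and exits only when kthread_should_stop() fires.
+	 */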
+#if defined(DHD_WLFC_THREAD)
+	while (1) {
+		bus_retry_count = 0;
+		pkt_send = 0;
+		tx_map = 0;
+		rx_map = 0;
+		packets_map = 0;
+		wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead,
+			dhdp->wlfc_thread_go, wait_msec);
+		if (kthread_should_stop()) {
+			break;
+		}
+		dhdp->wlfc_thread_go = FALSE;
+
+		dhd_os_wlfc_block(dhdp);
+#endif /* defined(DHD_WLFC_THREAD) */
+		ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+#if defined(DHD_WLFC_THREAD)
+		if (!ctx)
+			goto exit;
+#endif /* defined(DHD_WLFC_THREAD) */
+
+	memset(&commit_info, 0, sizeof(commit_info));
+
+	/*
+	Commit packets for regular AC traffic. Higher priority first.
+	First, use up FIFO credits available to each AC. Based on distribution
+	and credits left, borrow from other ACs as applicable
+
+	-NOTE:
+	If the bus between the host and firmware is overwhelmed by the
+	traffic from host, it is possible that higher priority traffic
+	starves the lower priority queue. If that occurs often, we may
+	have to employ weighted round-robin or ucode scheme to avoid
+	low priority packet starvation.
+	*/
+
+	for (ac = AC_COUNT; ac >= 0; ac--) {
+		if (dhdp->wlfc_rxpkt_chk) {
+			/* note recent rx activity on this AC; used below to gate credit borrowing */
+			uint32 curr_t = OSL_SYSUPTIME(), delta;
+
+			delta = curr_t - ctx->rx_timestamp[ac];
+			if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) {
+				rx_map |= (1 << ac);
+			}
+		}
+
+		if (ctx->pkt_cnt_per_ac[ac] == 0) {
+			continue;
+		}
+
+		tx_map |= (1 << ac);
+		single_ac = ac + 1;
+		pkt_send_per_ac = 0;
+		while ((FALSE == dhdp->proptxstatus_txoff) &&
+				(pkt_send_per_ac < WLFC_PACKET_BOUND)) {
+			/* packets from the delayQ with lower priority are fresh;
+			 * they still need a header and have no MAC entry
+			 */
+			no_credit = (ctx->FIFO_credit[ac] < 1);
+			if (dhdp->proptxstatus_credit_ignore ||
+				((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) {
+				no_credit = FALSE;
+			}
+
+			lender = -1;
+#ifdef LIMIT_BORROW
+			if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map) &&
+				dhdp->wlfc_borrow_allowed) {
+				/* try borrow from lower priority */
+				lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE);
+				if (lender != -1) {
+					no_credit = FALSE;
+				}
+			}
+#endif
+			commit_info.needs_hdr = 1;
+			commit_info.mac_entry = NULL;
+			commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+				&(commit_info.ac_fifo_credit_spent),
+				&(commit_info.needs_hdr),
+				&(commit_info.mac_entry),
+				no_credit);
+			commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+				eWLFC_PKTTYPE_SUPPRESSED;
+
+			if (commit_info.p == NULL) {
+#ifdef LIMIT_BORROW
+				if (lender != -1 && dhdp->wlfc_borrow_allowed) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+				break;
+			}
+
+			if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) {
+				ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent);
+			}
+			/* at this point we either have credit or no credit is needed */
+			rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+				ctx->fcommit, ctx->commit_ctx);
+
+			/* Bus commits may fail (e.g. flow control); abort after retries */
+			if (rc == BCME_OK) {
+				pkt_send++;
+				pkt_send_per_ac++;
+				if (commit_info.ac_fifo_credit_spent && (lender == -1)) {
+					ctx->FIFO_credit[ac]--;
+				}
+#ifdef LIMIT_BORROW
+				else if (!commit_info.ac_fifo_credit_spent && (lender != -1) &&
+					dhdp->wlfc_borrow_allowed) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+			} else {
+#ifdef LIMIT_BORROW
+				if (lender != -1 && dhdp->wlfc_borrow_allowed) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+				bus_retry_count++;
+				if (bus_retry_count >= BUS_RETRIES) {
+					DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+					goto exit;
+				}
+			}
+		}
+
+		if (ctx->pkt_cnt_per_ac[ac]) {
+			packets_map |= (1 << ac);
+		}
+	}
+
+	if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) {
+		/* nothing sent out or remaining in queue */
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) {
+		/* only one tx AC exists and no higher-priority rx AC */
+		if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) {
+			ac = single_ac - 1;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (single_ac != ctx->single_ac) {
+				/* new single ac traffic (first single ac or different single ac) */
+				ctx->allow_credit_borrow = 0;
+				ctx->single_ac_timestamp = curr_t;
+				ctx->single_ac = (uint8)single_ac;
+				rc = BCME_OK;
+				goto exit;
+			}
+			/* same AC traffic; check whether it has lasted long enough */
+			delta = curr_t - ctx->single_ac_timestamp;
+
+			if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+				/* waited long enough, can borrow now */
+				ctx->allow_credit_borrow = 1;
+				ac = single_ac - 1;
+			} else {
+				rc = BCME_OK;
+				goto exit;
+			}
+		}
+	} else {
+		/* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+		ctx->allow_credit_borrow = 0;
+		ctx->single_ac_timestamp = 0;
+		ctx->single_ac = 0;
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	if (packets_map == 0) {
+		/* nothing to send, skip borrow */
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* At this point, borrow all available credits for this single AC */
+	while (FALSE == dhdp->proptxstatus_txoff) {
+#ifdef LIMIT_BORROW
+		if (dhdp->wlfc_borrow_allowed) {
+			if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) {
+				break;
+			}
+		}
+		else
+			break;
+#endif
+		commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+			&(commit_info.ac_fifo_credit_spent),
+			&(commit_info.needs_hdr),
+			&(commit_info.mac_entry),
+			FALSE);
+		if (commit_info.p == NULL) {
+			/* before borrowing, only one AC had traffic and that AC is now empty */
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			break;
+		}
+
+		commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+			eWLFC_PKTTYPE_SUPPRESSED;
+
+		rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+		     ctx->fcommit, ctx->commit_ctx);
+
+		/* Bus commits may fail (e.g. flow control); abort after retries */
+		if (rc == BCME_OK) {
+			pkt_send++;
+			if (commit_info.ac_fifo_credit_spent) {
+#ifndef LIMIT_BORROW
+				ctx->FIFO_credit[ac]--;
+#endif
+			} else {
+#ifdef LIMIT_BORROW
+				_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			}
+		} else {
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			bus_retry_count++;
+			if (bus_retry_count >= BUS_RETRIES) {
+				DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+				goto exit;
+			}
+		}
+	}
+
+	BCM_REFERENCE(pkt_send);
+
+exit:
+#if defined(DHD_WLFC_THREAD)
+		dhd_os_wlfc_unblock(dhdp);
+		if (ctx && ctx->pkt_cnt_in_psq && pkt_send) {
+			wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS);
+		} else {
+			wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS);
+		}
+	}
+	return 0;
+#else
+	return rc;
+#endif /* defined(DHD_WLFC_THREAD) */
+}
+
+/**
+ * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by
+ * eg dhd_sendpkt().
+ *     @param[in] dhdp                  Pointer to public DHD structure
+ *     @param[in] fcommit               Pointer to transmit function of next layer
+ *     @param[in] commit_ctx            Opaque context used when calling next layer
+ *     @param[in] pktbuf                Packet to send
+ *     @param[in] need_toggle_host_if   If TRUE, resets flag ctx->toggle_host_if
+ */
+int
+dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf,
+	bool need_toggle_host_if)
+{
+	int rc = BCME_OK;
+	athost_wl_status_info_t* ctx;
+
+#if defined(DHD_WLFC_THREAD)
+	if (!pktbuf)
+		return BCME_OK;
+#endif /* defined(DHD_WLFC_THREAD) */
+
+	if ((dhdp == NULL) || (fcommit == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		if (pktbuf) {
+			DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+		}
+		rc =  WLFC_UNSUPPORTED;
+		goto exit;
+	}
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+#ifdef BCMDBUS
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		if (pktbuf) {
+			PKTFREE(ctx->osh, pktbuf, TRUE);
+			rc = BCME_OK;
+		}
+		goto exit;
+	}
+#endif /* BCMDBUS */
+
+	if (dhdp->proptxstatus_module_ignore) {
+		if (pktbuf) {
+			uint32 htod = 0;
+			WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+			_dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+			if (fcommit(commit_ctx, pktbuf)) {
+				/* free it if failed, otherwise do it in tx complete cb */
+				PKTFREE(ctx->osh, pktbuf, TRUE);
+			}
+			rc = BCME_OK;
+		}
+		goto exit;
+	}
+
+	if (pktbuf) {
+		int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+		ASSERT(ac <= AC_COUNT);
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1);
+		/* enqueue the packet on its respective queue. */
+		rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
+		if (rc) {
+			_dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
+		} else {
+			ctx->stats.pktin++;
+			ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++;
+		}
+	}
+
+	if (!ctx->fcommit) {
+		ctx->fcommit = fcommit;
+	} else {
+		ASSERT(ctx->fcommit == fcommit);
+	}
+	if (!ctx->commit_ctx) {
+		ctx->commit_ctx = commit_ctx;
+	} else {
+		ASSERT(ctx->commit_ctx == commit_ctx);
+	}
+
+#if defined(DHD_WLFC_THREAD)
+	_dhd_wlfc_thread_wakeup(dhdp);
+#else
+	dhd_wlfc_transfer_packets(dhdp);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+exit:
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+} /* dhd_wlfc_commit_packets */
+
+/**
+ * Called when the (lower) DBUS layer indicates completion (successful or not) of a transmit packet
+ */
+int
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+	athost_wl_status_info_t* wlfc;
+	wlfc_mac_descriptor_t *entry;
+	void* pout = NULL;
+	int rtn = BCME_OK;
+	if ((dhd == NULL) || (txp == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	bcm_pkt_validate_chk(txp);
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rtn = WLFC_UNSUPPORTED;
+		goto EXIT;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.signal_only_pkts_freed++;
+#endif
+		/* is this a signal-only packet? */
+		_dhd_wlfc_pullheader(wlfc, txp);
+		PKTFREE(wlfc->osh, txp, TRUE);
+		goto EXIT;
+	}
+
+	entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+	ASSERT(entry);
+
+	if (!success || dhd->proptxstatus_txstatus_ignore) {
+		WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+			__FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT(
+				DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE);
+			ASSERT(txp == pout);
+		}
+
+		/* indicate failure and free the packet */
+		dhd_txcomplete(dhd, txp, success);
+
+		/* return the credit, if necessary */
+		_dhd_wlfc_return_implied_credit(wlfc, txp);
+
+		if (entry->transit_count)
+			entry->transit_count--;
+		if (entry->suppr_transit_count)
+			entry->suppr_transit_count--;
+		wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--;
+		wlfc->stats.pktout++;
+		PKTFREE(wlfc->osh, txp, TRUE);
+	} else {
+		/* bus confirmed pkt went to firmware side */
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, txp);
+		} else {
+			int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp)));
+			_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+				WLFC_HANGER_PKT_STATE_BUSRETURNED, -1);
+		}
+	}
+
+	ASSERT(entry->onbus_pkts_count > 0);
+	if (entry->onbus_pkts_count > 0)
+		entry->onbus_pkts_count--;
+	if (entry->suppressed &&
+		(!entry->onbus_pkts_count) &&
+		(!entry->suppr_transit_count))
+		entry->suppressed = FALSE;
+EXIT:
+	dhd_os_wlfc_unblock(dhd);
+	return rtn;
+} /* dhd_wlfc_txcomplete */
+
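+/**
+ * Enables proptxstatus flow control: requests the 'tlv' signals from the
+ * firmware, queries the 'wlfc_mode' capabilities and records the negotiated mode.
+ */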
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+	/* enable all signals & indicate host proptxstatus logic is active */
+	uint32 tlv, mode, fw_caps;
+	int ret = 0;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+	dhd->wlfc_enabled = TRUE;
+	dhd_os_wlfc_unblock(dhd);
+
+	tlv = WLFC_FLAGS_RSSI_SIGNALS |
+		WLFC_FLAGS_XONXOFF_SIGNALS |
+		WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+		WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+		WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+
+	/*
+	Try to enable/disable signaling by sending the "tlv" iovar. If that fails,
+	fall back to no flow control? Print a message for now.
+	*/
+
+	/* enable proptxstatus signaling by default */
+	if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	}
+
+	mode = 0;
+
+	/* query caps */
+	ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0);
+
+	if (!ret) {
+		DHD_ERROR(("%s: query wlfc_mode succeeded, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+
+		if (WLFC_IS_OLD_DEF(fw_caps)) {
+#ifdef BCMDBUS
+			mode = WLFC_MODE_HANGER;
+#else
+			/* enable proptxstatus v2 by default */
+			mode = WLFC_MODE_AFQ;
+#endif /* BCMDBUS */
+		} else {
+			WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps));
+#ifdef BCMDBUS
+			WLFC_SET_AFQ(mode, 0);
+#endif /* BCMDBUS */
+			WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
+			WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
+		}
+		ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0);
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_mode = 0;
+	if (ret >= 0) {
+		if (WLFC_IS_OLD_DEF(mode)) {
+			WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ));
+		} else {
+			dhd->wlfc_mode = mode;
+		}
+	}
+
+	DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+#ifdef LIMIT_BORROW
+	dhd->wlfc_borrow_allowed = TRUE;
+#endif
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_init)
+		dhd->plat_init((void *)dhd);
+
+	return BCME_OK;
+} /* dhd_wlfc_init */
+
+/** AMPDU host reorder specific function */
+int
+dhd_wlfc_hostreorder_init(dhd_pub_t *dhd)
+{
+	/* enable only ampdu hostreorder here */
+	uint32 tlv;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__));
+
+	tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+	/* enable proptxstatus signaling by default */
+	if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
+		DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n",
+			__FUNCTION__));
+	} else {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n",
+			__FUNCTION__, tlv));
+	}
+
+	dhd_os_wlfc_block(dhd);
+	dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER;
+	dhd_os_wlfc_unblock(dhd);
+	/* terence 20161229: enable ampdu_hostreorder if tlv enables hostreorder */
+	dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+#ifndef BCMDBUS
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+#endif /* !BCMDBUS */
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** release all packet resources */
+int
+dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	_dhd_wlfc_cleanup(dhd, fn, arg);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
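+/**
+ * Disables proptxstatus signaling in the firmware (preserving host rx reorder
+ * when ampdu_hostreorder is active) and releases all host-side wlfc resources.
+ */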
+int
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+	/* cleanup all psq related resources */
+	athost_wl_status_info_t* wlfc;
+	uint32 tlv = 0;
+	uint32 hostreorder = 0;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (!dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+
+	dhd->wlfc_enabled = FALSE;
+	dhd_os_wlfc_unblock(dhd);
+
+	/* query ampdu hostreorder */
+	(void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder",
+		&hostreorder, WLC_GET_VAR, FALSE, 0);
+
+	if (hostreorder) {
+		tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tlv\n",
+			__FUNCTION__, __LINE__));
+	}
+
+	/* Disable proptxstatus signaling for deinit */
+	(void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0);
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	_dhd_wlfc_cleanup(dhd, NULL, NULL);
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		int i;
+		wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+		for (i = 0; i < h->max_items; i++) {
+			if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
+				_dhd_wlfc_hanger_free_pkt(wlfc, i,
+					WLFC_HANGER_PKT_STATE_COMPLETE, TRUE);
+			}
+		}
+
+		/* delete hanger */
+		_dhd_wlfc_hanger_delete(dhd, h);
+	}
+
+
+	/* free top structure */
+	DHD_OS_PREFREE(dhd, dhd->wlfc_state,
+		sizeof(athost_wl_status_info_t));
+	dhd->wlfc_state = NULL;
+	dhd->proptxstatus_mode = hostreorder ?
+		WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+
+	DHD_ERROR(("%s: wlfc_mode=0x%x, tlv=%d\n", __FUNCTION__, dhd->wlfc_mode, tlv));
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_deinit)
+		dhd->plat_deinit((void *)dhd);
+	return BCME_OK;
+} /* dhd_wlfc_deinit */
+
+/**
+ * Called on an interface event (WLC_E_IF) indicated by firmware
+ *     @param[in] dhdp   Pointer to public DHD structure
+ *     @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD
+ */
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data);
+
+	dhd_os_wlfc_unblock(dhdp);
+
+	return rc;
+}
+#ifdef LIMIT_BORROW
+int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+	if (dhdp == NULL || event_data == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+	dhd_os_wlfc_block(dhdp);
+	dhdp->wlfc_borrow_allowed = (bool)(*(uint32 *)event_data);
+	dhd_os_wlfc_unblock(dhdp);
+
+	return BCME_OK;
+}
+#endif /* LIMIT_BORROW */
+
+/**
+ * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast
+ * specific)
+ */
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+/** debug specific function */
+int
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	int i;
+	uint8* ea;
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* h;
+	wlfc_mac_descriptor_t* mac_table;
+	wlfc_mac_descriptor_t* interfaces;
+	char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+	if (!dhdp || !strbuf) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	h = (wlfc_hanger_t*)wlfc->hanger;
+	if (h == NULL) {
+		bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+	}
+
+	mac_table = wlfc->destination_entries.nodes;
+	interfaces = wlfc->destination_entries.interfaces;
+	bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+		h = (wlfc_hanger_t*)wlfc->hanger;
+		if (h == NULL) {
+			bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+		} else {
+			bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+				"f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+				h->pushed,
+				h->popped,
+				h->failed_to_push,
+				h->failed_to_pop,
+				h->failed_slotfind,
+				(h->pushed - h->popped));
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+		"(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
+		wlfc->stats.tlv_parse_failed,
+		wlfc->stats.credit_request_failed,
+		wlfc->stats.mac_update_failed,
+		wlfc->stats.psmode_update_failed,
+		wlfc->stats.delayq_full_error,
+		wlfc->stats.rollback_failed);
+
+	bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) "
+		"(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d],"
+		"AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n",
+		wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
+		wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0],
+		wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
+		wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1],
+		wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
+		wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2],
+		wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
+		wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3],
+		wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4],
+		wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]);
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		if (interfaces[i].occupied) {
+			char* iftype_desc;
+
+			if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown";
+			else
+				iftype_desc = iftypes[interfaces[i].iftype];
+
+			ea = interfaces[i].ea;
+			bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s "
+				"netif_flow_control:%s\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				interfaces[i].interface_id,
+				iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+				? " OFF":" ON"));
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),"
+				"(trans,supp_trans,onbus)"
+				"= (%d,%s,%d),(%d,%d,%d)\n",
+				i,
+				interfaces[i].psq.len,
+				((interfaces[i].state ==
+				WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+				interfaces[i].requested_credit,
+				interfaces[i].transit_count,
+				interfaces[i].suppr_transit_count,
+				interfaces[i].onbus_pkts_count);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				interfaces[i].psq.q[0].len,
+				interfaces[i].psq.q[1].len,
+				interfaces[i].afq.q[0].len,
+				interfaces[i].psq.q[2].len,
+				interfaces[i].psq.q[3].len,
+				interfaces[i].afq.q[1].len,
+				interfaces[i].psq.q[4].len,
+				interfaces[i].psq.q[5].len,
+				interfaces[i].afq.q[2].len,
+				interfaces[i].psq.q[6].len,
+				interfaces[i].psq.q[7].len,
+				interfaces[i].afq.q[3].len,
+				interfaces[i].psq.q[8].len,
+				interfaces[i].psq.q[9].len,
+				interfaces[i].afq.q[4].len);
+		}
+	}
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (mac_table[i].occupied) {
+			ea = mac_table[i].ea;
+			bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				mac_table[i].interface_id);
+
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),"
+				"(trans,supp_trans,onbus)"
+				"= (%d,%s,%d),(%d,%d,%d)\n",
+				i,
+				mac_table[i].psq.len,
+				((mac_table[i].state ==
+				WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+				mac_table[i].requested_credit,
+				mac_table[i].transit_count,
+				mac_table[i].suppr_transit_count,
+				mac_table[i].onbus_pkts_count);
+#ifdef PROP_TXSTATUS_DEBUG
+			bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+				i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				mac_table[i].psq.q[0].len,
+				mac_table[i].psq.q[1].len,
+				mac_table[i].afq.q[0].len,
+				mac_table[i].psq.q[2].len,
+				mac_table[i].psq.q[3].len,
+				mac_table[i].afq.q[1].len,
+				mac_table[i].psq.q[4].len,
+				mac_table[i].psq.q[5].len,
+				mac_table[i].afq.q[2].len,
+				mac_table[i].psq.q[6].len,
+				mac_table[i].psq.q[7].len,
+				mac_table[i].afq.q[3].len,
+				mac_table[i].psq.q[8].len,
+				mac_table[i].psq.q[9].len,
+				mac_table[i].afq.q[4].len);
+
+		}
+	}
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		int avg;
+		int moving_avg = 0;
+		int moving_samples;
+
+		if (wlfc->stats.latency_sample_count) {
+			moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+			for (i = 0; i < moving_samples; i++)
+				moving_avg += wlfc->stats.deltas[i];
+			moving_avg /= moving_samples;
+
+			avg = (100 * wlfc->stats.total_status_latency) /
+				wlfc->stats.latency_sample_count;
+			bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+				"(%d.%d, %03d, %03d)\n",
+				moving_samples, avg/100, (avg - (avg/100)*100),
+				wlfc->stats.latency_most_recent,
+				moving_avg);
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+		"back = (%d,%d,%d,%d,%d,%d)\n",
+		wlfc->stats.fifo_credits_sent[0],
+		wlfc->stats.fifo_credits_sent[1],
+		wlfc->stats.fifo_credits_sent[2],
+		wlfc->stats.fifo_credits_sent[3],
+		wlfc->stats.fifo_credits_sent[4],
+		wlfc->stats.fifo_credits_sent[5],
+
+		wlfc->stats.fifo_credits_back[0],
+		wlfc->stats.fifo_credits_back[1],
+		wlfc->stats.fifo_credits_back[2],
+		wlfc->stats.fifo_credits_back[3],
+		wlfc->stats.fifo_credits_back[4],
+		wlfc->stats.fifo_credits_back[5]);
+	{
+		uint32 fifo_cr_sent = 0;
+		uint32 fifo_cr_acked = 0;
+		uint32 request_cr_sent = 0;
+		uint32 request_cr_ack = 0;
+		uint32 bc_mc_cr_ack = 0;
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+			fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+		}
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+			fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+		}
+
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_sent +=
+					wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_sent +=
+				wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.nodes[i].dstncredit_acks;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.interfaces[i].dstncredit_acks;
+			}
+		}
+		bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+			"other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+			fifo_cr_sent, fifo_cr_acked,
+			request_cr_sent, request_cr_ack,
+			wlfc->destination_entries.other.dstncredit_acks,
+			bc_mc_cr_ack,
+			wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+	}
+#endif /* PROP_TXSTATUS_DEBUG */
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)"
+		"(freed,free_err,rollback)) = "
+		"((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.pktin,
+		wlfc->stats.pkt2bus,
+		wlfc->stats.txstatus_in,
+		wlfc->stats.dhd_hdrpulls,
+		wlfc->stats.pktout,
+
+		wlfc->stats.pktdropped,
+		wlfc->stats.wlfc_header_only_pkt,
+		wlfc->stats.wlc_tossed_pkts,
+
+		wlfc->stats.pkt_freed,
+		wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+	bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+		"((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.d11_suppress,
+		wlfc->stats.wl_suppress,
+		wlfc->stats.bad_suppress,
+
+		wlfc->stats.psq_d11sup_enq,
+		wlfc->stats.psq_wlsup_enq,
+		wlfc->stats.psq_hostq_enq,
+		wlfc->stats.mac_handle_notfound,
+
+		wlfc->stats.psq_d11sup_retx,
+		wlfc->stats.psq_wlsup_retx,
+		wlfc->stats.psq_hostq_retx);
+
+	bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n",
+		wlfc->stats.cleanup_txq_cnt,
+		wlfc->stats.cleanup_psq_cnt,
+		wlfc->stats.cleanup_fw_cnt);
+
+	bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error);
+
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i,
+			wlfc->pkt_cnt_in_q[i][0],
+			wlfc->pkt_cnt_in_q[i][1],
+			wlfc->pkt_cnt_in_q[i][2],
+			wlfc->pkt_cnt_in_q[i][3],
+			wlfc->pkt_cnt_in_q[i][4]);
+	}
+	bcm_bprintf(strbuf, "\n");
+
+	dhd_os_wlfc_unblock(dhdp);
+	return BCME_OK;
+} /* dhd_wlfc_dump */
+
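+/** debug specific function: resets the wlfc statistics and, when not in AFQ mode, the hanger counters */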
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd)
+{
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* hanger;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		hanger = (wlfc_hanger_t*)wlfc->hanger;
+
+		hanger->pushed = 0;
+		hanger->popped = 0;
+		hanger->failed_slotfind = 0;
+		hanger->failed_to_pop = 0;
+		hanger->failed_to_push = 0;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** returns TRUE if flow control is enabled */
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_enabled;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** Called via an IOVAR */
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_state ? dhd->proptxstatus_mode : 0;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** Called via an IOVAR */
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (dhd->wlfc_state) {
+		dhd->proptxstatus_mode = val & 0xff;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** Called when rx frame is received from the dongle */
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf)
+{
+	athost_wl_status_info_t* wlfc;
+	bool rc = FALSE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return FALSE;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	if (PKTLEN(wlfc->osh, pktbuf) == 0) {
+		wlfc->stats.wlfc_header_only_pkt++;
+		rc = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
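+/** Turns the proptxstatus transmit path off or on by updating proptxstatus_txoff */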
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock)
+{
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_block(dhdp);
+	}
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) ||
+		dhdp->proptxstatus_module_ignore) {
+		if (bAcquireLock) {
+			dhd_os_wlfc_unblock(dhdp);
+		}
+		return WLFC_UNSUPPORTED;
+	}
+
+	if (state != dhdp->proptxstatus_txoff) {
+		dhdp->proptxstatus_txoff = state;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_unblock(dhdp);
+	}
+
+	return BCME_OK;
+}
+
+/** Called when eg an rx frame is received from the dongle */
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio)
+{
+	athost_wl_status_info_t* wlfc;
+	int rx_path_ac = -1;
+
+	if ((dhd == NULL) || (prio >= NUMPRIO)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_rxpkt_chk) {
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	rx_path_ac = prio2fifo[prio];
+	wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME();
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_module_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val)
+{
+	uint32 tlv = 0;
+	bool bChanged = FALSE;
+
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if ((bool)val != dhd->proptxstatus_module_ignore) {
+		dhd->proptxstatus_module_ignore = (val != 0);
+		/* force txstatus_ignore sync with proptxstatus_module_ignore */
+		dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore;
+		if (FALSE == dhd->proptxstatus_module_ignore) {
+			tlv = WLFC_FLAGS_RSSI_SIGNALS |
+				WLFC_FLAGS_XONXOFF_SIGNALS |
+				WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+				WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+		}
+		/* always enable host reorder */
+		tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		bChanged = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (bChanged) {
+		/* enable/disable proptxstatus signaling as selected */
+		if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
+			DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		} else {
+			DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+	}
+
+#if defined(DHD_WLFC_THREAD)
+	_dhd_wlfc_thread_wakeup(dhd);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_credit_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_credit_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_txstatus_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_txstatus_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_rxpkt_chk;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/** called via an IOVAR */
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_rxpkt_chk = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+#endif /* PROP_TXSTATUS */
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
new file mode 100644
index 0000000..54c6b3b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_wlfc.h 671530 2016-11-22 08:43:33Z $
+ *
+ */
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+
+/* #define OOO_DEBUG */
+
+#define KERNEL_THREAD_RETURN_TYPE int
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+typedef bool (*f_processpkt_t)(void* p, void* arg);
+
+#define WLFC_UNSUPPORTED -9999
+
+#define WLFC_NO_TRAFFIC	-1
+#define WLFC_MULTI_TRAFFIC 0
+
+#define BUS_RETRIES 1	/* # of retries before aborting a bus tx operation */
+
+/** 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 3072
+
+#define WLFC_HANGER_ITEM_STATE_FREE			1
+#define WLFC_HANGER_ITEM_STATE_INUSE			2
+#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED		3
+#define WLFC_HANGER_ITEM_STATE_FLUSHED			4
+
+#define WLFC_HANGER_PKT_STATE_TXSTATUS			1
+#define WLFC_HANGER_PKT_STATE_BUSRETURNED		2
+#define WLFC_HANGER_PKT_STATE_COMPLETE			\
+	(WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED)
+
+typedef enum {
+	Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */
+	Q_TYPE_AFQ  /**< At Firmware Queue */
+} q_type_t;
+
+typedef enum ewlfc_packet_state {
+	eWLFC_PKTTYPE_NEW,        /**< unused in the code (Jan 2015) */
+	eWLFC_PKTTYPE_DELAYED,    /**< packet did not enter wlfc yet */
+	eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */
+	eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+	eWLFC_MAC_ENTRY_ACTION_ADD,
+	eWLFC_MAC_ENTRY_ACTION_DEL,
+	eWLFC_MAC_ENTRY_ACTION_UPDATE,
+	eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+	uint8	state;
+	uint8   gen;
+	uint8	pkt_state;     /**< bitmask containing eg WLFC_HANGER_PKT_STATE_TXSTATUS */
+	uint8	pkt_txstatus;
+	uint32	identifier;
+	void*	pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32	push_time;
+#endif
+	struct wlfc_hanger_item *next;
+} wlfc_hanger_item_t;
+
+/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */
+typedef struct wlfc_hanger {
+	int max_items;
+	uint32 pushed;
+	uint32 popped;
+	uint32 failed_to_push;
+	uint32 failed_to_pop;
+	uint32 failed_slotfind;
+	uint32 slot_pos;
+	wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n)	((sizeof(wlfc_hanger_t) - \
+	sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
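+/*
+ * Byte size of a hanger with 'n' slots; the embedded items[1] element is
+ * subtracted once so that the first slot is not counted twice.
+ */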
+
+#define WLFC_STATE_OPEN		1	/**< remote MAC is able to receive packets */
+#define WLFC_STATE_CLOSE	2	/**< remote MAC is in power save mode */
+
+#define WLFC_PSQ_PREC_COUNT		((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */
+#define WLFC_AFQ_PREC_COUNT		(AC_COUNT + 1)
+
+#define WLFC_PSQ_LEN			(4096 * 8)
+
+#ifdef BCMDBUS
+#define WLFC_FLOWCONTROL_HIWATER	512
+#define WLFC_FLOWCONTROL_LOWATER	(WLFC_FLOWCONTROL_HIWATER / 4)
+#else
+#define WLFC_FLOWCONTROL_HIWATER	((4096 * 8) - 256)
+#define WLFC_FLOWCONTROL_LOWATER	256
+#endif
+
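+/*
+ * Keep the flow control high water mark safely below the PSQ length: if the
+ * configured value reaches within 256 entries of WLFC_PSQ_LEN it is clamped
+ * and the low water mark is recomputed as a quarter of it.
+ */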
+#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256))
+#undef WLFC_FLOWCONTROL_HIWATER
+#define WLFC_FLOWCONTROL_HIWATER	(WLFC_PSQ_LEN - 256)
+#undef WLFC_FLOWCONTROL_LOWATER
+#define WLFC_FLOWCONTROL_LOWATER	(WLFC_FLOWCONTROL_HIWATER / 4)
+#endif
+
+#define WLFC_LOG_BUF_SIZE		(1024*1024)
+
+/** Properties related to a remote MAC entity */
+typedef struct wlfc_mac_descriptor {
+	uint8 occupied;         /**< if 0, this descriptor is unused and thus can be (re)used */
+	uint8 interface_id;
+	uint8 iftype;           /**< eg WLC_E_IF_ROLE_STA */
+	uint8 state;            /**< eg WLFC_STATE_OPEN */
+	uint8 ac_bitmap;        /**< automatic power save delivery (APSD) */
+	uint8 requested_credit;
+	uint8 requested_packet; /**< unit: [number of packets] */
+	uint8 ea[ETHER_ADDR_LEN];
+
+	/** maintain a (MAC,AC)-based seq count for packets going to the device, as well as for bc/mc */
+	uint8 seq[AC_COUNT + 1];
+	uint8 generation;       /**< toggles between 0 and 1 */
+	struct pktq	psq;    /**< contains both 'delayed' and 'suppressed' packets */
+	/** packets at firmware queue */
+	struct pktq	afq;
+	/** The AC pending bitmap that was reported to the fw at last change */
+	uint8 traffic_lastreported_bmp;
+	/** The new AC pending bitmap */
+	uint8 traffic_pending_bmp;
+	/** 1= send on next opportunity */
+	uint8 send_tim_signal;
+	uint8 mac_handle;          /**< mac handles are assigned by the dongle */
+	/** Number of packets at dongle for this entry. */
+	int transit_count;
+	/** Number of suppression to wait before evict from delayQ */
+	int suppr_transit_count;
+	/** pkt sent to bus but no bus TX complete yet */
+	int onbus_pkts_count;
+	/** flag. TRUE when remote MAC is in suppressed state */
+	uint8 suppressed;
+
+
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32 dstncredit_sent_packets;
+	uint32 dstncredit_acks;
+	uint32 opened_ct;
+	uint32 closed_ct;
+#endif
+	struct wlfc_mac_descriptor* prev;
+	struct wlfc_mac_descriptor* next;
+} wlfc_mac_descriptor_t;
+
+/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */
+typedef struct dhd_wlfc_commit_info {
+	uint8					needs_hdr;
+	uint8					ac_fifo_credit_spent;
+	ewlfc_packet_state_t	pkt_type;
+	wlfc_mac_descriptor_t*	mac_entry;
+	void*					p;
+} dhd_wlfc_commit_info_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+	entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
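+/* The per-(entry, precedence) sequence counters are 8 bits wide and wrap:
+ * decrementing past zero rolls back to 0xff.
+ */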
+
+typedef struct athost_wl_stat_counters {
+	uint32	pktin;
+	uint32	pktout;
+	uint32	pkt2bus;
+	uint32	pktdropped;
+	uint32	tlv_parse_failed;
+	uint32	rollback;
+	uint32	rollback_failed;
+	uint32	delayq_full_error;
+	uint32	credit_request_failed;
+	uint32	packet_request_failed;
+	uint32	mac_update_failed;
+	uint32	psmode_update_failed;
+	uint32	interface_update_failed;
+	uint32	wlfc_header_only_pkt;
+	uint32	txstatus_in;
+	uint32	d11_suppress;
+	uint32	wl_suppress;
+	uint32	bad_suppress;
+	uint32	pkt_freed;
+	uint32	pkt_free_err;
+	uint32	psq_wlsup_retx;
+	uint32	psq_wlsup_enq;
+	uint32	psq_d11sup_retx;
+	uint32	psq_d11sup_enq;
+	uint32	psq_hostq_retx;
+	uint32	psq_hostq_enq;
+	uint32	mac_handle_notfound;
+	uint32	wlc_tossed_pkts;
+	uint32	dhd_hdrpulls;
+	uint32	generic_error;
+	/* an extra one for bc/mc traffic */
+	uint32	send_pkts[AC_COUNT + 1];
+	uint32	drop_pkts[WLFC_PSQ_PREC_COUNT];
+	uint32	ooo_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+	/** all pkt2bus -> txstatus latency accumulated */
+	uint32	latency_sample_count;
+	uint32	total_status_latency;
+	uint32	latency_most_recent;
+	int	idx_delta;
+	uint32	deltas[10];
+	uint32	fifo_credits_sent[6];
+	uint32	fifo_credits_back[6];
+	uint32	dropped_qfull[6];
+	uint32	signal_only_pkts_sent;
+	uint32	signal_only_pkts_freed;
+#endif
+	uint32	cleanup_txq_cnt;
+	uint32	cleanup_psq_cnt;
+	uint32	cleanup_fw_cnt;
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+	(ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+#define WLFC_PACKET_BOUND              10
+#define WLFC_FCMODE_NONE				0
+#define WLFC_FCMODE_IMPLIED_CREDIT		1
+#define WLFC_FCMODE_EXPLICIT_CREDIT		2
+#define WLFC_ONLY_AMPDU_HOSTREORDER		3
+
+/** Reserved credits ratio when borrowed by higher priority */
+#define WLFC_BORROW_LIMIT_RATIO		4
+
+/** How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/** How long to defer flow control in milliseconds */
+#define WLFC_FC_DEFER_PERIOD_MS 200
+
+/** How long to detect occurrence per AC, in milliseconds */
+#define WLFC_RX_DETECTION_THRESHOLD_MS	100
+
+/** Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+/** flow control specific information, only 1 instance during driver lifetime */
+typedef struct athost_wl_status_info {
+	uint8	last_seqid_to_wlc;
+
+	/** OSL handle */
+	osl_t *osh;
+	/** dhd public struct pointer */
+	void *dhdp;
+
+	f_commitpkt_t fcommit;
+	void* commit_ctx;
+
+	/** statistics */
+	athost_wl_stat_counters_t stats;
+
+	/** incremented on eg receiving a credit map event from the dongle */
+	int		Init_FIFO_credit[AC_COUNT + 2];
+	/** the additional ones are for bc/mc and ATIM FIFO */
+	int		FIFO_credit[AC_COUNT + 2];
+	/** Credit borrow counts for each FIFO from each of the other FIFOs */
+	int		credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+	/** packet hanger and MAC->handle lookup table */
+	void *hanger;
+
+	struct {
+		/** table for individual nodes */
+		wlfc_mac_descriptor_t	nodes[WLFC_MAC_DESC_TABLE_SIZE];
+		/** table for interfaces */
+		wlfc_mac_descriptor_t	interfaces[WLFC_MAX_IFNUM];
+		/* OS may send packets to unknown (unassociated) destinations */
+		/** A placeholder for bc/mc and packets to unknown destinations */
+		wlfc_mac_descriptor_t	other;
+	} destination_entries;
+
+	wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */
+	int active_entry_count;
+
+	wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
+	int requested_entry_count;
+
+	/* pkt counts for each interface and ac */
+	int	pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1];
+	int	pkt_cnt_per_ac[AC_COUNT+1];
+	int	pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1];
+	int	pkt_cnt_in_psq;
+	uint8	allow_fc;              /**< Boolean */
+	uint32  fc_defer_timestamp;
+	uint32	rx_timestamp[AC_COUNT+1];
+
+	/** ON/OFF state for flow control to the host network interface */
+	uint8	hostif_flow_state[WLFC_MAX_IFNUM];
+	uint8	host_ifidx;
+
+	/** to flow control an OS interface */
+	uint8	toggle_host_if;
+
+	/** To borrow credits */
+	uint8   allow_credit_borrow;
+
+	/** ac number for the first single ac traffic */
+	uint8	single_ac;
+
+	/** Timestamp for the first single ac traffic */
+	uint32  single_ac_timestamp;
+
+	bool	bcmc_credit_supported;
+
+} athost_wl_status_info_t;
+
+/** Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+
+#ifdef BCM_OBJECT_TRACE
+	/* if use this field, keep it at the first 4 bytes */
+	uint32 sn;
+#endif /* BCM_OBJECT_TRACE */
+
+	/**
+	b[15]  - 1 = wlfc packet
+	b[14:13]  - encryption exemption
+	b[12 ] - 1 = event channel
+	b[11 ] - 1 = this packet was sent in response to one time packet request,
+	do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+	b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+	b[9  ] - 1 = packet is host->firmware (transmit direction)
+	       - 0 = packet received from firmware (firmware->host)
+	b[8  ] - 1 = packet was sent due to credit_request (pspoll),
+	             packet does not count against FIFO credit.
+	       - 0 = normal transaction, packet counts against FIFO credit
+	b[7  ] - 1 = AP, 0 = STA
+	b[6:4] - AC FIFO number
+	b[3:0] - interface index
+	*/
+	uint16	if_flags;
+
+	/**
+	 * destination MAC address for this packet so that not every module needs to open the packet
+	 * to find this
+	 */
+	uint8	dstn_ether[ETHER_ADDR_LEN];
+
+	/** This 32-bit tag goes from host to device with every packet. */
+	uint32	htod_tag;
+
+	/** This 16-bit field holds the original d11 seq number for every suppressed packet. */
+	uint16	htod_seq;
+
+	/** Pointer to the MAC entry associated with this packet. */
+	void *entry;
+
+	/** bus specific stuff */
+	union {
+		struct {
+			void *stuff;
+			uint32 thing1;
+			uint32 thing2;
+		} sd;
+
+		struct {
+			void *bus;
+			void *urb;
+		} usb;
+	} bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_WLFCPKT_MASK			0x1
+#define DHD_PKTTAG_WLFCPKT_SHIFT		15
+#define DHD_PKTTAG_WLFCPKT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT)
+#define DHD_PKTTAG_WLFCPKT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK)
+
+#define DHD_PKTTAG_EXEMPT_MASK			0x3
+#define DHD_PKTTAG_EXEMPT_SHIFT			13
+#define DHD_PKTTAG_EXEMPT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT)
+#define DHD_PKTTAG_EXEMPT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK)
+
+#define DHD_PKTTAG_EVENT_MASK			0x1
+#define DHD_PKTTAG_EVENT_SHIFT			12
+#define DHD_PKTTAG_SETEVENT(tag, event)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \
+	(((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT)
+#define DHD_PKTTAG_EVENT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK)
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK		0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT		11
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+	(1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SIGNALONLY_MASK		0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT		10
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+	(((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_PKTDIR_MASK			0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT			9
+#define DHD_PKTTAG_SETPKTDIR(tag, dir)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+	(((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_CREDITCHECK_MASK		0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT		8
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+	(((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_IFTYPE_MASK			0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT			7
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+	(((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_FIFO_MASK			0x7
+#define DHD_PKTTAG_FIFO_SHIFT			4
+#define DHD_PKTTAG_SETFIFO(tag, fifo)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+	(((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_IF_MASK			0xf
+#define DHD_PKTTAG_IF_SHIFT			0
+#define DHD_PKTTAG_SETIF(tag, if)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \
+	(((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT)
+#define DHD_PKTTAG_IF(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea)	memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+	(dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag)	((dhd_pkttag_t*)(tag))->dstn_ether
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue)	((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag)			(((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_SET_H2DSEQ(tag, seq)		((dhd_pkttag_t*)(tag))->htod_seq = (seq)
+#define DHD_PKTTAG_H2DSEQ(tag)			(((dhd_pkttag_t*)(tag))->htod_seq)
+
+#define DHD_PKTTAG_SET_ENTRY(tag, entry)	((dhd_pkttag_t*)(tag))->entry = (entry)
+#define DHD_PKTTAG_ENTRY(tag)			(((dhd_pkttag_t*)(tag))->entry)
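+
+/*
+ * Usage sketch (illustrative only, not called anywhere in the driver): the
+ * SET/GET macro pairs above read-modify-write the packed if_flags word whose
+ * bit layout is described at the top of dhd_pkttag_t. Assuming the usual OSL
+ * PKTTAG() accessor, tagging a packet for AC FIFO 2 on interface 1 and
+ * reading the fields back looks like:
+ *
+ *	dhd_pkttag_t *tag = (dhd_pkttag_t *)PKTTAG(pkt);
+ *	DHD_PKTTAG_SETFIFO(tag, 2);	// b[6:4] <- 2
+ *	DHD_PKTTAG_SETIF(tag, 1);	// b[3:0] <- 1
+ *	ASSERT(DHD_PKTTAG_FIFO(tag) == 2 && DHD_PKTTAG_IF(tag) == 1);
+ */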
+
+#define PSQ_SUP_IDX(x) (x * 2 + 1)
+#define PSQ_DLY_IDX(x) (x * 2)
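+/* The per-MAC precedence queue interleaves two slots per AC: even slots
+ * (PSQ_DLY_IDX) hold delayed packets and odd slots (PSQ_SUP_IDX) hold
+ * suppressed packets, e.g. AC 1 maps to slots 2 and 3 respectively.
+ */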
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do {} while (0)
+#endif
+
+#ifdef BCM_OBJECT_TRACE
+#define DHD_PKTTAG_SET_SN(tag, val)		((dhd_pkttag_t*)(tag))->sn = (val)
+#define DHD_PKTTAG_SN(tag)			(((dhd_pkttag_t*)(tag))->sn)
+#endif /* BCM_OBJECT_TRACE */
+
+/* public functions */
+int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len,
+	uchar *reorder_info_buf, uint *reorder_info_len);
+KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data);
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
+	void* commit_ctx, void *pktbuf, bool need_toggle_host_if);
+int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+int dhd_wlfc_init(dhd_pub_t *dhd);
+#ifdef SUPPORT_P2P_GO_PS
+int dhd_wlfc_suspend(dhd_pub_t *dhd);
+int dhd_wlfc_resume(dhd_pub_t *dhd);
+#endif /* SUPPORT_P2P_GO_PS */
+int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd);
+int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg);
+int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg);
+int dhd_wlfc_deinit(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data);
+#ifdef LIMIT_BORROW
+int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data);
+#endif /* LIMIT_BORROW */
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp);
+int dhd_wlfc_enable(dhd_pub_t *dhdp);
+int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd);
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val);
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val);
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd);
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf);
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock);
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio);
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val);
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val);
+
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h
new file mode 100644
index 0000000..b995ec5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -0,0 +1,383 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dngl_stats.h 681171 2017-01-25 05:27:08Z $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+#include <ethernet.h>
+#include <802.11.h>
+
+typedef struct {
+	unsigned long	rx_packets;		/* total packets received */
+	unsigned long	tx_packets;		/* total packets transmitted */
+	unsigned long	rx_bytes;		/* total bytes received */
+	unsigned long	tx_bytes;		/* total bytes transmitted */
+	unsigned long	rx_errors;		/* bad packets received */
+	unsigned long	tx_errors;		/* packet transmit problems */
+	unsigned long	rx_dropped;		/* packets dropped by dongle */
+	unsigned long	tx_dropped;		/* packets dropped by dongle */
+	unsigned long   multicast;      /* multicast packets received */
+} dngl_stats_t;
+
+typedef int32 wifi_radio;
+typedef int32 wifi_channel;
+typedef int32 wifi_rssi;
+typedef struct { uint16 version; uint16 length; } ver_len;
+
+typedef enum wifi_channel_width {
+	WIFI_CHAN_WIDTH_20	  = 0,
+	WIFI_CHAN_WIDTH_40	  = 1,
+	WIFI_CHAN_WIDTH_80	  = 2,
+	WIFI_CHAN_WIDTH_160   = 3,
+	WIFI_CHAN_WIDTH_80P80 = 4,
+	WIFI_CHAN_WIDTH_5	  = 5,
+	WIFI_CHAN_WIDTH_10	  = 6,
+	WIFI_CHAN_WIDTH_INVALID = -1
+} wifi_channel_width_t;
+
+typedef enum {
+	WIFI_DISCONNECTED = 0,
+	WIFI_AUTHENTICATING = 1,
+	WIFI_ASSOCIATING = 2,
+	WIFI_ASSOCIATED = 3,
+	WIFI_EAPOL_STARTED = 4,   /* if done by firmware/driver */
+	WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */
+} wifi_connection_state;
+
+typedef enum {
+	WIFI_ROAMING_IDLE = 0,
+	WIFI_ROAMING_ACTIVE = 1
+} wifi_roam_state;
+
+typedef enum {
+	WIFI_INTERFACE_STA = 0,
+	WIFI_INTERFACE_SOFTAP = 1,
+	WIFI_INTERFACE_IBSS = 2,
+	WIFI_INTERFACE_P2P_CLIENT = 3,
+	WIFI_INTERFACE_P2P_GO = 4,
+	WIFI_INTERFACE_NAN = 5,
+	WIFI_INTERFACE_MESH = 6
+} wifi_interface_mode;
+
+#define WIFI_CAPABILITY_QOS          0x00000001     /* set for QOS association */
+#define WIFI_CAPABILITY_PROTECTED    0x00000002     /* set for protected association (802.11
+						     * beacon frame control protected bit set)
+						     */
+#define WIFI_CAPABILITY_INTERWORKING 0x00000004     /* set if 802.11 Extended Capabilities
+						     * element interworking bit is set
+						     */
+#define WIFI_CAPABILITY_HS20         0x00000008     /* set for HS20 association */
+#define WIFI_CAPABILITY_SSID_UTF8    0x00000010     /* set if 802.11 Extended Capabilities
+						     * element UTF-8 SSID bit is set
+						     */
+#define WIFI_CAPABILITY_COUNTRY      0x00000020     /* set if 802.11 Country Element is present */
+#define PACK_ATTRIBUTE __attribute__ ((packed))
+typedef struct {
+	wifi_interface_mode mode;     /* interface mode */
+	uint8 mac_addr[6];               /* interface mac address (self) */
+	wifi_connection_state state;  /* connection state (valid for STA, CLI only) */
+	wifi_roam_state roaming;      /* roaming state */
+	uint32 capabilities;             /* WIFI_CAPABILITY_XXX (self) */
+	uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */
+	uint8 bssid[ETHER_ADDR_LEN];     /* bssid */
+	uint8 ap_country_str[3];         /* country string advertised by AP */
+	uint8 country_str[3];            /* country string for this association */
+} wifi_interface_info;
+
+typedef wifi_interface_info *wifi_interface_handle;
+
+/* channel information */
+typedef struct {
+	wifi_channel_width_t width;   /* channel width (20, 40, 80, 80+80, 160) */
+	wifi_channel center_freq;   /* primary 20 MHz channel */
+	wifi_channel center_freq0;  /* center frequency (MHz) first segment */
+	wifi_channel center_freq1;  /* center frequency (MHz) second segment */
+} wifi_channel_info;
+
+/* wifi rate */
+typedef struct {
+	uint32 preamble;   /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+	uint32 nss;   	/* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+	uint32 bw;   	/* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+	uint32 rateMcsIdx; /* OFDM/CCK rate code would be as per ieee std
+			    * in the units of 0.5mbps
+			    */
+			/* HT/VHT it would be mcs index */
+	uint32 reserved;   /* reserved */
+	uint32 bitrate;    /* units of 100 Kbps */
+} wifi_rate;
+
+typedef struct {
+	uint32 preamble   :3;   /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+	uint32 nss        :2;   /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+	uint32 bw         :3;   /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+	uint32 rateMcsIdx :8;   /* OFDM/CCK rate code would be as per ieee std
+					* in the units of 0.5mbps HT/VHT it would be
+					* mcs index
+					*/
+	uint32 reserved  :16;   /* reserved */
+	uint32 bitrate;         /* units of 100 Kbps */
+} wifi_rate_v1;
+
+/* channel statistics */
+typedef struct {
+	wifi_channel_info channel;  /* channel */
+	uint32 on_time;         	/* msecs the radio is awake (32 bits number
+				         * accruing over time)
+					 */
+	uint32 cca_busy_time;          /* msecs the CCA register is busy (32 bits number
+					* accruing over time)
+					*/
+} wifi_channel_stat;
+
+/* radio statistics */
+typedef struct {
+	struct {
+		uint16 version;
+		uint16 length;
+	};
+	wifi_radio radio;               /* wifi radio (if multiple radio supported) */
+	uint32 on_time;                    /* msecs the radio is awake (32 bits number
+					    * accruing over time)
+					    */
+	uint32 tx_time;                    /* msecs the radio is transmitting (32 bits
+					    * number accruing over time)
+					    */
+	uint32 rx_time;                    /* msecs the radio is in active receive (32 bits
+					    * number accruing over time)
+					    */
+	uint32 on_time_scan;               /* msecs the radio is awake due to all scan (32 bits
+					    * number accruing over time)
+					    */
+	uint32 on_time_nbd;                /* msecs the radio is awake due to NAN (32 bits
+					    * number accruing over time)
+					    */
+	uint32 on_time_gscan;              /* msecs the radio is awake due to gscan (32 bits
+					    * number accruing over time)
+					    */
+	uint32 on_time_roam_scan;          /* msecs the radio is awake due to roam scan (32 bits
+					    * number accruing over time)
+					    */
+	uint32 on_time_pno_scan;           /* msecs the radio is awake due to PNO scan (32 bits
+					    * number accruing over time)
+					    */
+	uint32 on_time_hs20;               /* msecs the radio is awake due to HS2.0 scans and
+					    * GAS exchange (32 bits number accruing over time)
+					    */
+	uint32 num_channels;               /* number of channels */
+	wifi_channel_stat channels[1];   /* channel statistics */
+} wifi_radio_stat;
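+
+/*
+ * Sizing sketch (illustrative): channels[1] is the usual one-element trailing
+ * array idiom, so a buffer covering num_channels entries would be allocated as
+ *
+ *	size_t sz = sizeof(wifi_radio_stat) +
+ *	            (num_channels - 1) * sizeof(wifi_channel_stat);
+ *
+ * wifi_peer_info below uses the same idiom for its rate_stats[] trailer.
+ */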
+
+typedef struct {
+	wifi_radio radio;
+	uint32 on_time;
+	uint32 tx_time;
+	uint32 rx_time;
+	uint32 on_time_scan;
+	uint32 on_time_nbd;
+	uint32 on_time_gscan;
+	uint32 on_time_roam_scan;
+	uint32 on_time_pno_scan;
+	uint32 on_time_hs20;
+	uint32 num_channels;
+} wifi_radio_stat_h;
+
+/* per rate statistics */
+typedef struct {
+	wifi_rate_v1 rate;     /* rate information */
+	uint32 tx_mpdu;        /* number of successfully transmitted data pkts (ACK rcvd) */
+	uint32 rx_mpdu;        /* number of received data pkts */
+	uint32 mpdu_lost;      /* number of data packet losses (no ACK) */
+	uint32 retries;        /* total number of data pkt retries */
+	uint32 retries_short;  /* number of short data pkt retries */
+	uint32 retries_long;   /* number of long data pkt retries */
+} wifi_rate_stat_v1;
+
+typedef struct {
+	uint16 version;
+	uint16 length;
+	uint32 tx_mpdu;        /* number of successfully transmitted data pkts (ACK rcvd) */
+	uint32 rx_mpdu;        /* number of received data pkts */
+	uint32 mpdu_lost;      /* number of data packet losses (no ACK) */
+	uint32 retries;        /* total number of data pkt retries */
+	uint32 retries_short;  /* number of short data pkt retries */
+	uint32 retries_long;   /* number of long data pkt retries */
+	wifi_rate rate;
+} wifi_rate_stat;
+
+/* access categories */
+typedef enum {
+	WIFI_AC_VO  = 0,
+	WIFI_AC_VI  = 1,
+	WIFI_AC_BE  = 2,
+	WIFI_AC_BK  = 3,
+	WIFI_AC_MAX = 4
+} wifi_traffic_ac;
+
+/* wifi peer type */
+typedef enum
+{
+	WIFI_PEER_STA,
+	WIFI_PEER_AP,
+	WIFI_PEER_P2P_GO,
+	WIFI_PEER_P2P_CLIENT,
+	WIFI_PEER_NAN,
+	WIFI_PEER_TDLS,
+	WIFI_PEER_INVALID
+} wifi_peer_type;
+
+/* per peer statistics */
+typedef struct {
+	wifi_peer_type type;           /* peer type (AP, TDLS, GO etc.) */
+	uint8 peer_mac_address[6];        /* mac address */
+	uint32 capabilities;              /* peer WIFI_CAPABILITY_XXX */
+	uint32 num_rate;                  /* number of rates */
+	wifi_rate_stat rate_stats[1];   /* per rate statistics, number of entries  = num_rate */
+} wifi_peer_info;
+
+/* per access category statistics */
+typedef struct {
+	wifi_traffic_ac ac;             /* access category (VI, VO, BE, BK) */
+	uint32 tx_mpdu;                    /* number of successfully transmitted unicast data pkts
+					    * (ACK rcvd)
+					    */
+	uint32 rx_mpdu;                    /* number of received unicast mpdus */
+	uint32 tx_mcast;                   /* number of successfully transmitted multicast
+					    * data packets
+					    */
+					   /* STA case: implies ACK received from AP for the
+					    * unicast packet in which mcast pkt was sent
+					    */
+	uint32 rx_mcast;                   /* number of received multicast data packets */
+	uint32 rx_ampdu;                   /* number of received unicast a-mpdus */
+	uint32 tx_ampdu;                   /* number of transmitted unicast a-mpdus */
+	uint32 mpdu_lost;                  /* number of data pkt losses (no ACK) */
+	uint32 retries;                    /* total number of data pkt retries */
+	uint32 retries_short;              /* number of short data pkt retries */
+	uint32 retries_long;               /* number of long data pkt retries */
+	uint32 contention_time_min;        /* data pkt min contention time (usecs) */
+	uint32 contention_time_max;        /* data pkt max contention time (usecs) */
+	uint32 contention_time_avg;        /* data pkt avg contention time (usecs) */
+	uint32 contention_num_samples;     /* num of data pkts used for contention statistics */
+} wifi_wmm_ac_stat;
+
+/* interface statistics */
+typedef struct {
+	wifi_interface_handle iface;          /* wifi interface */
+	wifi_interface_info info;             /* current state of the interface */
+	uint32 beacon_rx;                     /* access point beacon received count from
+					       * connected AP
+					       */
+	uint64 average_tsf_offset;	/* average beacon offset encountered (beacon_TSF - TBTT).
+					* The average_tsf_offset field is used to calculate the
+					* typical beacon contention time on the channel and may
+					* also be used to debug beacon synchronization and
+					* related power consumption issues
+					*/
+	uint32 leaky_ap_detected;	/* indicates that this AP
+					* typically leaks packets beyond
+					* the driver guard time
+					*/
+	uint32 leaky_ap_avg_num_frames_leaked;	/* average number of frames leaked by the AP after
+					* a frame with the PM bit set was ACK'ed by the AP
+					*/
+	uint32 leaky_ap_guard_time;		/* guard time currently in force
+					* (when implementing IEEE power management
+					* based on the frame control PM bit): how long the
+					* driver waits before shutting down the radio after
+					* receiving an ACK for a data frame with the PM bit set
+					*/
+	uint32 mgmt_rx;                       /* access point mgmt frames received count from
+				       * connected AP (including Beacon)
+				       */
+	uint32 mgmt_action_rx;                /* action frames received count */
+	uint32 mgmt_action_tx;                /* action frames transmit count */
+	wifi_rssi rssi_mgmt;                  /* access Point Beacon and Management frames RSSI
+					       * (averaged)
+					       */
+	wifi_rssi rssi_data;                  /* access Point Data Frames RSSI (averaged) from
+					       * connected AP
+					       */
+	wifi_rssi rssi_ack;                   /* access Point ACK RSSI (averaged) from
+					       * connected AP
+					       */
+	wifi_wmm_ac_stat ac[WIFI_AC_MAX];     /* per ac data packet statistics */
+	uint32 num_peers;                        /* number of peers */
+	wifi_peer_info peer_info[1];           /* per peer statistics */
+} wifi_iface_stat;
+
+#ifdef CONFIG_COMPAT
+/* interface statistics */
+typedef struct {
+	compat_uptr_t iface;          /* wifi interface */
+	wifi_interface_info info;             /* current state of the interface */
+	uint32 beacon_rx;                     /* access point beacon received count from
+					       * connected AP
+					       */
+	uint64 average_tsf_offset;	/* average beacon offset encountered (beacon_TSF - TBTT).
+					* The average_tsf_offset field is used to calculate the
+					* typical beacon contention time on the channel and may
+					* also be used to debug beacon synchronization and
+					* related power consumption issues
+					*/
+	uint32 leaky_ap_detected;	/* indicates that this AP
+					* typically leaks packets beyond
+					* the driver guard time
+					*/
+	uint32 leaky_ap_avg_num_frames_leaked;	/* average number of frames leaked by the AP after
+					* a frame with the PM bit set was ACK'ed by the AP
+					*/
+	uint32 leaky_ap_guard_time;		/* guard time currently in force
+					* (when implementing IEEE power management
+					* based on the frame control PM bit): how long the
+					* driver waits before shutting down the radio after
+					* receiving an ACK for a data frame with the PM bit set
+					*/
+	uint32 mgmt_rx;                       /* access point mgmt frames received count from
+				       * connected AP (including Beacon)
+				       */
+	uint32 mgmt_action_rx;                /* action frames received count */
+	uint32 mgmt_action_tx;                /* action frames transmit count */
+	wifi_rssi rssi_mgmt;                  /* access Point Beacon and Management frames RSSI
+					       * (averaged)
+					       */
+	wifi_rssi rssi_data;                  /* access Point Data Frames RSSI (averaged) from
+					       * connected AP
+					       */
+	wifi_rssi rssi_ack;                   /* access Point ACK RSSI (averaged) from
+					       * connected AP
+					       */
+	wifi_wmm_ac_stat ac[WIFI_AC_MAX];     /* per ac data packet statistics */
+	uint32 num_peers;                        /* number of peers */
+	wifi_peer_info peer_info[1];           /* per peer statistics */
+} compat_wifi_iface_stat;
+#endif /* CONFIG_COMPAT */
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
new file mode 100644
index 0000000..96da42e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -0,0 +1,43 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dngl_wlhdr.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+	uint8	type;		/* Header type */
+	uint8	version;	/* Header version */
+	int8	rssi;		/* RSSI */
+	uint8	pad;		/* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN   sizeof(wl_header_t)
+#define WL_HEADER_TYPE  0
+#define WL_HEADER_VER   1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/net/wireless/bcmdhd/hnd_pktpool.c b/drivers/net/wireless/bcmdhd/hnd_pktpool.c
new file mode 100644
index 0000000..0c5c3a8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hnd_pktpool.c
@@ -0,0 +1,1180 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktpool.c 613891 2016-01-20 10:05:44Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <osl_ext.h>
+#include <bcmutils.h>
+#include <hnd_pktpool.h>
+
+
+/* mutex macros for thread safe */
+#ifdef HND_PKTPOOL_THREAD_SAFE
+#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
+#define HND_PKTPOOL_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
+#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
+#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	osl_ext_mutex_release(mutex)
+#else
+#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_DELETE(mutex)		OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	OSL_EXT_SUCCESS
+#endif
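+
+/* When HND_PKTPOOL_THREAD_SAFE is not defined, the mutex macros above compile
+ * to no-ops that simply report OSL_EXT_SUCCESS, so the locking calls below
+ * cost nothing in single-threaded builds.
+ */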
+
+/* Registry size is one larger than max pools, as slot #0 is reserved */
+#define PKTPOOLREG_RSVD_ID				(0U)
+#define PKTPOOLREG_RSVD_PTR				(POOLPTR(0xdeaddead))
+#define PKTPOOLREG_FREE_PTR				(POOLPTR(NULL))
+
+#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
+#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))
+
+/* Tag a registry entry as free for use */
+#define PKTPOOL_REGISTRY_CLR(id)		\
+		PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
+#define PKTPOOL_REGISTRY_ISCLR(id)		\
+		(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
+
+/* Tag registry entry 0 as reserved */
+#define PKTPOOL_REGISTRY_RSV()			\
+		PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
+#define PKTPOOL_REGISTRY_ISRSVD()		\
+		(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
+
+/* Walk all un-reserved entries in registry */
+#define PKTPOOL_REGISTRY_FOREACH(id)	\
+		for ((id) = 1U; (id) <= pktpools_max; (id)++)
+
+enum pktpool_empty_cb_state {
+	EMPTYCB_ENABLED = 0,	/* Enable callback when new packets are added to pool */
+	EMPTYCB_DISABLED,	/* Disable callback when new packets are added to pool */
+	EMPTYCB_SKIPPED		/* Packet was added to pool when callback was disabled */
+};
+
+uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
+pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
+
+/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
+static int pktpool_register(pktpool_t * poolptr);
+static int pktpool_deregister(pktpool_t * poolptr);
+
+/** forward declaration */
+static void pktpool_avail_notify(pktpool_t *pktp);
+
+/** accessor functions required when ROMming this file, forced into RAM */
+
+
+pktpool_t *
+BCMRAMFN(get_pktpools_registry)(int id)
+{
+	return pktpools_registry[id];
+}
+
+static void
+BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
+{
+	pktpools_registry[id] = pp;
+}
+
+static bool
+BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
+{
+	return pktpools_registry[id] == pp;
+}
+
+/** Constructs a pool registry to serve a maximum of total_pools */
+int
+pktpool_attach(osl_t *osh, uint32 total_pools)
+{
+	uint32 poolid;
+	BCM_REFERENCE(osh);
+
+	if (pktpools_max != 0U) {
+		return BCME_ERROR;
+	}
+
+	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
+
+	/* Initialize registry: reserve slot#0 and tag others as free */
+	PKTPOOL_REGISTRY_RSV();		/* reserve slot#0 */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* tag all unreserved entries as free */
+		PKTPOOL_REGISTRY_CLR(poolid);
+	}
+
+	pktpools_max = total_pools;
+
+	return (int)pktpools_max;
+}
+
+/** Destructs the pool registry. Ascertain all pools were first de-inited */
+int
+pktpool_dettach(osl_t *osh)
+{
+	uint32 poolid;
+	BCM_REFERENCE(osh);
+
+	if (pktpools_max == 0U) {
+		return BCME_OK;
+	}
+
+	/* Ascertain that no pools are still registered */
+	ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* ascertain all others are free */
+		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
+	}
+
+	pktpools_max = 0U; /* restore boot state */
+
+	return BCME_OK;
+}
+
+/** Registers a pool in a free slot; returns the registry slot index */
+static int
+pktpool_register(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	if (pktpools_max == 0U) {
+		return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
+	}
+
+	ASSERT(pktpools_max != 0U);
+
+	/* find an empty slot in pktpools_registry */
+	PKTPOOL_REGISTRY_FOREACH(poolid) {
+		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
+			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
+			return (int)poolid; /* return pool ID */
+		}
+	} /* FOREACH */
+
+	return PKTPOOL_INVALID_ID;	/* error: registry is full */
+}
+
+/** Deregisters a pktpool, given the pool pointer; tag slot as free */
+static int
+pktpool_deregister(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
+
+	poolid = POOLID(poolptr);
+	ASSERT(poolid <= pktpools_max);
+
+	/* Ascertain that a previously registered poolptr is being de-registered */
+	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
+		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
+	} else {
+		ASSERT(0);
+		return BCME_ERROR; /* mismatch in registry */
+	}
+
+	return BCME_OK;
+}
+
+/**
+ * pktpool_init:
+ * User provides a pktpool_t structure and specifies the number of packets to
+ * be pre-filled into the pool (pplen).
+ * pktpool_init first attempts to register the pool and fetch a unique poolid.
+ * If registration fails, BCME_ERROR is returned; this happens when the
+ * registry was not pre-created (pktpool_attach) or when the registry is full.
+ * If registration succeeds, then the requested number of packets will be filled
+ * into the pool as part of initialization. In the event that there is no
+ * available memory to service the request, then BCME_NOMEM will be returned
+ * along with the count of how many packets were successfully allocated.
+ * In dongle builds, prior to memory reclamation, one should limit the number
+ * of packets allocated during pktpool_init and fill the pool up after the
+ * reclaim stage.
+ *
+ * @param pplen  Number of packets to be pre-filled into the pool
+ * @param plen   The size of all packets in a pool must be the same, [bytes] units. E.g. PKTBUFSZ.
+ * @param type   e.g. 'lbuf_frag'
+ */
+int
+pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
+{
+	int i, err = BCME_OK;
+	int pktplen;
+	uint8 pktp_id;
+
+	ASSERT(pktp != NULL);
+	ASSERT(osh != NULL);
+	ASSERT(pplen != NULL);
+
+	pktplen = *pplen;
+
+	bzero(pktp, sizeof(pktpool_t));
+
+	/* assign a unique pktpool id */
+	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
+		return BCME_ERROR;
+	}
+	POOLSETID(pktp, pktp_id);
+
+	pktp->inited = TRUE;
+	pktp->istx = istx ? TRUE : FALSE;
+	pktp->plen = (uint16)plen;
+	pktp->type = type;
+
+	if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
+		return BCME_ERROR;
+	}
+
+	pktp->maxlen = PKTPOOL_LEN_MAX;
+	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
+
+	for (i = 0; i < pktplen; i++) {
+		void *p;
+		p = PKTGET(osh, plen, TRUE);
+
+		if (p == NULL) {
+			/* Not able to allocate all requested pkts
+			 * so just return what was actually allocated
+			 * We can add to the pool later
+			 */
+			if (pktp->freelist == NULL) /* pktpool free list is empty */
+				err = BCME_NOMEM;
+
+			goto exit;
+		}
+
+		PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
+
+		PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
+		pktp->freelist = p;
+
+		pktp->avail++;
+
+#ifdef BCMDBG_POOL
+		pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+	}
+
+exit:
+	pktp->len = pktp->avail;
+
+	*pplen = pktp->len; /* number of packets managed by pool */
+	return err;
+} /* pktpool_init */
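+
+/*
+ * Usage sketch (illustrative, assuming pktpool_attach() has already sized the
+ * registry): pre-fill a small TX pool of PKTBUFSZ-byte basic lbufs and treat
+ * BCME_NOMEM as "partially filled, top the pool up after reclaim".
+ *
+ *	pktpool_t pool;
+ *	int n = 8;
+ *	int err = pktpool_init(osh, &pool, &n, PKTBUFSZ, TRUE, lbuf_basic);
+ *	if (err == BCME_NOMEM)
+ *		printf("only %d pkts pre-filled\n", n);
+ */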
+
+/**
+ * pktpool_deinit:
+ * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
+ * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
+ * An assert is in place to ensure that there are no packets still lingering
+ * around. Packets freed to a pool after the deinit will cause a memory
+ * corruption as the pktpool_t structure no longer exists.
+ */
+int
+pktpool_deinit(osl_t *osh, pktpool_t *pktp)
+{
+	uint16 freed = 0;
+
+	ASSERT(osh != NULL);
+	ASSERT(pktp != NULL);
+
+#ifdef BCMDBG_POOL
+	{
+		int i;
+		for (i = 0; i <= pktp->len; i++) {
+			pktp->dbg_q[i].p = NULL;
+		}
+	}
+#endif
+
+	while (pktp->freelist != NULL) {
+		void * p = pktp->freelist;
+
+		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+		PKTSETFREELIST(p, NULL);
+
+		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+		PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+		freed++;
+		ASSERT(freed <= pktp->len);
+	}
+
+	pktp->avail -= freed;
+	ASSERT(pktp->avail == 0);
+
+	pktp->len -= freed;
+
+	pktpool_deregister(pktp); /* release previously acquired unique pool id */
+	POOLSETID(pktp, PKTPOOL_INVALID_ID);
+
+	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	pktp->inited = FALSE;
+
+	/* Are there still pending pkts? */
+	ASSERT(pktp->len == 0);
+
+	return 0;
+}
+
+int
+pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
+{
+	void *p;
+	int err = 0;
+	int len, psize, maxlen;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	ASSERT(pktp->plen != 0);
+
+	maxlen = pktp->maxlen;
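+	/* a "minimal" fill only tops the pool up to a quarter of maxlen */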
+	psize = minimal ? (maxlen >> 2) : maxlen;
+	for (len = (int)pktp->len; len < psize; len++) {
+
+		p = PKTGET(osh, pktp->plen, TRUE); /* allocate a packet of the pool's fixed size */
+
+		if (p == NULL) {
+			err = BCME_NOMEM;
+			break;
+		}
+
+		if (pktpool_add(pktp, p) != BCME_OK) {
+			PKTFREE(osh, p, FALSE);
+			err = BCME_ERROR;
+			break;
+		}
+	}
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	if (pktp->cbcnt) {
+		if (pktp->empty == FALSE)
+			pktpool_avail_notify(pktp);
+	}
+
+	return err;
+}
+
+static void *
+pktpool_deq(pktpool_t *pktp)
+{
+	void *p = NULL;
+
+	if (pktp->avail == 0)
+		return NULL;
+
+	ASSERT(pktp->freelist != NULL);
+
+	p = pktp->freelist;  /* dequeue packet from head of pktpool free list */
+	pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
+
+
+	PKTSETFREELIST(p, NULL);
+
+	pktp->avail--;
+
+	return p;
+}
+
+static void
+pktpool_enq(pktpool_t *pktp, void *p)
+{
+	ASSERT(p != NULL);
+
+	PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
+	pktp->freelist = p; /* free list points to newly inserted packet */
+
+
+	pktp->avail++;
+	ASSERT(pktp->avail <= pktp->len);
+}
+
+/** utility for registering host addr fill function called from pciedev */
+int
+/* BCMATTACHFN */
+(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	ASSERT(pktp->cbext.cb == NULL);
+	pktp->cbext.cb = cb;
+	pktp->cbext.arg = arg;
+	return 0;
+}
+
+int
+pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	if (pktp == NULL)
+		return BCME_ERROR;
+	ASSERT(pktp->rxcplidfn.cb == NULL);
+	pktp->rxcplidfn.cb = cb;
+	pktp->rxcplidfn.arg = arg;
+	return 0;
+}
+
+/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
+void
+pktpool_invoke_dmarxfill(pktpool_t *pktp)
+{
+	ASSERT(pktp->dmarxfill.cb);
+	ASSERT(pktp->dmarxfill.arg);
+
+	if (pktp->dmarxfill.cb)
+		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
+}
+
+/** Registers callback functions for split rx mode */
+int
+pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	pktp->dmarxfill.cb = cb;
+	pktp->dmarxfill.arg = arg;
+
+	return 0;
+}
+
+/**
+ * Registers callback functions.
+ * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
+ */
+int
+pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int err = 0;
+	int i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	ASSERT(cb != NULL);
+
+	i = pktp->cbcnt;
+	if (i == PKTPOOL_CB_MAX_AVL) {
+		err = BCME_ERROR;
+		goto done;
+	}
+
+	ASSERT(pktp->cbs[i].cb == NULL);
+	pktp->cbs[i].cb = cb;
+	pktp->cbs[i].arg = arg;
+	pktp->cbcnt++;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return err;
+}
+
+/** Registers callback functions */
+int
+pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int err = 0;
+	int i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	ASSERT(cb != NULL);
+
+	i = pktp->ecbcnt;
+	if (i == PKTPOOL_CB_MAX) {
+		err = BCME_ERROR;
+		goto done;
+	}
+
+	ASSERT(pktp->ecbs[i].cb == NULL);
+	pktp->ecbs[i].cb = cb;
+	pktp->ecbs[i].arg = arg;
+	pktp->ecbcnt++;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return err;
+}
+
+/** Calls registered callback functions */
+static int
+pktpool_empty_notify(pktpool_t *pktp)
+{
+	int i;
+
+	pktp->empty = TRUE;
+	for (i = 0; i < pktp->ecbcnt; i++) {
+		ASSERT(pktp->ecbs[i].cb != NULL);
+		pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
+	}
+	pktp->empty = FALSE;
+
+	return 0;
+}
+
+#ifdef BCMDBG_POOL
+int
+pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int err = 0;
+	int i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	ASSERT(cb);
+
+	i = pktp->dbg_cbcnt;
+	if (i == PKTPOOL_CB_MAX) {
+		err = BCME_ERROR;
+		goto done;
+	}
+
+	ASSERT(pktp->dbg_cbs[i].cb == NULL);
+	pktp->dbg_cbs[i].cb = cb;
+	pktp->dbg_cbs[i].arg = arg;
+	pktp->dbg_cbcnt++;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return err;
+}
+
+int pktpool_dbg_notify(pktpool_t *pktp);
+
+int
+pktpool_dbg_notify(pktpool_t *pktp)
+{
+	int i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	for (i = 0; i < pktp->dbg_cbcnt; i++) {
+		ASSERT(pktp->dbg_cbs[i].cb);
+		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
+	}
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+
+int
+pktpool_dbg_dump(pktpool_t *pktp)
+{
+	int i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	printf("pool len=%d maxlen=%d\n",  pktp->dbg_qlen, pktp->maxlen);
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p);
+		printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
+			pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
+	}
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+
+int
+pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
+{
+	int i;
+	int state;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	bzero(stats, sizeof(pktpool_stats_t));
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		state = PKTPOOLSTATE(pktp->dbg_q[i].p);
+		switch (state) {
+			case POOL_TXENQ:
+				stats->enq++; break;
+			case POOL_TXDH:
+				stats->txdh++; break;
+			case POOL_TXD11:
+				stats->txd11++; break;
+			case POOL_RXDH:
+				stats->rxdh++; break;
+			case POOL_RXD11:
+				stats->rxd11++; break;
+			case POOL_RXFILL:
+				stats->rxfill++; break;
+			case POOL_IDLE:
+				stats->idle++; break;
+		}
+	}
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+
+int
+pktpool_start_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		goto done;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			pktp->dbg_q[i].cycles = cycles;
+			break;
+		}
+	}
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+
+int pktpool_stop_trigger(pktpool_t *pktp, void *p);
+
+int
+pktpool_stop_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		goto done;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			if (pktp->dbg_q[i].cycles == 0)
+				break;
+
+			if (cycles >= pktp->dbg_q[i].cycles)
+				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
+			else
+				pktp->dbg_q[i].dur =
+					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
+
+			pktp->dbg_q[i].cycles = 0;
+			break;
+		}
+	}
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+#endif /* BCMDBG_POOL */
+
+int
+pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
+{
+	BCM_REFERENCE(osh);
+	ASSERT(pktp);
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	pktp->availcb_excl = NULL;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return 0;
+}
+
+int
+pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
+{
+	int i;
+	int err;
+	BCM_REFERENCE(osh);
+
+	ASSERT(pktp);
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	ASSERT(pktp->availcb_excl == NULL);
+	for (i = 0; i < pktp->cbcnt; i++) {
+		if (cb == pktp->cbs[i].cb) {
+			pktp->availcb_excl = &pktp->cbs[i];
+			break;
+		}
+	}
+
+	if (pktp->availcb_excl == NULL)
+		err = BCME_ERROR;
+	else
+		err = 0;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return err;
+}
+
+static void
+pktpool_avail_notify(pktpool_t *pktp)
+{
+	int i, k, idx;
+	int avail;
+
+	ASSERT(pktp);
+	if (pktp->availcb_excl != NULL) {
+		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
+		return;
+	}
+
+	k = pktp->cbcnt - 1;
+	for (i = 0; i < pktp->cbcnt; i++) {
+		avail = pktp->avail;
+
+		if (avail) {
+			if (pktp->cbtoggle)
+				idx = i;
+			else
+				idx = k--;
+
+			ASSERT(pktp->cbs[idx].cb != NULL);
+			pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
+		}
+	}
+
+	/* Alternate between filling from head or tail
+	 */
+	pktp->cbtoggle ^= 1;
+
+	return;
+}
+
+/** Gets an empty packet from the caller provided pool */
+void *
+pktpool_get(pktpool_t *pktp)
+{
+	void *p;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+
+	p = pktpool_deq(pktp);
+
+	if (p == NULL) {
+		/* Notify and try to reclaim tx pkts */
+		if (pktp->ecbcnt)
+			pktpool_empty_notify(pktp);
+
+		p = pktpool_deq(pktp);
+		if (p == NULL)
+			goto done;
+	}
+
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void
+pktpool_free(pktpool_t *pktp, void *p)
+{
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	ASSERT(p != NULL);
+#ifdef BCMDBG_POOL
+	/* pktpool_stop_trigger(pktp, p); */
+#endif
+
+	pktpool_enq(pktp, p);
+
+	/**
+	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
+	 * If any avail callback functions are registered, send a notification
+	 * that a new packet is available in the pool.
+	 */
+	if (pktp->cbcnt) {
+		/* To use CPU cycles more efficiently, callbacks can be temporarily disabled.
+		 * This allows feeding on a burst basis instead of an inefficient per-packet basis.
+		 */
+		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
+			/**
+			 * If the call originated from pktpool_empty_notify, the just freed packet
+			 * is needed in pktpool_get.
+			 * Therefore don't call pktpool_avail_notify.
+			 */
+			if (pktp->empty == FALSE)
+				pktpool_avail_notify(pktp);
+		} else {
+			/**
+			 * The callback is temporarily disabled, log that a packet has been freed.
+			 */
+			pktp->emptycb_disable = EMPTYCB_SKIPPED;
+		}
+	}
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return;
+}
+
+/** Adds a caller provided (empty) packet to the caller provided pool */
+int
+pktpool_add(pktpool_t *pktp, void *p)
+{
+	int err = 0;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	ASSERT(p != NULL);
+
+	if (pktp->len == pktp->maxlen) {
+		err = BCME_RANGE;
+		goto done;
+	}
+
+	/* pkts in pool have same length */
+	ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
+	PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
+
+	pktp->len++;
+	pktpool_enq(pktp, p);
+
+#ifdef BCMDBG_POOL
+	pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+
+done:
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return err;
+}
+
+/**
+ * Force pktpool_setmaxlen () into RAM as it uses a constant
+ * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
+ */
+int
+BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
+{
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	if (maxlen > PKTPOOL_LEN_MAX)
+		maxlen = PKTPOOL_LEN_MAX;
+
+	/* if pool is already beyond maxlen, then just cap it
+	 * since we currently do not reduce the pool len
+	 * already allocated
+	 */
+	pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
+
+	/* protect shared resource */
+	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+		return BCME_ERROR;
+
+	return pktp->maxlen;
+}
+
+void
+pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
+{
+	ASSERT(pktp);
+
+	/**
+	 * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
+	 * If callback is going to be re-enabled, check if any packet got
+	 * freed and added back to the pool while callback was disabled.
+	 * When this is the case, do the callback now, provided that callback functions
+	 * are registered and this call did not originate from pktpool_empty_notify.
+	 */
+	if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
+		(pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
+			pktpool_avail_notify(pktp);
+	}
+
+	/* Enable or temporarily disable callback when packet becomes available. */
+	pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
+}
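+
+/*
+ * Usage sketch (illustrative; next_pkt_to_free() is a hypothetical helper):
+ * free a burst of packets without running the avail callbacks once per packet,
+ * then deliver a single notification when callbacks are re-enabled.
+ *
+ *	pktpool_emptycb_disable(pktp, TRUE);
+ *	while ((p = next_pkt_to_free()) != NULL)
+ *		pktpool_free(pktp, p);
+ *	pktpool_emptycb_disable(pktp, FALSE);	// fires the skipped notification
+ */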
+
+bool
+pktpool_emptycb_disabled(pktpool_t *pktp)
+{
+	ASSERT(pktp);
+	return pktp->emptycb_disable != EMPTYCB_ENABLED;
+}
+
+#ifdef BCMPKTPOOL
+#include <hnd_lbuf.h>
+
+pktpool_t *pktpool_shared = NULL;
+
+#ifdef BCMFRAGPOOL
+pktpool_t *pktpool_shared_lfrag = NULL;
+#endif /* BCMFRAGPOOL */
+
+pktpool_t *pktpool_shared_rxlfrag = NULL;
+
+static osl_t *pktpool_osh = NULL;
+
+/**
+ * Initializes several packet pools and allocates packets within those pools.
+ */
+int
+hnd_pktpool_init(osl_t *osh)
+{
+	int err;
+	int n;
+
+	/* Construct a packet pool registry before initializing packet pools */
+	n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
+	if (n != PKTPOOL_MAXIMUM_ID) {
+		ASSERT(0);
+		err = BCME_ERROR;
+		goto error0;
+	}
+
+	pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error1;
+	}
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+	pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared_lfrag == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error2;
+	}
+#endif
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+	pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
+	if (pktpool_shared_rxlfrag == NULL) {
+		ASSERT(0);
+		err = BCME_NOMEM;
+		goto error3;
+	}
+#endif
+
+
+	/*
+	 * At this early stage, there's not enough memory to allocate all
+	 * requested pkts in the shared pool.  Need to add to the pool
+	 * after reclaim
+	 *
+	 * n = NRXBUFPOST + SDPCMD_RXBUFS;
+	 *
+	 * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
+	 * registry is not initialized or the registry is depleted.
+	 *
+	 * A BCME_NOMEM error only indicates that the requested number of packets
+	 * were not filled into the pool.
+	 */
+	n = 1;
+	if ((err = pktpool_init(osh, pktpool_shared,
+	                        &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) {
+		ASSERT(0);
+		goto error4;
+	}
+	pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+	n = 1;
+	if ((err = pktpool_init(osh, pktpool_shared_lfrag,
+	                        &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) {
+		ASSERT(0);
+		goto error5;
+	}
+	pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
+#endif
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+	n = 1;
+	if ((err = pktpool_init(osh, pktpool_shared_rxlfrag,
+	                        &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) {
+		ASSERT(0);
+		goto error6;
+	}
+	pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
+#endif
+
+	pktpool_osh = osh;
+
+	return BCME_OK;
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+error6:
+#endif
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+	pktpool_deinit(osh, pktpool_shared_lfrag);
+error5:
+#endif
+
+#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
+	(defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
+	pktpool_deinit(osh, pktpool_shared);
+#endif
+
+error4:
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+	hnd_free(pktpool_shared_rxlfrag);
+	pktpool_shared_rxlfrag = (pktpool_t *)NULL;
+error3:
+#endif /* BCMRXFRAGPOOL */
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+	hnd_free(pktpool_shared_lfrag);
+	pktpool_shared_lfrag = (pktpool_t *)NULL;
+error2:
+#endif /* BCMFRAGPOOL */
+
+	hnd_free(pktpool_shared);
+	pktpool_shared = (pktpool_t *)NULL;
+
+error1:
+	pktpool_dettach(osh);
+error0:
+	return err;
+} /* hnd_pktpool_init */
+
+int
+hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
+{
+	return (pktpool_fill(pktpool_osh, pktpool, minimal));
+}
+
+/** refills pktpools after reclaim */
+void
+hnd_pktpool_refill(bool minimal)
+{
+	if (POOL_ENAB(pktpool_shared)) {
+		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
+	}
+/* fragpool reclaim */
+#ifdef BCMFRAGPOOL
+	if (POOL_ENAB(pktpool_shared_lfrag)) {
+#if defined(SRMEM)
+		if (SRMEM_ENAB()) {
+			int maxlen = pktpool_maxlen(pktpool_shared);
+			int len = pktpool_len(pktpool_shared);
+
+			for (; len < maxlen; len++)	{
+				void *p;
+				if ((p = PKTSRGET(pktpool_plen(pktpool_shared))) == NULL)
+					break;
+				pktpool_add(pktpool_shared, p);
+			}
+		}
+#endif /* SRMEM */
+		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
+	}
+#endif /* BCMFRAGPOOL */
+/* rx fragpool reclaim */
+#ifdef BCMRXFRAGPOOL
+	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
+		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
+	}
+#endif
+}
+#endif /* BCMPKTPOOL */
diff --git a/drivers/net/wireless/bcmdhd/hnd_pktq.c b/drivers/net/wireless/bcmdhd/hnd_pktq.c
new file mode 100644
index 0000000..132b321
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hnd_pktq.c
@@ -0,0 +1,973 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktq.c 644628 2016-06-21 06:25:58Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <osl_ext.h>
+#include <bcmutils.h>
+#include <hnd_pktq.h>
+
+/* mutex macros for thread safe */
+#ifdef HND_PKTQ_THREAD_SAFE
+#define HND_PKTQ_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
+#define HND_PKTQ_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
+#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
+#define HND_PKTQ_MUTEX_RELEASE(mutex)		osl_ext_mutex_release(mutex)
+#else
+#define HND_PKTQ_MUTEX_CREATE(name, mutex)	OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_DELETE(mutex)		OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec)	OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_RELEASE(mutex)		OSL_EXT_SUCCESS
+#endif /* HND_PKTQ_THREAD_SAFE */
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);		/* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, p);
+	else
+		q->head = p;
+
+	q->tail = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
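+
+/*
+ * Usage sketch (illustrative): callers are expected to check for space first
+ * (the ASSERTs above only fire in debug builds), enqueue at a precedence and
+ * later drain it in order with pktq_pdeq() further below.
+ *
+ *	if (!pktq_pfull(pq, prec))
+ *		pktq_penq(pq, prec, pkt);
+ *	p = pktq_pdeq(pq, prec);
+ */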
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);		/* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head == NULL)
+		q->tail = p;
+
+	PKTSETLINK(p, q->head);
+	q->head = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+/*
+ * Append spktq 'list' to the tail of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_append(struct pktq *pq, int prec, struct spktq *list)
+{
+	struct pktq_prec *q;
+	struct pktq_prec *list_q;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	list_q = &list->q[0];
+
+	/* empty list check */
+	if (list_q->head == NULL)
+		goto done;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(list_q->tail) == NULL);         /* terminated list */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, list_q->head);
+	else
+		q->head = list_q->head;
+
+	q->tail = list_q->tail;
+	q->len += list_q->len;
+	pq->len += list_q->len;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	list_q->head = NULL;
+	list_q->tail = NULL;
+	list_q->len = 0;
+	list->len = 0;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return;
+}
+
+/*
+ * Prepend spktq 'list' to the head of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_prepend(struct pktq *pq, int prec, struct spktq *list)
+{
+	struct pktq_prec *q;
+	struct pktq_prec *list_q;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	list_q = &list->q[0];
+
+	/* empty list check */
+	if (list_q->head == NULL)
+		goto done;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(list_q->tail) == NULL);         /* terminated list */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	/* set the tail packet of list to point at the former pq head */
+	PKTSETLINK(list_q->tail, q->head);
+	/* the new q head is the head of list */
+	q->head = list_q->head;
+
+	/* If the q tail was non-null, then it stays as is.
+	 * If the q tail was null, it is now the tail of list
+	 */
+	if (q->tail == NULL) {
+		q->tail = list_q->tail;
+	}
+
+	q->len += list_q->len;
+	pq->len += list_q->len;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	list_q->head = NULL;
+	list_q->tail = NULL;
+	list_q->len = 0;
+	list->len = 0;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		goto done;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
+{
+	struct pktq_prec *q;
+	void *p = NULL;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if (prev_p == NULL)
+		goto done;
+
+	if ((p = PKTLINK(prev_p)) == NULL)
+		goto done;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(prev_p, PKTLINK(p));
+	PKTSETLINK(p, NULL);
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		goto done;
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		goto done;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+	bool ret = FALSE;
+	struct pktq_prec *q;
+	void *p = NULL;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	/* Should this just assert pktbuf? */
+	if (!pktbuf)
+		goto done;
+
+	q = &pq->q[prec];
+
+	if (q->head == pktbuf) {
+		if ((q->head = PKTLINK(pktbuf)) == NULL)
+			q->tail = NULL;
+	} else {
+		for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+			;
+		if (p == NULL)
+			goto done;
+
+		PKTSETLINK(p, PKTLINK(pktbuf));
+		if (q->tail == pktbuf)
+			q->tail = p;
+	}
+
+	q->len--;
+	pq->len--;
+	PKTSETLINK(pktbuf, NULL);
+	ret = TRUE;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	return ret;
+}
+
+static void
+_pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx,
+              defer_free_pkt_fn_t defer, void *defer_ctx)
+{
+	struct pktq_prec wq;
+	struct pktq_prec *q;
+	void *p;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	/* move the prec queue aside to a work queue */
+	q = &pq->q[prec];
+
+	wq = *q;
+
+	q->head = NULL;
+	q->tail = NULL;
+	q->len = 0;
+
+	pq->len -= wq.len;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return;
+
+	/* start with the head of the work queue */
+	while ((p = wq.head) != NULL) {
+		/* unlink the current packet from the list */
+		wq.head = PKTLINK(p);
+		PKTSETLINK(p, NULL);
+		wq.len--;
+
+		/* call the filter function on current packet */
+		ASSERT(fltr != NULL);
+		switch ((*fltr)(fltr_ctx, p)) {
+		case PKT_FILTER_NOACTION:
+			/* put this packet back */
+			pktq_penq(pq, prec, p);
+			break;
+
+		case PKT_FILTER_DELETE:
+			/* delete this packet */
+			ASSERT(defer != NULL);
+			(*defer)(defer_ctx, p);
+			break;
+
+		case PKT_FILTER_REMOVE:
+			/* pkt already removed from list */
+			break;
+
+		default:
+			ASSERT(0);
+			break;
+		}
+	}
+
+	ASSERT(wq.len == 0);
+}
+
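+/*
+ * Apply 'fltr' to every packet queued at precedence 'prec'. For each packet
+ * the filter returns PKT_FILTER_NOACTION (the packet is re-queued),
+ * PKT_FILTER_DELETE (the packet is handed to 'defer' and freed when 'flush'
+ * runs), or PKT_FILTER_REMOVE (the filter has already taken ownership).
+ * 'flush' is invoked once after the whole precedence has been processed.
+ */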
+void
+pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx,
+	defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx)
+{
+	_pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx);
+
+	ASSERT(flush != NULL);
+	(*flush)(flush_ctx);
+}
+
+void
+pktq_filter(struct pktq *pq, pktq_filter_t fltr, void* fltr_ctx,
+	defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx)
+{
+	bool filter = FALSE;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	/* Optimization: if the pktq len is 0, just return.
+	 * A pktq len of 0 means all of the pktq's prec queues are empty.
+	 */
+	if (pq->len > 0) {
+		filter = TRUE;
+	}
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return;
+
+	if (filter) {
+		int prec;
+
+		PKTQ_PREC_ITER(pq, prec) {
+			_pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx);
+		}
+
+		ASSERT(flush != NULL);
+		(*flush)(flush_ctx);
+	}
+}
+
+bool
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+	int prec;
+
+	if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+	/* pq is variable size; only zero out what's requested */
+	bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+	pq->num_prec = (uint16)num_prec;
+
+	pq->max = (uint16)max_len;
+
+	for (prec = 0; prec < num_prec; prec++)
+		pq->q[prec].max = pq->max;
+
+	return TRUE;
+}
+
+bool
+pktq_deinit(struct pktq *pq)
+{
+	BCM_REFERENCE(pq);
+	if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	return TRUE;
+}
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
+{
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	if (prec < pq->num_prec)
+		pq->q[prec].max = (uint16)max_len;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p = NULL;
+	int prec;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	if (pq->len == 0)
+		goto done;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		goto done;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p = NULL, *prev;
+	int prec;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	if (pq->len == 0)
+		goto done;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		goto done;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+	int prec;
+	void *p = NULL;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	if (pq->len == 0)
+		goto done;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	p = pq->q[prec].head;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+	int prec;
+	void *p = NULL;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	if (pq->len == 0)
+		goto done;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	p = pq->q[prec].tail;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
+{
+	void *p;
+
+	/* no need for mutex protection here */
+
+	/* start with the head of the list */
+	while ((p = pktq_pdeq(pq, prec)) != NULL) {
+
+		/* delete this packet */
+		PKTFREE(osh, p, dir);
+	}
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
+{
+	bool flush = FALSE;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return;
+
+	/* Optimize flush: if the pktq len is 0, just return.
+	 * A pktq len of 0 means all of the pktq's prec queues are empty.
+	 */
+	if (pq->len > 0) {
+		flush = TRUE;
+	}
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return;
+
+	if (flush) {
+		int prec;
+
+		PKTQ_PREC_ITER(pq, prec) {
+			pktq_pflush(osh, pq, prec, dir);
+		}
+	}
+}
+
+/* Return sum of lengths of a specific set of precedences */
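+/* e.g. pktq_mlen(pq, (1 << 2) | (1 << 3)) counts only the packets queued at precedences 2 and 3 */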
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+	int prec, len;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return 0;
+
+	len = 0;
+
+	for (prec = 0; prec <= pq->hi_prec; prec++)
+		if (prec_bmp & (1 << prec))
+			len += pq->q[prec].len;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return 0;
+
+	return len;
+}
+
+/* Priority peek from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p = NULL;
+	int prec;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	if (pq->len == 0)
+		goto done;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+		if (prec-- == 0)
+			goto done;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		goto done;
+
+	if (prec_out)
+		*prec_out = prec;
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p = NULL;
+	int prec;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	if (pq->len == 0)
+		goto done;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
+		if (prec-- == 0)
+			goto done;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		goto done;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	// terence 20150308: fix for non-null pointer of skb->prev sent from ndo_start_xmit
+	if (q->len == 0) {
+		q->head = NULL;
+		q->tail = NULL;
+	}
+
+	if (prec_out)
+		*prec_out = prec;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+done:
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return NULL;
+
+	return p;
+}
+
+#ifdef HND_PKTQ_THREAD_SAFE
+int
+pktq_pavail(struct pktq *pq, int prec)
+{
+	int ret;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return 0;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	ret = pq->q[prec].max - pq->q[prec].len;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return 0;
+
+	return ret;
+}
+
+bool
+pktq_pfull(struct pktq *pq, int prec)
+{
+	bool ret;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	ret = pq->q[prec].len >= pq->q[prec].max;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	return ret;
+}
+
+int
+pktq_avail(struct pktq *pq)
+{
+	int ret;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return 0;
+
+	ret = pq->max - pq->len;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return 0;
+
+	return ret;
+}
+
+bool
+pktq_full(struct pktq *pq)
+{
+	bool ret;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	ret = pq->len >= pq->max;
+
+	/* protect shared resource */
+	if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+		return FALSE;
+
+	return ret;
+}
+#endif	/* HND_PKTQ_THREAD_SAFE */
diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c
new file mode 100644
index 0000000..c76a943
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -0,0 +1,382 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndpmu.c 657872 2016-09-02 22:17:34Z $
+ */
+
+
+/*
+ * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
+ * In the context of this file, however, the baseband ('BB') PLL/FLL is the one referred to.
+ *
+ * Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used.
+ * They refer to different revisions of the PMU (which is at revision 18 as of Apr 25, 2012).
+ * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop), which supports
+ * fractional frequency generation. pmu2_ does not support fractional frequency generation.
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+#if defined(BCMULP)
+#include <ulp.h>
+#endif /* defined(BCMULP) */
+#include <sbgci.h>
+#ifdef EVENT_LOG_COMPILE
+#include <event_log.h>
+#endif
+
+#define	PMU_ERROR(args)
+
+#define	PMU_MSG(args)
+
+/* Allows verbose debugging messages, not intended to be enabled
+ * except on private builds, to remain checked in.
+ */
+#define	PMU_NONE(args)
+
+/** contains resource bit positions for a specific chip */
+struct rsc_per_chip_s {
+	uint8 ht_avail;
+	uint8 macphy_clkavail;
+	uint8 ht_start;
+	uint8 otp_pu;
+};
+
+typedef struct rsc_per_chip_s rsc_per_chip_t;
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+	uint8 strength;			/* Pad Drive Strength in mA */
+	uint8 sel;			/* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+	{4, 0x2},
+	{2, 0x3},
+	{1, 0x0},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+	{12, 0x7},
+	{10, 0x6},
+	{8, 0x5},
+	{6, 0x4},
+	{4, 0x2},
+	{2, 0x1},
+	{0, 0x0} };
+
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+	{32, 0x7},
+	{26, 0x6},
+	{22, 0x5},
+	{16, 0x4},
+	{12, 0x3},
+	{8, 0x2},
+	{4, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+	{32, 0x6},
+	{26, 0x7},
+	{22, 0x4},
+	{16, 0x5},
+	{12, 0x2},
+	{8, 0x3},
+	{4, 0x0},
+	{0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+	{6, 0x7},
+	{5, 0x6},
+	{4, 0x5},
+	{3, 0x4},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+
+/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = {
+	{3, 0x3},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+
+/**
+ * SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel
+ * architecture page, section 'PMU Chip Control 1 Register definition', click link to picture
+ * BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has
+ * been written '1'.
+ */
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_3v3[] = {
+	/* note: for 14, 10, 6 and 2mA hw timing is not met according to rtl team */
+	{16, 0x7},
+	{12, 0x5},
+	{8,  0x3},
+	{4,  0x1} }; /* note: 43143 does not support tristate */
+
+#else
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = {
+	/* note: for 7, 5, 3 and 1mA hw timing is not met according to rtl team */
+	{8, 0x7},
+	{6, 0x5},
+	{4,  0x3},
+	{2,  0x1} }; /* note: 43143 does not support tristate */
+
+#endif /* BCM_SDIO_VDDIO */
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))
+
+/**
+ * This function balances stable SDIO operation against power consumption.
+ * Note that each drive strength table is for a specific VDDIO of the SDIO pads; ideally this
+ * function would read the VDDIO itself to select the correct table. For now this is handled
+ * with the 'BCM_SDIO_VDDIO' preprocessor constant.
+ *
+ * 'drivestrength': desired pad drive strength in mA. A drive strength of 0 requests tri-state
+ *		    (if the hardware supports it); without hardware support, the drive strength
+ *		    is not programmed.
+ */
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	sdiod_drive_str_t *str_tab = NULL;
+	uint32 str_mask = 0;	/* only alter desired bits in PMU chipcontrol 1 register */
+	uint32 str_shift = 0;
+	uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */
+	uint32 str_ovr_pmuval = 0;            /* position of bit within this register */
+	pmuregs_t *pmu;
+	uint origidx;
+
+	if (!(sih->cccaps & CC_CAP_PMU)) {
+		return;
+	}
+	BCM_REFERENCE(sdiod_drive_strength_tab1);
+	BCM_REFERENCE(sdiod_drive_strength_tab2);
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	} else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), PMUREV(sih->pmurev))) {
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+		if (PMUREV(sih->pmurev) == 8) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+		} else if (PMUREV(sih->pmurev) == 11) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		}
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab6_1v8;
+		str_mask = 0x00001800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_3v3)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_3v3;
+		}
+#else
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_1v8)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_1v8;
+		}
+#endif /* BCM_SDIO_VDDIO */
+		str_mask = 0x00000007;
+		str_ovr_pmuval = PMU43143_CC0_SDIO_DRSTR_OVR;
+		break;
+	default:
+		PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+		         bcm_chipname(CHIPID(sih->chip), chn, 8),
+		         CHIPREV(sih->chiprev), PMUREV(sih->pmurev)));
+		break;
+	}
+
+	if (str_tab != NULL) {
+		uint32 cc_data_temp;
+		int i;
+
+		/* Pick the lowest available drive strength equal or greater than the
+		 * requested strength.	Drive strength of 0 requests tri-state.
+		 */
+		for (i = 0; drivestrength < str_tab[i].strength; i++)
+			;
+
+		if (i > 0 && drivestrength > str_tab[i].strength)
+			i--;
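+		/* e.g. with sdiod_drive_strength_tab3 a 14mA request stops the
+		 * scan at the 12mA entry and then steps back to 16mA, the lowest
+		 * strength that still meets the request
+		 */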
+
+		W_REG(osh, &pmu->chipcontrol_addr, PMU_CHIPCTL1);
+		cc_data_temp = R_REG(osh, &pmu->chipcontrol_data);
+		cc_data_temp &= ~str_mask;
+		cc_data_temp |= str_tab[i].sel << str_shift;
+		W_REG(osh, &pmu->chipcontrol_data, cc_data_temp);
+		if (str_ovr_pmuval) { /* enables the selected drive strength */
+			W_REG(osh,  &pmu->chipcontrol_addr, str_ovr_pmuctl);
+			OR_REG(osh, &pmu->chipcontrol_data, str_ovr_pmuval);
+		}
+		PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+		         drivestrength, str_tab[i].strength));
+	}
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+} /* si_sdiod_drive_strength_init */
+
+
+#if defined(BCMULP)
+int
+si_pmu_ulp_register(si_t *sih)
+{
+	return ulp_p1_module_register(ULP_MODULE_ID_PMU, &ulp_pmu_ctx, (void *)sih);
+}
+
+static uint
+si_pmu_ulp_get_retention_size_cb(void *handle, ulp_ext_info_t *einfo)
+{
+	ULP_DBG(("%s: sz: %d\n", __FUNCTION__, sizeof(si_pmu_ulp_cr_dat_t)));
+	return sizeof(si_pmu_ulp_cr_dat_t);
+}
+
+static int
+si_pmu_ulp_enter_cb(void *handle, ulp_ext_info_t *einfo, uint8 *cache_data)
+{
+	si_pmu_ulp_cr_dat_t crinfo = {0};
+	crinfo.ilpcycles_per_sec = ilpcycles_per_sec;
+	ULP_DBG(("%s: ilpcycles_per_sec: %x\n", __FUNCTION__, ilpcycles_per_sec));
+	memcpy(cache_data, (void*)&crinfo, sizeof(crinfo));
+	return BCME_OK;
+}
+
+static int
+si_pmu_ulp_exit_cb(void *handle, uint8 *cache_data,
+	uint8 *p2_cache_data)
+{
+	si_pmu_ulp_cr_dat_t *crinfo = (si_pmu_ulp_cr_dat_t *)cache_data;
+
+	ilpcycles_per_sec = crinfo->ilpcycles_per_sec;
+	ULP_DBG(("%s: ilpcycles_per_sec: %x, cache_data: %p\n", __FUNCTION__,
+		ilpcycles_per_sec, cache_data));
+	return BCME_OK;
+}
+
+void
+si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period)
+{
+	pmuregs_t *pmu;
+	pmu = si_setcoreidx(sih, si_findcoreidx(sih, PMU_CORE_ID, 0));
+	W_REG(osh, &pmu->ILPPeriod, ilp_period);
+}
+#endif /* defined(BCMULP) */
+
+
+
+void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask)
+{
+	pmuregs_t *pmu;
+	uint origidx;
+
+	/* Remember original core before switch to chipc/pmu */
+	origidx = si_coreidx(sih);
+	if (AOB_ENAB(sih)) {
+		pmu = si_setcore(sih, PMU_CORE_ID, 0);
+	}
+	else {
+		pmu = si_setcoreidx(sih, SI_CC_IDX);
+	}
+	ASSERT(pmu != NULL);
+
+	W_REG(osh, &pmu->min_res_mask, min_res_mask);
+	OSL_DELAY(100);
+
+	/* Return to original core */
+	si_setcoreidx(sih, origidx);
+}
+
+bool
+si_pmu_cap_fast_lpo(si_t *sih)
+{
+	return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ? TRUE : FALSE;
+}
+
+int
+si_pmu_fast_lpo_disable(si_t *sih)
+{
+	if (!si_pmu_cap_fast_lpo(sih)) {
+		PMU_ERROR(("%s: No Fast LPO capability\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	PMU_REG(sih, pmucontrol_ext,
+		PCTL_EXT_FASTLPO_ENAB |
+		PCTL_EXT_FASTLPO_SWENAB |
+		PCTL_EXT_FASTLPO_PCIE_SWENAB,
+		0);
+	OSL_DELAY(1000);
+	return BCME_OK;
+}
diff --git a/drivers/net/wireless/bcmdhd/include/802.11.h b/drivers/net/wireless/bcmdhd/include/802.11.h
new file mode 100644
index 0000000..443bdfc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/802.11.h
@@ -0,0 +1,5126 @@
+/*
+ * Fundamental types and constants relating to 802.11
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.11.h 700693 2017-05-20 20:29:07Z $
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <ethernet.h>
+#endif
+
+#include <wpa.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US			1024	/* 802.11 Time Unit is 1024 microseconds */
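+/* e.g. a typical beacon interval of 100 TU corresponds to 102400 microseconds (~102.4 ms) */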
+
+/* Generic 802.11 frame constants */
+#define DOT11_A3_HDR_LEN		24	/* d11 header length with A3 */
+#define DOT11_A4_HDR_LEN		30	/* d11 header length with A4 */
+#define DOT11_MAC_HDR_LEN		DOT11_A3_HDR_LEN	/* MAC header length */
+#define DOT11_FCS_LEN			4	/* d11 FCS length */
+#define DOT11_ICV_LEN			4	/* d11 ICV length */
+#define DOT11_ICV_AES_LEN		8	/* d11 ICV/AES length */
+#define DOT11_QOS_LEN			2	/* d11 QoS length */
+#define DOT11_HTC_LEN			4	/* d11 HT Control field length */
+
+#define DOT11_KEY_INDEX_SHIFT		6	/* d11 key index shift */
+#define DOT11_IV_LEN			4	/* d11 IV length */
+#define DOT11_IV_TKIP_LEN		8	/* d11 IV TKIP length */
+#define DOT11_IV_AES_OCB_LEN		4	/* d11 IV/AES/OCB length */
+#define DOT11_IV_AES_CCM_LEN		8	/* d11 IV/AES/CCM length */
+#define DOT11_IV_MAX_LEN		8	/* maximum iv len for any encryption */
+
+/* Includes MIC */
+#define DOT11_MAX_MPDU_BODY_LEN		2304	/* max MPDU body length */
+/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */
+#define DOT11_MAX_MPDU_LEN		(DOT11_A4_HDR_LEN + \
+					 DOT11_QOS_LEN + \
+					 DOT11_IV_AES_CCM_LEN + \
+					 DOT11_MAX_MPDU_BODY_LEN + \
+					 DOT11_ICV_LEN + \
+					 DOT11_FCS_LEN)	/* d11 max MPDU length */
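+/* i.e. 30 + 2 + 8 + 2304 + 4 + 4 = 2352 bytes, matching the total noted above */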
+
+#define DOT11_MAX_SSID_LEN		32	/* d11 max ssid length */
+
+/* dot11RTSThreshold */
+#define DOT11_DEFAULT_RTS_LEN		2347	/* d11 default RTS length */
+#define DOT11_MAX_RTS_LEN		2347	/* d11 max RTS length */
+
+/* dot11FragmentationThreshold */
+#define DOT11_MIN_FRAG_LEN		256	/* d11 min fragmentation length */
+#define DOT11_MAX_FRAG_LEN		2346	/* Max frag is also limited by aMPDUMaxLength
+						* of the attached PHY
+						*/
+#define DOT11_DEFAULT_FRAG_LEN		2346	/* d11 default fragmentation length */
+
+/* dot11BeaconPeriod */
+#define DOT11_MIN_BEACON_PERIOD		1	/* d11 min beacon period */
+#define DOT11_MAX_BEACON_PERIOD		0xFFFF	/* d11 max beacon period */
+
+/* dot11DTIMPeriod */
+#define DOT11_MIN_DTIM_PERIOD		1	/* d11 min DTIM period */
+#define DOT11_MAX_DTIM_PERIOD		0xFF	/* d11 max DTIM period */
+
+/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */
+#define DOT11_LLC_SNAP_HDR_LEN		8	/* d11 LLC/SNAP header length */
+/* minimum LLC header length; DSAP, SSAP, 8 bit Control (unnumbered) */
+#define DOT11_LLC_HDR_LEN_MIN		3
+#define DOT11_OUI_LEN			3	/* d11 OUI length */
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT11_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
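+/* e.g. an RFC1042-encapsulated IPv4 frame carries AA AA 03 00 00 00 08 00 in this header */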
+
+/* RFC1042 header used by 802.11 per 802.1H */
+#define RFC1042_HDR_LEN	(ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)	/* RFC1042 header length */
+
+/* Generic 802.11 MAC header */
+/**
+ * N.B.: This struct reflects the full 4 address 802.11 MAC header.
+ *		 The fields are defined such that the shorter 1, 2, and 3
+ *		 address headers just use the first k fields.
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	a1;		/* address 1 */
+	struct ether_addr	a2;		/* address 2 */
+	struct ether_addr	a3;		/* address 3 */
+	uint16			seq;		/* sequence control */
+	struct ether_addr	a4;		/* address 4 */
+} BWL_POST_PACKED_STRUCT;
+
+/* Control frames */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_RTS_LEN		16		/* d11 RTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CTS_LEN		10		/* d11 CTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_ACK_LEN		10		/* d11 ACK frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* AID */
+	struct ether_addr	bssid;		/* receiver address, STA in AP */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_PS_POLL_LEN	16		/* d11 PS poll frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	bssid;		/* transmitter address, STA in AP */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CS_END_LEN	16		/* d11 CF-END frame length */
+
+/**
+ * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling
+ *  category + OUI + vendor-specific content (this can be of variable length)
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+/** generic vendor specific action frame with variable length */
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+
+#define DOT11_ACTION_VS_HDR_LEN	6
+
+#define BCM_ACTION_OUI_BYTE0	0x00
+#define BCM_ACTION_OUI_BYTE1	0x90
+#define BCM_ACTION_OUI_BYTE2	0x4c
+
+/* BA/BAR Control parameters */
+#define DOT11_BA_CTL_POLICY_NORMAL	0x0000	/* normal ack */
+#define DOT11_BA_CTL_POLICY_NOACK	0x0001	/* no ack */
+#define DOT11_BA_CTL_POLICY_MASK	0x0001	/* ack policy mask */
+
+#define DOT11_BA_CTL_MTID		0x0002	/* multi tid BA */
+#define DOT11_BA_CTL_COMPRESSED		0x0004	/* compressed bitmap */
+
+#define DOT11_BA_CTL_NUMMSDU_MASK	0x0FC0	/* num msdu in bitmap mask */
+#define DOT11_BA_CTL_NUMMSDU_SHIFT	6	/* num msdu in bitmap shift */
+
+#define DOT11_BA_CTL_TID_MASK		0xF000	/* tid mask */
+#define DOT11_BA_CTL_TID_SHIFT		12	/* tid shift */
+
+/** control frame header (BA/BAR) */
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN	16		/* control frame hdr len */
+
+/** BAR frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+	uint16			bar_control;	/* BAR Control */
+	uint16			seqnum;		/* Starting Sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN		4		/* BAR frame payload length */
+
+#define DOT11_BA_BITMAP_LEN	128		/* bitmap length */
+#define DOT11_BA_CMP_BITMAP_LEN	8		/* compressed bitmap length */
+/** BA frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+	uint16			ba_control;	/* BA Control */
+	uint16			seqnum;		/* Starting Sequence control */
+	uint8			bitmap[DOT11_BA_BITMAP_LEN];	/* Block Ack Bitmap */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN		4		/* BA frame payload len (wo bitmap) */
+
+/** Management frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	da;		/* receiver address */
+	struct ether_addr	sa;		/* transmitter address */
+	struct ether_addr	bssid;		/* BSS ID */
+	uint16			seq;		/* sequence control */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_management_header dot11_management_header_t;
+#define	DOT11_MGMT_HDR_LEN	24		/* d11 management header length */
+
+/* Management frame payloads */
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+	uint32			timestamp[2];
+	uint16			beacon_interval;
+	uint16			capability;
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_BCN_PRB_LEN	12		/* 802.11 beacon/probe frame fixed length */
+#define	DOT11_BCN_PRB_FIXED_LEN	12		/* 802.11 beacon/probe frame fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+	uint16			alg;		/* algorithm */
+	uint16			seq;		/* sequence control */
+	uint16			status;		/* status code */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN		6	/* length of auth frame without challenge IE */
+#define DOT11_AUTH_SEQ_STATUS_LEN	4	/* length of auth frame without challenge IE and
+						 * without algorithm
+						 */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN	4	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+	struct ether_addr	ap;		/* Current AP address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN	10	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+	uint16			capability;	/* capability information */
+	uint16			status;		/* status code */
+	uint16			aid;		/* association ID */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN	6	/* length of assoc resp frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+	uint8	category;
+	uint8	action;
+	uint8	token;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN	3	/* d11 action measurement header length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+	uint8	category;
+	uint8	action;
+	uint8	ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+	uint8	category;
+	uint8	action;
+	uint8	control;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query {
+	uint8	category;
+	uint8	action;
+	uint16	id;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode {
+	uint8	category;
+	uint8	action;
+	uint8	mode;
+} BWL_POST_PACKED_STRUCT;
+
+/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */
+#define DOT11_ACTION_GID_MEMBERSHIP_LEN  8    /* bytes */
+#define DOT11_ACTION_GID_USER_POS_LEN   16    /* bytes */
+BWL_PRE_PACKED_STRUCT struct dot11_action_group_id {
+	uint8   category;
+	uint8   action;
+	uint8   membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN];
+	uint8   user_position[DOT11_ACTION_GID_USER_POS_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE	1
+#define SM_PWRSAVE_MODE		2
+
+/* ************* 802.11h related definitions. ************* */
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+	uint8 id;
+	uint8 len;
+	uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+	int8 min;
+	int8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+	uint8 id;
+	uint8 len;
+	uint8 tx_pwr;
+	uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN	2 	/* length of IE data, not including 2 byte header */
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+	uint8 id;
+	uint8 len;
+	uint8 first_channel;
+	uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+/**
+ * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband
+ * offset for 40MHz operation.  The three possible values are:
+ * 1 = above control channel
+ * 3 = below control channel
+ * 0 = no extension channel
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+	uint8	id;		/* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */
+	uint8	len;		/* IE length */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;           /* type indicates what follows */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN	5
+#define BRCM_EXTCH_IE_TYPE	53	/* 802.11n ID not yet assigned */
+#define DOT11_EXTCH_IE_LEN	1
+#define DOT11_EXT_CH_MASK	0x03	/* extension channel mask */
+#define DOT11_EXT_CH_UPPER	0x01	/* ext. ch. on upper sb */
+#define DOT11_EXT_CH_LOWER	0x03	/* ext. ch. on lower sb */
+#define DOT11_EXT_CH_NONE	0x00	/* no extension ch.  */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+	uint8	category;
+	uint8	action;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_frmhdr dot11_action_frmhdr_t;
+
+/* Action Field length */
+#define DOT11_ACTION_CATEGORY_LEN	1
+#define DOT11_ACTION_ACTION_LEN		1
+#define DOT11_ACTION_DIALOG_TOKEN_LEN	1
+#define DOT11_ACTION_CAPABILITY_LEN	2
+#define DOT11_ACTION_STATUS_CODE_LEN	2
+#define DOT11_ACTION_REASON_CODE_LEN	2
+#define DOT11_ACTION_TARGET_CH_LEN	1
+#define DOT11_ACTION_OPER_CLASS_LEN	1
+
+#define DOT11_ACTION_FRMHDR_LEN	2
+
+/** CSA IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+	uint8 id;	/* id DOT11_MNG_CHANNEL_SWITCH_ID */
+	uint8 len;	/* length of IE */
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN	3	/* length of IE data, not including 2 byte header */
+/* CSA mode - 802.11h-2003 $7.3.2.20 */
+#define DOT11_CSA_MODE_ADVISORY		0	/* no DOT11_CSA_MODE_NO_TX restriction imposed */
+#define DOT11_CSA_MODE_NO_TX		1	/* no transmission upon receiving CSA frame. */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+	uint8	category;
+	uint8	action;
+	dot11_chan_switch_ie_t chan_switch_ie;	/* for switch IE */
+	dot11_brcm_extch_ie_t extch_ie;		/* extension channel offset */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 reg;	/* regulatory class */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+
+/** 11n Extended Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+	uint8 id;	/* id DOT11_MNG_EXT_CSA_ID */
+	uint8 len;	/* length of IE */
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN	4	/* length of extended channel switch IE body */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	dot11_ext_csa_ie_t chan_switch_ie;	/* for switch IE */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+
+/**  Wide Bandwidth Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 channel_width;			/* new channel width */
+	uint8 center_frequency_segment_0;	/* center frequency segment 0 */
+	uint8 center_frequency_segment_1;	/* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t;
+
+#define DOT11_WIDE_BW_SWITCH_IE_LEN     3       /* length of IE data, not including 2 byte header */
+
+/** Channel Switch Wrapper IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t;
+
+typedef enum wide_bw_chan_width {
+	WIDE_BW_CHAN_WIDTH_20	= 0,
+	WIDE_BW_CHAN_WIDTH_40	= 1,
+	WIDE_BW_CHAN_WIDTH_80	= 2,
+	WIDE_BW_CHAN_WIDTH_160	= 3,
+	WIDE_BW_CHAN_WIDTH_80_80	= 4
+} wide_bw_chan_width_t;
+
+/**  Wide Bandwidth Channel IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_ID */
+	uint8 len;				/* length of IE */
+	uint8 channel_width;			/* channel width */
+	uint8 center_frequency_segment_0;	/* center frequency segment 0 */
+	uint8 center_frequency_segment_1;	/* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel dot11_wide_bw_chan_ie_t;
+
+#define DOT11_WIDE_BW_IE_LEN     3       /* length of IE data, not including 2 byte header */
+/** VHT Transmit Power Envelope IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 transmit_power_info;
+	uint8 local_max_transmit_power_20;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t;
+
+/* vht transmit power envelope IE length depends on channel width */
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ	1
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ	2
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ	3
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+	uint8	id;
+	uint8	len;
+	uint8	info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN	1	/* length of OBSS Coexistence INFO IE */
+
+#define	DOT11_OBSS_COEX_INFO_REQ		0x01
+#define	DOT11_OBSS_COEX_40MHZ_INTOLERANT	0x02
+#define	DOT11_OBSS_COEX_20MHZ_WIDTH_REQ	0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+	uint8	id;
+	uint8	len;
+	uint8	regclass;
+	uint8	chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN	1	/* fixed length of regclass */
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+	uint8 id;
+	uint8 len;
+	uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_COEX	1
+#define DOT11_EXTCAP_LEN_BT	3
+#define DOT11_EXTCAP_LEN_IW	4
+#define DOT11_EXTCAP_LEN_SI	6
+
+#define DOT11_EXTCAP_LEN_TDLS	5
+#define DOT11_11AC_EXTCAP_LEN_TDLS	8
+
+#define DOT11_EXTCAP_LEN_FMS			2
+#define DOT11_EXTCAP_LEN_PROXY_ARP		2
+#define DOT11_EXTCAP_LEN_TFS			3
+#define DOT11_EXTCAP_LEN_WNM_SLEEP		3
+#define DOT11_EXTCAP_LEN_TIMBC			3
+#define DOT11_EXTCAP_LEN_BSSTRANS		3
+#define DOT11_EXTCAP_LEN_DMS			4
+#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION	6
+#define DOT11_EXTCAP_LEN_TDLS_WBW		8
+#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION	8
+
+/* TDLS Capabilities */
+#define DOT11_TDLS_CAP_TDLS			37		/* TDLS support */
+#define DOT11_TDLS_CAP_PU_BUFFER_STA	28		/* TDLS Peer U-APSD buffer STA support */
+#define DOT11_TDLS_CAP_PEER_PSM		20		/* TDLS Peer PSM support */
+#define DOT11_TDLS_CAP_CH_SW			30		/* TDLS Channel switch */
+#define DOT11_TDLS_CAP_PROH			38		/* TDLS prohibited */
+#define DOT11_TDLS_CAP_CH_SW_PROH		39		/* TDLS Channel switch prohibited */
+#define DOT11_TDLS_CAP_TDLS_WIDER_BW	61	/* TDLS Wider Band-Width */
+
+#define TDLS_CAP_MAX_BIT		39		/* TDLS max bit defined in ext cap */
+
+/* 802.11h/802.11k Measurement Request/Report IEs */
+/* Measurement Type field */
+#define DOT11_MEASURE_TYPE_BASIC 	0   /* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 		1   /* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI		2   /* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD	3   /* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE	4   /* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON	5   /* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME	6   /* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT		7   /* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI		8   /* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM	9   /* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_MCDIAGS	10  /* d11 measurement multicast diagnostics */
+#define DOT11_MEASURE_TYPE_CIVICLOC	11  /* d11 measurement location civic */
+#define DOT11_MEASURE_TYPE_LOC_ID	12  /* d11 measurement location identifier */
+#define DOT11_MEASURE_TYPE_DIRCHANQ	13  /* d11 measurement dir channel quality */
+#define DOT11_MEASURE_TYPE_DIRMEAS	14  /* d11 measurement directional */
+#define DOT11_MEASURE_TYPE_DIRSTATS	15  /* d11 measurement directional stats */
+#define DOT11_MEASURE_TYPE_FTMRANGE	16  /* d11 measurement Fine Timing */
+#define DOT11_MEASURE_TYPE_PAUSE	255	/* d11 measurement pause type */
+
+/* Measurement Request Modes */
+#define DOT11_MEASURE_MODE_PARALLEL 	(1<<0)	/* d11 measurement parallel */
+#define DOT11_MEASURE_MODE_ENABLE 	(1<<1)	/* d11 measurement enable */
+#define DOT11_MEASURE_MODE_REQUEST	(1<<2)	/* d11 measurement request */
+#define DOT11_MEASURE_MODE_REPORT 	(1<<3)	/* d11 measurement report */
+#define DOT11_MEASURE_MODE_DUR 	(1<<4)	/* d11 measurement dur mandatory */
+/* Measurement Report Modes */
+#define DOT11_MEASURE_MODE_LATE 	(1<<0)	/* d11 measurement late */
+#define DOT11_MEASURE_MODE_INCAPABLE	(1<<1)	/* d11 measurement incapable */
+#define DOT11_MEASURE_MODE_REFUSED	(1<<2)	/* d11 measurement refuse */
+/* Basic Measurement Map bits */
+#define DOT11_MEASURE_BASIC_MAP_BSS	((uint8)(1<<0))	/* d11 measurement basic map BSS */
+#define DOT11_MEASURE_BASIC_MAP_OFDM	((uint8)(1<<1))	/* d11 measurement map OFDM */
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN	((uint8)(1<<2))	/* d11 measurement map unknown */
+#define DOT11_MEASURE_BASIC_MAP_RADAR	((uint8)(1<<3))	/* d11 measurement map radar */
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS	((uint8)(1<<4))	/* d11 measurement map unmeasured */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14	/* d11 measurement request IE length */
+/* length of Measure Request IE data not including variable len */
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3	/* d11 measurement request IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	BWL_PRE_PACKED_STRUCT union
+	{
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 subject;
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT lci;
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 subject;
+			uint8 type;  /* type of civic location */
+			uint8 siu;   /* service interval units */
+			uint16 si; /* service interval */
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT civic;
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 subject;
+			uint8 siu;   /* service interval units */
+			uint16 si; /* service interval */
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT locid;
+		BWL_PRE_PACKED_STRUCT struct {
+			uint16 max_init_delay;		/* maximum random initial delay */
+			uint8 min_ap_count;
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT ftm_range;
+	} BWL_POST_PACKED_STRUCT req;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req_loc dot11_meas_req_loc_t;
+#define DOT11_MNG_IE_MREQ_MIN_LEN           4	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN     4	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN   8	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN    6	/* d11 measurement report IE length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement {
+	uint8 subelement;
+	uint8 length;
+	uint8 lci_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lci_subelement dot11_lci_subelement_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_colocated_bssid_list_se {
+	uint8 sub_id;
+	uint8 length;
+	uint8 max_bssid_ind; /* MaxBSSID Indicator */
+	struct ether_addr bssid[1]; /* variable */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_colocated_bssid_list_se dot11_colocated_bssid_list_se_t;
+#define DOT11_LCI_COLOCATED_BSSID_LIST_FIXED_LEN     3
+#define DOT11_LCI_COLOCATED_BSSID_SUBELEM_ID         7
+
+BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement {
+	uint8 type;  /* type of civic location */
+	uint8 subelement;
+	uint8 length;
+	uint8 civic_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_civic_subelement dot11_civic_subelement_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	BWL_PRE_PACKED_STRUCT union
+	{
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 channel;
+			uint8 start_time[8];
+			uint16 duration;
+			uint8 map;
+		} BWL_POST_PACKED_STRUCT basic;
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 subelement;
+			uint8 length;
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT lci;
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 type;  /* type of civic location */
+			uint8 subelement;
+			uint8 length;
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT civic;
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 exp_tsf[8];
+			uint8 subelement;
+			uint8 length;
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT locid;
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 entry_count;
+			uint8 data[1];
+		} BWL_POST_PACKED_STRUCT ftm_range;
+		uint8 data[1];
+	} BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+#define DOT11_MNG_IE_MREP_MIN_LEN           5	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN     5	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN   6	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_LOCID_FIXED_LEN   13	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN   15	/* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN    4
+
+/* length of Measure Report IE data not including variable len */
+#define DOT11_MNG_IE_MREP_FIXED_LEN	3	/* d11 measurement response IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN	12	/* d11 measurement basic report length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+	uint8 id;
+	uint8 len;
+	uint8 count;	/* TBTTs until beacon interval in quiet starts */
+	uint8 period;	/* Beacon intervals between periodic quiet periods ? */
+	uint16 duration;	/* Length of quiet period, in TU's */
+	uint16 offset;	/* TU's offset from TBTT in Count field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+	uint8 channel;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+	uint8 id;
+	uint8 len;
+	uint8 eaddr[ETHER_ADDR_LEN];
+	uint8 interval;
+	chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+/* WME Elements */
+#define WME_OUI			"\x00\x50\xf2"	/* WME OUI */
+#define WME_OUI_LEN		3
+#define WME_OUI_TYPE		2	/* WME type */
+#define WME_TYPE		2	/* WME type, deprecated */
+#define WME_SUBTYPE_IE		0	/* Information Element */
+#define WME_SUBTYPE_PARAM_IE	1	/* Parameter Element */
+#define WME_SUBTYPE_TSPEC	2	/* Traffic Specification */
+#define WME_VER			1	/* WME version */
+
+/* WME Access Category Indices (ACIs) */
+#define AC_BE			0	/* Best Effort */
+#define AC_BK			1	/* Background */
+#define AC_VI			2	/* Video */
+#define AC_VO			3	/* Voice */
+#define AC_COUNT		4	/* number of ACs */
+
+typedef uint8 ac_bitmap_t;	/* AC bitmap of (1 << AC_xx) */
+
+#define AC_BITMAP_NONE		0x0	/* No ACs */
+#define AC_BITMAP_ALL		0xf	/* All ACs */
+#define AC_BITMAP_TST(ab, ac)	(((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac)	(((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
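+
+/* A minimal usage sketch for the AC_BITMAP_* helpers above; the function and
+ * variable names are hypothetical and shown for illustration only.
+ */
+#if 0	/* example only */
+static int example_video_admitted(void)
+{
+	ac_bitmap_t ab = AC_BITMAP_NONE;
+
+	AC_BITMAP_SET(ab, AC_VI);		/* admit video */
+	AC_BITMAP_SET(ab, AC_VO);		/* admit voice */
+	AC_BITMAP_RESET(ab, AC_VO);		/* revoke voice again */
+	return AC_BITMAP_TST(ab, AC_VI);	/* non-zero: AC_VI is still set */
+}
+#endif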
+
+/* Management PKT Lifetime indices */
+/* Removing flag checks 'BCMINTERNAL || WLTEST'
+ * while merging MERGE BIS120RC4 to DINGO2
+ */
+#define MGMT_ALL		0xffff
+#define MGMT_AUTH_LT	FC_SUBTYPE_AUTH
+#define MGMT_ASSOC_LT	FC_SUBTYPE_ASSOC_REQ
+
+/** WME Information Element (IE) */
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7	/* WME IE length */
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+	uint8	ACI;
+	uint8	ECW;
+	uint16  TXOP;		/* stored in network order (ls octet first) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+/** WME Parameter Element (PE) */
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN            24          /* WME Parameter IE length */
+
+/* QoS Info field for IE as sent from AP */
+#define WME_QI_AP_APSD_MASK         0x80        /* U-APSD Supported mask */
+#define WME_QI_AP_APSD_SHIFT        7           /* U-APSD Supported shift */
+#define WME_QI_AP_COUNT_MASK        0x0f        /* Parameter set count mask */
+#define WME_QI_AP_COUNT_SHIFT       0           /* Parameter set count shift */
+
+/* QoS Info field for IE as sent from STA */
+#define WME_QI_STA_MAXSPLEN_MASK    0x60        /* Max Service Period Length mask */
+#define WME_QI_STA_MAXSPLEN_SHIFT   5           /* Max Service Period Length shift */
+#define WME_QI_STA_APSD_ALL_MASK    0xf         /* APSD all AC bits mask */
+#define WME_QI_STA_APSD_ALL_SHIFT   0           /* APSD all AC bits shift */
+#define WME_QI_STA_APSD_BE_MASK     0x8         /* APSD AC_BE mask */
+#define WME_QI_STA_APSD_BE_SHIFT    3           /* APSD AC_BE shift */
+#define WME_QI_STA_APSD_BK_MASK     0x4         /* APSD AC_BK mask */
+#define WME_QI_STA_APSD_BK_SHIFT    2           /* APSD AC_BK shift */
+#define WME_QI_STA_APSD_VI_MASK     0x2         /* APSD AC_VI mask */
+#define WME_QI_STA_APSD_VI_SHIFT    1           /* APSD AC_VI shift */
+#define WME_QI_STA_APSD_VO_MASK     0x1         /* APSD AC_VO mask */
+#define WME_QI_STA_APSD_VO_SHIFT    0           /* APSD AC_VO shift */
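+
+/* A minimal sketch applying the STA QoS Info masks above to a qosinfo octet;
+ * the function and variable names are hypothetical.
+ */
+#if 0	/* example only */
+static void example_parse_sta_qosinfo(uint8 qosinfo)
+{
+	uint8 max_sp = (qosinfo & WME_QI_STA_MAXSPLEN_MASK) >> WME_QI_STA_MAXSPLEN_SHIFT;
+	uint8 be_apsd = (qosinfo & WME_QI_STA_APSD_BE_MASK) >> WME_QI_STA_APSD_BE_SHIFT;
+
+	(void)max_sp;		/* max service period length code (0..3) */
+	(void)be_apsd;		/* 1 if AC_BE is U-APSD enabled */
+}
+#endif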
+
+/* ACI */
+#define EDCF_AIFSN_MIN               1           /* AIFSN minimum value */
+#define EDCF_AIFSN_MAX               15          /* AIFSN maximum value */
+#define EDCF_AIFSN_MASK              0x0f        /* AIFSN mask */
+#define EDCF_ACM_MASK                0x10        /* ACM mask */
+#define EDCF_ACI_MASK                0x60        /* ACI mask */
+#define EDCF_ACI_SHIFT               5           /* ACI shift */
+#define EDCF_AIFSN_SHIFT             12          /* 4 MSB(0xFFF) in ifs_ctl for AC idx */
+
+/* ECW */
+#define EDCF_ECW_MIN                 0           /* cwmin/cwmax exponent minimum value */
+#define EDCF_ECW_MAX                 15          /* cwmin/cwmax exponent maximum value */
+#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK             0x0f        /* cwmin exponent form mask */
+#define EDCF_ECWMAX_MASK             0xf0        /* cwmax exponent form mask */
+#define EDCF_ECWMAX_SHIFT            4           /* cwmax exponent form shift */
+
+/* TXOP */
+#define EDCF_TXOP_MIN                0           /* TXOP minimum value */
+#define EDCF_TXOP_MAX                65535       /* TXOP maximum value */
+#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
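+
+/* A minimal sketch decoding one edcf_acparam_t entry with the EDCF_* masks and
+ * conversion macros above; the function and variable names are hypothetical.
+ */
+#if 0	/* example only */
+static void example_decode_acparam(const edcf_acparam_t *ac)
+{
+	uint8 aifsn = ac->ACI & EDCF_AIFSN_MASK;
+	uint8 aci = (ac->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
+	uint16 cwmin = EDCF_ECW2CW(ac->ECW & EDCF_ECWMIN_MASK);
+	uint16 cwmax = EDCF_ECW2CW((ac->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT);
+
+	(void)aifsn; (void)aci; (void)cwmin; (void)cwmax;
+}
+#endif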
+
+/* Default BE ACI value for non-WME connection STA */
+#define NON_EDCF_AC_BE_ACI_STA          0x02
+
+/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
+#define EDCF_AC_BE_ACI_STA           0x03	/* STA ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_STA           0xA4	/* STA ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_STA          0x0000	/* STA TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_STA           0x27	/* STA ACI value for background AC */
+#define EDCF_AC_BK_ECW_STA           0xA4	/* STA ECW value for background AC */
+#define EDCF_AC_BK_TXOP_STA          0x0000	/* STA TXOP value for background AC */
+#define EDCF_AC_VI_ACI_STA           0x42	/* STA ACI value for video AC */
+#define EDCF_AC_VI_ECW_STA           0x43	/* STA ECW value for video AC */
+#define EDCF_AC_VI_TXOP_STA          0x005e	/* STA TXOP value for video AC */
+#define EDCF_AC_VO_ACI_STA           0x62	/* STA ACI value for audio AC */
+#define EDCF_AC_VO_ECW_STA           0x32	/* STA ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_STA          0x002f	/* STA TXOP value for audio AC */
+
+/* Default EDCF parameters that AP uses; WMM draft Table 14 */
+#define EDCF_AC_BE_ACI_AP            0x03	/* AP ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_AP            0x64	/* AP ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_AP           0x0000	/* AP TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_AP            0x27	/* AP ACI value for background AC */
+#define EDCF_AC_BK_ECW_AP            0xA4	/* AP ECW value for background AC */
+#define EDCF_AC_BK_TXOP_AP           0x0000	/* AP TXOP value for background AC */
+#define EDCF_AC_VI_ACI_AP            0x41	/* AP ACI value for video AC */
+#define EDCF_AC_VI_ECW_AP            0x43	/* AP ECW value for video AC */
+#define EDCF_AC_VI_TXOP_AP           0x005e	/* AP TXOP value for video AC */
+#define EDCF_AC_VO_ACI_AP            0x61	/* AP ACI value for audio AC */
+#define EDCF_AC_VO_ECW_AP            0x32	/* AP ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_AP           0x002f	/* AP TXOP value for audio AC */
+
+/** EDCA Parameter IE */
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN            18          /* EDCA Parameter IE length */
+
+/** QoS Capability IE */
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+	uint8 id; 			/* 11, DOT11_MNG_QBSS_LOAD_ID */
+	uint8 length;
+	uint16 station_count; 		/* total number of STAs associated */
+	uint8 channel_utilization;	/* % of time, normalized to 255, QAP sensed medium busy */
+	uint16 aac; 			/* available admission capacity */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 	7	/* BSS load IE size */
+
+#define WLC_QBSS_LOAD_CHAN_FREE_MAX	0xff	/* max for channel free score */
+
+/* nom_msdu_size */
+#define FIXED_MSDU_SIZE 0x8000		/* MSDU size is fixed */
+#define MSDU_SIZE_MASK	0x7fff		/* (Nominal or fixed) MSDU size */
+
+/* surplus_bandwidth */
+/* Represented as 3 bits of integer, binary point, 13 bits fraction */
+#define	INTEGER_SHIFT	13	/* integer shift */
+#define FRACTION_MASK	0x1FFF	/* fraction mask */
+
+/** Management Notification Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+	uint8 category;			/* DOT11_ACTION_NOTIFICATION */
+	uint8 action;
+	uint8 token;
+	uint8 status;
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4	/* Fixed length */
+
+/** Timeout Interval IE */
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+	uint8 ti_type;
+	uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE	1
+#define TI_TYPE_KEY_LIFETIME		2
+
+#ifndef CISCO_AIRONET_OUI
+#define CISCO_AIRONET_OUI	"\x00\x40\x96"	/* Cisco AIRONET OUI */
+#endif
+/* QoS FastLane IE. */
+BWL_PRE_PACKED_STRUCT struct ccx_qfl_ie {
+	uint8	id;		/* 221, DOT11_MNG_VS_ID */
+	uint8	length;		/* 5 */
+	uint8	oui[3];		/* 00:40:96 */
+	uint8	type;		/* 11 */
+	uint8	data;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ccx_qfl_ie ccx_qfl_ie_t;
+#define CCX_QFL_IE_TYPE	11
+#define CCX_QFL_ENABLE_SHIFT	5
+#define CCX_QFL_ENALBE (1 << CCX_QFL_ENABLE_SHIFT)
+
+/* WME Action Codes */
+#define WME_ADDTS_REQUEST	0	/* WME ADDTS request */
+#define WME_ADDTS_RESPONSE	1	/* WME ADDTS response */
+#define WME_DELTS_REQUEST	2	/* WME DELTS request */
+
+/* WME Setup Response Status Codes */
+#define WME_ADMISSION_ACCEPTED		0	/* WME admission accepted */
+#define WME_INVALID_PARAMETERS		1	/* WME invalid parameters */
+#define WME_ADMISSION_REFUSED		3	/* WME admission refused */
+
+/* Macro to take a pointer to a beacon or probe response
+ * body and return the char* pointer to the SSID info element
+ */
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+/* Authentication frame payload constants */
+#define DOT11_OPEN_SYSTEM	0	/* d11 open authentication */
+#define DOT11_SHARED_KEY	1	/* d11 shared authentication */
+#define DOT11_FAST_BSS		2	/* d11 fast bss authentication */
+#define DOT11_SAE		3	/* d11 simultaneous authentication of equals */
+#define DOT11_FILS_SKEY_PFS	4	/* d11 fils shared key authentication w/o pfs */
+#define DOT11_FILS_SKEY		5	/* d11 fils shared key authentication w/ pfs */
+#define DOT11_FILS_PKEY		6	/* d11 fils public key authentication */
+#define DOT11_CHALLENGE_LEN	128	/* d11 challenge text length */
+
+/* Frame control macros */
+#define FC_PVER_MASK		0x3	/* PVER mask */
+#define FC_PVER_SHIFT		0	/* PVER shift */
+#define FC_TYPE_MASK		0xC	/* type mask */
+#define FC_TYPE_SHIFT		2	/* type shift */
+#define FC_SUBTYPE_MASK		0xF0	/* subtype mask */
+#define FC_SUBTYPE_SHIFT	4	/* subtype shift */
+#define FC_TODS			0x100	/* to DS */
+#define FC_TODS_SHIFT		8	/* to DS shift */
+#define FC_FROMDS		0x200	/* from DS */
+#define FC_FROMDS_SHIFT		9	/* from DS shift */
+#define FC_MOREFRAG		0x400	/* more frag. */
+#define FC_MOREFRAG_SHIFT	10	/* more frag. shift */
+#define FC_RETRY		0x800	/* retry */
+#define FC_RETRY_SHIFT		11	/* retry shift */
+#define FC_PM			0x1000	/* PM */
+#define FC_PM_SHIFT		12	/* PM shift */
+#define FC_MOREDATA		0x2000	/* more data */
+#define FC_MOREDATA_SHIFT	13	/* more data shift */
+#define FC_WEP			0x4000	/* WEP */
+#define FC_WEP_SHIFT		14	/* WEP shift */
+#define FC_ORDER		0x8000	/* order */
+#define FC_ORDER_SHIFT		15	/* order shift */
+
+/* sequence control macros */
+#define SEQNUM_SHIFT		4	/* seq. number shift */
+#define SEQNUM_MAX		0x1000	/* max seqnum + 1 */
+#define FRAGNUM_MASK		0xF	/* frag. number mask */
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG		0	/* management type */
+#define FC_TYPE_CTL		1	/* control type */
+#define FC_TYPE_DATA		2	/* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ		0	/* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP		1	/* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ		2	/* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP		3	/* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ		4	/* probe request */
+#define FC_SUBTYPE_PROBE_RESP		5	/* probe response */
+#define FC_SUBTYPE_BEACON		8	/* beacon */
+#define FC_SUBTYPE_ATIM			9	/* ATIM */
+#define FC_SUBTYPE_DISASSOC		10	/* disassoc. */
+#define FC_SUBTYPE_AUTH			11	/* authentication */
+#define FC_SUBTYPE_DEAUTH		12	/* de-authentication */
+#define FC_SUBTYPE_ACTION		13	/* action */
+#define FC_SUBTYPE_ACTION_NOACK		14	/* action no-ack */
+
+/* Control Subtypes */
+#define FC_SUBTYPE_CTL_WRAPPER		7	/* Control Wrapper */
+#define FC_SUBTYPE_BLOCKACK_REQ		8	/* Block Ack Req */
+#define FC_SUBTYPE_BLOCKACK		9	/* Block Ack */
+#define FC_SUBTYPE_PS_POLL		10	/* PS poll */
+#define FC_SUBTYPE_RTS			11	/* RTS */
+#define FC_SUBTYPE_CTS			12	/* CTS */
+#define FC_SUBTYPE_ACK			13	/* ACK */
+#define FC_SUBTYPE_CF_END		14	/* CF-END */
+#define FC_SUBTYPE_CF_END_ACK		15	/* CF-END ACK */
+
+/* Data Subtypes */
+#define FC_SUBTYPE_DATA			0	/* Data */
+#define FC_SUBTYPE_DATA_CF_ACK		1	/* Data + CF-ACK */
+#define FC_SUBTYPE_DATA_CF_POLL		2	/* Data + CF-Poll */
+#define FC_SUBTYPE_DATA_CF_ACK_POLL	3	/* Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_NULL			4	/* Null */
+#define FC_SUBTYPE_CF_ACK		5	/* CF-Ack */
+#define FC_SUBTYPE_CF_POLL		6	/* CF-Poll */
+#define FC_SUBTYPE_CF_ACK_POLL		7	/* CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA		8	/* QoS Data */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK	9	/* QoS Data + CF-Ack */
+#define FC_SUBTYPE_QOS_DATA_CF_POLL	10	/* QoS Data + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL	11	/* QoS Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_NULL		12	/* QoS Null */
+#define FC_SUBTYPE_QOS_CF_POLL		14	/* QoS CF-Poll */
+#define FC_SUBTYPE_QOS_CF_ACK_POLL	15	/* QoS CF-Ack + CF-Poll */
+
+/* Data Subtype Groups */
+#define FC_SUBTYPE_ANY_QOS(s)		(((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s)		(((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s)	(((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s)	(((s) & 1) != 0)
+#define FC_SUBTYPE_ANY_PSPOLL(s)	(((s) & 10) != 0)
+
+/* Type/Subtype Combos */
+#define FC_KIND_MASK		(FC_TYPE_MASK | FC_SUBTYPE_MASK)	/* FC kind mask */
+
+#define FC_KIND(t, s)	(((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))	/* FC kind */
+
+#define FC_SUBTYPE(fc)	(((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)	/* Subtype from FC */
+#define FC_TYPE(fc)	(((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)	/* Type from FC */
+
+#define FC_ASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)	/* assoc. request */
+#define FC_ASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)	/* assoc. response */
+#define FC_REASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)	/* reassoc. request */
+#define FC_REASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)	/* reassoc. response */
+#define FC_PROBE_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)	/* probe request */
+#define FC_PROBE_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)	/* probe response */
+#define FC_BEACON	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)		/* beacon */
+#define FC_ATIM		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM)		/* ATIM */
+#define FC_DISASSOC	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)	/* disassoc */
+#define FC_AUTH		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)		/* authentication */
+#define FC_DEAUTH	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)		/* deauthentication */
+#define FC_ACTION	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)		/* action */
+#define FC_ACTION_NOACK	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)	/* action no-ack */
+
+#define FC_CTL_WRAPPER	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)	/* Control Wrapper */
+#define FC_BLOCKACK_REQ	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)	/* Block Ack Req */
+#define FC_BLOCKACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)	/* Block Ack */
+#define FC_PS_POLL	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)	/* PS poll */
+#define FC_RTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)		/* RTS */
+#define FC_CTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)		/* CTS */
+#define FC_ACK		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)		/* ACK */
+#define FC_CF_END	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)		/* CF-END */
+#define FC_CF_END_ACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)	/* CF-END ACK */
+
+#define FC_DATA		FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)		/* data */
+#define FC_NULL_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)		/* null data */
+#define FC_DATA_CF_ACK	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)	/* data CF ACK */
+#define FC_QOS_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)	/* QoS data */
+#define FC_QOS_NULL	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)	/* QoS null */
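+
+/* A minimal sketch classifying a frame control field with the FC_* helpers
+ * above; the function name is hypothetical.
+ */
+#if 0	/* example only */
+static int example_is_beacon(uint16 fc)
+{
+	if (FC_TYPE(fc) != FC_TYPE_MNG)
+		return 0;
+	/* FC_KIND_MASK compares type and subtype in a single step */
+	return (fc & FC_KIND_MASK) == FC_BEACON;
+}
+#endif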
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT		0	/* QoS priority shift */
+#define QOS_PRIO_MASK		0x0007	/* QoS priority mask */
+#define QOS_PRIO(qos)		(((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)	/* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT		0	/* QoS TID shift */
+#define QOS_TID_MASK		0x000f	/* QoS TID mask */
+#define QOS_TID(qos)		(((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)	/* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT		4	/* QoS End of Service Period shift */
+#define QOS_EOSP_MASK		0x0010	/* QoS End of Service Period mask */
+#define QOS_EOSP(qos)		(((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)	/* Qos EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK	0	/* Normal Ack */
+#define QOS_ACK_NO_ACK		1	/* No Ack (eg mcast) */
+#define QOS_ACK_NO_EXP_ACK	2	/* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK	3	/* Block Ack */
+#define QOS_ACK_SHIFT		5	/* QoS ACK shift */
+#define QOS_ACK_MASK		0x0060	/* QoS ACK mask */
+#define QOS_ACK(qos)		(((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)	/* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT		7	/* AMSDU shift */
+#define QOS_AMSDU_MASK		0x0080	/* AMSDU mask */
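+
+/* A minimal sketch extracting the common subfields of a QoS Control field with
+ * the QOS_* helpers above; the function and variable names are hypothetical.
+ */
+#if 0	/* example only */
+static void example_parse_qos_ctl(uint16 qos)
+{
+	uint8 tid = QOS_TID(qos);			/* traffic identifier */
+	uint8 eosp = QOS_EOSP(qos);			/* end of service period */
+	uint8 ack_policy = QOS_ACK(qos);		/* e.g. QOS_ACK_NO_ACK */
+	int is_amsdu = (qos & QOS_AMSDU_MASK) != 0;	/* A-MSDU present */
+
+	(void)tid; (void)eosp; (void)ack_policy; (void)is_amsdu;
+}
+#endif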
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN		2	/* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN		2	/* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN	2	/* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN		2	/* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN		6	/* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN	2	/* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN		2	/* d11 management reason length */
+#define DOT11_MNG_AID_LEN		2	/* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN		2	/* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN		8	/* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK			0x3fff	/* d11 AID mask */
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED		0	/* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED		1	/* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL		2	/* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING		3	/* Deauthenticated because sending station
+						 * is leaving (or has left) IBSS or ESS
+						 */
+#define DOT11_RC_INACTIVITY		4	/* Disassociated due to inactivity */
+#define DOT11_RC_BUSY			5	/* Disassociated because AP is unable to handle
+						 * all currently associated stations
+						 */
+#define DOT11_RC_INVAL_CLASS_2		6	/* Class 2 frame received from
+						 * nonauthenticated station
+						 */
+#define DOT11_RC_INVAL_CLASS_3		7	/* Class 3 frame received from
+						 *  nonassociated station
+						 */
+#define DOT11_RC_DISASSOC_LEAVING	8	/* Disassociated because sending station is
+						 * leaving (or has left) BSS
+						 */
+#define DOT11_RC_NOT_AUTH		9	/* Station requesting (re)association is not
+						 * authenticated with responding station
+						 */
+#define DOT11_RC_BAD_PC			10	/* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS		11	/* Unacceptable supported channels element */
+
+/* 12 is unused by STA but could be used by AP/GO */
+#define DOT11_RC_DISASSOC_BTM		12	/* Disassociated due to BSS Transition Mgmt */
+
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS	32	/* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW		33	/* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES	34	/* excessive number of frames need ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP	35	/* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS		36	/* QSTA is leaving the QBSS (or resetting) */
+#define DOT11_RC_BAD_MECHANISM		37	/* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED		38	/* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT		39	/* timeout */
+
+#define DOT11_RC_MESH_PEERING_CANCELLED		52
+#define DOT11_RC_MESH_MAX_PEERS			53
+#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN	54
+#define DOT11_RC_MESH_CLOSE_RECVD		55
+#define DOT11_RC_MESH_MAX_RETRIES		56
+#define DOT11_RC_MESH_CONFIRM_TIMEOUT		57
+#define DOT11_RC_MESH_INVALID_GTK		58
+#define DOT11_RC_MESH_INCONSISTENT_PARAMS	59
+
+#define DOT11_RC_MESH_INVALID_SEC_CAP		60
+#define DOT11_RC_MESH_PATHERR_NOPROXYINFO	61
+#define DOT11_RC_MESH_PATHERR_NOFWINFO		62
+#define DOT11_RC_MESH_PATHERR_DSTUNREACH	63
+#define DOT11_RC_MESH_MBSSMAC_EXISTS		64
+#define DOT11_RC_MESH_CHANSWITCH_REGREQ		65
+#define DOT11_RC_MESH_CHANSWITCH_UNSPEC		66
+
+#define DOT11_RC_MAX			66	/* Reason codes > 66 are reserved */
+
+#define DOT11_RC_TDLS_PEER_UNREACH	25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED	26
+
+/* Status Codes */
+#define DOT11_SC_SUCCESS		0	/* Successful */
+#define DOT11_SC_FAILURE		1	/* Unspecified failure */
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2	/* TDLS wakeup schedule rejected but alternative  */
+					/* schedule provided */
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3	/* TDLS wakeup schedule rejected */
+#define DOT11_SC_TDLS_SEC_DISABLED	5	/* TDLS Security disabled */
+#define DOT11_SC_LIFETIME_REJ		6	/* Unacceptable lifetime */
+#define DOT11_SC_NOT_SAME_BSS		7	/* Not in same BSS */
+#define DOT11_SC_CAP_MISMATCH		10	/* Cannot support all requested
+						 * capabilities in the Capability
+						 * Information field
+						 */
+#define DOT11_SC_REASSOC_FAIL		11	/* Reassociation denied due to inability
+						 * to confirm that association exists
+						 */
+#define DOT11_SC_ASSOC_FAIL		12	/* Association denied due to reason
+						 * outside the scope of this standard
+						 */
+#define DOT11_SC_AUTH_MISMATCH		13	/* Responding station does not support
+						 * the specified authentication
+						 * algorithm
+						 */
+#define DOT11_SC_AUTH_SEQ		14	/* Received an Authentication frame
+						 * with authentication transaction
+						 * sequence number out of expected
+						 * sequence
+						 */
+#define DOT11_SC_AUTH_CHALLENGE_FAIL	15	/* Authentication rejected because of
+						 * challenge failure
+						 */
+#define DOT11_SC_AUTH_TIMEOUT		16	/* Authentication rejected due to timeout
+						 * waiting for next frame in sequence
+						 */
+#define DOT11_SC_ASSOC_BUSY_FAIL	17	/* Association denied because AP is
+						 * unable to handle additional
+						 * associated stations
+						 */
+#define DOT11_SC_ASSOC_RATE_MISMATCH	18	/* Association denied due to requesting
+						 * station not supporting all of the
+						 * data rates in the BSSBasicRateSet
+						 * parameter
+						 */
+#define DOT11_SC_ASSOC_SHORT_REQUIRED	19	/* Association denied due to requesting
+						 * station not supporting the Short
+						 * Preamble option
+						 */
+#define DOT11_SC_ASSOC_PBCC_REQUIRED	20	/* Association denied due to requesting
+						 * station not supporting the PBCC
+						 * Modulation option
+						 */
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED	21	/* Association denied due to requesting
+						 * station not supporting the Channel
+						 * Agility option
+						 */
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED	22	/* Association denied because Spectrum
+							 * Management capability is required.
+							 */
+#define DOT11_SC_ASSOC_BAD_POWER_CAP	23	/* Association denied because the info
+						 * in the Power Cap element is
+						 * unacceptable.
+						 */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS	24	/* Association denied because the info
+						 * in the Supported Channel element is
+						 * unacceptable
+						 */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED	25	/* Association denied due to requesting
+							 * station not supporting the Short Slot
+							 * Time option
+							 */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26	/* Association denied because requesting station
+						 * does not support the DSSS-OFDM option
+						 */
+#define DOT11_SC_ASSOC_HT_REQUIRED	27	/* Association denied because the requesting
+						 * station does not support HT features
+						 */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE	28	/* Association denied due to AP
+						 * being unable to reach the R0 Key Holder
+						 */
+#define DOT11_SC_ASSOC_TRY_LATER	30	/* Association denied temporarily, try again later
+						 */
+#define DOT11_SC_ASSOC_MFP_VIOLATION	31	/* Association denied due to Robust Management
+						 * frame policy violation
+						 */
+
+#define	DOT11_SC_DECLINED		37	/* request declined */
+#define	DOT11_SC_INVALID_PARAMS		38	/* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER	42 /* invalid pairwise cipher */
+#define	DOT11_SC_INVALID_AKMP		43	/* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP	45	/* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED	48	/* DLS is not allowed in the BSS by policy */
+#define	DOT11_SC_INVALID_PMKID		53	/* Association denied due to invalid PMKID */
+#define	DOT11_SC_INVALID_MDID		54	/* Association denied due to invalid MDID */
+#define	DOT11_SC_INVALID_FTIE		55	/* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED	59	/* ad proto not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ			60	/* no outstanding req */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER		61	/* no response from server */
+#define DOT11_SC_TIMEOUT					62	/* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE		63	/* query rsp too large */
+#define DOT11_SC_SERVER_UNREACHABLE			65	/* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG			70	/* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE		71	/* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE		72	/* Invalid contents of RSNIE */
+
+#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED	76	/* Anti-clogging token required */
+#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP	77	/* Invalid finite cyclic group */
+
+#define DOT11_SC_ASSOC_VHT_REQUIRED	104	/* Association denied because the requesting
+						 * station does not support VHT features.
+						 */
+
+#define DOT11_SC_TRANSMIT_FAILURE	79	/* transmission failure */
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN			1	/* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN		2	/* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN			3	/* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT		0	/* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD		1	/* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL		2	/* d11 management TIM BITMAP control  */
+#define DOT11_MNG_TIM_PVB			3	/* d11 management TIM PVB */
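+
+/* A minimal sketch reading the fixed TIM fields from the INFORMATION portion
+ * of a TIM element via the offsets above; names are hypothetical.
+ */
+#if 0	/* example only */
+static void example_parse_tim(const uint8 *tim_info, int len)
+{
+	uint8 dtim_count, dtim_period;
+
+	if (len < DOT11_MNG_TIM_FIXED_LEN)
+		return;				/* malformed element */
+	dtim_count = tim_info[DOT11_MNG_TIM_DTIM_COUNT];
+	dtim_period = tim_info[DOT11_MNG_TIM_DTIM_PERIOD];
+	/* tim_info + DOT11_MNG_TIM_PVB is the start of the partial virtual bitmap */
+	(void)dtim_count; (void)dtim_period;
+}
+#endif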
+
+/* TLV defines */
+#define TLV_TAG_OFF		0	/* tag offset */
+#define TLV_LEN_OFF		1	/* length offset */
+#define TLV_HDR_LEN		2	/* header length */
+#define TLV_BODY_OFF		2	/* body offset */
+#define TLV_BODY_LEN_MAX	255	/* max body length */
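+
+/* A minimal sketch walking a buffer of TLV-encoded elements with the offsets
+ * above; the function name and loop structure are hypothetical.
+ */
+#if 0	/* example only */
+static const uint8 *example_find_tlv(const uint8 *buf, int buflen, uint8 tag)
+{
+	while (buflen >= TLV_HDR_LEN) {
+		uint8 len = buf[TLV_LEN_OFF];
+
+		if (buflen < TLV_HDR_LEN + len)
+			break;			/* truncated element */
+		if (buf[TLV_TAG_OFF] == tag)
+			return buf;		/* points at the tag octet */
+		buf += TLV_HDR_LEN + len;
+		buflen -= TLV_HDR_LEN + len;
+	}
+	return NULL;
+}
+#endif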
+
+/* Management Frame Information Element IDs */
+#define DOT11_MNG_SSID_ID			0	/* d11 management SSID id */
+#define DOT11_MNG_RATES_ID			1	/* d11 management rates id */
+#define DOT11_MNG_FH_PARMS_ID			2	/* d11 management FH parameter id */
+#define DOT11_MNG_DS_PARMS_ID			3	/* d11 management DS parameter id */
+#define DOT11_MNG_CF_PARMS_ID			4	/* d11 management CF parameter id */
+#define DOT11_MNG_TIM_ID			5	/* d11 management TIM id */
+#define DOT11_MNG_IBSS_PARMS_ID			6	/* d11 management IBSS parameter id */
+#define DOT11_MNG_COUNTRY_ID			7	/* d11 management country id */
+#define DOT11_MNG_HOPPING_PARMS_ID		8	/* d11 management hopping parameter id */
+#define DOT11_MNG_HOPPING_TABLE_ID		9	/* d11 management hopping table id */
+#define DOT11_MNG_FTM_SYNC_INFO_ID		9	/* 11mc D4.3 */
+#define DOT11_MNG_REQUEST_ID			10	/* d11 management request id */
+#define DOT11_MNG_QBSS_LOAD_ID 			11	/* d11 management QBSS Load id */
+#define DOT11_MNG_EDCA_PARAM_ID			12	/* 11E EDCA Parameter id */
+#define DOT11_MNG_TSPEC_ID			13	/* d11 management TSPEC id */
+#define DOT11_MNG_TCLAS_ID			14	/* d11 management TCLAS id */
+#define DOT11_MNG_CHALLENGE_ID			16	/* d11 management challenge id */
+#define DOT11_MNG_PWR_CONSTRAINT_ID		32	/* 11H PowerConstraint */
+#define DOT11_MNG_PWR_CAP_ID			33	/* 11H PowerCapability */
+#define DOT11_MNG_TPC_REQUEST_ID 		34	/* 11H TPC Request */
+#define DOT11_MNG_TPC_REPORT_ID			35	/* 11H TPC Report */
+#define DOT11_MNG_SUPP_CHANNELS_ID		36	/* 11H Supported Channels */
+#define DOT11_MNG_CHANNEL_SWITCH_ID		37	/* 11H ChannelSwitch Announcement */
+#define DOT11_MNG_MEASURE_REQUEST_ID		38	/* 11H MeasurementRequest */
+#define DOT11_MNG_MEASURE_REPORT_ID		39	/* 11H MeasurementReport */
+#define DOT11_MNG_QUIET_ID			40	/* 11H Quiet */
+#define DOT11_MNG_IBSS_DFS_ID			41	/* 11H IBSS_DFS */
+#define DOT11_MNG_ERP_ID			42	/* d11 management ERP id */
+#define DOT11_MNG_TS_DELAY_ID			43	/* d11 management TS Delay id */
+#define DOT11_MNG_TCLAS_PROC_ID			44	/* d11 management TCLAS processing id */
+#define	DOT11_MNG_HT_CAP			45	/* d11 mgmt HT cap id */
+#define DOT11_MNG_QOS_CAP_ID			46	/* 11E QoS Capability id */
+#define DOT11_MNG_NONERP_ID			47	/* d11 management NON-ERP id */
+#define DOT11_MNG_RSN_ID			48	/* d11 management RSN id */
+#define DOT11_MNG_EXT_RATES_ID			50	/* d11 management ext. rates id */
+#define DOT11_MNG_AP_CHREP_ID			51	/* 11k AP Channel report id */
+#define DOT11_MNG_NEIGHBOR_REP_ID		52	/* 11k & 11v Neighbor report id */
+#define DOT11_MNG_RCPI_ID			53	/* 11k RCPI */
+#define DOT11_MNG_MDIE_ID			54	/* 11r Mobility domain id */
+#define DOT11_MNG_FTIE_ID			55	/* 11r Fast Bss Transition id */
+#define DOT11_MNG_FT_TI_ID			56	/* 11r Timeout Interval id */
+#define DOT11_MNG_RDE_ID			57	/* 11r RIC Data Element id */
+#define	DOT11_MNG_REGCLASS_ID			59	/* d11 management regulatory class id */
+#define DOT11_MNG_EXT_CSA_ID			60	/* d11 Extended CSA */
+#define	DOT11_MNG_HT_ADD			61	/* d11 mgmt additional HT info */
+#define	DOT11_MNG_EXT_CHANNEL_OFFSET		62	/* d11 mgmt ext channel offset */
+#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID	63	/* 11k bss average access delay */
+#define DOT11_MNG_ANTENNA_ID			64	/* 11k antenna id */
+#define DOT11_MNG_RSNI_ID			65	/* 11k RSNI id */
+#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID	66	/* 11k measurement pilot tx info id */
+#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID	67	/* 11k bss aval admission cap id */
+#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID	68	/* 11k bss AC access delay id */
+#define DOT11_MNG_WAPI_ID			68	/* d11 management WAPI id */
+#define DOT11_MNG_TIME_ADVERTISE_ID	69	/* 11p time advertisement */
+#define DOT11_MNG_RRM_CAP_ID		70	/* 11k radio measurement capability */
+#define DOT11_MNG_MULTIPLE_BSSID_ID		71	/* 11k multiple BSSID id */
+#define	DOT11_MNG_HT_BSS_COEXINFO_ID		72	/* d11 mgmt OBSS Coexistence INFO */
+#define	DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID	73	/* d11 mgmt OBSS Intolerant Channel list */
+#define	DOT11_MNG_HT_OBSS_ID			74	/* d11 mgmt OBSS HT info */
+#define DOT11_MNG_MMIE_ID			76	/* d11 mgmt MIC IE */
+#define DOT11_MNG_FMS_DESCR_ID			86	/* 11v FMS descriptor */
+#define DOT11_MNG_FMS_REQ_ID			87	/* 11v FMS request id */
+#define DOT11_MNG_FMS_RESP_ID			88	/* 11v FMS response id */
+#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID	90	/* 11v bss max idle id */
+#define DOT11_MNG_TFS_REQUEST_ID		91	/* 11v tfs request id */
+#define DOT11_MNG_TFS_RESPONSE_ID		92	/* 11v tfs response id */
+#define DOT11_MNG_WNM_SLEEP_MODE_ID		93	/* 11v wnm-sleep mode id */
+#define DOT11_MNG_TIMBC_REQ_ID			94	/* 11v TIM broadcast request id */
+#define DOT11_MNG_TIMBC_RESP_ID			95	/* 11v TIM broadcast response id */
+#define DOT11_MNG_CHANNEL_USAGE			97	/* 11v channel usage */
+#define DOT11_MNG_TIME_ZONE_ID			98	/* 11v time zone */
+#define DOT11_MNG_DMS_REQUEST_ID		99	/* 11v dms request id */
+#define DOT11_MNG_DMS_RESPONSE_ID		100	/* 11v dms response id */
+#define DOT11_MNG_LINK_IDENTIFIER_ID		101	/* 11z TDLS Link Identifier IE */
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID		102	/* 11z TDLS Wakeup Schedule IE */
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID	104	/* 11z TDLS Channel Switch Timing IE */
+#define DOT11_MNG_PTI_CONTROL_ID		105	/* 11z TDLS PTI Control IE */
+#define DOT11_MNG_PU_BUFFER_STATUS_ID		106	/* 11z TDLS PU Buffer Status IE */
+#define DOT11_MNG_INTERWORKING_ID		107	/* 11u interworking */
+#define DOT11_MNG_ADVERTISEMENT_ID		108	/* 11u advertisement protocol */
+#define DOT11_MNG_EXP_BW_REQ_ID			109	/* 11u expedited bandwidth request */
+#define DOT11_MNG_QOS_MAP_ID			110	/* 11u QoS map set */
+#define DOT11_MNG_ROAM_CONSORT_ID		111	/* 11u roaming consortium */
+#define DOT11_MNG_EMERGCY_ALERT_ID		112	/* 11u emergency alert identifier */
+#define DOT11_MNG_MESH_CONFIG			113	/* Mesh Configuration */
+#define DOT11_MNG_MESH_ID			114	/* Mesh ID */
+#define DOT11_MNG_MESH_PEER_MGMT_ID		117	/* Mesh PEER MGMT IE */
+#define DOT11_MNG_EXT_CAP_ID			127	/* d11 mgmt ext capability */
+#define DOT11_MNG_EXT_PREQ_ID			130	/* Mesh PREQ IE */
+#define DOT11_MNG_EXT_PREP_ID			131	/* Mesh PREP IE */
+#define DOT11_MNG_EXT_PERR_ID			132	/* Mesh PERR IE */
+#define	DOT11_MNG_VHT_CAP_ID			191	/* d11 mgmt VHT cap id */
+#define	DOT11_MNG_VHT_OPERATION_ID		192	/* d11 mgmt VHT op id */
+#define	DOT11_MNG_EXT_BSSLOAD_ID		193	/* d11 mgmt VHT extended bss load id */
+#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID	194	/* Wide BW Channel Switch IE */
+#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195	/* VHT transmit Power Envelope IE */
+#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID	196	/* Channel Switch Wrapper IE */
+#define DOT11_MNG_AID_ID			197	/* Association ID  IE */
+#define	DOT11_MNG_OPER_MODE_NOTIF_ID		199	/* d11 mgmt VHT oper mode notif */
+#define DOT11_MNG_RNR_ID			201
+#define DOT11_MNG_HE_CAP_ID			202
+#define DOT11_MNG_HE_OP_ID			203
+#define DOT11_MNG_FTM_PARAMS_ID			206
+#define DOT11_MNG_TWT_ID			216	/* 11ah D5.0 */
+#define DOT11_MNG_WPA_ID			221	/* d11 management WPA id */
+#define DOT11_MNG_PROPR_ID			221
+/* should start using this one instead of above two */
+#define DOT11_MNG_VS_ID				221	/* d11 management Vendor Specific IE */
+#define DOT11_MNG_MESH_CSP_ID			222	/* d11 Mesh Channel Switch Parameter */
+#define DOT11_MNG_FILS_IND_ID			240	/* 11ai FILS Indication element */
+
+/* The following ID extensions should be defined >= 255
+ * i.e. the values should include 255 (DOT11_MNG_ID_EXT_ID + ID Extension).
+ */
+#define DOT11_MNG_ID_EXT_ID			255	/* Element ID Extension 11mc D4.3 */
+#define DOT11_MNG_RAPS_ID	(DOT11_MNG_ID_EXT_ID+11)  /* OFDMA Random Access Parameter Set */
+
+/* FILS ext ids */
+#define FILS_REQ_PARAMS_EXT_ID		2
+#define DOT11_MNG_FILS_REQ_PARAMS	(DOT11_MNG_ID_EXT_ID + FILS_REQ_PARAMS_EXT_ID)
+#define FILS_SESSION_EXT_ID		4
+#define DOT11_MNG_FILS_SESSION		(DOT11_MNG_ID_EXT_ID + FILS_SESSION_EXT_ID)
+#define FILS_HLP_CONTAINER_EXT_ID		5
+#define DOT11_MNG_FILS_HLP_CONTAINER		(DOT11_MNG_ID_EXT_ID + FILS_HLP_CONTAINER_EXT_ID)
+#define FILS_WRAPPED_DATA_EXT_ID	8
+#define DOT11_MNG_FILS_WRAPPED_DATA	(DOT11_MNG_ID_EXT_ID + FILS_WRAPPED_DATA_EXT_ID)
+#define FILS_NONCE_EXT_ID		13
+#define DOT11_MNG_FILS_NONCE		(DOT11_MNG_ID_EXT_ID + FILS_NONCE_EXT_ID)
+
+#define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\
+	((_ie)->id == DOT11_MNG_ID_EXT_ID) && \
+	((_ie)->len > 0) && \
+	((_id) == ((uint8 *)(_ie) + TLV_HDR_LEN)[0]))
+
+#define DOT11_MNG_IE_ID_EXT_INIT(_ie, _id, _len) do {\
+		(_ie)->id = DOT11_MNG_ID_EXT_ID; \
+		(_ie)->len = _len; \
+		(_ie)->id_ext = _id; \
+	} while (0)
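+
+/* A minimal sketch of matching an Element ID Extension IE with the helper
+ * above; the struct and function names are hypothetical, but the layout
+ * mirrors the id/len/id_ext ordering the macros expect.
+ */
+#if 0	/* example only */
+struct example_ext_ie {
+	uint8 id;		/* DOT11_MNG_ID_EXT_ID (255) */
+	uint8 len;		/* covers id_ext plus the body */
+	uint8 id_ext;		/* e.g. FILS_SESSION_EXT_ID */
+};
+
+static int example_is_fils_session(struct example_ext_ie *ie)
+{
+	return DOT11_MNG_IE_ID_EXT_MATCH(ie, FILS_SESSION_EXT_ID);
+}
+#endif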
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rounding up for fractional values.
+ * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
+
+#define DOT11_RATE_1M   2       /* 1  Mbps in 500kbps units */
+#define DOT11_RATE_2M   4       /* 2  Mbps in 500kbps units */
+#define DOT11_RATE_5M5  11      /* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M  22      /* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M   12      /* 6  Mbps in 500kbps units */
+#define DOT11_RATE_9M   18      /* 9  Mbps in 500kbps units */
+#define DOT11_RATE_12M  24      /* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M  36      /* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M  48      /* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M  72      /* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M  96      /* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M  108     /* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX  108     /* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * The supported rates octets are defined with the MSB indicating a Basic Rate
+ * and bits 0-6 as the rate value
+ */
+#define DOT11_RATE_BASIC                0x80 /* flag for a Basic Rate */
+#define DOT11_RATE_MASK                 0x7F /* mask for numeric part of rate */
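+
+/* A minimal sketch decoding one octet of a Supported Rates element with the
+ * two masks above; the function and variable names are hypothetical.
+ */
+#if 0	/* example only */
+static void example_decode_rate_octet(uint8 octet)
+{
+	int is_basic = (octet & DOT11_RATE_BASIC) != 0;	/* Basic Rate flag */
+	uint8 rate = octet & DOT11_RATE_MASK;		/* rate in 500kbps units */
+	uint32 kbps = rate * 500;			/* e.g. 0x8B -> basic 5.5 Mbps */
+
+	(void)is_basic; (void)kbps;
+}
+#endif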
+
+/* BSS Membership Selector parameters
+ * 802.11-2016 (and 802.11ax-D1.1), Sec 9.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT         0xFF  /* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT        0xFE  /* Basic 0x80 + 126, VHT Required to join */
+#define DOT11_BSS_MEMBERSHIP_HE         0xFD  /* Basic 0x80 + 125, HE Required to join */
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN			1	/* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT		0x01	/* NonERP (802.11b) STAs are present
+							 *in the BSS
+							 */
+#define DOT11_MNG_USE_PROTECTION		0x02	/* Use protection mechanisms for
+							 *ERP-OFDM frames
+							 */
+#define DOT11_MNG_BARKER_PREAMBLE		0x04	/* Short Preambles: 0 == allowed,
+							 * 1 == not allowed
+							 */
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN		4	/* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE			4	/* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS				0x0001	/* d11 cap. ESS */
+#define DOT11_CAP_IBSS				0x0002	/* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE			0x0004	/* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ			0x0008	/* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY			0x0010	/* d11 cap. privacy */
+#define DOT11_CAP_SHORT				0x0020	/* d11 cap. short */
+#define DOT11_CAP_PBCC				0x0040	/* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY			0x0080	/* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM			0x0100	/* d11 cap. spectrum */
+#define DOT11_CAP_QOS				0x0200	/* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT			0x0400	/* d11 cap. shortslot */
+#define DOT11_CAP_APSD				0x0800	/* d11 cap. apsd */
+#define DOT11_CAP_RRM				0x1000	/* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM			0x2000	/* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA			0x4000	/* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA			0x8000	/* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT		0
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING	2
+/* scheduled PSMP support bit position */
+#define DOT11_EXT_CAP_SPSMP			6
+/*  Flexible Multicast Service */
+#define DOT11_EXT_CAP_FMS			11
+/* proxy ARP service support bit position */
+#define DOT11_EXT_CAP_PROXY_ARP			12
+/* Civic Location */
+#define DOT11_EXT_CAP_CIVIC_LOC			14
+/* Geospatial Location */
+#define DOT11_EXT_CAP_LCI			15
+/* Traffic Filter Service */
+#define DOT11_EXT_CAP_TFS			16
+/* WNM-Sleep Mode */
+#define DOT11_EXT_CAP_WNM_SLEEP			17
+/* TIM Broadcast service */
+#define DOT11_EXT_CAP_TIMBC			18
+/* BSS Transition Management support bit position */
+#define DOT11_EXT_CAP_BSSTRANS_MGMT		19
+/* Direct Multicast Service */
+#define DOT11_EXT_CAP_DMS			26
+/* Interworking support bit position */
+#define DOT11_EXT_CAP_IW			31
+/* QoS map support bit position */
+#define DOT11_EXT_CAP_QOS_MAP		32
+/* service Interval granularity bit position and mask */
+#define DOT11_EXT_CAP_SI			41
+#define DOT11_EXT_CAP_SI_MASK			0x0E
+/* Location Identifier service */
+#define DOT11_EXT_CAP_IDENT_LOC			44
+/* WNM notification */
+#define DOT11_EXT_CAP_WNM_NOTIF			46
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
+#define DOT11_EXT_CAP_OPER_MODE_NOTIF		62
+/* Fine timing measurement - D3.0 */
+#define DOT11_EXT_CAP_FTM_RESPONDER		70
+#define DOT11_EXT_CAP_FTM_INITIATOR		71 /* tentative 11mcd3.0 */
+/* TWT support */
+#define DOT11_EXT_CAP_TWT_REQUESTER		75
+#define DOT11_EXT_CAP_TWT_RESPONDER		76
+/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset.
+ * Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path.
+ *       It will cause ROM invalidation otherwise.
+ */
+#define DOT11_EXT_CAP_MAX_IDX	76
+
+#ifdef WL_FTM
+#define DOT11_EXT_CAP_MAX_BIT_IDX		95	/* !!!update this please!!! */
+#else
+#define DOT11_EXT_CAP_MAX_BIT_IDX		62	/* !!!update this please!!! */
+#endif
+/* extended capability */
+#ifndef DOT11_EXTCAP_LEN_MAX
+#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3)
+#endif
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+	uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
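+
+/* A minimal sketch testing one bit of the packed extcap array above
+ * (byte index = bit / 8, bit index = bit % 8); names are hypothetical.
+ */
+#if 0	/* example only */
+static int example_extcap_isset(const dot11_extcap_t *ec, uint8 bit)
+{
+	if ((bit >> 3) >= DOT11_EXTCAP_LEN_MAX)
+		return 0;
+	return (ec->extcap[bit >> 3] & (1 << (bit & 7))) != 0;
+}
+/* e.g. example_extcap_isset(ec, DOT11_EXT_CAP_BSSTRANS_MGMT) */
+#endif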
+
+/* VHT Operating mode bit fields -  (11ac D8.0/802.11-2016 - 9.4.1.53) */
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3
+#define DOT11_OPER_MODE_160_8080_BW_SHIFT 2
+#define DOT11_OPER_MODE_160_8080_BW_MASK 0x04
+#define DOT11_OPER_MODE_NOLDPC_SHIFT 3
+#define DOT11_OPER_MODE_NOLDPC_MASK 0x08
+#define DOT11_OPER_MODE_RXNSS_SHIFT 4
+#define DOT11_OPER_MODE_RXNSS_MASK 0x70
+#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7
+#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		 DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		 DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_D8_OPER_MODE(type, nss, ldpc, bw160_8080, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		 DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((ldpc) << DOT11_OPER_MODE_NOLDPC_SHIFT & DOT11_OPER_MODE_NOLDPC_MASK) |\
+	((bw160_8080) << DOT11_OPER_MODE_160_8080_BW_SHIFT &\
+		 DOT11_OPER_MODE_160_8080_BW_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		 DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \
+	(((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\
+		>> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT)
+#define DOT11_OPER_MODE_160_8080(mode) \
+	(((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)\
+		>> DOT11_OPER_MODE_160_8080_BW_SHIFT)
+#define DOT11_OPER_MODE_RXNSS(mode) \
+	((((mode) & DOT11_OPER_MODE_RXNSS_MASK)		\
+		>> DOT11_OPER_MODE_RXNSS_SHIFT) + 1)
+#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \
+	(((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\
+		>> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT)
+
+#define DOT11_OPER_MODE_20MHZ 0
+#define DOT11_OPER_MODE_40MHZ 1
+#define DOT11_OPER_MODE_80MHZ 2
+#define DOT11_OPER_MODE_160MHZ 3
+#define DOT11_OPER_MODE_8080MHZ 3
+#define DOT11_OPER_MODE_1608080MHZ 1
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_160_8080_BW_MASK))
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_160_8080_BW_MASK))
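+
+/* A minimal sketch building an operating mode octet for 2 RX spatial streams
+ * at 80 MHz and reading the fields back; names are hypothetical.
+ */
+#if 0	/* example only */
+static void example_oper_mode(void)
+{
+	uint8 mode = DOT11_OPER_MODE(0, 2, DOT11_OPER_MODE_80MHZ);
+	uint8 nss = DOT11_OPER_MODE_RXNSS(mode);		/* 2: undoes the nss-1 encoding */
+	uint8 bw = DOT11_OPER_MODE_CHANNEL_WIDTH(mode);		/* DOT11_OPER_MODE_80MHZ */
+
+	(void)nss; (void)bw;
+}
+#endif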
+
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
+BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
+	uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t;
+
+#define DOT11_OPER_MODE_NOTIF_IE_LEN 1
+
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT	0x01	/* 20/40 BSS Coexistence Management support */
+
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN		2	/* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF		0	/* category offset */
+#define DOT11_ACTION_ACT_OFF		1	/* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK	0x80	/* category error mask */
+#define DOT11_ACTION_CAT_MASK		0x7F	/* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG	0	/* category spectrum management */
+#define DOT11_ACTION_CAT_QOS		1	/* category QoS */
+#define DOT11_ACTION_CAT_DLS		2	/* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK	3	/* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC		4	/* category public */
+#define DOT11_ACTION_CAT_RRM		5	/* category radio measurements */
+#define DOT11_ACTION_CAT_FBT	6	/* category fast bss transition */
+#define DOT11_ACTION_CAT_HT		7	/* category for HT */
+#define	DOT11_ACTION_CAT_SA_QUERY	8	/* security association query */
+#define	DOT11_ACTION_CAT_PDPA		9	/* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM		10	/* category for WNM */
+#define DOT11_ACTION_CAT_UWNM		11	/* category for Unprotected WNM */
+#define DOT11_ACTION_CAT_MESH		13	/* category for Mesh */
+#define DOT11_ACTION_CAT_SELFPROT	15	/* category for Mesh, self protected */
+#define DOT11_ACTION_NOTIFICATION	17
+#define DOT11_ACTION_CAT_VHT		21	/* VHT action */
+#define DOT11_ACTION_CAT_S1G		22	/* S1G action */
+#define DOT11_ACTION_CAT_HE		27	/* HE action frame */
+#define DOT11_ACTION_CAT_FILS		26	/* FILS action frame */
+#define DOT11_ACTION_CAT_VSP		126	/* protected vendor specific */
+#define DOT11_ACTION_CAT_VS		127	/* category Vendor Specific */
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ		0	/* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP		1	/* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ		2	/* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP		3	/* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA		5	/* d11 extended CSA for 11n */
+
+/* QoS action ids */
+#define DOT11_QOS_ACTION_ADDTS_REQ	0	/* d11 action ADDTS request */
+#define DOT11_QOS_ACTION_ADDTS_RESP	1	/* d11 action ADDTS response */
+#define DOT11_QOS_ACTION_DELTS		2	/* d11 action DELTS */
+#define DOT11_QOS_ACTION_SCHEDULE	3	/* d11 action schedule */
+#define DOT11_QOS_ACTION_QOS_MAP	4	/* d11 action QOS map */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH	0	/* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS	1	/* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG	0	/* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_PUB_ACTION_VENDOR_SPEC	9	/* Vendor specific */
+#define DOT11_PUB_ACTION_GAS_CB_REQ	12	/* GAS Comeback Request */
+#define DOT11_PUB_ACTION_FTM_REQ	32		/* FTM request */
+#define DOT11_PUB_ACTION_FTM		33		/* FTM measurement */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ	0	/* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP	1	/* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA		2	/* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP	0x0001	/* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK	0x0002	/* policy mask(ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT	1	/* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK	0x003c	/* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT	2	/* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK	0xffc0	/* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT	6	/* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED	0	/* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE	1	/* immediate BA policy */
+
+/* Fast Transition action types */
+#define DOT11_FT_ACTION_FT_RESERVED		0
+#define DOT11_FT_ACTION_FT_REQ			1	/* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES			2	/* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON			3	/* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK			4	/* FBT ack */
+
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ			0	/* DLS Request */
+#define DOT11_DLS_ACTION_RESP			1	/* DLS Response */
+#define DOT11_DLS_ACTION_TD			2	/* DLS Teardown */
+
+/* Wireless Network Management (WNM) action types */
+#define DOT11_WNM_ACTION_EVENT_REQ		0
+#define DOT11_WNM_ACTION_EVENT_REP		1
+#define DOT11_WNM_ACTION_DIAG_REQ		2
+#define DOT11_WNM_ACTION_DIAG_REP		3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ		4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP		5
+#define DOT11_WNM_ACTION_BSSTRANS_QUERY		6
+#define DOT11_WNM_ACTION_BSSTRANS_REQ		7
+#define DOT11_WNM_ACTION_BSSTRANS_RESP		8
+#define DOT11_WNM_ACTION_FMS_REQ		9
+#define DOT11_WNM_ACTION_FMS_RESP		10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ	11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP	12
+#define DOT11_WNM_ACTION_TFS_REQ		13
+#define DOT11_WNM_ACTION_TFS_RESP		14
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ		15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ		16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP		17
+#define DOT11_WNM_ACTION_TIMBC_REQ		18
+#define DOT11_WNM_ACTION_TIMBC_RESP		19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD	20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ		21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP	22
+#define DOT11_WNM_ACTION_DMS_REQ		23
+#define DOT11_WNM_ACTION_DMS_RESP		24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ	25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ		26
+#define DOT11_WNM_ACTION_NOTFCTN_RESP		27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP	28
+
+/* Unprotected Wireless Network Management (WNM) action types */
+#define DOT11_UWNM_ACTION_TIM			0
+#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT	1
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF				0	/* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT			1	/* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF	2	/* Operating mode notif'n */
+
+/* FILS category action types - 802.11ai D11.0 - 9.6.8.1 */
+#define DOT11_FILS_ACTION_DISCOVERY		34	/* FILS Discovery */
+
+/** DLS Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: req (0) */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint16 cap;				/* capability */
+	uint16 timeout;			/* timeout value */
+	uint8 data[1];				/* IEs: supported rates, extended supported rates, HT cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18	/* Fixed length */
+
+/** DLS response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: req (0) */
+	uint16 status;				/* status code field */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint8 data[1];				/* optional: capability, rate ... */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16	/* Fixed length */
+
+
+/* ************* 802.11v related definitions. ************* */
+
+/** BSS Management Transition Query frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_query (6) */
+	uint8 token;			/* dialog token */
+	uint8 reason;			/* transition query reason */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
+#define DOT11_BSSTRANS_QUERY_LEN 4	/* Fixed length */
+
+/* BTM transition reason */
+#define DOT11_BSSTRANS_REASON_UNSPECIFIED		0
+#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS		1
+#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY		2
+#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY	3
+#define DOT11_BSSTRANS_REASON_FIRST_ASSOC		4
+#define DOT11_BSSTRANS_REASON_LOAD_BALANCING		5
+#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND		6
+#define DOT11_BSSTRANS_REASON_DEAUTH_RX			7
+#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL	8
+#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL		9
+#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL	10
+#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL		11
+#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS	12
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX	13
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX	14
+#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL	15
+#define DOT11_BSSTRANS_REASON_LOW_RSSI			16
+#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211	17
+#define DOT11_BSSTRANS_REASON_RX_BTM_REQ		18
+#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED	19
+#define DOT11_BSSTRANS_REASON_LEAVING_ESS		20
+
+/** BSS Management Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_req (7) */
+	uint8 token;			/* dialog token */
+	uint8 reqmode;			/* transition request mode */
+	uint16 disassoc_tmr;		/* disassociation timer */
+	uint8 validity_intrvl;		/* validity interval */
+	uint8 data[1];			/* optional: BSS term duration, ... */
+						/* ...session info URL, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
+#define DOT11_BSSTRANS_REQ_LEN 7	/* Fixed length */
+
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
+#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL		0x01
+#define DOT11_BSSTRANS_REQMODE_ABRIDGED			0x02
+#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT	0x04
+#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL		0x08
+#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT	0x10
+
+/** BSS Management transition response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_resp (8) */
+	uint8 token;			/* dialog token */
+	uint8 status;			/* transition status */
+	uint8 term_delay;		/* BSS termination delay */
+	uint8 data[1];			/* optional: BSSID target, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
+#define DOT11_BSSTRANS_RESP_LEN 5	/* Fixed length */
+
+/* BSS Mgmt Transition Response Status Field */
+#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT			0
+#define DOT11_BSSTRANS_RESP_STATUS_REJECT			1
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN		2
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP		3
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED		4
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ		5
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED	6
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS		7
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS		8
+
+
+/** BSS Max Idle Period element */
+BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
+	uint8 id;				/* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
+	uint8 len;
+	uint16 max_idle_period;			/* in unit of 1000 TUs */
+	uint8 idle_opt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN	3	/* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED	1	/* BSS max idle option */
+
+/** TIM Broadcast request element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
+	uint8 id;				/* 94, DOT11_MNG_TIMBC_REQ_ID */
+	uint8 len;
+	uint8 interval;				/* in unit of beacon interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
+#define DOT11_TIMBC_REQ_IE_LEN		1	/* Fixed length */
+
+/** TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* TIM broadcast request element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req dot11_timbc_req_t;
+#define DOT11_TIMBC_REQ_LEN		3	/* Fixed length */
+
+/** TIM Broadcast response element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
+	uint8 id;				/* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
+	uint8 len;
+	uint8 status;				/* status of add request */
+	uint8 interval;				/* in unit of beacon interval */
+	int32 offset;				/* in unit of ms */
+	uint16 high_rate;			/* in unit of 0.5 Mb/s */
+	uint16 low_rate;			/* in unit of 0.5 Mb/s */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
+#define DOT11_TIMBC_DENY_RESP_IE_LEN	1	/* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN	10	/* Accept. Fixed length */
+
+#define DOT11_TIMBC_STATUS_ACCEPT		0
+#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP	1
+#define DOT11_TIMBC_STATUS_DENY			2
+#define DOT11_TIMBC_STATUS_OVERRIDDEN		3
+#define DOT11_TIMBC_STATUS_RESERVED		4
+
+/** TIM Broadcast response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* TIM broadcast response element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp dot11_timbc_resp_t;
+#define DOT11_TIMBC_RESP_LEN	3	/* Fixed length */
+
+/** TIM element */
+BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
+	uint8 id;			/* 5, DOT11_MNG_TIM_ID	 */
+	uint8 len;			/* 4 - 255 */
+	uint8 dtim_count;		/* DTIM decrementing counter */
+	uint8 dtim_period;		/* DTIM period */
+	uint8 bitmap_control;		/* AID 0 + bitmap offset */
+	uint8 pvb[1];			/* Partial Virtual Bitmap, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tim_ie dot11_tim_ie_t;
+#define DOT11_TIM_IE_FIXED_LEN	3	/* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN	5	/* Fixed length, with id and len */
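+
+/*
+ * Sketch of how a STA might test its AID against the Partial Virtual Bitmap
+ * of a received TIM element. pvb_len is the number of PVB octets actually
+ * present (element length minus the three fixed octets). Illustrative only;
+ * the helper name is not part of this header.
+ */
+static inline bool
+dot11_tim_aid_is_set(const dot11_tim_ie_t *tim, uint8 pvb_len, uint16 aid)
+{
+	uint16 n1 = 2 * (tim->bitmap_control >> 1);	/* first octet carried in the PVB */
+	uint16 octet = aid / 8;				/* octet of this AID in the full bitmap */
+
+	if (octet < n1 || octet >= (uint16)(n1 + pvb_len))
+		return 0;
+	return (tim->pvb[octet - n1] & (1 << (aid % 8))) != 0;
+}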
+
+/** TIM Broadcast frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc {
+	uint8 category;			/* category of action frame (11) */
+	uint8 action;			/* action: TIM (0) */
+	uint8 check_beacon;		/* need to check-beacon */
+	uint8 tsf[8];			/* Time Synchronization Function */
+	dot11_tim_ie_t tim_ie;		/* TIM element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc dot11_timbc_t;
+#define DOT11_TIMBC_HDR_LEN	(sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
+#define DOT11_TIMBC_FIXED_LEN	(sizeof(dot11_timbc_t) - 1)	/* Fixed length */
+#define DOT11_TIMBC_LEN			11	/* Fixed length */
+
+/** TCLAS frame classifier type */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
+	uint8 type;
+	uint8 mask;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
+#define DOT11_TCLAS_FC_HDR_LEN		2	/* Fixed length */
+
+#define DOT11_TCLAS_MASK_0		0x1
+#define DOT11_TCLAS_MASK_1		0x2
+#define DOT11_TCLAS_MASK_2		0x4
+#define DOT11_TCLAS_MASK_3		0x8
+#define DOT11_TCLAS_MASK_4		0x10
+#define DOT11_TCLAS_MASK_5		0x20
+#define DOT11_TCLAS_MASK_6		0x40
+#define DOT11_TCLAS_MASK_7		0x80
+
+#define DOT11_TCLAS_FC_0_ETH		0
+#define DOT11_TCLAS_FC_1_IP		1
+#define DOT11_TCLAS_FC_2_8021Q		2
+#define DOT11_TCLAS_FC_3_OFFSET		3
+#define DOT11_TCLAS_FC_4_IP_HIGHER	4
+#define DOT11_TCLAS_FC_5_8021D		5
+
+/** TCLAS frame classifier type 0 parameters for Ethernet */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
+	uint8 type;
+	uint8 mask;
+	uint8 sa[ETHER_ADDR_LEN];
+	uint8 da[ETHER_ADDR_LEN];
+	uint16 eth_type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
+#define DOT11_TCLAS_FC_0_ETH_LEN	16
+
+/** TCLAS frame classifier type 1 parameters for IPV4 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint32 src_ip;
+	uint32 dst_ip;
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 protocol;
+	uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
+#define DOT11_TCLAS_FC_1_IPV4_LEN	18
+
+/** TCLAS frame classifier type 2 parameters for 802.1Q */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
+	uint8 type;
+	uint8 mask;
+	uint16 tci;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
+#define DOT11_TCLAS_FC_2_8021Q_LEN	4
+
+/** TCLAS frame classifier type 3 parameters for filter offset */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
+	uint8 type;
+	uint8 mask;
+	uint16 offset;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
+#define DOT11_TCLAS_FC_3_FILTER_LEN	4
+
+/** TCLAS frame classifier type 4 parameters for IPv4 are the same as TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
+#define DOT11_TCLAS_FC_4_IPV4_LEN	DOT11_TCLAS_FC_1_IPV4_LEN
+
+/** TCLAS frame classifier type 4 parameters for IPV6 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint8 saddr[16];
+	uint8 daddr[16];
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 nexthdr;
+	uint8 flow_lbl[3];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
+#define DOT11_TCLAS_FC_4_IPV6_LEN	44
+
+/** TCLAS frame classifier type 5 parameters for 802.1D */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
+	uint8 type;
+	uint8 mask;
+	uint8 pcp;
+	uint8 cfi;
+	uint16 vid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
+#define DOT11_TCLAS_FC_5_8021D_LEN	6
+
+/** TCLAS frame classifier type parameters */
+BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
+	uint8 data[1];
+	dot11_tclas_fc_hdr_t hdr;
+	dot11_tclas_fc_0_eth_t t0_eth;
+	dot11_tclas_fc_1_ipv4_t	t1_ipv4;
+	dot11_tclas_fc_2_8021q_t t2_8021q;
+	dot11_tclas_fc_3_filter_t t3_filter;
+	dot11_tclas_fc_4_ipv4_t	t4_ipv4;
+	dot11_tclas_fc_4_ipv6_t	t4_ipv6;
+	dot11_tclas_fc_5_8021d_t t5_8021d;
+} BWL_POST_PACKED_STRUCT;
+typedef union dot11_tclas_fc dot11_tclas_fc_t;
+
+#define DOT11_TCLAS_FC_MIN_LEN		4	/* Classifier Type 2 has the min size */
+#define DOT11_TCLAS_FC_MAX_LEN		254
+
+/** TCLAS element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
+	uint8 id;				/* 14, DOT11_MNG_TCLAS_ID */
+	uint8 len;
+	uint8 user_priority;
+	dot11_tclas_fc_t fc;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_ie dot11_tclas_ie_t;
+#define DOT11_TCLAS_IE_LEN		3	/* Fixed length, include id and len */
+
+/** TCLAS processing element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
+	uint8 id;				/* 44, DOT11_MNG_TCLAS_PROC_ID */
+	uint8 len;
+	uint8 process;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
+#define DOT11_TCLAS_PROC_IE_LEN		3	/* Fixed length, include id and len */
+
+#define DOT11_TCLAS_PROC_MATCHALL	0	/* All higher-layer elements need to match */
+#define DOT11_TCLAS_PROC_MATCHONE	1	/* One higher-layer element needs to match */
+#define DOT11_TCLAS_PROC_NONMATCH	2	/* No higher-layer element needs to match */
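+
+/*
+ * Minimal sketch, assuming host byte order for the caller's parameters and
+ * leaving on-air byte ordering to the caller: populate a type 1 (IPv4) frame
+ * classifier. The classifier mask bits map to the classifier parameters in
+ * field order (bit 0 = version, bit 1 = source IP, ...). The helper name is
+ * hypothetical.
+ */
+static inline void
+dot11_tclas_fill_ipv4(dot11_tclas_fc_1_ipv4_t *fc, uint32 src_ip, uint32 dst_ip,
+	uint16 src_port, uint16 dst_port, uint8 protocol)
+{
+	fc->type = DOT11_TCLAS_FC_1_IP;
+	fc->mask = DOT11_TCLAS_MASK_1 | DOT11_TCLAS_MASK_2 |	/* src/dst IP address */
+		DOT11_TCLAS_MASK_3 | DOT11_TCLAS_MASK_4 |	/* src/dst port */
+		DOT11_TCLAS_MASK_6;				/* protocol */
+	fc->version = 4;
+	fc->src_ip = src_ip;
+	fc->dst_ip = dst_ip;
+	fc->src_port = src_port;
+	fc->dst_port = dst_port;
+	fc->dscp = 0;
+	fc->protocol = protocol;
+	fc->reserved = 0;
+}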
+
+
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN		57	/* Fixed length */
+
+/** TFS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
+	uint8 id;				/* 91, DOT11_MNG_TFS_REQUEST_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 actcode;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
+#define DOT11_TFS_REQ_IE_LEN		2	/* Fixed length, without id and len */
+
+/** TFS request action codes (bitfield) */
+#define DOT11_TFS_ACTCODE_DELETE	1
+#define DOT11_TFS_ACTCODE_NOTIFY	2
+
+/** TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID		1
+#define DOT11_TFS_REQ_VENDOR_SE_ID	221
+
+/** TFS subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 data[1];				/* TCLAS element(s) + optional TCLAS proc */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_se dot11_tfs_se_t;
+
+
+/** TFS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
+	uint8 id;				/* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
+#define DOT11_TFS_RESP_IE_LEN		1	/* Fixed length, without id and len */
+
+/** TFS response subelement IDs (same subelements, but different IDs than in the TFS request) */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID		1
+#define DOT11_TFS_RESP_TFS_SE_ID		2
+#define DOT11_TFS_RESP_VENDOR_SE_ID		221
+
+/** TFS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
+	uint8 sub_id;				/* 1, DOT11_TFS_RESP_TFS_STATUS_SE_ID */
+	uint8 len;
+	uint8 resp_st;
+	uint8 data[1];				/* Potential dot11_tfs_se_t included */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
+#define DOT11_TFS_STATUS_SE_LEN			1	/* Fixed length, without id and len */
+
+/* The following definitions should be merged into the FMS_TFS macros below */
+/* TFS Response status codes. Identical to the FMS element status codes, without N/A */
+#define DOT11_TFS_STATUS_ACCEPT			0
+#define DOT11_TFS_STATUS_DENY_FORMAT		1
+#define DOT11_TFS_STATUS_DENY_RESOURCE		2
+#define DOT11_TFS_STATUS_DENY_POLICY		4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY		7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP	14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT		0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT	1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE	2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI	3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY	4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI	6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY		7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI	8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE		9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY	10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE	11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO	12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI	13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP	14
+
+/** TFS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS request (13) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req dot11_tfs_req_t;
+#define DOT11_TFS_REQ_LEN		3	/* Fixed length */
+
+/** TFS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS response (14) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp dot11_tfs_resp_t;
+#define DOT11_TFS_RESP_LEN		3	/* Fixed length */
+
+/** TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify request (15) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN	3	/* Fixed length */
+
+/** TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify response (28) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN	3	/* Fixed length */
+
+
+/** WNM-Sleep Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep request (16) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
+#define DOT11_WNM_SLEEP_REQ_LEN		3	/* Fixed length */
+
+/** WNM-Sleep Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep response (17) */
+	uint8 token;				/* dialog token */
+	uint16 key_len;				/* key data length */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
+#define DOT11_WNM_SLEEP_RESP_LEN	5	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK	0
+#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK	1
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_length;
+	uint8 rsc[8];
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN	11	/* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN	43	/* without sub_id and len */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_id;
+	uint8 pn[6];
+	uint8 key[16];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
+	uint8 id;				/* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
+	uint8 len;
+	uint8 act_type;
+	uint8 resp_status;
+	uint16 interval;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
+#define DOT11_WNM_SLEEP_IE_LEN		4	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER	0
+#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT	1
+
+#define DOT11_WNM_SLEEP_RESP_ACCEPT	0
+#define DOT11_WNM_SLEEP_RESP_UPDATE	1
+#define DOT11_WNM_SLEEP_RESP_DENY	2
+#define DOT11_WNM_SLEEP_RESP_DENY_TEMP	3
+#define DOT11_WNM_SLEEP_RESP_DENY_KEY	4
+#define DOT11_WNM_SLEEP_RESP_DENY_INUSE	5
+#define DOT11_WNM_SLEEP_RESP_LAST	6
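+
+/*
+ * Sketch only: build a WNM-Sleep Mode element requesting entry into sleep for
+ * 'interval' DTIM intervals. DOT11_MNG_WNM_SLEEP_MODE_ID (93) is assumed to
+ * be defined earlier in this file, as referenced in the structure comment
+ * above; the helper name is hypothetical and on-air byte ordering of
+ * 'interval' is left to the caller.
+ */
+static inline void
+dot11_wnm_sleep_fill_enter(dot11_wnm_sleep_ie_t *ie, uint16 interval)
+{
+	ie->id = DOT11_MNG_WNM_SLEEP_MODE_ID;
+	ie->len = DOT11_WNM_SLEEP_IE_LEN;
+	ie->act_type = DOT11_WNM_SLEEP_ACT_TYPE_ENTER;
+	ie->resp_status = 0;		/* reserved when sent in a request */
+	ie->interval = interval;
+}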
+
+/** DMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms request (23) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req dot11_dms_req_t;
+#define DOT11_DMS_REQ_LEN		3	/* Fixed length */
+
+/** DMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms response (24) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp dot11_dms_resp_t;
+#define DOT11_DMS_RESP_LEN		3	/* Fixed length */
+
+/** DMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
+	uint8 id;				/* 99, DOT11_MNG_DMS_REQUEST_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
+#define DOT11_DMS_REQ_IE_LEN		2	/* Fixed length */
+
+/** DMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
+	uint8 id;				/* 100, DOT11_MNG_DMS_RESPONSE_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
+#define DOT11_DMS_RESP_IE_LEN		2	/* Fixed length */
+
+/** DMS request descriptor */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
+#define DOT11_DMS_REQ_DESC_LEN		3	/* Fixed length */
+
+#define DOT11_DMS_REQ_TYPE_ADD		0
+#define DOT11_DMS_REQ_TYPE_REMOVE	1
+#define DOT11_DMS_REQ_TYPE_CHANGE	2
+
+/** DMS response status */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint16 lsc;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
+#define DOT11_DMS_RESP_STATUS_LEN	5	/* Fixed length */
+
+#define DOT11_DMS_RESP_TYPE_ACCEPT	0
+#define DOT11_DMS_RESP_TYPE_DENY	1
+#define DOT11_DMS_RESP_TYPE_TERM	2
+
+#define DOT11_DMS_RESP_LSC_UNSUPPORTED	0xFFFF
+
+/** WNM-Notification Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_notif_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: Notification request (26) */
+	uint8 token;				/* dialog token */
+	uint8 type;				/* type */
+	uint8 data[1];				/* Sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_notif_req dot11_wnm_notif_req_t;
+#define DOT11_WNM_NOTIF_REQ_LEN		4	/* Fixed length */
+
+/** FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms request (9) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN		3	/* Fixed length */
+
+/** FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms response (10) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN		3	/* Fixed length */
+
+/** FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+	uint8 id;
+	uint8 len;
+	uint8 num_fms_cnt;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN		1	/* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX		0x8
+#define DOT11_FMS_CNTR_ID_MASK		0x7
+#define DOT11_FMS_CNTR_ID_SHIFT		0x0
+#define DOT11_FMS_CNTR_COUNT_MASK	0xf1
+#define DOT11_FMS_CNTR_SHIFT		0x3
+
+/** FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;			/* token used to identify fms stream set */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+	uint8 mask;
+	uint8 mcs_idx;
+	uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK	0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET	0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK	0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET	3
+#define DOT11_RATE_ID_FIELD_LEN		sizeof(dot11_rate_id_field_t)
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 interval;
+	uint8 max_interval;
+	dot11_rate_id_field_t rate;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN		6	/* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS		1	/* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS		221	/* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN		1	/* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS	1	/* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS	2	/* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS	221	/* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 status;
+	uint8 interval;
+	uint8 max_interval;
+	uint8 fmsid;
+	uint8 counter;
+	dot11_rate_id_field_t rate;
+	uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN		15	/* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 fmsid;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba req */
+	uint8 token;				/* identifier */
+	uint16 addba_param_set;		/* parameter set */
+	uint16 timeout;				/* timeout in seconds */
+	uint16 start_seqnum;		/* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN		9	/* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba resp */
+	uint8 token;				/* identifier */
+	uint16 status;				/* status of add request */
+	uint16 addba_param_set;			/* negotiated parameter set */
+	uint16 timeout;				/* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN		9	/* length of addba resp frame */
+
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK	0x0800	/* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT	11	/* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK	0xf000	/* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT	12	/* tid shift */
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: delba */
+	uint16 delba_param_set;			/* parameter set */
+	uint16 reason;				/* reason for delba */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN			6	/* length of delba frame */
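+
+/*
+ * Decoding sketch for the DELBA parameter set using the masks above; the
+ * field arrives little-endian on the air, so a real parser byte-swaps first.
+ * Helper names are illustrative only.
+ */
+static inline uint8
+dot11_delba_tid(uint16 delba_param_set)
+{
+	return (uint8)((delba_param_set & DOT11_DELBA_PARAM_TID_MASK) >>
+		DOT11_DELBA_PARAM_TID_SHIFT);
+}
+
+static inline bool
+dot11_delba_from_initiator(uint16 delba_param_set)
+{
+	return (delba_param_set & DOT11_DELBA_PARAM_INIT_MASK) != 0;
+}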
+
+/* SA Query action field value */
+#define SA_QUERY_REQUEST		0
+#define SA_QUERY_RESPONSE		1
+
+/* ************* 802.11r related definitions. ************* */
+
+/** Over-the-DS Fast Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft req */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+/** Over-the-DS Fast Transition Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft resp */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint16 status;			/* status code */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+/** RDE RIC Data Element. */
+BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
+	uint8 id;			/* 11r, DOT11_MNG_RDE_ID */
+	uint8 length;
+	uint8 rde_id;			/* RDE identifier. */
+	uint8 rd_count;			/* Resource Descriptor Count. */
+	uint16 status;			/* Status Code. */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rde_ie dot11_rde_ie_t;
+
+/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
+#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+
+
+/* ************* 802.11k related definitions. ************* */
+
+/* Radio measurements enabled capability ie */
+#define DOT11_RRM_CAP_LEN		5	/* length of rrm cap bitmap */
+#define RCPI_IE_LEN 1
+#define RSNI_IE_LEN 1
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+	uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+/* Bitmap definitions for cap ie */
+#define DOT11_RRM_CAP_LINK		0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT	1
+#define DOT11_RRM_CAP_PARALLEL		2
+#define DOT11_RRM_CAP_REPEATED		3
+#define DOT11_RRM_CAP_BCN_PASSIVE	4
+#define DOT11_RRM_CAP_BCN_ACTIVE	5
+#define DOT11_RRM_CAP_BCN_TABLE		6
+#define DOT11_RRM_CAP_BCN_REP_COND	7
+#define DOT11_RRM_CAP_FM		8
+#define DOT11_RRM_CAP_CLM		9
+#define DOT11_RRM_CAP_NHM		10
+#define DOT11_RRM_CAP_SM		11
+#define DOT11_RRM_CAP_LCIM		12
+#define DOT11_RRM_CAP_LCIA		13
+#define DOT11_RRM_CAP_TSCM		14
+#define DOT11_RRM_CAP_TTSCM		15
+#define DOT11_RRM_CAP_AP_CHANREP	16
+#define DOT11_RRM_CAP_RMMIB		17
+/* bit18-bit23, not used for RRM_IOVAR */
+#define DOT11_RRM_CAP_MPC0		24
+#define DOT11_RRM_CAP_MPC1		25
+#define DOT11_RRM_CAP_MPC2		26
+#define DOT11_RRM_CAP_MPTI		27
+#define DOT11_RRM_CAP_NBRTSFO		28
+#define DOT11_RRM_CAP_RCPI		29
+#define DOT11_RRM_CAP_RSNI		30
+#define DOT11_RRM_CAP_BSSAAD		31
+#define DOT11_RRM_CAP_BSSAAC		32
+#define DOT11_RRM_CAP_AI		33
+#define DOT11_RRM_CAP_FTM_RANGE		34
+#define DOT11_RRM_CAP_CIVIC_LOC		35
+#define DOT11_RRM_CAP_IDENT_LOC		36
+#define DOT11_RRM_CAP_LAST		36
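+
+/*
+ * The capability values above are bit positions into the 5-octet bitmap of
+ * dot11_rrm_cap_ie_t. Illustrative helpers only (hypothetical names, not part
+ * of this header):
+ */
+static inline void
+dot11_rrm_cap_set(dot11_rrm_cap_ie_t *cap, uint8 bit)
+{
+	cap->cap[bit / 8] |= (uint8)(1 << (bit % 8));
+}
+
+static inline bool
+dot11_rrm_cap_isset(const dot11_rrm_cap_ie_t *cap, uint8 bit)
+{
+	return (cap->cap[bit / 8] & (1 << (bit % 8))) != 0;
+}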
+
+#ifdef WL11K_ALL_MEAS
+#define DOT11_RRM_CAP_LINK_ENAB			(1 << DOT11_RRM_CAP_LINK)
+#define DOT11_RRM_CAP_FM_ENAB			(1 << (DOT11_RRM_CAP_FM - 8))
+#define DOT11_RRM_CAP_CLM_ENAB			(1 << (DOT11_RRM_CAP_CLM - 8))
+#define DOT11_RRM_CAP_NHM_ENAB			(1 << (DOT11_RRM_CAP_NHM - 8))
+#define DOT11_RRM_CAP_SM_ENAB			(1 << (DOT11_RRM_CAP_SM - 8))
+#define DOT11_RRM_CAP_LCIM_ENAB			(1 << (DOT11_RRM_CAP_LCIM - 8))
+#define DOT11_RRM_CAP_TSCM_ENAB			(1 << (DOT11_RRM_CAP_TSCM - 8))
+#ifdef WL11K_AP
+#define DOT11_RRM_CAP_MPC0_ENAB			(1 << (DOT11_RRM_CAP_MPC0 - 24))
+#define DOT11_RRM_CAP_MPC1_ENAB			(1 << (DOT11_RRM_CAP_MPC1 - 24))
+#define DOT11_RRM_CAP_MPC2_ENAB			(1 << (DOT11_RRM_CAP_MPC2 - 24))
+#define DOT11_RRM_CAP_MPTI_ENAB			(1 << (DOT11_RRM_CAP_MPTI - 24))
+#else
+#define DOT11_RRM_CAP_MPC0_ENAB			0
+#define DOT11_RRM_CAP_MPC1_ENAB			0
+#define DOT11_RRM_CAP_MPC2_ENAB			0
+#define DOT11_RRM_CAP_MPTI_ENAB			0
+#endif /* WL11K_AP */
+#define DOT11_RRM_CAP_CIVIC_LOC_ENAB		(1 << (DOT11_RRM_CAP_CIVIC_LOC - 32))
+#define DOT11_RRM_CAP_IDENT_LOC_ENAB		(1 << (DOT11_RRM_CAP_IDENT_LOC - 32))
+#else
+#define DOT11_RRM_CAP_LINK_ENAB			0
+#define DOT11_RRM_CAP_FM_ENAB			0
+#define DOT11_RRM_CAP_CLM_ENAB			0
+#define DOT11_RRM_CAP_NHM_ENAB			0
+#define DOT11_RRM_CAP_SM_ENAB			0
+#define DOT11_RRM_CAP_LCIM_ENAB			0
+#define DOT11_RRM_CAP_TSCM_ENAB			0
+#define DOT11_RRM_CAP_MPC0_ENAB			0
+#define DOT11_RRM_CAP_MPC1_ENAB			0
+#define DOT11_RRM_CAP_MPC2_ENAB			0
+#define DOT11_RRM_CAP_MPTI_ENAB			0
+#define DOT11_RRM_CAP_CIVIC_LOC_ENAB		0
+#define DOT11_RRM_CAP_IDENT_LOC_ENAB		0
+#endif /* WL11K_ALL_MEAS */
+#ifdef WL11K_NBR_MEAS
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB	(1 << DOT11_RRM_CAP_NEIGHBOR_REPORT)
+#else
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB	0
+#endif /* WL11K_NBR_MEAS */
+#ifdef WL11K_BCN_MEAS
+#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB		(1 << DOT11_RRM_CAP_BCN_PASSIVE)
+#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB		(1 << DOT11_RRM_CAP_BCN_ACTIVE)
+#else
+#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB		0
+#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB		0
+#endif /* WL11K_BCN_MEAS */
+#define DOT11_RRM_CAP_MPA_MASK		0x7
+/* Operating Class (formerly "Regulatory Class") definitions */
+#define DOT11_OP_CLASS_NONE			255
+
+BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
+	uint8 id;
+	uint8 len;
+	uint8 reg;
+	uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct do11_ap_chrep dot11_ap_chrep_t;
+
+/* Radio Measurements action ids */
+#define DOT11_RM_ACTION_RM_REQ		0	/* Radio measurement request */
+#define DOT11_RM_ACTION_RM_REP		1	/* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ		2	/* Link measurement request */
+#define DOT11_RM_ACTION_LM_REP		3	/* Link measurement report */
+#define DOT11_RM_ACTION_NR_REQ		4	/* Neighbor report request */
+#define DOT11_RM_ACTION_NR_REP		5	/* Neighbor report response */
+#define DOT11_PUB_ACTION_MP		7	/* Measurement Pilot public action id */
+
+/** Generic radio measurement action frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint16 reps;				/* no. of repetitions */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN	5
+
+/* Definitions for "mode" bits in rm req */
+#define DOT11_RMREQ_MODE_PARALLEL	1
+#define DOT11_RMREQ_MODE_ENABLE		2
+#define DOT11_RMREQ_MODE_REQUEST	4
+#define DOT11_RMREQ_MODE_REPORT		8
+#define DOT11_RMREQ_MODE_DURMAND	0x10	/* Duration Mandatory */
+
+/* Definitions for "mode" bits in rm rep */
+#define DOT11_RMREP_MODE_LATE		1
+#define DOT11_RMREP_MODE_INCAPABLE	2
+#define DOT11_RMREP_MODE_REFUSED	4
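+
+/*
+ * Usage sketch for the report mode bits above: a report element whose mode
+ * sets any of the late/incapable/refused bits typically carries no
+ * measurement body. The helper name is illustrative only.
+ */
+static inline bool
+dot11_rmrep_is_refused(const dot11_rm_ie_t *ie)
+{
+	return (ie->mode & (DOT11_RMREP_MODE_LATE | DOT11_RMREP_MODE_INCAPABLE |
+	        DOT11_RMREP_MODE_REFUSED)) != 0;
+}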
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 bcn_mode;
+	struct ether_addr	bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN	18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 frame_info;
+	uint8 rcpi;
+	uint8 rsni;
+	struct ether_addr	bssid;
+	uint8 antenna_id;
+	uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN	26
+
+/* Beacon request measurement mode */
+#define DOT11_RMREQ_BCN_PASSIVE	0
+#define DOT11_RMREQ_BCN_ACTIVE	1
+#define DOT11_RMREQ_BCN_TABLE	2
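+
+/*
+ * Minimal sketch, assuming the caller has already filled the generic
+ * id/len/token/mode/type fields of the request element: program a passive
+ * beacon measurement of 'duration' TUs on one channel against the wildcard
+ * BSSID. On-air byte ordering of the 16-bit fields is left to the caller;
+ * the helper name is hypothetical.
+ */
+static inline void
+dot11_rmreq_bcn_set_passive(dot11_rmreq_bcn_t *req, uint8 op_class,
+	uint8 channel, uint16 duration)
+{
+	int i;
+
+	req->reg = op_class;
+	req->channel = channel;
+	req->interval = 0;			/* randomization interval, TUs */
+	req->duration = duration;		/* measurement duration, TUs */
+	req->bcn_mode = DOT11_RMREQ_BCN_PASSIVE;
+	for (i = 0; i < ETHER_ADDR_LEN; i++)
+		req->bssid.octet[i] = 0xff;	/* wildcard BSSID */
+}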
+
+/* Sub-element IDs for Beacon Request */
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID  1
+#define DOT11_RMREQ_BCN_REPDET_ID   2
+#define DOT11_RMREQ_BCN_REQUEST_ID  10
+#define DOT11_RMREQ_BCN_APCHREP_ID  DOT11_MNG_AP_CHREP_ID
+
+/* Reporting Detail element definition */
+#define DOT11_RMREQ_BCN_REPDET_FIXED	0	/* Fixed length fields only */
+#define DOT11_RMREQ_BCN_REPDET_REQUEST	1	/* + requested information elems */
+#define DOT11_RMREQ_BCN_REPDET_ALL	2	/* All fields */
+
+/* Reporting Information (reporting condition) element definition */
+#define DOT11_RMREQ_BCN_REPINFO_LEN	2	/* Beacon Reporting Information length */
+#define DOT11_RMREQ_BCN_REPCOND_DEFAULT	0	/* Report to be issued after each measurement */
+
+/* Sub-element IDs for Beacon Report */
+#define DOT11_RMREP_BCN_FRM_BODY	1
+#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX	224 /* 802.11k-2008 7.3.2.22.6 */
+
+/* Sub-element IDs for Frame Report */
+#define DOT11_RMREP_FRAME_COUNT_REPORT 1
+
+/* Channel load request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t;
+#define DOT11_RMREQ_CHANLOAD_LEN	11
+
+/** Channel load report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 channel_load;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t;
+#define DOT11_RMREP_CHANLOAD_LEN	13
+
+/** Noise histogram request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_noise dot11_rmreq_noise_t;
+#define DOT11_RMREQ_NOISE_LEN 11
+
+/** Noise histogram report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 antid;
+	uint8 anpi;
+	uint8 ipi0_dens;
+	uint8 ipi1_dens;
+	uint8 ipi2_dens;
+	uint8 ipi3_dens;
+	uint8 ipi4_dens;
+	uint8 ipi5_dens;
+	uint8 ipi6_dens;
+	uint8 ipi7_dens;
+	uint8 ipi8_dens;
+	uint8 ipi9_dens;
+	uint8 ipi10_dens;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_noise dot11_rmrep_noise_t;
+#define DOT11_RMREP_NOISE_LEN 25
+
+/** Frame request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 req_type;
+	struct ether_addr	ta;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_frame dot11_rmreq_frame_t;
+#define DOT11_RMREQ_FRAME_LEN 18
+
+/** Frame report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frame dot11_rmrep_frame_t;
+#define DOT11_RMREP_FRAME_LEN 12
+
+/** Frame report entry */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry {
+	struct ether_addr	ta;
+	struct ether_addr	bssid;
+	uint8 phy_type;
+	uint8 avg_rcpi;
+	uint8 last_rsni;
+	uint8 last_rcpi;
+	uint8 ant_id;
+	uint16 frame_cnt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t;
+#define DOT11_RMREP_FRMENTRY_LEN 19
+
+/** STA statistics request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	struct ether_addr	peer;
+	uint16 interval;
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_stat dot11_rmreq_stat_t;
+#define DOT11_RMREQ_STAT_LEN 16
+
+/** STA statistics report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat {
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_stat dot11_rmrep_stat_t;
+
+/* Statistics Group Report: Group IDs */
+enum {
+	DOT11_RRM_STATS_GRP_ID_0 = 0,
+	DOT11_RRM_STATS_GRP_ID_1,
+	DOT11_RRM_STATS_GRP_ID_2,
+	DOT11_RRM_STATS_GRP_ID_3,
+	DOT11_RRM_STATS_GRP_ID_4,
+	DOT11_RRM_STATS_GRP_ID_5,
+	DOT11_RRM_STATS_GRP_ID_6,
+	DOT11_RRM_STATS_GRP_ID_7,
+	DOT11_RRM_STATS_GRP_ID_8,
+	DOT11_RRM_STATS_GRP_ID_9,
+	DOT11_RRM_STATS_GRP_ID_10,
+	DOT11_RRM_STATS_GRP_ID_11,
+	DOT11_RRM_STATS_GRP_ID_12,
+	DOT11_RRM_STATS_GRP_ID_13,
+	DOT11_RRM_STATS_GRP_ID_14,
+	DOT11_RRM_STATS_GRP_ID_15,
+	DOT11_RRM_STATS_GRP_ID_16
+};
+
+/* Statistics Group Report: Group Data length  */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0	28
+typedef struct rrm_stat_group_0 {
+	uint32	txfrag;
+	uint32	txmulti;
+	uint32	txfail;
+	uint32	rxframe;
+	uint32	rxmulti;
+	uint32	rxbadfcs;
+	uint32	txframe;
+} rrm_stat_group_0_t;
+
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_1	24
+typedef struct rrm_stat_group_1 {
+	uint32	txretry;
+	uint32	txretries;
+	uint32	rxdup;
+	uint32	txrts;
+	uint32	rtsfail;
+	uint32	ackfail;
+} rrm_stat_group_1_t;
+
+/* groups 2-9 use the same QoS data structure (TID 0-7), 52 bytes total */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_2_9	52
+typedef struct rrm_stat_group_qos {
+	uint32	txfrag;
+	uint32	txfail;
+	uint32	txretry;
+	uint32	txretries;
+	uint32	rxdup;
+	uint32	txrts;
+	uint32	rtsfail;
+	uint32	ackfail;
+	uint32	rxfrag;
+	uint32	txframe;
+	uint32	txdrop;
+	uint32	rxmpdu;
+	uint32	rxretries;
+} rrm_stat_group_qos_t;
+
+/* dot11BSSAverageAccessDelay Group (only available at an AP): 8 byte */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_10	8
+typedef BWL_PRE_PACKED_STRUCT struct rrm_stat_group_10 {
+	uint8	apavgdelay;
+	uint8	avgdelaybe;
+	uint8	avgdelaybg;
+	uint8	avgdelayvi;
+	uint8	avgdelayvo;
+	uint16	stacount;
+	uint8	chanutil;
+} BWL_POST_PACKED_STRUCT rrm_stat_group_10_t;
+
+/* AMSDU, 40 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_11	40
+typedef struct rrm_stat_group_11 {
+	uint32	txamsdu;
+	uint32	amsdufail;
+	uint32	amsduretry;
+	uint32	amsduretries;
+	uint32	txamsdubyte_h;
+	uint32	txamsdubyte_l;
+	uint32	amsduackfail;
+	uint32	rxamsdu;
+	uint32	rxamsdubyte_h;
+	uint32	rxamsdubyte_l;
+} rrm_stat_group_11_t;
+
+/* AMPDU, 36 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_12	36
+typedef struct rrm_stat_group_12 {
+	uint32	txampdu;
+	uint32	txmpdu;
+	uint32	txampdubyte_h;
+	uint32	txampdubyte_l;
+	uint32	rxampdu;
+	uint32	rxmpdu;
+	uint32	rxampdubyte_h;
+	uint32	rxampdubyte_l;
+	uint32	ampducrcfail;
+} rrm_stat_group_12_t;
+
+/* BACK etc, 36 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_13	36
+typedef struct rrm_stat_group_13 {
+	uint32	rximpbarfail;
+	uint32	rxexpbarfail;
+	uint32	chanwidthsw;
+	uint32	txframe20mhz;
+	uint32	txframe40mhz;
+	uint32	rxframe20mhz;
+	uint32	rxframe40mhz;
+	uint32	psmpgrantdur;
+	uint32	psmpuseddur;
+} rrm_stat_group_13_t;
+
+/* RD Dual CTS etc, 36 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_14	36
+typedef struct rrm_stat_group_14 {
+	uint32	grantrdgused;
+	uint32	grantrdgunused;
+	uint32	txframeingrantrdg;
+	uint32	txbyteingrantrdg_h;
+	uint32	txbyteingrantrdg_l;
+	uint32	dualcts;
+	uint32	dualctsfail;
+	uint32	rtslsi;
+	uint32	rtslsifail;
+} rrm_stat_group_14_t;
+
+/* bf and STBC etc, 20 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_15	20
+typedef struct rrm_stat_group_15 {
+	uint32	bfframe;
+	uint32	stbccts;
+	uint32	stbcctsfail;
+	uint32	nonstbccts;
+	uint32	nonstbcctsfail;
+} rrm_stat_group_15_t;
+
+/* RSNA, 28 bytes */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_16	28
+typedef struct rrm_stat_group_16 {
+	uint32	rsnacmacicverr;
+	uint32	rsnacmacreplay;
+	uint32	rsnarobustmgmtccmpreplay;
+	uint32	rsnatkipicverr;
+	uint32	rsnatkipicvreplay;
+	uint32	rsnaccmpdecrypterr;
+	uint32	rsnaccmpreplay;
+} rrm_stat_group_16_t;
+
+/* Transmit stream/category measurement request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 interval;
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 bin0_range;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t;
+#define DOT11_RMREQ_TXSTREAM_LEN	17
+
+/** Transmit stream/category measurement report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream {
+	uint32 starttime[2];
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 reason;
+	uint32 txmsdu_cnt;
+	uint32 msdu_discarded_cnt;
+	uint32 msdufailed_cnt;
+	uint32 msduretry_cnt;
+	uint32 cfpolls_lost_cnt;
+	uint32 avrqueue_delay;
+	uint32 avrtx_delay;
+	uint8 bin0_range;
+	uint32 bin0;
+	uint32 bin1;
+	uint32 bin2;
+	uint32 bin3;
+	uint32 bin4;
+	uint32 bin5;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t;
+#define DOT11_RMREP_TXSTREAM_LEN	71
+
+typedef struct rrm_tscm {
+	uint32 msdu_tx;
+	uint32 msdu_exp;
+	uint32 msdu_fail;
+	uint32 msdu_retries;
+	uint32 cfpolls_lost;
+	uint32 queue_delay;
+	uint32 tx_delay_sum;
+	uint32 tx_delay_cnt;
+	uint32 bin0_range_us;
+	uint32 bin0;
+	uint32 bin1;
+	uint32 bin2;
+	uint32 bin3;
+	uint32 bin4;
+	uint32 bin5;
+} rrm_tscm_t;
+enum {
+	DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, 		/* Where am I? */
+	DOT11_FTM_LOCATION_SUBJ_REMOTE = 1,		/* Where are you? */
+	DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2   /* Where is he/she? */
+};
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 subj;
+
+	/* Following 3 fields are unused. Keep for ROM compatibility. */
+	uint8 lat_res;
+	uint8 lon_res;
+	uint8 alt_res;
+
+	/* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t;
+#define DOT11_RMREQ_LCI_LEN	9
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 lci_sub_id;
+	uint8 lci_sub_len;
+	/* optional LCI field */
+	/* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t;
+
+#define DOT11_FTM_LCI_SUBELEM_ID 		0
+#define DOT11_FTM_LCI_SUBELEM_LEN 		2
+#define DOT11_FTM_LCI_FIELD_LEN 		16
+#define DOT11_FTM_LCI_UNKNOWN_LEN 		2
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 subj;
+	uint8 civloc_type;
+	uint8 siu;	/* service interval units */
+	uint16 si;  /* service interval */
+	/* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t;
+#define DOT11_RMREQ_CIVIC_LEN	10
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 civloc_type;
+	uint8 civloc_sub_id;
+	uint8 civloc_sub_len;
+	/* optional location civic field */
+	/* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t;
+
+#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776	0
+#define DOT11_FTM_CIVIC_SUBELEM_ID 			0
+#define DOT11_FTM_CIVIC_SUBELEM_LEN 		2
+#define DOT11_FTM_CIVIC_LOC_SI_NONE			0
+#define DOT11_FTM_CIVIC_TYPE_LEN			1
+#define DOT11_FTM_CIVIC_UNKNOWN_LEN 		3
+
+/* Location Identifier measurement request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_locid {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 subj;
+	uint8 siu;
+	uint16 si;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_locid dot11_rmreq_locid_t;
+#define DOT11_RMREQ_LOCID_LEN	9
+
+/* Location Identifier measurement report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_locid {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 exp_tsf[8];
+	uint8 locid_sub_id;
+	uint8 locid_sub_len;
+	/* optional location identifier field */
+	/* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_locid dot11_rmrep_locid_t;
+#define DOT11_LOCID_UNKNOWN_LEN		10
+#define DOT11_LOCID_SUBELEM_ID		0
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel {
+	uint8 id;
+	uint8 len;
+	uint16 max_age;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t;
+#define DOT11_FTM_RANGE_SUBELEM_ID      4
+#define DOT11_FTM_RANGE_SUBELEM_LEN     2
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 max_init_delay;		/* maximum random initial delay */
+	uint8 min_ap_count;
+	uint8 data[1];
+	/* neighbor report sub-elements */
+	/* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t;
+#define DOT11_RMREQ_FTM_RANGE_LEN 8
+
+#define DOT11_FTM_RANGE_LEN		3
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry {
+	uint32 start_tsf;		/* 4 lsb of tsf */
+	struct ether_addr bssid;
+	uint8 range[DOT11_FTM_RANGE_LEN];
+	uint8 max_err[DOT11_FTM_RANGE_LEN];
+	uint8  rsvd;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t;
+#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT   15
+
+enum {
+	DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 2,
+	DOT11_FTM_RANGE_ERROR_AP_FAILED = 3,
+	DOT11_FTM_RANGE_ERROR_TX_FAILED = 8,
+	DOT11_FTM_RANGE_ERROR_MAX
+};
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry {
+	uint32 start_tsf;		/* 4 lsb of tsf */
+	struct ether_addr bssid;
+	uint8  code;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t;
+#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT   11
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range {
+    uint8 id;
+    uint8 len;
+    uint8 token;
+    uint8 mode;
+    uint8 type;
+    uint8 entry_count;
+    uint8 data[2]; /* includes pad */
+	/*
+	dot11_ftm_range_entry_t entries[entry_count];
+	uint8 error_count;
+	dot11_ftm_error_entry_t errors[error_count];
+	 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t;
+
+#define DOT11_FTM_RANGE_REP_MIN_LEN     6       /* No extra byte for error_count */
+#define DOT11_FTM_RANGE_ENTRY_CNT_MAX   15
+#define DOT11_FTM_RANGE_ERROR_CNT_MAX   11
+#define DOT11_FTM_RANGE_REP_FIXED_LEN   1       /* No extra byte for error_count */
+/** Measurement pause request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 pause_time;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t;
+#define DOT11_RMREQ_PAUSE_LEN	7
+
+
+/* Neighbor Report subelements ID (11k & 11v) */
+#define DOT11_NGBR_TSF_INFO_SE_ID	1
+#define DOT11_NGBR_CCS_SE_ID		2
+#define DOT11_NGBR_BSSTRANS_PREF_SE_ID	3
+#define DOT11_NGBR_BSS_TERM_DUR_SE_ID	4
+#define DOT11_NGBR_BEARING_SE_ID	5
+#define DOT11_NGBR_WIDE_BW_CHAN_SE_ID	6
+
+/** Neighbor Report, BSS Transition Candidate Preference subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 preference;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t;
+#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN		1
+#define DOT11_NGBR_BSSTRANS_PREF_SE_IE_LEN	3
+#define DOT11_NGBR_BSSTRANS_PREF_SE_HIGHEST	0xff
+
+/** Neighbor Report, BSS Termination Duration subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 tsf[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t;
+#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN	10
+
+/* Neighbor Report BSSID Information Field */
+#define DOT11_NGBR_BI_REACHABILTY_UNKN	0x0002
+#define DOT11_NGBR_BI_REACHABILTY	0x0003
+#define DOT11_NGBR_BI_SEC		0x0004
+#define DOT11_NGBR_BI_KEY_SCOPE		0x0008
+#define DOT11_NGBR_BI_CAP		0x03f0
+#define DOT11_NGBR_BI_CAP_SPEC_MGMT	0x0010
+#define DOT11_NGBR_BI_CAP_QOS		0x0020
+#define DOT11_NGBR_BI_CAP_APSD		0x0040
+#define DOT11_NGBR_BI_CAP_RDIO_MSMT	0x0080
+#define DOT11_NGBR_BI_CAP_DEL_BA	0x0100
+#define DOT11_NGBR_BI_CAP_IMM_BA	0x0200
+#define DOT11_NGBR_BI_MOBILITY		0x0400
+#define DOT11_NGBR_BI_HT		0x0800
+#define DOT11_NGBR_BI_VHT		0x1000
+#define DOT11_NGBR_BI_FTM		0x2000
+
+/** Neighbor Report element (11k & 11v) */
+BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;		/* Operating class */
+	uint8 channel;
+	uint8 phytype;
+	uint8 data[1]; 		/* Variable size subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t;
+#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN	13
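+
+/*
+ * Sketch of walking the id/len/data subelements that follow the fixed fields
+ * of a Neighbor Report element. The callback signature and function name are
+ * hypothetical, shown only to illustrate the layout defined above.
+ */
+static inline void
+dot11_nbr_rep_foreach_subelem(const dot11_neighbor_rep_ie_t *nbr,
+	void (*cb)(uint8 sub_id, const uint8 *data, uint8 len, void *arg), void *arg)
+{
+	const uint8 *p = nbr->data;
+	const uint8 *end = (const uint8 *)&nbr->id + 2 + nbr->len;
+
+	while (p + 2 <= end && p + 2 + p[1] <= end) {
+		cb(p[0], p + 2, p[1], arg);
+		p += 2 + p[1];
+	}
+}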
+
+
+/* MLME Enumerations */
+#define DOT11_BSSTYPE_INFRASTRUCTURE		0	/* d11 infrastructure */
+#define DOT11_BSSTYPE_INDEPENDENT		1	/* d11 independent */
+#define DOT11_BSSTYPE_ANY			2	/* d11 any BSS type */
+#define DOT11_BSSTYPE_MESH			3	/* d11 Mesh */
+#define DOT11_SCANTYPE_ACTIVE			0	/* d11 scan active */
+#define DOT11_SCANTYPE_PASSIVE			1	/* d11 scan passive */
+
+/** Link Measurement */
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 txpwr;				/* Transmit Power Used */
+	uint8 maxtxpwr;				/* Max Transmit Power */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	dot11_tpc_rep_t tpc;			/* TPC element */
+	uint8 rxant;				/* Receive Antenna ID */
+	uint8 txant;				/* Transmit Antenna ID */
+	uint8 rcpi;				/* RCPI */
+	uint8 rsni;				/* RSNI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN	11
+
+#define DOT11_MP_CAP_SPECTRUM			0x01	/* d11 cap. spectrum */
+#define DOT11_MP_CAP_SHORTSLOT			0x02	/* d11 cap. shortslot */
+/* Measurement Pilot */
+BWL_PRE_PACKED_STRUCT struct dot11_mprep {
+	uint8 cap_info;				/* Condensed capability Info. */
+	uint8 country[2];				/* Condensed country string */
+	uint8 opclass;				/* Op. Class */
+	uint8 channel;				/* Channel */
+	uint8 mp_interval;			/* Measurement Pilot Interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mprep dot11_mprep_t;
+#define DOT11_MPREP_LEN	6
+
+/* 802.11 BRCM "Compromise" Pre N constants */
+#define PREN_PREAMBLE		24	/* green field preamble time */
+#define PREN_MM_EXT		12	/* extra mixed mode preamble time */
+#define PREN_PREAMBLE_EXT	4	/* extra preamble (multiply by unique_streams-1) */
+
+/* 802.11N PHY constants */
+#define RIFS_11N_TIME		2	/* NPHY RIFS time */
+
+/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3
+ * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2
+ */
+/* HT-SIG1 */
+#define HT_SIG1_MCS_MASK        0x00007F
+#define HT_SIG1_CBW             0x000080
+#define HT_SIG1_HT_LENGTH       0xFFFF00
+
+/* HT-SIG2 */
+#define HT_SIG2_SMOOTHING       0x000001
+#define HT_SIG2_NOT_SOUNDING    0x000002
+#define HT_SIG2_RESERVED        0x000004
+#define HT_SIG2_AGGREGATION     0x000008
+#define HT_SIG2_STBC_MASK       0x000030
+#define HT_SIG2_STBC_SHIFT      4
+#define HT_SIG2_FEC_CODING      0x000040
+#define HT_SIG2_SHORT_GI        0x000080
+#define HT_SIG2_ESS_MASK        0x000300
+#define HT_SIG2_ESS_SHIFT       8
+#define HT_SIG2_CRC             0x03FC00
+#define HT_SIG2_TAIL            0x1C0000
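+
+/*
+ * Field extraction sketch for a 24-bit HT-SIG1 word already assembled from
+ * the three PLCP octets (least-significant octet first). Helper names are
+ * illustrative only.
+ */
+static inline uint8
+ht_sig1_mcs(uint32 ht_sig1)
+{
+	return (uint8)(ht_sig1 & HT_SIG1_MCS_MASK);
+}
+
+static inline uint16
+ht_sig1_length(uint32 ht_sig1)
+{
+	return (uint16)((ht_sig1 & HT_SIG1_HT_LENGTH) >> 8);
+}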
+
+/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */
+#define HT_T_LEG_PREAMBLE      16
+#define HT_T_L_SIG              4
+#define HT_T_SIG                8
+#define HT_T_LTF1               4
+#define HT_T_GF_LTF1            8
+#define HT_T_LTFs               4
+#define HT_T_STF                4
+#define HT_T_GF_STF             8
+#define HT_T_SYML               4
+
+#define HT_N_SERVICE           16       /* bits in SERVICE field */
+#define HT_N_TAIL               6       /* tail bits per BCC encoder */
+
+/* 802.11 A PHY constants */
+#define APHY_SLOT_TIME          9       /* APHY slot time */
+#define APHY_SIFS_TIME          16      /* APHY SIFS time */
+#define APHY_DIFS_TIME          (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))  /* APHY DIFS time */
+#define APHY_PREAMBLE_TIME      16      /* APHY preamble time */
+#define APHY_SIGNAL_TIME        4       /* APHY signal time */
+#define APHY_SYMBOL_TIME        4       /* APHY symbol time */
+#define APHY_SERVICE_NBITS      16      /* APHY service nbits */
+#define APHY_TAIL_NBITS         6       /* APHY tail nbits */
+#define APHY_CWMIN              15      /* APHY cwmin */
+#define APHY_PHYHDR_DUR		20	/* APHY PHY Header Duration */
+
+/* 802.11 B PHY constants */
+#define BPHY_SLOT_TIME          20      /* BPHY slot time */
+#define BPHY_SIFS_TIME          10      /* BPHY SIFS time */
+#define BPHY_DIFS_TIME          50      /* BPHY DIFS time */
+#define BPHY_PLCP_TIME          192     /* BPHY PLCP time */
+#define BPHY_PLCP_SHORT_TIME    96      /* BPHY PLCP short time */
+#define BPHY_CWMIN              31      /* BPHY cwmin */
+#define BPHY_SHORT_PHYHDR_DUR	96	/* BPHY Short PHY Header Duration */
+#define BPHY_LONG_PHYHDR_DUR	192	/* BPHY Long PHY Header Duration */
+
+/* 802.11 G constants */
+#define DOT11_OFDM_SIGNAL_EXTENSION	6	/* d11 OFDM signal extension */
+
+#define PHY_CWMAX		1023	/* PHY cwmax */
+
+#define	DOT11_MAXNUMFRAGS	16	/* max # fragments per MSDU */
+
+/* 802.11 VHT constants */
+
+typedef int vht_group_id_t;
+
+/* for VHT-A1 */
+/* SIG-A1 reserved bits */
+#define VHT_SIGA1_CONST_MASK            0x800004
+
+#define VHT_SIGA1_BW_MASK               0x000003
+#define VHT_SIGA1_20MHZ_VAL             0x000000
+#define VHT_SIGA1_40MHZ_VAL             0x000001
+#define VHT_SIGA1_80MHZ_VAL             0x000002
+#define VHT_SIGA1_160MHZ_VAL            0x000003
+
+#define VHT_SIGA1_STBC                  0x000008
+
+#define VHT_SIGA1_GID_MASK              0x0003f0
+#define VHT_SIGA1_GID_SHIFT             4
+#define VHT_SIGA1_GID_TO_AP             0x00
+#define VHT_SIGA1_GID_NOT_TO_AP         0x3f
+#define VHT_SIGA1_GID_MAX_GID           0x3f
+
+#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
+#define VHT_SIGA1_NSTS_SHIFT            10
+#define VHT_SIGA1_MAX_USERPOS           3
+
+#define VHT_SIGA1_PARTIAL_AID_MASK      0x3fe000
+#define VHT_SIGA1_PARTIAL_AID_SHIFT     13
+
+#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED   0x400000
+
+/* for VHT-A2 */
+#define VHT_SIGA2_GI_NONE               0x000000
+#define VHT_SIGA2_GI_SHORT              0x000001
+#define VHT_SIGA2_GI_W_MOD10            0x000002
+#define VHT_SIGA2_CODING_LDPC           0x000004
+#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM   0x000008
+#define VHT_SIGA2_BEAMFORM_ENABLE       0x000100
+#define VHT_SIGA2_MCS_SHIFT             4
+
+#define VHT_SIGA2_B9_RESERVED           0x000200
+#define VHT_SIGA2_TAIL_MASK             0xfc0000
+#define VHT_SIGA2_TAIL_VALUE            0x000000
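+
+/*
+ * Field extraction sketch for a 24-bit VHT SIG-A1 word using the masks above.
+ * Helper names are illustrative only.
+ */
+static inline uint8
+vht_siga1_gid(uint32 siga1)
+{
+	return (uint8)((siga1 & VHT_SIGA1_GID_MASK) >> VHT_SIGA1_GID_SHIFT);
+}
+
+static inline uint16
+vht_siga1_partial_aid(uint32 siga1)
+{
+	return (uint16)((siga1 & VHT_SIGA1_PARTIAL_AID_MASK) >>
+		VHT_SIGA1_PARTIAL_AID_SHIFT);
+}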
+
+/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */
+#define VHT_T_LEG_PREAMBLE      16
+#define VHT_T_L_SIG              4
+#define VHT_T_SIG_A              8
+#define VHT_T_LTF                4
+#define VHT_T_STF                4
+#define VHT_T_SIG_B              4
+#define VHT_T_SYML               4
+
+#define VHT_N_SERVICE           16	/* bits in SERVICE field */
+#define VHT_N_TAIL               6	/* tail bits per BCC encoder */
+
+/** dot11Counters Table - 802.11 spec., Annex D */
+typedef struct d11cnt {
+	uint32		txfrag;		/* dot11TransmittedFragmentCount */
+	uint32		txmulti;	/* dot11MulticastTransmittedFrameCount */
+	uint32		txfail;		/* dot11FailedCount */
+	uint32		txretry;	/* dot11RetryCount */
+	uint32		txretrie;	/* dot11MultipleRetryCount */
+	uint32		rxdup;		/* dot11FrameduplicateCount */
+	uint32		txrts;		/* dot11RTSSuccessCount */
+	uint32		txnocts;	/* dot11RTSFailureCount */
+	uint32		txnoack;	/* dot11ACKFailureCount */
+	uint32		rxfrag;		/* dot11ReceivedFragmentCount */
+	uint32		rxmulti;	/* dot11MulticastReceivedFrameCount */
+	uint32		rxcrc;		/* dot11FCSErrorCount */
+	uint32		txfrmsnt;	/* dot11TransmittedFrameCount */
+	uint32		rxundec;	/* dot11WEPUndecryptableCount */
+} d11cnt_t;
+
+#define BRCM_PROP_OUI		"\x00\x90\x4C"
+
+
+#define BRCM_FTM_IE_TYPE			14
+
+/* #define HT_CAP_IE_TYPE			51
+ * #define HT_ADD_IE_TYPE			52
+ * #define BRCM_EXTCH_IE_TYPE		53
+ * #define MEMBER_OF_BRCM_PROP_IE_TYPE	54
+ * #define BRCM_RELMACST_IE_TYPE		55
+ * #define BRCM_EVT_WL_BSS_INFO		64
+ * #define RWL_ACTION_WIFI_FRAG_TYPE	85
+ * #define BTC_INFO_BRCM_PROP_IE_TYPE	90
+ * #define ULB_BRCM_PROP_IE_TYPE	91
+ * #define SDB_BRCM_PROP_IE_TYPE	92
+ */
+
+/* Action frame type for RWL */
+#define RWL_WIFI_DEFAULT		0
+#define RWL_WIFI_FIND_MY_PEER		9 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER		10 /* Server response to the client  */
+#define RWL_ACTION_WIFI_FRAG_TYPE	85 /* Fragment indicator for receiver */
+
+#define PROXD_AF_TYPE			11 /* Wifi proximity action frame type */
+#define BRCM_RELMACST_AF_TYPE	        12 /* RMC action frame type */
+
+/* Action frame type for FTM Initiator Report */
+#define BRCM_FTM_VS_AF_TYPE	14
+enum {
+	BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1,	/* FTM Initiator Report */
+	BRCM_FTM_VS_COLLECT_SUBTYPE = 2,	/* FTM Collect debug protocol */
+};
+
+
+
+
+/* brcm syscap_ie cap */
+#define BRCM_SYSCAP_WET_TUNNEL	0x0100	/* Device with WET_TUNNEL support */
+
+#define BRCM_OUI		"\x00\x10\x18"	/* Broadcom OUI */
+
+/** BRCM info element */
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	ver;		/* type/ver of this IE */
+	uint8	assoc;		/* # of assoc STAs */
+	uint8	flags;		/* misc flags */
+	uint8	flags1;		/* misc flags */
+	uint16	amsdu_mtu_pref;	/* preferred A-MSDU MTU */
+} BWL_POST_PACKED_STRUCT;
+typedef	struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN		11	/* BRCM IE length */
+#define BRCM_IE_VER		2	/* BRCM IE version */
+#define BRCM_IE_LEGACY_AES_VER	1	/* BRCM IE legacy AES version */
+
+/* brcm_ie flags */
+#define	BRF_ABCAP		0x1	/* afterburner is obsolete,  defined for backward compat */
+#define	BRF_ABRQRD		0x2	/* afterburner is obsolete,  defined for backward compat */
+#define	BRF_LZWDS		0x4	/* lazy wds enabled */
+#define	BRF_BLOCKACK		0x8	/* BlockACK capable */
+#define BRF_ABCOUNTER_MASK	0xf0	/* afterburner is obsolete,  defined for backward compat */
+#define BRF_PROP_11N_MCS	0x10	/* re-use afterburner bit */
+#define BRF_MEDIA_CLIENT	0x20	/* re-use afterburner bit to indicate media client device */
+
+#define GET_BRF_PROP_11N_MCS(brcm_ie) \
+	(!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
+
+/* brcm_ie flags1 */
+#define	BRF1_AMSDU		0x1	/* A-MSDU capable */
+#define	BRF1_WNM		0x2	/* WNM capable */
+#define BRF1_WMEPS		0x4	/* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX		0x8	/* AP has fixed PS mode out-of-order packets */
+#define	BRF1_RX_LARGE_AGG	0x10	/* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS	0x20    /* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP		0x40    /* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS		0x80    /* DWDS capable */
+
+/** Vendor IE structure */
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+	uchar id;
+	uchar len;
+	uchar oui [3];
+	uchar data [1]; 	/* Variable size data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN		2	/* id + len field */
+#define VNDR_IE_MIN_LEN		3	/* size of the oui field */
+#define VNDR_IE_FIXED_LEN	(VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
+
+#define VNDR_IE_MAX_LEN		255	/* vendor IE max length, without ID and len */
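+
+/* Illustrative sketch, not part of the original header: the total on-air size of a
+ * vendor IE is its len field plus the 2-byte id/len header. The function name and the
+ * BCMDHD_80211_EXAMPLES guard are ours, so normal builds are unaffected.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static uint32
+example_vndr_ie_total_len(const vndr_ie_t *ie)
+{
+	return (uint32)ie->len + VNDR_IE_HDR_LEN;
+}
+#endif /* BCMDHD_80211_EXAMPLES */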
+
+/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
+BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
+	uchar id;
+	uchar len;
+	uchar oui[3];
+	uint8	type;           /* type indicates what follows */
+	struct ether_addr ea;   /* Device Primary MAC Address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
+
+#define MEMBER_OF_BRCM_PROP_IE_LEN		10	/* IE max length */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN	        (sizeof(member_of_brcm_prop_ie_t))
+#define MEMBER_OF_BRCM_PROP_IE_TYPE		54
+
+/** BRCM Reliable Multicast IE */
+BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
+	uint8 id;
+	uint8 len;
+	uint8 oui[3];
+	uint8 type;           /* type indicates what follows */
+	struct ether_addr ea;   /* The ack sender's MAC Address */
+	struct ether_addr mcast_ea;  /* The multicast MAC address */
+	uint8 updtmo; /* time interval (seconds) for client to send null packet to report its RSSI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
+
+/* IE length */
+/* BRCM_PROP_IE_LEN = sizeof(relmcast_brcm_prop_ie_t)-((sizeof (id) + sizeof (len)))? */
+#define RELMCAST_BRCM_PROP_IE_LEN	(sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8)))
+
+#define RELMCAST_BRCM_PROP_IE_TYPE	55
+
+/* BRCM BTC IE */
+BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie {
+	uint8 id;
+	uint8 len;
+	uint8 oui[3];
+	uint8 type;           /* type indicates what follows */
+	uint32 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t;
+
+#define BTC_INFO_BRCM_PROP_IE_TYPE	90
+#define BRCM_BTC_INFO_TYPE_LEN	(sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8)))
+
+/* ************* HT definitions. ************* */
+#define MCSSET_LEN	16	/* 16 bytes (8 bits each) giving a 128-bit bitmap of MCS Index values */
+#define MAX_MCS_NUM	(128)	/* max mcs number = 128 */
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+	uint16	cap;
+	uint8	params;
+	uint8	supp_mcs[MCSSET_LEN];
+	uint16	ext_htcap;
+	uint32	txbf_cap;
+	uint8	as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie {
+	uint8	id;
+	uint8	len;
+	ht_cap_ie_t ht_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t;
+
+/* CAP IE: HT 1.0 spec. simply stole an 802.11 IE; we use our prop. IE until this is resolved */
+/* the capability IE is primarily used to convey this node's abilities */
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;           /* type indicates what follows */
+	ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD	4	/* overhead bytes for prop oui ie */
+#define HT_CAP_IE_LEN		26	/* HT capability len (based on .11n d2.0) */
+#define HT_CAP_IE_TYPE		51
+
+#define HT_CAP_LDPC_CODING	0x0001	/* Support for rx of LDPC coded pkts */
+#define HT_CAP_40MHZ		0x0002  /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define HT_CAP_MIMO_PS_MASK	0x000C  /* Mimo PS mask */
+#define HT_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define HT_CAP_MIMO_PS_OFF	0x0003	/* Mimo PS, no restriction */
+#define HT_CAP_MIMO_PS_RTS	0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define HT_CAP_MIMO_PS_ON	0x0000	/* Mimo PS, MIMO disallowed */
+#define HT_CAP_GF		0x0010	/* Greenfield preamble support */
+#define HT_CAP_SHORT_GI_20	0x0020	/* 20MHZ short guard interval support */
+#define HT_CAP_SHORT_GI_40	0x0040	/* 40Mhz short guard interval support */
+#define HT_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define HT_CAP_RX_STBC_MASK	0x0300	/* Rx STBC mask */
+#define HT_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define HT_CAP_DELAYED_BA	0x0400	/* delayed BA support */
+#define HT_CAP_MAX_AMSDU	0x0800	/* Max AMSDU size in bytes, 0=3839, 1=7935 */
+
+#define HT_CAP_DSSS_CCK	0x1000	/* DSSS/CCK supported by the BSS */
+#define HT_CAP_PSMP		0x2000	/* Power Save Multi Poll support */
+#define HT_CAP_40MHZ_INTOLERANT 0x4000	/* 40MHz Intolerant */
+#define HT_CAP_LSIG_TXOP	0x8000	/* L-SIG TXOP protection support */
+
+#define HT_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define HT_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define HT_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define HT_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
+
+
+#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX	0x1
+#define HT_CAP_TXBF_CAP_NDP_RX			0x8
+#define HT_CAP_TXBF_CAP_NDP_TX			0x10
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI		0x100
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING	0x200
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING	0x400
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK	0x1800
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT	11
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK	0x6000
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT	13
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK	0x18000
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT	15
+#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT	19
+#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT	21
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT		23
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK		0x1800000
+
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT	27
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK		0x18000000
+
+#define HT_CAP_TXBF_FB_TYPE_NONE 	0
+#define HT_CAP_TXBF_FB_TYPE_DELAYED 	1
+#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 	2
+#define HT_CAP_TXBF_FB_TYPE_BOTH 	3
+
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK	0x400
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT	10
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15
+
+#define HT_CAP_MCS_FLAGS_SUPP_BYTE 12 /* byte offset in HT Cap Supported MCS for various flags */
+#define HT_CAP_MCS_RX_8TO15_BYTE_OFFSET                1
+#define HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL              0x02
+#define HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK    0x0C
+
+#define VHT_MAX_MPDU		11454	/* max mpdu size for now (bytes) */
+#define VHT_MPDU_MSDU_DELTA	56		/* difference between max VHT MPDU and max A-MSDU length per spec */
+/* Max AMSDU len - per spec */
+#define VHT_MAX_AMSDU		(VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
+
+#define HT_MAX_AMSDU		7935	/* max amsdu size (bytes) per the HT spec */
+#define HT_MIN_AMSDU		3835	/* min amsdu size (bytes) per the HT spec */
+
+#define HT_PARAMS_RX_FACTOR_MASK	0x03	/* ampdu rcv factor mask */
+#define HT_PARAMS_DENSITY_MASK		0x1C	/* ampdu density mask */
+#define HT_PARAMS_DENSITY_SHIFT	2	/* ampdu density shift */
+
+/* HT/AMPDU specific define */
+#define AMPDU_MAX_MPDU_DENSITY  7       /* max mpdu density; in 1/4 usec units */
+#define AMPDU_DENSITY_NONE      0       /* No density requirement */
+#define AMPDU_DENSITY_1over4_US 1       /* 1/4 us density */
+#define AMPDU_DENSITY_1over2_US 2       /* 1/2 us density */
+#define AMPDU_DENSITY_1_US      3       /*   1 us density */
+#define AMPDU_DENSITY_2_US      4       /*   2 us density */
+#define AMPDU_DENSITY_4_US      5       /*   4 us density */
+#define AMPDU_DENSITY_8_US      6       /*   8 us density */
+#define AMPDU_DENSITY_16_US     7       /*  16 us density */
+#define AMPDU_RX_FACTOR_8K      0       /* max rcv ampdu len (8kb) */
+#define AMPDU_RX_FACTOR_16K     1       /* max rcv ampdu len (16kb) */
+#define AMPDU_RX_FACTOR_32K     2       /* max rcv ampdu len (32kb) */
+#define AMPDU_RX_FACTOR_64K     3       /* max rcv ampdu len (64kb) */
+
+/* AMPDU RX factors for VHT rates */
+#define AMPDU_RX_FACTOR_128K    4       /* max rcv ampdu len (128kb) */
+#define AMPDU_RX_FACTOR_256K    5       /* max rcv ampdu len (256kb) */
+#define AMPDU_RX_FACTOR_512K    6       /* max rcv ampdu len (512kb) */
+#define AMPDU_RX_FACTOR_1024K   7       /* max rcv ampdu len (1024kb) */
+
+#define AMPDU_RX_FACTOR_BASE    8*1024  /* ampdu factor base for rx len */
+#define AMPDU_RX_FACTOR_BASE_PWR	13	/* ampdu factor base for rx len in power of 2 */
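+
+/* Illustrative sketch, not part of the original header: converting the A-MPDU maximum
+ * RX factor advertised in the HT Capabilities A-MPDU parameters into a byte length,
+ * i.e. 2^(13 + factor) - 1 per 802.11n. The function name and the
+ * BCMDHD_80211_EXAMPLES guard are ours, so normal builds are unaffected.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static uint32
+example_ampdu_rx_factor_to_len(uint8 rx_factor)
+{
+	/* AMPDU_RX_FACTOR_8K..AMPDU_RX_FACTOR_64K give 8KB-1 .. 64KB-1 windows */
+	return ((uint32)1 << (AMPDU_RX_FACTOR_BASE_PWR + rx_factor)) - 1;
+}
+#endif /* BCMDHD_80211_EXAMPLES */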
+
+#define AMPDU_DELIMITER_LEN	4	/* length of ampdu delimiter */
+#define AMPDU_DELIMITER_LEN_MAX	63	/* max length of ampdu delimiter(enforced in HW) */
+
+#define HT_CAP_EXT_PCO			0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK	0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT	1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK	0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT	8
+#define HT_CAP_EXT_HTC			0x0400
+#define HT_CAP_EXT_RD_RESP		0x0800
+
+/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+	uint8	ctl_ch;			/* control channel number */
+	uint8	byte1;			/* ext ch,rec. ch. width, RIFS support */
+	uint16	opmode;			/* operation mode */
+	uint16	misc_bits;		/* misc bits */
+	uint8	basic_mcs[MCSSET_LEN];  /* required MCS set */
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+/* ADD IE: HT 1.0 spec. simply stole an 802.11 IE; we use our prop. IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;		/* indicates what follows */
+	ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN	22
+#define HT_ADD_IE_TYPE	52
+
+/* byte1 defn's */
+#define HT_BW_ANY		0x04	/* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED     	0x08	/* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK	        0x0003	/* protection mode mask */
+#define HT_OPMODE_SHIFT		0	/* protection mode shift */
+#define HT_OPMODE_PURE		0x0000	/* protection mode PURE */
+#define HT_OPMODE_OPTIONAL	0x0001	/* protection mode optional */
+#define HT_OPMODE_HT20IN40	0x0002	/* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED	0x0003	/* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF	0x0004	/* protection mode non-GF */
+#define DOT11N_TXBURST		0x0008	/* Tx burst limit */
+#define DOT11N_OBSS_NONHT	0x0010	/* OBSS Non-HT STA present */
+#define HT_OPMODE_CCFS2_MASK	0x1fe0	/* Channel Center Frequency Segment 2 mask */
+#define HT_OPMODE_CCFS2_SHIFT	5	/* Channel Center Frequency Segment 2 shift */
+
+/* misc_bits defn's */
+#define HT_BASIC_STBC_MCS	0x007f	/* basic STBC MCS */
+#define HT_DUAL_STBC_PROT	0x0080	/* Dual STBC Protection */
+#define HT_SECOND_BCN		0x0100	/* Secondary beacon support */
+#define HT_LSIG_TXOP		0x0200	/* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE		0x0400	/* PCO active */
+#define HT_PCO_PHASE		0x0800	/* PCO phase */
+#define HT_DUALCTS_PROTECTION	0x0080	/* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT	6160	/* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT	3080	/* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
+#define GET_HT_OPMODE(add_ie)		((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					>> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_MIXED)	/* mixed mode present */
+#define HT_HT20_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_HT20IN40)	/* 20MHz HT present */
+#define HT_OPTIONAL_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_OPTIONAL)	/* Optional protection present */
+#define HT_USE_PROTECTION(add_ie)	(HT_HT20_PRESENT((add_ie)) || \
+					HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */
+#define HT_NONGF_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+					== HT_OPMODE_NONGF)	/* non-GF present */
+#define DOT11N_TXBURST_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+					== DOT11N_TXBURST)	/* Tx Burst present */
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+					== DOT11N_OBSS_NONHT)	/* OBSS Non-HT present */
+#define HT_OPMODE_CCFS2_GET(add_ie)	((ltoh16_ua(&(add_ie)->opmode) & HT_OPMODE_CCFS2_MASK) \
+					>> HT_OPMODE_CCFS2_SHIFT)	/* get CCFS2 */
+#define HT_OPMODE_CCFS2_SET(add_ie, ccfs2)	do { /* set CCFS2 */ \
+	(add_ie)->opmode &= htol16(~HT_OPMODE_CCFS2_MASK); \
+	(add_ie)->opmode |= htol16(((ccfs2) << HT_OPMODE_CCFS2_SHIFT) & HT_OPMODE_CCFS2_MASK); \
+} while (0)
+
+/* Macros for HT MCS field access */
+#define HT_CAP_MCS_BITMASK(supp_mcs)                 \
+	((supp_mcs)[HT_CAP_MCS_RX_8TO15_BYTE_OFFSET])
+#define HT_CAP_MCS_TX_RX_UNEQUAL(supp_mcs)          \
+	((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL)
+#define HT_CAP_MCS_TX_STREAM_SUPPORT(supp_mcs)          \
+		((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK)
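+
+/* Illustrative sketch, not part of the original header: MCS 8-15 in the supported MCS
+ * set require a second receive chain, so a non-zero byte 1 of ht_cap_ie_t.supp_mcs
+ * means the peer can receive two spatial streams. The function name and the
+ * BCMDHD_80211_EXAMPLES guard are ours.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static int
+example_ht_peer_rx_two_streams(const uint8 *supp_mcs)
+{
+	return HT_CAP_MCS_BITMASK(supp_mcs) != 0;
+}
+#endif /* BCMDHD_80211_EXAMPLES */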
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+	uint16	passive_dwell;
+	uint16	active_dwell;
+	uint16	bss_widthscan_interval;
+	uint16	passive_total;
+	uint16	active_total;
+	uint16	chanwidth_transition_dly;
+	uint16	activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+	uint8	id;
+	uint8	len;
+	obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN	sizeof(obss_params_t)	/* HT OBSS len (based on 802.11n d3.0) */
+
+/* HT control field */
+#define HT_CTRL_LA_TRQ		0x00000002	/* sounding request */
+#define HT_CTRL_LA_MAI		0x0000003C	/* MCS request or antenna selection indication */
+#define HT_CTRL_LA_MAI_SHIFT	2
+#define HT_CTRL_LA_MAI_MRQ	0x00000004	/* MCS request */
+#define HT_CTRL_LA_MAI_MSI	0x00000038	/* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI		0x000001C0	/* MFB sequence identifier */
+#define HT_CTRL_LA_MFSI_SHIFT	6
+#define HT_CTRL_LA_MFB_ASELC	0x0000FE00	/* MCS feedback, antenna selection command/data */
+#define HT_CTRL_LA_MFB_ASELC_SH	9
+#define HT_CTRL_LA_ASELC_CMD	0x00000C00	/* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA	0x0000F000	/* ASEL data */
+#define HT_CTRL_CAL_POS		0x00030000	/* Calibration position */
+#define HT_CTRL_CAL_SEQ		0x000C0000	/* Calibration sequence */
+#define HT_CTRL_CSI_STEERING	0x00C00000	/* CSI/Steering */
+#define HT_CTRL_CSI_STEER_SHIFT	22
+#define HT_CTRL_CSI_STEER_NFB	0		/* no feedback required */
+#define HT_CTRL_CSI_STEER_CSI	1		/* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM	2		/* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM	3		/* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE	0x01000000	/* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT	0x40000000	/* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU	0x80000000	/* RDG/More PPDU */
+
+/* ************* VHT definitions. ************* */
+
+/**
+ * VHT Capabilities IE (sec 8.4.2.160)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+	uint32  vht_cap_info;
+	/* supported MCS set - 64 bit field */
+	uint16	rx_mcs_map;
+	uint16  rx_max_rate;
+	uint16  tx_mcs_map;
+	uint16	tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+/* 4B cap_info + 8B supp_mcs */
+#define VHT_CAP_IE_LEN 12
+
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK          0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK       0x0000000c
+#define VHT_CAP_INFO_LDPC                       0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ                  0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ                 0x00000040
+#define VHT_CAP_INFO_TX_STBC                    0x00000080
+#define VHT_CAP_INFO_RX_STBC_MASK               0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT              8
+#define VHT_CAP_INFO_SU_BEAMFMR                 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE                0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK         0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT        13
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK      0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT     16
+#define VHT_CAP_INFO_MU_BEAMFMR                 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE                0x00100000
+#define VHT_CAP_INFO_TXOPPS                     0x00200000
+#define VHT_CAP_INFO_HTCVHT                     0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK      0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT     23
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK        0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT       26
+#define VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK        0xc0000000
+#define VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT       30
+
+/* get Extended NSS BW Support passing vht cap info */
+#define VHT_CAP_EXT_NSS_BW_SUP(cap_info) \
+	(((cap_info) & VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK) >> VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT)
+
+/* VHT CAP INFO extended NSS BW support - refer to IEEE 802.11 REVmc D8.0 Figure 9-559 */
+#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160	1 /* 160MHz at half NSS CAP */
+#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160_80P80	2 /* 160 & 80p80 MHz at half NSS CAP */
+
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT  0
+#define VHT_CAP_SUPP_CHAN_WIDTH_SHIFT		5
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT  0
+
+/* defines for field(s) in vht_cap_ie->rx_max_rate */
+#define VHT_CAP_MAX_NSTS_MASK			0xe000
+#define VHT_CAP_MAX_NSTS_SHIFT			13
+
+/* defines for field(s) in vht_cap_ie->tx_max_rate */
+#define VHT_CAP_EXT_NSS_BW_CAP			0x2000
+
+#define VHT_CAP_MCS_MAP_0_7                     0
+#define VHT_CAP_MCS_MAP_0_8                     1
+#define VHT_CAP_MCS_MAP_0_9                     2
+#define VHT_CAP_MCS_MAP_NONE                    3
+#define VHT_CAP_MCS_MAP_S                       2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M                       0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
+#define VHT_CAP_MCS_MAP_NONE_ALL                0xffff
+
+/* VHT rates bitmap */
+#define VHT_CAP_MCS_0_7_RATEMAP		0x00ff
+#define VHT_CAP_MCS_0_8_RATEMAP		0x01ff
+#define VHT_CAP_MCS_0_9_RATEMAP		0x03ff
+#define VHT_CAP_MCS_FULL_RATEMAP 	VHT_CAP_MCS_0_9_RATEMAP
+
+#define VHT_PROP_MCS_MAP_10_11                   0
+#define VHT_PROP_MCS_MAP_UNUSED1                 1
+#define VHT_PROP_MCS_MAP_UNUSED2                 2
+#define VHT_PROP_MCS_MAP_NONE                    3
+#define VHT_PROP_MCS_MAP_NONE_ALL                0xffff
+
+/* VHT prop rates bitmap */
+#define VHT_PROP_MCS_10_11_RATEMAP	0x0c00
+#define VHT_PROP_MCS_FULL_RATEMAP	VHT_PROP_MCS_10_11_RATEMAP
+
+#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3)
+/* mcsmap with MCS0-9 for Nss = 3 */
+#define VHT_CAP_MCS_MAP_0_9_NSS3 \
+	        ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */
+
+#define VHT_CAP_MCS_MAP_NSS_MAX                 8
+
+/* get mcsmap with given mcs for given nss streams */
+#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
+	do { \
+		int i; \
+		for (i = 1; i <= nss; i++) { \
+			VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \
+		} \
+	} while (0)
+
+/* Map the mcs code to mcs bit map */
+#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
+	((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0)
+
+#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \
+	((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0)
+
+/* Map the mcs bit map to mcs code */
+#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
+	((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \
+	 (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \
+	 (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \
+	(((mcs_map & 0xc00) == 0xc00)  ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE)
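+
+/* Illustrative sketch, not part of the original header: round-tripping between the
+ * 2-bit per-stream MCS code and the per-stream rate bitmap with the macros above.
+ * Function name and the BCMDHD_80211_EXAMPLES guard are ours.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static uint16
+example_vht_mcs_code_roundtrip(void)
+{
+	uint16 map = VHT_MCS_CODE_TO_MCS_MAP(VHT_CAP_MCS_MAP_0_9);	/* 0x03ff */
+	return (uint16)VHT_MCS_MAP_TO_MCS_CODE(map);	/* back to VHT_CAP_MCS_MAP_0_9 */
+}
+#endif /* BCMDHD_80211_EXAMPLES */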
+
+/** VHT Capabilities Supported Channel Width */
+typedef enum vht_cap_chan_width {
+	VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160       = 0x04,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080  = 0x08
+} vht_cap_chan_width_t;
+
+/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
+typedef enum vht_cap_max_mpdu_len {
+	VHT_CAP_MPDU_MAX_4K     = 0x00,
+	VHT_CAP_MPDU_MAX_8K     = 0x01,
+	VHT_CAP_MPDU_MAX_11K    = 0x02
+} vht_cap_max_mpdu_len_t;
+
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
+#define VHT_MPDU_LIMIT_4K        3895
+#define VHT_MPDU_LIMIT_8K        7991
+#define VHT_MPDU_LIMIT_11K      11454
+
+
+/**
+ * VHT Operation IE (sec 8.4.2.161)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+	uint8	chan_width;
+	uint8	chan1;
+	uint8	chan2;
+	uint16	supp_mcs;  /*  same def as above in vht cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+/* 3B VHT Op info + 2B Basic MCS */
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+	VHT_OP_CHAN_WIDTH_20_40	= 0,
+	VHT_OP_CHAN_WIDTH_80	= 1,
+	VHT_OP_CHAN_WIDTH_160	= 2, /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
+	VHT_OP_CHAN_WIDTH_80_80	= 3  /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
+} vht_op_chan_width_t;
+
+/* AID length */
+#define AID_IE_LEN		2
+/**
+ * BRCM vht features IE header
+ * The header is the fixed part of the IE.
+ * On the 5GHz band this is the entire IE;
+ * on 2.4GHz the VHT IEs as defined in the 802.11ac
+ * specification follow.
+ *
+ *
+ * VHT features rates  bitmap.
+ * Bit0:		5G MCS 0-9 BW 160MHz
+ * Bit1:		5G MCS 0-9 support BW 80MHz
+ * Bit2:		5G MCS 0-9 support BW 20MHz
+ * Bit3:		2.4G MCS 0-9 support BW 20MHz
+ * Bits:4-7	Reserved for future use
+ *
+ */
+#define VHT_FEATURES_IE_TYPE	0x4
+BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
+	uint8 oui[3];
+	uint8 type;		/* type of this IE = 4 */
+	uint8 rate_mask;	/* VHT rate mask */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
+
+/* Defs for rx & tx basic mcs maps - each spatial stream number is encoded in 2 bits */
+#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
+	(((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \
+	do { \
+	 (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \
+	 (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \
+	} while (0)
+#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \
+		 (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
+
+/* Get the max ss supported from the mcs map */
+#define VHT_MAX_SS_SUPPORTED(mcsMap) \
+	VHT_MCS_SS_SUPPORTED(8, mcsMap) ? 8 : \
+	VHT_MCS_SS_SUPPORTED(7, mcsMap) ? 7 : \
+	VHT_MCS_SS_SUPPORTED(6, mcsMap) ? 6 : \
+	VHT_MCS_SS_SUPPORTED(5, mcsMap) ? 5 : \
+	VHT_MCS_SS_SUPPORTED(4, mcsMap) ? 4 : \
+	VHT_MCS_SS_SUPPORTED(3, mcsMap) ? 3 : \
+	VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 2 : \
+	VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0
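+
+/* Illustrative sketch, not part of the original header: building an RX MCS map that
+ * advertises MCS 0-9 on two spatial streams and leaves the remaining streams
+ * unsupported; the accessors above then report 2 as the max SS. The function name
+ * and the BCMDHD_80211_EXAMPLES guard are ours.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static uint16
+example_vht_mcs_map_2ss_0_9(void)
+{
+	uint16 mcsmap = VHT_CAP_MCS_MAP_NONE_ALL;	/* start with every SS unsupported */
+
+	VHT_CAP_MCS_MAP_CREATE(mcsmap, 2, VHT_CAP_MCS_MAP_0_9);
+	/* VHT_MAX_SS_SUPPORTED(mcsmap) == 2,
+	 * VHT_MCS_MAP_GET_MCS_PER_SS(1, mcsmap) == VHT_CAP_MCS_MAP_0_9
+	 */
+	return mcsmap;
+}
+#endif /* BCMDHD_80211_EXAMPLES */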
+
+/* ************* WPA definitions. ************* */
+#define WPA_OUI			"\x00\x50\xF2"	/* WPA OUI */
+#define WPA_OUI_LEN		3		/* WPA OUI length */
+#define WPA_OUI_TYPE		1
+#define WPA_VERSION		1		/* WPA version */
+#define WPA2_OUI		"\x00\x0F\xAC"	/* WPA2 OUI */
+#define WPA2_OUI_LEN		3		/* WPA2 OUI length */
+#define WPA2_VERSION		1		/* WPA2 version */
+#define WPA2_VERSION_LEN	2		/* WPA2 version length */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI			"\x00\x50\xF2"	/* WPS OUI */
+#define WPS_OUI_LEN		3		/* WPS OUI length */
+#define WPS_OUI_TYPE		4
+
+/* ************* WFA definitions. ************* */
+#if defined(WL_LEGACY_P2P)
+#define MAC_OUI			"\x00\x17\xF2"	/* MACOSX OUI */
+#define MAC_OUI_TYPE_P2P	5
+#endif /* WL_LEGACY_P2P */
+
+#ifdef P2P_IE_OVRD
+#define WFA_OUI			MAC_OUI
+#else
+#define WFA_OUI			"\x50\x6F\x9A"	/* WFA OUI */
+#endif /* P2P_IE_OVRD */
+#define WFA_OUI_LEN		3		/* WFA OUI length */
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P	MAC_OUI_TYPE_P2P
+#else
+#define WFA_OUI_TYPE_TPC	8
+#define WFA_OUI_TYPE_P2P	9
+#endif
+
+#define WFA_OUI_TYPE_TPC	8
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_TPQ	4	/* WFD Tunneled Probe ReQuest */
+#define WFA_OUI_TYPE_TPS	5	/* WFD Tunneled Probe ReSponse */
+#define WFA_OUI_TYPE_WFD	10
+#endif /* WLTDLS */
+#define WFA_OUI_TYPE_HS20	0x10
+#define WFA_OUI_TYPE_OSEN	0x12
+#define WFA_OUI_TYPE_NAN	0x13
+#define WFA_OUI_TYPE_MBO	0x16
+#define WFA_OUI_TYPE_MBO_OCE	0x16
+
+/* RSN authenticated key management suite */
+#define RSN_AKM_NONE		0	/* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED	1	/* Over 802.1x */
+#define RSN_AKM_PSK		2	/* Pre-shared Key */
+#define RSN_AKM_FBT_1X		3	/* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK		4	/* Fast Bss transition using Pre-shared Key */
+/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more
+ * Just kept here to avoid build issue in BISON/CARIBOU branch
+ */
+#define RSN_AKM_MFP_1X		5	/* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK		6	/* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_SHA256_1X	5	/* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_SHA256_PSK	6	/* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK		7	/* TPK(TDLS Peer Key) handshake */
+#define RSN_AKM_FILS_SHA256	14	/* SHA256 key derivation, using FILS */
+#define RSN_AKM_FILS_SHA384	15	/* SHA384 key derivation, using FILS */
+
+/* OSEN authenticated key management suite */
+#define OSEN_AKM_UNSPECIFIED	RSN_AKM_UNSPECIFIED	/* Over 802.1x */
+
+/* Key related defines */
+#define DOT11_MAX_DEFAULT_KEYS	4	/* number of default keys */
+#define DOT11_MAX_IGTK_KEYS		2
+#define DOT11_MAX_KEY_SIZE	32	/* max size of any key */
+#define DOT11_MAX_IV_SIZE	16	/* max size of any IV */
+#define DOT11_EXT_IV_FLAG	(1<<5)	/* flag to indicate IV is > 4 bytes */
+#define DOT11_WPA_KEY_RSC_LEN   8       /* WPA RSC key len */
+
+#define WEP1_KEY_SIZE		5	/* max size of any WEP key */
+#define WEP1_KEY_HEX_SIZE	10	/* size of WEP key in hex. */
+#define WEP128_KEY_SIZE		13	/* max size of any WEP key */
+#define WEP128_KEY_HEX_SIZE	26	/* size of WEP key in hex. */
+#define TKIP_MIC_SIZE		8	/* size of TKIP MIC */
+#define TKIP_EOM_SIZE		7	/* max size of TKIP EOM */
+#define TKIP_EOM_FLAG		0x5a	/* TKIP EOM flag byte */
+#define TKIP_KEY_SIZE		32	/* size of any TKIP key, includes MIC keys */
+#define TKIP_TK_SIZE		16
+#define TKIP_MIC_KEY_SIZE	8
+#define TKIP_MIC_AUTH_TX	16	/* offset to Authenticator MIC TX key */
+#define TKIP_MIC_AUTH_RX	24	/* offset to Authenticator MIC RX key */
+#define TKIP_MIC_SUP_RX		TKIP_MIC_AUTH_TX	/* offset to Supplicant MIC RX key */
+#define TKIP_MIC_SUP_TX		TKIP_MIC_AUTH_RX	/* offset to Supplicant MIC TX key */
+#define AES_KEY_SIZE		16	/* size of AES key */
+#define AES_MIC_SIZE		8	/* size of AES MIC */
+#define BIP_KEY_SIZE		16	/* size of BIP key */
+#define BIP_MIC_SIZE		8   /* sizeof BIP MIC */
+
+#define AES_GCM_MIC_SIZE	16	/* size of MIC for 128-bit GCM - .11adD9 */
+
+#define AES256_KEY_SIZE		32	/* size of AES 256 key - .11acD5 */
+#define AES256_MIC_SIZE		16	/* size of MIC for 256 bit keys, incl BIP */
+
+/* WCN */
+#define WCN_OUI			"\x00\x50\xf2"	/* WCN OUI */
+#define WCN_TYPE		4	/* WCN type */
+
+#ifdef BCMWAPI_WPI
+#define SMS4_KEY_LEN        16
+#define SMS4_WPI_CBC_MAC_LEN    16
+#endif
+
+/* 802.11r protocol definitions */
+
+/** Mobility Domain IE */
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mdid;		/* Mobility Domain Id */
+	uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS	0x01	/* Fast Bss transition over the DS support */
+#define FBT_MDID_CAP_RRP	0x02	/* Resource request protocol support */
+
+/** Fast Bss Transition IE */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mic_control;		/* Mic Control */
+	uint8 mic[16];
+	uint8 anonce[32];
+	uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+#define TIE_TYPE_RESERVED		0
+#define TIE_TYPE_REASSOC_DEADLINE	1
+#define TIE_TYPE_KEY_LIEFTIME		2
+#define TIE_TYPE_ASSOC_COMEBACK		3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+	uint8 id;
+	uint8 len;
+	uint8 type;		/* timeout interval type */
+	uint32 value;		/* timeout interval value */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+/** GTK ie */
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+	uint8 id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_len;
+	uint8 rsc[8];
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+/** Management MIC ie */
+BWL_PRE_PACKED_STRUCT struct mmic_ie {
+	uint8   id;					/* IE ID: DOT11_MNG_MMIE_ID */
+	uint8   len;				/* IE length */
+	uint16  key_id;				/* key id */
+	uint8   ipn[6];				/* ipn */
+	uint8   mic[16];			/* mic */
+} BWL_POST_PACKED_STRUCT;
+typedef struct mmic_ie mmic_ie_t;
+
+/* 802.11r-2008, 11A.10.3 - RRB frame format */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame {
+	uint8  frame_type; /* 1 for RRB */
+	uint8  packet_type; /* 0 for Request 1 for Response */
+	uint16 len;
+	uint8  cur_ap_addr[ETHER_ADDR_LEN];
+	uint8  data[1];	/* IEs Received/Sent in FT Action Req/Resp Frame */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t;
+
+#define DOT11_FT_RRB_FIXED_LEN 10
+#define DOT11_FT_REMOTE_FRAME_TYPE 1
+#define DOT11_FT_PACKET_REQ 0
+#define DOT11_FT_PACKET_RESP 1
+
+#define BSSID_INVALID           "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST         "\xFF\xFF\xFF\xFF\xFF\xFF"
+
+#ifdef BCMWAPI_WAI
+#define WAPI_IE_MIN_LEN 	20	/* WAPI IE min length */
+#define WAPI_VERSION		1	/* WAPI version */
+#define WAPI_VERSION_LEN	2	/* WAPI version length */
+#define WAPI_OUI		"\x00\x14\x72"	/* WAPI OUI */
+#define WAPI_OUI_LEN		DOT11_OUI_LEN	/* WAPI OUI length */
+#endif /* BCMWAPI_WAI */
+
+/* ************* WMM Parameter definitions. ************* */
+#define WMM_OUI			"\x00\x50\xF2"	/* WMM OUI */
+#define WMM_OUI_LEN		3		/* WMM OUI length */
+#define WMM_OUI_TYPE	2		/* WMM OUI type */
+#define WMM_VERSION		1
+#define WMM_VERSION_LEN	1
+
+/* WMM OUI subtype */
+#define WMM_OUI_SUBTYPE_PARAMETER	1
+#define WMM_PARAMETER_IE_LEN		24
+
+/** Link Identifier Element */
+BWL_PRE_PACKED_STRUCT struct link_id_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr	bssid;
+	struct ether_addr	tdls_init_mac;
+	struct ether_addr	tdls_resp_mac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct link_id_ie link_id_ie_t;
+#define TDLS_LINK_ID_IE_LEN		18
+
+/** Link Wakeup Schedule Element */
+BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
+	uint8 id;
+	uint8 len;
+	uint32 offset;			/* in ms between TSF0 and start of 1st Awake Window */
+	uint32 interval;		/* in ms between the start of 2 Awake Windows */
+	uint32 awake_win_slots;	/* in backoff slots, duration of Awake Window */
+	uint32 max_wake_win;	/* in ms, max duration of Awake Window */
+	uint16 idle_cnt;		/* number of consecutive Awake Windows */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wakeup_sch_ie wakeup_sch_ie_t;
+#define TDLS_WAKEUP_SCH_IE_LEN		18
+
+/** Channel Switch Timing Element */
+BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
+	uint8 id;
+	uint8 len;
+	uint16 switch_time;		/* in ms, time to switch channels */
+	uint16 switch_timeout;	/* in ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
+#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN		4
+
+/** PTI Control Element */
+BWL_PRE_PACKED_STRUCT struct pti_control_ie {
+	uint8 id;
+	uint8 len;
+	uint8 tid;
+	uint16 seq_control;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pti_control_ie pti_control_ie_t;
+#define TDLS_PTI_CONTROL_IE_LEN		3
+
+/** PU Buffer Status Element */
+BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
+	uint8 id;
+	uint8 len;
+	uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pu_buffer_status_ie pu_buffer_status_ie_t;
+#define TDLS_PU_BUFFER_STATUS_IE_LEN	1
+#define TDLS_PU_BUFFER_STATUS_AC_BK		1
+#define TDLS_PU_BUFFER_STATUS_AC_BE		2
+#define TDLS_PU_BUFFER_STATUS_AC_VI		4
+#define TDLS_PU_BUFFER_STATUS_AC_VO		8
+
+/* TDLS Action Field Values */
+#define TDLS_SETUP_REQ				0
+#define TDLS_SETUP_RESP				1
+#define TDLS_SETUP_CONFIRM			2
+#define TDLS_TEARDOWN				3
+#define TDLS_PEER_TRAFFIC_IND			4
+#define TDLS_CHANNEL_SWITCH_REQ			5
+#define TDLS_CHANNEL_SWITCH_RESP		6
+#define TDLS_PEER_PSM_REQ			7
+#define TDLS_PEER_PSM_RESP			8
+#define TDLS_PEER_TRAFFIC_RESP			9
+#define TDLS_DISCOVERY_REQ			10
+
+/* 802.11z TDLS Public Action Frame action field */
+#define TDLS_DISCOVERY_RESP			14
+
+/* 802.11u GAS action frames */
+#define GAS_REQUEST_ACTION_FRAME				10
+#define GAS_RESPONSE_ACTION_FRAME				11
+#define GAS_COMEBACK_REQUEST_ACTION_FRAME		12
+#define GAS_COMEBACK_RESPONSE_ACTION_FRAME		13
+
+/* FTM - fine timing measurement public action frames */
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_req {
+	uint8 category;				/* category of action frame (4) */
+	uint8 action;				/* public action (32) */
+	uint8 trigger;				/* trigger/continue? */
+	/* optional lci, civic loc, ftm params */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_req dot11_ftm_req_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm {
+	uint8 category;				/* category of action frame (4) */
+	uint8 action;				/* public action (33) */
+	uint8 dialog;				/* dialog token */
+	uint8 follow_up;			/* follow up dialog token */
+	uint8 tod[6];				/* t1 - last depart timestamp */
+	uint8 toa[6];				/* t4 - last ack arrival timestamp */
+	uint8 tod_err[2];			/* t1 error */
+	uint8 toa_err[2];			/* t4 error */
+	/* optional lci report, civic loc report, ftm params */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm dot11_ftm_t;
+
+
+#define DOT11_FTM_ERR_NOT_CONT_OFFSET 1
+#define DOT11_FTM_ERR_NOT_CONT_MASK 0x80
+#define DOT11_FTM_ERR_NOT_CONT_SHIFT 7
+#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \
+	DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT)
+#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\
+	uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \
+	_err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \
+	_err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \
+	(_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \
+} while (0)
+
+#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0
+#define DOT11_FTM_ERR_MAX_ERR_MASK 0x7fff
+#define DOT11_FTM_ERR_MAX_ERR_SHIFT 0
+#define DOT11_FTM_ERR_MAX_ERR(_err) (((((_err)[1] & 0x7f) << 8) | (_err)[0]))
+#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\
+	uint16 _val2; \
+	uint16 _not_cont; \
+	_val2 =  (((_val) & DOT11_FTM_ERR_MAX_ERR_MASK) << DOT11_FTM_ERR_MAX_ERR_SHIFT); \
+	_val2 = (_val2 > 0x3fff) ? 0 : _val2; /* not expecting > 16ns error */ \
+	_not_cont = DOT11_FTM_ERR_NOT_CONT(_err); \
+	(_err)[0] = _val2 & 0xff; \
+	(_err)[1] = (_val2 >> 8) & 0xff; \
+	DOT11_FTM_ERR_SET_NOT_CONT(_err, _not_cont); \
+} while (0)
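+
+/* Illustrative sketch, not part of the original header: filling the 2-byte
+ * tod_err/toa_err fields of dot11_ftm_t with the accessors above. The function and
+ * parameter names and the BCMDHD_80211_EXAMPLES guard are ours; units of max_err
+ * follow the macros.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static void
+example_ftm_fill_err(uint8 err[2], uint16 max_err, uint8 not_continuous)
+{
+	err[0] = 0;
+	err[1] = 0;
+	DOT11_FTM_ERR_SET_NOT_CONT(err, not_continuous);
+	/* SET_MAX_ERR preserves the not-continuous bit while writing the max error */
+	DOT11_FTM_ERR_SET_MAX_ERR(err, max_err);
+}
+#endif /* BCMDHD_80211_EXAMPLES */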
+
+#if defined(DOT11_FTM_ERR_ROM_COMPAT)
+/* incorrect defs - here for ROM compatibility */
+#undef DOT11_FTM_ERR_NOT_CONT_OFFSET
+#undef DOT11_FTM_ERR_NOT_CONT_MASK
+#undef DOT11_FTM_ERR_NOT_CONT_SHIFT
+#undef DOT11_FTM_ERR_NOT_CONT
+#undef DOT11_FTM_ERR_SET_NOT_CONT
+
+#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0
+#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001
+#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0
+#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \
+	DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT)
+#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\
+	uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \
+	_err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \
+	_err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \
+	(_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \
+} while (0)
+
+#undef DOT11_FTM_ERR_MAX_ERR_OFFSET
+#undef DOT11_FTM_ERR_MAX_ERR_MASK
+#undef DOT11_FTM_ERR_MAX_ERR_SHIFT
+#undef DOT11_FTM_ERR_MAX_ERR
+#undef DOT11_FTM_ERR_SET_MAX_ERR
+
+#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0
+#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7
+#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1
+#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1)
+#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\
+	uint16 _val2; \
+	_val2 =  (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\
+		 ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \
+	(_err)[0] = _val2 & 0xff; \
+	(_err)[1] = _val2 >> 8 & 0xff; \
+} while (0)
+#endif /* DOT11_FTM_ERR_ROM_COMPAT */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_params {
+	uint8 id;		/* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */
+	uint8 len;
+	uint8 info[9];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_params dot11_ftm_params_t;
+#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2)
+
+#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift))
+#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\
+	uint8 _ptmp = (_p)->info[_off] & ~(_mask); \
+	(_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \
+} while (0)
+
+#define FTM_PARAMS_STATUS_OFFSET 0
+#define FTM_PARAMS_STATUS_MASK 0x03
+#define FTM_PARAMS_STATUS_SHIFT 0
+#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \
+	FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT)
+#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status)
+
+#define FTM_PARAMS_VALUE_OFFSET 0
+#define FTM_PARAMS_VALUE_MASK 0x7c
+#define FTM_PARAMS_VALUE_SHIFT 2
+#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \
+	FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT)
+#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value)
+#define FTM_PARAMS_MAX_VALUE 32
+
+#define FTM_PARAMS_NBURSTEXP_OFFSET 1
+#define FTM_PARAMS_NBURSTEXP_MASK 0x0f
+#define FTM_PARAMS_NBURSTEXP_SHIFT 0
+#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \
+	FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT)
+#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \
+	_bexp)
+
+#define FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p))
+
+enum {
+	FTM_PARAMS_NBURSTEXP_NOPREF = 15
+};
+
+enum {
+	FTM_PARAMS_BURSTTMO_NOPREF = 15
+};
+
+#define FTM_PARAMS_BURSTTMO_OFFSET 1
+#define FTM_PARAMS_BURSTTMO_MASK 0xf0
+#define FTM_PARAMS_BURSTTMO_SHIFT 4
+#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \
+	FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT)
+/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */
+#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2)
+
+#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250)
+#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \
+	(_val) == FTM_PARAMS_BURSTTMO_NOPREF)
+#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */
+#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */
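+
+/* Illustrative sketch, not part of the original header: decoding the burst duration
+ * field into microseconds; a field value of 11 gives (1 << 9) * 250 = 128000 us, the
+ * maximum above. The function name and the BCMDHD_80211_EXAMPLES guard are ours.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static uint32
+example_ftm_bursttmo_us(uint8 field_val)
+{
+	if (field_val == FTM_PARAMS_BURSTTMO_NOPREF || !FTM_PARAMS_BURSTTMO_VALID(field_val))
+		return 0;	/* no preference or out of range */
+	return FTM_PARAMS_BURSTTMO_USEC(field_val);
+}
+#endif /* BCMDHD_80211_EXAMPLES */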
+
+#define FTM_PARAMS_MINDELTA_OFFSET 2
+#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100)
+#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \
+	(_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \
+} while (0)
+
+enum {
+	FTM_PARAMS_MINDELTA_NOPREF = 0
+};
+
+#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3])
+#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \
+	(_p)->info[3] = (_partial_tsf) & 0xff; \
+	(_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \
+} while (0)
+
+#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL
+#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10
+#define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16
+#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff
+
+/* FTM can indicate up to 62k TUs forward and 1k TU backward */
+#define FTM_PARAMS_TSF_FW_HI (63487 << 10)	/* in micro sec */
+#define FTM_PARAMS_TSF_BW_LOW (64512 << 10)	/* in micro sec */
+#define FTM_PARAMS_TSF_BW_HI (65535 << 10)	/* in micro sec */
+#define FTM_PARAMS_TSF_FW_MAX FTM_PARAMS_TSF_FW_HI
+#define FTM_PARAMS_TSF_BW_MAX (FTM_PARAMS_TSF_BW_HI - FTM_PARAMS_TSF_BW_LOW)
+
+#define FTM_PARAMS_PTSFNOPREF_OFFSET 5
+#define FTM_PARAMS_PTSFNOPREF_MASK 0x1
+#define FTM_PARAMS_PTSFNOPREF_SHIFT 0
+#define FTM_PARAMS_PTSFNOPREF(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_PTSFNOPREF_OFFSET, \
+	FTM_PARAMS_PTSFNOPREF_MASK, FTM_PARAMS_PTSFNOPREF_SHIFT)
+#define FTM_PARAMS_SET_PTSFNOPREF(_p, _nopref) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_PTSFNOPREF_OFFSET, FTM_PARAMS_PTSFNOPREF_MASK, \
+	FTM_PARAMS_PTSFNOPREF_SHIFT, _nopref)
+
+#define FTM_PARAMS_ASAP_OFFSET 5
+#define FTM_PARAMS_ASAP_MASK 0x4
+#define FTM_PARAMS_ASAP_SHIFT 2
+#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \
+	FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT)
+#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap)
+
+/* FTM1 - AKA ASAP Capable */
+#define FTM_PARAMS_FTM1_OFFSET 5
+#define FTM_PARAMS_FTM1_MASK 0x02
+#define FTM_PARAMS_FTM1_SHIFT 1
+#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \
+	FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT)
+#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1)
+
+#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5
+#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8
+#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3
+#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \
+	FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT)
+#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \
+	FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms)
+
+enum {
+	FTM_PARAMS_FTMS_PER_BURST_NOPREF = 0
+};
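+
+/* Illustrative sketch, not part of the original header: requesting 8 FTMs per burst
+ * and a burst exponent of 2 (i.e. 4 bursts) in a dot11_ftm_params_t. The function
+ * name and the BCMDHD_80211_EXAMPLES guard are ours.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static void
+example_ftm_request_shape(dot11_ftm_params_t *p)
+{
+	FTM_PARAMS_SET_NBURSTEXP(p, 2);		/* FTM_PARAMS_NBURST(p) == 4 */
+	FTM_PARAMS_SET_FTMS_PER_BURST(p, 8);
+}
+#endif /* BCMDHD_80211_EXAMPLES */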
+
+#define FTM_PARAMS_CHAN_INFO_OFFSET 6
+#define FTM_PARAMS_CHAN_INFO_MASK 0xfc
+#define FTM_PARAMS_CHAN_INFO_SHIFT 2
+#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \
+	FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT)
+#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \
+	FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci)
+
+/* burst period - units of 100ms */
+#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7])
+#define FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\
+	(_p)->info[7] = (_bp) & 0xff; \
+	(_p)->info[8] = ((_bp) >> 8) & 0xff; \
+} while (0)
+
+#define FTM_PARAMS_BURST_PERIOD_MS(_p) (FTM_PARAMS_BURST_PERIOD(_p) * 100)
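+
+/* Illustrative sketch, not part of the original header: programming a burst period
+ * given in milliseconds; the on-air field is in units of 100 ms. The function name
+ * and the BCMDHD_80211_EXAMPLES guard are ours.
+ */
+#ifdef BCMDHD_80211_EXAMPLES
+static void
+example_ftm_set_burst_period_ms(dot11_ftm_params_t *p, uint32 period_ms)
+{
+	FTM_PARAMS_SET_BURST_PERIOD(p, period_ms / 100);	/* e.g. 5000 ms -> 50 */
+}
+#endif /* BCMDHD_80211_EXAMPLES */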
+
+enum {
+	FTM_PARAMS_BURST_PERIOD_NOPREF = 0
+};
+
+/* FTM status values - last updated from 11mcD4.0 */
+enum {
+	FTM_PARAMS_STATUS_RESERVED	= 0,
+	FTM_PARAMS_STATUS_SUCCESSFUL = 1,
+	FTM_PARAMS_STATUS_INCAPABLE = 2,
+	FTM_PARAMS_STATUS_FAILED = 3,
+	/* Below are obsolete */
+	FTM_PARAMS_STATUS_OVERRIDDEN = 4,
+	FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5,
+	FTM_PARAMS_STATUS_ASAP_FAILED = 6,
+	/* rest are reserved */
+};
+
+enum {
+	FTM_PARAMS_CHAN_INFO_NO_PREF 		= 0,
+	FTM_PARAMS_CHAN_INFO_RESERVE1 		= 1,
+	FTM_PARAMS_CHAN_INFO_RESERVE2 		= 2,
+	FTM_PARAMS_CHAN_INFO_RESERVE3 		= 3,
+	FTM_PARAMS_CHAN_INFO_NON_HT_5 		= 4,
+	FTM_PARAMS_CHAN_INFO_RESERVE5		= 5,
+	FTM_PARAMS_CHAN_INFO_NON_HT_10 		= 6,
+	FTM_PARAMS_CHAN_INFO_RESERVE7		= 7,
+	FTM_PARAMS_CHAN_INFO_NON_HT_20 		= 8, /* excludes 2.4G, and High rate DSSS */
+	FTM_PARAMS_CHAN_INFO_HT_MF_20 		= 9,
+	FTM_PARAMS_CHAN_INFO_VHT_20 		= 10,
+	FTM_PARAMS_CHAN_INFO_HT_MF_40 		= 11,
+	FTM_PARAMS_CHAN_INFO_VHT_40 		= 12,
+	FTM_PARAMS_CHAN_INFO_VHT_80 		= 13,
+	FTM_PARAMS_CHAN_INFO_VHT_80_80 		= 14,
+	FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS 	= 15,
+	FTM_PARAMS_CHAN_INFO_VHT_160		= 16,
+	/* Reserved from 17 - 30 */
+	FTM_PARAMS_CHAN_INFO_DMG_2160 		= 31,
+	/* Reserved from 32 - 63 */
+	FTM_PARAMS_CHAN_INFO_MAX		= 63
+};
+
+/* tag_ID/length/value_buffer tuple */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT ftm_vs_tlv_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie {
+	uint8 id;						/* DOT11_MNG_VS_ID */
+	uint8 len;						/* length following */
+	uint8 oui[3];					/* BRCM_PROP_OUI (or Customer) */
+	uint8 sub_type;					/* BRCM_FTM_IE_TYPE (or Customer) */
+	uint8 version;
+	ftm_vs_tlv_t	tlvs[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t;
+
+/* ftm vs api version */
+#define BCM_FTM_VS_PARAMS_VERSION 0x01
+
+/* ftm vendor specific information tlv types */
+enum {
+	FTM_VS_TLV_NONE = 0,
+	FTM_VS_TLV_REQ_PARAMS = 1,		/* additional request params (in FTM_REQ) */
+	FTM_VS_TLV_MEAS_INFO = 2,		/* measurement information (in FTM_MEAS) */
+	FTM_VS_TLV_SEC_PARAMS = 3,		/* security parameters (in either) */
+	FTM_VS_TLV_SEQ_PARAMS = 4,		/* toast parameters (FTM_REQ, BRCM proprietary) */
+	FTM_VS_TLV_MF_BUF = 5,			/* multi frame buffer - may span ftm vs ie's */
+	FTM_VS_TLV_TIMING_PARAMS = 6            /* timing adjustments */
+	/* add additional types above */
+};
+
+/* the following definitions are *DEPRECATED* and have moved to implementation files. They
+ * are retained here because some earlier (May 2016) branches still use them
+ */
+#define FTM_TPK_LEN            16
+#define FTM_RI_RR_BUF_LEN      32
+#define FTM_TPK_RI_RR_LEN      13
+#define FTM_TPK_RI_RR_LEN_SECURE_2_0    28
+#define FTM_TPK_DIGEST_LEN     32
+#define FTM_TPK_BUFFER_LEN     128
+#define FTM_TPK_RI_PHY_LEN     7
+#define FTM_TPK_RR_PHY_LEN     7
+#define FTM_TPK_DATA_BUFFER_LEN 88
+#define FTM_TPK_LEN_SECURE_2_0          32
+#define FTM_TPK_RI_PHY_LEN_SECURE_2_0  14
+#define FTM_TPK_RR_PHY_LEN_SECURE_2_0  14
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_params {
+	uint8 id;                       /* DOT11_MNG_VS_ID */
+	uint8 len;
+	uint8 oui[3];                   /* Proprietary OUI, BRCM_PROP_OUI */
+	uint8 bcm_vs_id;
+	ftm_vs_tlv_t ftm_tpk_ri_rr[1];          /* ftm_TPK_ri_rr place holder */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_vs_params dot11_ftm_vs_tpk_ri_rr_params_t;
+#define DOT11_FTM_VS_LEN  (sizeof(dot11_ftm_vs_tpk_ri_rr_params_t) - TLV_HDR_LEN)
+/* end *DEPRECATED* ftm definitions */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_sync_info {
+	uint8 id;		/* Extended - 255 11mc D4.3  */
+	uint8 len;
+	uint8 id_ext;
+	uint8 tsf_sync_info[4];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_sync_info dot11_ftm_sync_info_t;
+
+/* ftm tsf sync info ie len - includes id ext */
+#define DOT11_FTM_SYNC_INFO_IE_LEN (sizeof(dot11_ftm_sync_info_t) - TLV_HDR_LEN)
+
+#define DOT11_FTM_IS_SYNC_INFO_IE(_ie) (\
+	DOT11_MNG_IE_ID_EXT_MATCH(_ie, DOT11_MNG_FTM_SYNC_INFO) && \
+	(_ie)->len == DOT11_FTM_SYNC_INFO_IE_LEN)
+
+/* 802.11u interworking access network options */
+#define IW_ANT_MASK					0x0f
+#define IW_INTERNET_MASK				0x10
+#define IW_ASRA_MASK					0x20
+#define IW_ESR_MASK					0x40
+#define IW_UESA_MASK					0x80
+
+/* 802.11u interworking access network type */
+#define IW_ANT_PRIVATE_NETWORK				0
+#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST		1
+#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK		2
+#define IW_ANT_FREE_PUBLIC_NETWORK			3
+#define IW_ANT_PERSONAL_DEVICE_NETWORK			4
+#define IW_ANT_EMERGENCY_SERVICES_NETWORK		5
+#define IW_ANT_TEST_NETWORK				14
+#define IW_ANT_WILDCARD_NETWORK				15
+
+#define IW_ANT_LEN			1
+#define IW_VENUE_LEN			2
+#define IW_HESSID_LEN			6
+#define IW_HESSID_OFF			(IW_ANT_LEN + IW_VENUE_LEN)
+#define IW_MAX_LEN			(IW_ANT_LEN + IW_VENUE_LEN + IW_HESSID_LEN)
+
+/* 802.11u advertisement protocol */
+#define ADVP_ANQP_PROTOCOL_ID				0
+#define ADVP_MIH_PROTOCOL_ID				1
+
+/* 802.11u advertisement protocol masks */
+#define ADVP_QRL_MASK					0x7f
+#define ADVP_PAME_BI_MASK				0x80
+
+/* 802.11u advertisement protocol values */
+#define ADVP_QRL_REQUEST				0x00
+#define ADVP_QRL_RESPONSE				0x7f
+#define ADVP_PAME_BI_DEPENDENT				0x00
+#define ADVP_PAME_BI_INDEPENDENT			ADVP_PAME_BI_MASK
+
+/* 802.11u ANQP information ID */
+#define ANQP_ID_QUERY_LIST				256
+#define ANQP_ID_CAPABILITY_LIST				257
+#define ANQP_ID_VENUE_NAME_INFO				258
+#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO		259
+#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO	260
+#define ANQP_ID_ROAMING_CONSORTIUM_LIST			261
+#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO	262
+#define ANQP_ID_NAI_REALM_LIST				263
+#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO		264
+#define ANQP_ID_AP_GEOSPATIAL_LOCATION			265
+#define ANQP_ID_AP_CIVIC_LOCATION			266
+#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI		267
+#define ANQP_ID_DOMAIN_NAME_LIST			268
+#define ANQP_ID_EMERGENCY_ALERT_ID_URI			269
+#define ANQP_ID_EMERGENCY_NAI				271
+#define ANQP_ID_VENDOR_SPECIFIC_LIST			56797
+
+/* 802.11u ANQP OUI */
+#define ANQP_OUI_SUBTYPE				9
+
+/* 802.11u venue name */
+#define VENUE_LANGUAGE_CODE_SIZE			3
+#define VENUE_NAME_SIZE					255
+
+/* 802.11u venue groups */
+#define VENUE_UNSPECIFIED				0
+#define VENUE_ASSEMBLY					1
+#define VENUE_BUSINESS					2
+#define VENUE_EDUCATIONAL				3
+#define VENUE_FACTORY					4
+#define VENUE_INSTITUTIONAL				5
+#define VENUE_MERCANTILE				6
+#define VENUE_RESIDENTIAL				7
+#define VENUE_STORAGE					8
+#define VENUE_UTILITY					9
+#define VENUE_VEHICULAR					10
+#define VENUE_OUTDOOR					11
+
+/* 802.11u network authentication type indicator */
+#define NATI_UNSPECIFIED				-1
+#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS		0
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED		1
+#define NATI_HTTP_HTTPS_REDIRECTION			2
+#define NATI_DNS_REDIRECTION				3
+
+/* 802.11u IP address type availability - IPv6 */
+#define IPA_IPV6_SHIFT					0
+#define IPA_IPV6_MASK					(0x03 << IPA_IPV6_SHIFT)
+#define	IPA_IPV6_NOT_AVAILABLE				0x00
+#define IPA_IPV6_AVAILABLE				0x01
+#define IPA_IPV6_UNKNOWN_AVAILABILITY			0x02
+
+/* 802.11u IP address type availability - IPv4 */
+#define IPA_IPV4_SHIFT					2
+#define IPA_IPV4_MASK					(0x3f << IPA_IPV4_SHIFT)
+#define	IPA_IPV4_NOT_AVAILABLE				0x00
+#define IPA_IPV4_PUBLIC					0x01
+#define IPA_IPV4_PORT_RESTRICT				0x02
+#define IPA_IPV4_SINGLE_NAT				0x03
+#define IPA_IPV4_DOUBLE_NAT				0x04
+#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT		0x05
+#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT		0x06
+#define IPA_IPV4_UNKNOWN_AVAILABILITY			0x07
+
+/* 802.11u NAI realm encoding */
+#define REALM_ENCODING_RFC4282				0
+#define REALM_ENCODING_UTF8				1
+
+/* 802.11u IANA EAP method type numbers */
+#define REALM_EAP_TLS					13
+#define REALM_EAP_LEAP					17
+#define REALM_EAP_SIM					18
+#define REALM_EAP_TTLS					21
+#define REALM_EAP_AKA					23
+#define REALM_EAP_PEAP					25
+#define REALM_EAP_FAST					43
+#define REALM_EAP_PSK					47
+#define REALM_EAP_AKAP					50
+#define REALM_EAP_EXPANDED				254
+
+/* 802.11u authentication ID */
+#define REALM_EXPANDED_EAP				1
+#define REALM_NON_EAP_INNER_AUTHENTICATION		2
+#define REALM_INNER_AUTHENTICATION_EAP			3
+#define REALM_EXPANDED_INNER_EAP			4
+#define REALM_CREDENTIAL				5
+#define REALM_TUNNELED_EAP_CREDENTIAL			6
+#define REALM_VENDOR_SPECIFIC_EAP			221
+
+/* 802.11u non-EAP inner authentication type */
+#define REALM_RESERVED_AUTH				0
+#define REALM_PAP					1
+#define REALM_CHAP					2
+#define REALM_MSCHAP					3
+#define REALM_MSCHAPV2					4
+
+/* 802.11u credential type */
+#define REALM_SIM					1
+#define REALM_USIM					2
+#define REALM_NFC					3
+#define REALM_HARDWARE_TOKEN				4
+#define REALM_SOFTOKEN					5
+#define REALM_CERTIFICATE				6
+#define REALM_USERNAME_PASSWORD				7
+#define REALM_SERVER_SIDE				8
+#define REALM_RESERVED_CRED				9
+#define REALM_VENDOR_SPECIFIC_CRED			10
+
+/* 802.11u 3GPP PLMN */
+#define G3PP_GUD_VERSION				0
+#define G3PP_PLMN_LIST_IE				0
+
+/* AP Location Public ID Info encoding */
+#define PUBLIC_ID_URI_FQDN_SE_ID		0
+/* URI/FQDN Descriptor field values */
+#define LOCATION_ENCODING_HELD			1
+#define LOCATION_ENCODING_SUPL			2
+#define URI_FQDN_SIZE					255
+
+/** hotspot2.0 indication element (vendor specific) */
+BWL_PRE_PACKED_STRUCT struct hs20_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 config;
+} BWL_POST_PACKED_STRUCT;
+typedef struct hs20_ie hs20_ie_t;
+#define HS20_IE_LEN 5	/* HS20 IE length */
+
+/** IEEE 802.11 Annex E */
+typedef enum {
+	DOT11_2GHZ_20MHZ_CLASS_12	= 81,	/* Ch 1-11 */
+	DOT11_5GHZ_20MHZ_CLASS_1	= 115,	/* Ch 36-48 */
+	DOT11_5GHZ_20MHZ_CLASS_2_DFS	= 118,	/* Ch 52-64 */
+	DOT11_5GHZ_20MHZ_CLASS_3	= 124,	/* Ch 149-161 */
+	DOT11_5GHZ_20MHZ_CLASS_4_DFS	= 121,	/* Ch 100-140 */
+	DOT11_5GHZ_20MHZ_CLASS_5	= 125,	/* Ch 149-165 */
+	DOT11_5GHZ_40MHZ_CLASS_22	= 116,	/* Ch 36-44,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_23_DFS 	= 119,	/* Ch 52-60,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_24_DFS	= 122,	/* Ch 100-132, lower */
+	DOT11_5GHZ_40MHZ_CLASS_25	= 126,	/* Ch 149-157, lower */
+	DOT11_5GHZ_40MHZ_CLASS_27	= 117,	/* Ch 40-48,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_28_DFS	= 120,	/* Ch 56-64,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_29_DFS	= 123,	/* Ch 104-136, upper */
+	DOT11_5GHZ_40MHZ_CLASS_30	= 127,	/* Ch 153-161, upper */
+	DOT11_2GHZ_40MHZ_CLASS_32	= 83,	/* Ch 1-7,     lower */
+	DOT11_2GHZ_40MHZ_CLASS_33	= 84,	/* Ch 5-11,    upper */
+} dot11_op_class_t;
+
+/* QoS map */
+#define QOS_MAP_FIXED_LENGTH	(8 * 2)	/* DSCP ranges fixed with 8 entries */
+
+#define BCM_AIBSS_IE_TYPE 56
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/802.11e.h b/drivers/net/wireless/bcmdhd/include/802.11e.h
new file mode 100644
index 0000000..f2021e5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/802.11e.h
@@ -0,0 +1,134 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.11e.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN           2           /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF          2           /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET	0		/* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET		1		/* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET		2		/* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET		3		/* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN];	/* WME_OUI */
+	uint8 type;					/* WME_TYPE */
+	uint8 subtype;				/* WME_SUBTYPE_TSPEC */
+	uint8 version;				/* WME_VERSION */
+	tsinfo_t tsinfo;			/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;	/* Minimum Service Interval (us) */
+	uint32 max_srv_interval;	/* Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/* Inactivity Interval (us) */
+	uint32 suspension_interval; /* Suspension Interval (us) */
+	uint32 srv_start_time;		/* Service Start Time (us) */
+	uint32 min_data_rate;		/* Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/* Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/* Peak Data Rate (bps) */
+	uint32 max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint32 delay_bound;			/* Delay Bound (us) */
+	uint32 min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;			/* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time;			/* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))		/* not including 2-bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT		1	/* TS info. TID shift */
+#define TS_INFO_TID_MASK		(0xf << TS_INFO_TID_SHIFT)	/* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT	7	/* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK	(0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT	5	/* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT		2		/* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK		(1 << TS_INFO_PSB_SHIFT)	/* TS info. PSB mask */
+#define TS_INFO_UPLINK			(0 << TS_INFO_DIRECTION_SHIFT)	/* TS info. uplink */
+#define TS_INFO_DOWNLINK		(1 << TS_INFO_DIRECTION_SHIFT)	/* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL	(3 << TS_INFO_DIRECTION_SHIFT)	/* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT	3	/* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK	(0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt)	((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt)	((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt)	((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt)	((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id)	((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio)	((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
+
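+/* Illustrative sketch of the TSINFO accessor macros above. "ts" is a
+ * hypothetical tspec_t filled in elsewhere; note that the SET macros only
+ * compute the new octet value, so the caller stores it back:
+ *
+ *	uint8 tid = WLC_CAC_GET_TID(ts.tsinfo);
+ *	uint8 dir = WLC_CAC_GET_DIR(ts.tsinfo);
+ *	ts.tsinfo.octets[0] = (uint8)WLC_CAC_SET_TID(ts.tsinfo, 5);
+ */
+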
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN		5	/* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF		3	/* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT		1000	/* default ADDTS response timeout in ms */
+						/* DEFVAL dot11ADDTSResponseTimeout = 1s */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED	0	/* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM	1	/* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW	3	/* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE	47	/* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS		36	/* STA leave QBSS */
+#define DOT11E_STATUS_END_TS				37	/* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS			38	/* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT		39	/* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/802.11s.h b/drivers/net/wireless/bcmdhd/include/802.11s.h
new file mode 100644
index 0000000..8ad1693
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/802.11s.h
@@ -0,0 +1,336 @@
+/*
+ * Fundamental types and constants relating to 802.11s Mesh
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.11s.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _802_11s_h_
+#define _802_11s_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define DOT11_MESH_FLAGS_AE_MASK	0x3
+#define DOT11_MESH_FLAGS_AE_SHIFT	0
+
+#define DOT11_MESH_CONNECTED_AS_SET         7
+#define DOT11_MESH_NUMBER_PEERING_SET       1
+#define DOT11_MESH_MESH_GWSET               0
+
+#define DOT11_MESH_ACTION_LINK_MET_REP      0
+#define DOT11_MESH_ACTION_PATH_SEL          1
+#define DOT11_MESH_ACTION_GATE_ANN          2
+#define DOT11_MESH_ACTION_CONG_CONT_NOTIF   3
+#define DOT11_MESH_ACTION_MCCA_SETUP_REQ    4
+#define DOT11_MESH_ACTION_MCCA_SETUP_REP    5
+#define DOT11_MESH_ACTION_MCCA_ADVT_REQ     6
+#define DOT11_MESH_ACTION_MCCA_ADVT         7
+#define DOT11_MESH_ACTION_MCCA_TEARDOWN     8
+#define DOT11_MESH_ACTION_TBTT_ADJ_REQ      9
+#define DOT11_MESH_ACTION_TBTT_ADJ_RESP     10
+
+/* self-protected action field values: 7-57v24 */
+#define DOT11_SELFPROT_ACTION_MESH_PEER_OPEN    1
+#define DOT11_SELFPROT_ACTION_MESH_PEER_CONFM   2
+#define DOT11_SELFPROT_ACTION_MESH_PEER_CLOSE   3
+#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_INF  4
+#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_ACK  5
+
+#define DOT11_MESH_AUTH_PROTO_NONE        0
+#define DOT11_MESH_AUTH_PROTO_SAE         1
+#define DOT11_MESH_AUTH_PROTO_8021X       2
+#define DOT11_MESH_AUTH_PROTO_VS        255
+
+#define DOT11_MESH_PATHSEL_LEN   2
+#define DOT11_MESH_PERR_LEN1     2   /* Least PERR length fixed */
+#define DOT11_MESH_PERR_LEN2     13  /* Least PERR length variable */
+#define DOT11_MESH_PREP_LEN      31  /* Least PREP length */
+#define DOT11_MESH_PREQ_LEN      37  /* Least PREQ length */
+
+#define DOT11_MESH_PATHSEL_PROTID_HWMP    1
+#define DOT11_MESH_PATHSEL_METRICID_ALM   1 /* Air link metric */
+#define DOT11_MESH_CONGESTCTRL_NONE       0
+#define DOT11_MESH_CONGESTCTRL_SP         1
+#define DOT11_MESH_SYNCMETHOD_NOFFSET     1
+
+BWL_PRE_PACKED_STRUCT struct dot11_meshctrl_hdr {
+	uint8               flags;      /* flag bits such as ae etc */
+	uint8               ttl;        /* time to live */
+	uint32              seq;        /* sequence control */
+	struct ether_addr   a5;         /* optional address 5 */
+	struct ether_addr   a6;         /* optional address 6 */
+} BWL_POST_PACKED_STRUCT;
+
+/* Mesh Path Selection Action Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_mesh_pathsel {
+	uint8   category;
+	uint8   meshaction;
+	uint8   data[];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mesh_pathsel dot11_mesh_pathsel_t;
+
+/*  Mesh PREQ IE */
+BWL_PRE_PACKED_STRUCT struct mesh_preq_ie {
+	uint8   id;
+	uint8   len;
+	uint8   flags;
+	uint8   hop_count;
+	uint8   ttl;
+	uint32  pathdis_id;
+	struct  ether_addr  originator_addr;
+	uint32  originator_seq;
+	union {
+		BWL_PRE_PACKED_STRUCT struct {
+			struct ether_addr target_ext_add;
+			uint32  lifetime;
+			uint32  metric;
+			uint8   target_count;
+			uint8   data[];
+		} BWL_POST_PACKED_STRUCT oea;
+
+		BWL_PRE_PACKED_STRUCT struct {
+			uint32  lifetime;
+			uint32  metric;
+			uint8   target_count;
+			uint8   data[];
+		} BWL_POST_PACKED_STRUCT noea;
+	} u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_preq_ie mesh_preq_ie_t;
+
+/* Target info (part of Mesh PREQ IE) */
+BWL_PRE_PACKED_STRUCT struct mesh_targetinfo {
+	uint8	target_flag;
+	struct	ether_addr	target_addr;
+	uint32	target_seq;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_targetinfo mesh_targetinfo_t;
+
+
+/* Mesh PREP IE */
+BWL_PRE_PACKED_STRUCT struct mesh_prep_ie {
+	uint8	id;
+	uint8	len;
+	uint8	flags;
+	uint8	hop_count;
+	uint8	ttl;
+	struct	ether_addr	target_addr;
+	uint32	target_seq;
+	union {
+		BWL_PRE_PACKED_STRUCT struct {
+			struct ether_addr target_ext_add;
+			uint32  lifetime;
+			uint32  metric;
+			uint8   target_count;
+	                struct	ether_addr	originator_addr;
+	                uint32	originator_seq;
+		} BWL_POST_PACKED_STRUCT oea;
+
+		BWL_PRE_PACKED_STRUCT struct {
+			uint32  lifetime;
+			uint32  metric;
+			uint8   target_count;
+	                struct	ether_addr	originator_addr;
+	                uint32	originator_seq;
+		} BWL_POST_PACKED_STRUCT noea;
+	} u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_prep_ie mesh_prep_ie_t;
+
+
+/* Mesh PERR IE */
+struct mesh_perr_ie {
+	uint8	id;
+	uint8	len;
+	uint8	ttl;
+	uint8	num_dest;
+	uint8	data[];
+};
+typedef struct mesh_perr_ie mesh_perr_ie_t;
+
+/* Destination info is part of PERR IE */
+BWL_PRE_PACKED_STRUCT struct mesh_perr_destinfo {
+	uint8	flags;
+	struct	ether_addr	destination_addr;
+	uint32	dest_seq;
+	union {
+		BWL_PRE_PACKED_STRUCT struct {
+			struct  ether_addr      dest_ext_addr;
+		} BWL_POST_PACKED_STRUCT dea;
+
+		BWL_PRE_PACKED_STRUCT struct {
+		/* 1 byte reason code to be populated manually in software */
+			uint16	reason_code;
+		} BWL_POST_PACKED_STRUCT nodea;
+	} u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_perr_destinfo mesh_perr_destinfo_t;
+
+/* Mesh peering action frame hdr */
+BWL_PRE_PACKED_STRUCT struct mesh_peering_frmhdr {
+	uint8	category;
+	uint8	action;
+	union	{
+		struct {
+			uint16	capability;
+		} open;
+		struct {
+			uint16	capability;
+			uint16	AID;
+		} confirm;
+		uint8 data[1];
+	} u;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peering_frmhdr mesh_peering_frmhdr_t;
+
+/* Mesh peering mgmt IE */
+BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_common {
+	uint16	mesh_peer_prot_id;
+	uint16	local_link_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peer_mgmt_ie_common mesh_peer_mgmt_ie_common_t;
+#define MESH_PEER_MGMT_IE_OPEN_LEN	(4)
+
+BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_cfm {
+	mesh_peer_mgmt_ie_common_t	common;
+	uint16	peer_link_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peer_mgmt_ie_cfm mesh_peer_mgmt_ie_cfm_t;
+#define MESH_PEER_MGMT_IE_CONF_LEN	(6)
+
+BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_close {
+	mesh_peer_mgmt_ie_common_t	common;
+	/* uint16	peer_link_id;
+	* simplicity: not supported, TODO for future
+	*/
+	uint16	reason_code;
+} BWL_POST_PACKED_STRUCT;
+typedef struct mesh_peer_mgmt_ie_close mesh_peer_mgmt_ie_close_t;
+#define MESH_PEER_MGMT_IE_CLOSE_LEN	(6)
+
+struct mesh_config_ie {
+	uint8	activ_path_sel_prot_id;
+	uint8	activ_path_sel_metric_id;
+	uint8	cong_ctl_mode_id;
+	uint8	sync_method_id;
+	uint8	auth_prot_id;
+	uint8	mesh_formation_info;
+	uint8	mesh_cap;
+};
+typedef struct mesh_config_ie mesh_config_ie_t;
+#define MESH_CONFIG_IE_LEN	(7)
+
+/* Mesh peering states */
+#define MESH_PEERING_IDLE               0
+#define MESH_PEERING_OPEN_SNT           1
+#define MESH_PEERING_CNF_RCVD           2
+#define MESH_PEERING_OPEN_RCVD          3
+#define MESH_PEERING_ESTAB              4
+#define MESH_PEERING_HOLDING            5
+#define MESH_PEERING_LAST_STATE         6
+/* for debugging: mapping strings */
+#define MESH_PEERING_STATE_STRINGS \
+	{"IDLE  ", "OPNSNT", "CNFRCV", "OPNRCV", "ESTAB ", "HOLDNG"}
+
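+/* Illustrative sketch: mapping a peering state to its debug string. The
+ * "state" variable is hypothetical and is assumed to be a valid state below
+ * MESH_PEERING_LAST_STATE:
+ *
+ *	static const char *peer_state_str[] = MESH_PEERING_STATE_STRINGS;
+ *	const char *name = (state < MESH_PEERING_LAST_STATE) ?
+ *		peer_state_str[state] : "UNKNWN";
+ */
+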
+#ifdef WLMESH
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info {
+	/* mesh_peer_instance as given in the spec. Note that, peer address
+	 * is stored in scb
+	 */
+	uint16	mesh_peer_prot_id;
+	uint16	local_link_id;
+	uint16	peer_link_id;
+	/* AID generated by *peer* to self & received in peer_confirm */
+	uint16  peer_aid;
+
+	/* TODO: no mention in spec? possibly used in PS case. Note that aid generated
+	* from self to peer is stored in scb.
+	*/
+	uint8   state;
+	/* TODO: struct mesh_peer_info *next; this field is required
+	 * if multiple peerings per same src is allowed, which is
+	 * true as per spec.
+	 */
+} BWL_POST_PACKED_STRUCT mesh_peer_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_ext {
+	mesh_peer_info_t	peer_info;
+	uint16	local_aid;	/* AID generated by *local* to peer */
+	struct ether_addr ea; /* peer ea */
+	uint32	entry_state;	/* see MESH_PEER_ENTRY_STATE_ACTIVE etc; valid
+		* ONLY for internal peering requests
+		*/
+	int rssi;
+} BWL_POST_PACKED_STRUCT mesh_peer_info_ext_t;
+
+/* #ifdef WLMESH */
+typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_dump {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;	/* number of results */
+	mesh_peer_info_ext_t	mpi_ext[1];
+} BWL_POST_PACKED_STRUCT mesh_peer_info_dump_t;
+#define WL_MESH_PEER_RES_FIXED_SIZE (sizeof(mesh_peer_info_dump_t) - sizeof(mesh_peer_info_ext_t))
+
+#endif /* WLMESH */
+
+/* Once an entry is added into mesh_peer_list, if peering is lost, peering
+* will be retried up to MAX_MESH_SELF_PEER_ENTRY_RETRIES times. Afterwards it
+* won't be retried and the entry is moved to MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT,
+* until the user adds it again explicitly, at which point its entry_state is
+* changed to MESH_SELF_PEER_ENTRY_STATE_ACTIVE and peering is tried again.
+*/
+#define MAX_MESH_SELF_PEER_ENTRY_RETRIES	3
+#define MESH_SELF_PEER_ENTRY_STATE_ACTIVE	1
+#define MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT	2
+
+/** Mesh Channel Switch Parameter IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_mcsp_body {
+	uint8 ttl;           /* remaining number of hops allowed for this element. */
+	uint8 flags;         /* attributes of this channel switch attempt */
+	uint8 reason;        /* reason for the mesh channel switch */
+	uint16 precedence;    /* random value in the range 0 to 65535 */
+} BWL_POST_PACKED_STRUCT;
+
+#define DOT11_MCSP_TTL_DEFAULT          1
+#define DOT11_MCSP_FLAG_TRANS_RESTRICT	0x1	/* no transmit except frames with mcsp */
+#define DOT11_MCSP_FLAG_INIT		0x2	/* initiates the channel switch attempt */
+#define DOT11_MCSP_FLAG_REASON		0x4	/* validity of reason code field */
+#define DOT11_MCSP_REASON_REGULATORY	0	/* meet regulatory requirements */
+#define DOT11_MCSP_REASON_UNSPECIFIED	1	/* unspecified reason */
+
+BWL_PRE_PACKED_STRUCT struct dot11_mesh_csp {
+	uint8 id; /* id DOT11_MNG_MESH_CSP_ID */
+	uint8 len; /* length of IE */
+	struct dot11_mcsp_body body; /* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mesh_csp dot11_mesh_csp_ie_t;
+#define DOT11_MESH_CSP_IE_LEN    5       /* length of mesh channel switch parameter IE body */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif  /* #ifndef _802_11s_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/802.1d.h b/drivers/net/wireless/bcmdhd/include/802.1d.h
new file mode 100644
index 0000000..1b8dea5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/802.1d.h
@@ -0,0 +1,53 @@
+/*
+ * Fundamental types and constants relating to 802.1D
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.1d.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define	PRIO_8021D_NONE		2	/* None = - */
+#define	PRIO_8021D_BK		1	/* BK - Background */
+#define	PRIO_8021D_BE		0	/* BE - Best-effort */
+#define	PRIO_8021D_EE		3	/* EE - Excellent-effort */
+#define	PRIO_8021D_CL		4	/* CL - Controlled Load */
+#define	PRIO_8021D_VI		5	/* Vi - Video */
+#define	PRIO_8021D_VO		6	/* Vo - Voice */
+#define	PRIO_8021D_NC		7	/* NC - Network Control */
+#define	MAXPRIO			7	/* 0-7 */
+#define NUMPRIO			(MAXPRIO + 1)
+
+#define ALLPRIO		-1	/* All priority */
+
+/* Converts prio to precedence since the numerical value of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio))
+
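+/* For example, given the definitions above:
+ *	PRIO2PREC(PRIO_8021D_BE)   == 2
+ *	PRIO2PREC(PRIO_8021D_NONE) == 0
+ *	PRIO2PREC(PRIO_8021D_VO)   == 6	(all other priorities map to themselves)
+ */
+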
+#endif /* _802_1_D_ */
diff --git a/drivers/net/wireless/bcmdhd/include/802.3.h b/drivers/net/wireless/bcmdhd/include/802.3.h
new file mode 100644
index 0000000..6758ac0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/802.3.h
@@ -0,0 +1,55 @@
+/*
+ * Fundamental constants relating to 802.3
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.3.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _802_3_h_
+#define _802_3_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN	6	/* 802.3 SNAP header length */
+#define DOT3_OUI_LEN	3	/* 802.3 oui length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT3_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
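+/* Illustrative sketch: classifying a received SNAP-encapsulated frame. "hdr"
+ * is a hypothetical pointer to a frame laid out as above; the "type" field is
+ * carried big-endian on the wire:
+ *
+ *	const struct dot3_mac_llc_snap_header *h = hdr;
+ *	bool rfc1042 = (h->dsap == 0xAA && h->ssap == 0xAA && h->ctl == 0x03 &&
+ *		h->oui[0] == 0x00 && h->oui[1] == 0x00 && h->oui[2] == 0x00);
+ */
+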
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* #ifndef _802_3_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 0000000..10992e7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,424 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: aidmp.h 614820 2016-01-23 17:16:17Z $
+ */
+
+#ifndef	_AIDMP_H
+#define	_AIDMP_H
+
+/* Manufacturer Ids */
+#define	MFGID_ARM		0x43b
+#define	MFGID_BRCM		0x4bf
+#define	MFGID_MIPS		0x4a7
+
+/* Component Classes */
+#define	CC_SIM			0
+#define	CC_EROM			1
+#define	CC_CORESIGHT		9
+#define	CC_VERIF		0xb
+#define	CC_OPTIMO		0xd
+#define	CC_GEN			0xe
+#define	CC_PRIMECELL		0xf
+
+/* Enumeration ROM registers */
+#define	ER_EROMENTRY		0x000
+#define	ER_REMAPCONTROL		0xe00
+#define	ER_REMAPSELECT		0xe04
+#define	ER_MASTERSELECT		0xe10
+#define	ER_ITCR			0xf00
+#define	ER_ITIP			0xf04
+
+/* Erom entries */
+#define	ER_TAG			0xe
+#define	ER_TAG1			0x6
+#define	ER_VALID		1
+#define	ER_CI			0
+#define	ER_MP			2
+#define	ER_ADD			4
+#define	ER_END			0xe
+#define	ER_BAD			0xffffffff
+#define	ER_SZ_MAX		4096 /* 4KB */
+
+/* EROM CompIdentA */
+#define	CIA_MFG_MASK		0xfff00000
+#define	CIA_MFG_SHIFT		20
+#define	CIA_CID_MASK		0x000fff00
+#define	CIA_CID_SHIFT		8
+#define	CIA_CCL_MASK		0x000000f0
+#define	CIA_CCL_SHIFT		4
+
+/* EROM CompIdentB */
+#define	CIB_REV_MASK		0xff000000
+#define	CIB_REV_SHIFT		24
+#define	CIB_NSW_MASK		0x00f80000
+#define	CIB_NSW_SHIFT		19
+#define	CIB_NMW_MASK		0x0007c000
+#define	CIB_NMW_SHIFT		14
+#define	CIB_NSP_MASK		0x00003e00
+#define	CIB_NSP_SHIFT		9
+#define	CIB_NMP_MASK		0x000001f0
+#define	CIB_NMP_SHIFT		4
+
+/* EROM MasterPortDesc */
+#define	MPD_MUI_MASK		0x0000ff00
+#define	MPD_MUI_SHIFT		8
+#define	MPD_MP_MASK		0x000000f0
+#define	MPD_MP_SHIFT		4
+
+/* EROM AddrDesc */
+#define	AD_ADDR_MASK		0xfffff000
+#define	AD_SP_MASK		0x00000f00
+#define	AD_SP_SHIFT		8
+#define	AD_ST_MASK		0x000000c0
+#define	AD_ST_SHIFT		6
+#define	AD_ST_SLAVE		0x00000000
+#define	AD_ST_BRIDGE		0x00000040
+#define	AD_ST_SWRAP		0x00000080
+#define	AD_ST_MWRAP		0x000000c0
+#define	AD_SZ_MASK		0x00000030
+#define	AD_SZ_SHIFT		4
+#define	AD_SZ_4K		0x00000000
+#define	AD_SZ_8K		0x00000010
+#define	AD_SZ_16K		0x00000020
+#define	AD_SZ_SZD		0x00000030
+#define	AD_AG32			0x00000008
+#define	AD_ADDR_ALIGN		0x00000fff
+#define	AD_SZ_BASE		0x00001000	/* 4KB */
+
+/* EROM SizeDesc */
+#define	SD_SZ_MASK		0xfffff000
+#define	SD_SG32			0x00000008
+#define	SD_SZ_ALIGN		0x00000fff
+
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _aidmp {
+	uint32	oobselina30;	/* 0x000 */
+	uint32	oobselina74;	/* 0x004 */
+	uint32	PAD[6];
+	uint32	oobselinb30;	/* 0x020 */
+	uint32	oobselinb74;	/* 0x024 */
+	uint32	PAD[6];
+	uint32	oobselinc30;	/* 0x040 */
+	uint32	oobselinc74;	/* 0x044 */
+	uint32	PAD[6];
+	uint32	oobselind30;	/* 0x060 */
+	uint32	oobselind74;	/* 0x064 */
+	uint32	PAD[38];
+	uint32	oobselouta30;	/* 0x100 */
+	uint32	oobselouta74;	/* 0x104 */
+	uint32	PAD[6];
+	uint32	oobseloutb30;	/* 0x120 */
+	uint32	oobseloutb74;	/* 0x124 */
+	uint32	PAD[6];
+	uint32	oobseloutc30;	/* 0x140 */
+	uint32	oobseloutc74;	/* 0x144 */
+	uint32	PAD[6];
+	uint32	oobseloutd30;	/* 0x160 */
+	uint32	oobseloutd74;	/* 0x164 */
+	uint32	PAD[38];
+	uint32	oobsynca;	/* 0x200 */
+	uint32	oobseloutaen;	/* 0x204 */
+	uint32	PAD[6];
+	uint32	oobsyncb;	/* 0x220 */
+	uint32	oobseloutben;	/* 0x224 */
+	uint32	PAD[6];
+	uint32	oobsyncc;	/* 0x240 */
+	uint32	oobseloutcen;	/* 0x244 */
+	uint32	PAD[6];
+	uint32	oobsyncd;	/* 0x260 */
+	uint32	oobseloutden;	/* 0x264 */
+	uint32	PAD[38];
+	uint32	oobaextwidth;	/* 0x300 */
+	uint32	oobainwidth;	/* 0x304 */
+	uint32	oobaoutwidth;	/* 0x308 */
+	uint32	PAD[5];
+	uint32	oobbextwidth;	/* 0x320 */
+	uint32	oobbinwidth;	/* 0x324 */
+	uint32	oobboutwidth;	/* 0x328 */
+	uint32	PAD[5];
+	uint32	oobcextwidth;	/* 0x340 */
+	uint32	oobcinwidth;	/* 0x344 */
+	uint32	oobcoutwidth;	/* 0x348 */
+	uint32	PAD[5];
+	uint32	oobdextwidth;	/* 0x360 */
+	uint32	oobdinwidth;	/* 0x364 */
+	uint32	oobdoutwidth;	/* 0x368 */
+	uint32	PAD[37];
+	uint32	ioctrlset;	/* 0x400 */
+	uint32	ioctrlclear;	/* 0x404 */
+	uint32	ioctrl;		/* 0x408 */
+	uint32	PAD[61];
+	uint32	iostatus;	/* 0x500 */
+	uint32	PAD[127];
+	uint32	ioctrlwidth;	/* 0x700 */
+	uint32	iostatuswidth;	/* 0x704 */
+	uint32	PAD[62];
+	uint32	resetctrl;	/* 0x800 */
+	uint32	resetstatus;	/* 0x804 */
+	uint32	resetreadid;	/* 0x808 */
+	uint32	resetwriteid;	/* 0x80c */
+	uint32	PAD[60];
+	uint32	errlogctrl;	/* 0x900 */
+	uint32	errlogdone;	/* 0x904 */
+	uint32	errlogstatus;	/* 0x908 */
+	uint32	errlogaddrlo;	/* 0x90c */
+	uint32	errlogaddrhi;	/* 0x910 */
+	uint32	errlogid;	/* 0x914 */
+	uint32	errloguser;	/* 0x918 */
+	uint32	errlogflags;	/* 0x91c */
+	uint32	PAD[56];
+	uint32	intstatus;	/* 0xa00 */
+	uint32	PAD[255];
+	uint32	config;		/* 0xe00 */
+	uint32	PAD[63];
+	uint32	itcr;		/* 0xf00 */
+	uint32	PAD[3];
+	uint32	itipooba;	/* 0xf10 */
+	uint32	itipoobb;	/* 0xf14 */
+	uint32	itipoobc;	/* 0xf18 */
+	uint32	itipoobd;	/* 0xf1c */
+	uint32	PAD[4];
+	uint32	itipoobaout;	/* 0xf30 */
+	uint32	itipoobbout;	/* 0xf34 */
+	uint32	itipoobcout;	/* 0xf38 */
+	uint32	itipoobdout;	/* 0xf3c */
+	uint32	PAD[4];
+	uint32	itopooba;	/* 0xf50 */
+	uint32	itopoobb;	/* 0xf54 */
+	uint32	itopoobc;	/* 0xf58 */
+	uint32	itopoobd;	/* 0xf5c */
+	uint32	PAD[4];
+	uint32	itopoobain;	/* 0xf70 */
+	uint32	itopoobbin;	/* 0xf74 */
+	uint32	itopoobcin;	/* 0xf78 */
+	uint32	itopoobdin;	/* 0xf7c */
+	uint32	PAD[4];
+	uint32	itopreset;	/* 0xf90 */
+	uint32	PAD[15];
+	uint32	peripherialid4;	/* 0xfd0 */
+	uint32	peripherialid5;	/* 0xfd4 */
+	uint32	peripherialid6;	/* 0xfd8 */
+	uint32	peripherialid7;	/* 0xfdc */
+	uint32	peripherialid0;	/* 0xfe0 */
+	uint32	peripherialid1;	/* 0xfe4 */
+	uint32	peripherialid2;	/* 0xfe8 */
+	uint32	peripherialid3;	/* 0xfec */
+	uint32	componentid0;	/* 0xff0 */
+	uint32	componentid1;	/* 0xff4 */
+	uint32	componentid2;	/* 0xff8 */
+	uint32	componentid3;	/* 0xffc */
+} aidmp_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* Out-of-band Router registers */
+#define	OOB_BUSCONFIG		0x020
+#define	OOB_STATUSA		0x100
+#define	OOB_STATUSB		0x104
+#define	OOB_STATUSC		0x108
+#define	OOB_STATUSD		0x10c
+#define	OOB_ENABLEA0		0x200
+#define	OOB_ENABLEA1		0x204
+#define	OOB_ENABLEA2		0x208
+#define	OOB_ENABLEA3		0x20c
+#define	OOB_ENABLEB0		0x280
+#define	OOB_ENABLEB1		0x284
+#define	OOB_ENABLEB2		0x288
+#define	OOB_ENABLEB3		0x28c
+#define	OOB_ENABLEC0		0x300
+#define	OOB_ENABLEC1		0x304
+#define	OOB_ENABLEC2		0x308
+#define	OOB_ENABLEC3		0x30c
+#define	OOB_ENABLED0		0x380
+#define	OOB_ENABLED1		0x384
+#define	OOB_ENABLED2		0x388
+#define	OOB_ENABLED3		0x38c
+#define	OOB_ITCR		0xf00
+#define	OOB_ITIPOOBA		0xf10
+#define	OOB_ITIPOOBB		0xf14
+#define	OOB_ITIPOOBC		0xf18
+#define	OOB_ITIPOOBD		0xf1c
+#define	OOB_ITOPOOBA		0xf30
+#define	OOB_ITOPOOBB		0xf34
+#define	OOB_ITOPOOBC		0xf38
+#define	OOB_ITOPOOBD		0xf3c
+
+/* DMP wrapper registers */
+#define	AI_OOBSELINA30		0x000
+#define	AI_OOBSELINA74		0x004
+#define	AI_OOBSELINB30		0x020
+#define	AI_OOBSELINB74		0x024
+#define	AI_OOBSELINC30		0x040
+#define	AI_OOBSELINC74		0x044
+#define	AI_OOBSELIND30		0x060
+#define	AI_OOBSELIND74		0x064
+#define	AI_OOBSELOUTA30		0x100
+#define	AI_OOBSELOUTA74		0x104
+#define	AI_OOBSELOUTB30		0x120
+#define	AI_OOBSELOUTB74		0x124
+#define	AI_OOBSELOUTC30		0x140
+#define	AI_OOBSELOUTC74		0x144
+#define	AI_OOBSELOUTD30		0x160
+#define	AI_OOBSELOUTD74		0x164
+#define	AI_OOBSYNCA		0x200
+#define	AI_OOBSELOUTAEN		0x204
+#define	AI_OOBSYNCB		0x220
+#define	AI_OOBSELOUTBEN		0x224
+#define	AI_OOBSYNCC		0x240
+#define	AI_OOBSELOUTCEN		0x244
+#define	AI_OOBSYNCD		0x260
+#define	AI_OOBSELOUTDEN		0x264
+#define	AI_OOBAEXTWIDTH		0x300
+#define	AI_OOBAINWIDTH		0x304
+#define	AI_OOBAOUTWIDTH		0x308
+#define	AI_OOBBEXTWIDTH		0x320
+#define	AI_OOBBINWIDTH		0x324
+#define	AI_OOBBOUTWIDTH		0x328
+#define	AI_OOBCEXTWIDTH		0x340
+#define	AI_OOBCINWIDTH		0x344
+#define	AI_OOBCOUTWIDTH		0x348
+#define	AI_OOBDEXTWIDTH		0x360
+#define	AI_OOBDINWIDTH		0x364
+#define	AI_OOBDOUTWIDTH		0x368
+
+
+#define	AI_IOCTRLSET		0x400
+#define	AI_IOCTRLCLEAR		0x404
+#define	AI_IOCTRL		0x408
+#define	AI_IOSTATUS		0x500
+#define	AI_RESETCTRL		0x800
+#define	AI_RESETSTATUS		0x804
+
+#define	AI_IOCTRLWIDTH		0x700
+#define	AI_IOSTATUSWIDTH	0x704
+
+#define	AI_RESETREADID		0x808
+#define	AI_RESETWRITEID		0x80c
+#define	AI_ERRLOGCTRL		0x900
+#define	AI_ERRLOGDONE		0x904
+#define	AI_ERRLOGSTATUS		0x908
+#define	AI_ERRLOGADDRLO		0x90c
+#define	AI_ERRLOGADDRHI		0x910
+#define	AI_ERRLOGID		0x914
+#define	AI_ERRLOGUSER		0x918
+#define	AI_ERRLOGFLAGS		0x91c
+#define	AI_INTSTATUS		0xa00
+#define	AI_CONFIG		0xe00
+#define	AI_ITCR			0xf00
+#define	AI_ITIPOOBA		0xf10
+#define	AI_ITIPOOBB		0xf14
+#define	AI_ITIPOOBC		0xf18
+#define	AI_ITIPOOBD		0xf1c
+#define	AI_ITIPOOBAOUT		0xf30
+#define	AI_ITIPOOBBOUT		0xf34
+#define	AI_ITIPOOBCOUT		0xf38
+#define	AI_ITIPOOBDOUT		0xf3c
+#define	AI_ITOPOOBA		0xf50
+#define	AI_ITOPOOBB		0xf54
+#define	AI_ITOPOOBC		0xf58
+#define	AI_ITOPOOBD		0xf5c
+#define	AI_ITOPOOBAIN		0xf70
+#define	AI_ITOPOOBBIN		0xf74
+#define	AI_ITOPOOBCIN		0xf78
+#define	AI_ITOPOOBDIN		0xf7c
+#define	AI_ITOPRESET		0xf90
+#define	AI_PERIPHERIALID4	0xfd0
+#define	AI_PERIPHERIALID5	0xfd4
+#define	AI_PERIPHERIALID6	0xfd8
+#define	AI_PERIPHERIALID7	0xfdc
+#define	AI_PERIPHERIALID0	0xfe0
+#define	AI_PERIPHERIALID1	0xfe4
+#define	AI_PERIPHERIALID2	0xfe8
+#define	AI_PERIPHERIALID3	0xfec
+#define	AI_COMPONENTID0		0xff0
+#define	AI_COMPONENTID1		0xff4
+#define	AI_COMPONENTID2		0xff8
+#define	AI_COMPONENTID3		0xffc
+
+/* resetctrl */
+#define	AIRC_RESET		1
+
+/* errlogctrl */
+#define AIELC_TO_EXP_MASK	0x000001f0		/* backplane timeout exponent */
+#define AIELC_TO_EXP_SHIFT	4
+#define AIELC_TO_ENAB_SHIFT	9			/* backplane timeout enable */
+
+/* errlogdone */
+#define AIELD_ERRDONE_MASK	0x3
+
+/* errlogstatus */
+#define AIELS_SLAVE_ERR         0x1
+#define AIELS_TIMEOUT           0x2
+#define AIELS_DECODE            0x3
+#define AIELS_TIMEOUT_MASK      0x3
+
+/* errorlog status bit map, for SW use */
+#define AXI_WRAP_STS_NONE		(0)
+#define AXI_WRAP_STS_TIMEOUT		(1<<0)
+#define AXI_WRAP_STS_SLAVE_ERR		(1<<1)
+#define AXI_WRAP_STS_DECODE_ERR		(1<<2)
+#define AXI_WRAP_STS_PCI_RD_ERR		(1<<3)
+#define AXI_WRAP_STS_WRAP_RD_ERR	(1<<4)
+#define AXI_WRAP_STS_SET_CORE_FAIL	(1<<5)
+
+/* errlogflags */
+#define AXI_ERRLOG_FLAGS_WRITE_REQ	(1<<24)
+
+/* config */
+#define	AICFG_OOB		0x00000020
+#define	AICFG_IOS		0x00000010
+#define	AICFG_IOC		0x00000008
+#define	AICFG_TO		0x00000004
+#define	AICFG_ERRL		0x00000002
+#define	AICFG_RST		0x00000001
+
+/* bit defines for AI_OOBSELOUTB74 reg */
+#define OOB_SEL_OUTEN_B_5	15
+#define OOB_SEL_OUTEN_B_6	23
+
+/* AI_OOBSEL for A/B/C/D, 0-7 */
+#define AI_OOBSEL_MASK		0x1F
+#define AI_OOBSEL_0_SHIFT	0
+#define AI_OOBSEL_1_SHIFT	8
+#define AI_OOBSEL_2_SHIFT	16
+#define AI_OOBSEL_3_SHIFT	24
+#define AI_OOBSEL_4_SHIFT	0
+#define AI_OOBSEL_5_SHIFT	8
+#define AI_OOBSEL_6_SHIFT	16
+#define AI_OOBSEL_7_SHIFT	24
+#define AI_IOCTRL_ENABLE_D11_PME	(1 << 14)
+
+/* mask for interrupts from each core to wrapper */
+#define AI_OOBSELINA74_CORE_MASK       0x80808080
+#define AI_OOBSELINA30_CORE_MASK       0x80808080
+
+/* axi id mask in the error log id */
+#define AI_ERRLOGID_AXI_ID_MASK 0x07
+
+#endif	/* _AIDMP_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
new file mode 100644
index 0000000..12e3cb2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
@@ -0,0 +1,32 @@
+/*
+ * BCM common config options
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_cfg.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _bcm_cfg_h_
+#define _bcm_cfg_h_
+#endif /* _bcm_cfg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
new file mode 100644
index 0000000..f8ce7a7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
@@ -0,0 +1,364 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogenous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ *    pool manager: A singleton object that acts as a factory for
+ *                  pool allocators. It also is used for global
+ *                  instrumentation, such as reporting all blocks
+ *                  in use across all data structures. The pool manager
+ *                  creates and provides individual memory pools
+ *                  upon request to application code.
+ *
+ *    memory pool:  An object for allocating homogenous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ *    bcm_mpm_*     Memory pool manager
+ *    bcm_mp_*      Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ *    prealloc: The contiguous memory block of objects can either be supplied
+ *              by the client or malloc'ed by the memory manager. The objects are
+ *              allocated out of a block of memory and freed back to the block.
+ *
+ *    heap:     The memory pool allocator uses the heap (malloc/free) for memory.
+ *              In this case, the pool allocator is just providing statistics
+ *              and instrumentation on top of the heap, without modifying the heap
+ *              allocation implementation.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_mpool_pub.h 535090 2015-02-17 04:49:01Z $
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h> /* needed for uint16 */
+
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+	char name[BCM_MP_NAMELEN];  /* Name of this pool. */
+	unsigned int objsz;         /* Object size allocated in this pool */
+	uint16 nobj;                /* Total number of objects in this pool */
+	uint16 num_alloc;           /* Number of objects currently allocated */
+	uint16 high_water;          /* Max number of allocated objects. */
+	uint16 failed_alloc;        /* Failed allocations. */
+} bcm_mp_stats_t;
+
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    osh:       INPUT  Operating system handle. Needed for heap memory allocation.
+ *    max_pools: INPUT Maximum number of mempools supported.
+ *    mgr:       OUTPUT The handle is written with the new pools manager object/handle.
+ *
+ * Returns:
+ *    BCME_OK     Object initialized successfully. May be used.
+ *    BCME_NOMEM  Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    mgr:     INPUT  Pointer to pool manager handle.
+ *
+ * Returns:
+ *    BCME_OK  Memory pool manager successfully de-initialized.
+ *    other    Indicated error occurred during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ *                                  pool uses a contiguous block of pre-alloced
+ *                                  memory. The memory block may either be provided
+ *                                  by the client or dynamically allocated by the
+ *                                  pool manager.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *                     Must be >= sizeof(void *).
+ *    nobj:     INPUT  Maximum number of concurrently existing objects to support
+ *    memstart  INPUT  Pointer to the memory to use, or NULL to malloc()
+ *    memsize   INPUT  Number of bytes referenced from memstart (for error checking).
+ *                     Must be 0 if 'memstart' is NULL.
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+                                 unsigned int obj_sz,
+                                 int nobj,
+                                 void *memstart,
+                                 unsigned int memsize,
+                                 const char poolname[BCM_MP_NAMELEN],
+                                 bcm_mp_pool_h *newp);
+
+
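+/*
+ * Illustrative sketch of creating a prealloc pool from a client-supplied
+ * slab. "osh", "my_obj_t", MY_MAX_OBJS and the MALLOC() OSL helper are
+ * assumptions made for the example; error handling is elided.
+ *
+ *	bcm_mpm_mgr_h mgr;
+ *	bcm_mp_pool_h pool;
+ *	unsigned int padded_sz;
+ *	void *slab;
+ *
+ *	bcm_mpm_init(osh, 4, &mgr);
+ *	bcm_mpm_get_obj_size(mgr, sizeof(my_obj_t), &padded_sz);
+ *	slab = MALLOC(osh, padded_sz * MY_MAX_OBJS);
+ *	bcm_mpm_create_prealloc_pool(mgr, sizeof(my_obj_t), MY_MAX_OBJS,
+ *	                             slab, padded_sz * MY_MAX_OBJS,
+ *	                             "myobj", &pool);
+ */
+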
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ *                                  all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the  pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ *                              pool allocator uses the heap (malloc/free) for memory.
+ *                              In this case, the pool allocator is just providing
+ *                              statistics and instrumentation on top of the heap,
+ *                              without modifying the heap allocation implementation.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+                             const char poolname[BCM_MP_NAMELEN],
+                             bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ *                              all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the  pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ *    mgr:         INPUT   The handle to the pools manager
+ *    stats:       OUTPUT  Array of pool statistics.
+ *    nentries:    MOD     Max elements in 'stats' array on INPUT. Actual number
+ *                         of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ *    mgr:     INPUT  The handle to the pools manager
+ *    b:       OUTPUT Output buffer.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ *                          compensate for alignment requirements of the objects.
+ *                          This function provides the padded object size. If clients
+ *                          pre-allocate a memory slab for a memory pool, the
+ *                          padded object size should be used by the client to allocate
+ *                          the memory slab (in order to provide sufficient space for
+ *                          the maximum number of objects).
+ *
+ * Parameters:
+ *    mgr:            INPUT   The handle to the pools manager.
+ *    obj_sz:         INPUT   Input object size.
+ *    padded_obj_sz:  OUTPUT  Padded object size.
+ *
+ * Returns:
+ *    BCME_OK      Ok
+ *    BCME_BADARG  Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool.
+ *
+ * Returns:
+ *    A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ *    pool:  INPUT   The handle to the pool.
+ *    objp:  INPUT   A pointer to the object to free.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
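+
+/*
+ * Illustrative sketch of the per-pool alloc/free cycle. "pool" and my_obj_t
+ * are hypothetical; the object must be returned to the pool it came from.
+ *
+ *	my_obj_t *obj = (my_obj_t *)bcm_mp_alloc(pool);
+ *	if (obj != NULL) {
+ *		... use obj ...
+ *		bcm_mp_free(pool, obj);
+ *	}
+ */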
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ *    pool:     INPUT    The handle to the pool
+ *    stats:    OUTPUT   Pool statistics
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting statistics.
+ *
+ */
+void bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool
+ *    b        OUTPUT   Output buffer
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+
+#endif /* _BCM_MPOOL_PUB_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_ring.h b/drivers/net/wireless/bcmdhd/include/bcm_ring.h
new file mode 100644
index 0000000..721d7ea
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_ring.h
@@ -0,0 +1,634 @@
+/*
+ * bcm_ring.h : Ring context abstraction
+ * The ring context tracks the WRITE and READ indices where elements may be
+ * produced and consumed respectively. All elements in the ring need to be
+ * fixed size.
+ *
+ * NOTE: A ring of size N may only hold N-1 elements.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_ring.h 596126 2015-10-29 19:53:48Z $
+ */
+#ifndef __bcm_ring_included__
+#define __bcm_ring_included__
+/*
+ * API Notes:
+ *
+ * Ring manipulation API allows for:
+ *  Pending operations: Often before some work can be completed, it may be
+ *  desired that several resources are available, e.g. space for production in
+ *  a ring. Approaches such as, #1) reserve resources one by one and return them
+ *  if another required resource is not available, or #2) employ a two pass
+ *  algorithm of first testing whether all resources are available, have a
+ *  an impact on performance critical code. The approach taken here is more akin
+ *  to approach #2, where a test for resource availability essentially also
+ *  provides the index for production in an un-committed state.
+ *  The same approach is taken for the consumer side.
+ *
+ *  - Pending production: Fetch the next index where a ring element may be
+ *    produced. The caller may not commit the WRITE of the element.
+ *  - Pending consumption: Fetch the next index where a ring element may be
+ *    consumed. The caller may not commit the READ of the element.
+ *
+ *  Producer side API:
+ *  - bcm_ring_is_full  : Test whether ring is full
+ *  - bcm_ring_prod     : Fetch index where an element may be produced (commit)
+ *  - bcm_ring_prod_pend: Fetch index where an element may be produced (pending)
+ *  - bcm_ring_prod_done: Commit a previous pending produce fetch
+ *  - bcm_ring_prod_avail: Fetch total number free slots eligible for production
+ *
+ * Consumer side API:
+ *  - bcm_ring_is_empty : Test whether ring is empty
+ *  - bcm_ring_cons     : Fetch index where an element may be consumed (commit)
+ *  - bcm_ring_cons_pend: Fetch index where an element may be consumed (pending)
+ *  - bcm_ring_cons_done: Commit a previous pending consume fetch
+ *  - bcm_ring_cons_avail: Fetch total number elements eligible for consumption
+ *
+ *  - bcm_ring_sync_read: Sync read offset in peer ring, from local ring
+ *  - bcm_ring_sync_write: Sync write offset in peer ring, from local ring
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Design Notes:
+ * Following items are not tracked in a ring context (design decision)
+ *  - width of a ring element.
+ *  - depth of the ring.
+ *  - base of the buffer, where the elements are stored.
+ *  - count of number of free slots in the ring
+ *
+ * Implementation Notes:
+ *  - When BCM_RING_DEBUG is enabled, need explicit bcm_ring_init().
+ *  - BCM_RING_EMPTY and BCM_RING_FULL are (-1)
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Usage Notes:
+ * An application may instantiate a ring of fixed-size elements by defining
+ *  - a ring data buffer to store the ring elements.
+ *  - depth of the ring (max number of elements managed by ring context).
+ *    Preferably, depth may be represented as a constant.
+ *  - width of a ring element: to be used in pointer arithmetic with the ring's
+ *    data buffer base and an index to fetch the ring element.
+ *
+ * Use bcm_workq_t to instantiate a pair of workq constructs, one for the
+ * producer and the other for the consumer, both pointing to the same circular
+ * buffer. The producer may operate on its own local workq and flush the write
+ * index to the consumer. Likewise the consumer may use its local workq and
+ * flush the read index to the producer. This way we do not repeatedly access
+ * the peer's context. The two peers may reside on different CPU cores with a
+ * private L1 data cache.
+ * +----------------------------------------------------------------------------
+ *
+ * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
+ * vim: set ts=4 noet sw=4 tw=80:
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+#ifdef ____cacheline_aligned
+#define __ring_aligned                      ____cacheline_aligned
+#else
+#define __ring_aligned
+#endif
+
+/* Conditional compile for debug */
+/* #define BCM_RING_DEBUG */
+
+#define BCM_RING_EMPTY                      (-1)
+#define BCM_RING_FULL                       (-1)
+#define BCM_RING_NULL                       ((bcm_ring_t *)NULL)
+
+#if defined(BCM_RING_DEBUG)
+#define RING_ASSERT(exp)                    ASSERT(exp)
+#define BCM_RING_IS_VALID(ring)             (((ring) != BCM_RING_NULL) && \
+	                                         ((ring)->self == (ring)))
+#else  /* ! BCM_RING_DEBUG */
+#define RING_ASSERT(exp)                    do {} while (0)
+#define BCM_RING_IS_VALID(ring)             ((ring) != BCM_RING_NULL)
+#endif /* ! BCM_RING_DEBUG */
+
+#define BCM_RING_SIZE_IS_VALID(ring_size)   ((ring_size) > 0)
+
+/*
+ * +----------------------------------------------------------------------------
+ * Ring Context
+ * +----------------------------------------------------------------------------
+ */
+typedef struct bcm_ring {     /* Ring context */
+#if defined(BCM_RING_DEBUG)
+	struct bcm_ring *self;    /* ptr to self for IS VALID test */
+#endif /* BCM_RING_DEBUG */
+	int write __ring_aligned; /* WRITE index in a circular ring */
+	int read  __ring_aligned; /* READ index in a circular ring */
+} bcm_ring_t;
+
+
+static INLINE void bcm_ring_init(bcm_ring_t *ring);
+static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from);
+static INLINE bool bcm_ring_is_empty(bcm_ring_t *ring);
+
+static INLINE int  __bcm_ring_next_write(bcm_ring_t *ring, const int ring_size);
+
+static INLINE bool __bcm_ring_full(bcm_ring_t *ring, int next_write);
+static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write);
+static INLINE int  bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write,
+                                      const int ring_size);
+static INLINE int  bcm_ring_prod(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read);
+static INLINE int  bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read,
+                                      const int ring_size);
+static INLINE int  bcm_ring_cons(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self);
+static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self);
+
+static INLINE int  bcm_ring_prod_avail(const bcm_ring_t *ring,
+                                       const int ring_size);
+static INLINE int  bcm_ring_cons_avail(const bcm_ring_t *ring,
+                                       const int ring_size);
+static INLINE void bcm_ring_cons_all(bcm_ring_t *ring);
+
+
+/**
+ * bcm_ring_init - initialize a ring context.
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_init(bcm_ring_t *ring)
+{
+	ASSERT(ring != (bcm_ring_t *)NULL);
+#if defined(BCM_RING_DEBUG)
+	ring->self = ring;
+#endif /* BCM_RING_DEBUG */
+	ring->write = 0;
+	ring->read = 0;
+}
+
+/**
+ * bcm_ring_copy - copy construct a ring
+ * @to: pointer to the new ring context
+ * @from: pointer to the original ring context
+ */
+static INLINE void
+bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from)
+{
+	bcm_ring_init(to);
+
+	to->write = from->write;
+	to->read  = from->read;
+}
+
+/**
+ * bcm_ring_is_empty - "Boolean" test whether ring is empty.
+ * @ring: pointer to a ring context
+ *
+ * Note: returns a bool; it does not return the BCM_RING_EMPTY value.
+ */
+static INLINE bool
+bcm_ring_is_empty(bcm_ring_t *ring)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(ring));
+	return (ring->read == ring->write);
+}
+
+
+/**
+ * __bcm_ring_next_write - determine the index where the next write may occur
+ *                         (with wrap-around).
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE int
+__bcm_ring_next_write(bcm_ring_t *ring, const int ring_size)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	return ((ring->write + 1) % ring_size);
+}
+
+
+/**
+ * __bcm_ring_full - support function for ring full test.
+ * @ring: pointer to a ring context
+ * @next_write: next location in ring where an element is to be produced
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE bool
+__bcm_ring_full(bcm_ring_t *ring, int next_write)
+{
+	return (next_write == ring->read);
+}
+
+
+/**
+ * bcm_ring_is_full - "Boolean" test whether a ring is full.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * Note: returns a bool; it does not return the BCM_RING_FULL value.
+ */
+static INLINE bool
+bcm_ring_is_full(bcm_ring_t *ring, const int ring_size)
+{
+	int next_write;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	next_write = __bcm_ring_next_write(ring, ring_size);
+	return __bcm_ring_full(ring, next_write);
+}
+
+
+/**
+ * bcm_ring_prod_done - commit a previously pending index where production
+ * was requested.
+ * @ring: pointer to a ring context
+ * @write: index into ring up to which production was done.
+ */
+static INLINE void
+bcm_ring_prod_done(bcm_ring_t *ring, int write)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(ring));
+	ring->write = write;
+}
+
+
+/**
+ * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be
+ * produced.
+ * @ring: pointer to a ring context
+ * @pend_write: next index, after the returned index
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, const int ring_size)
+{
+	int rtn;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	*pend_write = __bcm_ring_next_write(ring, ring_size);
+	if (__bcm_ring_full(ring, *pend_write)) {
+		*pend_write = BCM_RING_FULL;
+		rtn = BCM_RING_FULL;
+	} else {
+		/* production is not committed, caller needs to explicitly commit */
+		rtn = ring->write;
+	}
+	return rtn;
+}
+
+
+/**
+ * bcm_ring_prod - Fetch and "commit" the next index where a ring element may
+ * be produced.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod(bcm_ring_t *ring, const int ring_size)
+{
+	int next_write, prod_write;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+
+	next_write = __bcm_ring_next_write(ring, ring_size);
+	if (__bcm_ring_full(ring, next_write)) {
+		prod_write = BCM_RING_FULL;
+	} else {
+		prod_write = ring->write;
+		bcm_ring_prod_done(ring, next_write); /* "commit" production */
+	}
+	return prod_write;
+}
+
+
+/**
+ * bcm_ring_cons_done - commit a previously pending read
+ * @ring: pointer to a ring context
+ * @read: index up to which elements have been consumed.
+ */
+static INLINE void
+bcm_ring_cons_done(bcm_ring_t *ring, int read)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(ring));
+	ring->read = read;
+}
+
+
+/**
+ * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring
+ * element may be consumed.
+ * @ring: pointer to a ring context
+ * @pend_read: index into ring up to which elements may be consumed.
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, const int ring_size)
+{
+	int rtn;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	if (bcm_ring_is_empty(ring)) {
+		*pend_read = BCM_RING_EMPTY;
+		rtn = BCM_RING_EMPTY;
+	} else {
+		*pend_read = (ring->read + 1) % ring_size;
+		/* consumption is not committed, caller needs to explicitly commit */
+		rtn = ring->read;
+	}
+	return rtn;
+}
+
+
+/**
+ * bcm_ring_cons - fetch and "commit" the next index where a ring element may
+ * be consumed.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons(bcm_ring_t *ring, const int ring_size)
+{
+	int cons_read;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	if (bcm_ring_is_empty(ring)) {
+		cons_read = BCM_RING_EMPTY;
+	} else {
+		cons_read = ring->read;
+		ring->read = (ring->read + 1) % ring_size; /* read is committed */
+	}
+	return cons_read;
+}
+
+
+/**
+ * bcm_ring_sync_read - on consumption, update peer's read index.
+ * @peer: pointer to peer's producer ring context
+ * @self: pointer to consumer's ring context
+ */
+static INLINE void
+bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(peer));
+	RING_ASSERT(BCM_RING_IS_VALID(self));
+	peer->read = self->read; /* flush read update to peer producer */
+}
+
+
+/**
+ * bcm_ring_sync_write - on production, update peer's write index.
+ * @peer: pointer to peer's consumer ring context
+ * @self: pointer to producer's ring context
+ */
+static INLINE void
+bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+	RING_ASSERT(BCM_RING_IS_VALID(peer));
+	RING_ASSERT(BCM_RING_IS_VALID(self));
+	peer->write = self->write; /* flush write update to peer consumer */
+}
+
+
+/**
+ * bcm_ring_prod_avail - fetch total number of available empty slots in the
+ * ring for production.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size)
+{
+	int prod_avail;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	if (ring->write >= ring->read) {
+		prod_avail = (ring_size - (ring->write - ring->read) - 1);
+	} else {
+		prod_avail = (ring->read - (ring->write + 1));
+	}
+	ASSERT(prod_avail < ring_size);
+	return prod_avail;
+}
+
+
+/**
+ * bcm_ring_cons_avail - fetch total number of available elements for consumption.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size)
+{
+	int cons_avail;
+	RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+	if (ring->read == ring->write) {
+		cons_avail = 0;
+	} else if (ring->read > ring->write) {
+		cons_avail = ((ring_size - ring->read) + ring->write);
+	} else {
+		cons_avail = ring->write - ring->read;
+	}
+	ASSERT(cons_avail < ring_size);
+	return cons_avail;
+}
+
+
+/**
+ * bcm_ring_cons_all - set ring in state where all elements are consumed.
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_cons_all(bcm_ring_t *ring)
+{
+	ring->read = ring->write;
+}
+
+
+/**
+ * Work Queue
+ * A work queue is composed of a ring of work items of a specified depth.
+ * It HAS-A bcm_ring object, comprising a RD and WR offset, to implement a
+ * producer/consumer circular ring. A usage sketch follows the workq helpers
+ * below.
+ */
+
+struct bcm_workq {
+	bcm_ring_t ring;        /* Ring context abstraction */
+	struct bcm_workq *peer; /* Peer workq context */
+	void       *buffer;     /* Buffer storage for work items in workQ */
+	int        ring_size;   /* Depth of workQ */
+} __ring_aligned;
+
+typedef struct bcm_workq bcm_workq_t;
+
+
+/* #define BCM_WORKQ_DEBUG */
+#if defined(BCM_WORKQ_DEBUG)
+#define WORKQ_ASSERT(exp)               ASSERT(exp)
+#else  /* ! BCM_WORKQ_DEBUG */
+#define WORKQ_ASSERT(exp)               do {} while (0)
+#endif /* ! BCM_WORKQ_DEBUG */
+
+#define WORKQ_AUDIT(workq) \
+	WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \
+	WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \
+	WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \
+	WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size);
+
+#define BCM_WORKQ_NULL                  ((bcm_workq_t *)NULL)
+
+#define WORKQ_PEER(workq)               ((workq)->peer)
+#define WORKQ_RING(workq)               (&((workq)->ring))
+#define WORKQ_PEER_RING(workq)          (&((workq)->peer->ring))
+
+#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \
+	WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \
+	WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \
+	((__elem_type *)((__workq)->buffer)) + (__index); \
+})
+
+
+static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+                                  void *buffer, int ring_size);
+
+static INLINE bool bcm_workq_is_empty(bcm_workq_t *workq_prod);
+
+static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons);
+
+static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons);
+
+/**
+ * bcm_workq_init - initialize a workq
+ * @workq: pointer to a workq context
+ * @workq_peer: pointer to the peer workq context
+ * @buffer: pointer to a pre-allocated circular buffer to serve as a ring
+ * @ring_size: size of the ring in terms of max number of elements.
+ */
+static INLINE void
+bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+               void *buffer, int ring_size)
+{
+	ASSERT(workq != BCM_WORKQ_NULL);
+	ASSERT(workq_peer != BCM_WORKQ_NULL);
+	ASSERT(buffer != NULL);
+	ASSERT(ring_size > 0);
+
+	WORKQ_PEER(workq) = workq_peer;
+	WORKQ_PEER(workq_peer) = workq;
+
+	bcm_ring_init(WORKQ_RING(workq));
+	bcm_ring_init(WORKQ_RING(workq_peer));
+
+	workq->buffer = workq_peer->buffer = buffer;
+	workq->ring_size = workq_peer->ring_size = ring_size;
+}
+
+/**
+ * bcm_workq_is_empty - test whether the work queue has no pending work
+ * @workq_prod: producer's workq
+ */
+static INLINE bool
+bcm_workq_is_empty(bcm_workq_t *workq_prod)
+{
+	return bcm_ring_is_empty(WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring
+ * @workq_prod: producer's workq whose write index must be synced to peer
+ */
+static INLINE void
+bcm_workq_prod_sync(bcm_workq_t *workq_prod)
+{
+	WORKQ_AUDIT(workq_prod);
+
+	/* cons::write <--- prod::write */
+	bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring
+ * @workq_cons: consumer's workq whose read index must be synced to peer
+ */
+static INLINE void
+bcm_workq_cons_sync(bcm_workq_t *workq_cons)
+{
+	WORKQ_AUDIT(workq_cons);
+
+	/* prod::read <--- cons::read */
+	bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons));
+}
+
+
+/**
+ * bcm_workq_prod_refresh - Fetch the updated consumer's read index
+ * @workq_prod: producer's workq whose read index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_prod_refresh(bcm_workq_t *workq_prod)
+{
+	WORKQ_AUDIT(workq_prod);
+
+	/* prod::read <--- cons::read */
+	bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_refresh - Fetch the updated producer's write index
+ * @workq_cons: consumer's workq whose write index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_cons_refresh(bcm_workq_t *workq_cons)
+{
+	WORKQ_AUDIT(workq_cons);
+
+	/* cons::write <--- prod::write */
+	bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons));
+}
+
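+/*
+ * Example (illustrative sketch, not part of this API): a producer/consumer
+ * pair built on bcm_workq_t, with each side operating on its local workq and
+ * syncing indices to the peer, e.g. across CPU cores. work_item_t,
+ * RING_DEPTH and process() are hypothetical names used only for this sketch.
+ *
+ *   work_item_t buf[RING_DEPTH];
+ *   bcm_workq_t prod_q, cons_q;
+ *   int idx, next;
+ *
+ *   bcm_workq_init(&prod_q, &cons_q, buf, RING_DEPTH);
+ *
+ *   // Producer side: refresh the consumer's read index, produce locally,
+ *   // then flush the write index to the consumer.
+ *   bcm_workq_prod_refresh(&prod_q);              // prod::read <--- cons::read
+ *   idx = bcm_ring_prod_pend(WORKQ_RING(&prod_q), &next, RING_DEPTH);
+ *   if (idx != BCM_RING_FULL) {
+ *       WORKQ_ELEMENT(work_item_t, &prod_q, idx)->arg = 123;
+ *       bcm_ring_prod_done(WORKQ_RING(&prod_q), next);
+ *       bcm_workq_prod_sync(&prod_q);             // cons::write <--- prod::write
+ *   }
+ *
+ *   // Consumer side: refresh the producer's write index, drain the ring,
+ *   // then flush the read index back to the producer.
+ *   bcm_workq_cons_refresh(&cons_q);              // cons::write <--- prod::write
+ *   while (!bcm_ring_is_empty(WORKQ_RING(&cons_q))) {
+ *       idx = bcm_ring_cons(WORKQ_RING(&cons_q), RING_DEPTH);
+ *       process(WORKQ_ELEMENT(work_item_t, &cons_q, idx)->arg);
+ *   }
+ *   bcm_workq_cons_sync(&cons_q);                 // prod::read <--- cons::read
+ */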
+
+#endif /* ! __bcm_ring_h_included__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 0000000..22fd8a0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,135 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmcdc.h 676811 2016-12-24 20:48:46Z $
+ */
+#ifndef _bcmcdc_h_
+#define	_bcmcdc_h_
+#include <ethernet.h>
+
+typedef struct cdc_ioctl {
+	uint32 cmd;      /* ioctl command value */
+	uint32 len;      /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+	uint32 flags;    /* flag defns given below */
+	uint32 status;   /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK   0x0000FFFF  /* maximum or expected response length, */
+					   /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT  0
+#define CDCL_IOC_INLEN_MASK    0xFFFF0000   /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT   16
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR		0x01	/* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET		0x02	/* 0=get, 1=set cmd */
+#define CDCF_IOC_OVL_IDX_MASK	0x3c	/* overlay region index mask */
+#define CDCF_IOC_OVL_RSV	0x40	/* 1=reserve this overlay region */
+#define CDCF_IOC_OVL		0x80	/* 1=this ioctl corresponds to an overlay */
+#define CDCF_IOC_ACTION_MASK	0xfe	/* SET/GET, OVL_IDX, OVL_RSV, OVL mask */
+#define CDCF_IOC_ACTION_SHIFT	1	/* SET/GET, OVL_IDX, OVL_RSV, OVL shift */
+#define CDCF_IOC_IF_MASK	0xF000	/* I/F index */
+#define CDCF_IOC_IF_SHIFT	12
+#define CDCF_IOC_ID_MASK	0xFFFF0000	/* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT	16		/* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags)	(((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags)	(((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
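+/*
+ * Example (illustrative sketch): encoding a CDC "set" ioctl header for
+ * interface 0. The command value, buffer lengths and request id below are
+ * arbitrary placeholders, and byte-order conversion to the dongle's
+ * little-endian layout is omitted for brevity.
+ *
+ *   cdc_ioctl_t hdr;
+ *   uint32 outlen = 64, inlen = 32, reqid = 1;
+ *
+ *   hdr.cmd    = 263;                             // hypothetical ioctl cmd value
+ *   hdr.len    = (inlen << CDCL_IOC_INLEN_SHIFT) |
+ *                (outlen << CDCL_IOC_OUTLEN_SHIFT);
+ *   hdr.flags  = CDCF_IOC_SET | (reqid << CDCF_IOC_ID_SHIFT);
+ *   CDC_SET_IF_IDX(&hdr, 0);                      // select interface 0
+ *   hdr.status = 0;
+ *
+ *   // On completion, the response is matched with CDC_IOC_ID(hdr.flags) == reqid
+ *   // and a failure is indicated by (hdr.flags & CDCF_IOC_ERROR).
+ */
+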
+/*
+ * BDC header
+ *
+ *   The BDC header is used on data packets to convey priority across USB.
+ */
+
+struct bdc_header {
+	uint8	flags;			/* Flags */
+	uint8	priority;		/* 802.1d Priority 0:2 bits, 4:7 USB flow control info */
+	uint8	flags2;
+	uint8	dataOffset;		/* Offset from end of BDC header to packet data, in
+					 * 4-byte words.  Leaves room for optional headers.
+					 */
+};
+
+#define	BDC_HEADER_LEN		4
+
+/* flags field bitmap */
+#define BDC_FLAG_80211_PKT	0x01	/* Packet is in 802.11 format (dongle -> host) */
+#define BDC_FLAG_SUM_GOOD	0x04	/* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums: host->device */
+#define BDC_FLAG_EVENT_MSG	0x08	/* Payload contains an event msg: device->host */
+#define BDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
+
+/* priority field bitmap */
+#define BDC_PRIORITY_MASK	0x07
+#define BDC_PRIORITY_FC_MASK	0xf0	/* flow control info mask */
+#define BDC_PRIORITY_FC_SHIFT	4	/* flow control info shift */
+
+/* flags2 field bitmap */
+#define BDC_FLAG2_IF_MASK	0x0f	/* interface index (host <-> dongle) */
+#define BDC_FLAG2_IF_SHIFT	0
+#define BDC_FLAG2_FC_FLAG	0x10	/* flag to indicate if pkt contains */
+					/* FLOW CONTROL info only */
+
+/* version numbers */
+#define BDC_PROTO_VER_1		1	/* Old Protocol version */
+#define BDC_PROTO_VER		2	/* Protocol version */
+
+/* flags2.if field access macros */
+#define BDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+#define BDC_FLAG2_PAD_MASK		0xf0
+#define BDC_FLAG_PAD_MASK		0x03
+#define BDC_FLAG2_PAD_SHIFT		2
+#define BDC_FLAG_PAD_SHIFT		0
+#define BDC_FLAG2_PAD_IDX		0x3c
+#define BDC_FLAG_PAD_IDX		0x03
+#define BDC_GET_PAD_LEN(hdr) \
+	((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+	((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+	(((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+	((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+	(((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
+
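+/*
+ * Example (illustrative sketch): extracting fields from a received BDC
+ * header. "pkt" is a hypothetical pointer to the first byte of the BDC
+ * header within a packet buffer.
+ *
+ *   struct bdc_header *h = (struct bdc_header *)pkt;
+ *   uint8 ver   = (h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT;
+ *   uint8 prio  = h->priority & BDC_PRIORITY_MASK;    // 802.1d priority
+ *   int   ifidx = BDC_GET_IF_IDX(h);                  // interface index
+ *   uint8 *data = (uint8 *)pkt + BDC_HEADER_LEN +
+ *                 (h->dataOffset << 2);               // dataOffset is in 4-byte words
+ *
+ *   if (ver != BDC_PROTO_VER)
+ *       ;  // version mismatch: caller decides whether to drop the packet
+ */
+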
+#endif /* _bcmcdc_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
new file mode 100644
index 0000000..58cbe5e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -0,0 +1,462 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmdefs.h 657791 2016-09-02 15:14:42Z $
+ */
+
+#ifndef	_bcmdefs_h_
+#define	_bcmdefs_h_
+
+/*
+ * This file does not need to be included explicitly; it is pulled in
+ * automatically when typedefs.h is included.
+ */
+
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
+#define BCM_REFERENCE(data)	((void)(data))
+
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR     __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
+#define STATIC_ASSERT(expr) { \
+	/* Make sure the expression is constant. */ \
+	typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+	/* Make sure the expression is true. */ \
+	typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
+}
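+
+/*
+ * Example (illustrative sketch; the function name is only a placeholder):
+ * STATIC_ASSERT() expands to a block of typedefs, so it is placed inside a
+ * function body to reject impossible configurations at compile time, e.g.:
+ *
+ *   static void sanity_checks(void)
+ *   {
+ *       STATIC_ASSERT(sizeof(uint32) == 4);
+ *       STATIC_ASSERT(MAX_DMA_SEGS > 0);
+ *   }
+ */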
+
+/* Reclaiming text and data:
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
+#if defined(BCM_RECLAIM)
+
+extern bool bcm_reclaimed;
+extern bool bcm_attach_part_reclaimed;
+extern bool bcm_preattach_part_reclaimed;
+
+#if defined(BCM_RECLAIM_ATTACH_FN_DATA)
+#define _data	__attribute__ ((__section__ (".dataini2." #_data))) _data
+#define _fn	__attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn
+
+/* Relocate attach symbols to save-restore region to increase pre-reclaim heap size. */
+#define BCM_SRM_ATTACH_DATA(_data)    __attribute__ ((__section__ (".datasrm." #_data))) _data
+#define BCM_SRM_ATTACH_FN(_fn)        __attribute__ ((__section__ (".textsrm." #_fn), noinline)) _fn
+
+#ifndef PREATTACH_NORECLAIM
+#define BCMPREATTACHDATA(_data)	__attribute__ ((__section__ (".dataini3." #_data))) _data
+#define BCMPREATTACHFN(_fn)	__attribute__ ((__section__ (".textini3." #_fn), noinline)) _fn
+#else
+#define BCMPREATTACHDATA(_data)	__attribute__ ((__section__ (".dataini2." #_data))) _data
+#define BCMPREATTACHFN(_fn)	__attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn
+#endif /* PREATTACH_NORECLAIM  */
+#else  /* BCM_RECLAIM_ATTACH_FN_DATA  */
+#define _data	_data
+#define _fn	_fn
+#define BCMPREATTACHDATA(_data)	_data
+#define BCMPREATTACHFN(_fn)	_fn
+#endif /* BCM_RECLAIM_ATTACH_FN_DATA  */
+
+#if defined(BCM_RECLAIM_INIT_FN_DATA)
+#define _data	__attribute__ ((__section__ (".dataini1." #_data))) _data
+#define _fn		__attribute__ ((__section__ (".textini1." #_fn), noinline)) _fn
+#define CONST
+#else /* BCM_RECLAIM_INIT_FN_DATA  */
+#define _data	_data
+#define _fn		_fn
+#ifndef CONST
+#define CONST	const
+#endif
+#endif /* BCM_RECLAIM_INIT_FN_DATA  */
+
+/* Non-manufacture or internal attach function/data */
+#define	BCMNMIATTACHFN(_fn)	_fn
+#define	BCMNMIATTACHDATA(_data)	_data
+
+#ifdef BCMNODOWN
+#define _fn	_fn
+#else
+#define _fn	_fn
+#endif
+
+#else /* BCM_RECLAIM */
+
+#define bcm_reclaimed           	0
+#define _data		_data
+#define _fn		_fn
+#define BCM_SRM_ATTACH_DATA(_data)	_data
+#define BCM_SRM_ATTACH_FN(_fn)		_fn
+#define BCMPREATTACHDATA(_data)		_data
+#define BCMPREATTACHFN(_fn)		_fn
+#define _data		_data
+#define _fn			_fn
+#define _fn		_fn
+#define	BCMNMIATTACHFN(_fn)		_fn
+#define	BCMNMIATTACHDATA(_data)		_data
+#define CONST				const
+
+#endif /* BCM_RECLAIM */
+
+#if !defined STB
+#undef BCM47XX_CA9
+#endif /* STB */
+
+/* BCMFASTPATH related macro defines */
+#ifndef BCMFASTPATH
+#if defined(STB)
+#define BCMFASTPATH		__attribute__ ((__section__ (".text.fastpath")))
+#define BCMFASTPATH_HOST	__attribute__ ((__section__ (".text.fastpath_host")))
+#else
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif
+#endif /* BCMFASTPATH */
+
+
+/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
+ * ROM). This should eliminate the need to manually specify these functions in the ROM config file.
+ * It should only be used in special cases where the function must be in RAM for *all* ROM-based
+ * chips.
+ */
+	#define BCMRAMFN(_fn)	_fn
+
+#define STATIC	static
+
+/* Bus types */
+#define	SI_BUS			0	/* SOC Interconnect */
+#define	PCI_BUS			1	/* PCI target */
+#define	PCMCIA_BUS		2	/* PCMCIA target */
+#define SDIO_BUS		3	/* SDIO target */
+#define JTAG_BUS		4	/* JTAG */
+#define USB_BUS			5	/* USB (does not support R/W REG) */
+#define SPI_BUS			6	/* gSPI target */
+#define RPC_BUS			7	/* RPC target */
+
+/* Allows size optimization for single-bus image */
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus)	(BCMBUSTYPE)
+#else
+#define BUSTYPE(bus)	(bus)
+#endif
+
+#ifdef BCMBUSCORETYPE
+#define BUSCORETYPE(ct)		(BCMBUSCORETYPE)
+#else
+#define BUSCORETYPE(ct)		(ct)
+#endif
+
+/* Allows size optimization for single-backplane image */
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus)	(BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus)	(bus)
+#endif
+
+
+/* Allows size optimization for SPROM support */
+#if defined(BCMSPROMBUS)
+#define SPROMBUS	(BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS	(PCMCIA_BUS)
+#else
+#define SPROMBUS	(PCI_BUS)
+#endif
+
+/* Allows size optimization for single-chip image */
+#ifdef BCMCHIPID
+#define CHIPID(chip)	(BCMCHIPID)
+#else
+#define CHIPID(chip)	(chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev)	(BCMCHIPREV)
+#else
+#define CHIPREV(rev)	(rev)
+#endif
+
+#ifdef BCMPCIEREV
+#define PCIECOREREV(rev)	(BCMPCIEREV)
+#else
+#define PCIECOREREV(rev)	(rev)
+#endif
+
+#ifdef BCMPMUREV
+#define PMUREV(rev)	(BCMPMUREV)
+#else
+#define PMUREV(rev)	(rev)
+#endif
+
+#ifdef BCMCCREV
+#define CCREV(rev)	(BCMCCREV)
+#else
+#define CCREV(rev)	(rev)
+#endif
+
+#ifdef BCMGCIREV
+#define GCIREV(rev)	(BCMGCIREV)
+#else
+#define GCIREV(rev)	(rev)
+#endif
+
+/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
+#define DMADDR_MASK_32 0x0		/* Address mask for 32-bits */
+#define DMADDR_MASK_30 0xc0000000	/* Address mask for 30-bits */
+#define DMADDR_MASK_26 0xFC000000	/* Address mask for 26-bits */
+#define DMADDR_MASK_0  0xffffffff	/* Address mask for 0-bits (hi-part) */
+
+#define	DMADDRWIDTH_26  26 /* 26-bit addressing capability */
+#define	DMADDRWIDTH_30  30 /* 30-bit addressing capability */
+#define	DMADDRWIDTH_32  32 /* 32-bit addressing capability */
+#define	DMADDRWIDTH_63  63 /* 64-bit addressing capability */
+#define	DMADDRWIDTH_64  64 /* 64-bit addressing capability */
+
+typedef struct {
+	uint32 loaddr;
+	uint32 hiaddr;
+} dma64addr_t;
+
+#define PHYSADDR64HI(_pa) ((_pa).hiaddr)
+#define PHYSADDR64HISET(_pa, _val) \
+	do { \
+		(_pa).hiaddr = (_val);		\
+	} while (0)
+#define PHYSADDR64LO(_pa) ((_pa).loaddr)
+#define PHYSADDR64LOSET(_pa, _val) \
+	do { \
+		(_pa).loaddr = (_val);		\
+	} while (0)
+
+#ifdef BCMDMA64OSL
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa)
+#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
+#define PHYSADDRLO(_pa)  PHYSADDR64LO(_pa)
+#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+#define PHYSADDRTOULONG(_pa, _ulong) \
+	do { \
+		_ulong = ((unsigned long long)(_pa).hiaddr << 32) | ((_pa).loaddr); \
+	} while (0)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+	do { \
+		(_pa) = (_val);			\
+	} while (0)
+#endif /* BCMDMA64OSL */
+#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0)
+
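+/*
+ * Example (illustrative sketch): filling a dmaaddr_t independently of
+ * whether BCMDMA64OSL is defined. "lo" and "hi" are hypothetical 32-bit
+ * halves of a bus address obtained from the OSL mapping code.
+ *
+ *   dmaaddr_t pa;
+ *   PHYSADDRLOSET(pa, lo);       // low 32 bits of the bus address
+ *   PHYSADDRHISET(pa, hi);       // high 32 bits; a no-op without BCMDMA64OSL
+ *   if (PHYSADDRISZERO(pa))
+ *       ;                        // reject a null DMA address
+ */
+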
+/* One physical DMA segment */
+typedef struct  {
+	dmaaddr_t addr;
+	uint32	  length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 8
+
+
+typedef struct {
+	void *oshdmah; /* Opaque handle for OSL to store its information */
+	uint origsize; /* Size of the virtual packet */
+	uint nsegs;
+	hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+/* Packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
+ * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
+ * There is a compile-time check in wlc.c which ensures that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (hnddma.c).
+ */
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+/* add 40 bytes to allow for extra RPC header and info  */
+#define BCMEXTRAHDROOM 260
+#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+#if defined(STB)
+#if defined(BCM_GMAC3)
+#define BCMEXTRAHDROOM 32 /* For FullDongle, no D11 headroom space required. */
+#else
+#define BCMEXTRAHDROOM 224
+#endif /* ! BCM_GMAC3 */
+#else
+#define BCMEXTRAHDROOM 204
+#endif 
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef SDALIGN
+#define SDALIGN	32
+#endif
+
+/* Headroom required for dongle-to-host communication.  Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host.  (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.)
+ */
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD	(BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif 
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ *    #define <NAME>_M	BITFIELD_MASK(3)
+ *    #define <NAME>_S	4
+ * ...
+ *    regval = R_REG(osh, &regs->regfoo);
+ *    field = GFIELD(regval, <NAME>);
+ *    regval = SFIELD(regval, <NAME>, 1);
+ *    W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+		(((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+		(((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+		(((val) & (~(field ## _M << field ## _S))) | \
+		 ((unsigned)(bits) << field ## _S))
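+
+/*
+ * Worked example (illustrative; the field name MYFIELD is hypothetical): for
+ * a three-bit field occupying bits 4..6,
+ *   #define MYFIELD_M  BITFIELD_MASK(3)   // 0x7
+ *   #define MYFIELD_S  4
+ * then GFIELD(0x35, MYFIELD) == 0x3 (bits 4..6 of 0x35 are 011b), and
+ * SFIELD(0x35, MYFIELD, 1) == 0x15 (the field is rewritten to 001b).
+ */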
+
+/* define BCMSMALL to remove misc features for memory-constrained environments */
+#ifdef BCMSMALL
+#undef	BCMSPACE
+#define bcmspace	FALSE	/* if (bcmspace) code is discarded */
+#else
+#define	BCMSPACE
+#define bcmspace	TRUE	/* if (bcmspace) code is retained */
+#endif
+
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
+#ifdef LARGE_NVRAM_MAXSZ
+#define MAXSZ_NVRAM_VARS	LARGE_NVRAM_MAXSZ
+#else
+/* SROM12 changes */
+#define	MAXSZ_NVRAM_VARS	6144
+#endif /* LARGE_NVRAM_MAXSZ */
+#endif /* !MAXSZ_NVRAM_VARS */
+
+
+
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto-abandon of unoptimized compiles).
+ */
+
+
+#ifdef BCMLFRAG /* BCMLFRAG support enab macros  */
+	extern bool _bcmlfrag;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCMLFRAG_ENAB() (_bcmlfrag)
+	#elif defined(BCMLFRAG_DISABLED)
+		#define BCMLFRAG_ENAB()	(0)
+	#else
+		#define BCMLFRAG_ENAB()	(1)
+	#endif
+#else
+	#define BCMLFRAG_ENAB()		(0)
+#endif /* BCMLFRAG */
+
+#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */
+extern bool _pciedevenab;
+	#if defined(WL_ENAB_RUNTIME_CHECK)
+		#define BCMPCIEDEV_ENAB() (_pciedevenab)
+	#elif defined(BCMPCIEDEV_ENABLED)
+		#define BCMPCIEDEV_ENAB()	1
+	#else
+		#define BCMPCIEDEV_ENAB()	0
+	#endif
+#else
+	#define BCMPCIEDEV_ENAB()	0
+#endif /* BCMPCIEDEV */
+
+	#define BCMSDIODEV_ENAB()	0
+
+/* Max size for reclaimable NVRAM array */
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE	DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE	MAXSZ_NVRAM_VARS
+#endif /* DL_NVRAM */
+
+extern uint32 gFWID;
+
+/* Chip related low power flags (lpflags) */
+#define LPFLAGS_SI_GLOBAL_DISABLE		(1 << 0)
+#define LPFLAGS_SI_MEM_STDBY_DISABLE		(1 << 1)
+#define LPFLAGS_SI_SFLASH_DISABLE		(1 << 2)
+#define LPFLAGS_SI_BTLDO3P3_DISABLE		(1 << 3)
+#define LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE	(1 << 4)
+#define LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON (1 << 5)
+#define LPFLAGS_PHY_GLOBAL_DISABLE		(1 << 16)
+#define LPFLAGS_PHY_LP_DISABLE			(1 << 17)
+#define LPFLAGS_PSM_PHY_CTL			(1 << 18)
+
+/* Chip related Cbuck modes */
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0 0x00001c03
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0 0x00492490
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1 0x00001c03
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1 0x00490410
+
+/* Chip related dynamic cbuck mode mask */
+
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK  0xFFFFFC00
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK  0xFFFFFFFF
+
+#ifndef PAD
+#define _PADLINE(line)  pad ## line
+#define _XSTR(line)     _PADLINE(line)
+#define PAD             _XSTR(__LINE__)
+#endif
+
+#endif /* _bcmdefs_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 0000000..5437c8f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,933 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmdevs.h 625027 2016-03-15 08:20:18Z $
+ */
+
+#ifndef	_BCMDEVS_H
+#define	_BCMDEVS_H
+
+/* PCI vendor IDs */
+#define	VENDOR_EPIGRAM		0xfeda
+#define	VENDOR_BROADCOM		0x14e4
+#define	VENDOR_3COM		0x10b7
+#define	VENDOR_NETGEAR		0x1385
+#define	VENDOR_DIAMOND		0x1092
+#define	VENDOR_INTEL		0x8086
+#define	VENDOR_DELL		0x1028
+#define	VENDOR_HP		0x103c
+#define	VENDOR_HP_COMPAQ	0x0e11
+#define	VENDOR_APPLE		0x106b
+#define VENDOR_SI_IMAGE		0x1095		/* Silicon Image, used by Arasan SDIO Host */
+#define VENDOR_BUFFALO		0x1154		/* Buffalo vendor id */
+#define VENDOR_TI		0x104c		/* Texas Instruments */
+#define VENDOR_RICOH		0x1180		/* Ricoh */
+#define VENDOR_JMICRON		0x197b
+
+
+/* PCMCIA vendor IDs */
+#define	VENDOR_BROADCOM_PCMCIA	0x02d0
+
+/* SDIO vendor IDs */
+#define	VENDOR_BROADCOM_SDIO	0x00BF
+
+/* DONGLE VID/PIDs */
+#define BCM_DNGL_VID		0x0a5c
+#define BCM_DNGL_BL_PID_4328	0xbd12
+#define BCM_DNGL_BL_PID_4322	0xbd13
+#define BCM_DNGL_BL_PID_4319    0xbd16
+#define BCM_DNGL_BL_PID_43236   0xbd17
+#define BCM_DNGL_BL_PID_4332	0xbd18
+#define BCM_DNGL_BL_PID_4330	0xbd19
+#define BCM_DNGL_BL_PID_4334	0xbd1a
+#define BCM_DNGL_BL_PID_43239   0xbd1b
+#define BCM_DNGL_BL_PID_4324	0xbd1c
+#define BCM_DNGL_BL_PID_4360	0xbd1d
+#define BCM_DNGL_BL_PID_43143	0xbd1e
+#define BCM_DNGL_BL_PID_43242	0xbd1f
+#define BCM_DNGL_BL_PID_43342	0xbd21
+#define BCM_DNGL_BL_PID_4335	0xbd20
+#define BCM_DNGL_BL_PID_43341	0xbd22
+#define BCM_DNGL_BL_PID_4350    0xbd23
+#define BCM_DNGL_BL_PID_4345    0xbd24
+#define BCM_DNGL_BL_PID_4349	0xbd25
+#define BCM_DNGL_BL_PID_4354	0xbd26
+#define BCM_DNGL_BL_PID_43569   0xbd27
+#define BCM_DNGL_BL_PID_43909	0xbd28
+#define BCM_DNGL_BL_PID_4373	0xbd29
+
+#define BCM_DNGL_BDC_PID	0x0bdc
+#define BCM_DNGL_JTAG_PID	0x4a44
+
+/* HW USB BLOCK [CPULESS USB] PIDs */
+#define BCM_HWUSB_PID_43239     43239
+
+/* PCI Device IDs */
+#ifdef DEPRECATED /* These products have been deprecated */
+#define	BCM4210_DEVICE_ID	0x1072		/* never used */
+#define	BCM4230_DEVICE_ID	0x1086		/* never used */
+#define	BCM4401_ENET_ID		0x170c		/* 4401b0 production enet cards */
+#define	BCM3352_DEVICE_ID	0x3352		/* bcm3352 device id */
+#define	BCM3360_DEVICE_ID	0x3360		/* bcm3360 device id */
+#define	BCM4211_DEVICE_ID	0x4211
+#define	BCM4231_DEVICE_ID	0x4231
+#define	BCM4303_D11B_ID		0x4303		/* 4303 802.11b */
+#define	BCM4311_D11G_ID		0x4311		/* 4311 802.11b/g id */
+#define	BCM4311_D11DUAL_ID	0x4312		/* 4311 802.11a/b/g id */
+#define	BCM4311_D11A_ID		0x4313		/* 4311 802.11a id */
+#define	BCM4328_D11DUAL_ID	0x4314		/* 4328/4312 802.11a/g id */
+#define	BCM4328_D11G_ID		0x4315		/* 4328/4312 802.11g id */
+#define	BCM4328_D11A_ID		0x4316		/* 4328/4312 802.11a id */
+#define	BCM4318_D11A_ID		0x431a		/* 4318 802.11a id */
+#define	BCM4325_D11DUAL_ID	0x431b		/* 4325 802.11a/g id */
+#define	BCM4325_D11G_ID		0x431c		/* 4325 802.11g id */
+#define	BCM4325_D11A_ID		0x431d		/* 4325 802.11a id */
+#define	BCM4306_UART_ID		0x4322		/* 4306 uart */
+#define	BCM4306_V90_ID		0x4323		/* 4306 v90 codec */
+#define	BCM4306_D11G_ID2	0x4325		/* BCM4306_D11G_ID; INF w/loose binding war */
+#define	BCM4321_D11N_ID		0x4328		/* 4321 802.11n dualband id */
+#define	BCM4321_D11N2G_ID	0x4329		/* 4321 802.11n 2.4Ghz band id */
+#define	BCM4321_D11N5G_ID	0x432a		/* 4321 802.11n 5Ghz band id */
+#define BCM4322_D11N_ID		0x432b		/* 4322 802.11n dualband device */
+#define BCM4322_D11N2G_ID	0x432c		/* 4322 802.11n 2.4GHz device */
+#define BCM4322_D11N5G_ID	0x432d		/* 4322 802.11n 5GHz device */
+#define BCM4329_D11N_ID		0x432e		/* 4329 802.11n dualband device */
+#define BCM4329_D11N2G_ID	0x432f		/* 4329 802.11n 2.4G device */
+#define BCM4329_D11N5G_ID	0x4330		/* 4329 802.11n 5G device */
+#define	BCM4315_D11DUAL_ID	0x4334		/* 4315 802.11a/g id */
+#define	BCM4315_D11G_ID		0x4335		/* 4315 802.11g id */
+#define	BCM4315_D11A_ID		0x4336		/* 4315 802.11a id */
+#define BCM4319_D11N_ID		0x4337		/* 4319 802.11n dualband device */
+#define BCM4319_D11N2G_ID	0x4338		/* 4319 802.11n 2.4G device */
+#define BCM4319_D11N5G_ID	0x4339		/* 4319 802.11n 5G device */
+#define BCM43231_D11N2G_ID	0x4340		/* 43231 802.11n 2.4GHz device */
+#define BCM43221_D11N2G_ID	0x4341		/* 43221 802.11n 2.4GHz device */
+#define BCM43222_D11N_ID	0x4350		/* 43222 802.11n dualband device */
+#define BCM43222_D11N2G_ID	0x4351		/* 43222 802.11n 2.4GHz device */
+#define BCM43222_D11N5G_ID	0x4352		/* 43222 802.11n 5GHz device */
+#define BCM43226_D11N_ID	0x4354		/* 43226 802.11n dualband device */
+#endif /* DEPRECATED */
+/* DEPRECATED but used */
+#define	BCM4306_D11G_ID		0x4320		/* 4306 802.11g */
+#define	BCM4306_D11A_ID		0x4321		/* 4306 802.11a */
+#define	BCM4306_D11DUAL_ID	0x4324		/* 4306 dual A+B */
+#define	BCM4318_D11G_ID		0x4318		/* 4318 802.11b/g id */
+#define	BCM4318_D11DUAL_ID	0x4319		/* 4318 802.11a/b/g id */
+/* DEPRECATED */
+
+#define	BCM53572_D11N2G_ID	0x4329		/* 53572 802.11n 2.4Ghz band id (same as BCM4321) */
+#define BCM43224_D11N_ID	0x4353		/* 43224 802.11n dualband device */
+#define BCM43224_D11N_ID_VEN1	0x0576		/* Vendor specific 43224 802.11n db device */
+#define BCM43236_D11N_ID	0x4346		/* 43236 802.11n dualband device */
+#define BCM43236_D11N2G_ID	0x4347		/* 43236 802.11n 2.4GHz device */
+#define BCM43236_D11N5G_ID	0x4348		/* 43236 802.11n 5GHz device */
+#define BCM43225_D11N2G_ID	0x4357		/* 43225 802.11n 2.4GHz device */
+#define BCM43421_D11N_ID	0xA99D		/* 43421 802.11n dualband device */
+#define BCM4313_D11N2G_ID	0x4727		/* 4313 802.11n 2.4G device */
+#define BCM4330_D11N_ID         0x4360          /* 4330 802.11n dualband device */
+#define BCM4330_D11N2G_ID       0x4361          /* 4330 802.11n 2.4G device */
+#define BCM4330_D11N5G_ID       0x4362          /* 4330 802.11n 5G device */
+#define BCM4336_D11N_ID		0x4343		/* 4336 802.11n 2.4GHz device */
+#define BCM6362_D11N_ID		0x435f		/* 6362 802.11n dualband device */
+#define BCM6362_D11N2G_ID	0x433f		/* 6362 802.11n 2.4Ghz band id */
+#define BCM6362_D11N5G_ID	0x434f		/* 6362 802.11n 5Ghz band id */
+#define BCM4331_D11N_ID		0x4331		/* 4331 802.11n dualband id */
+#define BCM4331_D11N2G_ID	0x4332		/* 4331 802.11n 2.4Ghz band id */
+#define BCM4331_D11N5G_ID	0x4333		/* 4331 802.11n 5Ghz band id */
+#define BCM43237_D11N_ID	0x4355		/* 43237 802.11n dualband device */
+#define BCM43237_D11N5G_ID	0x4356		/* 43237 802.11n 5GHz device */
+#define BCM43227_D11N2G_ID	0x4358		/* 43227 802.11n 2.4GHz device */
+#define BCM43228_D11N_ID	0x4359		/* 43228 802.11n DualBand device */
+#define BCM43228_D11N5G_ID	0x435a		/* 43228 802.11n 5GHz device */
+#define BCM43362_D11N_ID	0x4363		/* 43362 802.11n 2.4GHz device */
+#define BCM43239_D11N_ID	0x4370		/* 43239 802.11n dualband device */
+#define BCM4324_D11N_ID		0x4374		/* 4324 802.11n dualband device */
+#define BCM43217_D11N2G_ID	0x43a9		/* 43217 802.11n 2.4GHz device */
+#define BCM43131_D11N2G_ID	0x43aa		/* 43131 802.11n 2.4GHz device */
+#define BCM4314_D11N2G_ID	0x4364		/* 4314 802.11n 2.4G device */
+#define BCM43142_D11N2G_ID	0x4365		/* 43142 802.11n 2.4G device */
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+#define BCM4334_D11N_ID		0x4380		/* 4334 802.11n dualband device */
+#define BCM4334_D11N2G_ID	0x4381		/* 4334 802.11n 2.4G device */
+#define BCM4334_D11N5G_ID	0x4382		/* 4334 802.11n 5G device */
+#define BCM43342_D11N_ID	0x4383		/* 43342 802.11n dualband device */
+#define BCM43342_D11N2G_ID	0x4384		/* 43342 802.11n 2.4G device */
+#define BCM43342_D11N5G_ID	0x4385		/* 43342 802.11n 5G device */
+#define BCM43341_D11N_ID	0x4386		/* 43341 802.11n dualband device */
+#define BCM43341_D11N2G_ID	0x4387		/* 43341 802.11n 2.4G device */
+#define BCM43341_D11N5G_ID	0x4388		/* 43341 802.11n 5G device */
+#define BCM4360_D11AC_ID	0x43a0
+#define BCM4360_D11AC2G_ID	0x43a1
+#define BCM4360_D11AC5G_ID	0x43a2
+#define BCM4345_D11AC_ID	0x43ab		/* 4345 802.11ac dualband device */
+#define BCM4345_D11AC2G_ID	0x43ac		/* 4345 802.11ac 2.4G device */
+#define BCM4345_D11AC5G_ID	0x43ad		/* 4345 802.11ac 5G device */
+#define BCM43455_D11AC_ID	0x43e3		/* 43455 802.11ac dualband device */
+#define BCM43455_D11AC2G_ID	0x43e4		/* 43455 802.11ac 2.4G device */
+#define BCM43455_D11AC5G_ID	0x43e5		/* 43455 802.11ac 5G device */
+#define BCM4335_D11AC_ID	0x43ae
+#define BCM4335_D11AC2G_ID	0x43af
+#define BCM4335_D11AC5G_ID	0x43b0
+#define BCM4352_D11AC_ID	0x43b1		/* 4352 802.11ac dualband device */
+#define BCM4352_D11AC2G_ID	0x43b2		/* 4352 802.11ac 2.4G device */
+#define BCM4352_D11AC5G_ID	0x43b3		/* 4352 802.11ac 5G device */
+#define BCM43602_D11AC_ID	0x43ba		/* ac dualband PCI devid SPROM programmed */
+#define BCM43602_D11AC2G_ID	0x43bb		/* 43602 802.11ac 2.4G device */
+#define BCM43602_D11AC5G_ID	0x43bc		/* 43602 802.11ac 5G device */
+#define BCM4349_D11AC_ID	0x4349		/* 4349 802.11ac dualband device */
+#define BCM4349_D11AC2G_ID	0x43dd		/* 4349 802.11ac 2.4G device */
+#define BCM4349_D11AC5G_ID	0x43de		/* 4349 802.11ac 5G device */
+#define BCM53573_D11AC_ID	0x43b4		/* 53573 802.11ac dualband device */
+#define BCM53573_D11AC2G_ID	0x43b5		/* 53573 802.11ac 2.4G device */
+#define BCM53573_D11AC5G_ID	0x43b6		/* 53573 802.11ac 5G device */
+#define BCM47189_D11AC_ID	0x43c6		/* 47189 802.11ac dualband device */
+#define BCM47189_D11AC2G_ID	0x43c7		/* 47189 802.11ac 2.4G device */
+#define BCM47189_D11AC5G_ID	0x43c8		/* 47189 802.11ac 5G device */
+#define BCM4355_D11AC_ID	0x43dc		/* 4355 802.11ac dualband device */
+#define BCM4355_D11AC2G_ID	0x43fc		/* 4355 802.11ac 2.4G device */
+#define BCM4355_D11AC5G_ID	0x43fd		/* 4355 802.11ac 5G device */
+#define BCM4359_D11AC_ID	0x43ef		/* 4359 802.11ac dualband device */
+#define BCM4359_D11AC2G_ID	0x43fe		/* 4359 802.11ac 2.4G device */
+#define BCM4359_D11AC5G_ID	0x43ff		/* 4359 802.11ac 5G device */
+#define BCM43596_D11AC_ID	0x4415		/* 43596 802.11ac dualband device */
+#define BCM43596_D11AC2G_ID	0x4416		/* 43596 802.11ac 2.4G device */
+#define BCM43596_D11AC5G_ID	0x4417		/* 43596 802.11ac 5G device */
+#define BCM43597_D11AC_ID	0x441c		/* 43597 802.11ac dualband device */
+#define BCM43597_D11AC2G_ID	0x441d		/* 43597 802.11ac 2.4G device */
+#define BCM43597_D11AC5G_ID	0x441e		/* 43597 802.11ac 5G device */
+#define BCM43909_D11AC_ID	0x43d0		/* 43909 802.11ac dualband device */
+#define BCM43909_D11AC2G_ID	0x43d1		/* 43909 802.11ac 2.4G device */
+#define BCM43909_D11AC5G_ID	0x43d2		/* 43909 802.11ac 5G device */
+#define BCM43012_D11N_ID	0xA804		/* 43012 802.11n dualband device */
+#define BCM43012_D11N2G_ID	0xA805		/* 43012 802.11n 2.4G device */
+#define BCM43012_D11N5G_ID	0xA806		/* 43012 802.11n 5G device */
+
+/* PCI Subsystem ID */
+#define BCM943228HMB_SSID_VEN1	0x0607
+#define BCM94313HMGBL_SSID_VEN1	0x0608
+#define BCM94313HMG_SSID_VEN1	0x0609
+#define BCM943142HM_SSID_VEN1	0x0611
+
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+
+#define BCM43242_D11N_ID	0x4367		/* 43242 802.11n dualband device */
+#define BCM43242_D11N2G_ID	0x4368		/* 43242 802.11n 2.4G device */
+#define BCM43242_D11N5G_ID	0x4369		/* 43242 802.11n 5G device */
+
+#define BCM4350_D11AC_ID	0x43a3
+#define BCM4350_D11AC2G_ID	0x43a4
+#define BCM4350_D11AC5G_ID	0x43a5
+
+#define BCM43556_D11AC_ID	0x43b7
+#define BCM43556_D11AC2G_ID	0x43b8
+#define BCM43556_D11AC5G_ID	0x43b9
+
+#define BCM43558_D11AC_ID	0x43c0
+#define BCM43558_D11AC2G_ID	0x43c1
+#define BCM43558_D11AC5G_ID	0x43c2
+
+#define BCM43566_D11AC_ID	0x43d3
+#define BCM43566_D11AC2G_ID	0x43d4
+#define BCM43566_D11AC5G_ID	0x43d5
+
+#define BCM43568_D11AC_ID	0x43d6
+#define BCM43568_D11AC2G_ID	0x43d7
+#define BCM43568_D11AC5G_ID	0x43d8
+
+#define BCM43569_D11AC_ID	0x43d9
+#define BCM43569_D11AC2G_ID	0x43da
+#define BCM43569_D11AC5G_ID	0x43db
+
+#define BCM43570_D11AC_ID	0x43d9
+#define BCM43570_D11AC2G_ID	0x43da
+#define BCM43570_D11AC5G_ID	0x43db
+
+#define BCM4354_D11AC_ID	0x43df		/* 4354 802.11ac dualband device */
+#define BCM4354_D11AC2G_ID	0x43e0		/* 4354 802.11ac 2.4G device */
+#define BCM4354_D11AC5G_ID	0x43e1		/* 4354 802.11ac 5G device */
+#define BCM43430_D11N2G_ID	0x43e2		/* 43430 802.11n 2.4G device */
+#define BCM43018_D11N2G_ID	0x441b		/* 43018 802.11n 2.4G device */
+
+
+#define BCM4347_D11AC_ID	0x440a		/* 4347 802.11ac dualband device */
+#define BCM4347_D11AC2G_ID	0x440b		/* 4347 802.11ac 2.4G device */
+#define BCM4347_D11AC5G_ID	0x440c		/* 4347 802.11ac 5G device */
+
+#define BCM4361_D11AC_ID	0x441f		/* 4361 802.11ac dualband device */
+#define BCM4361_D11AC2G_ID	0x4420		/* 4361 802.11ac 2.4G device */
+#define BCM4361_D11AC5G_ID	0x4421		/* 4361 802.11ac 5G device */
+
+#define BCM4362_D11AX_ID	0x4490		/* 4362 802.11ax dualband device */
+#define BCM4362_D11AX2G_ID	0x4491		/* 4362 802.11ax 2.4G device */
+#define BCM4362_D11AX5G_ID	0x4492		/* 4362 802.11ax 5G device */
+
+#define BCM4364_D11AC_ID	0x4464		/* 4364 802.11ac dualband device */
+#define BCM4364_D11AC2G_ID	0x446a		/* 4364 802.11ac 2.4G device */
+#define BCM4364_D11AC5G_ID	0x446b		/* 4364 802.11ac 5G device */
+
+#define BCM4365_D11AC_ID	0x43ca
+#define BCM4365_D11AC2G_ID	0x43cb
+#define BCM4365_D11AC5G_ID	0x43cc
+
+#define BCM4366_D11AC_ID	0x43c3
+#define BCM4366_D11AC2G_ID	0x43c4
+#define BCM4366_D11AC5G_ID	0x43c5
+
+#define BCM43349_D11N_ID	0x43e6		/* 43349 802.11n dualband id */
+#define BCM43349_D11N2G_ID	0x43e7		/* 43349 802.11n 2.4Ghz band id */
+#define BCM43349_D11N5G_ID	0x43e8		/* 43349 802.11n 5Ghz band id */
+
+#define BCM4358_D11AC_ID        0x43e9          /* 4358 802.11ac dualband device */
+#define BCM4358_D11AC2G_ID      0x43ea          /* 4358 802.11ac 2.4G device */
+#define BCM4358_D11AC5G_ID      0x43eb          /* 4358 802.11ac 5G device */
+
+#define BCM4356_D11AC_ID	0x43ec		/* 4356 802.11ac dualband device */
+#define BCM4356_D11AC2G_ID	0x43ed		/* 4356 802.11ac 2.4G device */
+#define BCM4356_D11AC5G_ID	0x43ee		/* 4356 802.11ac 5G device */
+
+#define BCM4371_D11AC_ID	0x440d		/* 4371 802.11ac dualband device */
+#define BCM4371_D11AC2G_ID	0x440e		/* 4371 802.11ac 2.4G device */
+#define BCM4371_D11AC5G_ID	0x440f		/* 4371 802.11ac 5G device */
+#define BCM7271_D11AC_ID	0x4410		/* 7271 802.11ac dualband device */
+#define BCM7271_D11AC2G_ID	0x4411		/* 7271 802.11ac 2.4G device */
+#define BCM7271_D11AC5G_ID	0x4412		/* 7271 802.11ac 5G device */
+
+#define BCM4373_D11AC_ID	0x4418          /* 4373 802.11ac dualband device */
+#define BCM4373_D11AC2G_ID	0x4419          /* 4373 802.11ac 2.4G device */
+#define BCM4373_D11AC5G_ID	0x441a          /* 4373 802.11ac 5G device */
+
+#define	BCMGPRS_UART_ID		0x4333		/* Uart id used by 4306/gprs card */
+#define	BCMGPRS2_UART_ID	0x4344		/* Uart id used by 4306/gprs card */
+#define FPGA_JTAGM_ID		0x43f0		/* FPGA jtagm device id */
+#define BCM_JTAGM_ID		0x43f1		/* BCM jtagm device id */
+#define SDIOH_FPGA_ID		0x43f2		/* sdio host fpga */
+#define BCM_SDIOH_ID		0x43f3		/* BCM sdio host id */
+#define SDIOD_FPGA_ID		0x43f4		/* sdio device fpga */
+#define SPIH_FPGA_ID		0x43f5		/* PCI SPI Host Controller FPGA */
+#define BCM_SPIH_ID		0x43f6		/* Synopsys SPI Host Controller */
+#define MIMO_FPGA_ID		0x43f8		/* FPGA mimo minimacphy device id */
+#define BCM_JTAGM2_ID		0x43f9		/* BCM alternate jtagm device id */
+#define SDHCI_FPGA_ID		0x43fa		/* Standard SDIO Host Controller FPGA */
+#define	BCM4402_ENET_ID		0x4402		/* 4402 enet */
+#define	BCM4402_V90_ID		0x4403		/* 4402 v90 codec */
+#define	BCM4410_DEVICE_ID	0x4410		/* bcm44xx family pci iline */
+#define	BCM4412_DEVICE_ID	0x4412		/* bcm44xx family pci enet */
+#define	BCM4430_DEVICE_ID	0x4430		/* bcm44xx family cardbus iline */
+#define	BCM4432_DEVICE_ID	0x4432		/* bcm44xx family cardbus enet */
+#define	BCM4704_ENET_ID		0x4706		/* 4704 enet (Use 47XX_ENET_ID instead!) */
+#define	BCM4710_DEVICE_ID	0x4710		/* 4710 primary function 0 */
+#define	BCM47XX_AUDIO_ID	0x4711		/* 47xx audio codec */
+#define	BCM47XX_V90_ID		0x4712		/* 47xx v90 codec */
+#define	BCM47XX_ENET_ID		0x4713		/* 47xx enet */
+#define	BCM47XX_EXT_ID		0x4714		/* 47xx external i/f */
+#define	BCM47XX_GMAC_ID		0x4715		/* 47xx Unimac based GbE */
+#define	BCM47XX_USBH_ID		0x4716		/* 47xx usb host */
+#define	BCM47XX_USBD_ID		0x4717		/* 47xx usb device */
+#define	BCM47XX_IPSEC_ID	0x4718		/* 47xx ipsec */
+#define	BCM47XX_ROBO_ID		0x4719		/* 47xx/53xx roboswitch core */
+#define	BCM47XX_USB20H_ID	0x471a		/* 47xx usb 2.0 host */
+#define	BCM47XX_USB20D_ID	0x471b		/* 47xx usb 2.0 device */
+#define	BCM47XX_ATA100_ID	0x471d		/* 47xx parallel ATA */
+#define	BCM47XX_SATAXOR_ID	0x471e		/* 47xx serial ATA & XOR DMA */
+#define	BCM47XX_GIGETH_ID	0x471f		/* 47xx GbE (5700) */
+#ifdef DEPRECATED /* These products have been deprecated */
+#define	BCM4712_MIPS_ID		0x4720		/* 4712 base devid */
+#define	BCM4716_DEVICE_ID	0x4722		/* 4716 base devid */
+#endif /* DEPRECATED */
+#define	BCM47XX_USB30H_ID	0x472a		/* 47xx usb 3.0 host */
+#define	BCM47XX_USB30D_ID	0x472b		/* 47xx usb 3.0 device */
+#define	BCM47XX_USBHUB_ID	0x472c		/* 47xx usb hub */
+#define BCM47XX_SMBUS_EMU_ID	0x47fe		/* 47xx emulated SMBus device */
+#define	BCM47XX_XOR_EMU_ID	0x47ff		/* 47xx emulated XOR engine */
+#define	EPI41210_DEVICE_ID	0xa0fa		/* bcm4210 */
+#define	EPI41230_DEVICE_ID	0xa10e		/* bcm4230 */
+#define JINVANI_SDIOH_ID	0x4743		/* Jinvani SDIO Gold Host */
+#define BCM27XX_SDIOH_ID	0x2702		/* BCM27xx Standard SDIO Host */
+#define PCIXX21_FLASHMEDIA_ID	0x803b		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH_ID	0x803c		/* TI PCI xx21 Standard Host Controller */
+#define R5C822_SDIOH_ID		0x0822		/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */
+#define JMICRON_SDIOH_ID	0x2381		/* JMicron Standard SDIO Host Controller */
+
+#define BCM43452_D11AC_ID	0x47ab		/* 43452 802.11ac dualband device */
+#define BCM43452_D11AC2G_ID	0x47ac		/* 43452 802.11ac 2.4G device */
+#define BCM43452_D11AC5G_ID	0x47ad		/* 43452 802.11ac 5G device */
+
+/* Chip IDs */
+#ifdef DEPRECATED /* These products have been deprecated */
+#define	BCM4306_CHIP_ID		0x4306		/* 4306 chipcommon chipid */
+#define	BCM4311_CHIP_ID		0x4311		/* 4311 PCIe 802.11a/b/g */
+#define	BCM43111_CHIP_ID	43111		/* 43111 chipcommon chipid (OTP chipid) */
+#define	BCM43112_CHIP_ID	43112		/* 43112 chipcommon chipid (OTP chipid) */
+#define	BCM4312_CHIP_ID		0x4312		/* 4312 chipcommon chipid */
+#define	BCM4315_CHIP_ID		0x4315		/* 4315 chip id */
+#define	BCM4318_CHIP_ID		0x4318		/* 4318 chipcommon chipid */
+#define	BCM4319_CHIP_ID		0x4319		/* 4319 chip id */
+#define	BCM4320_CHIP_ID		0x4320		/* 4320 chipcommon chipid */
+#define	BCM4321_CHIP_ID		0x4321		/* 4321 chipcommon chipid */
+#define	BCM4322_CHIP_ID		0x4322		/* 4322 chipcommon chipid */
+#define	BCM43221_CHIP_ID	43221		/* 43221 chipcommon chipid (OTP chipid) */
+#define	BCM43222_CHIP_ID	43222		/* 43222 chipcommon chipid */
+#define	BCM43226_CHIP_ID	43226		/* 43226 chipcommon chipid */
+#define	BCM43231_CHIP_ID	43231		/* 43231 chipcommon chipid (OTP chipid) */
+#define	BCM4342_CHIP_ID		4342		/* 4342 chipcommon chipid (OTP, RBBU) */
+#define	BCM4325_CHIP_ID		0x4325		/* 4325 chip id */
+#define	BCM4328_CHIP_ID		0x4328		/* 4328 chip id */
+#define	BCM4329_CHIP_ID		0x4329		/* 4329 chipcommon chipid */
+#define	BCM4712_CHIP_ID		0x4712		/* 4712 chipcommon chipid */
+#endif /* DEPRECATED */
+
+/* DEPRECATED but still referenced in components - start */
+#define	BCM4716_CHIP_ID		0x4716		/* 4716 chipcommon chipid */
+#define	BCM4748_CHIP_ID		0x4748		/* 4716 chipcommon chipid (OTP, RBBU) */
+#define	BCM47162_CHIP_ID	47162		/* 47162 chipcommon chipid */
+#define	BCM5354_CHIP_ID		0x5354		/* 5354 chipcommon chipid */
+/* DEPRECATED but still referenced in components - end */
+
+#define	BCM43224_CHIP_ID	43224		/* 43224 chipcommon chipid */
+#define	BCM43225_CHIP_ID	43225		/* 43225 chipcommon chipid */
+#define	BCM43227_CHIP_ID	43227		/* 43227 chipcommon chipid */
+#define	BCM43228_CHIP_ID	43228		/* 43228 chipcommon chipid */
+#define	BCM43217_CHIP_ID	43217		/* 43217 chip id (OTP chipid) */
+#define BCM4313_CHIP_ID		0x4313		/* 4313 chip id */
+#define	BCM43131_CHIP_ID	43131		/* 43131 chip id (OTP chipid) */
+#define	BCM43234_CHIP_ID	43234		/* 43234 chipcommon chipid */
+#define	BCM43235_CHIP_ID	43235		/* 43235 chipcommon chipid */
+#define	BCM43236_CHIP_ID	43236		/* 43236 chipcommon chipid */
+#define	BCM43237_CHIP_ID	43237		/* 43237 chipcommon chipid */
+#define	BCM43238_CHIP_ID	43238		/* 43238 chipcommon chipid */
+#define	BCM43239_CHIP_ID	43239		/* 43239 chipcommon chipid */
+#define	BCM43420_CHIP_ID	43420		/* 43222 chipcommon chipid (OTP, RBBU) */
+#define	BCM43421_CHIP_ID	43421		/* 43224 chipcommon chipid (OTP, RBBU) */
+#define	BCM43428_CHIP_ID	43428		/* 43228 chipcommon chipid (OTP, RBBU) */
+#define	BCM43431_CHIP_ID	43431		/* 4331  chipcommon chipid (OTP, RBBU) */
+#define	BCM43460_CHIP_ID	43460		/* 4360  chipcommon chipid (OTP, RBBU) */
+#define	BCM4331_CHIP_ID		0x4331		/* 4331 chipcommon chipid */
+#define BCM4336_CHIP_ID		0x4336		/* 4336 chipcommon chipid */
+#define BCM43362_CHIP_ID	43362		/* 43362 chipcommon chipid */
+#define BCM4330_CHIP_ID		0x4330		/* 4330 chipcommon chipid */
+#define BCM6362_CHIP_ID		0x6362		/* 6362 chipcommon chipid */
+#define BCM4314_CHIP_ID		0x4314		/* 4314 chipcommon chipid */
+#define BCM43142_CHIP_ID	43142		/* 43142 chipcommon chipid */
+#define BCM43143_CHIP_ID	43143		/* 43143 chipcommon chipid */
+#define	BCM4324_CHIP_ID		0x4324		/* 4324 chipcommon chipid */
+#define	BCM43242_CHIP_ID	43242		/* 43242 chipcommon chipid */
+#define	BCM43243_CHIP_ID	43243		/* 43243 chipcommon chipid */
+#define BCM4334_CHIP_ID		0x4334		/* 4334 chipcommon chipid */
+#define BCM4335_CHIP_ID		0x4335		/* 4335 chipcommon chipid */
+#define BCM4339_CHIP_ID		0x4339		/* 4339 chipcommon chipid */
+#define BCM43349_CHIP_ID	43349			/* 43349(0xA955) chipcommon chipid */
+#define BCM4360_CHIP_ID		0x4360          /* 4360 chipcommon chipid */
+#define BCM4364_CHIP_ID		0x4364			/* 4364 chipcommon chipid */
+#define BCM4352_CHIP_ID		0x4352          /* 4352 chipcommon chipid */
+#define BCM43526_CHIP_ID	0xAA06
+#define BCM43340_CHIP_ID	43340		/* 43340 chipcommon chipid */
+#define BCM43341_CHIP_ID	43341		/* 43341 chipcommon chipid */
+#define BCM43342_CHIP_ID	43342		/* 43342 chipcommon chipid */
+#define BCM4350_CHIP_ID		0x4350          /* 4350 chipcommon chipid */
+#define BCM4354_CHIP_ID		0x4354          /* 4354 chipcommon chipid */
+#define BCM4356_CHIP_ID		0x4356          /* 4356 chipcommon chipid */
+#define BCM4371_CHIP_ID		0x4371          /* 4371 chipcommon chipid */
+#define BCM43556_CHIP_ID	0xAA24          /* 43556 chipcommon chipid */
+#define BCM43558_CHIP_ID	0xAA26          /* 43558 chipcommon chipid */
+#define BCM43562_CHIP_ID	0xAA2A		/* 43562 chipcommon chipid */
+#define BCM43566_CHIP_ID	0xAA2E          /* 43566 chipcommon chipid */
+#define BCM43567_CHIP_ID	0xAA2F          /* 43567 chipcommon chipid */
+#define BCM43568_CHIP_ID	0xAA30          /* 43568 chipcommon chipid */
+#define BCM43569_CHIP_ID	0xAA31          /* 43569 chipcommon chipid */
+#define BCM43570_CHIP_ID	0xAA32          /* 43570 chipcommon chipid */
+#define BCM4358_CHIP_ID		0x4358          /* 4358 chipcommon chipid */
+#define	BCM43012_CHIP_ID	0xA804			/* 43012 chipcommon chipid */
+#define BCM4350_CHIP(chipid)	((CHIPID(chipid) == BCM4350_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4354_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43556_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43558_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43566_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43567_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43568_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43569_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43570_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */
+
+#define BCM4345_CHIP_ID		0x4345		/* 4345 chipcommon chipid */
+#define BCM43454_CHIP_ID	43454		/* 43454 chipcommon chipid */
+#define BCM43455_CHIP_ID	43455		/* 43455 chipcommon chipid */
+#define BCM43457_CHIP_ID	43457		/* 43457 chipcommon chipid */
+#define BCM43458_CHIP_ID	43458		/* 43458 chipcommon chipid */
+
+#define BCM4345_CHIP(chipid)	(CHIPID(chipid) == BCM4345_CHIP_ID || \
+				 CHIPID(chipid) == BCM43454_CHIP_ID || \
+				 CHIPID(chipid) == BCM43455_CHIP_ID || \
+				 CHIPID(chipid) == BCM43457_CHIP_ID || \
+				 CHIPID(chipid) == BCM43458_CHIP_ID)
+
+#define CASE_BCM4345_CHIP	case BCM4345_CHIP_ID: /* fallthrough */ \
+				case BCM43454_CHIP_ID: /* fallthrough */ \
+				case BCM43455_CHIP_ID: /* fallthrough */ \
+				case BCM43457_CHIP_ID: /* fallthrough */ \
+				case BCM43458_CHIP_ID
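+
+/*
+ * Usage sketch (illustrative only, not part of the original header): the
+ * BCM4345_CHIP() predicate and the CASE_BCM4345_CHIP label list cover the
+ * same chip family, one for expressions and one for switch statements.
+ * CHIPID() is assumed to be the chip-id extraction macro defined elsewhere
+ * in this header set; the function below is hypothetical.
+ *
+ *	static int chip_is_4345_family(uint32 chipid)
+ *	{
+ *		switch (CHIPID(chipid)) {
+ *		CASE_BCM4345_CHIP:
+ *			return 1;
+ *		default:
+ *			return 0;
+ *		}
+ *	}
+ */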
+
+#define BCM43430_CHIP_ID	43430		/* 43430 chipcommon chipid */
+#define BCM43018_CHIP_ID	43018		/* 43018 chipcommon chipid */
+#define BCM4349_CHIP_ID		0x4349		/* 4349 chipcommon chipid */
+#define BCM4355_CHIP_ID		0x4355		/* 4355 chipcommon chipid */
+#define BCM4359_CHIP_ID		0x4359		/* 4359 chipcommon chipid */
+#define BCM4349_CHIP(chipid)	((CHIPID(chipid) == BCM4349_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4355_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4359_CHIP_ID))
+#define BCM4349_CHIP_GRPID		BCM4349_CHIP_ID: \
+					case BCM4355_CHIP_ID: \
+					case BCM4359_CHIP_ID
+#define BCM43596_CHIP_ID		43596		/* 43596 chipcommon chipid */
+#define BCM4347_CHIP_ID		0x4347          /* 4347 chipcommon chipid */
+#define BCM4357_CHIP_ID		0x4357          /* 4357 chipcommon chipid */
+#define BCM4361_CHIP_ID		0x4361          /* 4361 chipcommon chipid */
+#define BCM4362_CHIP_ID		0x4362          /* 4362 chipcommon chipid */
+#define BCM4347_CHIP(chipid)	((CHIPID(chipid) == BCM4347_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4357_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4361_CHIP_ID))
+#define BCM4347_CHIP_GRPID		BCM4347_CHIP_ID: \
+					case BCM4357_CHIP_ID: \
+					case BCM4361_CHIP_ID
+
+#define BCM4365_CHIP_ID		0x4365		/* 4365 chipcommon chipid */
+#define BCM4366_CHIP_ID		0x4366		/* 4366 chipcommon chipid */
+#define BCM4365_CHIP(chipid)	((CHIPID(chipid) == BCM4365_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4366_CHIP_ID))
+
+
+#define BCM43909_CHIP_ID	0xab85		/* 43909 chipcommon chipid */
+
+#define BCM43602_CHIP_ID	0xaa52		/* 43602 chipcommon chipid */
+#define BCM43462_CHIP_ID	0xa9c6		/* 43462 chipcommon chipid */
+#define BCM43522_CHIP_ID	0xaa02		/* 43522 chipcommon chipid */
+#define BCM43602_CHIP(chipid)	((CHIPID(chipid) == BCM43602_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43462_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */
+#define BCM43012_CHIP(chipid)	(CHIPID(chipid) == BCM43012_CHIP_ID)
+#define CASE_BCM43602_CHIP		case BCM43602_CHIP_ID: /* fallthrough */ \
+				case BCM43462_CHIP_ID: /* fallthrough */ \
+				case BCM43522_CHIP_ID
+
+#define	BCM4402_CHIP_ID		0x4402		/* 4402 chipid */
+#define	BCM4704_CHIP_ID		0x4704		/* 4704 chipcommon chipid */
+#define	BCM4706_CHIP_ID		0x5300		/* 4706 chipcommon chipid */
+#define BCM4707_CHIP_ID		53010		/* 4707 chipcommon chipid */
+#define BCM47094_CHIP_ID	53030		/* 47094 chipcommon chipid */
+#define BCM53018_CHIP_ID	53018		/* 53018 chipcommon chipid */
+#define BCM4707_CHIP(chipid)	(((chipid) == BCM4707_CHIP_ID) || \
+				((chipid) == BCM53018_CHIP_ID) || \
+				((chipid) == BCM47094_CHIP_ID))
+#define	BCM4710_CHIP_ID		0x4710		/* 4710 chipid */
+#define	BCM4749_CHIP_ID		0x4749		/* 5357 chipcommon chipid (OTP, RBBU) */
+#define BCM4785_CHIP_ID		0x4785		/* 4785 chipcommon chipid */
+#define	BCM5350_CHIP_ID		0x5350		/* 5350 chipcommon chipid */
+#define	BCM5352_CHIP_ID		0x5352		/* 5352 chipcommon chipid */
+#define BCM5365_CHIP_ID		0x5365		/* 5365 chipcommon chipid */
+#define	BCM5356_CHIP_ID		0x5356		/* 5356 chipcommon chipid */
+#define	BCM5357_CHIP_ID		0x5357		/* 5357 chipcommon chipid */
+#define	BCM53572_CHIP_ID	53572		/* 53572 chipcommon chipid */
+#define	BCM53573_CHIP_ID	53573		/* 53573 chipcommon chipid */
+#define	BCM53574_CHIP_ID	53574		/* 53574 chipcommon chipid */
+#define BCM53573_CHIP(chipid)	((CHIPID(chipid) == BCM53573_CHIP_ID) || \
+				(CHIPID(chipid) == BCM53574_CHIP_ID))
+#define BCM53573_CHIP_GRPID	BCM53573_CHIP_ID : \
+			        case BCM53574_CHIP_ID
+#define BCM53573_DEVICE(devid)	(((devid) == BCM53573_D11AC_ID) || \
+				((devid) == BCM53573_D11AC2G_ID) || \
+				((devid) == BCM53573_D11AC5G_ID) || \
+				((devid) == BCM47189_D11AC_ID) || \
+				((devid) == BCM47189_D11AC2G_ID) || \
+				((devid) == BCM47189_D11AC5G_ID))
+
+#define	BCM7271_CHIP_ID		0x05c9		/* 7271 chipcommon chipid */
+#define	BCM4373_CHIP_ID		0x4373		/* 4373 chipcommon chipid */
+
+/* Package IDs */
+#ifdef DEPRECATED /* These products have been deprecated */
+#define	BCM4303_PKG_ID		2		/* 4303 package id */
+#define	BCM4309_PKG_ID		1		/* 4309 package id */
+#define	BCM4712LARGE_PKG_ID	0		/* 340pin 4712 package id */
+#define	BCM4712SMALL_PKG_ID	1		/* 200pin 4712 package id */
+#define	BCM4712MID_PKG_ID	2		/* 225pin 4712 package id */
+#define BCM4328USBD11G_PKG_ID	2		/* 4328 802.11g USB package id */
+#define BCM4328USBDUAL_PKG_ID	3		/* 4328 802.11a/g USB package id */
+#define BCM4328SDIOD11G_PKG_ID	4		/* 4328 802.11g SDIO package id */
+#define BCM4328SDIODUAL_PKG_ID	5		/* 4328 802.11a/g SDIO package id */
+#define BCM4329_289PIN_PKG_ID	0		/* 4329 289-pin package id */
+#define BCM4329_182PIN_PKG_ID	1		/* 4329N 182-pin package id */
+#define BCM5354E_PKG_ID		1		/* 5354E package id */
+#define	BCM4716_PKG_ID		8		/* 4716 package id */
+#define	BCM4717_PKG_ID		9		/* 4717 package id */
+#define	BCM4718_PKG_ID		10		/* 4718 package id */
+#endif  /* DEPRECATED */
+#define BCM5356_PKG_NONMODE	1		/* 5356 package without nmode support */
+#define BCM5358U_PKG_ID		8		/* 5358U package id */
+#define BCM5358_PKG_ID		9		/* 5358 package id */
+#define BCM47186_PKG_ID		10		/* 47186 package id */
+#define BCM5357_PKG_ID		11		/* 5357 package id */
+#define BCM5356U_PKG_ID		12		/* 5356U package id */
+#define BCM53572_PKG_ID		8		/* 53572 package id */
+#define BCM5357C0_PKG_ID	8		/* 5357c0 package id (the same as 53572) */
+#define BCM47188_PKG_ID		9		/* 47188 package id */
+#define BCM5358C0_PKG_ID	0xa		/* 5358c0 package id */
+#define BCM5356C0_PKG_ID	0xb		/* 5356c0 package id */
+#define BCM4331TT_PKG_ID        8		/* 4331 12x12 package id */
+#define BCM4331TN_PKG_ID        9		/* 4331 12x9 package id */
+#define BCM4331TNA0_PKG_ID     0xb		/* 4331 12x9 package id */
+#define BCM47189_PKG_ID		1		/* 47189 package id */
+#define BCM53573_PKG_ID		0		/* 53573 package id */
+#define	BCM4706L_PKG_ID		1		/* 4706L package id */
+
+#define HDLSIM5350_PKG_ID	1		/* HDL simulator package id for a 5350 */
+#define HDLSIM_PKG_ID		14		/* HDL simulator package id */
+#define HWSIM_PKG_ID		15		/* Hardware simulator package id */
+#define BCM43224_FAB_CSM	0x8		/* the chip is manufactured by CSM */
+#define BCM43224_FAB_SMIC	0xa		/* the chip is manufactured by SMIC */
+#define BCM4336_WLBGA_PKG_ID	0x8
+#define BCM4330_WLBGA_PKG_ID	0x0
+#define BCM4314PCIE_ARM_PKG_ID		(8 | 0)	/* 4314 QFN PCI package id, bit 3 tie high */
+#define BCM4314SDIO_PKG_ID		(8 | 1)	/* 4314 QFN SDIO package id */
+#define BCM4314PCIE_PKG_ID		(8 | 2)	/* 4314 QFN PCI (ARM-less) package id */
+#define BCM4314SDIO_ARM_PKG_ID		(8 | 3)	/* 4314 QFN SDIO (ARM-less) package id */
+#define BCM4314SDIO_FPBGA_PKG_ID	(8 | 4)	/* 4314 FpBGA SDIO package id */
+#define BCM4314DEV_PKG_ID		(8 | 6)	/* 4314 Development package id */
+
+#define BCM4707_PKG_ID		1		/* 4707 package id */
+#define BCM4708_PKG_ID		2		/* 4708 package id */
+#define BCM4709_PKG_ID		0		/* 4709 package id */
+
+#define PCIXX21_FLASHMEDIA0_ID	0x8033		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH0_ID	0x8034		/* TI PCI xx21 Standard Host Controller */
+
+#define BCM4335_WLCSP_PKG_ID	(0x0)	/* WLCSP Module/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGA_PKG_ID	(0x1)	/* FCBGA PC/Embeded/Media PCIE/SDIO */
+#define BCM4335_WLBGA_PKG_ID	(0x2)	/* WLBGA COB/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGAD_PKG_ID	(0x3)	/* FCBGA Debug Debug/Dev All if's. */
+#define BCM4335_PKG_MASK	(0x3)
+#define BCM43602_12x12_PKG_ID	(0x1)	/* 12x12 pins package, used for e.g. router designs */
+
+/* boardflags */
+#define	BFL_BTC2WIRE		0x00000001  /* old 2wire Bluetooth coexistence, OBSOLETE */
+#define BFL_BTCOEX      0x00000001      /* Board supports BTCOEX */
+#define	BFL_PACTRL		0x00000002  /* Board has gpio 9 controlling the PA */
+#define BFL_AIRLINEMODE	0x00000004  /* Board implements gpio radio disable indication */
+#define	BFL_ADCDIV		0x00000008  /* Board has the rssi ADC divider */
+#define BFL_DIS_256QAM		0x00000008
+#define	BFL_ENETROBO		0x00000010  /* Board has robo switch or core */
+#define	BFL_TSSIAVG   		0x00000010  /* TSSI averaging for ACPHY chips */
+#define	BFL_NOPLLDOWN		0x00000020  /* Not ok to power down the chip pll and oscillator */
+#define	BFL_CCKHIPWR		0x00000040  /* Can do high-power CCK transmission */
+#define	BFL_ENETADM		0x00000080  /* Board has ADMtek switch */
+#define	BFL_ENETVLAN		0x00000100  /* Board has VLAN capability */
+#define	BFL_LTECOEX		0x00000200  /* LTE Coex enabled */
+#define BFL_NOPCI		0x00000400  /* Board leaves PCI floating */
+#define BFL_FEM			0x00000800  /* Board supports the Front End Module */
+#define BFL_EXTLNA		0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_HGPA		0x00002000  /* Board has a high gain PA */
+#define	BFL_BTC2WIRE_ALTGPIO	0x00004000  /* Board's BTC 2wire is in the alternate gpios */
+#define	BFL_ALTIQ		0x00008000  /* Alternate I/Q settings */
+#define BFL_NOPA		0x00010000  /* Board has no PA */
+#define BFL_RSSIINV		0x00020000  /* Board's RSSI uses positive slope(not TSSI) */
+#define BFL_PAREF		0x00040000  /* Board uses the PARef LDO */
+#define BFL_3TSWITCH		0x00080000  /* Board uses a triple throw switch shared with BT */
+#define BFL_PHASESHIFT		0x00100000  /* Board can support phase shifter */
+#define BFL_BUCKBOOST		0x00200000  /* Power topology uses BUCKBOOST */
+#define BFL_FEM_BT		0x00400000  /* Board has FEM and switch to share antenna w/ BT */
+#define BFL_NOCBUCK		0x00800000  /* Power topology doesn't use CBUCK */
+#define BFL_CCKFAVOREVM		0x01000000  /* Favor CCK EVM over spectral mask */
+#define BFL_PALDO		0x02000000  /* Power topology uses PALDO */
+#define BFL_LNLDO2_2P5		0x04000000  /* Select 2.5V as LNLDO2 output voltage */
+#define BFL_FASTPWR		0x08000000
+#define BFL_UCPWRCTL_MININDX	0x08000000  /* Enforce min power index to avoid FEM damage */
+#define BFL_EXTLNA_5GHz		0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_TRSW_1by2		0x20000000  /* Board has 2 TRSW's in 1by2 designs */
+#define BFL_GAINBOOSTA01        0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL_LO_TRSW_R_5GHz	0x40000000  /* In 5G do not throw TRSW to T for clipLO gain */
+#define BFL_ELNA_GAINDEF	0x80000000  /* Backoff InitGain based on elna_2g/5g field
+					     * when this flag is set
+					     */
+#define BFL_EXTLNA_TX	0x20000000	/* Temp boardflag to indicate to */
+
+/* boardflags2 */
+#define BFL2_RXBB_INT_REG_DIS	0x00000001  /* Board has an external rxbb regulator */
+#define BFL2_APLL_WAR		0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_TXPWRCTRL_EN	0x00000004  /* Board permits enabling TX Power Control */
+#define BFL2_2X4_DIV		0x00000008  /* Board supports the 2X4 diversity switch */
+#define BFL2_5G_PWRGAIN		0x00000010  /* Board supports 5G band power gain */
+#define BFL2_PCIEWAR_OVR	0x00000020  /* Board overrides ASPM and Clkreq settings */
+#define BFL2_CAESERS_BRD	0x00000040  /* Board is Caesers brd (unused by sw) */
+#define BFL2_BTC3WIRE		0x00000080  /* Board support legacy 3 wire or 4 wire */
+#define BFL2_BTCLEGACY          0x00000080  /* Board support legacy 3/4 wire, to replace
+					     * BFL2_BTC3WIRE
+					     */
+#define BFL2_SKWRKFEM_BRD	0x00000100  /* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SPUR_WAR		0x00000200  /* Board has a WAR for clock-harmonic spurs */
+#define BFL2_GPLL_WAR		0x00000400  /* Flag to narrow G-band PLL loop b/w */
+#define BFL2_TRISTATE_LED	0x00000800  /* Tri-state the LED */
+#define BFL2_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+#define BFL2_2G_SPUR_WAR	0x00002000  /* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_BPHY_ALL_TXCORES	0x00004000  /* Transmit bphy frames using all tx cores */
+#define BFL2_FCC_BANDEDGE_WAR	0x00008000  /* Activates WAR to improve FCC bandedge performance */
+#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000       /* Reducing DAC Spurs */
+#define BFL2_GPLL_WAR2	        0x00010000  /* Flag to widen G-band PLL loop b/w */
+#define BFL2_REDUCED_PA_TURNONTIME 0x00010000  /* Flag to reduce PA turn on Time */
+#define BFL2_IPALVLSHIFT_3P3    0x00020000
+#define BFL2_INTERNDET_TXIQCAL  0x00040000  /* Use internal envelope detector for TX IQCAL */
+#define BFL2_XTALBUFOUTEN       0x00080000  /* Keep the buffered Xtal output from radio on.
+					     * Most drivers will turn it off without this flag
+					     * to save power.
+					     */
+
+#define BFL2_ANAPACTRL_2G	0x00100000  /* 2G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ANAPACTRL_5G	0x00200000  /* 5G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ELNACTRL_TRSW_2G	0x00400000  /* AZW4329: 2G gmode_elna_gain controls TR Switch */
+#define BFL2_BT_SHARE_ANT0	0x00800000  /* share core0 antenna with BT */
+#define BFL2_TEMPSENSE_HIGHER	0x01000000  /* The tempsense threshold can sustain higher value
+					     * than programmed. The exact delta is decided by
+					     * driver per chip/boardtype. This can be used
+					     * when tempsense qualification happens after shipment
+					     */
+#define BFL2_BTC3WIREONLY       0x02000000  /* standard 3 wire btc only.  4 wire not supported */
+#define BFL2_PWR_NOMINAL	0x04000000  /* 0: power reduction on, 1: no power reduction */
+#define BFL2_EXTLNA_PWRSAVE	0x08000000  /* boardflag to enable ucode to apply power save */
+						/* ucode control of eLNA during Tx */
+#define BFL2_4313_RADIOREG	0x10000000  /* board rework */
+#define BFL2_DYNAMIC_VMID	0x10000000  /* enable dynamic Vmid in idle TSSI CAL for 4331 */
+
+#define BFL2_SDR_EN		0x20000000  /* SDR enabled or disabled */
+#define BFL2_DYNAMIC_VMID	0x10000000  /* boardflag to enable dynamic Vmid idle TSSI CAL */
+#define BFL2_LNA1BYPFORTR2G	0x40000000  /* acphy, enable lna1 bypass for clip gain, 2g */
+#define BFL2_LNA1BYPFORTR5G	0x80000000  /* acphy, enable lna1 bypass for clip gain, 5g */
+
+/* SROM 11 - 11ac boardflag definitions */
+#define BFL_SROM11_BTCOEX  0x00000001  /* Board supports BTCOEX */
+#define BFL_SROM11_WLAN_BT_SH_XTL  0x00000002  /* bluetooth and wlan share same crystal */
+#define BFL_SROM11_EXTLNA	0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_SROM11_EPA_TURNON_TIME     0x00018000  /* 2 bits for different PA turn on times */
+#define BFL_SROM11_EPA_TURNON_TIME_SHIFT  15
+#define BFL_SROM11_PRECAL_TX_IDX	0x00040000  /* Dedicated TX IQLOCAL IDX values */
+				/* per subband, as derived from 43602A1 MCH5 */
+#define BFL_SROM11_EXTLNA_5GHz	0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_SROM11_GAINBOOSTA01	0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL2_SROM11_APLL_WAR	0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_SROM11_ANAPACTRL_2G  0x00100000  /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_ANAPACTRL_5G  0x00200000  /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL    0x00020000  /* Keep ext. PA's on in TX IQLO CAL */
+
+/* boardflags3 */
+#define BFL3_FEMCTRL_SUB	  0x00000007  /* acphy, subrevs of femctrl on top of srom_femctrl */
+#define BFL3_RCAL_WAR		  0x00000008  /* acphy, rcal war active on this board (4335a0) */
+#define BFL3_TXGAINTBLID	  0x00000070  /* acphy, txgain table id */
+#define BFL3_TXGAINTBLID_SHIFT	  0x4         /* acphy, txgain table id shift bit */
+#define BFL3_TSSI_DIV_WAR	  0x00000080  /* acphy, Separate paparam for 20/40/80 */
+#define BFL3_TSSI_DIV_WAR_SHIFT	  0x7         /* acphy, Separate paparam for 20/40/80 shift bit */
+#define BFL3_FEMTBL_FROM_NVRAM    0x00000100  /* acphy, femctrl table is read from nvram */
+#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8         /* acphy, femctrl table is read from nvram */
+#define BFL3_AGC_CFG_2G           0x00000200  /* acphy, gain control configuration for 2G */
+#define BFL3_AGC_CFG_5G           0x00000400  /* acphy, gain control configuration for 5G */
+#define BFL3_PPR_BIT_EXT          0x00000800  /* acphy, bit position for 1bit extension for ppr */
+#define BFL3_PPR_BIT_EXT_SHIFT    11          /* acphy, bit shift for 1bit extension for ppr */
+#define BFL3_BBPLL_SPR_MODE_DIS	  0x00001000  /* acphy, disables bbpll spur modes */
+#define BFL3_RCAL_OTP_VAL_EN      0x00002000  /* acphy, to read rcal_trim value from otp */
+#define BFL3_2GTXGAINTBL_BLANK	  0x00004000  /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14       /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK	  0x00008000  /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15       /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_PHASETRACK_MAX_ALPHABETA	  0x00010000  /* acphy, to max out alpha,beta to 511 */
+#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16       /* acphy, to max out alpha,beta to 511 */
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN           0x00060000
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17
+#define BFL3_5G_SPUR_WAR          0x00080000  /* acphy, enable spur WAR in 5G band */
+#define BFL3_1X1_RSDB_ANT	  0x01000000  /* to find if 2-ant RSDB board or 1-ant RSDB board */
+#define BFL3_1X1_RSDB_ANT_SHIFT           24
+
+/* acphy: lpmode2g and lpmode_5g related boardflags */
+#define BFL3_ACPHY_LPMODE_2G	  0x00300000  /* bits 20:21 for lpmode_2g choice */
+#define BFL3_ACPHY_LPMODE_2G_SHIFT	  20
+
+#define BFL3_ACPHY_LPMODE_5G	  0x00C00000  /* bits 22:23 for lpmode_5g choice */
+#define BFL3_ACPHY_LPMODE_5G_SHIFT	  22
+
+#define BFL3_EXT_LPO_ISCLOCK      0x02000000  /* External LPO is clock, not x-tal */
+#define BFL3_FORCE_INT_LPO_SEL    0x04000000  /* Force internal lpo */
+#define BFL3_FORCE_EXT_LPO_SEL    0x08000000  /* Force external lpo */
+
+#define BFL3_EN_BRCM_IMPBF        0x10000000  /* acphy, Allow BRCM Implicit TxBF */
+#define BFL3_AVVMID_FROM_NVRAM    0x40000000  /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM    0x80000000  /* Read Vlin En from NVRAM  */
+
+#define BFL3_AVVMID_FROM_NVRAM_SHIFT   30   /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT   31   /* Enable Vlin  from NVRAM  */
+
+/* boardflags4 for SROM12 */
+#define BFL4_SROM12_4dBPAD      (1 << 0)   /* To distinguish between normal and 4dB pad board */
+#define BFL4_SROM12_2G_DETTYPE      (1 << 1)   /* Determine power detector type for 2G */
+#define BFL4_SROM12_5G_DETTYPE      (1 << 2)   /* Determine power detector type for 5G */
+#define BFL4_4364_HARPOON 0x0100   /* Harpoon module 4364 */
+#define BFL4_4364_GODZILLA 0x0200   /* Godzilla module 4364 */
+
+
+/* papd params */
+#define PAPD_TX_ATTN_2G 0xFF
+#define PAPD_TX_ATTN_5G 0xFF00
+#define PAPD_TX_ATTN_5G_SHIFT 8
+#define PAPD_RX_ATTN_2G 0xFF
+#define PAPD_RX_ATTN_5G 0xFF00
+#define PAPD_RX_ATTN_5G_SHIFT 8
+#define PAPD_CAL_IDX_2G 0xFF
+#define PAPD_CAL_IDX_5G 0xFF00
+#define PAPD_CAL_IDX_5G_SHIFT 8
+#define PAPD_BBMULT_2G 0xFF
+#define PAPD_BBMULT_5G 0xFF00
+#define PAPD_BBMULT_5G_SHIFT 8
+#define TIA_GAIN_MODE_2G 0xFF
+#define TIA_GAIN_MODE_5G 0xFF00
+#define TIA_GAIN_MODE_5G_SHIFT 8
+#define PAPD_EPS_OFFSET_2G 0xFFFF
+#define PAPD_EPS_OFFSET_5G 0xFFFF0000
+#define PAPD_EPS_OFFSET_5G_SHIFT 16
+#define PAPD_CALREF_DB_2G 0xFF
+#define PAPD_CALREF_DB_5G 0xFF00
+#define PAPD_CALREF_DB_5G_SHIFT 8
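+
+/*
+ * Illustrative sketch (not in the original header): the PAPD/TIA values
+ * above are mask + shift pairs for fields packed two-per-word (2G in the
+ * low byte/half, 5G above it).  Variable names are hypothetical.
+ *
+ *	uint32 tx_attn = ...;	// packed 2G/5G papd tx attenuation word
+ *	uint8 attn_2g = (uint8)(tx_attn & PAPD_TX_ATTN_2G);
+ *	uint8 attn_5g = (uint8)((tx_attn & PAPD_TX_ATTN_5G) >> PAPD_TX_ATTN_5G_SHIFT);
+ */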
+
+
+/* board specific GPIO assignment; gpio 0-3 are also customer-configurable LEDs */
+#define	BOARD_GPIO_BTC3W_IN	0x850	/* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
+#define	BOARD_GPIO_BTC3W_OUT	0x020	/* bit 5 is TX_CONF */
+#define	BOARD_GPIO_BTCMOD_IN	0x010	/* bit 4 is the alternate BT Coexistence Input */
+#define	BOARD_GPIO_BTCMOD_OUT	0x020	/* bit 5 is the alternate BT Coexistence Out */
+#define	BOARD_GPIO_BTC_IN	0x080	/* bit 7 is BT Coexistence Input */
+#define	BOARD_GPIO_BTC_OUT	0x100	/* bit 8 is BT Coexistence Out */
+#define	BOARD_GPIO_PACTRL	0x200	/* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_12		0x1000	/* gpio 12 */
+#define BOARD_GPIO_13		0x2000	/* gpio 13 */
+#define BOARD_GPIO_BTC4_IN	0x0800	/* gpio 11, coex4, in */
+#define BOARD_GPIO_BTC4_BT	0x2000	/* gpio 12, coex4, bt active */
+#define BOARD_GPIO_BTC4_STAT	0x4000	/* gpio 14, coex4, status */
+#define BOARD_GPIO_BTC4_WLAN	0x8000	/* gpio 15, coex4, wlan active */
+#define	BOARD_GPIO_1_WLAN_PWR	0x02	/* throttle WLAN power on X21 board */
+#define	BOARD_GPIO_2_WLAN_PWR	0x04	/* throttle WLAN power on X29C board */
+#define	BOARD_GPIO_3_WLAN_PWR	0x08	/* throttle WLAN power on X28 board */
+#define	BOARD_GPIO_4_WLAN_PWR	0x10	/* throttle WLAN power on X19 board */
+#define	BOARD_GPIO_13_WLAN_PWR	0x2000	/* throttle WLAN power on X14 board */
+
+#define GPIO_BTC4W_OUT_4312  0x010  /* bit 4 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224_SHARED  0x0e0  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43225  0x0e0  /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */
+#define GPIO_BTC4W_OUT_43421  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_4313  0x060  /* bit 5 SW_BT, bit 6 SW_WL */
+#define GPIO_BTC4W_OUT_4331_SHARED  0x010  /* GPIO 4  */
+
+#define	PCI_CFG_GPIO_SCS	0x10	/* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_HWRAD	0x20	/* PCI config space GPIO 13 for hw radio disable */
+#define PCI_CFG_GPIO_XTAL	0x40	/* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_PLL	0x80	/* PCI config space GPIO 15 for PLL power-down */
+
+/* power control defines */
+#define PLL_DELAY		150		/* us pll on delay */
+#define FREF_DELAY		200		/* us fref change delay */
+#define MIN_SLOW_CLK		32		/* us Slow clock period */
+#define	XTAL_ON_DELAY		1000		/* us crystal power-on delay */
+
+
+/* 43341 Boards */
+#define BCM943341WLABGS_SSID	0x062d
+
+/* 43342 Boards */
+#define BCM943342FCAGBI_SSID	0x0641
+
+/* 43012 wlbga Board */
+#define BCM943012WLREF_SSID	0x07d7
+
+/* 43012 fcbga Board */
+#define BCM943012FCREF_SSID	0x07d4
+
+/* 43602 Boards, unclear yet what boards will be created. */
+#define BCM943602RSVD1_SSID	0x06a5
+#define BCM943602RSVD2_SSID	0x06a6
+#define BCM943602X87            0x0133
+#define BCM943602X87P2          0x0152
+#define BCM943602X87P3          0x0153
+#define BCM943602X238           0x0132
+#define BCM943602X238D          0x014A
+#define BCM943602X238DP2        0x0155
+#define BCM943602X238DP3        0x0156
+#define BCM943602X100           0x0761
+#define BCM943602X100GS         0x0157
+#define BCM943602X100P2         0x015A
+
+/* # of GPIO pins */
+#define GPIO_NUMPINS		32
+
+/* These values are used by dhd host driver. */
+#define RDL_RAM_BASE_4319 0x60000000
+#define RDL_RAM_BASE_4329 0x60000000
+#define RDL_RAM_SIZE_4319 0x48000
+#define RDL_RAM_SIZE_4329  0x48000
+#define RDL_RAM_SIZE_43236 0x70000
+#define RDL_RAM_BASE_43236 0x60000000
+#define RDL_RAM_SIZE_4328 0x60000
+#define RDL_RAM_BASE_4328 0x80000000
+#define RDL_RAM_SIZE_4322 0x60000
+#define RDL_RAM_BASE_4322 0x60000000
+#define RDL_RAM_SIZE_4360  0xA0000
+#define RDL_RAM_BASE_4360  0x60000000
+#define RDL_RAM_SIZE_43242  0x90000
+#define RDL_RAM_BASE_43242  0x60000000
+#define RDL_RAM_SIZE_43143  0x70000
+#define RDL_RAM_BASE_43143  0x60000000
+#define RDL_RAM_SIZE_4350  0xC0000
+#define RDL_RAM_BASE_4350  0x180800
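+
+/*
+ * Illustrative sketch (not in the original header): the RDL_RAM_* values
+ * pair up per chip for the host download path.  The helper below is
+ * hypothetical, and CHIPID() is assumed to be defined elsewhere in this
+ * header set.
+ *
+ *	static void rdl_ram_info(uint32 chipid, uint32 *base, uint32 *size)
+ *	{
+ *		switch (CHIPID(chipid)) {
+ *		case BCM4360_CHIP_ID:
+ *			*base = RDL_RAM_BASE_4360; *size = RDL_RAM_SIZE_4360; break;
+ *		case BCM43242_CHIP_ID:
+ *			*base = RDL_RAM_BASE_43242; *size = RDL_RAM_SIZE_43242; break;
+ *		default:
+ *			*base = 0; *size = 0; break;
+ *		}
+ *	}
+ */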
+
+/* generic defs for nvram "muxenab" bits
+ * Note: these differ for 4335a0; refer to bcmchipc.h for specific mux options.
+ */
+#define MUXENAB_UART		0x00000001
+#define MUXENAB_GPIO		0x00000002
+#define MUXENAB_ERCX		0x00000004	/* External Radio BT coex */
+#define MUXENAB_JTAG		0x00000008
+#define MUXENAB_HOST_WAKE	0x00000010	/* configure GPIO for SDIO host_wake */
+#define MUXENAB_I2S_EN		0x00000020
+#define MUXENAB_I2S_MASTER	0x00000040
+#define MUXENAB_I2S_FULL	0x00000080
+#define MUXENAB_SFLASH		0x00000100
+#define MUXENAB_RFSWCTRL0	0x00000200
+#define MUXENAB_RFSWCTRL1	0x00000400
+#define MUXENAB_RFSWCTRL2	0x00000800
+#define MUXENAB_SECI		0x00001000
+#define MUXENAB_BT_LEGACY	0x00002000
+#define MUXENAB_HOST_WAKE1	0x00004000	/* configure alternative GPIO for SDIO host_wake */
+
+/* Boot flags */
+#define FLASH_KERNEL_NFLASH	0x00000001
+#define FLASH_BOOT_NFLASH	0x00000002
+
+#endif /* _BCMDEVS_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdhcp.h b/drivers/net/wireless/bcmdhd/include/bcmdhcp.h
new file mode 100644
index 0000000..f3e1b08
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdhcp.h
@@ -0,0 +1,92 @@
+/*
+ * Fundamental constants relating to DHCP Protocol
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmdhcp.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _bcmdhcp_h_
+#define _bcmdhcp_h_
+
+/* DHCP params */
+#define DHCP_TYPE_OFFSET	0	/* DHCP type (request|reply) offset */
+#define DHCP_TID_OFFSET		4	/* DHCP transaction id offset */
+#define DHCP_FLAGS_OFFSET	10	/* DHCP flags offset */
+#define DHCP_CIADDR_OFFSET	12	/* DHCP client IP address offset */
+#define DHCP_YIADDR_OFFSET	16	/* DHCP your IP address offset */
+#define DHCP_GIADDR_OFFSET	24	/* DHCP relay agent IP address offset */
+#define DHCP_CHADDR_OFFSET	28	/* DHCP client h/w address offset */
+#define DHCP_OPT_OFFSET		236	/* DHCP options offset */
+
+#define DHCP_OPT_MSGTYPE	53	/* DHCP message type */
+#define DHCP_OPT_MSGTYPE_REQ	3	/* DHCP message type - REQUEST */
+#define DHCP_OPT_MSGTYPE_ACK	5	/* DHCP message type - ACK */
+
+#define DHCP_OPT_CODE_OFFSET	0	/* Option identifier */
+#define DHCP_OPT_LEN_OFFSET	1	/* Option data length */
+#define DHCP_OPT_DATA_OFFSET	2	/* Option data */
+
+#define DHCP_OPT_CODE_CLIENTID	61	/* Option identifier */
+
+#define DHCP_TYPE_REQUEST	1	/* DHCP request (discover|request) */
+#define DHCP_TYPE_REPLY		2	/* DHCP reply (offer|ack) */
+
+#define DHCP_PORT_SERVER	67	/* DHCP server UDP port */
+#define DHCP_PORT_CLIENT	68	/* DHCP client UDP port */
+
+#define DHCP_FLAG_BCAST	0x8000	/* DHCP broadcast flag */
+
+#define DHCP_FLAGS_LEN	2	/* DHCP flags field length */
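+
+/*
+ * Illustrative sketch (not in the original header): walking a DHCP option
+ * using the code/len/data offsets above.  'opt' is assumed to point at the
+ * start of one option inside the options area (DHCP_OPT_OFFSET into the
+ * DHCP payload); names are hypothetical.
+ *
+ *	const uint8 *opt = ...;
+ *	if (opt[DHCP_OPT_CODE_OFFSET] == DHCP_OPT_MSGTYPE &&
+ *	    opt[DHCP_OPT_LEN_OFFSET] == 1 &&
+ *	    opt[DHCP_OPT_DATA_OFFSET] == DHCP_OPT_MSGTYPE_ACK) {
+ *		// this is a DHCP ACK
+ *	}
+ */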
+
+#define DHCP6_TYPE_SOLICIT	1	/* DHCP6 solicit */
+#define DHCP6_TYPE_ADVERTISE	2	/* DHCP6 advertise */
+#define DHCP6_TYPE_REQUEST	3	/* DHCP6 request */
+#define DHCP6_TYPE_CONFIRM	4	/* DHCP6 confirm */
+#define DHCP6_TYPE_RENEW	5	/* DHCP6 renew */
+#define DHCP6_TYPE_REBIND	6	/* DHCP6 rebind */
+#define DHCP6_TYPE_REPLY	7	/* DHCP6 reply */
+#define DHCP6_TYPE_RELEASE	8	/* DHCP6 release */
+#define DHCP6_TYPE_DECLINE	9	/* DHCP6 decline */
+#define DHCP6_TYPE_RECONFIGURE	10	/* DHCP6 reconfigure */
+#define DHCP6_TYPE_INFOREQ	11	/* DHCP6 information request */
+#define DHCP6_TYPE_RELAYFWD	12	/* DHCP6 relay forward */
+#define DHCP6_TYPE_RELAYREPLY	13	/* DHCP6 relay reply */
+
+#define DHCP6_TYPE_OFFSET	0	/* DHCP6 type offset */
+
+#define	DHCP6_MSG_OPT_OFFSET	4	/* Offset of options in client server messages */
+#define	DHCP6_RELAY_OPT_OFFSET	34	/* Offset of options in relay messages */
+
+#define	DHCP6_OPT_CODE_OFFSET	0	/* Option identifier */
+#define	DHCP6_OPT_LEN_OFFSET	2	/* Option data length */
+#define	DHCP6_OPT_DATA_OFFSET	4	/* Option data */
+
+#define	DHCP6_OPT_CODE_CLIENTID	1	/* DHCP6 CLIENTID option */
+#define	DHCP6_OPT_CODE_SERVERID	2	/* DHCP6 SERVERID option */
+
+#define DHCP6_PORT_SERVER	547	/* DHCP6 server UDP port */
+#define DHCP6_PORT_CLIENT	546	/* DHCP6 client UDP port */
+
+#endif	/* #ifndef _bcmdhcp_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h
new file mode 100644
index 0000000..00adedf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -0,0 +1,332 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ *  $Id: bcmendian.h 514727 2014-11-12 03:02:48Z $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+/* Reverse the bytes in a 16-bit value */
+#define BCMSWAP16(val) \
+	((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+		  (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+/* Reverse the bytes in a 32-bit value */
+#define BCMSWAP32(val) \
+	((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+		  (((uint32)(val) & (uint32)0x0000ff00U) <<  8) | \
+		  (((uint32)(val) & (uint32)0x00ff0000U) >>  8) | \
+		  (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+/* Reverse the two 16-bit halves of a 32-bit value */
+#define BCMSWAP32BY16(val) \
+	((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+		  (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+/* Reverse the bytes in a 64-bit value */
+#define BCMSWAP64(val) \
+	((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \
+	          (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \
+	          (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \
+	          (((uint64)(val) & 0x00000000ff000000ULL) <<  8) | \
+	          (((uint64)(val) & 0x000000ff00000000ULL) >>  8) | \
+	          (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \
+	          (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \
+	          (((uint64)(val) & 0xff00000000000000ULL) >> 56)))
+
+/* Reverse the two 32-bit halves of a 64-bit value */
+#define BCMSWAP64BY32(val) \
+	((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \
+	          (((uint64)(val) & 0xffffffff00000000ULL) >> 32)))
+
+
+/* Byte swapping macros
+ *    Host <=> Network (Big Endian) for 16- and 32-bit values
+ *    Host <=> Little-Endian for 16- and 32-bit values
+ */
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define	hton16(i) bcmswap16(i)
+#define	HTON32(i) BCMSWAP32(i)
+#define	hton32(i) bcmswap32(i)
+#define	NTOH16(i) BCMSWAP16(i)
+#define	ntoh16(i) bcmswap16(i)
+#define	NTOH32(i) BCMSWAP32(i)
+#define	ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#define HTOL64(i) (i)
+#define htol64(i) (i)
+#endif /* hton16 */
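+
+/*
+ * Usage sketch (illustrative only): on a little-endian host (this file's
+ * default) the hton16()/ntoh16()/hton32()/ntoh32() forms swap bytes, while
+ * the ltoh/htol forms compile to the identity.  Names are hypothetical.
+ *
+ *	uint16 be_len  = hton16(host_len);	// host -> network (big-endian)
+ *	uint32 le_addr = ltoh32(raw_le32);	// no-op on little-endian hosts
+ */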
+
+#define ltoh16_buf(buf, i)	/* no-op: buffer is already in host (little-endian) order */
+#define htol16_buf(buf, i)	/* no-op: buffer is already in little-endian order */
+
+/* Unaligned loads and stores in host byte order */
+#define load32_ua(a)		ltoh32_ua(a)
+#define store32_ua(a, v)	htol32_ua_store(v, a)
+#define load16_ua(a)		ltoh16_ua(a)
+#define store16_ua(a, v)	htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp)	((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp)	((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp)	(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp)	(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+#define ltoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
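+
+/*
+ * Usage sketch (illustrative only): the *_ua helpers read and write
+ * multi-byte values through byte pointers, so they are safe on buffers
+ * with no alignment guarantee.  'pkt' is a hypothetical byte buffer.
+ *
+ *	uint8 pkt[6];
+ *	htol32_ua_store(0x12345678, &pkt[1]);	// unaligned little-endian store
+ *	uint32 v = ltoh32_ua(&pkt[1]);		// v == 0x12345678
+ */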
+
+#ifdef __GNUC__
+
+/* GNU macro versions avoid referencing the argument multiple times, while also
+ * avoiding the -fno-inline used in ROM builds.
+ */
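+
+/*
+ * Illustrative note (not in the original header): the statement-expression
+ * macros below evaluate their argument exactly once, so expressions with
+ * side effects are safe.  For a hypothetical pointer 'p':
+ *
+ *	uint16 v = bcmswap16(*p++);	// 'p' advances once
+ *	uint16 w = BCMSWAP16(*p++);	// hazard: *p++ would be evaluated twice
+ */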
+
+#define bcmswap16(val) ({ \
+	uint16 _val = (val); \
+	BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32(_val); \
+})
+
+#define bcmswap64(val) ({ \
+	uint64 _val = (val); \
+	BCMSWAP64(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+	uint16 *_buf = (uint16 *)(buf); \
+	uint _wds = (len) / 2; \
+	while (_wds--) { \
+		*_buf = bcmswap16(*_buf); \
+		_buf++; \
+	} \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = (_val >> 8) & 0xff; \
+	_bytes[2] = (_val >> 16) & 0xff; \
+	_bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 8; \
+	_bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 24; \
+	_bytes[1] = (_val >> 16) & 0xff; \
+	_bytes[2] = (_val >> 8) & 0xff; \
+	_bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH32_UA(_bytes); \
+})
+
+#else /* !__GNUC__ */
+
+/* Inline versions avoid referencing the argument multiple times */
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+	return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+	return BCMSWAP32(val);
+}
+
+static INLINE uint64
+bcmswap64(uint64 val)
+{
+	return BCMSWAP64(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+	return BCMSWAP32BY16(val);
+}
+
+/* Reverse pairs of bytes in a buffer (not for high-performance use) */
+/* buf	- start of buffer of shorts to swap */
+/* len  - byte length of buffer */
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+	len = len / 2;
+
+	while (len--) {
+		*buf = bcmswap16(*buf);
+		buf++;
+	}
+}
+
+/*
+ * Store 16-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = val >> 8;
+}
+
+/*
+ * Store 32-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = (val >> 8) & 0xff;
+	bytes[2] = (val >> 16) & 0xff;
+	bytes[3] = val >> 24;
+}
+
+/*
+ * Store 16-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val >> 8;
+	bytes[1] = val & 0xff;
+}
+
+/*
+ * Store 32-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val >> 24;
+	bytes[1] = (val >> 16) & 0xff;
+	bytes[2] = (val >> 8) & 0xff;
+	bytes[3] = val & 0xff;
+}
+
+/*
+ * Load 16-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+	return _LTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+	return _LTOH32_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 16-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+	return _NTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+	return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#endif /* !__GNUC__ */
+#endif /* !_BCMENDIAN_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmeth.h b/drivers/net/wireless/bcmdhd/include/bcmeth.h
new file mode 100644
index 0000000..e1d2d666
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmeth.h
@@ -0,0 +1,117 @@
+/*
+ * Broadcom Ethernet type protocol definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmeth.h 701825 2017-05-26 16:45:27Z $
+ */
+
+/*
+ * Broadcom Ethernet protocol defines
+ */
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* ETHER_TYPE_BRCM is defined in ethernet.h */
+
+/*
+ * Following the 2-byte BRCM ether_type is a 16-bit BRCM subtype field
+ * in one of two formats (only subtypes 32768-65535 are in use now):
+ *
+ * subtypes 0-32767:
+ *     8 bit subtype (0-127)
+ *     8 bit length in bytes (0-255)
+ *
+ * subtypes 32768-65535:
+ *     16 bit big-endian subtype
+ *     16 bit big-endian length in bytes (0-65535)
+ *
+ * length is the number of additional bytes beyond the 4 or 6 byte header
+ *
+ * Reserved values:
+ * 0 reserved
+ * 5-15 reserved for iLine protocol assignments
+ * 17-126 reserved, assignable
+ * 127 reserved
+ * 32768 reserved
+ * 32769-65534 reserved, assignable
+ * 65535 reserved
+ */
+
+/*
+ * When adding subtypes and their specific processing code, make sure
+ * bcmeth_hdr_t is the first data structure in the user-specific data structure definition.
+ */
+
+#define	BCMILCP_SUBTYPE_RATE		1
+#define	BCMILCP_SUBTYPE_LINK		2
+#define	BCMILCP_SUBTYPE_CSA		3
+#define	BCMILCP_SUBTYPE_LARQ		4
+#define BCMILCP_SUBTYPE_VENDOR		5
+#define	BCMILCP_SUBTYPE_FLH		17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG	32769
+#define BCMILCP_SUBTYPE_CERT		32770
+#define BCMILCP_SUBTYPE_SES		32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED		0
+#define BCMILCP_BCM_SUBTYPE_EVENT		1
+#define BCMILCP_BCM_SUBTYPE_SES			2
+/*
+ * The EAPOL type is not used anymore. Instead EAPOL messages are now embedded
+ * within BCMILCP_BCM_SUBTYPE_EVENT type messages
+ */
+/* #define BCMILCP_BCM_SUBTYPE_EAPOL		3 */
+#define BCMILCP_BCM_SUBTYPE_DPT                 4
+#define BCMILCP_BCM_SUBTYPE_DNGLEVENT       5
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH	8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION		0
+#define BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD	2
+
+/* These fields are stored in network order */
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+	uint16	subtype;	/* Vendor specific..32769 */
+	uint16	length;
+	uint8	version;	/* Version is 0 */
+	uint8	oui[3];		/* Broadcom OUI */
+	/* user specific Data */
+	uint16	usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
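+
+/*
+ * Parsing sketch (illustrative only): bcmeth_hdr_t fields are stored in
+ * network order, so the 16-bit fields need ntoh conversion on little-endian
+ * hosts.  'hdr' is a hypothetical pointer into a received frame.
+ *
+ *	const bcmeth_hdr_t *hdr = ...;
+ *	uint16 subtype = ntoh16(hdr->subtype);
+ *	uint16 usr_subtype = ntoh16(hdr->usr_subtype);
+ *	if (subtype == BCMILCP_SUBTYPE_VENDOR_LONG &&
+ *	    usr_subtype == BCMILCP_BCM_SUBTYPE_EVENT) {
+ *		// Broadcom event payload follows the header
+ *	}
+ */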
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/*  _BCMETH_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmevent.h b/drivers/net/wireless/bcmdhd/include/bcmevent.h
new file mode 100644
index 0000000..27f77a0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmevent.h
@@ -0,0 +1,948 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Dependencies: bcmeth.h
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmevent.h 700076 2017-05-17 14:42:22Z $
+ *
+ */
+
+/*
+ * Broadcom Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#include <typedefs.h>
+/* #include <ethernet.h> -- TODO: req., excluded due to overwhelming coupling (break up ethernet.h) */
+#include <bcmeth.h>
+#if defined(DNGL_EVENT_SUPPORT)
+#include <dnglevent.h>
+#endif 
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION		2	/* wl_event_msg_t struct version */
+#define BCM_MSG_IFNAME_MAX		16	/* max length of interface name */
+
+/* flags */
+#define WLC_EVENT_MSG_LINK		0x01	/* link is up */
+#define WLC_EVENT_MSG_FLUSHTXQ		0x02	/* flush tx queue on MIC error */
+#define WLC_EVENT_MSG_GROUP		0x04	/* group MIC error */
+#define WLC_EVENT_MSG_UNKBSS		0x08	/* unknown source bsscfg */
+#define WLC_EVENT_MSG_UNKIF		0x10	/* unknown source OS i/f */
+
+/* these fields are stored in network order */
+
+/* version 1 */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+/* the current version */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+	uint8	ifidx;			/* destination OS i/f index */
+	uint8	bsscfgidx;		/* source bsscfg index */
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+/* used by driver msgs */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+	struct ether_header eth;
+	bcmeth_hdr_t		bcm_hdr;
+	wl_event_msg_t		event;
+	/* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_event_t;
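+
+/*
+ * Parsing sketch (illustrative only): a driver event frame is an Ethernet
+ * header, the Broadcom bcmeth header, then wl_event_msg_t, all stored in
+ * network order.  'ev' is a hypothetical pointer to a received bcm_event_t.
+ *
+ *	const bcm_event_t *ev = ...;
+ *	uint32 etype  = ntoh32(ev->event.event_type);
+ *	uint16 flags  = ntoh16(ev->event.flags);
+ *	if (etype == WLC_E_LINK && (flags & WLC_EVENT_MSG_LINK)) {
+ *		// link is up; event data (datalen bytes) follows the struct
+ *	}
+ */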
+
+/*
+ * used by host event
+ * note: if additional event types are added, they should be handled in is_wlc_event_frame() as well.
+ */
+typedef union bcm_event_msg_u {
+	wl_event_msg_t		event;
+#if defined(DNGL_EVENT_SUPPORT)
+	bcm_dngl_event_msg_t	dngl_event;
+#endif 
+
+	/* add new event here */
+} bcm_event_msg_u_t;
+
+#define BCM_MSG_LEN	(sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
+
+/* Event messages */
+#define WLC_E_SET_SSID		0	/* indicates status of set SSID */
+#define WLC_E_JOIN		1	/* differentiates join IBSS from found (WLC_E_START) IBSS */
+#define WLC_E_START		2	/* STA founded an IBSS or AP started a BSS */
+#define WLC_E_AUTH		3	/* 802.11 AUTH request */
+#define WLC_E_AUTH_IND		4	/* 802.11 AUTH indication */
+#define WLC_E_DEAUTH		5	/* 802.11 DEAUTH request */
+#define WLC_E_DEAUTH_IND	6	/* 802.11 DEAUTH indication */
+#define WLC_E_ASSOC		7	/* 802.11 ASSOC request */
+#define WLC_E_ASSOC_IND		8	/* 802.11 ASSOC indication */
+#define WLC_E_REASSOC		9	/* 802.11 REASSOC request */
+#define WLC_E_REASSOC_IND	10	/* 802.11 REASSOC indication */
+#define WLC_E_DISASSOC		11	/* 802.11 DISASSOC request */
+#define WLC_E_DISASSOC_IND	12	/* 802.11 DISASSOC indication */
+#define WLC_E_QUIET_START	13	/* 802.11h Quiet period started */
+#define WLC_E_QUIET_END		14	/* 802.11h Quiet period ended */
+#define WLC_E_BEACON_RX		15	/* BEACONS received/lost indication */
+#define WLC_E_LINK		16	/* generic link indication */
+#define WLC_E_MIC_ERROR		17	/* TKIP MIC error occurred */
+#define WLC_E_NDIS_LINK		18	/* NDIS style link indication */
+#define WLC_E_ROAM		19	/* roam complete: indicate status & reason */
+#define WLC_E_TXFAIL		20	/* change in dot11FailedCount (txfail) */
+#define WLC_E_PMKID_CACHE	21	/* WPA2 pmkid cache indication */
+#define WLC_E_RETROGRADE_TSF	22	/* current AP's TSF value went backward */
+#define WLC_E_PRUNE		23	/* AP was pruned from join list for reason */
+#define WLC_E_AUTOAUTH		24	/* report AutoAuth table entry match for join attempt */
+#define WLC_E_EAPOL_MSG		25	/* Event encapsulating an EAPOL message */
+#define WLC_E_SCAN_COMPLETE	26	/* Scan results are ready or scan was aborted */
+#define WLC_E_ADDTS_IND		27	/* indicate to host addts fail/success */
+#define WLC_E_DELTS_IND		28	/* indicate to host delts fail/success */
+#define WLC_E_BCNSENT_IND	29	/* indicate to host of beacon transmit */
+#define WLC_E_BCNRX_MSG		30	/* Send the received beacon up to the host */
+#define WLC_E_BCNLOST_MSG	31	/* indicate to host loss of beacon */
+#define WLC_E_ROAM_PREP		32	/* before attempting to roam association */
+#define WLC_E_PFN_NET_FOUND	33	/* PFN network found event */
+#define WLC_E_PFN_NET_LOST	34	/* PFN network lost event */
+#define WLC_E_RESET_COMPLETE	35
+#define WLC_E_JOIN_START	36
+#define WLC_E_ROAM_START	37	/* roam attempt started: indicate reason */
+#define WLC_E_ASSOC_START	38
+#define WLC_E_IBSS_ASSOC	39
+#define WLC_E_RADIO		40
+#define WLC_E_PSM_WATCHDOG	41	/* PSM microcode watchdog fired */
+
+#define WLC_E_PROBREQ_MSG       44      /* probe request received */
+#define WLC_E_SCAN_CONFIRM_IND  45
+#define WLC_E_PSK_SUP		46	/* WPA Handshake fail */
+#define WLC_E_COUNTRY_CODE_CHANGED	47
+#define	WLC_E_EXCEEDED_MEDIUM_TIME	48	/* WMMAC exceeded medium time */
+#define WLC_E_ICV_ERROR		49	/* WEP ICV error occurred */
+#define WLC_E_UNICAST_DECODE_ERROR	50	/* Unsupported unicast encrypted frame */
+#define WLC_E_MULTICAST_DECODE_ERROR	51	/* Unsupported multicast encrypted frame */
+#define WLC_E_TRACE		52
+#define WLC_E_IF		54	/* I/F change (for dongle host notification) */
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE	55	/* listen state expires */
+#define WLC_E_RSSI		56	/* indicate RSSI change based on configured levels */
+#define WLC_E_PFN_BEST_BATCHING	57	/* PFN best network batching event */
+#define WLC_E_EXTLOG_MSG	58
+#define WLC_E_ACTION_FRAME      59	/* Action frame Rx */
+#define WLC_E_ACTION_FRAME_COMPLETE	60	/* Action frame Tx complete */
+#define WLC_E_PRE_ASSOC_IND	61	/* assoc request received */
+#define WLC_E_PRE_REASSOC_IND	62	/* re-assoc request received */
+#define WLC_E_CHANNEL_ADOPTED	63
+#define WLC_E_AP_STARTED	64	/* AP started */
+#define WLC_E_DFS_AP_STOP	65	/* AP stopped due to DFS */
+#define WLC_E_DFS_AP_RESUME	66	/* AP resumed due to DFS */
+#define WLC_E_WAI_STA_EVENT	67	/* WAI stations event */
+#define WLC_E_WAI_MSG		68	/* event encapsulating a WAI message */
+#define WLC_E_ESCAN_RESULT 	69	/* escan result event */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 	70	/* action frame off channel complete */
+#define WLC_E_PROBRESP_MSG	71	/* probe response received */
+#define WLC_E_P2P_PROBREQ_MSG	72	/* P2P Probe request received */
+#define WLC_E_DCS_REQUEST	73
+#define WLC_E_FIFO_CREDIT_MAP	74	/* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */
+#define WLC_E_ACTION_FRAME_RX	75	/* Received action frame event WITH
+					 * wl_event_rx_frame_data_t header
+					 */
+#define WLC_E_WAKE_EVENT	76	/* Wake Event timer fired, used for wake WLAN test mode */
+#define WLC_E_RM_COMPLETE	77	/* Radio measurement complete */
+#define WLC_E_HTSFSYNC		78	/* Synchronize TSF with the host */
+#define WLC_E_OVERLAY_REQ	79	/* request an overlay IOCTL/iovar from the host */
+#define WLC_E_CSA_COMPLETE_IND		80	/* 802.11 CHANNEL SWITCH ACTION completed */
+#define WLC_E_EXCESS_PM_WAKE_EVENT	81	/* excess PM Wake Event to inform host  */
+#define WLC_E_PFN_SCAN_NONE		82	/* no PFN networks around */
+/* PFN BSSID network found event, conflict/share with  WLC_E_PFN_SCAN_NONE */
+#define WLC_E_PFN_BSSID_NET_FOUND	82
+#define WLC_E_PFN_SCAN_ALLGONE		83	/* last found PFN network gets lost */
+/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */
+#define WLC_E_PFN_BSSID_NET_LOST	83
+#define WLC_E_GTK_PLUMBED		84
+#define WLC_E_ASSOC_IND_NDIS		85	/* 802.11 ASSOC indication for NDIS only */
+#define WLC_E_REASSOC_IND_NDIS		86	/* 802.11 REASSOC indication for NDIS only */
+#define WLC_E_ASSOC_REQ_IE		87
+#define WLC_E_ASSOC_RESP_IE		88
+#define WLC_E_ASSOC_RECREATED		89	/* association recreated on resume */
+#define WLC_E_ACTION_FRAME_RX_NDIS	90	/* rx action frame event for NDIS only */
+#define WLC_E_AUTH_REQ			91	/* authentication request received */
+#define WLC_E_TDLS_PEER_EVENT		92	/* discovered peer, connected/disconnected peer */
+#define WLC_E_SPEEDY_RECREATE_FAIL	93	/* fast assoc recreation failed */
+#define WLC_E_NATIVE			94	/* port-specific event and payload (e.g. NDIS) */
+#define WLC_E_PKTDELAY_IND		95	/* event for a sudden jump in tx pkt delay */
+#define WLC_E_PSTA_PRIMARY_INTF_IND	99	/* psta primary interface indication */
+#define WLC_E_NAN			100     /* NAN event - Reserved for future */
+#define WLC_E_BEACON_FRAME_RX		101
+#define WLC_E_SERVICE_FOUND		102	/* desired service found */
+#define WLC_E_GAS_FRAGMENT_RX		103	/* GAS fragment received */
+#define WLC_E_GAS_COMPLETE		104	/* GAS sessions all complete */
+#define WLC_E_P2PO_ADD_DEVICE		105	/* New device found by p2p offload */
+#define WLC_E_P2PO_DEL_DEVICE		106	/* device has been removed by p2p offload */
+#define WLC_E_WNM_STA_SLEEP		107	/* WNM event to notify STA enter sleep mode */
+#define WLC_E_TXFAIL_THRESH		108	/* Indication of MAC tx failures (exhaustion of
+						 * 802.11 retries) exceeding threshold(s)
+						 */
+#define WLC_E_PROXD			109	/* Proximity Detection event */
+#define WLC_E_IBSS_COALESCE		110	/* IBSS Coalescing */
+#define WLC_E_AIBSS_TXFAIL		110	/* TXFAIL event for AIBSS, reusing event 110 */
+#define WLC_E_BSS_LOAD			114	/* Inform host of beacon bss load */
+#define WLC_E_MIMO_PWR_SAVE		115	/* Inform host MIMO PWR SAVE learning events */
+#define WLC_E_LEAKY_AP_STATS	116 /* Inform host of leaky AP stats events */
+#define WLC_E_ALLOW_CREDIT_BORROW 117	/* Allow or disallow wlfc credit borrowing in DHD */
+#define WLC_E_MSCH			120	/* Multiple channel scheduler event */
+#define WLC_E_CSA_START_IND		121
+#define WLC_E_CSA_DONE_IND		122
+#define WLC_E_CSA_FAILURE_IND		123
+#define WLC_E_CCA_CHAN_QUAL		124	/* CCA based channel quality report */
+#define WLC_E_BSSID		125	/* to report change in BSSID while roaming */
+#define WLC_E_TX_STAT_ERROR		126	/* tx error indication */
+#define WLC_E_BCMC_CREDIT_SUPPORT	127	/* credit check for BCMC supported */
+#define WLC_E_PEER_TIMEOUT	128 /* silently drop a STA because of inactivity */
+#define WLC_E_BT_WIFI_HANDOVER_REQ	130	/* Handover Request Initiated */
+#define WLC_E_SPW_TXINHIBIT		131     /* Southpaw TxInhibit notification */
+#define WLC_E_FBT_AUTH_REQ_IND		132	/* FBT Authentication Request Indication */
+#define WLC_E_RSSI_LQM			133	/* Enhancement addition for WLC_E_RSSI */
+#define WLC_E_PFN_GSCAN_FULL_RESULT		134 /* Full probe/beacon (IEs etc) results */
+#define WLC_E_PFN_SWC		135 /* Significant change in rssi of bssids being tracked */
+#define WLC_E_AUTHORIZED	136	/* a STA has been authorized for traffic */
+#define WLC_E_PROBREQ_MSG_RX	137 /* probe req with wl_event_rx_frame_data_t header */
+#define WLC_E_PFN_SCAN_COMPLETE	138	/* PFN completed scan of network list */
+#define WLC_E_RMC_EVENT		139	/* RMC Event */
+#define WLC_E_DPSTA_INTF_IND	140	/* DPSTA interface indication */
+#define WLC_E_RRM		141	/* RRM Event */
+#define WLC_E_PFN_SSID_EXT	142	/* SSID EXT event */
+#define WLC_E_ROAM_EXP_EVENT	143	/* Expanded roam event */
+#define WLC_E_ULP			146	/* ULP entered indication */
+#define WLC_E_MACDBG			147	/* Ucode debugging event */
+#define WLC_E_RESERVED			148	/* reserved */
+#define WLC_E_PRE_ASSOC_RSEP_IND	149	/* assoc resp received */
+#define WLC_E_PSK_AUTH			150	/* PSK AUTH WPA2-PSK 4 WAY Handshake failure */
+#define WLC_E_TKO			151     /* TCP keepalive offload */
+#define WLC_E_SDB_TRANSITION            152     /* SDB mode-switch event */
+#define WLC_E_NATOE_NFCT		153     /* natoe event */
+#define WLC_E_TEMP_THROTTLE		154	/* Temperature throttling control event */
+#define WLC_E_LINK_QUALITY		155     /* Link quality measurement complete */
+#define WLC_E_BSSTRANS_RESP		156	/* BSS Transition Response received */
+#define WLC_E_TWT_SETUP			157	/* TWT Setup Complete event */
+#define WLC_E_HE_TWT_SETUP		157	/* TODO:Remove after merging TWT changes to trunk */
+#define WLC_E_NAN_CRITICAL		158	/* NAN Critical Event */
+#define WLC_E_NAN_NON_CRITICAL		159	/* NAN Non-Critical Event */
+#define WLC_E_RADAR_DETECTED		160	/* Radar Detected event */
+#define WLC_E_RANGING_EVENT		161	/* Ranging event */
+#define WLC_E_INVALID_IE		162	/* Received invalid IE */
+#define WLC_E_MODE_SWITCH		163	/* Mode switch event */
+#define WLC_E_PKT_FILTER		164	/* Packet filter event */
+#define WLC_E_DMA_TXFLUSH_COMPLETE		165	/* TxFlush done before changing
+											* tx/rxchain
+											*/
+#define WLC_E_FBT			166	/* FBT event */
+#define WLC_E_PFN_SCAN_BACKOFF	167	/* PFN SCAN Backoff event */
+#define WLC_E_PFN_BSSID_SCAN_BACKOFF	168	/* PFN BSSID SCAN Backoff event */
+#define WLC_E_AGGR_EVENT		169	/* Aggregated event */
+#define WLC_E_TVPM_MITIGATION		171	/* Change in mitigation applied by TVPM */
+#define WLC_E_LAST			172	/* highest val + 1 for range checking */
+#if (WLC_E_LAST > 172)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 172."
+#endif /* WLC_E_LAST */
+
+/* define an API for getting the string name of an event */
+extern const char *bcmevent_get_name(uint event_type);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern void wl_event_to_network_order(wl_event_msg_t * evt);
+
+/* validate if the event is proper and if valid copy event header to event */
+extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
+	bcm_event_msg_u_t *out_event);
+
+/* conversion between host and network order for events */
+void wl_event_to_host_order(wl_event_msg_t * evt);
+void wl_event_to_network_order(wl_event_msg_t * evt);
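+/*
+ * Illustrative sketch (not part of the original header): a host-side handler
+ * could translate a received event with the helpers above, assuming the
+ * wl_event_msg_t fields (event_type, status) defined earlier in this file.
+ * The variable 'evt' is hypothetical.
+ *
+ *     wl_event_to_host_order(evt);
+ *     printf("wl event %u (%s) status %u\n",
+ *            evt->event_type, bcmevent_get_name(evt->event_type), evt->status);
+ */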
+
+
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS		0	/* operation was successful */
+#define WLC_E_STATUS_FAIL		1	/* operation failed */
+#define WLC_E_STATUS_TIMEOUT		2	/* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS	3	/* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT		4	/* operation was aborted */
+#define WLC_E_STATUS_NO_ACK		5	/* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED	6	/* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT		7	/* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL		8	/* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN		9	/* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC		10	/* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET		11	/* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS		12	/* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS		13	/* no allowable channels to scan */
+#define WLC_E_STATUS_CS_ABORT		15	/* abort channel select */
+#define WLC_E_STATUS_ERROR		16	/* request failed due to error */
+#define WLC_E_STATUS_INVALID 0xff  /* Invalid status code to init variables. */
+
+/* 4-way handshake event type */
+#define WLC_E_PSK_AUTH_SUB_EAPOL_START		1	/* EAPOL start */
+#define WLC_E_PSK_AUTH_SUB_EAPOL_DONE		2	/* EAPOL end */
+/* GTK event type */
+#define	WLC_E_PSK_AUTH_SUB_GTK_DONE		3	/* GTK end */
+
+/* 4-way handshake event status code */
+#define WLC_E_STATUS_PSK_AUTH_WPA_TIMOUT	1	/* operation timed out */
+#define WLC_E_STATUS_PSK_AUTH_MIC_WPA_ERR		2	/* MIC error */
+#define WLC_E_STATUS_PSK_AUTH_IE_MISMATCH_ERR		3	/* IE mismatch error */
+#define WLC_E_STATUS_PSK_AUTH_REPLAY_COUNT_ERR		4
+#define WLC_E_STATUS_PSK_AUTH_PEER_BLACKISTED	5	/* Blacklisted peer */
+#define WLC_E_STATUS_PSK_AUTH_GTK_REKEY_FAIL	6	/* GTK event status code */
+
+/* SDB transition status code */
+#define WLC_E_STATUS_SDB_START			1
+#define WLC_E_STATUS_SDB_COMPLETE		2
+/* Slice-swap status code */
+#define WLC_E_STATUS_SLICE_SWAP_START		3
+#define WLC_E_STATUS_SLICE_SWAP_COMPLETE	4
+
+
+/* SDB transition reason code */
+#define WLC_E_REASON_HOST_DIRECT	0
+#define WLC_E_REASON_INFRA_ASSOC	1
+#define WLC_E_REASON_INFRA_ROAM		2
+#define WLC_E_REASON_INFRA_DISASSOC	3
+#define WLC_E_REASON_NO_MODE_CHANGE_NEEDED	4
+#define WLC_E_REASON_AWDL_ENABLE	5
+#define WLC_E_REASON_AWDL_DISABLE	6
+
+/* WLC_E_SDB_TRANSITION event data */
+#define WL_MAX_BSSCFG     4
+#define WL_EVENT_SDB_TRANSITION_VER     1
+typedef struct wl_event_sdb_data {
+	uint8 wlunit;           /* Core index */
+	uint8 is_iftype;        /* Interface Type (Station, SoftAP, P2P_GO, P2P_GC) */
+	uint16 chanspec;        /* Interface Channel/Chanspec */
+	char ssidbuf[(4 * 32) + 1];	/* SSID_FMT_BUF_LEN: ((4 * DOT11_MAX_SSID_LEN) + 1) */
+} wl_event_sdb_data_t;
+
+typedef struct wl_event_sdb_trans {
+	uint8 version;          /* Event Data Version */
+	uint8 rsdb_mode;
+	uint8 enable_bsscfg;
+	uint8 reserved;
+	struct wl_event_sdb_data values[WL_MAX_BSSCFG];
+} wl_event_sdb_trans_t;
+
+/* roam reason codes */
+#define WLC_E_REASON_INITIAL_ASSOC	0	/* initial assoc */
+#define WLC_E_REASON_LOW_RSSI		1	/* roamed due to low RSSI */
+#define WLC_E_REASON_DEAUTH		2	/* roamed due to DEAUTH indication */
+#define WLC_E_REASON_DISASSOC		3	/* roamed due to DISASSOC indication */
+#define WLC_E_REASON_BCNS_LOST		4	/* roamed due to lost beacons */
+
+#define WLC_E_REASON_FAST_ROAM_FAILED	5	/* roamed due to fast roam failure */
+#define WLC_E_REASON_DIRECTED_ROAM	6	/* roamed due to request by AP */
+#define WLC_E_REASON_TSPEC_REJECTED	7	/* roamed due to TSPEC rejection */
+#define WLC_E_REASON_BETTER_AP		8	/* roamed due to finding better AP */
+#define WLC_E_REASON_MINTXRATE		9	/* roamed because at mintxrate for too long */
+#define WLC_E_REASON_TXFAIL		10	/* We can hear AP, but AP can't hear us */
+/* retained to avoid pre-commit auto-merge errors; remove once all branches are synced */
+#define WLC_E_REASON_REQUESTED_ROAM	11
+#define WLC_E_REASON_BSSTRANS_REQ	11	/* roamed due to BSS Transition request by AP */
+#define WLC_E_REASON_LOW_RSSI_CU		12 /* roamed due to low RSSI and Channel Usage */
+#define WLC_E_REASON_RADAR_DETECTED	13	/* roamed due to radar detection by STA */
+
+/* prune reason codes */
+#define WLC_E_PRUNE_ENCR_MISMATCH	1	/* encryption mismatch */
+#define WLC_E_PRUNE_BCAST_BSSID		2	/* AP uses a broadcast BSSID */
+#define WLC_E_PRUNE_MAC_DENY		3	/* STA's MAC addr is in AP's MAC deny list */
+#define WLC_E_PRUNE_MAC_NA		4	/* STA's MAC addr is not in AP's MAC allow list */
+#define WLC_E_PRUNE_REG_PASSV		5	/* AP not allowed due to regulatory restriction */
+#define WLC_E_PRUNE_SPCT_MGMT		6	/* AP does not support STA locale spectrum mgmt */
+#define WLC_E_PRUNE_RADAR		7	/* AP is on a radar channel of STA locale */
+#define WLC_E_RSN_MISMATCH		8	/* STA does not support AP's RSN */
+#define WLC_E_PRUNE_NO_COMMON_RATES	9	/* No rates in common with AP */
+#define WLC_E_PRUNE_BASIC_RATES		10	/* STA does not support all basic rates of BSS */
+#define WLC_E_PRUNE_CIPHER_NA		12	/* BSS's cipher not supported */
+#define WLC_E_PRUNE_KNOWN_STA		13	/* AP is already known to us as a STA */
+#define WLC_E_PRUNE_WDS_PEER		15	/* AP is already known to us as a WDS peer */
+#define WLC_E_PRUNE_QBSS_LOAD		16	/* QBSS LOAD - AAC is too low */
+#define WLC_E_PRUNE_HOME_AP		17	/* prune home AP */
+#define WLC_E_PRUNE_AUTH_RESP_MAC	20	/* suppress auth resp by MAC filter */
+
+/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
+#define WLC_E_SUP_OTHER			0	/* Other reason */
+#define WLC_E_SUP_DECRYPT_KEY_DATA	1	/* Decryption of key data failed */
+#define WLC_E_SUP_BAD_UCAST_WEP128	2	/* Illegal use of ucast WEP128 */
+#define WLC_E_SUP_BAD_UCAST_WEP40	3	/* Illegal use of ucast WEP40 */
+#define WLC_E_SUP_UNSUP_KEY_LEN		4	/* Unsupported key length */
+#define WLC_E_SUP_PW_KEY_CIPHER		5	/* Unicast cipher mismatch in pairwise key */
+#define WLC_E_SUP_MSG3_TOO_MANY_IE	6	/* WPA IE contains > 1 RSN IE in key msg 3 */
+#define WLC_E_SUP_MSG3_IE_MISMATCH	7	/* WPA IE mismatch in key message 3 */
+#define WLC_E_SUP_NO_INSTALL_FLAG	8	/* INSTALL flag unset in 4-way msg */
+#define WLC_E_SUP_MSG3_NO_GTK		9	/* encapsulated GTK missing from msg 3 */
+#define WLC_E_SUP_GRP_KEY_CIPHER	10	/* Multicast cipher mismatch in group key */
+#define WLC_E_SUP_GRP_MSG1_NO_GTK	11	/* encapsulated GTK missing from group msg 1 */
+#define WLC_E_SUP_GTK_DECRYPT_FAIL	12	/* GTK decrypt failure */
+#define WLC_E_SUP_SEND_FAIL		13	/* message send failure */
+#define WLC_E_SUP_DEAUTH		14	/* received FC_DEAUTH */
+#define WLC_E_SUP_WPA_PSK_TMO		15	/* WPA PSK 4-way handshake timeout */
+#define WLC_E_SUP_WPA_PSK_M1_TMO	16	/* WPA PSK 4-way handshake M1 timeout */
+#define WLC_E_SUP_WPA_PSK_M3_TMO	17	/* WPA PSK 4-way handshake M3 timeout */
+
+
+/* Ucode reason codes carried in the WLC_E_MACDBG event */
+#define WLC_E_MACDBG_LIST_PSM		0	/* Dump list update for PSM registers */
+#define WLC_E_MACDBG_LIST_PSMX		1	/* Dump list update for PSMx registers */
+#define WLC_E_MACDBG_REGALL		2	/* Dump all registers */
+
+/* Event data for events that include frames received over the air */
+/* WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+	uint16	version;
+	uint16	channel;	/* Matches chanspec_t format from bcmwifi_channels.h */
+	int32	rssi;
+	uint32	mactime;
+	uint32	rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+/* WLC_E_IF event data */
+typedef struct wl_event_data_if {
+	uint8 ifidx;		/* RTE virtual device index (for dongle) */
+	uint8 opcode;		/* see I/F opcode */
+	uint8 reserved;		/* bit mask (WLC_E_IF_FLAGS_XXX ) */
+	uint8 bssidx;		/* bsscfg index */
+	uint8 role;		/* see I/F role */
+} wl_event_data_if_t;
+
+/* WLC_E_NATOE event data */
+typedef struct wl_event_data_natoe {
+	uint32 natoe_active;
+	uint32 sta_ip;
+	uint16 start_port;
+	uint16 end_port;
+} wl_event_data_natoe_t;
+
+/* opcode in WLC_E_IF event */
+#define WLC_E_IF_ADD		1	/* bsscfg add */
+#define WLC_E_IF_DEL		2	/* bsscfg delete */
+#define WLC_E_IF_CHANGE		3	/* bsscfg role change */
+
+/* I/F role code in WLC_E_IF event */
+#define WLC_E_IF_ROLE_STA		0	/* Infra STA */
+#define WLC_E_IF_ROLE_AP		1	/* Access Point */
+#define WLC_E_IF_ROLE_WDS		2	/* WDS link */
+#define WLC_E_IF_ROLE_P2P_GO		3	/* P2P Group Owner */
+#define WLC_E_IF_ROLE_P2P_CLIENT	4	/* P2P Client */
+#define WLC_E_IF_ROLE_IBSS              8       /* IBSS */
+#define WLC_E_IF_ROLE_NAN              9       /* NAN */
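+/*
+ * Illustrative sketch (not part of the original header): interpreting
+ * WLC_E_IF event data with the opcode/role codes above; 'data' is a
+ * hypothetical pointer to the event payload.
+ *
+ *     wl_event_data_if_t *ifevent = (wl_event_data_if_t *)data;
+ *     if (ifevent->opcode == WLC_E_IF_ADD &&
+ *         ifevent->role == WLC_E_IF_ROLE_P2P_GO)
+ *             ... a P2P GO bsscfg was added on interface index ifevent->ifidx ...
+ */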
+
+/* WLC_E_RSSI event data */
+typedef struct wl_event_data_rssi {
+	int32 rssi;
+	int32 snr;
+	int32 noise;
+} wl_event_data_rssi_t;
+
+/* WLC_E_IF flag */
+#define WLC_E_IF_FLAGS_BSSCFG_NOIF	0x1	/* no host I/F creation needed */
+
+/* Reason codes for LINK */
+#define WLC_E_LINK_BCN_LOSS     1   /* Link down because of beacon loss */
+#define WLC_E_LINK_DISASSOC     2   /* Link down because of disassoc */
+#define WLC_E_LINK_ASSOC_REC    3   /* Link down because assoc recreate failed */
+#define WLC_E_LINK_BSSCFG_DIS   4   /* Link down due to bsscfg down */
+
+
+/* WLC_E_NDIS_LINK event data */
+typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms {
+	struct ether_addr peer_mac; /* 6 bytes */
+	uint16 chanspec;            /* 2 bytes */
+	uint32 link_speed;          /* current datarate in units of 500 Kbit/s */
+	uint32 max_link_speed;      /* max possible datarate for link in units of 500 Kbit/s  */
+	int32  rssi;                /* average rssi */
+} BWL_POST_PACKED_STRUCT ndis_link_parms_t;
+
+/* reason codes for WLC_E_OVERLAY_REQ event */
+#define WLC_E_OVL_DOWNLOAD		0	/* overlay download request */
+#define WLC_E_OVL_UPDATE_IND	1	/* device indication of host overlay update */
+
+/* reason codes for WLC_E_TDLS_PEER_EVENT event */
+#define WLC_E_TDLS_PEER_DISCOVERED		0	/* peer is ready to establish TDLS */
+#define WLC_E_TDLS_PEER_CONNECTED		1
+#define WLC_E_TDLS_PEER_DISCONNECTED	2
+
+/* reason codes for WLC_E_RMC_EVENT event */
+#define WLC_E_REASON_RMC_NONE		0
+#define WLC_E_REASON_RMC_AR_LOST		1
+#define WLC_E_REASON_RMC_AR_NO_ACK		2
+
+#ifdef WLTDLS
+/* TDLS Action Category code */
+#define TDLS_AF_CATEGORY		12
+/* Wi-Fi Display (WFD) Vendor Specific Category */
+/* used for WFD Tunneled Probe Request and Response */
+#define TDLS_VENDOR_SPECIFIC		127
+/* TDLS Action Field Values */
+#define TDLS_ACTION_SETUP_REQ		0
+#define TDLS_ACTION_SETUP_RESP		1
+#define TDLS_ACTION_SETUP_CONFIRM	2
+#define TDLS_ACTION_TEARDOWN		3
+#define WLAN_TDLS_SET_PROBE_WFD_IE	11
+#define WLAN_TDLS_SET_SETUP_WFD_IE	12
+#define WLAN_TDLS_SET_WFD_ENABLED	13
+#define WLAN_TDLS_SET_WFD_DISABLED	14
+#endif
+
+/* WLC_E_RANGING_EVENT subtypes */
+#define WLC_E_RANGING_RESULTS	0
+
+
+/* GAS event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
+	uint16	channel;		/* channel of GAS protocol */
+	uint8	dialog_token;	/* GAS dialog token */
+	uint8	fragment_id;	/* fragment id */
+	uint16	status_code;	/* status code on GAS completion */
+	uint16 	data_len;		/* length of data to follow */
+	uint8	data[1];		/* variable length specified by data_len */
+} BWL_POST_PACKED_STRUCT wl_event_gas_t;
+
+/* service discovery TLV */
+typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv {
+	uint16	length;			/* length of response_data */
+	uint8	protocol;		/* service protocol type */
+	uint8	transaction_id;		/* service transaction id */
+	uint8	status_code;		/* status code */
+	uint8	data[1];		/* response data */
+} BWL_POST_PACKED_STRUCT wl_sd_tlv_t;
+
+/* service discovery event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd {
+	uint16	channel;		/* channel */
+	uint8	count;			/* number of tlvs */
+	wl_sd_tlv_t	tlv[1];		/* service discovery TLV */
+} BWL_POST_PACKED_STRUCT wl_event_sd_t;
+
+/* WLC_E_PKT_FILTER event sub-classification codes */
+#define WLC_E_PKT_FILTER_TIMEOUT	1 /* Matching packet not received in last timeout seconds */
+
+/* Note: proxd has a new API (ver 3.0) deprecates the following */
+
+/* Reason codes for WLC_E_PROXD */
+#define WLC_E_PROXD_FOUND		1	/* Found a proximity device */
+#define WLC_E_PROXD_GONE		2	/* Lost a proximity device */
+#define WLC_E_PROXD_START		3	/* used by: target  */
+#define WLC_E_PROXD_STOP		4	/* used by: target   */
+#define WLC_E_PROXD_COMPLETED		5	/* used by: initiator completed */
+#define WLC_E_PROXD_ERROR		6	/* used by both initiator and target */
+#define WLC_E_PROXD_COLLECT_START	7	/* used by: target & initiator */
+#define WLC_E_PROXD_COLLECT_STOP	8	/* used by: target */
+#define WLC_E_PROXD_COLLECT_COMPLETED	9	/* used by: initiator completed */
+#define WLC_E_PROXD_COLLECT_ERROR	10	/* used by both initiator and target */
+#define WLC_E_PROXD_NAN_EVENT		11	/* used by both initiator and target */
+#define WLC_E_PROXD_TS_RESULTS          12      /* used by: initiator completed */
+
+/*  proxd_event data */
+typedef struct ftm_sample {
+	uint32 value;	/* RTT in ns */
+	int8 rssi;	/* RSSI */
+} ftm_sample_t;
+
+typedef struct ts_sample {
+	uint32 t1;
+	uint32 t2;
+	uint32 t3;
+	uint32 t4;
+} ts_sample_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data {
+	uint16 ver;			/* version */
+	uint16 mode;			/* mode: target/initiator */
+	uint16 method;			/* method: rssi/TOF/AOA */
+	uint8  err_code;		/* error classification */
+	uint8  TOF_type;		/* one way or two way TOF */
+	uint8  OFDM_frame_type;		/* legacy or VHT */
+	uint8  bandwidth;		/* Bandwidth is 20, 40, 80 MHz */
+	struct ether_addr peer_mac;	/* peer MAC addr (e.g. for target: the initiator's) */
+	uint32 distance;		/* distance to target, in meters */
+	uint32 meanrtt;			/* mean delta */
+	uint32 modertt;			/* Mode delta */
+	uint32 medianrtt;		/* median RTT */
+	uint32 sdrtt;			/* Standard deviation of RTT */
+	int32  gdcalcresult;		/* Software or hardware; somewhat redundant, but if the */
+					/* frame type is VHT, it should be done in hardware */
+	int16  avg_rssi;		/* avg rssi across the ftm frames */
+	int16  validfrmcnt;		/* Firmware's valid frame counts */
+	int32  peer_router_info;	/* Peer router information if available in TLV, */
+					/* We will add this field later  */
+	int32 var1;			/* average of group delay */
+	int32 var2;			/* average of threshold crossing */
+	int32 var3;			/* difference between group delay and threshold crossing */
+					/* raw Fine Time Measurements (ftm) data */
+	uint16 ftm_unit;		/* ftm count resolution in picoseconds; 6250 ps by default */
+	uint16 ftm_cnt;			/* number of rtd measurements/length in the ftm buffer */
+	ftm_sample_t ftm_buff[1];	/* 1 ... ftm_cnt  */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results {
+	uint16 ver;                     /* version */
+	uint16 mode;                    /* mode: target/initiator */
+	uint16 method;                  /* method: rssi/TOF/AOA */
+	uint8  err_code;                /* error classification */
+	uint8  TOF_type;                /* one way or two way TOF */
+	uint16  ts_cnt;                 /* number of timestamp measurements */
+	ts_sample_t ts_buff[1];         /* Timestamps */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t;
+
+
+/* Video Traffic Interference Monitor Event */
+#define INTFER_EVENT_VERSION		1
+#define INTFER_STREAM_TYPE_NONTCP	1
+#define INTFER_STREAM_TYPE_TCP		2
+#define WLINTFER_STATS_NSMPLS		4
+typedef struct wl_intfer_event {
+	uint16 version;			/* version */
+	uint16 status;			/* status */
+	uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */
+} wl_intfer_event_t;
+
+#define RRM_EVENT_VERSION		0
+typedef struct wl_rrm_event {
+	int16 version;
+	int16 len;
+	int16 cat;		/* Category */
+	int16 subevent;
+	char payload[1]; /* Measurement payload */
+} wl_rrm_event_t;
+
+
+/* WLC_E_PSTA_PRIMARY_INTF_IND event data */
+typedef struct wl_psta_primary_intf_event {
+	struct ether_addr prim_ea;	/* primary intf ether addr */
+} wl_psta_primary_intf_event_t;
+
+/* WLC_E_DPSTA_INTF_IND event data */
+typedef enum {
+	WL_INTF_PSTA = 1,
+	WL_INTF_DWDS = 2
+} wl_dpsta_intf_type;
+
+typedef struct wl_dpsta_intf_event {
+	wl_dpsta_intf_type intf_type;    /* dwds/psta intf register */
+} wl_dpsta_intf_event_t;
+
+/*  **********  NAN protocol events/subevents  ********** */
+#define NAN_EVENT_BUFFER_SIZE 512 /* max size */
+/* NAN Events sent by firmware */
+
+/*
+ * If you make changes to this enum, don't forget to update the mask (if need be).
+ */
+typedef enum wl_nan_events {
+	WL_NAN_EVENT_START			= 1,	/* NAN cluster started */
+	WL_NAN_EVENT_JOIN			= 2,	/* To be deprecated */
+	WL_NAN_EVENT_ROLE			= 3,	/* Role changed */
+	WL_NAN_EVENT_SCAN_COMPLETE		= 4,	/* To be deprecated */
+	WL_NAN_EVENT_DISCOVERY_RESULT		= 5,	/* Subscribe Received */
+	WL_NAN_EVENT_REPLIED			= 6,	/* Publish Sent */
+	WL_NAN_EVENT_TERMINATED			= 7,	/* sub / pub is terminated */
+	WL_NAN_EVENT_RECEIVE			= 8,	/* Follow up Received */
+	WL_NAN_EVENT_STATUS_CHG			= 9,	/* change in nan_mac status */
+	WL_NAN_EVENT_MERGE			= 10,	/* Merged to a NAN cluster */
+	WL_NAN_EVENT_STOP			= 11,	/* To be deprecated */
+	WL_NAN_EVENT_P2P			= 12,	/* Unused */
+	WL_NAN_EVENT_WINDOW_BEGIN_P2P		= 13,	/* Unused */
+	WL_NAN_EVENT_WINDOW_BEGIN_MESH		= 14,	/* Unused */
+	WL_NAN_EVENT_WINDOW_BEGIN_IBSS		= 15,	/* Unused */
+	WL_NAN_EVENT_WINDOW_BEGIN_RANGING	= 16,	/* Unused */
+	WL_NAN_EVENT_POST_DISC			= 17,	/* Event for post discovery data */
+	WL_NAN_EVENT_DATA_IF_ADD		= 18,	/* Unused */
+	WL_NAN_EVENT_DATA_PEER_ADD		= 19,	/* Event for peer add */
+	/* nan 2.0 */
+	/* Will be removed after source code is committed. */
+	WL_NAN_EVENT_DATA_IND			= 20,
+	WL_NAN_EVENT_PEER_DATAPATH_IND		= 20,	/* Incoming DP req */
+	/* Will be removed after source code is committed. */
+	WL_NAN_EVENT_DATA_CONF			= 21,
+	WL_NAN_EVENT_DATAPATH_ESTB		= 21,	/* DP Established */
+	WL_NAN_EVENT_SDF_RX			= 22,	/* SDF payload */
+	WL_NAN_EVENT_DATAPATH_END		= 23,	/* DP Terminate recvd */
+	/* Below event needs to be removed after source code is committed. */
+	WL_NAN_EVENT_DATA_END 			= 23,
+	WL_NAN_EVENT_BCN_RX			= 24,	/* received beacon payload */
+	WL_NAN_EVENT_PEER_DATAPATH_RESP		= 25,	/* Peer's DP response */
+	WL_NAN_EVENT_PEER_DATAPATH_CONF		= 26,	/* Peer's DP confirm */
+	WL_NAN_EVENT_RNG_REQ_IND		= 27,	/* Range Request */
+	WL_NAN_EVENT_RNG_RPT_IND		= 28,	/* Range Report */
+	WL_NAN_EVENT_RNG_TERM_IND		= 29,	/* Range Termination */
+	WL_NAN_EVENT_PEER_DATAPATH_SEC_INST	= 30,   /* Peer's DP sec install */
+	WL_NAN_EVENT_TXS			= 31,   /* for tx status of follow-up and SDFs */
+	WL_NAN_EVENT_INVALID				/* delimiter for max value */
+} nan_app_events_e;
+
+#define NAN_EV_MASK(ev) \
+		(1 << (ev - 1))
+#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0)
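+/*
+ * Illustrative sketch (not part of the original header): building an event
+ * mask from the enum above and testing it; 'nan_evt_mask' is hypothetical.
+ *
+ *     uint32 nan_evt_mask = NAN_EV_MASK(WL_NAN_EVENT_START) |
+ *             NAN_EV_MASK(WL_NAN_EVENT_DISCOVERY_RESULT);
+ *     if (IS_NAN_EVT_ON(nan_evt_mask, WL_NAN_EVENT_DISCOVERY_RESULT))
+ *             ... handle incoming subscribe/discovery results ...
+ */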
+/*  ******************* end of NAN section *************** */
+
+/* WLC_E_ULP event data */
+#define WL_ULP_EVENT_VERSION		1
+#define WL_ULP_DISABLE_CONSOLE		1	/* Disable console message on ULP entry */
+#define WL_ULP_UCODE_DOWNLOAD		2       /* Download ULP ucode file */
+
+typedef struct wl_ulp_event {
+	uint16 version;
+	uint16 ulp_dongle_action;
+} wl_ulp_event_t;
+
+/* TCP keepalive event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_tko {
+	uint8 index;		/* TCP connection index, 0 to max-1 */
+	uint8 pad[3];		/* 4-byte struct alignment */
+} BWL_POST_PACKED_STRUCT wl_event_tko_t;
+
+typedef struct {
+	uint8 radar_type;       /* one of RADAR_TYPE_XXX */
+	uint16 min_pw;          /* minimum pulse-width (usec * 20) */
+	uint16 max_pw;          /* maximum pulse-width (usec * 20) */
+	uint16 min_pri;         /* minimum pulse repetition interval (usec) */
+	uint16 max_pri;         /* maximum pulse repetition interval (usec) */
+	uint16 subband;         /* subband/frequency */
+} radar_detected_event_info_t;
+typedef struct wl_event_radar_detect_data {
+
+	uint32 version;
+	uint16 current_chanspec; /* chanspec on which the radar is received */
+	uint16 target_chanspec; /*  Target chanspec after detection of radar on current_chanspec */
+	radar_detected_event_info_t radar_info[2];
+} wl_event_radar_detect_data_t;
+
+
+#define WL_EVENT_MODESW_VER_1			1
+#define WL_EVENT_MODESW_VER_CURRENT		WL_EVENT_MODESW_VER_1
+
+#define WL_E_MODESW_FLAG_MASK_DEVICE		0x01u /* mask of device: belongs to local or peer */
+#define WL_E_MODESW_FLAG_MASK_FROM		0x02u /* mask of origin: firmware or user */
+#define WL_E_MODESW_FLAG_MASK_STATE		0x0Cu /* mask of state: modesw progress state */
+
+#define WL_E_MODESW_FLAG_DEVICE_LOCAL		0x00u /* flag - device: info is about self/local */
+#define WL_E_MODESW_FLAG_DEVICE_PEER		0x01u /* flag - device: info is about peer */
+
+#define WL_E_MODESW_FLAG_FROM_FIRMWARE		0x00u /* flag - from: request is from firmware */
+#define WL_E_MODESW_FLAG_FROM_USER		0x02u /* flag - from: request is from user/iov */
+
+#define WL_E_MODESW_FLAG_STATE_REQUESTED	0x00u /* flag - state: mode switch request */
+#define WL_E_MODESW_FLAG_STATE_INITIATED	0x04u /* flag - state: switch initiated */
+#define WL_E_MODESW_FLAG_STATE_COMPLETE		0x08u /* flag - state: switch completed/success */
+#define WL_E_MODESW_FLAG_STATE_FAILURE		0x0Cu /* flag - state: failed to switch */
+
+/* Get sizeof *X including variable data's length where X is pointer to wl_event_mode_switch_t */
+#define WL_E_MODESW_SIZE(X) (sizeof(*(X)) + (X)->length)
+
+/* Get variable data's length where X is pointer to wl_event_mode_switch_t */
+#define WL_E_MODESW_DATA_SIZE(X) (((X)->length > sizeof(*(X))) ? ((X)->length - sizeof(*(X))) : 0)
+
+#define WL_E_MODESW_REASON_UNKNOWN		0u /* reason: UNKNOWN */
+#define WL_E_MODESW_REASON_ACSD			1u /* reason: ACSD (based on events from FW) */
+#define WL_E_MODESW_REASON_OBSS_DBS		2u /* reason: OBSS DBS (eg. on interference) */
+#define WL_E_MODESW_REASON_DFS			3u /* reason: DFS (eg. on subband radar) */
+#define WL_E_MODESW_REASON_DYN160		4u /* reason: DYN160 (160/2x2 - 80/4x4) */
+
+/* event structure for WLC_E_MODE_SWITCH */
+typedef struct {
+	uint16 version;
+	uint16 length;	/* size including 'data' field */
+	uint16 opmode_from;
+	uint16 opmode_to;
+	uint32 flags;	/* bit 0: peer(/local==0);
+			 * bit 1: user(/firmware==0);
+			 * bits 3,2: 00==requested, 01==initiated,
+			 *           10==complete, 11==failure;
+			 * rest: reserved
+			 */
+	uint16 reason;	/* value 0: unknown, 1: ACSD, 2: OBSS_DBS,
+			 *       3: DFS, 4: DYN160, rest: reserved
+			 */
+	uint16 data_offset;	/* offset to 'data' from beginning of this struct.
+				 * fields may be added between data_offset and data
+				 */
+	/* ADD NEW FIELDS HERE */
+	uint8 data[];	/* reason specific data; could be empty */
+} wl_event_mode_switch_t;
+
+/* when reason in WLC_E_MODE_SWITCH is DYN160, data will carry the following structure */
+typedef struct {
+	uint16 trigger;		/* value 0: MU to SU, 1: SU to MU, 2: metric_dyn160, 3:re-/assoc,
+				 *       4: disassoc, 5: rssi, 6: traffic, 7: interference,
+				 *       8: chanim_stats
+				 */
+	struct ether_addr sta_addr;	/* causal STA's MAC address when known */
+	uint16 metric_160_80;		/* latest dyn160 metric */
+	uint8 nss;		/* NSS of the STA */
+	uint8 bw;		/* BW of the STA */
+	int8 rssi;		/* RSSI of the STA */
+	uint8 traffic;		/* internal metric of traffic */
+} wl_event_mode_switch_dyn160;
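+/*
+ * Illustrative sketch (not part of the original header): reading a
+ * WLC_E_MODE_SWITCH event; 'data' is a hypothetical pointer to the event
+ * payload.
+ *
+ *     wl_event_mode_switch_t *msw = (wl_event_mode_switch_t *)data;
+ *     uint32 state = msw->flags & WL_E_MODESW_FLAG_MASK_STATE;
+ *     if (msw->reason == WL_E_MODESW_REASON_DYN160 &&
+ *         WL_E_MODESW_DATA_SIZE(msw) >= sizeof(wl_event_mode_switch_dyn160))
+ *             ... the dyn160 payload starts at (uint8 *)msw + msw->data_offset ...
+ */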
+
+#define WL_EVENT_FBT_VER_1		1
+
+#define WL_E_FBT_TYPE_FBT_OTD_AUTH	1
+#define WL_E_FBT_TYPE_FBT_OTA_AUTH	2
+
+/* event structure for WLC_E_FBT */
+typedef struct {
+	uint16 version;
+	uint16 length;	/* size including 'data' field */
+	uint16 type; /* value 0: unknown, 1: FBT OTD Auth Req */
+	uint16 data_offset;	/* offset to 'data' from beginning of this struct.
+				 * fields may be added between data_offset and data
+				 */
+	/* ADD NEW FIELDS HERE */
+	uint8 data[];	/* type specific data; could be empty */
+} wl_event_fbt_t;
+
+/* TWT Setup Completion is designed to notify the user of the TWT Setup
+ * process status. When the 'status' field is BCME_OK, the user must check the
+ * 'setup_cmd' field of the 'wl_twt_sdesc_t' structure at the end of the event
+ * data to see the response from the TWT Responding STA; when the 'status'
+ * field is BCME_ERROR or any other non-BCME_OK value, the user must not use
+ * anything from the 'wl_twt_sdesc_t' structure, as it only reflects the TWT
+ * Requesting STA's own TWT parameters.
+ */
+
+#define WL_TWT_SETUP_CPLT_VER	0
+
+/* TWT Setup Completion event data */
+typedef struct wl_twt_setup_cplt {
+	uint16 version;
+	uint16 length;	/* the byte count of fields from 'dialog' onwards */
+	uint8 dialog;	/* the dialog token user supplied to the TWT setup API */
+	uint8 pad[3];
+	int32 status;
+	/* wl_twt_sdesc_t desc; - defined in wlioctl.h */
+} wl_twt_setup_cplt_t;
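+/*
+ * Illustrative sketch (not part of the original header): handling the TWT
+ * setup completion event described above; 'evt_data' is a hypothetical
+ * pointer to the event payload, and the descriptor is assumed to follow the
+ * fixed fields directly.
+ *
+ *     wl_twt_setup_cplt_t *cplt = (wl_twt_setup_cplt_t *)evt_data;
+ *     if (cplt->status == BCME_OK) {
+ *             wl_twt_sdesc_t *desc = (wl_twt_sdesc_t *)(cplt + 1);
+ *             ... inspect desc->setup_cmd for the responder's answer ...
+ *     }
+ */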
+
+#define WL_INVALID_IE_EVENT_VERSION	0
+
+/* Invalid IE Event data */
+typedef struct wl_invalid_ie_event {
+	uint16 version;
+	uint16 len;      /* Length of the invalid IE copy */
+	uint16 type;     /* Type/subtype of the frame which contains the invalid IE */
+	uint16 error;    /* error code of the wrong IE, defined in ie_error_code_t */
+	uint8  ie[];     /* Variable length buffer for the invalid IE copy */
+} wl_invalid_ie_event_t;
+
+/* Fixed header portion of Invalid IE Event */
+typedef struct wl_invalid_ie_event_hdr {
+	uint16 version;
+	uint16 len;      /* Length of the invalid IE copy */
+	uint16 type;     /* Type/subtype of the frame which contains the invalid IE */
+	uint16 error;    /* error code of the wrong IE, defined in ie_error_code_t */
+	/* var length IE data follows */
+} wl_invalid_ie_event_hdr_t;
+
+typedef enum ie_error_code {
+	IE_ERROR_OUT_OF_RANGE = 0x01
+} ie_error_code_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* reason of channel switch */
+typedef enum {
+	CHANSW_DFS = 10,	/* channel switch due to DFS module */
+	CHANSW_HOMECH_REQ = 14, /* channel switch due to HOME Channel Request */
+	CHANSW_STA = 15,	/* channel switch due to STA */
+	CHANSW_SOFTAP = 16,	/* channel switch due to SoftAP */
+	CHANSW_AIBSS = 17,	/* channel switch due to AIBSS */
+	CHANSW_NAN = 18,	/* channel switch due to NAN */
+	CHANSW_NAN_DISC = 19,	/* channel switch due to NAN Disc */
+	CHANSW_NAN_SCHED = 20,	/* channel switch due to NAN Sched */
+	CHANSW_AWDL_AW = 21,	/* channel switch due to AWDL aw */
+	CHANSW_AWDL_SYNC = 22,	/* channel switch due to AWDL sync */
+	CHANSW_AWDL_CAL = 23,	/* channel switch due to AWDL Cal */
+	CHANSW_AWDL_PSF = 24,	/* channel switch due to AWDL PSF */
+	CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */
+	CHANSW_TDLS = 26,	/* channel switch due to TDLS */
+	CHANSW_PROXD = 27,	/* channel switch due to PROXD */
+	CHANSW_MAX_NUMBER = 28	/* max channel switch reason */
+} wl_chansw_reason_t;
+
+#define CHANSW_REASON(reason)	(1 << reason)
+
+#define EVENT_AGGR_DATA_HDR_LEN	8
+
+typedef struct event_aggr_data {
+	uint16  num_events; /* No of events aggregated */
+	uint16	len;	/* length of the aggregated events, excludes padding */
+	uint8	pad[4]; /* Padding to make aggr event packet header aligned
+					 * on 64-bit boundary, for a 64-bit host system.
+					 */
+	uint8	data[];	/* Aggregate buffer containing Events */
+} event_aggr_data_t;
+
+
+/* WLC_E_TVPM_MITIGATION event structure version */
+#define WL_TVPM_MITIGATION_VERSION 1
+
+/* TVPM mitigation on/off status bits */
+#define WL_TVPM_MITIGATION_TXDC		0x1
+#define WL_TVPM_MITIGATION_TXPOWER	0x2
+#define WL_TVPM_MITIGATION_TXCHAINS	0x4
+
+/* Event structure for WLC_E_TVPM_MITIGATION */
+typedef struct wl_event_tvpm_mitigation {
+	uint16 version;		/* structure version */
+	uint16 length;		/* length of this structure */
+	uint32 timestamp_ms;	/* millisecond timestamp */
+	uint8 slice;		/* slice number */
+	uint8 pad;
+	uint16 on_off;		/* mitigation status bits */
+} wl_event_tvpm_mitigation_t;
+
+#endif /* _BCMEVENT_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmip.h b/drivers/net/wireless/bcmdhd/include/bcmip.h
new file mode 100644
index 0000000..e5bb0ac
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmip.h
@@ -0,0 +1,249 @@
+/*
+ * Fundamental constants relating to IP Protocol
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmip.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET		0x0	/* offset to version field */
+#define IP_VER_MASK		0xf0	/* version mask */
+#define IP_VER_SHIFT		4	/* version shift */
+#define IP_VER_4		4	/* version number for IPV4 */
+#define IP_VER_6		6	/* version number for IPV6 */
+
+#define IP_VER(ip_body) \
+	((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP		0x1	/* ICMP protocol */
+#define IP_PROT_IGMP		0x2	/* IGMP protocol */
+#define IP_PROT_TCP		0x6	/* TCP protocol */
+#define IP_PROT_UDP		0x11	/* UDP protocol type */
+#define IP_PROT_GRE		0x2f	/* GRE protocol type */
+#define IP_PROT_ICMP6           0x3a    /* ICMPv6 protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET      0       /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET         1       /* type of service offset */
+#define IPV4_PKTLEN_OFFSET      2       /* packet length offset */
+#define IPV4_PKTFLAG_OFFSET     6       /* more-frag,dont-frag flag offset */
+#define IPV4_PROT_OFFSET        9       /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET      10      /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET      12      /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET     16      /* dest IP addr offset */
+#define IPV4_OPTIONS_OFFSET     20      /* IP options offset */
+#define IPV4_MIN_HEADER_LEN     20      /* Minimum size for an IP header (no options) */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK		0xf0	/* IPV4 version mask */
+#define IPV4_VER_SHIFT		4	/* IPV4 version shift */
+
+#define IPV4_HLEN_MASK		0x0f	/* IPV4 header length mask */
+#define IPV4_HLEN(ipv4_body)	(4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN		4	/* IPV4 address length */
+
+#define IPV4_ADDR_NULL(a)	((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+				  ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a)	((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+				  ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define	IPV4_TOS_DSCP_MASK	0xfc	/* DiffServ codepoint mask */
+#define	IPV4_TOS_DSCP_SHIFT	2	/* DiffServ codepoint shift */
+
+#define	IPV4_TOS(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define	IPV4_TOS_PREC_MASK	0xe0	/* Historical precedence mask */
+#define	IPV4_TOS_PREC_SHIFT	5	/* Historical precedence shift */
+
+#define IPV4_TOS_LOWDELAY	0x10	/* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT	0x8	/* Best throughput requested */
+#define IPV4_TOS_RELIABILITY	0x4	/* Most reliable delivery requested */
+
+#define IPV4_TOS_ROUTINE        0
+#define IPV4_TOS_PRIORITY       1
+#define IPV4_TOS_IMMEDIATE      2
+#define IPV4_TOS_FLASH          3
+#define IPV4_TOS_FLASHOVERRIDE  4
+#define IPV4_TOS_CRITICAL       5
+#define IPV4_TOS_INETWORK_CTRL  6
+#define IPV4_TOS_NETWORK_CTRL   7
+
+#define IPV4_PROT(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
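+/*
+ * Illustrative sketch (not part of the original header): locating the
+ * transport header of an IPv4 packet; 'ipv4_body' is a hypothetical pointer
+ * to the first byte of the IPv4 header.
+ *
+ *     uint8 *l4 = (uint8 *)ipv4_body + IPV4_HLEN(ipv4_body);
+ *     if (IPV4_PROT(ipv4_body) == IP_PROT_UDP)
+ *             ... 'l4' points at the UDP header ...
+ */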
+
+#define IPV4_FRAG_RESV		0x8000	/* Reserved */
+#define IPV4_FRAG_DONT		0x4000	/* Don't fragment */
+#define IPV4_FRAG_MORE		0x2000	/* More fragments */
+#define IPV4_FRAG_OFFSET_MASK	0x1fff	/* Fragment offset */
+
+#define IPV4_ADDR_STR_LEN	16	/* Max IP address length in string format */
+
+/* IPV4 packet formats */
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+	uint8	addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+	uint8	version_ihl;		/* Version and Internet Header Length */
+	uint8	tos;			/* Type Of Service */
+	uint16	tot_len;		/* Number of bytes in packet (max 65535) */
+	uint16	id;
+	uint16	frag;			/* 3 flag bits and fragment offset */
+	uint8	ttl;			/* Time To Live */
+	uint8	prot;			/* Protocol */
+	uint16	hdr_chksum;		/* IP header checksum */
+	uint8	src_ip[IPV4_ADDR_LEN];	/* Source IP Address */
+	uint8	dst_ip[IPV4_ADDR_LEN];	/* Destination IP Address */
+} BWL_POST_PACKED_STRUCT;
+
+/* IPV6 field offsets */
+#define IPV6_PAYLOAD_LEN_OFFSET	4	/* payload length offset */
+#define IPV6_NEXT_HDR_OFFSET	6	/* next header/protocol offset */
+#define IPV6_HOP_LIMIT_OFFSET	7	/* hop limit offset */
+#define IPV6_SRC_IP_OFFSET	8	/* src IP addr offset */
+#define IPV6_DEST_IP_OFFSET	24	/* dst IP addr offset */
+
+/* IPV6 field decodes */
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+	 ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+	 (((uint8 *)(ipv6_body))[2] << 8) | \
+	 (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+	((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+	 ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+	(((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body)	IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN		16	/* IPV6 address length */
+
+/* IPV4 TOS or IPV6 Traffic Classifier or 0 */
+#define IP_TOS46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT)
+
+/* IPV4 or IPV6 Protocol Classifier or 0 */
+#define IP_PROT46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_PROT(ip_body) : 0)
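+/*
+ * Illustrative sketch (not part of the original header): classifying a raw
+ * IP payload of either version with the helpers above; 'ip_body' is a
+ * hypothetical pointer to the first byte of the IP header.
+ *
+ *     uint8 dscp = IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT;
+ *     if (IP_PROT46(ip_body) == IP_PROT_TCP)
+ *             ... TCP-specific handling, e.g. map 'dscp' to an access category ...
+ */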
+
+/* IPV6 extension headers (options) */
+#define IPV6_EXTHDR_HOP		0
+#define IPV6_EXTHDR_ROUTING	43
+#define IPV6_EXTHDR_FRAGMENT	44
+#define IPV6_EXTHDR_AUTH	51
+#define IPV6_EXTHDR_NONE	59
+#define IPV6_EXTHDR_DEST	60
+
+#define IPV6_EXTHDR(prot)	(((prot) == IPV6_EXTHDR_HOP) || \
+	                         ((prot) == IPV6_EXTHDR_ROUTING) || \
+	                         ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+	                         ((prot) == IPV6_EXTHDR_AUTH) || \
+	                         ((prot) == IPV6_EXTHDR_NONE) || \
+	                         ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 		40
+
+#define IPV6_EXTHDR_LEN(eh)	((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+	uint8	nexthdr;
+	uint8	hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+	uint8	nexthdr;
+	uint8	rsvd;
+	uint16	frag_off;
+	uint32	ident;
+} BWL_POST_PACKED_STRUCT;
+
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)
+{
+	uint16 len = 0, hlen;
+	struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+	while (IPV6_EXTHDR(eh->nexthdr)) {
+		if (eh->nexthdr == IPV6_EXTHDR_NONE)
+			return -1;
+		else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+			hlen = 8;
+		else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+			hlen = (eh->hdrlen + 2) << 2;
+		else
+			hlen = IPV6_EXTHDR_LEN(eh);
+
+		len += hlen;
+		eh = (struct ipv6_exthdr *)(h + len);
+	}
+
+	*proto = eh->nexthdr;
+	return len;
+}
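+/*
+ * Illustrative sketch (not part of the original header): skipping IPv6
+ * extension headers to find the upper-layer protocol; 'ipv6_body' is a
+ * hypothetical pointer to the first byte of the IPv6 header.
+ *
+ *     uint8 proto = IPV6_PROT(ipv6_body);
+ *     if (IPV6_EXTHDR(proto)) {
+ *             int32 extlen = ipv6_exthdr_len((uint8 *)ipv6_body + IPV6_MIN_HLEN, &proto);
+ *             if (extlen >= 0)
+ *                     ... 'proto' now holds the upper-layer protocol number ...
+ *     }
+ */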
+
+#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000)
+
+#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \
+{ \
+	ether[0] = 0x01; \
+	ether[1] = 0x00; \
+	ether[2] = 0x5E; \
+	ether[3] = (ipv4 & 0x7f0000) >> 16; \
+	ether[4] = (ipv4 & 0xff00) >> 8; \
+	ether[5] = (ipv4 & 0xff); \
+}
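+/*
+ * Illustrative sketch (not part of the original header): deriving the
+ * multicast MAC address for an IPv4 multicast address held in a uint32 with
+ * the first octet in the most significant byte; names are hypothetical.
+ *
+ *     uint32 grp = 0xe0000001;        // 224.0.0.1
+ *     uint8 mcast_mac[6];
+ *     if (IPV4_ISMULTI(grp))
+ *             IPV4_MCAST_TO_ETHER_MCAST(grp, mcast_mac);  // 01:00:5e:00:00:01
+ */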
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define IPV4_ADDR_STR "%d.%d.%d.%d"
+#define IPV4_ADDR_TO_STR(addr)	((uint32)addr & 0xff000000) >> 24, \
+								((uint32)addr & 0x00ff0000) >> 16, \
+								((uint32)addr & 0x0000ff00) >> 8, \
+								((uint32)addr & 0x000000ff)
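+
+/*
+ * Illustrative sketch (not part of the original header): printing an address
+ * held in a uint32 with the first octet in the most significant byte;
+ * 'src_ip' is hypothetical.
+ *
+ *     printf("src " IPV4_ADDR_STR "\n", IPV4_ADDR_TO_STR(src_ip));
+ */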
+
+#endif	/* _bcmip_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmipv6.h b/drivers/net/wireless/bcmdhd/include/bcmipv6.h
new file mode 100644
index 0000000..84e2b69
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmipv6.h
@@ -0,0 +1,163 @@
+/*
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmipv6.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Extension headers */
+#define IPV6_EXT_HOP	0
+#define IPV6_EXT_ROUTE	43
+#define IPV6_EXT_FRAG	44
+#define IPV6_EXT_DEST	60
+#define IPV6_EXT_ESEC	50
+#define IPV6_EXT_AUTH	51
+
+/* Minimum size (extension header "word" length) */
+#define IPV6_EXT_WORD	8
+
+/* Offsets for most extension headers */
+#define IPV6_EXT_NEXTHDR	0
+#define IPV6_EXT_HDRLEN		1
+
+/* Constants specific to fragmentation header */
+#define IPV6_FRAG_MORE_MASK	0x0001
+#define IPV6_FRAG_MORE_SHIFT	0
+#define IPV6_FRAG_OFFS_MASK	0xfff8
+#define IPV6_FRAG_OFFS_SHIFT	3
+
+/* For icmpv6 */
+#define ICMPV6_HEADER_TYPE	0x3A
+#define ICMPV6_PKT_TYPE_RA	134
+#define ICMPV6_PKT_TYPE_NS	135
+#define ICMPV6_PKT_TYPE_NA	136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC	2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC		1
+
+#define ICMPV6_ND_OPT_LEN_LINKADDR		1
+
+#define IPV6_VERSION 	6
+#define IPV6_HOP_LIMIT 	255
+
+#define IPV6_ADDR_NULL(a)	((a[0] | a[1] | a[2] | a[3] | a[4] | \
+							 a[5] | a[6] | a[7] | a[8] | a[9] | \
+							 a[10] | a[11] | a[12] | a[13] | \
+							 a[14] | a[15]) == 0)
+
+#define IPV6_ADDR_LOCAL(a)	(((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+		uint8		addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+	uint8	icmp6_type;
+	uint8	icmp6_code;
+	uint16	icmp6_cksum;
+	BWL_PRE_PACKED_STRUCT union {
+		uint32 reserved;
+		BWL_PRE_PACKED_STRUCT struct nd_advt {
+			uint32	reserved1:5,
+				override:1,
+				solicited:1,
+				router:1,
+				reserved2:24;
+		} BWL_POST_PACKED_STRUCT nd_advt;
+	} BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+	uint8	priority:4,
+		version:4;
+	uint8	flow_lbl[3];
+	uint16	payload_len;
+	uint8	nexthdr;
+	uint8 	hop_limit;
+	struct	ipv6_addr	saddr;
+	struct	ipv6_addr	daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct bcm_nd_msg {
+	struct	icmp6_hdr	icmph;
+	struct	ipv6_addr	target;
+} BWL_POST_PACKED_STRUCT;
+
+
+/* Neighbor Solicitation/Advertisement Optional Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+	uint8 type;
+	uint8 len;
+	uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Fragmentation Header */
+BWL_PRE_PACKED_STRUCT struct ipv6_frag {
+	uint8	nexthdr;
+	uint8	reserved;
+	uint16	frag_offset;
+	uint32	ident;
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+static const struct ipv6_addr all_node_ipv6_maddr = {
+									{ 0xff, 0x2, 0, 0,
+									0, 0, 0, 0,
+									0, 0, 0, 0,
+									0, 0, 0, 1
+									}};
+
+#define IPV6_ISMULTI(a) (a[0] == 0xff)
+
+#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \
+{ \
+	ether[0] = 0x33; \
+	ether[1] = 0x33; \
+	ether[2] = ipv6[12]; \
+	ether[3] = ipv6[13]; \
+	ether[4] = ipv6[14]; \
+	ether[5] = ipv6[15]; \
+}
+
+#endif	/* !defined(_bcmipv6_h_) */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
new file mode 100644
index 0000000..08d6693
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
@@ -0,0 +1,1202 @@
+/*
+ * MSGBUF network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmmsgbuf.h 676811 2016-12-24 20:48:46Z $
+ */
+#ifndef _bcmmsgbuf_h_
+#define	_bcmmsgbuf_h_
+
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <bcmpcie.h>
+
+#define MSGBUF_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+#define D2H_EPOCH_MODULO		253 /* sequence number wrap */
+#define D2H_EPOCH_INIT_VAL		(D2H_EPOCH_MODULO + 1)
+
+#define H2D_EPOCH_MODULO		253 /* sequence number wrap */
+#define H2D_EPOCH_INIT_VAL		(H2D_EPOCH_MODULO + 1)
+
+#define H2DRING_TXPOST_ITEMSIZE		48
+#define H2DRING_RXPOST_ITEMSIZE		32
+#define H2DRING_CTRL_SUB_ITEMSIZE	40
+
+#define D2HRING_TXCMPLT_ITEMSIZE	24
+#define D2HRING_RXCMPLT_ITEMSIZE	40
+
+#define D2HRING_TXCMPLT_ITEMSIZE_PREREV7	16
+#define D2HRING_RXCMPLT_ITEMSIZE_PREREV7	32
+
+#define D2HRING_CTRL_CMPLT_ITEMSIZE	24
+#define H2DRING_INFO_BUFPOST_ITEMSIZE	H2DRING_CTRL_SUB_ITEMSIZE
+#define D2HRING_INFO_BUFCMPLT_ITEMSIZE	D2HRING_CTRL_CMPLT_ITEMSIZE
+
+#define H2DRING_TXPOST_MAX_ITEM			512
+#define H2DRING_RXPOST_MAX_ITEM			512
+#define H2DRING_CTRL_SUB_MAX_ITEM		64
+#define D2HRING_TXCMPLT_MAX_ITEM		1024
+#define D2HRING_RXCMPLT_MAX_ITEM		512
+
+#define H2DRING_DYNAMIC_INFO_MAX_ITEM          32
+#define D2HRING_DYNAMIC_INFO_MAX_ITEM          32
+
+#define D2HRING_CTRL_CMPLT_MAX_ITEM		64
+
+enum {
+	DNGL_TO_HOST_MSGBUF,
+	HOST_TO_DNGL_MSGBUF
+};
+
+enum {
+	HOST_TO_DNGL_TXP_DATA,
+	HOST_TO_DNGL_RXP_DATA,
+	HOST_TO_DNGL_CTRL,
+	DNGL_TO_HOST_DATA,
+	DNGL_TO_HOST_CTRL
+};
+
+#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? TRUE : FALSE
+#define PCIEDEV_FIRMWARE_TSINFO 0x1
+
+#ifdef PCIE_API_REV1
+
+#define BCMMSGBUF_DUMMY_REF(a, b)	do {BCM_REFERENCE((a));BCM_REFERENCE((b));}  while (0)
+
+#define BCMMSGBUF_API_IFIDX(a)		0
+#define BCMMSGBUF_API_SEQNUM(a)		0
+#define BCMMSGBUF_IOCTL_XTID(a)		0
+#define BCMMSGBUF_IOCTL_PKTID(a)	((a)->cmd_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_SET_API_SEQNUM(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b)	(BCMMSGBUF_IOCTL_PKTID(a) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+
+#else /* PCIE_API_REV1 */
+
+#define BCMMSGBUF_API_IFIDX(a)		((a)->if_id)
+#define BCMMSGBUF_IOCTL_PKTID(a)	((a)->pkt_id)
+#define BCMMSGBUF_API_SEQNUM(a)		((a)->u.seq.seq_no)
+#define BCMMSGBUF_IOCTL_XTID(a)		((a)->xt_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b)	(BCMMSGBUF_API_IFIDX((a)) = (b))
+#define BCMMSGBUF_SET_API_SEQNUM(a, b)	(BCMMSGBUF_API_SEQNUM((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b)	(BCMMSGBUF_IOCTL_PKTID((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b)	(BCMMSGBUF_IOCTL_XTID((a)) = (b))
+
+#endif /* PCIE_API_REV1 */
+
+/* utility data structures */
+
+union addr64 {
+	struct {
+		uint32 low;
+		uint32 high;
+	};
+	struct {
+		uint32 low_addr;
+		uint32 high_addr;
+	};
+	uint64 u64;
+} DECLSPEC_ALIGN(8);
+
+typedef union addr64 bcm_addr64_t;
+
+/* IOCTL req Hdr */
+/* cmn Msg Hdr */
+typedef struct cmn_msg_hdr {
+	/** message type */
+	uint8 msg_type;
+	/** interface index this is valid for */
+	uint8 if_id;
+	/* flags */
+	uint8 flags;
+	/** sequence number */
+	uint8 epoch;
+	/** packet Identifier for the associated host buffer */
+	uint32 request_id;
+} cmn_msg_hdr_t;
+
+/** message type */
+typedef enum bcmpcie_msgtype {
+	MSG_TYPE_GEN_STATUS		= 0x1,
+	MSG_TYPE_RING_STATUS		= 0x2,
+	MSG_TYPE_FLOW_RING_CREATE	= 0x3,
+	MSG_TYPE_FLOW_RING_CREATE_CMPLT	= 0x4,
+	/* Enum value as copied from BISON 7.15: new generic message */
+	MSG_TYPE_RING_CREATE_CMPLT	= 0x4,
+	MSG_TYPE_FLOW_RING_DELETE	= 0x5,
+	MSG_TYPE_FLOW_RING_DELETE_CMPLT	= 0x6,
+	/* Enum value as copied from BISON 7.15: new generic message */
+	MSG_TYPE_RING_DELETE_CMPLT	= 0x6,
+	MSG_TYPE_FLOW_RING_FLUSH	= 0x7,
+	MSG_TYPE_FLOW_RING_FLUSH_CMPLT	= 0x8,
+	MSG_TYPE_IOCTLPTR_REQ		= 0x9,
+	MSG_TYPE_IOCTLPTR_REQ_ACK	= 0xA,
+	MSG_TYPE_IOCTLRESP_BUF_POST	= 0xB,
+	MSG_TYPE_IOCTL_CMPLT		= 0xC,
+	MSG_TYPE_EVENT_BUF_POST		= 0xD,
+	MSG_TYPE_WL_EVENT		= 0xE,
+	MSG_TYPE_TX_POST		= 0xF,
+	MSG_TYPE_TX_STATUS		= 0x10,
+	MSG_TYPE_RXBUF_POST		= 0x11,
+	MSG_TYPE_RX_CMPLT		= 0x12,
+	MSG_TYPE_LPBK_DMAXFER 		= 0x13,
+	MSG_TYPE_LPBK_DMAXFER_CMPLT	= 0x14,
+	MSG_TYPE_FLOW_RING_RESUME	 = 0x15,
+	MSG_TYPE_FLOW_RING_RESUME_CMPLT	= 0x16,
+	MSG_TYPE_FLOW_RING_SUSPEND	= 0x17,
+	MSG_TYPE_FLOW_RING_SUSPEND_CMPLT	= 0x18,
+	MSG_TYPE_INFO_BUF_POST		= 0x19,
+	MSG_TYPE_INFO_BUF_CMPLT		= 0x1A,
+	MSG_TYPE_H2D_RING_CREATE	= 0x1B,
+	MSG_TYPE_D2H_RING_CREATE	= 0x1C,
+	MSG_TYPE_H2D_RING_CREATE_CMPLT	= 0x1D,
+	MSG_TYPE_D2H_RING_CREATE_CMPLT	= 0x1E,
+	MSG_TYPE_H2D_RING_CONFIG	= 0x1F,
+	MSG_TYPE_D2H_RING_CONFIG	= 0x20,
+	MSG_TYPE_H2D_RING_CONFIG_CMPLT	= 0x21,
+	MSG_TYPE_D2H_RING_CONFIG_CMPLT	= 0x22,
+	MSG_TYPE_H2D_MAILBOX_DATA	= 0x23,
+	MSG_TYPE_D2H_MAILBOX_DATA	= 0x24,
+	MSG_TYPE_TIMSTAMP_BUFPOST	= 0x25,
+	MSG_TYPE_HOSTTIMSTAMP		= 0x26,
+	MSG_TYPE_HOSTTIMSTAMP_CMPLT	= 0x27,
+	MSG_TYPE_FIRMWARE_TIMESTAMP	= 0x28,
+	MSG_TYPE_API_MAX_RSVD		= 0x3F
+} bcmpcie_msg_type_t;
+
+typedef enum bcmpcie_msgtype_int {
+	MSG_TYPE_INTERNAL_USE_START	= 0x40,
+	MSG_TYPE_EVENT_PYLD		= 0x41,
+	MSG_TYPE_IOCT_PYLD		= 0x42,
+	MSG_TYPE_RX_PYLD		= 0x43,
+	MSG_TYPE_HOST_FETCH		= 0x44,
+	MSG_TYPE_LPBK_DMAXFER_PYLD	= 0x45,
+	MSG_TYPE_TXMETADATA_PYLD	= 0x46,
+	MSG_TYPE_INDX_UPDATE		= 0x47,
+	MSG_TYPE_INFO_PYLD		= 0x48,
+	MSG_TYPE_TS_EVENT_PYLD		= 0x49
+} bcmpcie_msgtype_int_t;
+
+typedef enum bcmpcie_msgtype_u {
+	MSG_TYPE_TX_BATCH_POST		= 0x80,
+	MSG_TYPE_IOCTL_REQ		= 0x81,
+	MSG_TYPE_HOST_EVNT		= 0x82, /* console related */
+	MSG_TYPE_LOOPBACK		= 0x83
+} bcmpcie_msgtype_u_t;
+
+/**
+ * D2H ring host wakeup soft doorbell, override the PCIE doorbell.
+ * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE
+ * Transl0 to write specified value to host address.
+ *
+ * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register
+ * and value is Core/Thread context. Host will ensure that the 32bit address
+ * offered to PCIE is routed to the mapped register.
+ *
+ * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL
+ */
+typedef struct bcmpcie_soft_doorbell {
+	uint32	value;  /* host defined value to be written, eg HW threadid */
+	bcm_addr64_t haddr; /* host address, eg thread wakeup register address */
+	uint16	items;  /* interrupt coalescing: item count before wakeup */
+	uint16	msecs;  /* interrupt coalescing: timeout in millisecs */
+} bcmpcie_soft_doorbell_t;
+
+/**
+ * D2H interrupt using MSI instead of INTX
+ * Host configures MSI vector offset for each D2H interrupt
+ *
+ * D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL
+ */
+typedef enum bcmpcie_msi_intr_idx {
+	MSI_INTR_IDX_CTRL_CMPL_RING,
+	MSI_INTR_IDX_TXP_CMPL_RING,
+	MSI_INTR_IDX_RXP_CMPL_RING,
+	MSI_INTR_IDX_MAILBOX,
+	MSI_INTR_IDX_MAX
+} bcmpcie_msi_intr_idx_t;
+
+typedef enum bcmpcie_msi_offset_type {
+	BCMPCIE_D2H_MSI_OFFSET_MB0 = 2,
+	BCMPCIE_D2H_MSI_OFFSET_MB1,
+	BCMPCIE_D2H_MSI_OFFSET_DB0,
+	BCMPCIE_D2H_MSI_OFFSET_DB1,
+	BCMPCIE_D2H_MSI_OFFSET_MAX
+} bcmpcie_msi_offset_type_t;
+
+typedef struct bcmpcie_msi_offset {
+	uint16	intr_idx;    /* interrupt index */
+	uint16	msi_offset;  /* msi vector offset */
+} bcmpcie_msi_offset_t;
+
+typedef struct bcmpcie_msi_offset_config {
+	uint32	len;
+	bcmpcie_msi_offset_t	bcmpcie_msi_offset[MSI_INTR_IDX_MAX];
+} bcmpcie_msi_offset_config_t;
+
+#define BCMPCIE_D2H_MSI_OFFSET_DEFAULT	BCMPCIE_D2H_MSI_OFFSET_DB1
+
+
+/* if_id */
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT	5
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX	0x7
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK	\
+	(BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT)
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT	0
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX	0x1F
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK	\
+	(BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX << BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT)
+
+/* flags */
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX		0x1
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR	0x2
+#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT		0x80
+#define BCMPCIE_CMNHDR_PHASE_BIT_INIT		0x80
+
+/* IOCTL request message */
+typedef struct ioctl_req_msg {
+	/** common message header */
+	cmn_msg_hdr_t 	cmn_hdr;
+	/** ioctl command type */
+	uint32		cmd;
+	/** ioctl transaction ID, to pair with a ioctl response */
+	uint16		trans_id;
+	/** input arguments buffer len */
+	uint16		input_buf_len;
+	/** expected output len */
+	uint16		output_buf_len;
+	/** to align the host address on 8 byte boundary */
+	uint16		rsvd[3];
+	/** always align on 8 byte boundary */
+	bcm_addr64_t	host_input_buf_addr;
+	/* rsvd */
+	uint32		rsvd1[2];
+} ioctl_req_msg_t;
+
+/** buffer post messages for device to use to return IOCTL responses, Events */
+typedef struct ioctl_resp_evt_buf_post_msg {
+	/** common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/** length of the host buffer supplied */
+	uint16		host_buf_len;
+	/** to align the host address on 8 byte boundary */
+	uint16		reserved[3];
+	/** always align on 8 byte boundary */
+	bcm_addr64_t	host_buf_addr;
+	uint32		rsvd[4];
+} ioctl_resp_evt_buf_post_msg_t;
+
+/* buffer post messages for device to use to return dbg buffers */
+typedef ioctl_resp_evt_buf_post_msg_t info_buf_post_msg_t;
+
+
+/* An infobuf host buffer starts with a 32 bit (LE) version. */
+#define PCIE_INFOBUF_V1                1
+/* Infobuf v1 type MSGTRACE's data is exactly the same as the MSGTRACE data that
+ * is wrapped previously/also in a WLC_E_TRACE event.  See structure
+ * msgtrace_hdr_t in msgtrace.h.
+*/
+#define PCIE_INFOBUF_V1_TYPE_MSGTRACE  1
+
+/* Infobuf v1 type LOGTRACE data is exactly the same as the LOGTRACE data that
+ * is wrapped previously/also in a WLC_E_TRACE event.  See structure
+ * msgtrace_hdr_t in msgtrace.h.  (The only difference between a MSGTRACE
+ * and a LOGTRACE is the "trace type" field.)
+*/
+#define PCIE_INFOBUF_V1_TYPE_LOGTRACE  2
+
+/* An infobuf version 1 host buffer has a single TLV.  The information on the
+ * version 1 types follow this structure definition. (int's LE)
+*/
+typedef struct info_buf_payload_hdr_s {
+	uint16 type;
+	uint16 length;
+} info_buf_payload_hdr_t;
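
For illustration only (not part of the patch): a sketch of walking an infobuf host buffer as described above, i.e. a 32-bit LE version word followed by a single TLV; byte-order handling is simplified and assumes a little-endian host.

	#include <string.h>

	static int example_parse_infobuf(const uint8 *buf, uint16 buf_len)
	{
		uint32 version;
		info_buf_payload_hdr_t hdr;

		if (buf_len < sizeof(version) + sizeof(hdr))
			return -1;
		memcpy(&version, buf, sizeof(version));
		if (version != PCIE_INFOBUF_V1)
			return -1;                 /* unknown buffer layout */
		memcpy(&hdr, buf + sizeof(version), sizeof(hdr));
		switch (hdr.type) {
		case PCIE_INFOBUF_V1_TYPE_MSGTRACE:
		case PCIE_INFOBUF_V1_TYPE_LOGTRACE:
			return hdr.length;         /* payload bytes follow the TLV header */
		default:
			return -1;
		}
	}
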
+
+#define PCIE_DMA_XFER_FLG_D11_LPBK_MASK	0x00000001
+#define PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT	0
+
+typedef struct pcie_dma_xfer_params {
+	/** common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+
+	/** always align on 8 byte boundary */
+	bcm_addr64_t	host_input_buf_addr;
+
+	/** always align on 8 byte boundary */
+	bcm_addr64_t	host_ouput_buf_addr;
+
+	/** length of transfer */
+	uint32		xfer_len;
+	/** delay before doing the src txfer */
+	uint32		srcdelay;
+	/** delay before doing the dest txfer */
+	uint32		destdelay;
+	uint8		rsvd[3];
+	uint8		flags;
+} pcie_dma_xfer_params_t;
+
+/** Complete msgbuf hdr for flow ring update from host to dongle */
+typedef struct tx_flowring_create_request {
+	cmn_msg_hdr_t   msg;
+	uint8	da[ETHER_ADDR_LEN];
+	uint8	sa[ETHER_ADDR_LEN];
+	uint8	tid;
+	uint8 	if_flags;
+	uint16	flow_ring_id;
+	uint8 	tc;
+	/* priority_ifrmmask defines the core mask in IFRM mode.
+	 * It is not currently used for priority, so it serves solely as the IFRM mask.
+	 */
+	uint8	priority_ifrmmask;
+	uint16 	int_vector;
+	uint16	max_items;
+	uint16	len_item;
+	bcm_addr64_t flow_ring_ptr;
+} tx_flowring_create_request_t;
+
+typedef struct tx_flowring_delete_request {
+	cmn_msg_hdr_t   msg;
+	uint16	flow_ring_id;
+	uint16 	reason;
+	uint32	rsvd[7];
+} tx_flowring_delete_request_t;
+
+typedef struct tx_flowring_flush_request {
+	cmn_msg_hdr_t   msg;
+	uint16	flow_ring_id;
+	uint16 	reason;
+	uint32	rsvd[7];
+} tx_flowring_flush_request_t;
+
+/** Subtypes for ring_config_req control message */
+typedef enum ring_config_subtype {
+	/** Default D2H PCIE doorbell override using ring_config_req msg */
+	D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */
+	D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2   /* MSI configuration */
+} ring_config_subtype_t;
+
+typedef struct ring_config_req {
+	cmn_msg_hdr_t	msg;
+	uint16	subtype;
+	uint16	ring_id;
+	uint32	rsvd;
+	union {
+		uint32  data[6];
+		/** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */
+		bcmpcie_soft_doorbell_t soft_doorbell;
+		/** D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL */
+		bcmpcie_msi_offset_config_t msi_offset;
+	};
+} ring_config_req_t;
+
+/* data structure to use to create on the fly d2h rings */
+typedef struct d2h_ring_create_req {
+	cmn_msg_hdr_t	msg;
+	uint16	ring_id;
+	uint16	ring_type;
+	uint32	flags;
+	bcm_addr64_t	ring_ptr;
+	uint16	max_items;
+	uint16	len_item;
+	uint32	rsvd[3];
+} d2h_ring_create_req_t;
+
+/* data structure to use to create on the fly h2d rings */
+#define MAX_COMPLETION_RING_IDS_ASSOCIATED	4
+typedef struct h2d_ring_create_req {
+	cmn_msg_hdr_t	msg;
+	uint16	ring_id;
+	uint8	ring_type;
+	uint8	n_completion_ids;
+	uint32	flags;
+	bcm_addr64_t	ring_ptr;
+	uint16	max_items;
+	uint16	len_item;
+	uint16	completion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
+	uint32	rsvd;
+} h2d_ring_create_req_t;
+
+typedef struct d2h_ring_config_req {
+	cmn_msg_hdr_t   msg;
+	uint16	d2h_ring_config_subtype;
+	uint16	d2h_ring_id;
+	uint32  d2h_ring_config_data[4];
+	uint32  rsvd[3];
+} d2h_ring_config_req_t;
+
+typedef struct h2d_ring_config_req {
+	cmn_msg_hdr_t   msg;
+	uint16	h2d_ring_config_subtype;
+	uint16	h2d_ring_id;
+	uint32  h2d_ring_config_data;
+	uint32  rsvd[6];
+} h2d_ring_config_req_t;
+
+typedef struct h2d_mailbox_data {
+	cmn_msg_hdr_t   msg;
+	uint32	mail_box_data;
+	uint32  rsvd[7];
+} h2d_mailbox_data_t;
+typedef struct host_timestamp_msg {
+	cmn_msg_hdr_t	msg;
+	uint16		xt_id; /* transaction ID */
+	uint16		input_data_len; /* data len at the host_buf_addr, data in TLVs */
+	uint16		seqnum; /* number of times host captured the timestamp */
+	uint16		rsvd;
+	/* always align on 8 byte boundary */
+	bcm_addr64_t	host_buf_addr;
+	/* rsvd */
+	uint32      rsvd1[4];
+} host_timestamp_msg_t;
+
+/* buffer post message for timestamp events MSG_TYPE_TIMSTAMP_BUFPOST */
+typedef ioctl_resp_evt_buf_post_msg_t ts_buf_post_msg_t;
+
+typedef union ctrl_submit_item {
+	ioctl_req_msg_t			ioctl_req;
+	ioctl_resp_evt_buf_post_msg_t	resp_buf_post;
+	pcie_dma_xfer_params_t		dma_xfer;
+	tx_flowring_create_request_t	flow_create;
+	tx_flowring_delete_request_t	flow_delete;
+	tx_flowring_flush_request_t	flow_flush;
+	ring_config_req_t		ring_config_req;
+	d2h_ring_create_req_t		d2h_create;
+	h2d_ring_create_req_t		h2d_create;
+	d2h_ring_config_req_t		d2h_config;
+	h2d_ring_config_req_t		h2d_config;
+	h2d_mailbox_data_t		h2d_mailbox_data;
+	host_timestamp_msg_t		host_ts;
+	ts_buf_post_msg_t		ts_buf_post;
+	unsigned char			check[H2DRING_CTRL_SUB_ITEMSIZE];
+} ctrl_submit_item_t;
+
+typedef struct info_ring_submit_item {
+	info_buf_post_msg_t		info_buf_post;
+	unsigned char			check[H2DRING_INFO_BUFPOST_ITEMSIZE];
+} info_sumbit_item_t;
+
+/** Control Completion messages (20 bytes) */
+typedef struct compl_msg_hdr {
+	/** status for the completion */
+	int16	status;
+	/** submission flow ring id which generated this status */
+	union {
+	    uint16	ring_id;
+	    uint16	flow_ring_id;
+	};
+} compl_msg_hdr_t;
+
+/** XOR checksum or a magic number to audit DMA done */
+typedef uint32 dma_done_t;
+
+#define MAX_CLKSRC_ID	0xF
+
+typedef struct ts_timestamp_srcid {
+	union {
+		uint32	ts_low; /* time stamp low 32 bits */
+		uint32	reserved; /* If timestamp not used */
+	};
+	union {
+		uint32  ts_high; /* time stamp high 28 bits */
+		union {
+			uint32  ts_high_ext :28; /* time stamp high 28 bits */
+			uint32  clk_id_ext :3; /* clock ID source  */
+			uint32  phase :1; /* Phase bit */
+			dma_done_t	marker_ext;
+		};
+	};
+} ts_timestamp_srcid_t;
+
+typedef ts_timestamp_srcid_t ipc_timestamp_t;
+
+typedef struct ts_timestamp {
+	uint32	low;
+	uint32	high;
+} ts_timestamp_t;
+
+typedef ts_timestamp_t tick_count_64_t;
+typedef ts_timestamp_t ts_timestamp_ns_64_t;
+typedef ts_timestamp_t ts_correction_m_t;
+typedef ts_timestamp_t ts_correction_b_t;
+
+/* completion header status codes */
+#define	BCMPCIE_SUCCESS			0
+#define BCMPCIE_NOTFOUND		1
+#define BCMPCIE_NOMEM			2
+#define BCMPCIE_BADOPTION		3
+#define BCMPCIE_RING_IN_USE		4
+#define BCMPCIE_RING_ID_INVALID		5
+#define BCMPCIE_PKT_FLUSH		6
+#define BCMPCIE_NO_EVENT_BUF		7
+#define BCMPCIE_NO_RX_BUF		8
+#define BCMPCIE_NO_IOCTLRESP_BUF	9
+#define BCMPCIE_MAX_IOCTLRESP_BUF	10
+#define BCMPCIE_MAX_EVENT_BUF		11
+#define BCMPCIE_BAD_PHASE		12
+#define BCMPCIE_INVALID_CPL_RINGID	13
+#define BCMPCIE_RING_TYPE_INVALID	14
+#define BCMPCIE_NO_TS_EVENT_BUF		15
+#define BCMPCIE_MAX_TS_EVENT_BUF	16
+
+/** IOCTL completion response */
+typedef struct ioctl_compl_resp_msg {
+	/** common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/** response buffer len where a host buffer is involved */
+	uint16			resp_len;
+	/** transaction id to pair with a request */
+	uint16			trans_id;
+	/** cmd id */
+	uint32			cmd;
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ioctl_comp_resp_msg_t;
+
+/** IOCTL request acknowledgement */
+typedef struct ioctl_req_ack_msg {
+	/** common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t 	compl_hdr;
+	/** cmd id */
+	uint32			cmd;
+	uint32			rsvd;
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ioctl_req_ack_msg_t;
+
+/** WL event message: send from device to host */
+typedef struct wlevent_req_msg {
+	/** common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/** event data len valid with the event buffer */
+	uint16			event_data_len;
+	/** sequence number */
+	uint16			seqnum;
+	/** rsvd	*/
+	uint32			rsvd;
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} wlevent_req_msg_t;
+
+/** dma xfer complete message */
+typedef struct pcie_dmaxfer_cmplt {
+	/** common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_dmaxfer_cmplt_t;
+
+/** general status message */
+typedef struct pcie_gen_status {
+	/** common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_gen_status_t;
+
+/** ring status message */
+typedef struct pcie_ring_status {
+	/** common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/** message which firmware couldn't decode */
+	uint16			write_idx;
+	uint16			rsvd[3];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_ring_status_t;
+
+typedef struct ring_create_response {
+	cmn_msg_hdr_t		cmn_hdr;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ring_create_response_t;
+
+typedef ring_create_response_t tx_flowring_create_response_t;
+typedef ring_create_response_t h2d_ring_create_response_t;
+typedef ring_create_response_t d2h_ring_create_response_t;
+
+typedef struct tx_flowring_delete_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_delete_response_t;
+
+typedef struct tx_flowring_flush_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_flush_response_t;
+
+/** Common layout of all d2h control messages */
+typedef struct ctrl_compl_msg {
+	/** common message header */
+	cmn_msg_hdr_t       cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t     compl_hdr;
+	uint32          rsvd[2];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t      marker;
+} ctrl_compl_msg_t;
+
+typedef struct ring_config_resp {
+	/** common message header */
+	cmn_msg_hdr_t       cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t     compl_hdr;
+	uint32          rsvd[2];
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t      marker;
+} ring_config_resp_t;
+
+typedef struct d2h_mailbox_data {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t		cmplt;
+	uint32			d2h_mailbox_data;
+	uint32			rsvd[1];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} d2h_mailbox_data_t;
+
+/* dbg buf completion msg: send from device to host */
+typedef struct info_buf_resp {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/* event data len valid with the event buffer */
+	uint16			info_data_len;
+	/* sequence number */
+	uint16			seqnum;
+	/* rsvd	*/
+	uint32			rsvd;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} info_buf_resp_t;
+
+typedef struct info_ring_cpl_item {
+	info_buf_resp_t		info_buf_post;
+	unsigned char		check[D2HRING_INFO_BUFCMPLT_ITEMSIZE];
+} info_cpl_item_t;
+
+typedef struct host_timestamp_msg_cpl {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t cmplt;
+	uint16			xt_id; /* transaction ID */
+	uint16			rsvd;
+	uint32			rsvd1;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t      marker;
+} host_timestamp_msg_cpl_t;
+
+typedef struct fw_timestamp_event_msg {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t cmplt;
+	/* fw captures time stamp info and passes it to the host in TLVs */
+	uint16			buf_len; /* length of the time stamp data copied in host buf */
+	uint16			seqnum; /* number of times fw captured time stamp */
+	uint32			rsvd;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} fw_timestamp_event_msg_t;
+
+typedef union ctrl_completion_item {
+	ioctl_comp_resp_msg_t		ioctl_resp;
+	wlevent_req_msg_t		event;
+	ioctl_req_ack_msg_t		ioct_ack;
+	pcie_dmaxfer_cmplt_t		pcie_xfer_cmplt;
+	pcie_gen_status_t		pcie_gen_status;
+	pcie_ring_status_t		pcie_ring_status;
+	tx_flowring_create_response_t	txfl_create_resp;
+	tx_flowring_delete_response_t	txfl_delete_resp;
+	tx_flowring_flush_response_t	txfl_flush_resp;
+	ctrl_compl_msg_t		ctrl_compl;
+	ring_config_resp_t		ring_config_resp;
+	d2h_mailbox_data_t		d2h_mailbox_data;
+	info_buf_resp_t			dbg_resp;
+	h2d_ring_create_response_t	h2d_ring_create_resp;
+	d2h_ring_create_response_t	d2h_ring_create_resp;
+	host_timestamp_msg_cpl_t	host_ts_cpl;
+	fw_timestamp_event_msg_t	fw_ts_event;
+	unsigned char			ctrl_response[D2HRING_CTRL_CMPLT_ITEMSIZE];
+} ctrl_completion_item_t;
+
+/** H2D Rxpost ring work items */
+typedef struct host_rxbuf_post {
+	/** common message header */
+	cmn_msg_hdr_t   cmn_hdr;
+	/** provided meta data buffer len */
+	uint16		metadata_buf_len;
+	/** provided data buffer len to receive data */
+	uint16		data_buf_len;
+	/** alignment to make the host buffers start on 8 byte boundary */
+	uint32		rsvd;
+	/** provided meta data buffer */
+	bcm_addr64_t	metadata_buf_addr;
+	/** provided data buffer to receive data */
+	bcm_addr64_t	data_buf_addr;
+} host_rxbuf_post_t;
+
+typedef union rxbuf_submit_item {
+	host_rxbuf_post_t	rxpost;
+	unsigned char		check[H2DRING_RXPOST_ITEMSIZE];
+} rxbuf_submit_item_t;
+
+/* D2H Rxcompletion ring work items for IPC rev7 */
+typedef struct host_rxbuf_cmpl {
+	/** common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t	compl_hdr;
+	/**  filled up meta data len */
+	uint16		metadata_len;
+	/** filled up buffer len to receive data */
+	uint16		data_len;
+	/** offset in the host rx buffer where the data starts */
+	uint16		data_offset;
+	/** rx completion flags */
+	uint16		flags;
+	/** rx status */
+	uint32		rx_status_0;
+	uint32		rx_status_1;
+	/** XOR checksum or a magic number to audit DMA done */
+	/* This is for rev6 only. For IPC rev7, this is a reserved field */
+	dma_done_t	marker;
+	/* timestamp */
+	ipc_timestamp_t ts;
+} host_rxbuf_cmpl_t;
+
+typedef union rxbuf_complete_item {
+	host_rxbuf_cmpl_t	rxcmpl;
+	unsigned char		check[D2HRING_RXCMPLT_ITEMSIZE];
+} rxbuf_complete_item_t;
+
+
+typedef struct host_txbuf_post {
+	/** common message header */
+	cmn_msg_hdr_t   cmn_hdr;
+	/** eth header */
+	uint8		txhdr[ETHER_HDR_LEN];
+	/** flags */
+	uint8		flags;
+	/** number of segments */
+	uint8		seg_cnt;
+
+	/** provided meta data buffer for txstatus */
+	bcm_addr64_t	metadata_buf_addr;
+	/** provided data buffer holding the tx payload */
+	bcm_addr64_t	data_buf_addr;
+	/** provided meta data buffer len */
+	uint16		metadata_buf_len;
+	/** provided data buffer len (tx payload length) */
+	uint16		data_len;
+	/** XOR checksum or a magic number to audit DMA done */
+	dma_done_t	marker;
+} host_txbuf_post_t;
+
+#define BCMPCIE_PKT_FLAGS_FRAME_802_3	0x01
+#define BCMPCIE_PKT_FLAGS_FRAME_802_11	0x02
+
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK	0x03	/* Exempt uses 2 bits */
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT	0x02	/* needs to be shifted past other bits */
+
+
+#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT		5
+#define BCMPCIE_PKT_FLAGS_PRIO_MASK		(7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
+#define BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU	0x00
+#define BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT	0x01
+#define BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT	0x02
+#define BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT	0x03
+#define BCMPCIE_PKT_FLAGS_MONITOR_SHIFT		8
+#define BCMPCIE_PKT_FLAGS_MONITOR_MASK		(3 << BCMPCIE_PKT_FLAGS_MONITOR_SHIFT)
+
+/* These are added to fix up compile issues */
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3	BCMPCIE_PKT_FLAGS_FRAME_802_3
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11	BCMPCIE_PKT_FLAGS_FRAME_802_11
+#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT		BCMPCIE_PKT_FLAGS_PRIO_SHIFT
+#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK		BCMPCIE_PKT_FLAGS_PRIO_MASK
+
+
+/* H2D Txpost ring work items */
+typedef union txbuf_submit_item {
+	host_txbuf_post_t	txpost;
+	unsigned char		check[H2DRING_TXPOST_ITEMSIZE];
+} txbuf_submit_item_t;
+
+/* D2H Txcompletion ring work items - extended for IOC rev7 */
+typedef struct host_txbuf_cmpl {
+	/** common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/** completion message header */
+	compl_msg_hdr_t	compl_hdr;
+	union {
+		struct {
+			/** provided meta data len */
+			uint16	metadata_len;
+			/** WLAN side txstatus */
+			uint16	tx_status;
+		};
+		/** XOR checksum or a magic number to audit DMA done */
+		/* This is for rev6 only. For IPC rev7, this is not used */
+		dma_done_t	marker;
+	};
+	/* timestamp */
+	ipc_timestamp_t ts;
+
+} host_txbuf_cmpl_t;
+
+typedef union txbuf_complete_item {
+	host_txbuf_cmpl_t	txcmpl;
+	unsigned char		check[D2HRING_TXCMPLT_ITEMSIZE];
+} txbuf_complete_item_t;
+
+#define BCMPCIE_D2H_METADATA_HDRLEN	4
+#define BCMPCIE_D2H_METADATA_MINLEN	(BCMPCIE_D2H_METADATA_HDRLEN + 4)
+
+/** ret buf struct */
+typedef struct ret_buf_ptr {
+	uint32 low_addr;
+	uint32 high_addr;
+} ret_buf_t;
+
+
+#ifdef PCIE_API_REV1
+
+/* ioctl specific hdr */
+typedef struct ioctl_hdr {
+	uint16 		cmd;
+	uint16		retbuf_len;
+	uint32		cmd_id;
+} ioctl_hdr_t;
+
+typedef struct ioctlptr_hdr {
+	uint16 		cmd;
+	uint16		retbuf_len;
+	uint16 		buflen;
+	uint16		rsvd;
+	uint32		cmd_id;
+} ioctlptr_hdr_t;
+
+#else /* PCIE_API_REV1 */
+
+typedef struct ioctl_req_hdr {
+	uint32		pkt_id;	/**< Packet ID */
+	uint32 		cmd;	/**< IOCTL ID */
+	uint16		retbuf_len;
+	uint16 		buflen;
+	uint16		xt_id;	/**< transaction ID */
+	uint16		rsvd[1];
+} ioctl_req_hdr_t;
+
+#endif /* PCIE_API_REV1 */
+
+
+/** Complete msgbuf hdr for ioctl from host to dongle */
+typedef struct ioct_reqst_hdr {
+	cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+	ioctl_hdr_t ioct_hdr;
+#else
+	ioctl_req_hdr_t ioct_hdr;
+#endif
+	ret_buf_t ret_buf;
+} ioct_reqst_hdr_t;
+
+typedef struct ioctptr_reqst_hdr {
+	cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+	ioctlptr_hdr_t ioct_hdr;
+#else
+	ioctl_req_hdr_t ioct_hdr;
+#endif
+	ret_buf_t ret_buf;
+	ret_buf_t ioct_buf;
+} ioctptr_reqst_hdr_t;
+
+/** ioctl response header */
+typedef struct ioct_resp_hdr {
+	cmn_msg_hdr_t   msg;
+#ifdef PCIE_API_REV1
+	uint32	cmd_id;
+#else
+	uint32	pkt_id;
+#endif
+	uint32	status;
+	uint32	ret_len;
+	uint32  inline_data;
+#ifdef PCIE_API_REV1
+#else
+	uint16	xt_id;	/**< transaction ID */
+	uint16	rsvd[1];
+#endif
+} ioct_resp_hdr_t;
+
+/* ioct resp header used in dongle */
+/* ret buf hdr will be stripped off inside dongle itself */
+typedef struct msgbuf_ioctl_resp {
+	ioct_resp_hdr_t	ioct_hdr;
+	ret_buf_t	ret_buf;	/**< ret buf pointers */
+} msgbuf_ioct_resp_t;
+
+/** WL event hdr info */
+typedef struct wl_event_hdr {
+	cmn_msg_hdr_t   msg;
+	uint16 event;
+	uint8 flags;
+	uint8 rsvd;
+	uint16 retbuf_len;
+	uint16 rsvd1;
+	uint32 rxbufid;
+} wl_event_hdr_t;
+
+#define TXDESCR_FLOWID_PCIELPBK_1	0xFF
+#define TXDESCR_FLOWID_PCIELPBK_2	0xFE
+
+typedef struct txbatch_lenptr_tup {
+	uint32 pktid;
+	uint16 pktlen;
+	uint16 rsvd;
+	ret_buf_t	ret_buf;	/**< ret buf pointers */
+} txbatch_lenptr_tup_t;
+
+typedef struct txbatch_cmn_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint8 priority;
+	uint8 hdrlen;
+	uint8 pktcnt;
+	uint8 flowid;
+	uint8 txhdr[ETHER_HDR_LEN];
+	uint16 rsvd;
+} txbatch_cmn_msghdr_t;
+
+typedef struct txbatch_msghdr {
+	txbatch_cmn_msghdr_t txcmn;
+	txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */
+} txbatch_msghdr_t;
+
+/* TX desc posting header */
+typedef struct tx_lenptr_tup {
+	uint16 pktlen;
+	uint16 rsvd;
+	ret_buf_t	ret_buf;	/**< ret buf pointers */
+} tx_lenptr_tup_t;
+
+typedef struct txdescr_cmn_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint8 priority;
+	uint8 hdrlen;
+	uint8 descrcnt;
+	uint8 flowid;
+	uint32 pktid;
+} txdescr_cmn_msghdr_t;
+
+typedef struct txdescr_msghdr {
+	txdescr_cmn_msghdr_t txcmn;
+	uint8 txhdr[ETHER_HDR_LEN];
+	uint16 rsvd;
+	tx_lenptr_tup_t tx_tup[0];	/**< Based on descriptor count */
+} txdescr_msghdr_t;
+
+/** Tx status header info */
+typedef struct txstatus_hdr {
+	cmn_msg_hdr_t   msg;
+	uint32 pktid;
+} txstatus_hdr_t;
+
+/** RX bufid-len-ptr tuple */
+typedef struct rx_lenptr_tup {
+	uint32 rxbufid;
+	uint16 len;
+	uint16 rsvd2;
+	ret_buf_t	ret_buf;	/**< ret buf pointers */
+} rx_lenptr_tup_t;
+
+/** Rx descr Post hdr info */
+typedef struct rxdesc_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint16 rsvd0;
+	uint8 rsvd1;
+	uint8 descnt;
+	rx_lenptr_tup_t rx_tup[0];
+} rxdesc_msghdr_t;
+
+/** RX complete tuples */
+typedef struct rxcmplt_tup {
+	uint16 retbuf_len;
+	uint16 data_offset;
+	uint32 rxstatus0;
+	uint32 rxstatus1;
+	uint32 rxbufid;
+} rxcmplt_tup_t;
+
+/** RX complete message hdr */
+typedef struct rxcmplt_hdr {
+	cmn_msg_hdr_t   msg;
+	uint16 rsvd0;
+	uint16 rxcmpltcnt;
+	rxcmplt_tup_t rx_tup[0];
+} rxcmplt_hdr_t;
+
+typedef struct hostevent_hdr {
+	cmn_msg_hdr_t   msg;
+	uint32 evnt_pyld;
+} hostevent_hdr_t;
+
+typedef struct dma_xfer_params {
+	uint32 src_physaddr_hi;
+	uint32 src_physaddr_lo;
+	uint32 dest_physaddr_hi;
+	uint32 dest_physaddr_lo;
+	uint32 len;
+	uint32 srcdelay;
+	uint32 destdelay;
+} dma_xfer_params_t;
+
+enum {
+	HOST_EVENT_CONS_CMD = 1
+};
+
+/* defines for flags */
+#define MSGBUF_IOC_ACTION_MASK 0x1
+
+#define MAX_SUSPEND_REQ 15
+
+typedef struct tx_idle_flowring_suspend_request {
+	cmn_msg_hdr_t   msg;
+	uint16	ring_id[MAX_SUSPEND_REQ];      /* ring Id's */
+	uint16	num;	/* number of flowid's to suspend */
+} tx_idle_flowring_suspend_request_t;
+
+typedef struct tx_idle_flowring_suspend_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t		cmplt;
+	uint32			rsvd[2];
+	dma_done_t		marker;
+} tx_idle_flowring_suspend_response_t;
+
+typedef struct tx_idle_flowring_resume_request {
+	cmn_msg_hdr_t   msg;
+	uint16	flow_ring_id;
+	uint16	reason;
+	uint32	rsvd[7];
+} tx_idle_flowring_resume_request_t;
+
+typedef struct tx_idle_flowring_resume_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t		cmplt;
+	uint32			rsvd[2];
+	dma_done_t		marker;
+} tx_idle_flowring_resume_response_t;
+
+/* timesync related additions */
+
+typedef struct _bcm_xtlv {
+	uint16		id; /* TLV identifier */
+	uint16		len; /* TLV length in bytes */
+} _bcm_xtlv_t;
+
+#define BCMMSGBUF_FW_CLOCK_INFO_TAG		0
+#define BCMMSGBUF_HOST_CLOCK_INFO_TAG		1
+#define BCMMSGBUF_HOST_CLOCK_SELECT_TAG		2
+#define BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG	3
+#define BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG	4
+#define BCMMSGBUF_MAX_TSYNC_TAG			5
+
+/* Flags in fw clock info TLV */
+#define CAP_DEVICE_TS		(1 << 0)
+#define CAP_CORRECTED_TS	(1 << 1)
+#define TS_CLK_ACTIVE		(1 << 2)
+
+typedef struct ts_fw_clock_info {
+	_bcm_xtlv_t  xtlv; /* BCMMSGBUF_FW_CLOCK_INFO_TAG */
+	ts_timestamp_srcid_t  ts; /* tick count */
+	uchar		clk_src[4]; /* clock source acronym ILP/AVB/TSF */
+	uint32		nominal_clock_freq;
+	uint32		reset_cnt;
+	uint8		flags;
+	uint8		rsvd[3];
+} ts_fw_clock_info_t;
+
+typedef struct ts_host_clock_info {
+	_bcm_xtlv_t  xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */
+	tick_count_64_t ticks; /* 64 bit host tick counter */
+	ts_timestamp_ns_64_t ns; /* 64 bit host time in nano seconds */
+} ts_host_clock_info_t;
+
+typedef struct ts_host_clock_sel {
+	_bcm_xtlv_t	xtlv; /* BCMMSGBUF_HOST_CLOCK_SELECT_TAG */
+	uint32		seqnum; /* number of times GPIO time sync toggled */
+	uint8		min_clk_idx; /* lowest clock identifier configured for packet time stamping */
+	uint8		max_clk_idx; /* highest clock identifier configured for packet time stamping */
+	uint16		rsvd[1];
+} ts_host_clock_sel_t;
+
+typedef struct ts_d2h_clock_correction {
+	_bcm_xtlv_t		xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */
+	uint8			clk_id; /* clock source in the device */
+	uint8			rsvd[3];
+	ts_correction_m_t	m;	/* y  = 'm' x + b */
+	ts_correction_b_t	b;	/* y  = 'm' x + 'b' */
+} ts_d2h_clock_correction_t;
+
+typedef struct ts_host_timestamping_config {
+	_bcm_xtlv_t		xtlv; /* BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG */
+	/* time period to capture the device time stamp and toggle WLAN_TIME_SYNC_GPIO */
+	uint16			period_ms;
+	uint8			flags;
+	uint8			rsvd;
+	uint32			reset_cnt;
+} ts_host_timestamping_config_t;
+
+/* Flags in host timestamping config TLV */
+#define FLAG_HOST_RESET		(1 << 0)
+
+#endif /* _bcmmsgbuf_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmnvram.h b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
new file mode 100644
index 0000000..15b58568
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
@@ -0,0 +1,329 @@
+/*
+ * NVRAM variable manipulation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmnvram.h 613043 2016-01-16 00:24:13Z $
+ */
+
+#ifndef _bcmnvram_h_
+#define _bcmnvram_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+struct nvram_header {
+	uint32 magic;
+	uint32 len;
+	uint32 crc_ver_init;	/* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+	uint32 config_refresh;	/* 0:15 sdram_config, 16:31 sdram_refresh */
+	uint32 config_ncdl;	/* ncdl values for memc */
+};
+
+struct nvram_tuple {
+	char *name;
+	char *value;
+	struct nvram_tuple *next;
+};
+
+/*
+ * Get default value for an NVRAM variable
+ */
+extern char *nvram_default_get(const char *name);
+/*
+ * validate/restore all per-interface related variables
+ */
+extern void nvram_validate_all(char *prefix, bool restore);
+
+/*
+ * restore specific per-interface variable
+ */
+extern void nvram_restore_var(char *prefix, char *name);
+
+/*
+ * Initialize NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern int nvram_init(void *sih);
+extern int nvram_deinit(void *sih);
+
+
+/*
+ * Append a chunk of nvram variables to the global list
+ */
+extern int nvram_append(void *si, char *vars, uint varsz);
+
+extern void nvram_get_global_vars(char **varlst, uint *varsz);
+
+
+/*
+ * Check for reset button press for restoring factory defaults.
+ */
+extern int nvram_reset(void *sih);
+
+/*
+ * Disable NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern void nvram_exit(void *sih);
+
+/*
+ * Get the value of an NVRAM variable. The pointer returned may be
+ * invalid after a set.
+ * @param	name	name of variable to get
+ * @return	value of variable or NULL if undefined
+ */
+extern char * nvram_get(const char *name);
+
+/*
+ * Get the value of an NVRAM variable. The pointer returned may be
+ * invalid after a set.
+ * @param	name	name of variable to get
+ * @param	bit	bit value to get
+ * @return	value of variable or NULL if undefined
+ */
+extern char * nvram_get_bitflag(const char *name, const int bit);
+
+/*
+ * Read the reset GPIO value from the nvram and set the GPIO
+ * as input
+ */
+extern int nvram_resetgpio_init(void *sih);
+
+/*
+ * Get the value of an NVRAM variable.
+ * @param	name	name of variable to get
+ * @return	value of variable, or the empty string "" if undefined
+ */
+static INLINE char *
+nvram_safe_get(const char *name)
+{
+	char *p = nvram_get(name);
+	return p ? p : "";
+}
+
+/*
+ * Match an NVRAM variable.
+ * @param	name	name of variable to match
+ * @param	match	value to compare against value of variable
+ * @return	TRUE if variable is defined and its value is string equal
+ *		to match or FALSE otherwise
+ */
+static INLINE int
+nvram_match(const char *name, const char *match)
+{
+	const char *value = nvram_get(name);
+
+	/* In nvramstubs.c builds, nvram_get() is defined as returning zero,
+	* so the return line below never executes the strcmp(),
+	* resulting in 'match' being an unused parameter.
+	* Make a ref to 'match' to quiet the compiler warning.
+	*/
+
+	BCM_REFERENCE(match);
+
+	return (value && !strcmp(value, match));
+}
+
+/*
+ * Match an NVRAM variable.
+ * @param	name	name of variable to match
+ * @param	bit	bit value to get
+ * @param	match	value to compare against value of variable
+ * @return	TRUE if variable is defined and its value is string equal
+ *		to match or FALSE otherwise
+ */
+static INLINE int
+nvram_match_bitflag(const char *name, const int bit, const char *match)
+{
+	const char *value = nvram_get_bitflag(name, bit);
+	BCM_REFERENCE(match);
+	return (value && !strcmp(value, match));
+}
+
+/*
+ * Inversely match an NVRAM variable.
+ * @param	name	name of variable to match
+ * @param	match	value to compare against value of variable
+ * @return	TRUE if variable is defined and its value is not string
+ *		equal to invmatch or FALSE otherwise
+ */
+static INLINE int
+nvram_invmatch(const char *name, const char *invmatch)
+{
+	const char *value = nvram_get(name);
+
+	/* In nvramstubs.c builds, nvram_get() is defined as returning zero,
+	* so the return line below never executes the strcmp(),
+	* resulting in 'invmatch' being an unused parameter.
+	* Make a ref to 'invmatch' to quiet the compiler warning.
+	*/
+
+	BCM_REFERENCE(invmatch);
+
+	return (value && strcmp(value, invmatch));
+}
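
For illustration only (not part of the patch): a short usage sketch for the accessors above; "wl0_ssid" and "wl0_mode" are invented variable names.

	static void example_nvram_usage(void)
	{
		const char *ssid = nvram_safe_get("wl0_ssid"); /* never NULL, may be "" */

		if (nvram_match("wl0_mode", "ap")) {
			/* configure as an access point using 'ssid' */
		} else if (nvram_invmatch("wl0_mode", "disabled")) {
			/* any other enabled mode */
		}
		(void)ssid;
	}
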
+
+/*
+ * Set the value of an NVRAM variable. The name and value strings are
+ * copied into private storage. Pointers to previously set values
+ * may become invalid. The new value may be immediately
+ * retrieved but will not be permanently stored until a commit.
+ * @param	name	name of variable to set
+ * @param	value	value of variable
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_set(const char *name, const char *value);
+
+/*
+ * Set the value of an NVRAM variable. The name and value strings are
+ * copied into private storage. Pointers to previously set values
+ * may become invalid. The new value may be immediately
+ * retrieved but will not be permanently stored until a commit.
+ * @param	name	name of variable to set
+ * @param	bit	bit value to set
+ * @param	value	value of variable
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_set_bitflag(const char *name, const int bit, const int value);
+/*
+ * Unset an NVRAM variable. Pointers to previously set values
+ * remain valid until a set.
+ * @param	name	name of variable to unset
+ * @return	0 on success and errno on failure
+ * NOTE: use nvram_commit to commit this change to flash.
+ */
+extern int nvram_unset(const char *name);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @param   nvram_corrupt    true to corrupt nvram, false otherwise.
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_commit_internal(bool nvram_corrupt);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_commit(void);
+
+/*
+ * Get all NVRAM variables (format name=value\0 ... \0\0).
+ * @param	buf	buffer to store variables
+ * @param	count	size of buffer in bytes
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_getall(char *nvram_buf, int count);
+
+/*
+ * returns the crc value of the nvram
+ * @param	nvh	nvram header pointer
+ */
+uint8 nvram_calc_crc(struct nvram_header * nvh);
+
+extern int nvram_space;
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* The NVRAM version number stored as an NVRAM variable */
+#define NVRAM_SOFTWARE_VERSION	"1"
+
+#define NVRAM_MAGIC		0x48534C46	/* 'FLSH' */
+#define NVRAM_CLEAR_MAGIC	0x0
+#define NVRAM_INVALID_MAGIC	0xFFFFFFFF
+#define NVRAM_VERSION		1
+#define NVRAM_HEADER_SIZE	20
+/* This definition is for precommit staging, and will be removed */
+#define NVRAM_SPACE		0x8000
+/* For CFE builds this gets passed in thru the makefile */
+#ifndef MAX_NVRAM_SPACE
+#define MAX_NVRAM_SPACE		0x10000
+#endif
+#define DEF_NVRAM_SPACE		0x8000
+#define ROM_ENVRAM_SPACE	0x1000
+#define NVRAM_LZMA_MAGIC	0x4c5a4d41	/* 'LZMA' */
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+#define NVRAM_CRC_START_POSITION	9 /* magic, len, crc8 to be skipped */
+#define NVRAM_CRC_VER_MASK	0xffffff00 /* for crc_ver_init */
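
For illustration only (not part of the patch): a sketch of splitting nvram_header.crc_ver_init into the fields documented above (bits 0:7 crc, 8:15 version, 16:31 sdram_init).

	static void example_split_crc_ver_init(const struct nvram_header *nvh,
		uint8 *crc, uint8 *ver, uint16 *sdram_init)
	{
		*crc        = (uint8)(nvh->crc_ver_init & 0xFF);
		*ver        = (uint8)((nvh->crc_ver_init >> 8) & 0xFF);
		*sdram_init = (uint16)(nvh->crc_ver_init >> 16);
	}
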
+
+/* Offsets to embedded nvram area */
+#define NVRAM_START_COMPRESSED	0x400
+#define NVRAM_START		0x1000
+
+#define BCM_JUMBO_NVRAM_DELIMIT '\n'
+#define BCM_JUMBO_START "Broadcom Jumbo Nvram file"
+
+
+#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \
+	defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__))
+#define IMAGE_SIZE "image_size"
+#define BOOTPARTITION "bootpartition"
+#define IMAGE_BOOT BOOTPARTITION
+#define PARTIALBOOTS "partialboots"
+#define MAXPARTIALBOOTS "maxpartialboots"
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#endif
+
+#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \
+	defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__))
+/* Shared by all: CFE, Linux Kernel, and Ap */
+#define IMAGE_BOOT "image_boot"
+#define BOOTPARTITION IMAGE_BOOT
+/* CFE variables */
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_SIZE "image_size"
+
+/* CFE and Linux Kernel shared variables */
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+
+/* Linux application variables */
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#define POLICY_TOGGLE "toggle"
+#define LINUX_PART_TO_FLASH "linux_to_flash"
+#define LINUX_FLASH_POLICY "linux_flash_policy"
+
+#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */
+
+#endif /* _bcmnvram_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcie.h b/drivers/net/wireless/bcmdhd/include/bcmpcie.h
new file mode 100644
index 0000000..114924c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcie.h
@@ -0,0 +1,440 @@
+/*
+ * Broadcom PCIE
+ * Software-specific definitions shared between device and host side
+ * Explains the shared area between host and dongle
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmpcie.h 678914 2017-01-11 15:34:26Z $
+ */
+
+
+#ifndef	_bcmpcie_h_
+#define	_bcmpcie_h_
+
+#include <bcmutils.h>
+
+#define ADDR_64(x)			(x.addr)
+#define HIGH_ADDR_32(x)     ((uint32) (((sh_addr_t) x).high_addr))
+#define LOW_ADDR_32(x)      ((uint32) (((sh_addr_t) x).low_addr))
+
+typedef struct {
+	uint32 low_addr;
+	uint32 high_addr;
+} sh_addr_t;
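
For illustration only (not part of the patch): a sketch showing how a 64-bit host address is carried as two 32-bit halves in sh_addr_t; the caller is assumed to have already split the address.

	static sh_addr_t example_make_sh_addr(uint32 lo, uint32 hi)
	{
		sh_addr_t addr;

		addr.low_addr = lo;    /* low 32 bits of the host address */
		addr.high_addr = hi;   /* high 32 bits of the host address */
		return addr;
	}
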
+
+
+/* May be overridden by 43xxxxx-roml.mk */
+#if !defined(BCMPCIE_MAX_TX_FLOWS)
+#define BCMPCIE_MAX_TX_FLOWS	40
+#endif /* ! BCMPCIE_MAX_TX_FLOWS */
+
+#define PCIE_SHARED_VERSION_7		0x00007
+#define PCIE_SHARED_VERSION_6		0x00006 /* rev6 is compatible with rev 5 */
+#define PCIE_SHARED_VERSION_5		0x00005 /* rev6 is compatible with rev 5 */
+/**
+ * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that
+ * is located in device memory.
+ */
+#define PCIE_SHARED_VERSION		PCIE_SHARED_VERSION_7
+#define PCIE_SHARED_VERSION_MASK	0x000FF
+#define PCIE_SHARED_ASSERT_BUILT	0x00100
+#define PCIE_SHARED_ASSERT		0x00200
+#define PCIE_SHARED_TRAP		0x00400
+#define PCIE_SHARED_IN_BRPT		0x00800
+#define PCIE_SHARED_SET_BRPT		0x01000
+#define PCIE_SHARED_PENDING_BRPT	0x02000
+/* BCMPCIE_SUPPORT_TX_PUSH_RING		0x04000 obsolete */
+#define PCIE_SHARED_EVT_SEQNUM		0x08000
+#define PCIE_SHARED_DMA_INDEX		0x10000
+
+/* WAR: D11 txstatus through unused status field of PCIe completion header */
+#define PCIE_SHARED_D2H_D11_TX_STATUS  0x40000000	/* using flags2 in shared area */
+#define PCIE_SHARED_H2D_D11_TX_STATUS  0x80000000	/* using flags2 in shared area */
+
+/**
+ * There are host types where a device interrupt can 'race ahead' of data written by the device into
+ * host memory. The dongle can avoid this condition using a variety of techniques (read barrier,
+ * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately
+ * these techniques have drawbacks on router platforms. For these platforms, it was decided to not
+ * avoid the condition, but to detect the condition instead and act on it.
+ * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM
+ */
+#define PCIE_SHARED_D2H_SYNC_SEQNUM     0x20000
+#define PCIE_SHARED_D2H_SYNC_XORCSUM    0x40000
+#define PCIE_SHARED_D2H_SYNC_MODE_MASK \
+	(PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM)
+#define PCIE_SHARED_IDLE_FLOW_RING		0x80000
+#define PCIE_SHARED_2BYTE_INDICES       0x100000
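
For illustration only (not part of the patch): a sketch of how a host might interpret the D2H sync mode advertised in the shared flags word above; the descriptive strings are local to this example.

	static const char *example_d2h_sync_mode(uint32 shared_flags)
	{
		switch (shared_flags & PCIE_SHARED_D2H_SYNC_MODE_MASK) {
		case PCIE_SHARED_D2H_SYNC_SEQNUM:
			return "modulo-253 sequence number";
		case PCIE_SHARED_D2H_SYNC_XORCSUM:
			return "XOR checksum over the work item";
		default:
			return "no explicit D2H sync";
		}
	}
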
+
+#define PCIE_SHARED2_EXTENDED_TRAP_DATA	0x00000001	/* using flags2 in shared area */
+
+/* dongle supports fatal buf log collection */
+#define PCIE_SHARED_FATAL_LOGBUG_VALID 0x200000
+
+/* Implicit DMA with corerev 19 and after */
+#define PCIE_SHARED_IDMA		0x400000
+
+/* MSI support */
+#define PCIE_SHARED_D2H_MSI_MULTI_MSG   0x800000
+
+/* IFRM with corerev 19 and after */
+#define PCIE_SHARED_IFRM		0x1000000
+
+/**
+ * From Rev6 and above, suspend/resume can be done using two handshake methods.
+ * 1. Using ctrl post/ctrl cmpl messages (Default rev6)
+ * 2. Using Mailbox data (old method as used in rev5)
+ * This shared flag indicates whether to override the rev6 default method and use mailbox for
+ * suspend/resume.
+ */
+#define PCIE_SHARED_USE_MAILBOX		0x2000000
+
+/* Firmware compiled for mfgbuild purposes */
+#define PCIE_SHARED_MFGBUILD_FW		0x4000000
+
+/* Firmware could use DB0 value as host timestamp */
+#define PCIE_SHARED_TIMESTAMP_DB0	0x8000000
+/* Firmware could use Hostready (IPC rev7) */
+#define PCIE_SHARED_HOSTRDY_SUPPORT	0x10000000
+
+/* When set, firmware does not support the OOB Device Wake based DS protocol */
+#define PCIE_SHARED_NO_OOB_DW	0x20000000
+
+/* When set, firmware supports the Inband DS protocol */
+#define PCIE_SHARED_INBAND_DS	0x40000000
+
+/* Implicit DMA WAR for 4347B0 PCIe memory retention */
+#define PCIE_SHARED_IDMA_RETENTION_DS	0x80000000
+
+#define PCIE_SHARED_D2H_MAGIC		0xFEDCBA09
+#define PCIE_SHARED_H2D_MAGIC		0x12345678
+
+/**
+ * Message rings convey messages between host and device. They are unidirectional, and are located
+ * in host memory.
+ *
+ * This is the minimal set of message rings, known as 'common message rings':
+ */
+#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT		0
+#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT		1
+#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE		2
+#define BCMPCIE_D2H_MSGRING_TX_COMPLETE			3
+#define BCMPCIE_D2H_MSGRING_RX_COMPLETE			4
+#define BCMPCIE_COMMON_MSGRING_MAX_ID			4
+
+#define BCMPCIE_H2D_COMMON_MSGRINGS			2
+#define BCMPCIE_D2H_COMMON_MSGRINGS			3
+#define BCMPCIE_COMMON_MSGRINGS				5
+
+#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \
+	(BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows))
+
+/* different ring types */
+#define BCMPCIE_H2D_RING_TYPE_CTRL_SUBMIT		0x1
+#define BCMPCIE_H2D_RING_TYPE_TXFLOW_RING		0x2
+#define BCMPCIE_H2D_RING_TYPE_RXBUFPOST			0x3
+#define BCMPCIE_H2D_RING_TYPE_TXSUBMIT			0x4
+#define BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT		0x5
+
+#define BCMPCIE_D2H_RING_TYPE_CTRL_CPL			0x1
+#define BCMPCIE_D2H_RING_TYPE_TX_CPL			0x2
+#define BCMPCIE_D2H_RING_TYPE_RX_CPL			0x3
+#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL		0x4
+#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE		0x5
+
+/**
+ * H2D and D2H, WR and RD index, are maintained in the following arrays:
+ * - Array of all H2D WR Indices
+ * - Array of all H2D RD Indices
+ * - Array of all D2H WR Indices
+ * - Array of all D2H RD Indices
+ *
+ * The offsets of the WR or RD indices (for common rings) in these arrays are
+ * listed below. Arrays ARE NOT indexed by a ring's id.
+ *
+ * D2H common rings WR and RD index start from 0, even though their ringids
+ * start from BCMPCIE_H2D_COMMON_MSGRINGS
+ */
+
+#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id)
+
+enum h2dring_idx {
+	/* H2D common rings */
+	BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX =
+		BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT),
+	BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX =
+		BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT),
+
+	/* First TxPost's WR or RD index starts after all H2D common rings */
+	BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START =
+		BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS)
+};
+
+#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \
+	((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+enum d2hring_idx {
+	/* D2H Common Rings */
+	BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX =
+		BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE),
+	BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX =
+		BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE),
+	BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX =
+		BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE)
+};
+
+/**
+ * Macros for managing arrays of RD WR indices:
+ * rw_index_sz:
+ *    - in dongle, rw_index_sz is known at compile time
+ *    - in host/DHD, rw_index_sz is derived from advertised pci_shared flags
+ *
+ *  ring_idx: See h2dring_idx and d2hring_idx
+ */
+
+/** Offset of a RD or WR index in H2D or D2H indices array */
+#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \
+	((rw_index_sz) * (ring_idx))
+
+/** Fetch the address of RD or WR index in H2D or D2H indices array */
+#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \
+	(void *)((uint32)(indices_array_base) + \
+	BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx)))
+
+/** H2D DMA Indices array size: given max flow rings */
+#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \
+	((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows))
+
+/** D2H DMA Indices array size */
+#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \
+	((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS)
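
For illustration only (not part of the patch): a sketch of locating one ring's RD index inside a host-side indices array using the macros above; the array base and per-index size are assumed to come from the negotiated shared area.

	static void *example_rx_cpl_rd_index_addr(void *d2h_rd_array_base,
		uint8 rw_index_sz)
	{
		uint16 ring_idx = BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX;

		/* plain pointer arithmetic rather than BCMPCIE_RW_INDEX_ADDR, so the
		 * sketch also works on 64-bit hosts
		 */
		return (void *)((uint8 *)d2h_rd_array_base +
			BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx));
	}
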
+
+/**
+ * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used
+ * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated
+ * both in host as well as device memory.
+ */
+typedef struct ring_mem {
+	uint16		idx;       /* ring id */
+	uint8		type;
+	uint8		rsvd;
+	uint16		max_item;  /* Max number of items in flow ring */
+	uint16		len_items; /* Items are fixed size. Length in bytes of one item */
+	sh_addr_t	base_addr; /* 64 bits address, either in host or device memory */
+} ring_mem_t;
+
+
+/**
+ * Per flow ring, information is maintained in device memory, eg at what address the ringmem and
+ * ringstate are located. The flow ring itself can be instantiated in either host or device memory.
+ *
+ * Perhaps this type should be renamed to make clear that it resides in device memory only.
+ */
+typedef struct ring_info {
+	uint32		ringmem_ptr; /* ring mem location in dongle memory */
+
+	/* Following arrays are indexed using h2dring_idx and d2hring_idx, and not
+	 * by a ringid.
+	 */
+
+	/* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */
+	uint32		h2d_w_idx_ptr; /* Array of all H2D ring's WR indices */
+	uint32		h2d_r_idx_ptr; /* Array of all H2D ring's RD indices */
+	uint32		d2h_w_idx_ptr; /* Array of all D2H ring's WR indices */
+	uint32		d2h_r_idx_ptr; /* Array of all D2H ring's RD indices */
+
+	/* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host.
+	 * Host may directly fetch WR and RD indices from these host-side arrays.
+	 *
+	 * 64bit ptr to arrays of WR or RD indices for all rings in host memory.
+	 */
+	sh_addr_t	h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */
+	sh_addr_t	h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */
+	sh_addr_t	d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */
+	sh_addr_t	d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */
+
+	uint16		max_tx_flowrings; /* maximum number of H2D rings: common + flow */
+	uint16		max_submission_queues; /* maximum number of H2D rings: common + flow */
+	uint16		max_completion_rings; /* maximum number of D2H completion rings */
+	uint16		max_vdevs; /* max number of virtual interfaces supported */
+
+	sh_addr_t	ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */
+} ring_info_t;
+
+/**
+ * A structure located in TCM that is shared between host and device, primarily used during
+ * initialization.
+ */
+typedef struct {
+	/** shared area version captured at flags 7:0 */
+	uint32	flags;
+
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/**< Address of hnd_cons_t */
+
+	uint32  msgtrace_addr;
+
+	uint32  fwid;
+
+	/* Used for debug/flow control */
+	uint16  total_lfrag_pkt_cnt;
+	uint16  max_host_rxbufs; /* rsvd in spec */
+
+	uint32 dma_rxoffset; /* rsvd in spec */
+
+	/** these will be used for sleep request/ack, d3 req/ack */
+	uint32  h2d_mb_data_ptr;
+	uint32  d2h_mb_data_ptr;
+
+	/* information pertinent to host IPC/msgbuf channels */
+	/** location in the TCM memory which has the ring_info */
+	uint32	rings_info_ptr;
+
+	/** block of host memory for the scratch buffer */
+	uint32		host_dma_scratch_buffer_len;
+	sh_addr_t	host_dma_scratch_buffer;
+
+	/** block of host memory for the dongle to push the status into */
+	uint32		device_rings_stsblk_len;
+	sh_addr_t	device_rings_stsblk;
+
+	uint32	buzz_dbg_ptr;	/* BUZZZ state format strings and trace buffer */
+
+	/* rev6 compatible changes */
+	uint32          flags2;
+	uint32          host_cap;
+
+	/* location in the host address space to write trap indication.
+	* At this point for the current rev of the spec, firmware will
+	* support only indications to 32 bit host addresses.
+	*/
+	sh_addr_t       host_trap_addr;
+
+	/* location for host fatal error log buffer start address */
+	uint32		device_fatal_logbuf_start;
+
+	/* location in host memory for offloaded modules */
+	sh_addr_t   hoffload_addr;
+} pciedev_shared_t;
+
+extern pciedev_shared_t pciedev_shared;
+
+/* host capabilities */
+#define HOSTCAP_PCIEAPI_VERSION_MASK		0x000000FF
+#define HOSTCAP_H2D_VALID_PHASE			0x00000100
+#define HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE	0x00000200
+#define HOSTCAP_H2D_ENABLE_HOSTRDY		0x00000400
+#define HOSTCAP_DB0_TIMESTAMP			0x00000800
+#define HOSTCAP_DS_NO_OOB_DW			0x00001000
+#define HOSTCAP_DS_INBAND_DW			0x00002000
+#define HOSTCAP_H2D_IDMA			0x00004000
+#define HOSTCAP_H2D_IFRM			0x00008000
+#define HOSTCAP_H2D_DAR				0x00010000
+#define HOSTCAP_EXTENDED_TRAP_DATA		0x00020000
+#define HOSTCAP_TXSTATUS_METADATA		0x00040000
+
+/**
+ * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware
+ * support.
+ */
+
+/* H2D mail box Data */
+#define H2D_HOST_D3_INFORM	0x00000001
+#define H2D_HOST_DS_ACK		0x00000002
+#define H2D_HOST_DS_NAK		0x00000004
+#define H2D_HOST_CONS_INT	0x80000000	/**< h2d int for console cmds  */
+#define H2D_FW_TRAP		0x20000000	/**< h2d force TRAP */
+#define H2D_HOST_D0_INFORM_IN_USE	0x00000008
+#define H2D_HOST_D0_INFORM	0x00000010
+#define H2D_HOST_IDMA_INITED	0x00000080
+#define H2DMB_DS_HOST_SLEEP_INFORM H2D_HOST_D3_INFORM
+#define H2DMB_DS_DEVICE_SLEEP_ACK  H2D_HOST_DS_ACK
+#define H2DMB_DS_DEVICE_SLEEP_NAK  H2D_HOST_DS_NAK
+#define H2DMB_D0_INFORM_IN_USE     H2D_HOST_D0_INFORM_IN_USE
+#define H2DMB_D0_INFORM            H2D_HOST_D0_INFORM
+#define H2DMB_DS_ACTIVE            0x00000020
+#define H2DMB_DS_DEVICE_WAKE       0x00000040
+#define H2DMB_FW_TRAP              H2D_FW_TRAP
+#define H2DMB_HOST_CONS_INT        H2D_HOST_CONS_INT
+#define H2DMB_DS_DEVICE_WAKE_ASSERT		H2DMB_DS_DEVICE_WAKE
+#define H2DMB_DS_DEVICE_WAKE_DEASSERT	H2DMB_DS_ACTIVE
+
+/* D2H mail box Data */
+#define D2H_DEV_D3_ACK		0x00000001
+#define D2H_DEV_DS_ENTER_REQ	0x00000002
+#define D2H_DEV_DS_EXIT_NOTE	0x00000004
+#define D2H_DEV_FWHALT		0x10000000
+#define D2H_DEV_EXT_TRAP_DATA   0x20000000
+#define D2H_DEV_IDMA_INITED	0x00000010
+#define D2H_FWTRAP_MASK		0x0000001F	/* Adding maskbits for TRAP information */
+#define D2HMB_DS_HOST_SLEEP_ACK         D2H_DEV_D3_ACK
+#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ
+#define D2HMB_DS_DEVICE_SLEEP_EXIT      D2H_DEV_DS_EXIT_NOTE
+#define D2HMB_DS_HOST_SLEEP_EXIT_ACK    0x00000008
+#define D2HMB_FWHALT                    D2H_DEV_FWHALT
+#define D2H_DEV_MB_MASK		(D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \
+				D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED | D2H_DEV_FWHALT | \
+				D2H_FWTRAP_MASK | D2H_DEV_EXT_TRAP_DATA)
+#define D2H_DEV_MB_INVALIDATED(x)	((!x) || (x & ~D2H_DEV_MB_MASK))
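
For illustration only (not part of the patch): a sketch of decoding a D2H mailbox word with the bit definitions above; the commented actions are placeholders, not real driver entry points.

	static void example_handle_d2h_mb_data(uint32 mb_data)
	{
		if (D2H_DEV_MB_INVALIDATED(mb_data))
			return;                       /* zero, or unknown bits set */
		if (mb_data & D2H_DEV_D3_ACK)
			;                             /* e.g. complete a D3 suspend handshake */
		if (mb_data & D2H_DEV_DS_ENTER_REQ)
			;                             /* e.g. grant or refuse deep sleep */
		if (mb_data & D2H_DEV_FWHALT)
			;                             /* e.g. trigger firmware trap handling */
	}
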
+
+
+/** These macro's operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */
+#define NEXTTXP(i, d)           ((((i)+1) >= (d)) ? 0 : ((i)+1))
+#define NTXPACTIVE(r, w, d)     (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
+#define NTXPAVAIL(r, w, d)      (((d) - NTXPACTIVE((r), (w), (d))) > 1)
+
+/* Ring read/write space helpers (read offset r, write offset w, depth d) */
+#define READ_AVAIL_SPACE(w, r, d)		\
+			((w >= r) ? (w - r) : (d - r))
+
+#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)		((w >= r) ? (d - w) : (r - w))
+#define WRITE_SPACE_AVAIL(r, w, d)	(d - (NTXPACTIVE(r, w, d)) - 1)
+#define CHECK_WRITE_SPACE(r, w, d)	\
+	((r) > (w)) ? ((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? ((d) - (w) - 1) : ((d) - (w))
+#define CHECK_NOWRITE_SPACE(r, w, d)	\
+	(((r) == (w) + 1) || (((r) == 0) && ((w) == ((d) - 1))))
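
For illustration only (not part of the patch): a sketch of combining the circular-ring helpers above; r, w and d are a read offset, write offset and ring depth in items.

	static int example_can_post(uint16 r, uint16 w, uint16 d, uint16 wanted)
	{
		uint16 space;

		if (CHECK_NOWRITE_SPACE(r, w, d))
			return 0;                     /* ring is completely full */
		space = CHECK_WRITE_SPACE(r, w, d);
		return (space >= wanted);
	}
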
+
+
+#define WRT_PEND(x)	((x)->wr_pending)
+#define DNGL_RING_WPTR(msgbuf)		(*((msgbuf)->tcm_rs_w_ptr)) /**< advanced by producer */
+#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a)	(DNGL_RING_WPTR(msgbuf) = (a))
+
+#define DNGL_RING_RPTR(msgbuf)		(*((msgbuf)->tcm_rs_r_ptr)) /**< advanced by consumer */
+#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a)	(DNGL_RING_RPTR(msgbuf) = (a))
+
+#define MODULO_RING_IDX(x, y)	((x) % (y)->bitmap_size)
+
+#define  RING_READ_PTR(x)	((x)->ringstate->r_offset)
+#define  RING_WRITE_PTR(x)	((x)->ringstate->w_offset)
+#define  RING_START_PTR(x)	((x)->ringmem->base_addr.low_addr)
+#define  RING_MAX_ITEM(x)	((x)->ringmem->max_item)
+#define  RING_LEN_ITEMS(x)	((x)->ringmem->len_items)
+#define	 HOST_RING_BASE(x)	((x)->dma_buf.va)
+#define	 HOST_RING_END(x)	((uint8 *)HOST_RING_BASE((x)) + \
+					((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))
+#endif	/* _bcmpcie_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
new file mode 100644
index 0000000..b3502ea
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -0,0 +1,184 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmpcispi.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef	_BCM_PCI_SPI_H
+#define	_BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	uint32 spih_ctrl;		/* 0x00 SPI Control Register */
+	uint32 spih_stat;		/* 0x04 SPI Status Register */
+	uint32 spih_data;		/* 0x08 SPI Data Register, 32-bits wide */
+	uint32 spih_ext;		/* 0x0C SPI Extension Register */
+	uint32 PAD[4];			/* 0x10-0x1F PADDING */
+
+	uint32 spih_gpio_ctrl;		/* 0x20 SPI GPIO Control Register */
+	uint32 spih_gpio_data;		/* 0x24 SPI GPIO Data Register */
+	uint32 PAD[6];			/* 0x28-0x3F PADDING */
+
+	uint32 spih_int_edge;		/* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+	uint32 spih_int_pol;		/* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+							/* 1=Active High) */
+	uint32 spih_int_mask;		/* 0x48 SPI Interrupt Mask */
+	uint32 spih_int_status;		/* 0x4C SPI Interrupt Status */
+	uint32 PAD[4];			/* 0x50-0x5F PADDING */
+
+	uint32 spih_hex_disp;		/* 0x60 SPI 4-digit hex display value */
+	uint32 spih_current_ma;		/* 0x64 SPI SD card current consumption in mA */
+	uint32 PAD[1];			/* 0x68 PADDING */
+	uint32 spih_disp_sel;		/* 0x6c SPI 4-digit hex display mode select (1=current) */
+	uint32 PAD[4];			/* 0x70-0x7F PADDING */
+	uint32 PAD[8];			/* 0x80-0x9F PADDING */
+	uint32 PAD[8];			/* 0xA0-0xBF PADDING */
+	uint32 spih_pll_ctrl;	/* 0xC0 PLL Control Register */
+	uint32 spih_pll_status;	/* 0xC4 PLL Status Register */
+	uint32 spih_xtal_freq;	/* 0xC8 External Clock Frequency in units of 10000Hz */
+	uint32 spih_clk_count;	/* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+	uint32 cfg_space[0x40];		/* 0x000-0x0FF PCI Configuration Space (Read Only) */
+	uint32 P_IMG_CTRL0;		/* 0x100 PCI Image0 Control Register */
+
+	uint32 P_BA0;			/* 0x104 32 R/W PCI Image0 Base Address register */
+	uint32 P_AM0;			/* 0x108 32 R/W PCI Image0 Address Mask register */
+	uint32 P_TA0;			/* 0x10C 32 R/W PCI Image0 Translation Address register */
+	uint32 P_IMG_CTRL1;		/* 0x110 32 R/W PCI Image1 Control register */
+	uint32 P_BA1;			/* 0x114 32 R/W PCI Image1 Base Address register */
+	uint32 P_AM1;			/* 0x118 32 R/W PCI Image1 Address Mask register */
+	uint32 P_TA1;			/* 0x11C 32 R/W PCI Image1 Translation Address register */
+	uint32 P_IMG_CTRL2;		/* 0x120 32 R/W PCI Image2 Control register */
+	uint32 P_BA2;			/* 0x124 32 R/W PCI Image2 Base Address register */
+	uint32 P_AM2;			/* 0x128 32 R/W PCI Image2 Address Mask register */
+	uint32 P_TA2;			/* 0x12C 32 R/W PCI Image2 Translation Address register */
+	uint32 P_IMG_CTRL3;		/* 0x130 32 R/W PCI Image3 Control register */
+	uint32 P_BA3;			/* 0x134 32 R/W PCI Image3 Base Address register */
+	uint32 P_AM3;			/* 0x138 32 R/W PCI Image3 Address Mask register */
+	uint32 P_TA3;			/* 0x13C 32 R/W PCI Image3 Translation Address register */
+	uint32 P_IMG_CTRL4;		/* 0x140 32 R/W PCI Image4 Control register */
+	uint32 P_BA4;			/* 0x144 32 R/W PCI Image4 Base Address register */
+	uint32 P_AM4;			/* 0x148 32 R/W PCI Image4 Address Mask register */
+	uint32 P_TA4;			/* 0x14C 32 R/W PCI Image4 Translation Address register */
+	uint32 P_IMG_CTRL5;		/* 0x150 32 R/W PCI Image5 Control register */
+	uint32 P_BA5;			/* 0x154 32 R/W PCI Image5 Base Address register */
+	uint32 P_AM5;			/* 0x158 32 R/W PCI Image5 Address Mask register */
+	uint32 P_TA5;			/* 0x15C 32 R/W PCI Image5 Translation Address register */
+	uint32 P_ERR_CS;		/* 0x160 32 R/W PCI Error Control and Status register */
+	uint32 P_ERR_ADDR;		/* 0x164 32 R PCI Erroneous Address register */
+	uint32 P_ERR_DATA;		/* 0x168 32 R PCI Erroneous Data register */
+
+	uint32 PAD[5];			/* 0x16C-0x17F PADDING */
+
+	uint32 WB_CONF_SPC_BAR;		/* 0x180 32 R WISHBONE Configuration Space Base Address */
+	uint32 W_IMG_CTRL1;		/* 0x184 32 R/W WISHBONE Image1 Control register */
+	uint32 W_BA1;			/* 0x188 32 R/W WISHBONE Image1 Base Address register */
+	uint32 W_AM1;			/* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+	uint32 W_TA1;			/* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+	uint32 W_IMG_CTRL2;		/* 0x194 32 R/W WISHBONE Image2 Control register */
+	uint32 W_BA2;			/* 0x198 32 R/W WISHBONE Image2 Base Address register */
+	uint32 W_AM2;			/* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+	uint32 W_TA2;			/* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+	uint32 W_IMG_CTRL3;		/* 0x1A4 32 R/W WISHBONE Image3 Control register */
+	uint32 W_BA3;			/* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+	uint32 W_AM3;			/* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+	uint32 W_TA3;			/* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+	uint32 W_IMG_CTRL4;		/* 0x1B4 32 R/W WISHBONE Image4 Control register */
+	uint32 W_BA4;			/* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+	uint32 W_AM4;			/* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+	uint32 W_TA4;			/* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+	uint32 W_IMG_CTRL5;		/* 0x1C4 32 R/W WISHBONE Image5 Control register */
+	uint32 W_BA5;			/* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+	uint32 W_AM5;			/* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+	uint32 W_TA5;			/* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+	uint32 W_ERR_CS;		/* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+	uint32 W_ERR_ADDR;		/* 0x1D8 32 R WISHBONE Erroneous Address register */
+	uint32 W_ERR_DATA;		/* 0x1DC 32 R WISHBONE Erroneous Data register */
+	uint32 CNF_ADDR;		/* 0x1E0 32 R/W Configuration Cycle register */
+	uint32 CNF_DATA;		/* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+	uint32 INT_ACK;			/* 0x1E8 32 R Interrupt Acknowledge register */
+	uint32 ICR;			/* 0x1EC 32 R/W Interrupt Control register */
+	uint32 ISR;			/* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN		(1 << 0)	/* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN	(1 << 1)	/* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN	(1 << 2)	/* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN	(1 << 3)	/* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN	(1 << 4)	/* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET	(1U << 31)	/* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST		(1 << 0)	/* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST	(1 << 1)	/* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST	(1 << 2)	/* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST	(1 << 3)	/* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST	(1 << 4)	/* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR		(1 << 0)	/* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR		(1 << 1)	/* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR		(1 << 2)	/* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS			(1 << 0)	/* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER		(1 << 1)	/* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT	(1 << 2)	/* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK		0x30		/* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT	4		/* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL		(1 << 3)	/* SPI Write FIFO Full */
+#define SPIH_WFEMPTY		(1 << 2)	/* SPI Write FIFO Empty */
+#define SPIH_RFFULL		(1 << 1)	/* SPI Read FIFO Full */
+#define SPIH_RFEMPTY		(1 << 0)	/* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK		(1U << 31)	/* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK		(1 << 1)	/* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED		(1 << 3)	/* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND		0xf4240		/* 1 million */
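+
+/*
+ * Usage sketch (illustrative; 'regs' is assumed to be an ioremapped pointer
+ * to the controller's spih_regs_t block and 'word' the value to send): wait
+ * for room in the write FIFO, bounded by SPI_SPIN_BOUND so a wedged
+ * controller cannot hang the caller.
+ *
+ *	uint32 spin = 0;
+ *	while ((regs->spih_stat & SPIH_WFFULL) && (++spin < SPI_SPIN_BOUND))
+ *		;
+ *	if (!(regs->spih_stat & SPIH_WFFULL))
+ *		regs->spih_data = word;
+ */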
+
+#endif /* _BCM_PCI_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h
new file mode 100644
index 0000000..09e607f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -0,0 +1,39 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmperf.h 514727 2014-11-12 03:02:48Z $
+ */
+/* test */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define	BCMPERF_GETICACHE_MISS(x)	((x) = 0)
+#define	BCMPERF_GETICACHE_HIT(x)	((x) = 0)
+#define	BCMPERF_GETINSTRCOUNT(x)	((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
new file mode 100644
index 0000000..da835e8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -0,0 +1,173 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdbus.h 644725 2016-06-21 12:26:04Z $
+ */
+
+#ifndef	_sdio_api_h_
+#define	_sdio_api_h_
+
+#if defined(BT_OVER_SDIO)
+#include <linux/mmc/sdio_func.h>
+#endif /* defined (BT_OVER_SDIO) */
+
+
+#define SDIOH_API_RC_SUCCESS                          (0x00)
+#define SDIOH_API_RC_FAIL	                      (0x01)
+#define SDIOH_API_SUCCESS(status) (status == 0)
+
+#define SDIOH_READ              0	/* Read request */
+#define SDIOH_WRITE             1	/* Write request */
+
+#define SDIOH_DATA_FIX          0	/* Fixed addressing */
+#define SDIOH_DATA_INC          1	/* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL   0       /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND   1       /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU  2       /* Cut-through command */
+
+#define SDIOH_DATA_PIO          0       /* PIO mode */
+#define SDIOH_DATA_DMA          1       /* DMA mode */
+
+/* Max number of glommed pkts */
+#ifdef CUSTOM_MAX_TXGLOM_SIZE
+#define SDPCM_MAXGLOM_SIZE  CUSTOM_MAX_TXGLOM_SIZE
+#else
+#define SDPCM_MAXGLOM_SIZE	36
+#endif /* CUSTOM_MAX_TXGLOM_SIZE */
+
+#define SDPCM_TXGLOM_CPY 0			/* SDIO 2.0 should use copy mode */
+#define SDPCM_TXGLOM_MDESC	1		/* SDIO 3.0 should use multi-desc mode */
+
+#ifdef CUSTOM_DEF_TXGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE  CUSTOM_DEF_TXGLOM_SIZE
+#else
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif /* CUSTOM_DEF_TXGLOM_SIZE */
+
+#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE
+#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!"
+#undef SDPCM_DEFGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif
+
+#ifdef PKT_STATICS
+typedef struct pkt_statics {
+	uint16	event_count;
+	uint32	event_size;
+	uint16	ctrl_count;
+	uint32	ctrl_size;
+	uint32	data_count;
+	uint32	data_size;
+	uint32	glom_cnt[SDPCM_MAXGLOM_SIZE];
+	uint16	glom_max;
+	uint16	glom_count;
+	uint32	glom_size;
+	uint16	test_count;
+	uint32	test_size;
+} pkt_statics_t;
+#endif
+
+typedef int SDIOH_API_RC;
+
+/* SDIO Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+#if defined(BT_OVER_SDIO)
+extern
+void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func);
+#endif /* defined (BT_OVER_SDIO) */
+
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+	uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+	uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+	void *pkt);
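+
+/*
+ * Call sketch (illustrative; 'sd' is assumed to come from the port code's
+ * sdioh_attach() and 'addr' to be a function-1 register offset): a single
+ * CMD52 read with the bus-layer return code checked via SDIOH_API_SUCCESS().
+ *
+ *	uint8 val;
+ *	SDIOH_API_RC rc = sdioh_request_byte(sd, SDIOH_READ, 1, addr, &val);
+ *	if (!SDIOH_API_SUCCESS(rc))
+ *		...handle the failed transfer...
+ */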
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint func, uint8 *cis, uint32 length);
+extern SDIOH_API_RC sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint func, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint func, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                          void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+
+
+#if defined(BCMSDIOH_STD)
+	#define SDIOH_SLEEP_ENABLED
+#endif
+extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
+
+/* GPIO support */
+extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd);
+extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+extern uint sdioh_set_mode(sdioh_info_t *sd, uint mode);
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
new file mode 100644
index 0000000..7262d0f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -0,0 +1,273 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ *     export functions to client drivers
+ *     abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdh.h 698895 2017-05-11 02:55:17Z $
+ */
+
+/**
+ * @file bcmsdh.h
+ */
+
+#ifndef	_bcmsdh_h_
+#define	_bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL	0x0001 /* Error */
+#define BCMSDH_INFO_VAL		0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x) printf x
+#define BCMSDH_INFO(x)
+
+#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || \
+	defined(BCMSDIOH_SPI))
+#define BCMSDH_ADAPTER
+#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+
+#if defined(BT_OVER_SDIO)
+typedef enum {
+	NO_HANG_STATE		= 0,
+	HANG_START_STATE		= 1,
+	HANG_RECOVERY_STATE	= 2
+} dhd_hang_state_t;
+#endif
+
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
+/**
+ * BCMSDH API context
+ */
+struct bcmsdh_info
+{
+	bool	init_success;	/* underlying driver successfully attached */
+	void	*sdioh;		/* handler for sdioh */
+	uint32  vendevid;	/* Target Vendor and Device ID on SD bus */
+	osl_t   *osh;
+	bool	regfail;	/* Save status of last reg_read/reg_write call */
+	uint32	sbwad;		/* Save backplane window address */
+	void	*os_cxt;        /* Pointer to per-OS private data */
+	bool	force_sbwad_calc; /* forces calculation of sbwad instead of using cached value */
+#ifdef DHD_WAKE_STATUS
+	unsigned int	total_wake_count;
+	int		pkt_wake;
+#endif /* DHD_WAKE_STATUS */
+};
+
+/* Detach - free up resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+/* Enable/disable SD card interrupt forward */
+extern void bcmsdh_intr_forward(void *sdh, bool pass);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ *   fn:   function number
+ *   addr: unmodified SDIO-space address
+ *   data: data byte to write
+ *   err:  pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
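+
+/* Sketch (illustrative; 'sdh' is the handle returned by bcmsdh_attach()):
+ * read the CCCR revision byte (SDIO-space address 0x00, function 0) and
+ * propagate any CMD52 error reported through 'err'.
+ *
+ *	int err = 0;
+ *	uint8 rev = bcmsdh_cfg_read(sdh, 0, 0x00, &err);
+ *	if (err)
+ *		return err;
+ */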
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ *   fn:     function whose CIS is being requested (0 is common CIS)
+ *   cis:    pointer to memory location to place results
+ *   length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+extern int bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint offset);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ *   addr: backplane address (i.e. >= regsva from attach)
+ *   size: register width in bytes (2 or 4)
+ *   data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uintptr addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data);
+
+/* set sb address window */
+extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
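+
+/* Sketch (illustrative; 'corebase' stands for any backplane address at or
+ * above the regsva passed to bcmsdh_attach(), and 0x20 is an arbitrary
+ * register offset chosen for the example):
+ *
+ *	uint32 val = bcmsdh_reg_read(sdh, corebase + 0x20, 4);
+ *	if (bcmsdh_regfail(sdh))
+ *		...retry or report the failed backplane access...
+ */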
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ *   fn:       function number
+ *   addr:     backplane address (i.e. >= regsva from attach)
+ *   flags:    backplane width, address increment, sync/async
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ *   pkt:      pointer to packet associated with buf (if any)
+ *   complete: callback function for command completion (async only)
+ *   handle:   handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+
+extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len);
+extern void bcmsdh_glom_clear(void *sdh);
+extern uint bcmsdh_set_mode(void *sdh, uint mode);
+extern bool bcmsdh_glom_enabled(void);
+/* Flags bits */
+#define SDIO_REQ_4BYTE	0x1	/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED	0x2	/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC	0x4	/* Async request (vs. sync request) */
+#define SDIO_BYTE_MODE	0x8	/* Byte mode request (non-block mode) */
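+
+/* Flag usage sketch (illustrative; 'sdh', 'addr' and 'buf' are assumed to be
+ * prepared by the caller): a synchronous, fixed-address (FIFO-style), 4-byte
+ * backplane-width read of 64 bytes from function 2. SDIO_REQ_ASYNC is left
+ * clear, so the call completes before returning.
+ *
+ *	uint flags = SDIO_REQ_4BYTE | SDIO_REQ_FIXED;
+ *	int ret = bcmsdh_recv_buf(sdh, addr, 2, flags, buf, 64, NULL, NULL, NULL);
+ */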
+
+/* Pending (non-error) return code */
+#define BCME_PENDING	1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ *   rw:       read or write (0/1)
+ *   addr:     direct SDIO address
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+                           void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+/* callback functions */
+typedef struct {
+	/* probe the device */
+	void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+	                uint16 func, uint bustype, void * regsva, osl_t * osh,
+	                void * param);
+	/* remove the device */
+	void (*remove)(void *context);
+	/* can we suspend now */
+	int (*suspend)(void *context);
+	/* resume from suspend */
+	int (*resume)(void *context);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+extern int bcmsdh_reg_sdio_notify(void* semaphore);
+extern void bcmsdh_unreg_sdio_notify(void);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context);
+extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
+extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
+#endif 
+extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
+extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
+extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh);
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh);
+
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* function to force sbwad calculation instead of using cached value */
+extern void bcmsdh_force_sbwad_calc(void *sdh, bool force);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+extern int bcmsdh_sleep(void *sdh, bool enab);
+
+/* GPIO support */
+extern int bcmsdh_gpio_init(void *sd);
+extern bool bcmsdh_gpioin(void *sd, uint32 gpio);
+extern int bcmsdh_gpioouten(void *sd, uint32 gpio);
+extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab);
+
+#endif	/* _bcmsdh_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
new file mode 100644
index 0000000..1073d97
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -0,0 +1,129 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Proprietary,Open:>>
+ *
+ * $Id: bcmsdh_sdmmc.h 687253 2017-02-28 09:33:36Z $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)	do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x)	do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x)	do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x)	do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x)	do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#define sd_cost(x)	do { if (sd_msglevel & SDH_COST_VAL) printf x; } while (0)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4		2
+#define CLIENT_INTR			0x100	/* Get rid of this! */
+#define SDIOH_SDMMC_MAX_SG_ENTRIES	(SDPCM_MAXGLOM_SIZE + 2)
+
+struct sdioh_info {
+	osl_t		*osh;			/* osh handler */
+	void		*bcmsdh;		/* upper layer handle */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	uint16		intmask;		/* Current active interrupts */
+
+	int		intrcount;		/* Client interrupts */
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	bool		use_rxchain;
+	struct scatterlist	sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
+	struct sdio_func	fake_func0;
+	struct sdio_func	*func[SDIOD_MAX_IOFUNCS];
+	uint		sd_clk_rate;
+	uint	txglom_mode;		/* Txglom mode: 0 - copy, 1 - multi-descriptor */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+
+#ifdef GLOBAL_SDMMC_INSTANCE
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+	sdioh_info_t	*sd;
+	struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+#endif
+
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
new file mode 100644
index 0000000..6230047
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -0,0 +1,301 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdpcm.h 614070 2016-01-21 00:55:57Z $
+ */
+
+#ifndef	_bcmsdpcm_h_
+#define	_bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK	I_SMB_SW0	/* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK	I_SMB_SW1	/* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB	I_SMB_SW2	/* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT	I_SMB_SW3	/* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL      (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK		(1 << 0)	/* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK	(1 << 1)	/* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB	(1 << 2)	/* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT	(1 << 3)	/* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK	0x0000000f	/* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+
+#ifdef DS_PROT
+/* Bit msgs for custom deep sleep protocol */
+#define SMB_DATA_D3INFORM	0x100	/* host announcing D3 entry */
+#define SMB_DATA_DSACK		0x200	/* host acking a deepsleep request */
+#define SMB_DATA_DSNACK		0x400	/* host nacking a deepsleep request */
+#endif /* DS_PROT */
+
+#define SMB_DATA_VERSION_MASK	0x00ff0000	/* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT	16		/* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_INT_ACK	I_HMB_SW0	/* To Host Mailbox Dev Interrupt ACK */
+#define I_HMB_FC_STATE	I_HMB_SW0	/* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE	I_HMB_SW1	/* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND	I_HMB_SW2	/* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT	I_HMB_SW3	/* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL    (I_HMB_INT_ACK | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_INT_ACK	(1 << 0)	/* To Host Mailbox Dev Interrupt ACK */
+#define HMB_FRAME_IND	(1 << 2)	/* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT	(1 << 3)	/* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK	0x0000000f	/* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED	0x01	/* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY	0x02	/* we're ready to talk to host after enable */
+#define HMB_DATA_FC		0x04	/* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY	0x08	/* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT		0x10	/* firmware has halted operation */
+
+#ifdef DS_PROT
+/* Bit msgs for custom deep sleep protocol */
+#define HMB_DATA_DSREQ		0x100	/* firmware requesting deepsleep entry */
+#define HMB_DATA_DSEXIT		0x200	/* firmware announcing deepsleep exit */
+#define HMB_DATA_D3ACK		0x400	/* firmware acking a D3 notice from host */
+#define HMB_DATA_D3EXIT		0x800	/* firmware announcing D3 exit */
+#define HMB_DATA_DSPROT_MASK	0xf00
+#endif /* DS_PROT */
+
+
+#define HMB_DATA_FCDATA_MASK	0xff000000	/* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT	24		/* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK	0x00ff0000	/* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT	16		/* device protocol version (with devready) */
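+
+/* Extraction sketch (illustrative; 'hmbdata' is assumed to hold the value
+ * read from tohostmailboxdata when the devready/fwready bit is seen):
+ *
+ *	uint8 proto_ver = (hmbdata & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ *	if (proto_ver != SDPCM_PROT_VERSION)
+ *		...host and dongle disagree on the SDPCM protocol version...
+ *
+ * SDPCM_PROT_VERSION is defined a few lines below.
+ */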
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK		0x000000ff	/* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK		0x00000f00	/* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT		8		/* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK		0x0000f000	/* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT		12		/* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK		0x00ff0000	/* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT		16		/* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET		2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET		3		/* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) 		(((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK		0xff000000
+#define SDPCM_DOFFSET_SHIFT		24
+
+#define SDPCM_FCMASK_OFFSET		4		/* Flow control */
+#define SDPCM_FCMASK_VALUE(p)		(((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET		5		/* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p)		(((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET		6		/* Version # */
+#define SDPCM_VERSION_VALUE(p)		(((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET		7		/* Spare */
+#define SDPCM_UNUSED_VALUE(p)		(((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN	8	/* SW header is 64 bits */
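+
+/* Decode sketch (illustrative; 'hdr' is assumed to point at the 8-byte SW
+ * header, i.e. the bytes immediately after the hardware length tag):
+ *
+ *	uint8  seq     = SDPCM_PACKET_SEQUENCE(hdr);
+ *	uint8  chan    = SDPCM_PACKET_CHANNEL(hdr);
+ *	uint16 nextlen = SDPCM_NEXTLEN_VALUE(hdr);	(already converted to bytes)
+ *	uint8  doff    = SDPCM_DOFFSET_VALUE(hdr);	(offset of payload from SOF)
+ */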
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL	0	/* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL	1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL	3	/* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL	15	/* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL	15
+
+#define SDPCM_SEQUENCE_WRAP	256	/* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0	0x01
+#define SDPCM_FLAG_RESVD1	0x02
+#define SDPCM_FLAG_GSPI_TXENAB	0x04
+#define SDPCM_FLAG_GLOMDESC	0x08	/* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG	(SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p)	(((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN		4	/* Generally: Cmd(1), Ext(1), Len(2);
+						 * Semantics of Ext byte depend on command.
+						 * Len is current or requested frame length, not
+						 * including test header; sent little-endian.
+						 */
+#define SDPCM_TEST_PKT_CNT_FLD_LEN	4	/* Packet count field length */
+#define SDPCM_TEST_DISCARD		0x01	/* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ		0x02	/* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP		0x03	/* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST		0x04	/* Receiver to send a burst. Ext is a frame count
+						 * (Backward compatibility) Set frame count in a
+						 * 4-byte field adjacent to the HDR
+						 */
+#define SDPCM_TEST_SEND			0x05	/* Receiver sets send mode. Ext is boolean on/off
+						 * Set frame count in a 4-byte field adjacent to
+						 * the HDR
+						 */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id)	((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+	uint32 cmd52rd;		/* Cmd52RdCount, SDIO: cmd52 reads */
+	uint32 cmd52wr;		/* Cmd52WrCount, SDIO: cmd52 writes */
+	uint32 cmd53rd;		/* Cmd53RdCount, SDIO: cmd53 reads */
+	uint32 cmd53wr;		/* Cmd53WrCount, SDIO: cmd53 writes */
+	uint32 abort;		/* AbortCount, SDIO: aborts */
+	uint32 datacrcerror;	/* DataCrcErrorCount, SDIO: frames w/CRC error */
+	uint32 rdoutofsync;	/* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+	uint32 writebusy;	/* WriteBusyCount, SDIO: device asserted "busy" */
+	uint32 readwait;	/* ReadWaitCount, SDIO: no data ready for a read cmd */
+	uint32 readterm;	/* ReadTermCount, SDIO: read frame termination cmds */
+	uint32 writeterm;	/* WriteTermCount, SDIO: write frames termination cmds */
+	uint32 rxdescuflo;	/* receive descriptor underflows */
+	uint32 rxfifooflo;	/* receive fifo overflows */
+	uint32 txfifouflo;	/* transmit fifo underflows */
+	uint32 runt;		/* runt (too short) frames recv'd from bus */
+	uint32 badlen;		/* frame's rxh len does not match its hw tag len */
+	uint32 badcksum;	/* frame's hw tag chksum doesn't agree with len value */
+	uint32 seqbreak;	/* break in sequence # space from one rx frame to the next */
+	uint32 rxfcrc;		/* frame rx header indicates crc error */
+	uint32 rxfwoos;		/* frame rx header indicates write out of sync */
+	uint32 rxfwft;		/* frame rx header indicates write frame termination */
+	uint32 rxfabort;	/* frame rx header indicates frame aborted */
+	uint32 woosint;		/* write out of sync interrupt */
+	uint32 roosint;		/* read out of sync interrupt */
+	uint32 rftermint;	/* read frame terminate interrupt */
+	uint32 wftermint;	/* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val)	((var) == (val))
+#define SDIODREV_GE(var, val)	((var) >= (val))
+#define SDIODREV_GT(var, val)	((var) > (val))
+#define SDIODREV_LT(var, val)	((var) < (val))
+#define SDIODREV_LE(var, val)	((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+	(SDIODREV_LT((h)->corerev, 1) ? \
+	 SDIODDMAREG32((h), (dir), (chnl)) : \
+	 SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODDMAREG(h, dir, chnl) : \
+	 PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+	(SDIODREV_LT((corerev), 1) ? \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+	((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODFIFOREG(h, corerev) : \
+	 PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION       0x0001
+#define SDPCM_SHARED_VERSION_MASK  0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT  0x0100
+#define SDPCM_SHARED_ASSERT        0x0200
+#define SDPCM_SHARED_TRAP          0x0400
+#define SDPCM_SHARED_IN_BRPT       0x0800
+#define SDPCM_SHARED_SET_BRPT      0x1000
+#define SDPCM_SHARED_PENDING_BRPT  0x2000
+#define SDPCM_SHARED_FATAL_LOGBUF_VALID	0x100000
+
+typedef struct {
+	uint32	flags;
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hnd_cons_t */
+	uint32  msgtrace_addr;
+	uint32  fwid;
+	uint32  device_fatal_logbuf_start;
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
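+
+/* Consistency-check sketch (illustrative; 'sh' is assumed to be a
+ * sdpcm_shared_t the host has copied out of dongle memory):
+ *
+ *	if ((sh.flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION)
+ *		...host and dongle disagree on the shared-structure layout...
+ *	if (sh.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP))
+ *		...report sh.assert_exp_addr / sh.trap_addr for post-mortem...
+ */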
+
+#endif	/* _bcmsdpcm_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
new file mode 100644
index 0000000..537876c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -0,0 +1,138 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdspi.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef	_BCM_SD_SPI_H
+#define	_BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint		bar0;			/* BAR0 for PCI Device */
+	osl_t 		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+
+	uint		lockcount; 		/* nest count of sdspi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	uint32 		intrcount;		/* Client interrupts */
+	uint32 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	bool		got_hcint;		/* Host Controller interrupt. */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current register transfer size */
+	uint32		cmd53_wr_data;		/* Used to pass CMD53 write data */
+	uint32		card_response;		/* Used to pass back response status byte */
+	uint32		card_rsp_data;		/* Used to pass back response data word */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
new file mode 100644
index 0000000..ff3b0d1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -0,0 +1,285 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdstd.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef	_BCM_SD_STD_H
+#define	_BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+#define SDIOH_MODE_SD1		1
+#define SDIOH_MODE_SD4		2
+
+#define MAX_SLOTS 6 	/* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ	0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK	1
+#define SDIOH_TYPE_BCM27XX	2
+#define SDIOH_TYPE_TI_PCIXX21	4	/* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822	5	/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON	6	/* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS   0x00001E00
+
+#define RETRIES_LARGE 100000
+#define sdstd_os_yield(sd)	do {} while (0)
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+#define USE_FIFO		0x8	/* Fifo vs non-fifo */
+
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+#define HC_INTR_RETUNING	0x1000
+
+
+#ifdef BCMSDIOH_TXGLOM
+/* Total glom pkt cannot exceed 64K;
+ * need one more slot for glom padding packet
+ */
+#define SDIOH_MAXGLOM_SIZE	(40+1)
+
+typedef struct glom_buf {
+	uint32 count;				/* Total number of pkts queued */
+	void *dma_buf_arr[SDIOH_MAXGLOM_SIZE];	/* Frame address */
+	ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */
+	uint16 nbytes[SDIOH_MAXGLOM_SIZE];	/* Size of each frame */
+} glom_buf_t;
+#endif
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint32 curr_caps;                    	/* max current capabilities reg */
+
+	osl_t 		*osh;			/* osh handler */
+	volatile char 	*mem_space;		/* pci device memory va */
+	uint		lockcount; 		/* nest count of sdstd_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint		target_dev;		/* Target device ID */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+	void		*bcmsdh;		/* handler to upper layer stack (bcmsdh) */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint		irq;			/* Client irq */
+	int		intrcount;		/* Client interrupts */
+	int		local_intrcount;	/* Controller interrupts */
+	bool		host_init_done;		/* Controller initted */
+	bool		card_init_done;		/* Client SDIO interface initted */
+	bool		polled_mode;		/* polling for command completion */
+
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32		data_xfer_count;	/* Current transfer */
+	uint16		card_rca;		/* Current Address */
+	int8		sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;		/* DMA Buffer virtual address */
+	ulong		dma_phys;		/* DMA Buffer physical address */
+	void		*adma2_dscr_buf;	/* ADMA2 Descriptor Buffer virtual address */
+	ulong		adma2_dscr_phys;	/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void		*dma_start_buf;
+	ulong		dma_start_phys;
+	uint		alloced_dma_size;
+	void		*adma2_dscr_start_buf;
+	ulong		adma2_dscr_start_phys;
+	uint		alloced_adma2_dscr_size;
+
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	bool		got_hcint;		/* local interrupt flag */
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int 	host_UHSISupported;		/* whether UHSI is supported for HC. */
+	int 	card_UHSI_voltage_Supported; 	/* whether UHSI is supported for
+						 * Card in terms of Voltage [1.8 or 3.3].
+						 */
+	int	global_UHSI_Supp;	/* type of UHSI support in both host and card.
+					 * HOST_SDR_UNSUPP: capabilities not supported/matched
+					 * HOST_SDR_12_25: SDR12 and SDR25 supported
+					 * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+					 */
+	volatile int	sd3_dat_state; 		/* data transfer state used for retuning check */
+	volatile int	sd3_tun_state; 		/* tuning state used for retuning check */
+	bool	sd3_tuning_reqd; 	/* tuning requirement parameter */
+	uint32	caps3;			/* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+#ifdef BCMSDIOH_TXGLOM
+	glom_buf_t glom_info;		/* pkt information used for glomming */
+	uint	txglom_mode;		/* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#endif
+};
+
+#define DMA_MODE_NONE	0
+#define DMA_MODE_SDMA	1
+#define DMA_MODE_ADMA1	2
+#define DMA_MODE_ADMA2	3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO	-1
+
+#define USE_DMA(sd)		((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 			0
+#define TUNING_START 			1
+#define TUNING_START_AFTER_DAT 	2
+#define TUNING_ONGOING 			3
+
+#define DATA_TRANSFER_IDLE 		0
+#define DATA_TRANSFER_ONGOING	1
+
+#define CHECK_TUNING_PRE_DATA	1
+#define CHECK_TUNING_POST_DATA	2
+
+
+#ifdef DHD_DEBUG
+#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
+#define SD_DHD_ENABLE_PERIODIC_TUNING  0x00
+#endif
+
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, ulong addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, ulong addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wrappers for safe concurrent register access */
+extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags);
+extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* _BCM_SD_STD_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h
new file mode 100644
index 0000000..9b4bd2d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -0,0 +1,43 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmspi.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef	_BCM_SPI_H
+#define	_BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
new file mode 100644
index 0000000..e9735ff
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
@@ -0,0 +1,165 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmspibrcm.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef	_BCM_SPI_BRCM_H
+#define	_BCM_SPI_BRCM_H
+
+#ifndef SPI_MAX_IOFUNCS
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS		4
+#endif
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#if defined(DHD_DEBUG)
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)	do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x)	do { if (sd_msglevel & SDH_INFO_VAL)  printf x; } while (0)
+#define sd_debug(x)	do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x)	do { if (sd_msglevel & SDH_DATA_VAL)  printf x; } while (0)
+#define sd_ctrl(x)	do { if (sd_msglevel & SDH_CTRL_VAL)  printf x; } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+#define sd_log(x)
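+
+/*
+ * Usage note (editorial addition, not part of the original header): when
+ * DHD_DEBUG is defined, the debug macros above expand to "printf x", so the
+ * whole printf argument list is passed in an extra set of parentheses, e.g.
+ *
+ *	sd_trace(("%s: enter\n", __FUNCTION__));
+ *	sd_err(("read failed, status 0x%x\n", status));
+ *
+ * The variable names in these examples are illustrative only.
+ */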
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_F1		64
+#define BLOCK_SIZE_F2 		2048
+#define BLOCK_SIZE_F3 		2048
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+#define ERROR_UF	2
+#define ERROR_OF	3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint		cfg_bar;		/* pci cfg address for bar */
+	uint32		caps;			/* cached value of capabilities reg */
+	void		*bar0;			/* BAR0 for PCI Device */
+	osl_t		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+	uint		lockcount;		/* nest count of spi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint		irq;			/* Client irq */
+	uint32		intrcount;		/* Client interrupts */
+	uint32		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SPI_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current transfer */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		card_dstatus;		/* 32bit device status */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SPI_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	uint32		wordlen;			/* host processor 16/32bits */
+	uint32		prev_fun;
+	uint32		chip;
+	uint32		chiprev;
+	bool		resp_delay_all;
+	bool		dwordmode;
+	bool		resp_delay_new;
+
+	struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M			BITFIELD_MASK(1)	/* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S			31
+#define SPI_ACCESS_M			BITFIELD_MASK(1)	/* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S			30
+#define SPI_FUNCTION_M			BITFIELD_MASK(2)	/* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S			28
+#define SPI_REG_ADDR_M			BITFIELD_MASK(17)	/* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S			11
+#define SPI_LEN_M			BITFIELD_MASK(11)	/* Bit [10:0] - Packet length */
+#define SPI_LEN_S			0
+
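+/*
+ * Illustrative sketch (editorial addition, not part of the original header):
+ * the five fields above describe the layout of the 32-bit gSPI command word.
+ * Assuming BITFIELD_MASK(n) expands to an n-bit wide mask, a command word
+ * would be composed roughly as
+ *
+ *	cmd = ((rw   & SPI_RW_FLAG_M)  << SPI_RW_FLAG_S)  |
+ *	      ((fix  & SPI_ACCESS_M)   << SPI_ACCESS_S)   |
+ *	      ((func & SPI_FUNCTION_M) << SPI_FUNCTION_S) |
+ *	      ((addr & SPI_REG_ADDR_M) << SPI_REG_ADDR_S) |
+ *	      ((len  & SPI_LEN_M)      << SPI_LEN_S);
+ *
+ * The variable names (rw, fix, func, addr, len) are hypothetical; only the
+ * macro names come from this header.
+ */
+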
+#endif /* _BCM_SPI_BRCM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
new file mode 100644
index 0000000..f1e9bfe
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
@@ -0,0 +1,973 @@
+/*
+ * SROM format definition.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsrom_fmt.h 646789 2016-06-30 19:43:02Z $
+ */
+
+#ifndef	_bcmsrom_fmt_h_
+#define	_bcmsrom_fmt_h_
+
+#define SROM_MAXREV		13	/* max revision supported by driver */
+
+/* Maximum srom: 12 Kilobits == 1536 bytes */
+
+#define	SROM_MAX		1536
+#define SROM_MAXW		594
+
+#ifdef LARGE_NVRAM_MAXSZ
+#define VARS_MAX                LARGE_NVRAM_MAXSZ
+#else
+#define VARS_MAX                4096
+#endif /* LARGE_NVRAM_MAXSZ */
+
+/* PCI fields */
+#define PCI_F0DEVID		48
+
+
+#define	SROM_WORDS		64
+
+#define SROM3_SWRGN_OFF		28	/* s/w region offset in words */
+
+#define	SROM_SSID		2
+#define	SROM_SVID		3
+
+#define	SROM_WL1LHMAXP		29
+
+#define	SROM_WL1LPAB0		30
+#define	SROM_WL1LPAB1		31
+#define	SROM_WL1LPAB2		32
+
+#define	SROM_WL1HPAB0		33
+#define	SROM_WL1HPAB1		34
+#define	SROM_WL1HPAB2		35
+
+#define	SROM_MACHI_IL0		36
+#define	SROM_MACMID_IL0		37
+#define	SROM_MACLO_IL0		38
+#define	SROM_MACHI_ET0		39
+#define	SROM_MACMID_ET0		40
+#define	SROM_MACLO_ET0		41
+#define	SROM_MACHI_ET1		42
+#define	SROM_MACMID_ET1		43
+#define	SROM_MACLO_ET1		44
+#define	SROM3_MACHI		37
+#define	SROM3_MACMID		38
+#define	SROM3_MACLO		39
+
+#define	SROM_BXARSSI2G		40
+#define	SROM_BXARSSI5G		41
+
+#define	SROM_TRI52G		42
+#define	SROM_TRI5GHL		43
+
+#define	SROM_RXPO52G		45
+
+#define	SROM2_ENETPHY		45
+
+#define	SROM_AABREV		46
+/* Fields in AABREV */
+#define	SROM_BR_MASK		0x00ff
+#define	SROM_CC_MASK		0x0f00
+#define	SROM_CC_SHIFT		8
+#define	SROM_AA0_MASK		0x3000
+#define	SROM_AA0_SHIFT		12
+#define	SROM_AA1_MASK		0xc000
+#define	SROM_AA1_SHIFT		14
+
+#define	SROM_WL0PAB0		47
+#define	SROM_WL0PAB1		48
+#define	SROM_WL0PAB2		49
+
+#define	SROM_LEDBH10		50
+#define	SROM_LEDBH32		51
+
+#define	SROM_WL10MAXP		52
+
+#define	SROM_WL1PAB0		53
+#define	SROM_WL1PAB1		54
+#define	SROM_WL1PAB2		55
+
+#define	SROM_ITT		56
+
+#define	SROM_BFL		57
+#define	SROM_BFL2		28
+#define	SROM3_BFL2		61
+
+#define	SROM_AG10		58
+
+#define	SROM_CCODE		59
+
+#define	SROM_OPO		60
+
+#define	SROM3_LEDDC		62
+
+#define	SROM_CRCREV		63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accommodate
+ * MIMO features. It assumes up to two PCIe functions and 440 bytes
+ * of usable srom, i.e. the usable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define	SROM4_WORDS		220
+
+#define	SROM4_SIGN		32
+#define	SROM4_SIGNATURE		0x5372
+
+#define	SROM4_BREV		33
+
+#define	SROM4_BFL0		34
+#define	SROM4_BFL1		35
+#define	SROM4_BFL2		36
+#define	SROM4_BFL3		37
+#define	SROM5_BFL0		37
+#define	SROM5_BFL1		38
+#define	SROM5_BFL2		39
+#define	SROM5_BFL3		40
+
+#define	SROM4_MACHI		38
+#define	SROM4_MACMID		39
+#define	SROM4_MACLO		40
+#define	SROM5_MACHI		41
+#define	SROM5_MACMID		42
+#define	SROM5_MACLO		43
+
+#define	SROM4_CCODE		41
+#define	SROM4_REGREV		42
+#define	SROM5_CCODE		34
+#define	SROM5_REGREV		35
+
+#define	SROM4_LEDBH10		43
+#define	SROM4_LEDBH32		44
+#define	SROM5_LEDBH10		59
+#define	SROM5_LEDBH32		60
+
+#define	SROM4_LEDDC		45
+#define	SROM5_LEDDC		45
+
+#define	SROM4_AA		46
+#define	SROM4_AA2G_MASK		0x00ff
+#define	SROM4_AA2G_SHIFT	0
+#define	SROM4_AA5G_MASK		0xff00
+#define	SROM4_AA5G_SHIFT	8
+
+#define	SROM4_AG10		47
+#define	SROM4_AG32		48
+
+#define	SROM4_TXPID2G		49
+#define	SROM4_TXPID5G		51
+#define	SROM4_TXPID5GL		53
+#define	SROM4_TXPID5GH		55
+
+#define SROM4_TXRXC		61
+#define SROM4_TXCHAIN_MASK	0x000f
+#define SROM4_TXCHAIN_SHIFT	0
+#define SROM4_RXCHAIN_MASK	0x00f0
+#define SROM4_RXCHAIN_SHIFT	4
+#define SROM4_SWITCH_MASK	0xff00
+#define SROM4_SWITCH_SHIFT	8
+
+
+/* Per-path fields */
+#define	MAX_PATH_SROM		4
+#define	SROM4_PATH0		64
+#define	SROM4_PATH1		87
+#define	SROM4_PATH2		110
+#define	SROM4_PATH3		133
+
+#define	SROM4_2G_ITT_MAXP	0
+#define	SROM4_2G_PA		1
+#define	SROM4_5G_ITT_MAXP	5
+#define	SROM4_5GLH_MAXP		6
+#define	SROM4_5G_PA		7
+#define	SROM4_5GL_PA		11
+#define	SROM4_5GH_PA		15
+
+/* Fields in the ITT_MAXP and 5GLH_MAXP words */
+#define	B2G_MAXP_MASK		0xff
+#define	B2G_ITT_SHIFT		8
+#define	B5G_MAXP_MASK		0xff
+#define	B5G_ITT_SHIFT		8
+#define	B5GH_MAXP_MASK		0xff
+#define	B5GL_MAXP_SHIFT		8
+
+/* All the myriad power offsets */
+#define	SROM4_2G_CCKPO		156
+#define	SROM4_2G_OFDMPO		157
+#define	SROM4_5G_OFDMPO		159
+#define	SROM4_5GL_OFDMPO	161
+#define	SROM4_5GH_OFDMPO	163
+#define	SROM4_2G_MCSPO		165
+#define	SROM4_5G_MCSPO		173
+#define	SROM4_5GL_MCSPO		181
+#define	SROM4_5GH_MCSPO		189
+#define	SROM4_CDDPO		197
+#define	SROM4_STBCPO		198
+#define	SROM4_BW40PO		199
+#define	SROM4_BWDUPPO		200
+
+#define	SROM4_CRCREV		219
+
+
+/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6.
+ * This is a combined srom for both MIMO and SISO boards, usable in
+ * the .130 4-kilobit OTP with hardware redundancy.
+ */
+
+#define	SROM8_SIGN		64
+
+#define	SROM8_BREV		65
+
+#define	SROM8_BFL0		66
+#define	SROM8_BFL1		67
+#define	SROM8_BFL2		68
+#define	SROM8_BFL3		69
+
+#define	SROM8_MACHI		70
+#define	SROM8_MACMID		71
+#define	SROM8_MACLO		72
+
+#define	SROM8_CCODE		73
+#define	SROM8_REGREV		74
+
+#define	SROM8_LEDBH10		75
+#define	SROM8_LEDBH32		76
+
+#define	SROM8_LEDDC		77
+
+#define	SROM8_AA		78
+
+#define	SROM8_AG10		79
+#define	SROM8_AG32		80
+
+#define	SROM8_TXRXC		81
+
+#define	SROM8_BXARSSI2G		82
+#define	SROM8_BXARSSI5G		83
+#define	SROM8_TRI52G		84
+#define	SROM8_TRI5GHL		85
+#define	SROM8_RXPO52G		86
+
+#define SROM8_FEM2G		87
+#define SROM8_FEM5G		88
+#define SROM8_FEM_ANTSWLUT_MASK		0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT	11
+#define SROM8_FEM_TR_ISO_MASK		0x0700
+#define SROM8_FEM_TR_ISO_SHIFT		8
+#define SROM8_FEM_PDET_RANGE_MASK	0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT	3
+#define SROM8_FEM_EXTPA_GAIN_MASK	0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT	1
+#define SROM8_FEM_TSSIPOS_MASK		0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT		0
+
+#define SROM8_THERMAL		89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS		90
+#define SROM8_TS_SLP_OPT_CORRX	91
+/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
+#define SROM8_FOC_HWIQ_IQSWP	92
+
+#define SROM8_EXTLNAGAIN        93
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA	94
+
+/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */
+#define SROM8_MPWR_1_AND_2	95
+
+
+/* Per-path offsets & fields */
+#define	SROM8_PATH0		96
+#define	SROM8_PATH1		112
+#define	SROM8_PATH2		128
+#define	SROM8_PATH3		144
+
+#define	SROM8_2G_ITT_MAXP	0
+#define	SROM8_2G_PA		1
+#define	SROM8_5G_ITT_MAXP	4
+#define	SROM8_5GLH_MAXP		5
+#define	SROM8_5G_PA		6
+#define	SROM8_5GL_PA		9
+#define	SROM8_5GH_PA		12
+
+/* All the myriad power offsets */
+#define	SROM8_2G_CCKPO		160
+
+#define	SROM8_2G_OFDMPO		161
+#define	SROM8_5G_OFDMPO		163
+#define	SROM8_5GL_OFDMPO	165
+#define	SROM8_5GH_OFDMPO	167
+
+#define	SROM8_2G_MCSPO		169
+#define	SROM8_5G_MCSPO		177
+#define	SROM8_5GL_MCSPO		185
+#define	SROM8_5GH_MCSPO		193
+
+#define	SROM8_CDDPO		201
+#define	SROM8_STBCPO		202
+#define	SROM8_BW40PO		203
+#define	SROM8_BWDUPPO		204
+
+/* SISO PA parameters are in the path0 spaces */
+#define	SROM8_SISO		96
+
+/* Legacy names for SISO PA parameters */
+#define	SROM8_W0_ITTMAXP	(SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define	SROM8_W0_PAB0		(SROM8_SISO + SROM8_2G_PA)
+#define	SROM8_W0_PAB1		(SROM8_SISO + SROM8_2G_PA + 1)
+#define	SROM8_W0_PAB2		(SROM8_SISO + SROM8_2G_PA + 2)
+#define	SROM8_W1_ITTMAXP	(SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define	SROM8_W1_MAXP_LCHC	(SROM8_SISO + SROM8_5GLH_MAXP)
+#define	SROM8_W1_PAB0		(SROM8_SISO + SROM8_5G_PA)
+#define	SROM8_W1_PAB1		(SROM8_SISO + SROM8_5G_PA + 1)
+#define	SROM8_W1_PAB2		(SROM8_SISO + SROM8_5G_PA + 2)
+#define	SROM8_W1_PAB0_LC	(SROM8_SISO + SROM8_5GL_PA)
+#define	SROM8_W1_PAB1_LC	(SROM8_SISO + SROM8_5GL_PA + 1)
+#define	SROM8_W1_PAB2_LC	(SROM8_SISO + SROM8_5GL_PA + 2)
+#define	SROM8_W1_PAB0_HC	(SROM8_SISO + SROM8_5GH_PA)
+#define	SROM8_W1_PAB1_HC	(SROM8_SISO + SROM8_5GH_PA + 1)
+#define	SROM8_W1_PAB2_HC	(SROM8_SISO + SROM8_5GH_PA + 2)
+
+#define	SROM8_CRCREV		219
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20	160
+#define SROM9_2GPO_CCKBW20UL	161
+#define SROM9_2GPO_LOFDMBW20	162
+#define SROM9_2GPO_LOFDMBW20UL	164
+
+#define SROM9_5GLPO_LOFDMBW20	166
+#define SROM9_5GLPO_LOFDMBW20UL	168
+#define SROM9_5GMPO_LOFDMBW20	170
+#define SROM9_5GMPO_LOFDMBW20UL	172
+#define SROM9_5GHPO_LOFDMBW20	174
+#define SROM9_5GHPO_LOFDMBW20UL	176
+
+#define SROM9_2GPO_MCSBW20	178
+#define SROM9_2GPO_MCSBW20UL	180
+#define SROM9_2GPO_MCSBW40	182
+
+#define SROM9_5GLPO_MCSBW20	184
+#define SROM9_5GLPO_MCSBW20UL	186
+#define SROM9_5GLPO_MCSBW40	188
+#define SROM9_5GMPO_MCSBW20	190
+#define SROM9_5GMPO_MCSBW20UL	192
+#define SROM9_5GMPO_MCSBW40	194
+#define SROM9_5GHPO_MCSBW20	196
+#define SROM9_5GHPO_MCSBW20UL	198
+#define SROM9_5GHPO_MCSBW40	200
+
+#define SROM9_PO_MCS32		202
+#define SROM9_PO_LOFDM40DUP	203
+#define SROM9_EU_EDCRSTH	204
+#define SROM10_EU_EDCRSTH	204
+#define SROM8_RXGAINERR_2G	205
+#define SROM8_RXGAINERR_5GL	206
+#define SROM8_RXGAINERR_5GM	207
+#define SROM8_RXGAINERR_5GH	208
+#define SROM8_RXGAINERR_5GU	209
+#define SROM8_SUBBAND_PPR	210
+#define SROM8_PCIEINGRESS_WAR	211
+#define SROM8_EU_EDCRSTH	212
+#define SROM9_SAR		212
+
+#define SROM8_NOISELVL_2G	213
+#define SROM8_NOISELVL_5GL	214
+#define SROM8_NOISELVL_5GM	215
+#define SROM8_NOISELVL_5GH	216
+#define SROM8_NOISELVL_5GU	217
+#define SROM8_NOISECALOFFSET	218
+
+#define SROM9_REV_CRC		219
+
+#define SROM10_CCKPWROFFSET	218
+#define SROM10_SIGN		219
+#define SROM10_SWCTRLMAP_2G	220
+#define SROM10_CRCREV		229
+
+#define	SROM10_WORDS		230
+#define	SROM10_SIGNATURE	SROM4_SIGNATURE
+
+
+/* SROM REV 11 */
+#define SROM11_BREV			65
+
+#define SROM11_BFL0			66
+#define SROM11_BFL1			67
+#define SROM11_BFL2			68
+#define SROM11_BFL3			69
+#define SROM11_BFL4			70
+#define SROM11_BFL5			71
+
+#define SROM11_MACHI			72
+#define SROM11_MACMID			73
+#define SROM11_MACLO			74
+
+#define SROM11_CCODE			75
+#define SROM11_REGREV			76
+
+#define SROM11_LEDBH10			77
+#define SROM11_LEDBH32			78
+
+#define SROM11_LEDDC			79
+
+#define SROM11_AA			80
+
+#define SROM11_AGBG10			81
+#define SROM11_AGBG2A0			82
+#define SROM11_AGA21			83
+
+#define SROM11_TXRXC			84
+
+#define SROM11_FEM_CFG1			85
+#define SROM11_FEM_CFG2			86
+
+/* Masks and offsets for FEM_CFG */
+#define SROM11_FEMCTRL_MASK		0xf800
+#define SROM11_FEMCTRL_SHIFT		11
+#define SROM11_PAPDCAP_MASK		0x0400
+#define SROM11_PAPDCAP_SHIFT		10
+#define SROM11_TWORANGETSSI_MASK	0x0200
+#define SROM11_TWORANGETSSI_SHIFT	9
+#define SROM11_PDGAIN_MASK		0x01f0
+#define SROM11_PDGAIN_SHIFT		4
+#define SROM11_EPAGAIN_MASK		0x000e
+#define SROM11_EPAGAIN_SHIFT		1
+#define SROM11_TSSIPOSSLOPE_MASK	0x0001
+#define SROM11_TSSIPOSSLOPE_SHIFT	0
+#define SROM11_GAINCTRLSPH_MASK		0xf800
+#define SROM11_GAINCTRLSPH_SHIFT	11
+
+#define SROM11_THERMAL			87
+#define SROM11_MPWR_RAWTS		88
+#define SROM11_TS_SLP_OPT_CORRX		89
+#define SROM11_XTAL_FREQ		90
+#define SROM11_5GB0_4080_W0_A1          91
+#define SROM11_PHYCAL_TEMPDELTA  	92
+#define SROM11_MPWR_1_AND_2 		93
+#define SROM11_5GB0_4080_W1_A1          94
+#define SROM11_TSSIFLOOR_2G 		95
+#define SROM11_TSSIFLOOR_5GL 		96
+#define SROM11_TSSIFLOOR_5GM 		97
+#define SROM11_TSSIFLOOR_5GH 		98
+#define SROM11_TSSIFLOOR_5GU 		99
+
+/* Masks and offsets for Thermal parameters */
+#define SROM11_TEMPS_PERIOD_MASK	0xf0
+#define SROM11_TEMPS_PERIOD_SHIFT	4
+#define SROM11_TEMPS_HYSTERESIS_MASK	0x0f
+#define SROM11_TEMPS_HYSTERESIS_SHIFT	0
+#define SROM11_TEMPCORRX_MASK		0xfc
+#define SROM11_TEMPCORRX_SHIFT		2
+#define SROM11_TEMPSENSE_OPTION_MASK	0x3
+#define SROM11_TEMPSENSE_OPTION_SHIFT	0
+
+#define SROM11_PDOFF_2G_40M_A0_MASK     0x000f
+#define SROM11_PDOFF_2G_40M_A0_SHIFT    0
+#define SROM11_PDOFF_2G_40M_A1_MASK     0x00f0
+#define SROM11_PDOFF_2G_40M_A1_SHIFT    4
+#define SROM11_PDOFF_2G_40M_A2_MASK     0x0f00
+#define SROM11_PDOFF_2G_40M_A2_SHIFT    8
+#define SROM11_PDOFF_2G_40M_VALID_MASK  0x8000
+#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15
+
+#define SROM11_PDOFF_2G_40M     	100
+#define SROM11_PDOFF_40M_A0		101
+#define SROM11_PDOFF_40M_A1		102
+#define SROM11_PDOFF_40M_A2		103
+#define SROM11_5GB0_4080_W2_A1          103
+#define SROM11_PDOFF_80M_A0		104
+#define SROM11_PDOFF_80M_A1		105
+#define SROM11_PDOFF_80M_A2		106
+#define SROM11_5GB1_4080_W0_A1          106
+
+#define SROM11_SUBBAND5GVER 		107
+
+/* Per-path fields and offset */
+#define	MAX_PATH_SROM_11		3
+#define SROM11_PATH0			108
+#define SROM11_PATH1			128
+#define SROM11_PATH2			148
+
+#define	SROM11_2G_MAXP			0
+#define SROM11_5GB1_4080_PA             0
+#define	SROM11_2G_PA			1
+#define SROM11_5GB2_4080_PA             2
+#define	SROM11_RXGAINS1			4
+#define	SROM11_RXGAINS			5
+#define SROM11_5GB3_4080_PA             5
+#define	SROM11_5GB1B0_MAXP		6
+#define	SROM11_5GB3B2_MAXP		7
+#define	SROM11_5GB0_PA			8
+#define	SROM11_5GB1_PA			11
+#define	SROM11_5GB2_PA			14
+#define	SROM11_5GB3_PA			17
+
+/* Masks and offsets for rxgains */
+#define SROM11_RXGAINS5GTRELNABYPA_MASK		0x8000
+#define SROM11_RXGAINS5GTRELNABYPA_SHIFT	15
+#define SROM11_RXGAINS5GTRISOA_MASK		0x7800
+#define SROM11_RXGAINS5GTRISOA_SHIFT		11
+#define SROM11_RXGAINS5GELNAGAINA_MASK		0x0700
+#define SROM11_RXGAINS5GELNAGAINA_SHIFT		8
+#define SROM11_RXGAINS2GTRELNABYPA_MASK		0x0080
+#define SROM11_RXGAINS2GTRELNABYPA_SHIFT	7
+#define SROM11_RXGAINS2GTRISOA_MASK		0x0078
+#define SROM11_RXGAINS2GTRISOA_SHIFT		3
+#define SROM11_RXGAINS2GELNAGAINA_MASK		0x0007
+#define SROM11_RXGAINS2GELNAGAINA_SHIFT		0
+#define SROM11_RXGAINS5GHTRELNABYPA_MASK	0x8000
+#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT	15
+#define SROM11_RXGAINS5GHTRISOA_MASK		0x7800
+#define SROM11_RXGAINS5GHTRISOA_SHIFT		11
+#define SROM11_RXGAINS5GHELNAGAINA_MASK		0x0700
+#define SROM11_RXGAINS5GHELNAGAINA_SHIFT	8
+#define SROM11_RXGAINS5GMTRELNABYPA_MASK	0x0080
+#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT	7
+#define SROM11_RXGAINS5GMTRISOA_MASK		0x0078
+#define SROM11_RXGAINS5GMTRISOA_SHIFT		3
+#define SROM11_RXGAINS5GMELNAGAINA_MASK		0x0007
+#define SROM11_RXGAINS5GMELNAGAINA_SHIFT	0
+
+/* Power per rate */
+#define SROM11_CCKBW202GPO		168
+#define SROM11_CCKBW20UL2GPO		169
+#define SROM11_MCSBW202GPO		170
+#define SROM11_MCSBW202GPO_1		171
+#define SROM11_MCSBW402GPO		172
+#define SROM11_MCSBW402GPO_1		173
+#define SROM11_DOT11AGOFDMHRBW202GPO	174
+#define SROM11_OFDMLRBW202GPO		175
+
+#define SROM11_MCSBW205GLPO 		176
+#define SROM11_MCSBW205GLPO_1		177
+#define SROM11_MCSBW405GLPO 		178
+#define SROM11_MCSBW405GLPO_1		179
+#define SROM11_MCSBW805GLPO 		180
+#define SROM11_MCSBW805GLPO_1		181
+#define SROM11_RPCAL_2G			182
+#define SROM11_RPCAL_5GL		183
+#define SROM11_MCSBW205GMPO 		184
+#define SROM11_MCSBW205GMPO_1		185
+#define SROM11_MCSBW405GMPO 		186
+#define SROM11_MCSBW405GMPO_1		187
+#define SROM11_MCSBW805GMPO 		188
+#define SROM11_MCSBW805GMPO_1		189
+#define SROM11_RPCAL_5GM		190
+#define SROM11_RPCAL_5GH		191
+#define SROM11_MCSBW205GHPO 		192
+#define SROM11_MCSBW205GHPO_1		193
+#define SROM11_MCSBW405GHPO 		194
+#define SROM11_MCSBW405GHPO_1		195
+#define SROM11_MCSBW805GHPO 		196
+#define SROM11_MCSBW805GHPO_1		197
+#define SROM11_RPCAL_5GU		198
+#define SROM11_PDOFF_2G_CCK	        199
+#define SROM11_MCSLR5GLPO		200
+#define SROM11_MCSLR5GMPO		201
+#define SROM11_MCSLR5GHPO		202
+
+#define SROM11_SB20IN40HRPO		203
+#define SROM11_SB20IN80AND160HR5GLPO 	204
+#define SROM11_SB40AND80HR5GLPO		205
+#define SROM11_SB20IN80AND160HR5GMPO 	206
+#define SROM11_SB40AND80HR5GMPO		207
+#define SROM11_SB20IN80AND160HR5GHPO 	208
+#define SROM11_SB40AND80HR5GHPO		209
+#define SROM11_SB20IN40LRPO 		210
+#define SROM11_SB20IN80AND160LR5GLPO	211
+#define SROM11_SB40AND80LR5GLPO		212
+#define SROM11_TXIDXCAP2G               212
+#define SROM11_SB20IN80AND160LR5GMPO	213
+#define SROM11_SB40AND80LR5GMPO		214
+#define SROM11_TXIDXCAP5G               214
+#define SROM11_SB20IN80AND160LR5GHPO	215
+#define SROM11_SB40AND80LR5GHPO		216
+
+#define SROM11_DOT11AGDUPHRPO 		217
+#define SROM11_DOT11AGDUPLRPO		218
+
+/* MISC */
+#define SROM11_PCIEINGRESS_WAR		220
+#define SROM11_SAR			221
+
+#define SROM11_NOISELVL_2G		222
+#define SROM11_NOISELVL_5GL 		223
+#define SROM11_NOISELVL_5GM 		224
+#define SROM11_NOISELVL_5GH 		225
+#define SROM11_NOISELVL_5GU 		226
+
+#define SROM11_RXGAINERR_2G		227
+#define SROM11_RXGAINERR_5GL		228
+#define SROM11_RXGAINERR_5GM		229
+#define SROM11_RXGAINERR_5GH		230
+#define SROM11_RXGAINERR_5GU		231
+
+#define SROM11_EU_EDCRSTH	        232
+#define SROM12_EU_EDCRSTH	        232
+
+#define SROM11_SIGN 			64
+#define SROM11_CRCREV 			233
+
+#define	SROM11_WORDS				234
+#define	SROM11_SIGNATURE		0x0634
+
+
+/* SROM REV 12 */
+#define SROM12_SIGN                     64
+#define SROM12_WORDS			512
+#define SROM12_SIGNATURE		0x8888
+#define SROM12_CRCREV			511
+
+#define SROM12_BFL6				486
+#define SROM12_BFL7				487
+
+#define SROM12_MCSBW205GX1PO		234
+#define SROM12_MCSBW205GX1PO_1		235
+#define SROM12_MCSBW405GX1PO		236
+#define SROM12_MCSBW405GX1PO_1		237
+#define SROM12_MCSBW805GX1PO		238
+#define SROM12_MCSBW805GX1PO_1		239
+#define SROM12_MCSLR5GX1PO			240
+#define SROM12_SB40AND80LR5GX1PO		241
+#define SROM12_SB20IN80AND160LR5GX1PO	242
+#define SROM12_SB20IN80AND160HR5GX1PO	243
+#define SROM12_SB40AND80HR5GX1PO		244
+
+#define SROM12_MCSBW205GX2PO		245
+#define SROM12_MCSBW205GX2PO_1		246
+#define SROM12_MCSBW405GX2PO		247
+#define SROM12_MCSBW405GX2PO_1		248
+#define SROM12_MCSBW805GX2PO		249
+#define SROM12_MCSBW805GX2PO_1		250
+#define SROM12_MCSLR5GX2PO			251
+#define SROM12_SB40AND80LR5GX2PO	252
+#define SROM12_SB20IN80AND160LR5GX2PO	253
+#define SROM12_SB20IN80AND160HR5GX2PO	254
+#define SROM12_SB40AND80HR5GX2PO		255
+
+/* MISC */
+#define	SROM12_RXGAINS10			483
+#define	SROM12_RXGAINS11			484
+#define	SROM12_RXGAINS12			485
+
+/* Per-path fields and offset */
+#define	MAX_PATH_SROM_12			3
+#define SROM12_PATH0				256
+#define SROM12_PATH1				328
+#define SROM12_PATH2				400
+
+#define	SROM12_5GB42G_MAXP				0
+#define SROM12_2GB0_PA					1
+#define SROM12_2GB0_PA_W0				1
+#define SROM12_2GB0_PA_W1				2
+#define SROM12_2GB0_PA_W2				3
+#define SROM12_2GB0_PA_W3				4
+
+#define	SROM12_RXGAINS					5
+#define	SROM12_5GB1B0_MAXP				6
+#define	SROM12_5GB3B2_MAXP				7
+
+#define SROM12_5GB0_PA					8
+#define SROM12_5GB0_PA_W0				8
+#define SROM12_5GB0_PA_W1				9
+#define SROM12_5GB0_PA_W2				10
+#define SROM12_5GB0_PA_W3				11
+
+#define SROM12_5GB1_PA					12
+#define SROM12_5GB1_PA_W0				12
+#define SROM12_5GB1_PA_W1				13
+#define SROM12_5GB1_PA_W2				14
+#define SROM12_5GB1_PA_W3				15
+
+#define SROM12_5GB2_PA					16
+#define SROM12_5GB2_PA_W0				16
+#define SROM12_5GB2_PA_W1				17
+#define SROM12_5GB2_PA_W2				18
+#define SROM12_5GB2_PA_W3				19
+
+#define SROM12_5GB3_PA					20
+#define SROM12_5GB3_PA_W0				20
+#define SROM12_5GB3_PA_W1				21
+#define SROM12_5GB3_PA_W2				22
+#define SROM12_5GB3_PA_W3				23
+
+#define SROM12_5GB4_PA					24
+#define SROM12_5GB4_PA_W0				24
+#define SROM12_5GB4_PA_W1				25
+#define SROM12_5GB4_PA_W2				26
+#define SROM12_5GB4_PA_W3				27
+
+#define SROM12_2G40B0_PA				28
+#define SROM12_2G40B0_PA_W0				28
+#define SROM12_2G40B0_PA_W1				29
+#define SROM12_2G40B0_PA_W2				30
+#define SROM12_2G40B0_PA_W3				31
+
+#define SROM12_5G40B0_PA				32
+#define SROM12_5G40B0_PA_W0				32
+#define SROM12_5G40B0_PA_W1				33
+#define SROM12_5G40B0_PA_W2				34
+#define SROM12_5G40B0_PA_W3				35
+
+#define SROM12_5G40B1_PA				36
+#define SROM12_5G40B1_PA_W0				36
+#define SROM12_5G40B1_PA_W1				37
+#define SROM12_5G40B1_PA_W2				38
+#define SROM12_5G40B1_PA_W3				39
+
+#define SROM12_5G40B2_PA				40
+#define SROM12_5G40B2_PA_W0				40
+#define SROM12_5G40B2_PA_W1				41
+#define SROM12_5G40B2_PA_W2				42
+#define SROM12_5G40B2_PA_W3				43
+
+#define SROM12_5G40B3_PA				44
+#define SROM12_5G40B3_PA_W0				44
+#define SROM12_5G40B3_PA_W1				45
+#define SROM12_5G40B3_PA_W2				46
+#define SROM12_5G40B3_PA_W3				47
+
+#define SROM12_5G40B4_PA				48
+#define SROM12_5G40B4_PA_W0				48
+#define SROM12_5G40B4_PA_W1				49
+#define SROM12_5G40B4_PA_W2				50
+#define SROM12_5G40B4_PA_W3				51
+
+#define SROM12_5G80B0_PA				52
+#define SROM12_5G80B0_PA_W0				52
+#define SROM12_5G80B0_PA_W1				53
+#define SROM12_5G80B0_PA_W2				54
+#define SROM12_5G80B0_PA_W3				55
+
+#define SROM12_5G80B1_PA				56
+#define SROM12_5G80B1_PA_W0				56
+#define SROM12_5G80B1_PA_W1				57
+#define SROM12_5G80B1_PA_W2				58
+#define SROM12_5G80B1_PA_W3				59
+
+#define SROM12_5G80B2_PA				60
+#define SROM12_5G80B2_PA_W0				60
+#define SROM12_5G80B2_PA_W1				61
+#define SROM12_5G80B2_PA_W2				62
+#define SROM12_5G80B2_PA_W3				63
+
+#define SROM12_5G80B3_PA				64
+#define SROM12_5G80B3_PA_W0				64
+#define SROM12_5G80B3_PA_W1				65
+#define SROM12_5G80B3_PA_W2				66
+#define SROM12_5G80B3_PA_W3				67
+
+#define SROM12_5G80B4_PA				68
+#define SROM12_5G80B4_PA_W0				68
+#define SROM12_5G80B4_PA_W1				69
+#define SROM12_5G80B4_PA_W2				70
+#define SROM12_5G80B4_PA_W3				71
+
+/* PD offset */
+#define SROM12_PDOFF_2G_CCK				472
+
+#define SROM12_PDOFF_20in40M_5G_B0		473
+#define SROM12_PDOFF_20in40M_5G_B1		474
+#define SROM12_PDOFF_20in40M_5G_B2		475
+#define SROM12_PDOFF_20in40M_5G_B3		476
+#define SROM12_PDOFF_20in40M_5G_B4		477
+
+#define SROM12_PDOFF_40in80M_5G_B0		478
+#define SROM12_PDOFF_40in80M_5G_B1		479
+#define SROM12_PDOFF_40in80M_5G_B2		480
+#define SROM12_PDOFF_40in80M_5G_B3		481
+#define SROM12_PDOFF_40in80M_5G_B4		482
+
+#define SROM12_PDOFF_20in80M_5G_B0		488
+#define SROM12_PDOFF_20in80M_5G_B1		489
+#define SROM12_PDOFF_20in80M_5G_B2		490
+#define SROM12_PDOFF_20in80M_5G_B3		491
+#define SROM12_PDOFF_20in80M_5G_B4		492
+
+#define SROM13_PDOFFSET20IN40M5GCORE3           98
+#define SROM13_PDOFFSET20IN40M5GCORE3_1         99
+#define SROM13_PDOFFSET20IN80M5GCORE3           510
+#define SROM13_PDOFFSET20IN80M5GCORE3_1         511
+#define SROM13_PDOFFSET40IN80M5GCORE3           105
+#define SROM13_PDOFFSET40IN80M5GCORE3_1         106
+
+#define SROM13_PDOFFSET20IN40M2G                94
+#define SROM13_PDOFFSET20IN40M2GCORE3           95
+
+#define SROM12_GPDN_L				91  /* GPIO pull down bits [15:0]  */
+#define SROM12_GPDN_H				233 /* GPIO pull down bits [31:16] */
+
+#define SROM13_SIGN                     64
+#define SROM13_WORDS                    590
+#define SROM13_SIGNATURE                0x4d55
+#define SROM13_CRCREV                   589
+
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_13                        4
+#define SROM13_PATH0                            256
+#define SROM13_PATH1                            328
+#define SROM13_PATH2                            400
+#define SROM13_PATH3                            512
+#define SROM13_RXGAINS                         5
+
+#define SROM13_XTALFREQ                 90
+
+#define SROM13_PDOFFSET20IN40M2G        94
+#define SROM13_PDOFFSET20IN40M2GCORE3   95
+#define SROM13_SB20IN40HRLRPOX          96
+
+#define SROM13_RXGAINS1CORE3            97
+
+#define SROM13_PDOFFSET20IN40M5GCORE3   98
+#define SROM13_PDOFFSET20IN40M5GCORE3_1 99
+
+#define SROM13_ANTGAIN_BANDBGA          100
+
+#define SROM13_RXGAINS2CORE0            101
+#define SROM13_RXGAINS2CORE1            102
+#define SROM13_RXGAINS2CORE2            103
+#define SROM13_RXGAINS2CORE3            104
+
+#define SROM13_PDOFFSET40IN80M5GCORE3   105
+#define SROM13_PDOFFSET40IN80M5GCORE3_1 106
+
+/* power per rate */
+#define SROM13_MCS1024QAM2GPO           108
+#define SROM13_MCS1024QAM5GLPO          109
+#define SROM13_MCS1024QAM5GLPO_1        110
+#define SROM13_MCS1024QAM5GMPO          111
+#define SROM13_MCS1024QAM5GMPO_1        112
+#define SROM13_MCS1024QAM5GHPO          113
+#define SROM13_MCS1024QAM5GHPO_1        114
+#define SROM13_MCS1024QAM5GX1PO         115
+#define SROM13_MCS1024QAM5GX1PO_1       116
+#define SROM13_MCS1024QAM5GX2PO         117
+#define SROM13_MCS1024QAM5GX2PO_1       118
+
+#define SROM13_MCSBW1605GLPO            119
+#define SROM13_MCSBW1605GLPO_1          120
+#define SROM13_MCSBW1605GMPO            121
+#define SROM13_MCSBW1605GMPO_1          122
+#define SROM13_MCSBW1605GHPO            123
+#define SROM13_MCSBW1605GHPO_1          124
+
+#define SROM13_MCSBW1605GX1PO           125
+#define SROM13_MCSBW1605GX1PO_1         126
+#define SROM13_MCSBW1605GX2PO           127
+#define SROM13_MCSBW1605GX2PO_1         128
+
+#define SROM13_ULBPPROFFS5GB0		129
+#define SROM13_ULBPPROFFS5GB1           130
+#define SROM13_ULBPPROFFS5GB2           131
+#define SROM13_ULBPPROFFS5GB3           132
+#define SROM13_ULBPPROFFS5GB4           133
+#define SROM13_ULBPPROFFS2G		134
+
+#define SROM13_MCS8POEXP                135
+#define SROM13_MCS8POEXP_1              136
+#define SROM13_MCS9POEXP                137
+#define SROM13_MCS9POEXP_1              138
+#define SROM13_MCS10POEXP               139
+#define SROM13_MCS10POEXP_1             140
+#define SROM13_MCS11POEXP               141
+#define SROM13_MCS11POEXP_1             142
+#define SROM13_ULBPDOFFS5GB0A0		143
+#define SROM13_ULBPDOFFS5GB0A1          144
+#define SROM13_ULBPDOFFS5GB0A2          145
+#define SROM13_ULBPDOFFS5GB0A3          146
+#define SROM13_ULBPDOFFS5GB1A0          147
+#define SROM13_ULBPDOFFS5GB1A1          148
+#define SROM13_ULBPDOFFS5GB1A2          149
+#define SROM13_ULBPDOFFS5GB1A3          150
+#define SROM13_ULBPDOFFS5GB2A0          151
+#define SROM13_ULBPDOFFS5GB2A1          152
+#define SROM13_ULBPDOFFS5GB2A2          153
+#define SROM13_ULBPDOFFS5GB2A3          154
+#define SROM13_ULBPDOFFS5GB3A0          155
+#define SROM13_ULBPDOFFS5GB3A1          156
+#define SROM13_ULBPDOFFS5GB3A2          157
+#define SROM13_ULBPDOFFS5GB3A3          158
+#define SROM13_ULBPDOFFS5GB4A0          159
+#define SROM13_ULBPDOFFS5GB4A1          160
+#define SROM13_ULBPDOFFS5GB4A2          161
+#define SROM13_ULBPDOFFS5GB4A3          162
+#define SROM13_ULBPDOFFS2GA0		163
+#define SROM13_ULBPDOFFS2GA1		164
+#define SROM13_ULBPDOFFS2GA2		165
+#define SROM13_ULBPDOFFS2GA3		166
+
+#define SROM13_RPCAL5GB4                199
+#define SROM13_RPCAL2GCORE3             101
+#define SROM13_RPCAL5GB01CORE3          102
+#define SROM13_RPCAL5GB23CORE3          103
+
+#define SROM13_EU_EDCRSTH               232
+
+#define SROM13_SWCTRLMAP4_CFG			493
+#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0		494
+#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0		495
+#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0	496
+#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0	497
+#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0		498
+#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0		499
+#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0	500
+#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0	501
+#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4		502
+#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4		503
+#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4	504
+#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4	505
+#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4		506
+#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4		507
+#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4	508
+#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4	509
+
+#define SROM13_PDOFFSET20IN80M5GCORE3   510
+#define SROM13_PDOFFSET20IN80M5GCORE3_1 511
+
+#define SROM13_NOISELVLCORE3            584
+#define SROM13_NOISELVLCORE3_1          585
+#define SROM13_RXGAINERRCORE3           586
+#define SROM13_RXGAINERRCORE3_1         587
+
+
+#define SROM16_SIGN                     104
+#define SROM16_WORDS                    512
+#define SROM16_SIGNATURE                0x4347
+#define SROM16_CRCREV                   511
+
+typedef struct {
+	uint8 tssipos;		/* TSSI positive slope, 1: positive, 0: negative */
+	uint8 extpagain;	/* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+	uint8 pdetrange;	/* support 32 combinations of different Pdet dynamic ranges */
+	uint8 triso;		/* TR switch isolation */
+	uint8 antswctrllut;	/* antswctrl lookup table configuration: 32 possible choices */
+} srom_fem_t;
+
+#endif	/* _bcmsrom_fmt_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
new file mode 100644
index 0000000..e855186
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
@@ -0,0 +1,1408 @@
+/*
+ * Table that encodes the srom formats for PCI/PCIe NICs.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsrom_tbl.h 616054 2016-01-29 13:22:24Z $
+ */
+
+#ifndef	_bcmsrom_tbl_h_
+#define	_bcmsrom_tbl_h_
+
+#include "sbpcmcia.h"
+#include "wlioctl.h"
+#include <bcmsrom_fmt.h>
+
+typedef struct {
+	const char *name;
+	uint32	revmask;
+	uint32	flags;
+	uint16	off;
+	uint16	mask;
+} sromvar_t;
+
+#define SRFL_MORE	1		/* value continues as described by the next entry */
+#define	SRFL_NOFFS	2		/* value bits can't be all one's */
+#define	SRFL_PRHEX	4		/* value is in hexadecimal format */
+#define	SRFL_PRSIGN	8		/* value is in signed decimal format */
+#define	SRFL_CCODE	0x10		/* value is in country code format */
+#define	SRFL_ETHADDR	0x20		/* value is an Ethernet address */
+#define SRFL_LEDDC	0x40		/* value is an LED duty cycle */
+#define SRFL_NOVAR	0x80		/* do not generate a nvram param, entry is for mfgc */
+#define SRFL_ARRAY	0x100		/* value is in an array. All elements EXCEPT FOR THE LAST
+					 * ONE in the array should have this flag set.
+					 */
+
+
+#define SROM_DEVID_PCIE	48
+
+/**
+ * Assumptions:
+ * - Ethernet address spans across 3 consecutive words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple words
+ *   (even multiple fields in the same word) with each entry except the last having
+ *   its SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
+ *   bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table. Other
+ *   entries must have non-NULL name.
+ */
+static const sromvar_t pci_sromvars[] = {
+/*	name		revmask		flags		off			mask */
+#if defined(CABLECPE)
+	{"devid",	0xffffff00,	SRFL_PRHEX,	PCI_F0DEVID,		0xffff},
+#elif defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED)
+	{"devid",	0xffffff00,	SRFL_PRHEX, SROM_DEVID_PCIE,		0xffff},
+#else
+	{"devid",	0xffffff00,	SRFL_PRHEX|SRFL_NOVAR,	PCI_F0DEVID,	0xffff},
+#endif 
+	{"boardrev",	0x0000000e,	SRFL_PRHEX,	SROM_AABREV,		SROM_BR_MASK},
+	{"boardrev",	0x000000f0,	SRFL_PRHEX,	SROM4_BREV,		0xffff},
+	{"boardrev",	0xffffff00,	SRFL_PRHEX,	SROM8_BREV,		0xffff},
+	{"boardflags",	0x00000002,	SRFL_PRHEX,	SROM_BFL,		0xffff},
+	{"boardflags",	0x00000004,	SRFL_PRHEX|SRFL_MORE,	SROM_BFL,	0xffff},
+	{"",		0,		0,		SROM_BFL2,		0xffff},
+	{"boardflags",	0x00000008,	SRFL_PRHEX|SRFL_MORE,	SROM_BFL,	0xffff},
+	{"",		0,		0,		SROM3_BFL2,		0xffff},
+	{"boardflags",	0x00000010,	SRFL_PRHEX|SRFL_MORE,	SROM4_BFL0,	0xffff},
+	{"",		0,		0,		SROM4_BFL1,		0xffff},
+	{"boardflags",	0x000000e0,	SRFL_PRHEX|SRFL_MORE,	SROM5_BFL0,	0xffff},
+	{"",		0,		0,		SROM5_BFL1,		0xffff},
+	{"boardflags",	0xffffff00,	SRFL_PRHEX|SRFL_MORE,	SROM8_BFL0,	0xffff},
+	{"",		0,		0,		SROM8_BFL1,		0xffff},
+	{"boardflags2", 0x00000010,	SRFL_PRHEX|SRFL_MORE,	SROM4_BFL2,	0xffff},
+	{"",		0,		0,		SROM4_BFL3,		0xffff},
+	{"boardflags2", 0x000000e0,	SRFL_PRHEX|SRFL_MORE,	SROM5_BFL2,	0xffff},
+	{"",		0,		0,		SROM5_BFL3,		0xffff},
+	{"boardflags2", 0xffffff00,	SRFL_PRHEX|SRFL_MORE,	SROM8_BFL2,	0xffff},
+	{"",		0,		0,		SROM8_BFL3,		0xffff},
+	{"boardtype",	0xfffffffc,	SRFL_PRHEX,	SROM_SSID,		0xffff},
+	{"subvid",	0xfffffffc,	SRFL_PRHEX,	SROM_SVID,		0xffff},
+	{"boardnum",	0x00000006,	0,		SROM_MACLO_IL0,		0xffff},
+	{"boardnum",	0x00000008,	0,		SROM3_MACLO,		0xffff},
+	{"boardnum",	0x00000010,	0,		SROM4_MACLO,		0xffff},
+	{"boardnum",	0x000000e0,	0,		SROM5_MACLO,		0xffff},
+	{"boardnum",	0x00000700,	0,		SROM8_MACLO,		0xffff},
+	{"cc",		0x00000002,	0,		SROM_AABREV,		SROM_CC_MASK},
+	{"regrev",	0x00000008,	0,		SROM_OPO,		0xff00},
+	{"regrev",	0x00000010,	0,		SROM4_REGREV,		0xffff},
+	{"regrev",	0x000000e0,	0,		SROM5_REGREV,		0xffff},
+	{"regrev",	0x00000700,	0,		SROM8_REGREV,		0xffff},
+	{"ledbh0",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH10,		0x00ff},
+	{"ledbh1",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH10,		0xff00},
+	{"ledbh2",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH32,		0x00ff},
+	{"ledbh3",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH32,		0xff00},
+	{"ledbh0",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH10,		0x00ff},
+	{"ledbh1",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH10,		0xff00},
+	{"ledbh2",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH32,		0x00ff},
+	{"ledbh3",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH32,		0xff00},
+	{"ledbh0",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH10,		0x00ff},
+	{"ledbh1",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH10,		0xff00},
+	{"ledbh2",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH32,		0x00ff},
+	{"ledbh3",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH32,		0xff00},
+	{"ledbh0",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH10,		0x00ff},
+	{"ledbh1",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH10,		0xff00},
+	{"ledbh2",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH32,		0x00ff},
+	{"ledbh3",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH32,		0xff00},
+	{"pa0b0",	0x0000000e,	SRFL_PRHEX,	SROM_WL0PAB0,		0xffff},
+	{"pa0b1",	0x0000000e,	SRFL_PRHEX,	SROM_WL0PAB1,		0xffff},
+	{"pa0b2",	0x0000000e,	SRFL_PRHEX,	SROM_WL0PAB2,		0xffff},
+	{"pa0itssit",	0x0000000e,	0,		SROM_ITT,		0x00ff},
+	{"pa0maxpwr",	0x0000000e,	0,		SROM_WL10MAXP,		0x00ff},
+	{"pa0b0",	0x00000700,	SRFL_PRHEX,	SROM8_W0_PAB0,		0xffff},
+	{"pa0b1",	0x00000700,	SRFL_PRHEX,	SROM8_W0_PAB1,		0xffff},
+	{"pa0b2",	0x00000700,	SRFL_PRHEX,	SROM8_W0_PAB2,		0xffff},
+	{"pa0itssit",	0x00000700,	0,		SROM8_W0_ITTMAXP,	0xff00},
+	{"pa0maxpwr",	0x00000700,	0,		SROM8_W0_ITTMAXP,	0x00ff},
+	{"opo",		0x0000000c,	0,		SROM_OPO,		0x00ff},
+	{"opo",		0x00000700,	0,		SROM8_2G_OFDMPO,	0x00ff},
+	{"aa2g",	0x0000000e,	0,		SROM_AABREV,		SROM_AA0_MASK},
+	{"aa2g",	0x000000f0,	0,		SROM4_AA,		0x00ff},
+	{"aa2g",	0x00000700,	0,		SROM8_AA,		0x00ff},
+	{"aa5g",	0x0000000e,	0,		SROM_AABREV,		SROM_AA1_MASK},
+	{"aa5g",	0x000000f0,	0,		SROM4_AA,		0xff00},
+	{"aa5g",	0x00000700,	0,		SROM8_AA,		0xff00},
+	{"ag0",		0x0000000e,	0,		SROM_AG10,		0x00ff},
+	{"ag1",		0x0000000e,	0,		SROM_AG10,		0xff00},
+	{"ag0",		0x000000f0,	0,		SROM4_AG10,		0x00ff},
+	{"ag1",		0x000000f0,	0,		SROM4_AG10,		0xff00},
+	{"ag2",		0x000000f0,	0,		SROM4_AG32,		0x00ff},
+	{"ag3",		0x000000f0,	0,		SROM4_AG32,		0xff00},
+	{"ag0",		0x00000700,	0,		SROM8_AG10,		0x00ff},
+	{"ag1",		0x00000700,	0,		SROM8_AG10,		0xff00},
+	{"ag2",		0x00000700,	0,		SROM8_AG32,		0x00ff},
+	{"ag3",		0x00000700,	0,		SROM8_AG32,		0xff00},
+	{"pa1b0",	0x0000000e,	SRFL_PRHEX,	SROM_WL1PAB0,		0xffff},
+	{"pa1b1",	0x0000000e,	SRFL_PRHEX,	SROM_WL1PAB1,		0xffff},
+	{"pa1b2",	0x0000000e,	SRFL_PRHEX,	SROM_WL1PAB2,		0xffff},
+	{"pa1lob0",	0x0000000c,	SRFL_PRHEX,	SROM_WL1LPAB0,		0xffff},
+	{"pa1lob1",	0x0000000c,	SRFL_PRHEX,	SROM_WL1LPAB1,		0xffff},
+	{"pa1lob2",	0x0000000c,	SRFL_PRHEX,	SROM_WL1LPAB2,		0xffff},
+	{"pa1hib0",	0x0000000c,	SRFL_PRHEX,	SROM_WL1HPAB0,		0xffff},
+	{"pa1hib1",	0x0000000c,	SRFL_PRHEX,	SROM_WL1HPAB1,		0xffff},
+	{"pa1hib2",	0x0000000c,	SRFL_PRHEX,	SROM_WL1HPAB2,		0xffff},
+	{"pa1itssit",	0x0000000e,	0,		SROM_ITT,		0xff00},
+	{"pa1maxpwr",	0x0000000e,	0,		SROM_WL10MAXP,		0xff00},
+	{"pa1lomaxpwr",	0x0000000c,	0,		SROM_WL1LHMAXP,		0xff00},
+	{"pa1himaxpwr",	0x0000000c,	0,		SROM_WL1LHMAXP,		0x00ff},
+	{"pa1b0",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB0,		0xffff},
+	{"pa1b1",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB1,		0xffff},
+	{"pa1b2",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB2,		0xffff},
+	{"pa1lob0",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB0_LC,	0xffff},
+	{"pa1lob1",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB1_LC,	0xffff},
+	{"pa1lob2",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB2_LC,	0xffff},
+	{"pa1hib0",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB0_HC,	0xffff},
+	{"pa1hib1",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB1_HC,	0xffff},
+	{"pa1hib2",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB2_HC,	0xffff},
+	{"pa1itssit",	0x00000700,	0,		SROM8_W1_ITTMAXP,	0xff00},
+	{"pa1maxpwr",	0x00000700,	0,		SROM8_W1_ITTMAXP,	0x00ff},
+	{"pa1lomaxpwr",	0x00000700,	0,		SROM8_W1_MAXP_LCHC,	0xff00},
+	{"pa1himaxpwr",	0x00000700,	0,		SROM8_W1_MAXP_LCHC,	0x00ff},
+	{"bxa2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x1800},
+	{"rssisav2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x0700},
+	{"rssismc2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x00f0},
+	{"rssismf2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x000f},
+	{"bxa2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x1800},
+	{"rssisav2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x0700},
+	{"rssismc2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x00f0},
+	{"rssismf2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x000f},
+	{"bxa5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x1800},
+	{"rssisav5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x0700},
+	{"rssismc5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x00f0},
+	{"rssismf5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x000f},
+	{"bxa5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x1800},
+	{"rssisav5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x0700},
+	{"rssismc5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x00f0},
+	{"rssismf5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x000f},
+	{"tri2g",	0x00000008,	0,		SROM_TRI52G,		0x00ff},
+	{"tri5g",	0x00000008,	0,		SROM_TRI52G,		0xff00},
+	{"tri5gl",	0x00000008,	0,		SROM_TRI5GHL,		0x00ff},
+	{"tri5gh",	0x00000008,	0,		SROM_TRI5GHL,		0xff00},
+	{"tri2g",	0x00000700,	0,		SROM8_TRI52G,		0x00ff},
+	{"tri5g",	0x00000700,	0,		SROM8_TRI52G,		0xff00},
+	{"tri5gl",	0x00000700,	0,		SROM8_TRI5GHL,		0x00ff},
+	{"tri5gh",	0x00000700,	0,		SROM8_TRI5GHL,		0xff00},
+	{"rxpo2g",	0x00000008,	SRFL_PRSIGN,	SROM_RXPO52G,		0x00ff},
+	{"rxpo5g",	0x00000008,	SRFL_PRSIGN,	SROM_RXPO52G,		0xff00},
+	{"rxpo2g",	0x00000700,	SRFL_PRSIGN,	SROM8_RXPO52G,		0x00ff},
+	{"rxpo5g",	0x00000700,	SRFL_PRSIGN,	SROM8_RXPO52G,		0xff00},
+	{"txchain",	0x000000f0,	SRFL_NOFFS,	SROM4_TXRXC,		SROM4_TXCHAIN_MASK},
+	{"rxchain",	0x000000f0,	SRFL_NOFFS,	SROM4_TXRXC,		SROM4_RXCHAIN_MASK},
+	{"antswitch",	0x000000f0,	SRFL_NOFFS,	SROM4_TXRXC,		SROM4_SWITCH_MASK},
+	{"txchain",	0x00000700,	SRFL_NOFFS,	SROM8_TXRXC,		SROM4_TXCHAIN_MASK},
+	{"rxchain",	0x00000700,	SRFL_NOFFS,	SROM8_TXRXC,		SROM4_RXCHAIN_MASK},
+	{"antswitch",	0x00000700,	SRFL_NOFFS,	SROM8_TXRXC,		SROM4_SWITCH_MASK},
+	{"tssipos2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_TSSIPOS_MASK},
+	{"extpagain2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_EXTPA_GAIN_MASK},
+	{"pdetrange2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_PDET_RANGE_MASK},
+	{"triso2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_TR_ISO_MASK},
+	{"antswctl2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_ANTSWLUT_MASK},
+	{"tssipos5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_TSSIPOS_MASK},
+	{"extpagain5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_EXTPA_GAIN_MASK},
+	{"pdetrange5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_PDET_RANGE_MASK},
+	{"triso5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_TR_ISO_MASK},
+	{"antswctl5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_ANTSWLUT_MASK},
+	{"txpid2ga0",	0x000000f0,	0,		SROM4_TXPID2G,		0x00ff},
+	{"txpid2ga1",	0x000000f0,	0,		SROM4_TXPID2G,		0xff00},
+	{"txpid2ga2",	0x000000f0,	0,		SROM4_TXPID2G + 1,	0x00ff},
+	{"txpid2ga3",	0x000000f0,	0,		SROM4_TXPID2G + 1,	0xff00},
+	{"txpid5ga0",	0x000000f0,	0,		SROM4_TXPID5G,		0x00ff},
+	{"txpid5ga1",	0x000000f0,	0,		SROM4_TXPID5G,		0xff00},
+	{"txpid5ga2",	0x000000f0,	0,		SROM4_TXPID5G + 1,	0x00ff},
+	{"txpid5ga3",	0x000000f0,	0,		SROM4_TXPID5G + 1,	0xff00},
+	{"txpid5gla0",	0x000000f0,	0,		SROM4_TXPID5GL,		0x00ff},
+	{"txpid5gla1",	0x000000f0,	0,		SROM4_TXPID5GL,		0xff00},
+	{"txpid5gla2",	0x000000f0,	0,		SROM4_TXPID5GL + 1,	0x00ff},
+	{"txpid5gla3",	0x000000f0,	0,		SROM4_TXPID5GL + 1,	0xff00},
+	{"txpid5gha0",	0x000000f0,	0,		SROM4_TXPID5GH,		0x00ff},
+	{"txpid5gha1",	0x000000f0,	0,		SROM4_TXPID5GH,		0xff00},
+	{"txpid5gha2",	0x000000f0,	0,		SROM4_TXPID5GH + 1,	0x00ff},
+	{"txpid5gha3",	0x000000f0,	0,		SROM4_TXPID5GH + 1,	0xff00},
+
+	{"ccode",	0x0000000f,	SRFL_CCODE,	SROM_CCODE,		0xffff},
+	{"ccode",	0x00000010,	SRFL_CCODE,	SROM4_CCODE,		0xffff},
+	{"ccode",	0x000000e0,	SRFL_CCODE,	SROM5_CCODE,		0xffff},
+	{"ccode",	0x00000700,	SRFL_CCODE,	SROM8_CCODE,		0xffff},
+	{"macaddr",	0x00000700,	SRFL_ETHADDR,	SROM8_MACHI,		0xffff},
+	{"macaddr",	0x000000e0,	SRFL_ETHADDR,	SROM5_MACHI,		0xffff},
+	{"macaddr",	0x00000010,	SRFL_ETHADDR,	SROM4_MACHI,		0xffff},
+	{"macaddr",	0x00000008,	SRFL_ETHADDR,	SROM3_MACHI,		0xffff},
+	{"il0macaddr",	0x00000007,	SRFL_ETHADDR,	SROM_MACHI_IL0,		0xffff},
+	{"et1macaddr",	0x00000007,	SRFL_ETHADDR,	SROM_MACHI_ET1,		0xffff},
+	{"leddc",	0x00000700,	SRFL_NOFFS|SRFL_LEDDC,	SROM8_LEDDC,	0xffff},
+	{"leddc",	0x000000e0,	SRFL_NOFFS|SRFL_LEDDC,	SROM5_LEDDC,	0xffff},
+	{"leddc",	0x00000010,	SRFL_NOFFS|SRFL_LEDDC,	SROM4_LEDDC,	0xffff},
+	{"leddc",	0x00000008,	SRFL_NOFFS|SRFL_LEDDC,	SROM3_LEDDC,	0xffff},
+
+	{"tempthresh",	0x00000700,	0,		SROM8_THERMAL,		0xff00},
+	{"tempoffset",	0x00000700,	0,		SROM8_THERMAL,		0x00ff},
+	{"rawtempsense", 0x00000700,	SRFL_PRHEX,	SROM8_MPWR_RAWTS,	0x01ff},
+	{"measpower",	0x00000700,	SRFL_PRHEX,	SROM8_MPWR_RAWTS,	0xfe00},
+	{"tempsense_slope",	0x00000700,	SRFL_PRHEX, 	SROM8_TS_SLP_OPT_CORRX,	0x00ff},
+	{"tempcorrx",	0x00000700,	SRFL_PRHEX, 	SROM8_TS_SLP_OPT_CORRX,	0xfc00},
+	{"tempsense_option",	0x00000700,	SRFL_PRHEX,	SROM8_TS_SLP_OPT_CORRX,	0x0300},
+	{"freqoffset_corr",	0x00000700,	SRFL_PRHEX,	SROM8_FOC_HWIQ_IQSWP,	0x000f},
+	{"iqcal_swp_dis",	0x00000700,	SRFL_PRHEX,	SROM8_FOC_HWIQ_IQSWP,	0x0010},
+	{"hw_iqcal_en",	0x00000700,	SRFL_PRHEX,	SROM8_FOC_HWIQ_IQSWP,	0x0020},
+	{"elna2g",      0x00000700,     0,              SROM8_EXTLNAGAIN,       0x00ff},
+	{"elna5g",      0x00000700,     0,              SROM8_EXTLNAGAIN,       0xff00},
+	{"phycal_tempdelta",	0x00000700,	0,	SROM8_PHYCAL_TEMPDELTA,	0x00ff},
+	{"temps_period",	0x00000700,	0,	SROM8_PHYCAL_TEMPDELTA,	0x0f00},
+	{"temps_hysteresis",	0x00000700,	0,	SROM8_PHYCAL_TEMPDELTA,	0xf000},
+	{"measpower1", 0x00000700,	SRFL_PRHEX, SROM8_MPWR_1_AND_2, 	0x007f},
+	{"measpower2",	0x00000700, 	SRFL_PRHEX, SROM8_MPWR_1_AND_2, 	0x3f80},
+
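+	/* Rows with an empty name string continue the variable declared in the
+	 * preceding named row: SRFL_MORE appears to chain one value across two
+	 * consecutive 16-bit SROM words, while SRFL_ARRAY marks all but the last
+	 * element of an array (see the swctrlmap_2g note further below). */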
+	{"cck2gpo",	0x000000f0,	0,		SROM4_2G_CCKPO,		0xffff},
+	{"cck2gpo",	0x00000100,	0,		SROM8_2G_CCKPO,		0xffff},
+	{"ofdm2gpo",	0x000000f0,	SRFL_MORE,	SROM4_2G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_2G_OFDMPO + 1,	0xffff},
+	{"ofdm5gpo",	0x000000f0,	SRFL_MORE,	SROM4_5G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_5G_OFDMPO + 1,	0xffff},
+	{"ofdm5glpo",	0x000000f0,	SRFL_MORE,	SROM4_5GL_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_5GL_OFDMPO + 1,	0xffff},
+	{"ofdm5ghpo",	0x000000f0,	SRFL_MORE,	SROM4_5GH_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_5GH_OFDMPO + 1,	0xffff},
+	{"ofdm2gpo",	0x00000100,	SRFL_MORE,	SROM8_2G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_2G_OFDMPO + 1,	0xffff},
+	{"ofdm5gpo",	0x00000100,	SRFL_MORE,	SROM8_5G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_5G_OFDMPO + 1,	0xffff},
+	{"ofdm5glpo",	0x00000100,	SRFL_MORE,	SROM8_5GL_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_5GL_OFDMPO + 1,	0xffff},
+	{"ofdm5ghpo",	0x00000100,	SRFL_MORE,	SROM8_5GH_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_5GH_OFDMPO + 1,	0xffff},
+	{"mcs2gpo0",	0x000000f0,	0,		SROM4_2G_MCSPO,		0xffff},
+	{"mcs2gpo1",	0x000000f0,	0,		SROM4_2G_MCSPO + 1,	0xffff},
+	{"mcs2gpo2",	0x000000f0,	0,		SROM4_2G_MCSPO + 2,	0xffff},
+	{"mcs2gpo3",	0x000000f0,	0,		SROM4_2G_MCSPO + 3,	0xffff},
+	{"mcs2gpo4",	0x000000f0,	0,		SROM4_2G_MCSPO + 4,	0xffff},
+	{"mcs2gpo5",	0x000000f0,	0,		SROM4_2G_MCSPO + 5,	0xffff},
+	{"mcs2gpo6",	0x000000f0,	0,		SROM4_2G_MCSPO + 6,	0xffff},
+	{"mcs2gpo7",	0x000000f0,	0,		SROM4_2G_MCSPO + 7,	0xffff},
+	{"mcs5gpo0",	0x000000f0,	0,		SROM4_5G_MCSPO,		0xffff},
+	{"mcs5gpo1",	0x000000f0,	0,		SROM4_5G_MCSPO + 1,	0xffff},
+	{"mcs5gpo2",	0x000000f0,	0,		SROM4_5G_MCSPO + 2,	0xffff},
+	{"mcs5gpo3",	0x000000f0,	0,		SROM4_5G_MCSPO + 3,	0xffff},
+	{"mcs5gpo4",	0x000000f0,	0,		SROM4_5G_MCSPO + 4,	0xffff},
+	{"mcs5gpo5",	0x000000f0,	0,		SROM4_5G_MCSPO + 5,	0xffff},
+	{"mcs5gpo6",	0x000000f0,	0,		SROM4_5G_MCSPO + 6,	0xffff},
+	{"mcs5gpo7",	0x000000f0,	0,		SROM4_5G_MCSPO + 7,	0xffff},
+	{"mcs5glpo0",	0x000000f0,	0,		SROM4_5GL_MCSPO,	0xffff},
+	{"mcs5glpo1",	0x000000f0,	0,		SROM4_5GL_MCSPO + 1,	0xffff},
+	{"mcs5glpo2",	0x000000f0,	0,		SROM4_5GL_MCSPO + 2,	0xffff},
+	{"mcs5glpo3",	0x000000f0,	0,		SROM4_5GL_MCSPO + 3,	0xffff},
+	{"mcs5glpo4",	0x000000f0,	0,		SROM4_5GL_MCSPO + 4,	0xffff},
+	{"mcs5glpo5",	0x000000f0,	0,		SROM4_5GL_MCSPO + 5,	0xffff},
+	{"mcs5glpo6",	0x000000f0,	0,		SROM4_5GL_MCSPO + 6,	0xffff},
+	{"mcs5glpo7",	0x000000f0,	0,		SROM4_5GL_MCSPO + 7,	0xffff},
+	{"mcs5ghpo0",	0x000000f0,	0,		SROM4_5GH_MCSPO,	0xffff},
+	{"mcs5ghpo1",	0x000000f0,	0,		SROM4_5GH_MCSPO + 1,	0xffff},
+	{"mcs5ghpo2",	0x000000f0,	0,		SROM4_5GH_MCSPO + 2,	0xffff},
+	{"mcs5ghpo3",	0x000000f0,	0,		SROM4_5GH_MCSPO + 3,	0xffff},
+	{"mcs5ghpo4",	0x000000f0,	0,		SROM4_5GH_MCSPO + 4,	0xffff},
+	{"mcs5ghpo5",	0x000000f0,	0,		SROM4_5GH_MCSPO + 5,	0xffff},
+	{"mcs5ghpo6",	0x000000f0,	0,		SROM4_5GH_MCSPO + 6,	0xffff},
+	{"mcs5ghpo7",	0x000000f0,	0,		SROM4_5GH_MCSPO + 7,	0xffff},
+	{"mcs2gpo0",	0x00000100,	0,		SROM8_2G_MCSPO,		0xffff},
+	{"mcs2gpo1",	0x00000100,	0,		SROM8_2G_MCSPO + 1,	0xffff},
+	{"mcs2gpo2",	0x00000100,	0,		SROM8_2G_MCSPO + 2,	0xffff},
+	{"mcs2gpo3",	0x00000100,	0,		SROM8_2G_MCSPO + 3,	0xffff},
+	{"mcs2gpo4",	0x00000100,	0,		SROM8_2G_MCSPO + 4,	0xffff},
+	{"mcs2gpo5",	0x00000100,	0,		SROM8_2G_MCSPO + 5,	0xffff},
+	{"mcs2gpo6",	0x00000100,	0,		SROM8_2G_MCSPO + 6,	0xffff},
+	{"mcs2gpo7",	0x00000100,	0,		SROM8_2G_MCSPO + 7,	0xffff},
+	{"mcs5gpo0",	0x00000100,	0,		SROM8_5G_MCSPO,		0xffff},
+	{"mcs5gpo1",	0x00000100,	0,		SROM8_5G_MCSPO + 1,	0xffff},
+	{"mcs5gpo2",	0x00000100,	0,		SROM8_5G_MCSPO + 2,	0xffff},
+	{"mcs5gpo3",	0x00000100,	0,		SROM8_5G_MCSPO + 3,	0xffff},
+	{"mcs5gpo4",	0x00000100,	0,		SROM8_5G_MCSPO + 4,	0xffff},
+	{"mcs5gpo5",	0x00000100,	0,		SROM8_5G_MCSPO + 5,	0xffff},
+	{"mcs5gpo6",	0x00000100,	0,		SROM8_5G_MCSPO + 6,	0xffff},
+	{"mcs5gpo7",	0x00000100,	0,		SROM8_5G_MCSPO + 7,	0xffff},
+	{"mcs5glpo0",	0x00000100,	0,		SROM8_5GL_MCSPO,	0xffff},
+	{"mcs5glpo1",	0x00000100,	0,		SROM8_5GL_MCSPO + 1,	0xffff},
+	{"mcs5glpo2",	0x00000100,	0,		SROM8_5GL_MCSPO + 2,	0xffff},
+	{"mcs5glpo3",	0x00000100,	0,		SROM8_5GL_MCSPO + 3,	0xffff},
+	{"mcs5glpo4",	0x00000100,	0,		SROM8_5GL_MCSPO + 4,	0xffff},
+	{"mcs5glpo5",	0x00000100,	0,		SROM8_5GL_MCSPO + 5,	0xffff},
+	{"mcs5glpo6",	0x00000100,	0,		SROM8_5GL_MCSPO + 6,	0xffff},
+	{"mcs5glpo7",	0x00000100,	0,		SROM8_5GL_MCSPO + 7,	0xffff},
+	{"mcs5ghpo0",	0x00000100,	0,		SROM8_5GH_MCSPO,	0xffff},
+	{"mcs5ghpo1",	0x00000100,	0,		SROM8_5GH_MCSPO + 1,	0xffff},
+	{"mcs5ghpo2",	0x00000100,	0,		SROM8_5GH_MCSPO + 2,	0xffff},
+	{"mcs5ghpo3",	0x00000100,	0,		SROM8_5GH_MCSPO + 3,	0xffff},
+	{"mcs5ghpo4",	0x00000100,	0,		SROM8_5GH_MCSPO + 4,	0xffff},
+	{"mcs5ghpo5",	0x00000100,	0,		SROM8_5GH_MCSPO + 5,	0xffff},
+	{"mcs5ghpo6",	0x00000100,	0,		SROM8_5GH_MCSPO + 6,	0xffff},
+	{"mcs5ghpo7",	0x00000100,	0,		SROM8_5GH_MCSPO + 7,	0xffff},
+	{"cddpo",	0x000000f0,	0,		SROM4_CDDPO,		0xffff},
+	{"stbcpo",	0x000000f0,	0,		SROM4_STBCPO,		0xffff},
+	{"bw40po",	0x000000f0,	0,		SROM4_BW40PO,		0xffff},
+	{"bwduppo",	0x000000f0,	0,		SROM4_BWDUPPO,		0xffff},
+	{"cddpo",	0x00000100,	0,		SROM8_CDDPO,		0xffff},
+	{"stbcpo",	0x00000100,	0,		SROM8_STBCPO,		0xffff},
+	{"bw40po",	0x00000100,	0,		SROM8_BW40PO,		0xffff},
+	{"bwduppo",	0x00000100,	0,		SROM8_BWDUPPO,		0xffff},
+
+	/* power per rate from sromrev 9 */
+	{"cckbw202gpo",		0x00000600,	0,	SROM9_2GPO_CCKBW20,		0xffff},
+	{"cckbw20ul2gpo",	0x00000600,	0,	SROM9_2GPO_CCKBW20UL,		0xffff},
+	{"legofdmbw202gpo",	0x00000600,	SRFL_MORE, SROM9_2GPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_2GPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul2gpo",	0x00000600,	SRFL_MORE, SROM9_2GPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_2GPO_LOFDMBW20UL + 1,	0xffff},
+	{"legofdmbw205glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_5GLPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul5glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GLPO_LOFDMBW20UL + 1,	0xffff},
+	{"legofdmbw205gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_5GMPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul5gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GMPO_LOFDMBW20UL + 1,	0xffff},
+	{"legofdmbw205ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_5GHPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul5ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GHPO_LOFDMBW20UL + 1,	0xffff},
+	{"mcsbw202gpo",		0x00000600,	SRFL_MORE, SROM9_2GPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_2GPO_MCSBW20 + 1,		0xffff},
+	{"mcsbw20ul2gpo",      	0x00000600,	SRFL_MORE, SROM9_2GPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_2GPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw402gpo",		0x00000600,	SRFL_MORE, SROM9_2GPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_2GPO_MCSBW40 + 1,		0xffff},
+	{"mcsbw205glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_5GLPO_MCSBW20 + 1,	0xffff},
+	{"mcsbw20ul5glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GLPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw405glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_5GLPO_MCSBW40 + 1,	0xffff},
+	{"mcsbw205gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_5GMPO_MCSBW20 + 1,	0xffff},
+	{"mcsbw20ul5gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GMPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw405gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_5GMPO_MCSBW40 + 1,	0xffff},
+	{"mcsbw205ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_5GHPO_MCSBW20 + 1,	0xffff},
+	{"mcsbw20ul5ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GHPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw405ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_5GHPO_MCSBW40 + 1,	0xffff},
+	{"mcs32po",		0x00000600,	0,	SROM9_PO_MCS32,			0xffff},
+	{"legofdm40duppo",	0x00000600,	0,	SROM9_PO_LOFDM40DUP,	0xffff},
+	{"pcieingress_war",	0x00000700,	0,	SROM8_PCIEINGRESS_WAR,	0xf},
+	{"eu_edthresh2g",	0x00000100,	0,	SROM8_EU_EDCRSTH,		0x00ff},
+	{"eu_edthresh5g",	0x00000100,	0,	SROM8_EU_EDCRSTH,		0xff00},
+	{"eu_edthresh2g",	0x00000200,	0,	SROM9_EU_EDCRSTH,		0x00ff},
+	{"eu_edthresh5g",	0x00000200,	0,	SROM9_EU_EDCRSTH,		0xff00},
+	{"rxgainerr2ga0",	0x00000700,	0,	SROM8_RXGAINERR_2G,		0x003f},
+	{"rxgainerr2ga0",	0x00000700,	0,	SROM8_RXGAINERR_2G,		0x003f},
+	{"rxgainerr2ga1",	0x00000700,	0,	SROM8_RXGAINERR_2G,		0x07c0},
+	{"rxgainerr2ga2",	0x00000700,	0,	SROM8_RXGAINERR_2G,		0xf800},
+	{"rxgainerr5gla0",	0x00000700,	0,	SROM8_RXGAINERR_5GL,	0x003f},
+	{"rxgainerr5gla1",	0x00000700,	0,	SROM8_RXGAINERR_5GL,	0x07c0},
+	{"rxgainerr5gla2",	0x00000700,	0,	SROM8_RXGAINERR_5GL,	0xf800},
+	{"rxgainerr5gma0",	0x00000700,	0,	SROM8_RXGAINERR_5GM,	0x003f},
+	{"rxgainerr5gma1",	0x00000700,	0,	SROM8_RXGAINERR_5GM,	0x07c0},
+	{"rxgainerr5gma2",	0x00000700,	0,	SROM8_RXGAINERR_5GM,	0xf800},
+	{"rxgainerr5gha0",	0x00000700,	0,	SROM8_RXGAINERR_5GH,	0x003f},
+	{"rxgainerr5gha1",	0x00000700,	0,	SROM8_RXGAINERR_5GH,	0x07c0},
+	{"rxgainerr5gha2",	0x00000700,	0,	SROM8_RXGAINERR_5GH,	0xf800},
+	{"rxgainerr5gua0",	0x00000700,	0,	SROM8_RXGAINERR_5GU,	0x003f},
+	{"rxgainerr5gua1",	0x00000700,	0,	SROM8_RXGAINERR_5GU,	0x07c0},
+	{"rxgainerr5gua2",	0x00000700,	0,	SROM8_RXGAINERR_5GU,	0xf800},
+	{"sar2g",       	0x00000600,	0,	SROM9_SAR,          	0x00ff},
+	{"sar5g",           0x00000600,	0,	SROM9_SAR,	            0xff00},
+	{"noiselvl2ga0",	0x00000700,	0,	SROM8_NOISELVL_2G,		0x001f},
+	{"noiselvl2ga1",	0x00000700,	0,	SROM8_NOISELVL_2G,		0x03e0},
+	{"noiselvl2ga2",	0x00000700,	0,	SROM8_NOISELVL_2G,		0x7c00},
+	{"noiselvl5gla0",	0x00000700,	0,	SROM8_NOISELVL_5GL,		0x001f},
+	{"noiselvl5gla1",	0x00000700,	0,	SROM8_NOISELVL_5GL,		0x03e0},
+	{"noiselvl5gla2",	0x00000700,	0,	SROM8_NOISELVL_5GL,		0x7c00},
+	{"noiselvl5gma0",	0x00000700,	0,	SROM8_NOISELVL_5GM,		0x001f},
+	{"noiselvl5gma1",	0x00000700,	0,	SROM8_NOISELVL_5GM,		0x03e0},
+	{"noiselvl5gma2",	0x00000700,	0,	SROM8_NOISELVL_5GM,		0x7c00},
+	{"noiselvl5gha0",	0x00000700,	0,	SROM8_NOISELVL_5GH,		0x001f},
+	{"noiselvl5gha1",	0x00000700,	0,	SROM8_NOISELVL_5GH,		0x03e0},
+	{"noiselvl5gha2",	0x00000700,	0,	SROM8_NOISELVL_5GH,		0x7c00},
+	{"noiselvl5gua0",	0x00000700,	0,	SROM8_NOISELVL_5GU,		0x001f},
+	{"noiselvl5gua1",	0x00000700,	0,	SROM8_NOISELVL_5GU,		0x03e0},
+	{"noiselvl5gua2",	0x00000700,	0,	SROM8_NOISELVL_5GU,		0x7c00},
+	{"noisecaloffset",	0x00000300,	0,	SROM8_NOISECALOFFSET,		0x00ff},
+	{"noisecaloffset5g",	0x00000300,	0,	SROM8_NOISECALOFFSET,		0xff00},
+	{"subband5gver",	0x00000700,	0,	SROM8_SUBBAND_PPR,		0x7},
+
+	{"cckPwrOffset",	0x00000400,	0,	SROM10_CCKPWROFFSET,		0xffff},
+	{"eu_edthresh2g",	0x00000400,	0,	SROM10_EU_EDCRSTH,		0x00ff},
+	{"eu_edthresh5g",	0x00000400,	0,	SROM10_EU_EDCRSTH,		0xff00},
+	/* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */
+	{"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 1,			0xffff},
+	{"",	0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 	0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 3,			0xffff},
+	{"",	0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4,	0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 5,			0xffff},
+	{"",	0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6,	0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 7,			0xffff},
+	{"",	0x00000400, SRFL_PRHEX,	SROM10_SWCTRLMAP_2G + 8,			0xffff},
+
+	/* sromrev 11 */
+	{"boardflags3",	0xfffff800,	SRFL_PRHEX|SRFL_MORE,	SROM11_BFL4,	0xffff},
+	{"",		0,		0,			SROM11_BFL5,	0xffff},
+	{"boardnum",	0xfffff800,	0,			SROM11_MACLO,	0xffff},
+	{"macaddr",	0xfffff800,	SRFL_ETHADDR,		SROM11_MACHI,	0xffff},
+	{"ccode",	0xfffff800,	SRFL_CCODE,		SROM11_CCODE,	0xffff},
+	{"regrev",	0xfffff800,	0,			SROM11_REGREV,	0xffff},
+	{"ledbh0",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH10,	0x00ff},
+	{"ledbh1",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH10,	0xff00},
+	{"ledbh2",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH32,	0x00ff},
+	{"ledbh3",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH32,	0xff00},
+	{"leddc",	0xfffff800,	SRFL_NOFFS|SRFL_LEDDC,	SROM11_LEDDC,	0xffff},
+	{"aa2g",	0xfffff800,	0,			SROM11_AA,	0x00ff},
+	{"aa5g",	0xfffff800,	0,			SROM11_AA,	0xff00},
+	{"agbg0",	0xfffff800,	0,			SROM11_AGBG10,  0xff00},
+	{"agbg1",	0xfffff800,	0,			SROM11_AGBG10,	0x00ff},
+	{"agbg2",	0xfffff800,	0,			SROM11_AGBG2A0,	0xff00},
+	{"aga0",	0xfffff800,	0,			SROM11_AGBG2A0,	0x00ff},
+	{"aga1",	0xfffff800,	0,			SROM11_AGA21,   0xff00},
+	{"aga2",	0xfffff800,	0,			SROM11_AGA21,	0x00ff},
+	{"txchain",	0xfffff800,	SRFL_NOFFS,	SROM11_TXRXC,	SROM4_TXCHAIN_MASK},
+	{"rxchain",	0xfffff800,	SRFL_NOFFS,	SROM11_TXRXC,	SROM4_RXCHAIN_MASK},
+	{"antswitch",	0xfffff800,	SRFL_NOFFS,	SROM11_TXRXC,	SROM4_SWITCH_MASK},
+
+	{"tssiposslope2g",	0xfffff800,	0,		SROM11_FEM_CFG1, 	0x0001},
+	{"epagain2g",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0x000e},
+	{"pdgain2g",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0x01f0},
+	{"tworangetssi2g",	0xfffff800,	0,		SROM11_FEM_CFG1, 	0x0200},
+	{"papdcap2g",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0x0400},
+	{"femctrl",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0xf800},
+
+	{"tssiposslope5g",	0xfffff800,	0,		SROM11_FEM_CFG2, 	0x0001},
+	{"epagain5g",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0x000e},
+	{"pdgain5g",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0x01f0},
+	{"tworangetssi5g",	0xfffff800,	0,		SROM11_FEM_CFG2, 	0x0200},
+	{"papdcap5g",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0x0400},
+	{"gainctrlsph",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0xf800},
+
+	{"tempthresh",		0xfffff800,	0,		SROM11_THERMAL,		0xff00},
+	{"tempoffset",		0xfffff800,	0,		SROM11_THERMAL,		0x00ff},
+	{"rawtempsense", 	0xfffff800,	SRFL_PRHEX,	SROM11_MPWR_RAWTS,	0x01ff},
+	{"measpower",		0xfffff800,	SRFL_PRHEX,	SROM11_MPWR_RAWTS,	0xfe00},
+	{"tempsense_slope",	0xfffff800,	SRFL_PRHEX, 	SROM11_TS_SLP_OPT_CORRX, 0x00ff},
+	{"tempcorrx",		0xfffff800,	SRFL_PRHEX, 	SROM11_TS_SLP_OPT_CORRX, 0xfc00},
+	{"tempsense_option",	0xfffff800,	SRFL_PRHEX,	SROM11_TS_SLP_OPT_CORRX, 0x0300},
+	{"xtalfreq",		0xfffff800,	0,		SROM11_XTAL_FREQ, 	0xffff},
+	{"txpwrbckof",	0x00000800,	SRFL_PRHEX,	SROM11_PATH0 + SROM11_2G_MAXP,	0xff00},
+	/* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */
+	{"pa5gbw4080a1", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1,                 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1,                 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1,                 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA,     0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA,     0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA,     0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX,              SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff},
+	{"phycal_tempdelta",	0xfffff800,	0,		SROM11_PHYCAL_TEMPDELTA, 0x00ff},
+	{"temps_period",	0xfffff800,	0,		SROM11_PHYCAL_TEMPDELTA, 0x0f00},
+	{"temps_hysteresis",	0xfffff800,	0,		SROM11_PHYCAL_TEMPDELTA, 0xf000},
+	{"measpower1", 		0xfffff800,	SRFL_PRHEX,	SROM11_MPWR_1_AND_2, 	0x007f},
+	{"measpower2",		0xfffff800, 	SRFL_PRHEX,	SROM11_MPWR_1_AND_2, 	0x3f80},
+	{"tssifloor2g",		0xfffff800,	SRFL_PRHEX,	SROM11_TSSIFLOOR_2G,	0x03ff},
+	{"tssifloor5g",	0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL,	0x03ff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM,	0x03ff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH,	0x03ff},
+	{"",		0xfffff800,	SRFL_PRHEX,		SROM11_TSSIFLOOR_5GU,	0x03ff},
+	{"pdoffset2g40ma0",     0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x000f},
+	{"pdoffset2g40ma1",     0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x00f0},
+	{"pdoffset2g40ma2",     0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x0f00},
+	{"pdoffset2g40mvalid",  0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x8000},
+	{"pdoffset40ma0",      	0xfffff800,	0,		SROM11_PDOFF_40M_A0,   	0xffff},
+	{"pdoffset40ma1",      	0xfffff800,	0,		SROM11_PDOFF_40M_A1,   	0xffff},
+	{"pdoffset40ma2",      	0xfffff800,	0,		SROM11_PDOFF_40M_A2,   	0xffff},
+	{"pdoffset80ma0",      	0xfffff800,	0,		SROM11_PDOFF_80M_A0,   	0xffff},
+	{"pdoffset80ma1",      	0xfffff800,	0,		SROM11_PDOFF_80M_A1,   	0xffff},
+	{"pdoffset80ma2",      	0xfffff800,	0,		SROM11_PDOFF_80M_A2,   	0xffff},
+
+	{"subband5gver",	0xfffff800, 	SRFL_PRHEX,	SROM11_SUBBAND5GVER, 	0xffff},
+	{"paparambwver",	0xfffff800, 	0,		SROM11_MCSLR5GLPO, 	0xf000},
+	{"rx5ggainwar",         0xfffff800,     0,              SROM11_MCSLR5GMPO,      0x2000},
+	/* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */
+	{"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+	/* Special PA Params for 4335 5G Band, 40 MHz BW */
+	{"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff},
+	/* Special PA Params for 4335 5G Band, 80 MHz BW */
+	{"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+	/* Special PA Params for 4335 2G Band, CCK */
+	{"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff},
+
+	/* power per rate */
+	{"cckbw202gpo",		0xfffff800,	0,		SROM11_CCKBW202GPO, 	0xffff},
+	{"cckbw20ul2gpo",	0xfffff800,	0,		SROM11_CCKBW20UL2GPO, 	0xffff},
+	{"mcsbw202gpo",		0xfffff800,	SRFL_MORE,	SROM11_MCSBW202GPO,   	0xffff},
+	{"",            	0xfffff800, 	0,          	SROM11_MCSBW202GPO_1, 	0xffff},
+	{"mcsbw402gpo",		0xfffff800,	SRFL_MORE,	SROM11_MCSBW402GPO,   	0xffff},
+	{"",            	0xfffff800, 	0,   		SROM11_MCSBW402GPO_1, 	0xffff},
+	{"dot11agofdmhrbw202gpo", 0xfffff800, 	0, 	SROM11_DOT11AGOFDMHRBW202GPO, 	0xffff},
+	{"ofdmlrbw202gpo",	0xfffff800, 	0, 		SROM11_OFDMLRBW202GPO,	0xffff},
+	{"mcsbw205glpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW205GLPO, 	0xffff},
+	{"",           		0xfffff800, 	0,   		SROM11_MCSBW205GLPO_1, 	0xffff},
+	{"mcsbw405glpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW405GLPO, 	0xffff},
+	{"",           		0xfffff800, 	0,     		SROM11_MCSBW405GLPO_1, 	0xffff},
+	{"mcsbw805glpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW805GLPO, 	0xffff},
+	{"",           		0xfffff800, 	0,    		SROM11_MCSBW805GLPO_1, 	0xffff},
+	{"mcsbw205gmpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW205GMPO, 	0xffff},
+	{"",           		0xfffff800, 	0,     		SROM11_MCSBW205GMPO_1, 	0xffff},
+	{"mcsbw405gmpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW405GMPO, 	0xffff},
+	{"",           		0xfffff800, 	0,     		SROM11_MCSBW405GMPO_1, 	0xffff},
+	{"mcsbw805gmpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW805GMPO, 	0xffff},
+	{"",           		0xfffff800, 	0,   		SROM11_MCSBW805GMPO_1, 	0xffff},
+	{"mcsbw205ghpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW205GHPO, 	0xffff},
+	{"",           		0xfffff800, 	0,  		SROM11_MCSBW205GHPO_1, 	0xffff},
+	{"mcsbw405ghpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW405GHPO, 	0xffff},
+	{"",           		0xfffff800, 	0,   		SROM11_MCSBW405GHPO_1, 	0xffff},
+	{"mcsbw805ghpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW805GHPO, 	0xffff},
+	{"",           		0xfffff800, 	0,    		SROM11_MCSBW805GHPO_1, 	0xffff},
+	{"mcslr5glpo",		0xfffff800,	0,		SROM11_MCSLR5GLPO, 	0x0fff},
+	{"mcslr5gmpo",		0xfffff800,	0,		SROM11_MCSLR5GMPO, 	0xffff},
+	{"mcslr5ghpo",		0xfffff800,	0,		SROM11_MCSLR5GHPO, 	0xffff},
+	{"sb20in40hrpo", 	0xfffff800,	0,	SROM11_SB20IN40HRPO,		0xffff},
+	{"sb20in80and160hr5glpo", 0xfffff800, 	0, 	SROM11_SB20IN80AND160HR5GLPO, 	0xffff},
+	{"sb40and80hr5glpo",	  0xfffff800, 	0,	SROM11_SB40AND80HR5GLPO,	0xffff},
+	{"sb20in80and160hr5gmpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160HR5GMPO, 	0xffff},
+	{"sb40and80hr5gmpo",	  0xfffff800, 	0,	SROM11_SB40AND80HR5GMPO,	0xffff},
+	{"sb20in80and160hr5ghpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160HR5GHPO, 	0xffff},
+	{"sb40and80hr5ghpo",	  0xfffff800, 	0,	SROM11_SB40AND80HR5GHPO,	0xffff},
+	{"sb20in40lrpo",	  0xfffff800, 	0,	SROM11_SB20IN40LRPO,		0xffff},
+	{"sb20in80and160lr5glpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160LR5GLPO, 	0xffff},
+	{"sb40and80lr5glpo",	  0xfffff800, 	0,	SROM11_SB40AND80LR5GLPO,	0xffff},
+	{"sb20in80and160lr5gmpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160LR5GMPO, 	0xffff},
+	{"sb40and80lr5gmpo",	  0xfffff800, 	0,	SROM11_SB40AND80LR5GMPO,	0xffff},
+	{"sb20in80and160lr5ghpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160LR5GHPO, 	0xffff},
+	{"sb40and80lr5ghpo",	  0xfffff800, 	0,	SROM11_SB40AND80LR5GHPO,	0xffff},
+	{"dot11agduphrpo",	  0xfffff800, 	0,	SROM11_DOT11AGDUPHRPO,		0xffff},
+	{"dot11agduplrpo",	  0xfffff800, 	0,	SROM11_DOT11AGDUPLRPO,		0xffff},
+
+	/* Misc */
+	{"sar2g",       	0xfffff800,	0,	SROM11_SAR,          	0x00ff},
+	{"sar5g",           	0xfffff800,	0,	SROM11_SAR,		0xff00},
+
+	{"noiselvl2ga0",	0xfffff800,	0,		SROM11_NOISELVL_2G,	0x001f},
+	{"noiselvl2ga1",	0xfffff800,	0,		SROM11_NOISELVL_2G,	0x03e0},
+	{"noiselvl2ga2",	0xfffff800,	0,		SROM11_NOISELVL_2G,	0x7c00},
+	{"noiselvl5ga0",	0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GL,	0x001f},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GM,	0x001f},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GH,	0x001f},
+	{"",			0xfffff800,	0,		SROM11_NOISELVL_5GU,	0x001f},
+	{"noiselvl5ga1",	0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GL,	0x03e0},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GM,	0x03e0},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GH,	0x03e0},
+	{"",			0xfffff800,	0,		SROM11_NOISELVL_5GU,	0x03e0},
+	{"noiselvl5ga2",	0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GL,	0x7c00},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GM,	0x7c00},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GH,	0x7c00},
+	{"",			0xfffff800,	0,		SROM11_NOISELVL_5GU,	0x7c00},
+	{"eu_edthresh2g",	0x00000800,	0,	        SROM11_EU_EDCRSTH,	0x00ff},
+	{"eu_edthresh5g",	0x00000800,	0,	        SROM11_EU_EDCRSTH,	0xff00},
+
+	{"rxgainerr2ga0", 	0xfffff800, 	0,    		SROM11_RXGAINERR_2G,    0x003f},
+	{"rxgainerr2ga1", 	0xfffff800, 	0,    		SROM11_RXGAINERR_2G,    0x07c0},
+	{"rxgainerr2ga2", 	0xfffff800, 	0,    		SROM11_RXGAINERR_2G,    0xf800},
+	{"rxgainerr5ga0",      	0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GL,   0x003f},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GM,   0x003f},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GH,   0x003f},
+	{"",      		0xfffff800, 	0,    		SROM11_RXGAINERR_5GU,   0x003f},
+	{"rxgainerr5ga1",      	0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GL,   0x07c0},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GM,   0x07c0},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GH,   0x07c0},
+	{"",      		0xfffff800, 	0,    		SROM11_RXGAINERR_5GU,   0x07c0},
+	{"rxgainerr5ga2",      	0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GL,   0xf800},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GM,   0xf800},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GH,   0xf800},
+	{"",      		0xfffff800, 	0,    		SROM11_RXGAINERR_5GU,   0xf800},
+	{"rpcal2g",      	0xfffff800, 	0,		SROM11_RPCAL_2G,        0xffff},
+	{"rpcal5gb0",      	0xfffff800, 	0,		SROM11_RPCAL_5GL,       0xffff},
+	{"rpcal5gb1",      	0xfffff800, 	0,		SROM11_RPCAL_5GM,       0xffff},
+	{"rpcal5gb2",      	0xfffff800, 	0,		SROM11_RPCAL_5GH,       0xffff},
+	{"rpcal5gb3",      	0xfffff800, 	0,		SROM11_RPCAL_5GU,       0xffff},
+	{"txidxcap2g",      	0xfffff800, 	0,		SROM11_TXIDXCAP2G,      0x0ff0},
+	{"txidxcap5g",      	0xfffff800, 	0,		SROM11_TXIDXCAP5G,      0x0ff0},
+	{"pdoffsetcckma0",      0xfffff800,     0,              SROM11_PDOFF_2G_CCK,    0x000f},
+	{"pdoffsetcckma1",      0xfffff800,     0,              SROM11_PDOFF_2G_CCK,    0x00f0},
+	{"pdoffsetcckma2",      0xfffff800,     0,              SROM11_PDOFF_2G_CCK,    0x0f00},
+
+	/* sromrev 12 */
+	{"boardflags4",		0xfffff000,	SRFL_PRHEX|SRFL_MORE,	SROM12_BFL6,	0xffff},
+	{"",	0,	0,			SROM12_BFL7,	0xffff},
+	{"pdoffsetcck",		0xfffff000,	0,      SROM12_PDOFF_2G_CCK,       0xffff},
+	{"pdoffset20in40m5gb0",	0xfffff000,	0,      SROM12_PDOFF_20in40M_5G_B0,    0xffff},
+	{"pdoffset20in40m5gb1", 0xfffff000,	0,      SROM12_PDOFF_20in40M_5G_B1,    0xffff},
+	{"pdoffset20in40m5gb2", 0xfffff000,	0,      SROM12_PDOFF_20in40M_5G_B2,    0xffff},
+	{"pdoffset20in40m5gb3", 0xfffff000,	0,      SROM12_PDOFF_20in40M_5G_B3,    0xffff},
+	{"pdoffset20in40m5gb4", 0xfffff000,	0,      SROM12_PDOFF_20in40M_5G_B4,    0xffff},
+	{"pdoffset40in80m5gb0", 0xfffff000,	0,      SROM12_PDOFF_40in80M_5G_B0,    0xffff},
+	{"pdoffset40in80m5gb1", 0xfffff000,	0,      SROM12_PDOFF_40in80M_5G_B1,    0xffff},
+	{"pdoffset40in80m5gb2", 0xfffff000,	0,      SROM12_PDOFF_40in80M_5G_B2,    0xffff},
+	{"pdoffset40in80m5gb3", 0xfffff000,	0,      SROM12_PDOFF_40in80M_5G_B3,    0xffff},
+	{"pdoffset40in80m5gb4", 0xfffff000,	0,      SROM12_PDOFF_40in80M_5G_B4,    0xffff},
+	{"pdoffset20in80m5gb0", 0xfffff000,	0,      SROM12_PDOFF_20in80M_5G_B0,    0xffff},
+	{"pdoffset20in80m5gb1", 0xfffff000,	0,      SROM12_PDOFF_20in80M_5G_B1,    0xffff},
+	{"pdoffset20in80m5gb2", 0xfffff000,	0,      SROM12_PDOFF_20in80M_5G_B2,    0xffff},
+	{"pdoffset20in80m5gb3", 0xfffff000,	0,      SROM12_PDOFF_20in80M_5G_B3,    0xffff},
+	{"pdoffset20in80m5gb4", 0xfffff000,	0,      SROM12_PDOFF_20in80M_5G_B4,    0xffff},
+
+	{"pdoffset20in40m5gcore3",   0xffffe000, 0,     SROM13_PDOFFSET20IN40M5GCORE3,   0xffff},
+	{"pdoffset20in40m5gcore3_1", 0xffffe000, 0,     SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff},
+	{"pdoffset20in80m5gcore3",   0xffffe000, 0,     SROM13_PDOFFSET20IN80M5GCORE3,   0xffff},
+	{"pdoffset20in80m5gcore3_1", 0xffffe000, 0,     SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff},
+	{"pdoffset40in80m5gcore3",   0xffffe000, 0,     SROM13_PDOFFSET40IN80M5GCORE3,   0xffff},
+	{"pdoffset40in80m5gcore3_1", 0xffffe000, 0,     SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff},
+
+	{"pdoffset20in40m2g",        0xffffe000, 0,     SROM13_PDOFFSET20IN40M2G,        0xffff},
+	{"pdoffset20in40m2gcore3",   0xffffe000, 0,     SROM13_PDOFFSET20IN40M2GCORE3,   0xffff},
+
+	/* power per rate */
+	{"mcsbw205gx1po",       0xfffff000,     SRFL_MORE,      SROM12_MCSBW205GX1PO,   0xffff},
+	{"",                    0xfffff000,     0,              SROM12_MCSBW205GX1PO_1, 0xffff},
+	{"mcsbw405gx1po",       0xfffff000,     SRFL_MORE,      SROM12_MCSBW405GX1PO,   0xffff},
+	{"",                    0xfffff000,     0,              SROM12_MCSBW405GX1PO_1, 0xffff},
+	{"mcsbw805gx1po",       0xfffff000,     SRFL_MORE,      SROM12_MCSBW805GX1PO,   0xffff},
+	{"",                    0xfffff000,     0,              SROM12_MCSBW805GX1PO_1, 0xffff},
+	{"mcsbw205gx2po",       0xfffff000,     SRFL_MORE,      SROM12_MCSBW205GX2PO,   0xffff},
+	{"",                    0xfffff000,     0,              SROM12_MCSBW205GX2PO_1, 0xffff},
+	{"mcsbw405gx2po",       0xfffff000,     SRFL_MORE,      SROM12_MCSBW405GX2PO,   0xffff},
+	{"",                    0xfffff000,     0,              SROM12_MCSBW405GX2PO_1, 0xffff},
+	{"mcsbw805gx2po",       0xfffff000,     SRFL_MORE,      SROM12_MCSBW805GX2PO,   0xffff},
+	{"",                    0xfffff000,     0,              SROM12_MCSBW805GX2PO_1, 0xffff},
+
+	{"sb20in80and160hr5gx1po", 0xfffff000,  0,      SROM12_SB20IN80AND160HR5GX1PO,  0xffff},
+	{"sb40and80hr5gx1po",     0xfffff000,   0,      SROM12_SB40AND80HR5GX1PO,       0xffff},
+	{"sb20in80and160lr5gx1po", 0xfffff000,  0,      SROM12_SB20IN80AND160LR5GX1PO,  0xffff},
+	{"sb40and80hr5gx1po",     0xfffff000,   0,      SROM12_SB40AND80HR5GX1PO,       0xffff},
+	{"sb20in80and160hr5gx2po", 0xfffff000,  0,      SROM12_SB20IN80AND160HR5GX2PO,  0xffff},
+	{"sb40and80hr5gx2po",     0xfffff000,   0,      SROM12_SB40AND80HR5GX2PO,       0xffff},
+	{"sb20in80and160lr5gx2po", 0xfffff000,  0,      SROM12_SB20IN80AND160LR5GX2PO,  0xffff},
+	{"sb40and80hr5gx2po",     0xfffff000,   0,      SROM12_SB40AND80HR5GX2PO,       0xffff},
+
+	{"rxgains5gmelnagaina0",	0xfffff000,	0,       SROM12_RXGAINS10,	0x0007},
+	{"rxgains5gmelnagaina1",	0xfffff000,	0,       SROM12_RXGAINS11,	0x0007},
+	{"rxgains5gmelnagaina2",	0xfffff000,	0,       SROM12_RXGAINS12,	0x0007},
+	{"rxgains5gmtrisoa0",	0xfffff000,	0,       SROM12_RXGAINS10,	0x0078},
+	{"rxgains5gmtrisoa1",	0xfffff000,	0,       SROM12_RXGAINS11,	0x0078},
+	{"rxgains5gmtrisoa2",	0xfffff000,	0,       SROM12_RXGAINS12,	0x0078},
+	{"rxgains5gmtrelnabypa0", 0xfffff000,	0,       SROM12_RXGAINS10,	0x0080},
+	{"rxgains5gmtrelnabypa1", 0xfffff000,	0,       SROM12_RXGAINS11,	0x0080},
+	{"rxgains5gmtrelnabypa2", 0xfffff000,	0,       SROM12_RXGAINS12,	0x0080},
+	{"rxgains5ghelnagaina0",	0xfffff000,	0,       SROM12_RXGAINS10,	0x0700},
+	{"rxgains5ghelnagaina1",	0xfffff000,	0,       SROM12_RXGAINS11,	0x0700},
+	{"rxgains5ghelnagaina2",	0xfffff000,	0,       SROM12_RXGAINS12,	0x0700},
+	{"rxgains5ghtrisoa0",	0xfffff000,	0,	 SROM12_RXGAINS10,	0x7800},
+	{"rxgains5ghtrisoa1",	0xfffff000,	0,	 SROM12_RXGAINS11,	0x7800},
+	{"rxgains5ghtrisoa2",	0xfffff000,	0,	 SROM12_RXGAINS12,	0x7800},
+	{"rxgains5ghtrelnabypa0", 0xfffff000,	0,	 SROM12_RXGAINS10,	0x8000},
+	{"rxgains5ghtrelnabypa1", 0xfffff000,	0,	 SROM12_RXGAINS11,	0x8000},
+	{"rxgains5ghtrelnabypa2", 0xfffff000,	0,	 SROM12_RXGAINS12,	0x8000},
+	{"eu_edthresh2g",	0x00001000,	0,       SROM12_EU_EDCRSTH,	0x00ff},
+	{"eu_edthresh5g",	0x00001000,	0,       SROM12_EU_EDCRSTH,	0xff00},
+
+	{"gpdn",		0xfffff000,	SRFL_PRHEX|SRFL_MORE,	SROM12_GPDN_L,	0xffff},
+	{"",			0,		0,			SROM12_GPDN_H,	0xffff},
+
+	{"rpcal2gcore3",        0xffffe000, 	0,	SROM13_RPCAL2GCORE3,            0x00ff},
+	{"rpcal5gb0core3",      0xffffe000, 	0,	SROM13_RPCAL5GB01CORE3,         0x00ff},
+	{"rpcal5gb1core3",      0xffffe000, 	0,	SROM13_RPCAL5GB01CORE3,         0xff00},
+	{"rpcal5gb2core3",      0xffffe000, 	0,	SROM13_RPCAL5GB23CORE3,         0x00ff},
+	{"rpcal5gb3core3",      0xffffe000, 	0,	SROM13_RPCAL5GB23CORE3,         0xff00},
+
+	{"eu_edthresh2g",       0x00002000,     0,       SROM13_EU_EDCRSTH,     0x00ff},
+	{"eu_edthresh5g",       0x00002000,     0,       SROM13_EU_EDCRSTH,     0xff00},
+
+	{"agbg3",       0xffffe000,     0,              SROM13_ANTGAIN_BANDBGA, 0xff00},
+	{"aga3",        0xffffe000,     0,              SROM13_ANTGAIN_BANDBGA, 0x00ff},
+	{"noiselvl2ga3",        0xffffe000,     0,              SROM13_NOISELVLCORE3,   0x001f},
+	{"noiselvl5ga3",        0xffffe000,     SRFL_ARRAY,     SROM13_NOISELVLCORE3,   0x03e0},
+	{"",                    0xffffe000,     SRFL_ARRAY,     SROM13_NOISELVLCORE3,   0x7c00},
+	{"",                    0xffffe000,     SRFL_ARRAY,     SROM13_NOISELVLCORE3_1, 0x001f},
+	{"",                    0xffffe000,     0,              SROM13_NOISELVLCORE3_1, 0x03e0},
+	{"rxgainerr2ga3",       0xffffe000,     0,              SROM13_RXGAINERRCORE3,  0x001f},
+	{"rxgainerr5ga3",       0xffffe000,     SRFL_ARRAY,     SROM13_RXGAINERRCORE3,  0x03e0},
+	{"",                    0xffffe000,     SRFL_ARRAY,     SROM13_RXGAINERRCORE3,  0x7c00},
+	{"",                    0xffffe000,     SRFL_ARRAY,     SROM13_RXGAINERRCORE3_1, 0x001f},
+	{"",                    0xffffe000,     0,              SROM13_RXGAINERRCORE3_1, 0x03e0},
+	{"rxgains5gmelnagaina3",        0xffffe000,     0,       SROM13_RXGAINS1CORE3,  0x0007},
+	{"rxgains5gmtrisoa3",   0xffffe000,     0,       SROM13_RXGAINS1CORE3,  0x0078},
+	{"rxgains5gmtrelnabypa3", 0xffffe000,   0,       SROM13_RXGAINS1CORE3,  0x0080},
+	{"rxgains5ghelnagaina3",        0xffffe000,     0,       SROM13_RXGAINS1CORE3,  0x0700},
+	{"rxgains5ghtrisoa3",   0xffffe000,     0,       SROM13_RXGAINS1CORE3,  0x7800},
+	{"rxgains5ghtrelnabypa3", 0xffffe000,   0,       SROM13_RXGAINS1CORE3,  0x8000},
+
+	/* power per rate */
+	{"mcs1024qam2gpo",      0xffffe000,     0,           SROM13_MCS1024QAM2GPO,     0xffff},
+	{"mcs1024qam5glpo",     0xffffe000,     SRFL_MORE,   SROM13_MCS1024QAM5GLPO,    0xffff},
+	{"",                    0xffffe000,     0,           SROM13_MCS1024QAM5GLPO_1,  0xffff},
+	{"mcs1024qam5gmpo",     0xffffe000,     SRFL_MORE,   SROM13_MCS1024QAM5GMPO,    0xffff},
+	{"",                    0xffffe000,     0,           SROM13_MCS1024QAM5GMPO_1,  0xffff},
+	{"mcs1024qam5ghpo",     0xffffe000,     SRFL_MORE,   SROM13_MCS1024QAM5GHPO,    0xffff},
+	{"",                    0xffffe000,     0,           SROM13_MCS1024QAM5GHPO_1,  0xffff},
+	{"mcs1024qam5gx1po",    0xffffe000,     SRFL_MORE,   SROM13_MCS1024QAM5GX1PO,   0xffff},
+	{"",                    0xffffe000,     0,           SROM13_MCS1024QAM5GX1PO_1, 0xffff},
+	{"mcs1024qam5gx2po",    0xffffe000,     SRFL_MORE,   SROM13_MCS1024QAM5GX2PO,   0xffff},
+	{"",                    0xffffe000,     0,           SROM13_MCS1024QAM5GX2PO_1, 0xffff},
+
+	{"mcsbw1605glpo",       0xffffe000,     SRFL_MORE,      SROM13_MCSBW1605GLPO,   0xffff},
+	{"",                    0xffffe000,     0,              SROM13_MCSBW1605GLPO_1, 0xffff},
+	{"mcsbw1605gmpo",       0xffffe000,     SRFL_MORE,      SROM13_MCSBW1605GMPO,   0xffff},
+	{"",                    0xffffe000,     0,              SROM13_MCSBW1605GMPO_1, 0xffff},
+	{"mcsbw1605ghpo",       0xffffe000,     SRFL_MORE,      SROM13_MCSBW1605GHPO,   0xffff},
+	{"",                    0xffffe000,     0,              SROM13_MCSBW1605GHPO_1, 0xffff},
+	{"mcsbw1605gx1po",      0xffffe000,     SRFL_MORE,      SROM13_MCSBW1605GX1PO,  0xffff},
+	{"",                    0xffffe000,     0,      SROM13_MCSBW1605GX1PO_1,        0xffff},
+	{"mcsbw1605gx2po",      0xffffe000,     SRFL_MORE,      SROM13_MCSBW1605GX2PO,  0xffff},
+	{"",                    0xffffe000,     0,      SROM13_MCSBW1605GX2PO_1,        0xffff},
+
+	{"ulbpproffs2g",        0xffffe000,     0,      SROM13_ULBPPROFFS2G,            0xffff},
+
+	{"mcs8poexp",           0xffffe000,     SRFL_MORE,    SROM13_MCS8POEXP,         0xffff},
+	{"",                    0xffffe000,     0,            SROM13_MCS8POEXP_1,       0xffff},
+	{"mcs9poexp",           0xffffe000,     SRFL_MORE,    SROM13_MCS9POEXP,         0xffff},
+	{"",                    0xffffe000,     0,            SROM13_MCS9POEXP_1,       0xffff},
+	{"mcs10poexp",          0xffffe000,     SRFL_MORE,    SROM13_MCS10POEXP,        0xffff},
+	{"",                    0xffffe000,     0,            SROM13_MCS10POEXP_1,      0xffff},
+	{"mcs11poexp",          0xffffe000,     SRFL_MORE,    SROM13_MCS11POEXP,        0xffff},
+	{"",                    0xffffe000,     0,            SROM13_MCS11POEXP_1,      0xffff},
+
+	{"ulbpdoffs5gb0a0",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB0A0,         0xffff},
+	{"ulbpdoffs5gb0a1",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB0A1,         0xffff},
+	{"ulbpdoffs5gb0a2",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB0A2,         0xffff},
+	{"ulbpdoffs5gb0a3",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB0A3,         0xffff},
+	{"ulbpdoffs5gb1a0",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB1A0,         0xffff},
+	{"ulbpdoffs5gb1a1",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB1A1,         0xffff},
+	{"ulbpdoffs5gb1a2",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB1A2,         0xffff},
+	{"ulbpdoffs5gb1a3",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB1A3,         0xffff},
+	{"ulbpdoffs5gb2a0",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB2A0,         0xffff},
+	{"ulbpdoffs5gb2a1",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB2A1,         0xffff},
+	{"ulbpdoffs5gb2a2",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB2A2,         0xffff},
+	{"ulbpdoffs5gb2a3",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB2A3,         0xffff},
+	{"ulbpdoffs5gb3a0",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB3A0,         0xffff},
+	{"ulbpdoffs5gb3a1",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB3A1,         0xffff},
+	{"ulbpdoffs5gb3a2",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB3A2,         0xffff},
+	{"ulbpdoffs5gb3a3",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB3A3,         0xffff},
+	{"ulbpdoffs5gb4a0",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB4A0,         0xffff},
+	{"ulbpdoffs5gb4a1",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB4A1,         0xffff},
+	{"ulbpdoffs5gb4a2",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB4A2,         0xffff},
+	{"ulbpdoffs5gb4a3",     0xffffe000,     0,      SROM13_ULBPDOFFS5GB4A3,         0xffff},
+	{"ulbpdoffs2ga0",       0xffffe000,     0,      SROM13_ULBPDOFFS2GA0,           0xffff},
+	{"ulbpdoffs2ga1",       0xffffe000,     0,      SROM13_ULBPDOFFS2GA1,           0xffff},
+	{"ulbpdoffs2ga2",       0xffffe000,     0,      SROM13_ULBPDOFFS2GA2,           0xffff},
+	{"ulbpdoffs2ga3",       0xffffe000,     0,      SROM13_ULBPDOFFS2GA3,           0xffff},
+
+	{"rpcal5gb4",           0xffffe000,     0,      SROM13_RPCAL5GB4,               0xffff},
+
+	{"sb20in40hrlrpox",     0xffffe000,     0,       SROM13_SB20IN40HRLRPOX,        0xffff},
+
+	{"pdoffset20in40m2g",   0xffffe000,     0,      SROM13_PDOFFSET20IN40M2G,       0xffff},
+	{"pdoffset20in40m2gcore3", 0xffffe000,  0,      SROM13_PDOFFSET20IN40M2GCORE3,  0xffff},
+
+	{"pdoffset20in40m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff},
+	{"",                    0xffffe000,     0,  SROM13_PDOFFSET20IN40M5GCORE3_1,   0xffff},
+	{"pdoffset40in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff},
+	{"",                    0xffffe000,     0,  SROM13_PDOFFSET40IN80M5GCORE3_1,   0xffff},
+	{"pdoffset20in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff},
+	{"",                    0xffffe000,     0,  SROM13_PDOFFSET20IN80M5GCORE3_1,   0xffff},
+
+	{"swctrlmap4_cfg",      0xffffe000,     0,      SROM13_SWCTRLMAP4_CFG,          0xffff},
+	{"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0,      SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff},
+	{"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0,      SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff},
+	{"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff},
+	{"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0,  SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff},
+	{"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0,      SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff},
+	{"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0,      SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff},
+	{"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff},
+	{"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0,  SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff},
+	{"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0,      SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff},
+	{"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0,      SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff},
+	{"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff},
+	{"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0,  SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff},
+	{"swctrlmap4_TX5g_fem7to4", 0xffffe000, 0,      SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff},
+	{"swctrlmap4_RX5g_fem7to4", 0xffffe000, 0,      SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff},
+	{"swctrlmap4_RXByp5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff},
+	{"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0,  SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff},
+	{NULL,		0,		0,		0,			0}
+};
+
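+/* Per-path (per-antenna-core) variables. Each entry is presumably expanded
+ * once per SROM path block, with the core index appended to the variable
+ * name (e.g. "pa2gw0a" becoming "pa2gw0a0".."pa2gw0a3") when the NVRAM
+ * variables are generated. */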
+static const sromvar_t perpath_pci_sromvars[] = {
+	{"maxp2ga",	0x000000f0,	0,		SROM4_2G_ITT_MAXP,	0x00ff},
+	{"itt2ga",	0x000000f0,	0,		SROM4_2G_ITT_MAXP,	0xff00},
+	{"itt5ga",	0x000000f0,	0,		SROM4_5G_ITT_MAXP,	0xff00},
+	{"pa2gw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA,		0xffff},
+	{"pa2gw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA + 1,	0xffff},
+	{"pa2gw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA + 2,	0xffff},
+	{"pa2gw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA + 3,	0xffff},
+	{"maxp5ga",	0x000000f0,	0,		SROM4_5G_ITT_MAXP,	0x00ff},
+	{"maxp5gha",	0x000000f0,	0,		SROM4_5GLH_MAXP,	0x00ff},
+	{"maxp5gla",	0x000000f0,	0,		SROM4_5GLH_MAXP,	0xff00},
+	{"pa5gw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA,		0xffff},
+	{"pa5gw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA + 1,	0xffff},
+	{"pa5gw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA + 2,	0xffff},
+	{"pa5gw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA + 3,	0xffff},
+	{"pa5glw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA,		0xffff},
+	{"pa5glw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA + 1,	0xffff},
+	{"pa5glw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA + 2,	0xffff},
+	{"pa5glw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA + 3,	0xffff},
+	{"pa5ghw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA,		0xffff},
+	{"pa5ghw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA + 1,	0xffff},
+	{"pa5ghw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA + 2,	0xffff},
+	{"pa5ghw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA + 3,	0xffff},
+	{"maxp2ga",	0x00000700,	0,		SROM8_2G_ITT_MAXP,	0x00ff},
+	{"itt2ga",	0x00000700,	0,		SROM8_2G_ITT_MAXP,	0xff00},
+	{"itt5ga",	0x00000700,	0,		SROM8_5G_ITT_MAXP,	0xff00},
+	{"pa2gw0a",	0x00000700,	SRFL_PRHEX,	SROM8_2G_PA,		0xffff},
+	{"pa2gw1a",	0x00000700,	SRFL_PRHEX,	SROM8_2G_PA + 1,	0xffff},
+	{"pa2gw2a",	0x00000700,	SRFL_PRHEX,	SROM8_2G_PA + 2,	0xffff},
+	{"maxp5ga",	0x00000700,	0,		SROM8_5G_ITT_MAXP,	0x00ff},
+	{"maxp5gha",	0x00000700,	0,		SROM8_5GLH_MAXP,	0x00ff},
+	{"maxp5gla",	0x00000700,	0,		SROM8_5GLH_MAXP,	0xff00},
+	{"pa5gw0a",	0x00000700,	SRFL_PRHEX,	SROM8_5G_PA,		0xffff},
+	{"pa5gw1a",	0x00000700,	SRFL_PRHEX,	SROM8_5G_PA + 1,	0xffff},
+	{"pa5gw2a",	0x00000700,	SRFL_PRHEX,	SROM8_5G_PA + 2,	0xffff},
+	{"pa5glw0a",	0x00000700,	SRFL_PRHEX,	SROM8_5GL_PA,		0xffff},
+	{"pa5glw1a",	0x00000700,	SRFL_PRHEX,	SROM8_5GL_PA + 1,	0xffff},
+	{"pa5glw2a",	0x00000700,	SRFL_PRHEX,	SROM8_5GL_PA + 2,	0xffff},
+	{"pa5ghw0a",	0x00000700,	SRFL_PRHEX,	SROM8_5GH_PA,		0xffff},
+	{"pa5ghw1a",	0x00000700,	SRFL_PRHEX,	SROM8_5GH_PA + 1,	0xffff},
+	{"pa5ghw2a",	0x00000700,	SRFL_PRHEX,	SROM8_5GH_PA + 2,	0xffff},
+
+	/* sromrev 11 */
+	{"maxp2ga",	0xfffff800,	0,			 SROM11_2G_MAXP,	0x00ff},
+	{"pa2ga",	0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA,		0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX,		 SROM11_2G_PA + 2,	0xffff},
+	{"rxgains5gmelnagaina",	0x00000800,	0,		 SROM11_RXGAINS1,	0x0007},
+	{"rxgains5gmtrisoa",	0x00000800,	0,		 SROM11_RXGAINS1,	0x0078},
+	{"rxgains5gmtrelnabypa", 0x00000800,	0,		 SROM11_RXGAINS1,	0x0080},
+	{"rxgains5ghelnagaina",	0x00000800,	0,		 SROM11_RXGAINS1,	0x0700},
+	{"rxgains5ghtrisoa",	0x00000800,	0,		 SROM11_RXGAINS1,	0x7800},
+	{"rxgains5ghtrelnabypa", 0x00000800,	0,		 SROM11_RXGAINS1,	0x8000},
+	{"rxgains2gelnagaina",	0x00000800,	0,		 SROM11_RXGAINS,	0x0007},
+	{"rxgains2gtrisoa",	0x00000800,	0,		 SROM11_RXGAINS,	0x0078},
+	{"rxgains2gtrelnabypa",	0x00000800,	0,		 SROM11_RXGAINS,	0x0080},
+	{"rxgains5gelnagaina",	0x00000800,	0,		 SROM11_RXGAINS,	0x0700},
+	{"rxgains5gtrisoa",	0x00000800,	0,		 SROM11_RXGAINS,	0x7800},
+	{"rxgains5gtrelnabypa",	0x00000800,	0,		 SROM11_RXGAINS,	0x8000},
+	{"maxp5ga",	0x00000800,	SRFL_ARRAY,		 SROM11_5GB1B0_MAXP,	0x00ff},
+	{"",		0x00000800,	SRFL_ARRAY,		 SROM11_5GB1B0_MAXP,	0xff00},
+	{"",		0x00000800,	SRFL_ARRAY,		 SROM11_5GB3B2_MAXP,	0x00ff},
+	{"",		0x00000800,	0,			 SROM11_5GB3B2_MAXP,	0xff00},
+	{"pa5ga",	0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1,	0xffff},
+	{"",		0x00000800,	SRFL_PRHEX,		 SROM11_5GB3_PA + 2,	0xffff},
+
+	/* sromrev 12 */
+	{"maxp5gb4a",	0xfffff000,	0,		 SROM12_5GB42G_MAXP,	0x00ff00},
+	{"pa2ga",	0xfffff000,	SRFL_PRHEX | SRFL_ARRAY,  SROM12_2GB0_PA_W0,	0x00ffff},
+	{"",	0xfffff000,	SRFL_PRHEX | SRFL_ARRAY,	  SROM12_2GB0_PA_W1,	0x00ffff},
+	{"",	0xfffff000,	SRFL_PRHEX | SRFL_ARRAY,	  SROM12_2GB0_PA_W2,	0x00ffff},
+	{"",	0xfffff000,	SRFL_PRHEX,		 SROM12_2GB0_PA_W3,	0x00ffff},
+
+	{"pa2g40a",	0xfffff000,	SRFL_PRHEX | SRFL_ARRAY,  SROM12_2G40B0_PA_W0,	0x00ffff},
+	{"",	0xfffff000,	SRFL_PRHEX | SRFL_ARRAY,	  SROM12_2G40B0_PA_W1,	0x00ffff},
+	{"",	0xfffff000,	SRFL_PRHEX | SRFL_ARRAY,	  SROM12_2G40B0_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX,		 SROM12_2G40B0_PA_W3,	0x00ffff},
+	{"maxp5gb0a",	0xfffff000, 0,	     SROM12_5GB1B0_MAXP,	0x00ff},
+	{"maxp5gb1a",	0xfffff000, 0,       SROM12_5GB1B0_MAXP,	0x00ff00},
+	{"maxp5gb2a",	0xfffff000, 0,	     SROM12_5GB3B2_MAXP,	0x00ff},
+	{"maxp5gb3a",	0xfffff000, 0,	     SROM12_5GB3B2_MAXP,	0x00ff00},
+
+	{"pa5ga",   0xfffff000, SRFL_PRHEX | SRFL_ARRAY,   SROM12_5GB0_PA_W0,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB0_PA_W1,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,         SROM12_5GB0_PA_W2,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB0_PA_W3,		0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB1_PA_W0,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB1_PA_W1,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB1_PA_W2,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB1_PA_W3,		0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB2_PA_W0,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB2_PA_W1,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB2_PA_W2,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB2_PA_W3,		0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB3_PA_W0,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB3_PA_W1,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,         SROM12_5GB3_PA_W2,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB3_PA_W3,		0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB4_PA_W0,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB4_PA_W1,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5GB4_PA_W2,		0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX,	                   SROM12_5GB4_PA_W3,		0x00ffff},
+
+	{"pa5g40a",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B0_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B0_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B0_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B1_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B1_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B1_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,         SROM12_5G40B1_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B2_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B2_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B2_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B2_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B3_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B3_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B3_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B3_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B4_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B4_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	     SROM12_5G40B4_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX,		             SROM12_5G40B4_PA_W3,	0x00ffff},
+
+	{"pa5g80a",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,	 SROM12_5G80B0_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B0_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B0_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B0_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B1_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B1_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B1_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B1_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B2_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B2_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B2_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B2_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B3_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B3_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B3_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B3_PA_W3,	0x00ffff},
+
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B4_PA_W0,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B4_PA_W1,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX | SRFL_ARRAY,		 SROM12_5G80B4_PA_W2,	0x00ffff},
+	{"",	0xfffff000, SRFL_PRHEX,		                 SROM12_5G80B4_PA_W3,	0x00ffff},
+	/* sromrev 13 */
+	{"rxgains2gelnagaina",   0xffffe000,     0,       SROM13_RXGAINS,  0x0007},
+	{"rxgains2gtrisoa",     0xffffe000,     0,       SROM13_RXGAINS,  0x0078},
+	{"rxgains2gtrelnabypa", 0xffffe000,     0,       SROM13_RXGAINS,  0x0080},
+	{"rxgains5gelnagaina",  0xffffe000,     0,       SROM13_RXGAINS,  0x0700},
+	{"rxgains5gtrisoa",     0xffffe000,     0,       SROM13_RXGAINS,  0x7800},
+	{"rxgains5gtrelnabypa", 0xffffe000,     0,       SROM13_RXGAINS,  0x8000},
+	{NULL,		0,		0,		0, 			0}
+};
+
+#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N))
+#define	PHY_TYPE_HT		7	/* HT-Phy value */
+#define	PHY_TYPE_N		4	/* N-Phy value */
+#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) */
+#if !defined(PHY_TYPE_AC)
+#define	PHY_TYPE_AC		11	/* AC-Phy value */
+#endif /* !defined(PHY_TYPE_AC) */
+#if !defined(PHY_TYPE_LCN20)
+#define	PHY_TYPE_LCN20		12	/* LCN20-Phy value */
+#endif /* !defined(PHY_TYPE_LCN20) */
+#if !defined(PHY_TYPE_NULL)
+#define	PHY_TYPE_NULL		0xf	/* Invalid Phy value */
+#endif /* !defined(PHY_TYPE_NULL) */
+
+typedef struct {
+	uint16	phy_type;
+	uint16	bandrange;
+	uint16	chain;
+	const char *vars;
+} pavars_t;
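+/* Maps a (PHY type, band range, TX chain) tuple to the space-separated list
+ * of PA parameter variable names used for that combination; each table below
+ * is terminated by a PHY_TYPE_NULL entry. */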
+
+static const pavars_t pavars[] = {
+	/* HTPHY */
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G,  0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G,  1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G,  2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1,  "pa5glw0a3 pa5glw1a3 pa5glw2a3"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"},
+	/* NPHY */
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G,  0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G,  1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  2, "pa2ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  1, "pa5ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  2, "pa5ga2"},
+	/* LCN20PHY */
+	{PHY_TYPE_LCN20, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+
+static const pavars_t pavars_SROM12[] = {
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  2, "pa2ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40,  0, "pa2g40a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40,  1, "pa2g40a1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40,  2, "pa2g40a2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND,  1, "pa5ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND,  2, "pa5ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40,  0, "pa5g40a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40,  1, "pa5g40a1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40,  2, "pa5g40a2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80,  0, "pa5g80a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80,  1, "pa5g80a1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80,  2, "pa5g80a2"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+static const pavars_t pavars_SROM13[] = {
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  2, "pa2ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  3, "pa2ga3"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40,  0, "pa2g40a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40,  1, "pa2g40a1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40,  2, "pa2g40a2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40,  3, "pa2g40a3"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND,  1, "pa5ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND,  2, "pa5ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND,  3, "pa5ga3"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40,  0, "pa5g40a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40,  1, "pa5g40a1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40,  2, "pa5g40a2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40,  3, "pa5g40a3"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80,  0, "pa5g80a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80,  1, "pa5g80a1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80,  2, "pa5g80a2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80,  3, "pa5g80a3"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 1 */
+static const pavars_t pavars_bwver_1[] = {
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2gccka0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  1, "pa5gbw40a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  2, "pa5gbw80a0"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 2 */
+static const pavars_t pavars_bwver_2[] = {
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  1, "pa5ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  2, "pa5gbw4080a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  3, "pa5gbw4080a1"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 3 */
+static const pavars_t pavars_bwver_3[] = {
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  2, "pa2gccka0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  3, "pa2gccka1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  1, "pa5ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  2, "pa5gbw4080a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  3, "pa5gbw4080a1"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+typedef struct {
+	uint16	phy_type;
+	uint16	bandrange;
+	const char *vars;
+} povars_t;
+
+static const povars_t povars[] = {
+	/* NPHY */
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G,  "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 "
+	"mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 "
+	"mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 "
+	"mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 "
+	"mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"},
+	{PHY_TYPE_NULL, 0, ""}
+};
+
+typedef struct {
+	uint8	tag;		/* Broadcom subtag name */
+	uint32	revmask;	/* Supported cis_sromrev bitmask. Some of the parameters in
+				 * different tuples have the same name. Therefore, the MFGc tool
+				 * needs to know which tuple to generate when seeing these
+				 * parameters (given that we know sromrev from user input, like the
+				 * nvram file).
+				 */
+	uint8	len;		/* Length field of the tuple, note that it includes the
+				 * subtag name (1 byte): 1 + tuple content length
+				 */
+	const char *params;
+} cis_tuple_t;
+
+#define OTP_RAW		(0xff - 1)	/* Reserved tuple number for wrvar Raw input */
+#define OTP_VERS_1	(0xff - 2)	/* CISTPL_VERS_1 */
+#define OTP_MANFID	(0xff - 3)	/* CISTPL_MANFID */
+#define OTP_RAW1	(0xff - 4)	/* Like RAW, but comes first */
+
+/** this array is used by CIS creating/writing applications */
+static const cis_tuple_t cis_hnbuvars[] = {
+/*       tag			revmask   len  params */
+	{OTP_RAW1,		0xffffffff, 0, ""},	/* special case */
+	{OTP_VERS_1,	0xffffffff, 0, "smanf sproductname"},	/* special case (non BRCM tuple) */
+	{OTP_MANFID,	0xffffffff, 4, "2manfid 2prodid"},	/* special case (non BRCM tuple) */
+	/* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+	{HNBU_UMANFID,		0xffffffff, 8, "8usbmanfid"},
+	{HNBU_SROMREV,		0xffffffff, 2, "1sromrev"},
+	/* NOTE: subdevid is also written to boardtype.
+	 *       Need to write HNBU_BOARDTYPE to change it if it is different.
+	 */
+	{HNBU_CHIPID,		0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"},
+	{HNBU_BOARDREV,		0xffffffff, 3, "2boardrev"},
+	{HNBU_PAPARMS,		0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"},
+	{HNBU_AA,		0xffffffff, 3, "1aa2g 1aa5g"},
+	{HNBU_AA,		0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */
+	{HNBU_AG,		0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"},
+	{HNBU_BOARDFLAGS,	0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 "
+	"4boardflags4 4boardflags5 "},
+	{HNBU_LEDS,		0xffffffff, 17, "1ledbh0 1ledbh1 1ledbh2 1ledbh3 1ledbh4 1ledbh5 "
+	"1ledbh6 1ledbh7 1ledbh8 1ledbh9 1ledbh10 1ledbh11 1ledbh12 1ledbh13 1ledbh14 1ledbh15"},
+	{HNBU_CCODE,		0xffffffff, 4, "2ccode 1cctl"},
+	{HNBU_CCKPO,		0xffffffff, 3, "2cckpo"},
+	{HNBU_OFDMPO,		0xffffffff, 5, "4ofdmpo"},
+	{HNBU_PAPARMS5G,	0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 "
+	"2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit "
+	"1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"},
+	{HNBU_RDLID,		0xffffffff, 3, "2rdlid"},
+	{HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g "
+	"0rssisav2g 0bxa2g"}, /* special case */
+	{HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g "
+	"0rssisav5g 0bxa5g"}, /* special case */
+	{HNBU_XTALFREQ,		0xffffffff, 5, "4xtalfreq"},
+	{HNBU_TRI2G,		0xffffffff, 2, "1tri2g"},
+	{HNBU_TRI5G,		0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"},
+	{HNBU_RXPO2G,		0xffffffff, 2, "1rxpo2g"},
+	{HNBU_RXPO5G,		0xffffffff, 2, "1rxpo5g"},
+	{HNBU_BOARDNUM,		0xffffffff, 3, "2boardnum"},
+	{HNBU_MACADDR,		0xffffffff, 7, "6macaddr"},	/* special case */
+	{HNBU_RDLSN,		0xffffffff, 3, "2rdlsn"},
+	{HNBU_BOARDTYPE,	0xffffffff, 3, "2boardtype"},
+	{HNBU_LEDDC,		0xffffffff, 3, "2leddc"},
+	{HNBU_RDLRNDIS,		0xffffffff, 2, "1rdlndis"},
+	{HNBU_CHAINSWITCH,	0xffffffff, 5, "1txchain 1rxchain 2antswitch"},
+	{HNBU_REGREV,		0xffffffff, 3, "2regrev"},
+	{HNBU_FEM,		0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g "
+	"0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */
+	{HNBU_PAPARMS_C0,	0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 "
+	"2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 "
+	"2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"},
+	{HNBU_PAPARMS_C1,	0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 "
+	"2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 "
+	"2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"},
+	{HNBU_PO_CCKOFDM,	0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo "
+	"4ofdm5ghpo"},
+	{HNBU_PO_MCS2G,		0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 "
+	"2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"},
+	{HNBU_PO_MCS5GM,	0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 "
+	"2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"},
+	{HNBU_PO_MCS5GLH,	0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 "
+	"2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 "
+	"2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 "
+	"2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"},
+	{HNBU_CCKFILTTYPE,	0xffffffff, 2, "1cckdigfilttype"},
+	{HNBU_PO_CDD,		0xffffffff, 3, "2cddpo"},
+	{HNBU_PO_STBC,		0xffffffff, 3, "2stbcpo"},
+	{HNBU_PO_40M,		0xffffffff, 3, "2bw40po"},
+	{HNBU_PO_40MDUP,	0xffffffff, 3, "2bwduppo"},
+	{HNBU_RDLRWU,		0xffffffff, 2, "1rdlrwu"},
+	{HNBU_WPS,		0xffffffff, 3, "1wpsgpio 1wpsled"},
+	{HNBU_USBFS,		0xffffffff, 2, "1usbfs"},
+	{HNBU_ELNA2G,           0xffffffff, 2, "1elna2g"},
+	{HNBU_ELNA5G,           0xffffffff, 2, "1elna5g"},
+	{HNBU_CUSTOM1,		0xffffffff, 5, "4customvar1"},
+	{OTP_RAW,		0xffffffff, 0, ""},	/* special case */
+	{HNBU_OFDMPO5G,		0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"},
+	{HNBU_USBEPNUM,		0xffffffff, 3, "2usbepnum"},
+	{HNBU_CCKBW202GPO,	0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"},
+	{HNBU_LEGOFDMBW202GPO,	0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"},
+	{HNBU_LEGOFDMBW205GPO,	0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo "
+	"4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"},
+	{HNBU_MCS2GPO,	0xffffffff, 17,	"4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"},
+	{HNBU_MCS5GLPO,	0xffffffff, 13,	"4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"},
+	{HNBU_MCS5GMPO,	0xffffffff, 13,	"4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"},
+	{HNBU_MCS5GHPO,	0xffffffff, 13,	"4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"},
+	{HNBU_MCS32PO,	0xffffffff, 3,	"2mcs32po"},
+	{HNBU_LEG40DUPPO,	0xffffffff, 3,	"2legofdm40duppo"},
+	{HNBU_TEMPTHRESH,	0xffffffff, 7,	"1tempthresh 0temps_period 0temps_hysteresis "
+	"1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option "
+	"1phycal_tempdelta"}, /* special case */
+	{HNBU_MUXENAB,		0xffffffff, 2,	"1muxenab"},
+	{HNBU_FEM_CFG,		0xfffff800, 5,	"0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g "
+	"0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g "
+	"0tssiposslope5g"}, /* special case */
+	{HNBU_ACPA_C0,		0xfffff800, 39,	"2subband5gver 2maxp2ga0 2*3pa2ga0 "
+	"1*4maxp5ga0 2*12pa5ga0"},
+	{HNBU_ACPA_C1,		0xfffff800, 37,	"2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
+	{HNBU_ACPA_C2,		0xfffff800, 37,	"2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
+	{HNBU_MEAS_PWR,		0xfffff800, 5,	"1measpower 1measpower1 1measpower2 2rawtempsense"},
+	{HNBU_PDOFF,		0xfffff800, 13,	"2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 "
+	"2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"},
+	{HNBU_ACPPR_2GPO,	0xfffff800, 13,	"2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo "
+	"2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo "
+	"2sb20in80ofdmlrbw202gpo"},
+	{HNBU_ACPPR_5GPO,	0xfffff800, 59,	"4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo "
+	"4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo "
+	"4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po "
+	"2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"},
+	{HNBU_MCS5Gx1PO,	0xfffff800, 9,	"4mcsbw205gx1po 4mcsbw405gx1po"},
+	{HNBU_ACPPR_SBPO,	0xfffff800, 49,	"2sb20in40hrpo 2sb20in80and160hr5glpo "
+	"2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo "
+	"2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo "
+	"2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo "
+	"4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo "
+	"2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po "
+	},
+	{HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo "
+	"2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo "
+	"2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo "
+	"2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo "
+	"2sb20in80p80lr5gpo 2dot11agduppo"},
+	{HNBU_NOISELVL,		0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 "
+	"1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"},
+	{HNBU_RXGAIN_ERR,	0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 "
+	"1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"},
+	{HNBU_AGBGA,		0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"},
+	{HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"},
+	{HNBU_UUID, 		0xffffffff, 17,	"16uuid"},
+	{HNBU_WOWLGPIO,		0xffffffff, 2,  "1wowl_gpio"},
+	{HNBU_ACRXGAINS_C0,	0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 "
+	"0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 "
+	"0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 "
+	"0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"},	/* special case */
+	{HNBU_ACRXGAINS_C1,	0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 "
+	"0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 "
+	"0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 "
+	"0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"},	/* special case */
+	{HNBU_ACRXGAINS_C2,	0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 "
+	"0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 "
+	"0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 "
+	"0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"},	/* special case */
+	{HNBU_TXDUTY, 		0xfffff800, 9,	"2tx_duty_cycle_ofdm_40_5g "
+	"2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"},
+	{HNBU_PDOFF_2G,		0xfffff800, 3,	"0pdoffset2g40ma0 0pdoffset2g40ma1 "
+	"0pdoffset2g40ma2 0pdoffset2g40mvalid"},
+	{HNBU_ACPA_CCK,		0xfffff800, 7,	"2*3pa2gccka0"},
+	{HNBU_ACPA_40,		0xfffff800, 25,	"2*12pa5gbw40a0"},
+	{HNBU_ACPA_80,		0xfffff800, 25,	"2*12pa5gbw80a0"},
+	{HNBU_ACPA_4080,	0xfffff800, 49,	"2*12pa5gbw4080a0 2*12pa5gbw4080a1"},
+	{HNBU_SUBBAND5GVER,	0xfffff800, 3,	"2subband5gver"},
+	{HNBU_PAPARAMBWVER,	0xfffff800, 2,	"1paparambwver"},
+	{HNBU_TXBFRPCALS,  0xfffff800, 11,
+	"2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */
+	{HNBU_GPIO_PULL_DOWN,	0xffffffff, 5, "4gpdn"},
+	{HNBU_MACADDR2,		0xffffffff, 7, "6macaddr2"},	/* special case */
+	{0xFF,			0xffffffff, 0, ""}
+};
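/* Editor's note (not in the original patch): the "params" strings above use a
 * compact width encoding -- the leading digit appears to be the per-value width
 * in bytes, "N*M" appears to mean M consecutive N-byte values, and a leading 0
 * appears to mark bit-packed fields that the comments flag as special cases.
 * For example, HNBU_ACPA_C0's "2subband5gver 2maxp2ga0 2*3pa2ga0 1*4maxp5ga0
 * 2*12pa5ga0" accounts for 2 + 2 + 6 + 4 + 24 = 38 content bytes, which plus
 * the 1-byte subtag matches its len field of 39.
 */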
+
+#endif /* _bcmsrom_tbl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmtcp.h b/drivers/net/wireless/bcmdhd/include/bcmtcp.h
new file mode 100644
index 0000000..4d40948
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmtcp.h
@@ -0,0 +1,93 @@
+/*
+ * Fundamental constants relating to TCP Protocol
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmtcp.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _bcmtcp_h_
+#define _bcmtcp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define TCP_SRC_PORT_OFFSET	0	/* TCP source port offset */
+#define TCP_DEST_PORT_OFFSET	2	/* TCP dest port offset */
+#define TCP_SEQ_NUM_OFFSET	4	/* TCP sequence number offset */
+#define TCP_ACK_NUM_OFFSET	8	/* TCP acknowledgement number offset */
+#define TCP_HLEN_OFFSET		12	/* HLEN and reserved bits offset */
+#define TCP_FLAGS_OFFSET	13	/* FLAGS and reserved bits offset */
+#define TCP_CHKSUM_OFFSET	16	/* TCP body checksum offset */
+
+#define TCP_PORT_LEN		2	/* TCP port field length */
+
+/* 8bit TCP flag field */
+#define TCP_FLAG_URG            0x20
+#define TCP_FLAG_ACK            0x10
+#define TCP_FLAG_PSH            0x08
+#define TCP_FLAG_RST            0x04
+#define TCP_FLAG_SYN            0x02
+#define TCP_FLAG_FIN            0x01
+
+#define TCP_HLEN_MASK           0xf000
+#define TCP_HLEN_SHIFT          12
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr
+{
+	uint16	src_port;	/* Source Port Address */
+	uint16	dst_port;	/* Destination Port Address */
+	uint32	seq_num;	/* TCP Sequence Number */
+	uint32	ack_num;	/* TCP Acknowledgement Number */
+	uint16	hdrlen_rsvd_flags;	/* Header length, reserved bits and flags */
+	uint16	tcpwin;		/* TCP window */
+	uint16	chksum;		/* Segment checksum with pseudoheader */
+	uint16	urg_ptr;	/* Points to seq-num of byte following urg data */
+} BWL_POST_PACKED_STRUCT;
+
+#define TCP_MIN_HEADER_LEN 20
+
+#define TCP_HDRLEN_MASK 0xf0
+#define TCP_HDRLEN_SHIFT 4
+#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT)
+
+#define TCP_FLAGS_MASK  0x1f
+#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* Sequence-number comparisons that stay correct across 32-bit wraparound. */
+#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31))		/* a >= b */
+#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31))		/* a <= b */
+#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b)		/* a > b */
+#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b)		/* a < b */
+
+#endif	/* #ifndef _bcmtcp_h_ */
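A brief usage sketch for the helpers above (editor's addition, not part of the patch): extracting the header length from the byte at TCP_HLEN_OFFSET and doing a wraparound-safe sequence comparison. The function names and the <stdint.h> types are illustrative only; in-driver code would use the typedefs.h aliases and the macros directly.

#include <stdint.h>

/* header length in bytes: high nibble of byte 12 gives 32-bit words */
static unsigned tcp_header_bytes(const uint8_t *tcp)
{
	unsigned words = (tcp[12] & 0xf0) >> 4;	/* same math as TCP_HDRLEN() */
	return words * 4;	/* a valid header yields >= TCP_MIN_HEADER_LEN */
}

/* same idea as IS_TCPSEQ_GE(): the unsigned difference (a - b) stays below
 * 2^31 whenever a is at or ahead of b, even across 32-bit wraparound
 */
static int tcpseq_ge(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < 0x80000000u;
}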
diff --git a/drivers/net/wireless/bcmdhd/include/bcmudp.h b/drivers/net/wireless/bcmdhd/include/bcmudp.h
new file mode 100644
index 0000000..e558134
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmudp.h
@@ -0,0 +1,61 @@
+/*
+ * Fundamental constants relating to UDP Protocol
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmudp.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _bcmudp_h_
+#define _bcmudp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* UDP header */
+#define UDP_DEST_PORT_OFFSET	2	/* UDP dest port offset */
+#define UDP_LEN_OFFSET		4	/* UDP length offset */
+#define UDP_CHKSUM_OFFSET	6	/* UDP body checksum offset */
+
+#define UDP_HDR_LEN	8	/* UDP header length */
+#define UDP_PORT_LEN	2	/* UDP port length */
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmudp_hdr
+{
+	uint16	src_port;	/* Source Port Address */
+	uint16	dst_port;	/* Destination Port Address */
+	uint16	len;		/* Number of bytes in datagram including header */
+	uint16	chksum;		/* entire datagram checksum with pseudoheader */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* #ifndef _bcmudp_h_ */
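A similarly small sketch (editor's addition): reading the length field at UDP_LEN_OFFSET from a raw header. The fields are documented as network order, so the bytes are combined explicitly; the function name is illustrative.

#include <stdint.h>

static unsigned udp_payload_len(const uint8_t *udp)
{
	unsigned total = ((unsigned)udp[4] << 8) | udp[5];	/* UDP_LEN_OFFSET, header included */
	return (total >= 8) ? total - 8 : 0;			/* subtract UDP_HDR_LEN */
}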
diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h
new file mode 100644
index 0000000..3c061b8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -0,0 +1,1355 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmutils.h 701785 2017-05-26 11:08:50Z $
+ */
+
+#ifndef	_bcmutils_h_
+#define	_bcmutils_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define bcm_strncpy_s(dst, noOfElements, src, count)    strncpy((dst), (src), (count))
+#define bcm_strncat_s(dst, noOfElements, src, count)    strncat((dst), (src), (count))
+#define bcm_snprintf_s snprintf
+#define bcm_sprintf_s snprintf
+
+/*
+ * #define bcm_strcpy_s(dst, count, src)            strncpy((dst), (src), (count))
+ * Use bcm_strncpy_s instead as it is a safer option
+ * bcm_strcat_s: Use bcm_strncat_s as a safer option
+ *
+ */
+
+#define BCM_BIT(x)		(1 << (x))
+
+/* ctype replacement */
+#define _BCM_U	0x01	/* upper */
+#define _BCM_L	0x02	/* lower */
+#define _BCM_D	0x04	/* digit */
+#define _BCM_C	0x08	/* cntrl */
+#define _BCM_P	0x10	/* punct */
+#define _BCM_S	0x20	/* white space (space/lf/tab) */
+#define _BCM_X	0x40	/* hex digit */
+#define _BCM_SP	0x80	/* hard space (0x20) */
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x)	(bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c)	((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c)	((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c)	((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c)	((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c)	((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c)	((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c)	((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c)	(bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c)	(bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx)
+
+#define KB(bytes)	(((bytes) + 1023) / 1024)
+
+/* Buffer structure for collecting string-formatted data
+ * using the bcm_bprintf() API.
+ * Use bcm_binit() to initialize before use.
+ */
+
+struct bcmstrbuf {
+	char *buf;	/* pointer to current position in origbuf */
+	unsigned int size;	/* current (residual) size in bytes */
+	char *origbuf;	/* unmodified pointer to original buffer */
+	unsigned int origsize;	/* unmodified original buffer size in bytes */
+};
+
+#define BCMSTRBUF_LEN(b)	(b->size)
+#define BCMSTRBUF_BUF(b)	(b->buf)
+
+/* ** driver-only section ** */
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <hnd_pktq.h>
+#include <hnd_pktpool.h>
+
+#define GPIO_PIN_NOTDEFINED 	0x20	/* Pin not defined */
+
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#ifndef SPINWAIT_POLL_PERIOD
+#define SPINWAIT_POLL_PERIOD	10
+#endif
+
+#define SPINWAIT(exp, us) { \
+	uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \
+	while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \
+		OSL_DELAY(SPINWAIT_POLL_PERIOD); \
+		countdown -= SPINWAIT_POLL_PERIOD; \
+	} \
+}
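/* Editor's illustration (not part of the patch): the intended SPINWAIT()
 * pattern. The status register and the 0x1 "done" bit are hypothetical;
 * note the explicit re-test of the condition after the wait, as the
 * comment above requires.
 */
static int wait_for_done(volatile uint32 *status_reg)
{
	SPINWAIT((*status_reg & 0x1) == 0, 1000);	/* spin for at most ~1000 us */
	if ((*status_reg & 0x1) == 0)
		return -1;	/* still not done (e.g. BCME_NOTREADY); caller recovers */
	return 0;
}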
+
+/* forward definition of ether_addr structure used by some function prototypes */
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+#define UP_TABLE_MAX	((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1)	/* 64 max */
+
+/* externs */
+/* packet */
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint pktsegcnt_war(osl_t *osh, void *p);
+extern uint8 *pktdataoffset(osl_t *osh, void *p,  uint offset);
+extern void *pktoffset(osl_t *osh, void *p,  uint offset);
+/* Add to adjust 802.1x priority */
+extern void pktset8021xprio(void *pkt, int prio);
+
+/* Get priority from a packet and pass it back in scb (or equiv) */
+#define	PKTPRIO_VDSCP	0x100		/* DSCP prio found after VLAN tag */
+#define	PKTPRIO_VLAN	0x200		/* VLAN prio found */
+#define	PKTPRIO_UPD	0x400		/* DSCP used to update VLAN prio */
+#define	PKTPRIO_DSCP	0x800		/* DSCP prio found */
+
+/* DSCP type definitions (RFC4594) */
+/* AF1x: High-Throughput Data (RFC2597) */
+#define DSCP_AF11	0x0A
+#define DSCP_AF12	0x0C
+#define DSCP_AF13	0x0E
+/* AF2x: Low-Latency Data (RFC2597) */
+#define DSCP_AF21	0x12
+#define DSCP_AF22	0x14
+#define DSCP_AF23	0x16
+/* AF3x: Multimedia Streaming (RFC2597) */
+#define DSCP_AF31	0x1A
+#define DSCP_AF32	0x1C
+#define DSCP_AF33	0x1E
+/* EF: Telephony (RFC3246) */
+#define DSCP_EF		0x2E
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag);
+extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp);
+
+/* ethernet address */
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
+
+/* ip address */
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+extern char *bcm_ipv6_ntoa(void *ipv6, char *buf);
+extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip);
+
+/* delay */
+extern void bcm_mdelay(uint ms);
+/* variable access */
+#if defined(BCM_RECLAIM)
+#define NVRAM_RECLAIM_CHECK(name)							\
+	if (bcm_attach_part_reclaimed == TRUE) {						\
+		*(char*) 0 = 0; /* TRAP */						\
+		return NULL;								\
+	}
+#else /* BCM_RECLAIM */
+#define NVRAM_RECLAIM_CHECK(name)
+#endif /* BCM_RECLAIM */
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define	bcmlog(fmt, a1, a2)
+#define	bcmdumplog(buf, size)	*buf = '\0'
+#define	bcmdumplogent(buf, idx)	-1
+
+#define TSF_TICKS_PER_MS	1000
+#define TS_ENTER		0xdeadbeef	/* Timestamp profiling enter */
+#define TS_EXIT			0xbeefcafe	/* Timestamp profiling exit */
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+#define bcmdumptslog(b)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+/* Support for sharing code across in-driver iovar implementations.
+ * The intent is that a driver use this structure to map iovar names
+ * to its (private) iovar identifiers, and the lookup function to
+ * find the entry.  Macros are provided to map ids and get/set actions
+ * into a single number space for a switch statement.
+ */
+
+/* iovar structure */
+typedef struct bcm_iovar {
+	const char *name;	/* name for lookup and display */
+	uint16 varid;		/* id for switch */
+	uint16 flags;		/* driver-specific flag bits */
+	uint8 flags2;		 /* driver-specific flag bits */
+	uint8 type;		/* base type of argument */
+	uint16 minlen;		/* min length for buffer vars */
+} bcm_iovar_t;
+
+/* varid definitions are per-driver, may use these get/set bits */
+
+/* IOVar action bits for id mapping */
+#define IOV_GET 0 /* Get an iovar */
+#define IOV_SET 1 /* Set an iovar */
+
+/* Varid to actionid mapping */
+#define IOV_GVAL(id)		((id) * 2)
+#define IOV_SVAL(id)		((id) * 2 + IOV_SET)
+#define IOV_ISSET(actionid)	((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid)	(actionid >> 1)
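/* Editor's sketch of the pattern described above: a per-driver iovar id is
 * combined with the get/set action so a single switch covers both directions.
 * IOV_FOO and the handler are hypothetical, not part of this header.
 */
enum { IOV_FOO = 1 };

static int foo_doiovar(uint32 actionid, int32 *val)
{
	switch (actionid) {
	case IOV_GVAL(IOV_FOO):
		*val = 0;	/* report the current setting */
		return 0;	/* BCME_OK */
	case IOV_SVAL(IOV_FOO):
		/* apply *val as the new setting */
		return 0;	/* BCME_OK */
	default:
		return -23;	/* BCME_UNSUPPORTED */
	}
}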
+
+/* flags are per-driver based on driver attributes */
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+
+/* ioctl structure */
+typedef struct wlc_ioctl_cmd {
+	uint16 cmd;			/**< IOCTL command */
+	uint16 flags;			/**< IOCTL command flags */
+	int16 min_len;			/**< IOCTL command minimum argument len (in bytes) */
+} wlc_ioctl_cmd_t;
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif 
+#endif	/* BCMDRIVER */
+
+/* string */
+extern int bcm_atoi(const char *s);
+extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
+extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+/* Base type definitions */
+#define IOVT_VOID	0	/* no value (implicitly set only) */
+#define IOVT_BOOL	1	/* any value ok (zero/nonzero) */
+#define IOVT_INT8	2	/* integer values are range-checked */
+#define IOVT_UINT8	3	/* unsigned int 8 bits */
+#define IOVT_INT16	4	/* int 16 bits */
+#define IOVT_UINT16	5	/* unsigned int 16 bits */
+#define IOVT_INT32	6	/* int 32 bits */
+#define IOVT_UINT32	7	/* unsigned int 32 bits */
+#define IOVT_BUFFER	8	/* buffer is size-checked as per minlen */
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+/* Initializer for IOV type strings */
+#define BCM_IOV_TYPE_INIT { \
+	"void", \
+	"bool", \
+	"int8", \
+	"uint8", \
+	"int16", \
+	"uint16", \
+	"int32", \
+	"uint32", \
+	"buffer", \
+	"" }
+
+#define BCM_IOVT_IS_INT(type) (\
+	(type == IOVT_BOOL) || \
+	(type == IOVT_INT8) || \
+	(type == IOVT_UINT8) || \
+	(type == IOVT_INT16) || \
+	(type == IOVT_UINT16) || \
+	(type == IOVT_INT32) || \
+	(type == IOVT_UINT32))
+
+/* ** driver/apps-shared section ** */
+
+#define BCME_STRLEN 		64	/* Max string length for BCM errors */
+#define VALID_BCMERROR(e)  ((e <= 0) && (e >= BCME_LAST))
+
+
+/*
+ * New error codes may be added, but the defined ones must not be changed or
+ * deleted, because these error codes are exposed to user code.
+ * Whenever a new error code is added to this list, please update the
+ * errorstring table with the related error string and update the OSL files
+ * with the OS-specific error code mapping.
+ */
+
+#define BCME_OK				0	/* Success */
+#define BCME_ERROR			-1	/* Error generic */
+#define BCME_BADARG			-2	/* Bad Argument */
+#define BCME_BADOPTION			-3	/* Bad option */
+#define BCME_NOTUP			-4	/* Not up */
+#define BCME_NOTDOWN			-5	/* Not down */
+#define BCME_NOTAP			-6	/* Not AP */
+#define BCME_NOTSTA			-7	/* Not STA  */
+#define BCME_BADKEYIDX			-8	/* BAD Key Index */
+#define BCME_RADIOOFF 			-9	/* Radio Off */
+#define BCME_NOTBANDLOCKED		-10	/* Not  band locked */
+#define BCME_NOCLK			-11	/* No Clock */
+#define BCME_BADRATESET			-12	/* BAD Rate valueset */
+#define BCME_BADBAND			-13	/* BAD Band */
+#define BCME_BUFTOOSHORT		-14	/* Buffer too short */
+#define BCME_BUFTOOLONG			-15	/* Buffer too long */
+#define BCME_BUSY			-16	/* Busy */
+#define BCME_NOTASSOCIATED		-17	/* Not Associated */
+#define BCME_BADSSIDLEN			-18	/* Bad SSID len */
+#define BCME_OUTOFRANGECHAN		-19	/* Out of Range Channel */
+#define BCME_BADCHAN			-20	/* Bad Channel */
+#define BCME_BADADDR			-21	/* Bad Address */
+#define BCME_NORESOURCE			-22	/* Not Enough Resources */
+#define BCME_UNSUPPORTED		-23	/* Unsupported */
+#define BCME_BADLEN			-24	/* Bad length */
+#define BCME_NOTREADY			-25	/* Not Ready */
+#define BCME_EPERM			-26	/* Not Permitted */
+#define BCME_NOMEM			-27	/* No Memory */
+#define BCME_ASSOCIATED			-28	/* Associated */
+#define BCME_RANGE			-29	/* Not In Range */
+#define BCME_NOTFOUND			-30	/* Not Found */
+#define BCME_WME_NOT_ENABLED		-31	/* WME Not Enabled */
+#define BCME_TSPEC_NOTFOUND		-32	/* TSPEC Not Found */
+#define BCME_ACM_NOTSUPPORTED		-33	/* ACM Not Supported */
+#define BCME_NOT_WME_ASSOCIATION	-34	/* Not WME Association */
+#define BCME_SDIO_ERROR			-35	/* SDIO Bus Error */
+#define BCME_DONGLE_DOWN		-36	/* Dongle Not Accessible */
+#define BCME_VERSION			-37 	/* Incorrect version */
+#define BCME_TXFAIL			-38 	/* TX failure */
+#define BCME_RXFAIL			-39	/* RX failure */
+#define BCME_NODEVICE			-40 	/* Device not present */
+#define BCME_NMODE_DISABLED		-41 	/* NMODE disabled */
+#define BCME_NONRESIDENT		-42 /* access to nonresident overlay */
+#define BCME_SCANREJECT			-43 	/* reject scan request */
+#define BCME_USAGE_ERROR                -44     /* WLCMD usage error */
+#define BCME_IOCTL_ERROR                -45     /* WLCMD ioctl error */
+#define BCME_SERIAL_PORT_ERR            -46     /* RWL serial port error */
+#define BCME_DISABLED			-47     /* Disabled in this build */
+#define BCME_DECERR				-48		/* Decrypt error */
+#define BCME_ENCERR				-49		/* Encrypt error */
+#define BCME_MICERR				-50		/* Integrity/MIC error */
+#define BCME_REPLAY				-51		/* Replay */
+#define BCME_IE_NOTFOUND		-52		/* IE not found */
+#define BCME_DATA_NOTFOUND		-53		/* Complete data not found in buffer */
+#define BCME_NOT_GC			-54     /* expecting a group client */
+#define BCME_PRS_REQ_FAILED		-55     /* GC presence request failed to send */
+#define BCME_NO_P2P_SE			-56      /* Could not find P2P-Subelement */
+#define BCME_NOA_PND			-57      /* NoA pending, CB should be NULL */
+#define BCME_FRAG_Q_FAILED		-58      /* queueing 802.11 frag failed */
+#define BCME_GET_AF_FAILED		-59      /* Get p2p AF pkt failed */
+#define BCME_MSCH_NOTREADY		-60		/* scheduler not ready */
+#define BCME_LAST   BCME_MSCH_NOTREADY
+
+#define BCME_NOTENABLED BCME_DISABLED
+
+/* This error code is *internal* to the driver, and is not propagated to users. It should
+ * only be used by IOCTL patch handlers as an indication that it did not handle the IOCTL.
+ * (Since the error code is internal, an entry in 'BCMERRSTRINGTABLE' is not required,
+ * nor does it need to be part of any OSL driver-to-OS error code mapping).
+ */
+#define BCME_IOCTL_PATCH_UNSUPPORTED	-9999
+#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED)
+	#error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED"
+#endif
+
+/* These are collection of BCME Error strings */
+#define BCMERRSTRINGTABLE {		\
+	"OK",				\
+	"Undefined error",		\
+	"Bad Argument",			\
+	"Bad Option",			\
+	"Not up",			\
+	"Not down",			\
+	"Not AP",			\
+	"Not STA",			\
+	"Bad Key Index",		\
+	"Radio Off",			\
+	"Not band locked",		\
+	"No clock",			\
+	"Bad Rate valueset",		\
+	"Bad Band",			\
+	"Buffer too short",		\
+	"Buffer too long",		\
+	"Busy",				\
+	"Not Associated",		\
+	"Bad SSID len",			\
+	"Out of Range Channel",		\
+	"Bad Channel",			\
+	"Bad Address",			\
+	"Not Enough Resources",		\
+	"Unsupported",			\
+	"Bad length",			\
+	"Not Ready",			\
+	"Not Permitted",		\
+	"No Memory",			\
+	"Associated",			\
+	"Not In Range",			\
+	"Not Found",			\
+	"WME Not Enabled",		\
+	"TSPEC Not Found",		\
+	"ACM Not Supported",		\
+	"Not WME Association",		\
+	"SDIO Bus Error",		\
+	"Dongle Not Accessible",	\
+	"Incorrect version",		\
+	"TX Failure",			\
+	"RX Failure",			\
+	"Device Not Present",		\
+	"NMODE Disabled",		\
+	"Nonresident overlay access", \
+	"Scan Rejected",		\
+	"WLCMD usage error",		\
+	"WLCMD ioctl error",		\
+	"RWL serial port error", 	\
+	"Disabled",			\
+	"Decrypt error", \
+	"Encrypt error", \
+	"MIC error", \
+	"Replay", \
+	"IE not found", \
+	"Data not found", \
+	"NOT GC", \
+	"PRS REQ FAILED", \
+	"NO P2P SubElement", \
+	"NOA Pending", \
+	"FRAG Q FAILED", \
+	"GET ActionFrame failed", \
+	"scheduler not ready", \
+}
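/* Editor's sketch (not the driver's actual implementation): the table above
 * is laid out so that the string for error code e lives at index -e, which
 * is how a bcmerrorstr()-style lookup can bounds-check with VALID_BCMERROR().
 */
static const char *bcm_errstr_example(int e)
{
	static const char *tbl[] = BCMERRSTRINGTABLE;
	return VALID_BCMERROR(e) ? tbl[-e] : "Unknown error";
}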
+
+#ifndef ABS
+#define	ABS(a)			(((a) < 0) ? -(a) : (a))
+#endif /* ABS */
+
+#ifndef MIN
+#define	MIN(a, b)		(((a) < (b)) ? (a) : (b))
+#endif /* MIN */
+
+#ifndef MAX
+#define	MAX(a, b)		(((a) > (b)) ? (a) : (b))
+#endif /* MAX */
+
+/* limit to [min, max] */
+#ifndef LIMIT_TO_RANGE
+#define LIMIT_TO_RANGE(x, min, max) \
+	((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_RANGE */
+
+/* limit to  max */
+#ifndef LIMIT_TO_MAX
+#define LIMIT_TO_MAX(x, max) \
+	(((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_MAX */
+
+/* limit to min */
+#ifndef LIMIT_TO_MIN
+#define LIMIT_TO_MIN(x, min) \
+	(((x) < (min) ? (min) : (x)))
+#endif /* LIMIT_TO_MIN */
+
+#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \
+	(0xffffffff - (prev) + (curr) + 1))
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+#define ROUNDUP(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDDN(p, align)	((p) & ~((align) - 1))
+#define	ISALIGNED(a, x)		(((uintptr)(a) & ((x) - 1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define	ISPOWEROF2(x)		((((x) - 1) & (x)) == 0)
+#define VALID_MASK(mask)	!((mask) & ((mask) + 1))
+
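/* Worked values for the helpers above (editor's note): ROUNDUP(13, 8) == 16,
 * ROUNDDN(13, 8) == 8, CEIL(13, 8) == 2, ALIGN_SIZE(100, 64) == 128;
 * ISPOWEROF2(64) is true (and, by this definition, so is ISPOWEROF2(0));
 * VALID_MASK(0x00ff) is true while VALID_MASK(0x00f1) is not.
 */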
+#ifndef OFFSETOF
+#ifdef __ARMCC_VERSION
+/*
+ * The ARM RVCT compiler complains when using OFFSETOF where a constant
+ * expression is expected, such as an initializer for a static object.
+ * offsetof from the runtime library doesn't have that problem.
+ */
+#include <stddef.h>
+#define	OFFSETOF(type, member)	offsetof(type, member)
+#else
+#  if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
+/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */
+#    define	OFFSETOF(type, member)	__builtin_offsetof(type, member)
+#  else
+#    define	OFFSETOF(type, member)	((uint)(uintptr)&((type *)0)->member)
+#  endif /* GCC 4.8 or newer */
+#endif /* __ARMCC_VERSION */
+#endif /* OFFSETOF */
+
+#ifndef CONTAINEROF
+#define CONTAINEROF(ptr, type, member) ((type *)((char *)(ptr) - OFFSETOF(type, member)))
+#endif /* CONTAINEROF */
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a)		(sizeof(a) / sizeof(a[0]))
+#endif
+
+#ifndef ARRAYLAST /* returns pointer to last array element */
+#define ARRAYLAST(a)		(&a[ARRAYSIZE(a)-1])
+#endif
+
+/* Reference a function; used to prevent a static function from being optimized out */
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f)	(_bcmutils_dummy_fn = (void *)(f))
+
+/* bit map related macros */
+#ifndef setbit
+#ifndef NBBY		/* the BSD family defines NBBY */
+#define	NBBY	8	/* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+extern void setbit(void *array, uint bit);
+extern void clrbit(void *array, uint bit);
+extern bool isset(const void *array, uint bit);
+extern bool isclr(const void *array, uint bit);
+#else
+#define	setbit(a, i)	(((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define	clrbit(a, i)	(((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define	isset(a, i)	(((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define	isclr(a, i)	((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
+#endif /* setbit */
+extern void set_bitrange(void *array, uint start, uint end, uint maxbit);
+
+#define	isbitset(a, i)	(((a) & (1 << (i))) != 0)
+
+#define	NBITS(type)	(sizeof(type) * 8)
+#define NBITVAL(nbits)	(1 << (nbits))
+#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
+#define	NBITMASK(nbits)	MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
+
+extern void bcm_bitprint32(const uint32 u32);
+
+/*
+ * ----------------------------------------------------------------------------
+ * Multiword map of 2bits, nibbles
+ * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val)
+ * getbit2 getbit4 (void *ptr, uint32 ix)
+ * ----------------------------------------------------------------------------
+ */
+
+#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK)                     \
+static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val)     \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */              \
+	uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */      \
+	uint32 mask = (MSK << pos);                                     \
+	uint32 tmp = *a & ~mask;                                        \
+	*a = tmp | (val << pos);                                        \
+}                                                                   \
+static INLINE uint32 getbit##NB(void *ptr, uint32 ix)               \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH);                                 \
+	uint32 pos = (ix & OFF) << LSH;                                 \
+	return ((*a >> pos) & MSK);                                     \
+}
+
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */
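/* Editor's illustration: storing and reading a 2-bit value with the generated
 * setbit2()/getbit2() helpers. The two-word map (32 two-bit slots) is an
 * arbitrary example size.
 */
static void bitmap2_example(void)
{
	uint32 map[2] = {0, 0};
	setbit2(map, 5, 3);		/* write value 3 at index 5 */
	(void)getbit2(map, 5);		/* reads back 3 */
}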
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
+#define MODADD(x, y, bound) \
+    MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+    MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+/* modulo add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
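/* Editor's sketch: the non-power-of-two forms above are typically used for
 * ring-buffer indices. RING_DEPTH is an arbitrary example; MODINC(11, 12)
 * wraps to 0 and MODSUB(2, 5, 12) gives 9.
 */
#define RING_DEPTH 12
static uint32 ring_next(uint32 idx)
{
	return MODINC(idx, RING_DEPTH);
}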
+
+/* crc defines */
+#define CRC8_INIT_VALUE  0xff		/* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE  0x9f		/* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE 0xffff		/* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8		/* Good final CRC16 checksum value */
+#define CRC32_INIT_VALUE 0xffffffff	/* Initial CRC32 checksum value */
+#define CRC32_GOOD_VALUE 0xdebb20e3	/* Good final CRC32 checksum value */
+
+/* use for direct output of MAC address in printf etc */
+#define MACF				"%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea)	((struct ether_addr *) (ea))->octet[0], \
+							((struct ether_addr *) (ea))->octet[1], \
+							((struct ether_addr *) (ea))->octet[2], \
+							((struct ether_addr *) (ea))->octet[3], \
+							((struct ether_addr *) (ea))->octet[4], \
+							((struct ether_addr *) (ea))->octet[5]
+
+#define CONST_ETHERP_TO_MACF(ea) ((const struct ether_addr *) (ea))->octet[0], \
+						 ((const struct ether_addr *) (ea))->octet[1], \
+						 ((const struct ether_addr *) (ea))->octet[2], \
+						 ((const struct ether_addr *) (ea))->octet[3], \
+						 ((const struct ether_addr *) (ea))->octet[4], \
+						 ((const struct ether_addr *) (ea))->octet[5]
+#define ETHER_TO_MACF(ea) (ea).octet[0], \
+							(ea).octet[1], \
+							(ea).octet[2], \
+							(ea).octet[3], \
+							(ea).octet[4], \
+							(ea).octet[5]
+#if !defined(SIMPLE_MAC_PRINT)
+#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]
+#else
+#define MACDBG				"%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5]
+#endif /* SIMPLE_MAC_PRINT */
+
+/* bcm_format_flags() bit description structure */
+typedef struct bcm_bit_desc {
+	uint32	bit;
+	const char* name;
+} bcm_bit_desc_t;
+
+/* bcm_format_field */
+typedef struct bcm_bit_desc_ex {
+	uint32 mask;
+	const bcm_bit_desc_t *bitfield;
+} bcm_bit_desc_ex_t;
+
+/* buffer length for ethernet address from bcm_ether_ntoa() */
+#define ETHER_ADDR_STR_LEN	18	/* 18-byte buffer for an Ethernet address string */
+
+static INLINE uint32 /* 32bit word aligned xor-32 */
+bcm_compute_xor32(volatile uint32 *u32_val, int num_u32)
+{
+	int idx;
+	uint32 xor32 = 0;
+	for (idx = 0; idx < num_u32; idx++)
+		xor32 ^= *(u32_val + idx);
+	return xor32;
+}
+
+/* crypto utility function */
+/* 128-bit xor: *dst = *src1 xor *src2. dst1, src1 and src2 may have any alignment */
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+	if (
+#ifdef __i386__
+	    1 ||
+#endif
+	    (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+		/* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
+		/* x86 supports unaligned.  This version runs 6x-9x faster on x86. */
+		((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+		((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+		((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+		((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+	} else {
+		/* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
+		int k;
+		for (k = 0; k < 16; k++)
+			dst[k] = src1[k] ^ src2[k];
+	}
+}
+
+/* externs */
+/* crc */
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+/* format/print */
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC)
+/* print out the value a field has: fields may have 1-32 bits and may hold any value */
+extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len);
+/* print out which bits in flags are set */
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, volatile uchar *buf, uint len);
+
+/* IE parsing */
+
+/* packing is required if struct is passed across the bus */
+#include <packed_section_start.h>
+/* tag_ID/length/value_buffer tuple */
+typedef struct bcm_tlv {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} bcm_tlv_t;
+
+#define BCM_TLV_SIZE(_tlv) ((_tlv) ? (OFFSETOF(bcm_tlv_t, data) + (_tlv)->len) : 0)
+
+#define BCM_XTLV_TAG_LEN_SIZE		4
+
+/* bcm tlv w/ 16 bit id/len */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_xtlv {
+	uint16	id;
+	uint16	len;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT bcm_xtlv_t;
+#include <packed_section_end.h>
+
+
+/* descriptor of xtlv data src or dst  */
+typedef struct {
+	uint16	type;
+	uint16	len;
+	void	*ptr; /* ptr to memory location */
+} xtlv_desc_t;
+
+/* xtlv options */
+#define BCM_XTLV_OPTION_NONE	0x0000
+#define BCM_XTLV_OPTION_ALIGN32	0x0001
+
+typedef uint16 bcm_xtlv_opts_t;
+struct bcm_xtlvbuf {
+	bcm_xtlv_opts_t opts;
+	uint16 size;
+	uint8 *head; /* point to head of buffer */
+	uint8 *buf; /* current position of buffer */
+	/* allocated buffer may follow, but not necessarily */
+};
+typedef struct bcm_xtlvbuf bcm_xtlvbuf_t;
+
+#define BCM_TLV_MAX_DATA_SIZE (255)
+#define BCM_XTLV_MAX_DATA_SIZE (65535)
+#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
+
+#define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data))
+/* LEN only stores the value's length without padding */
+#define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len))
+#define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id))
+/* entire size of the XTLV including header, data, and optional padding */
+#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts)
+#define bcm_valid_xtlv(elt, buflen, opts) (elt && ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt, opts)))
+
+/* Check that bcm_tlv_t fits into the given buflen */
+#define bcm_valid_tlv(elt, buflen) (\
+	 ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
+	 ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen);
+extern bcm_tlv_t *bcm_parse_tlvs_dot11(void *buf, int buflen, uint key, bool id_ext);
+
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type,
+	int type_len);
+
+extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
+extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst,
+	int dst_maxlen);
+
+extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
+extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
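/* Editor's sketch of the usual walk over a TLV buffer with the helpers
 * declared above; bcm_parse_tlvs() does essentially this internally.
 * wanted_id is a placeholder.
 */
static bcm_tlv_t *find_ie_example(void *buf, int buflen, uint8 wanted_id)
{
	bcm_tlv_t *elt = (bcm_tlv_t *)buf;

	while (elt != NULL && bcm_valid_tlv(elt, buflen)) {
		if (elt->id == wanted_id)
			return elt;
		elt = bcm_next_tlv(elt, &buflen);
	}
	return NULL;
}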
+
+/* xtlv */
+
+/* return the next xtlv element, and update buffer len (remaining). Buffer length
+ * updated includes padding as specified by options
+ */
+extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts);
+
+/* initialize an xtlv buffer. Use options specified for packing/unpacking using
+ * the buffer. Caller is responsible for allocating both buffers.
+ */
+extern int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len,
+	bcm_xtlv_opts_t opts);
+
+extern uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf);
+extern uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf);
+extern uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf);
+extern uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf);
+extern int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen);
+extern int bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data);
+extern int bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data);
+extern int bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data);
+extern int bcm_unpack_xtlv_entry(uint8 **buf, uint16 xpct_type, uint16 xpct_len,
+	void *dst, bcm_xtlv_opts_t opts);
+extern int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len,
+	void *src, bcm_xtlv_opts_t opts);
+extern int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
+
+/* callback for unpacking xtlv from a buffer into context. */
+typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, uint8 *buf, uint16 type, uint16 len);
+
+/* unpack a tlv buffer using buffer, options, and callback */
+extern int bcm_unpack_xtlv_buf(void *ctx, uint8 *buf, uint16 buflen,
+	bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn);
+
+/* unpack a set of tlvs from the buffer using provided xtlv desc */
+extern int bcm_unpack_xtlv_buf_to_mem(void *buf, int *buflen, xtlv_desc_t *items,
+	bcm_xtlv_opts_t opts);
+
+/* pack a set of tlvs into buffer using provided xtlv desc */
+extern int bcm_pack_xtlv_buf_from_mem(void **buf, uint16 *buflen, xtlv_desc_t *items,
+	bcm_xtlv_opts_t opts);
+
+/* Return the data pointer for a given ID from an xtlv buffer.
+ * The xtlv data length is written to *datalen_out if that pointer is valid.
+ */
+extern void *bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id,
+	uint16 *datalen_out, bcm_xtlv_opts_t opts);
+
+/* callback that reports whether more tlvs remain to be packed and, if so,
+ * returns the next tlv id and len, honoring options such as alignment
+ */
+typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len);
+
+/* callback to pack the tlv into length validated buffer */
+typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx,
+	uint16 tlv_id, uint16 tlv_len, uint8* buf);
+
+/* pack a set of tlvs into a buffer, using get_next to iterate */
+int bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen,
+	bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next,
+	bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen);
+
+/* bcmerror */
+extern const char *bcmerrorstr(int bcmerror);
+
+extern int wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie);
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+typedef uint32 mbool;
+#define mboolset(mb, bit)		((mb) |= (bit))		/* set one bool */
+#define mboolclr(mb, bit)		((mb) &= ~(bit))	/* clear one bool */
+#define mboolisset(mb, bit)		(((mb) & (bit)) != 0)	/* TRUE if one bool is set */
+#define	mboolmaskset(mb, mask, val)	((mb) = (((mb) & ~(mask)) | (val)))
+
+/* generic datastruct to help dump routines */
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 offset;
+	uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline,
+	const uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef  uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+                          char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+
+/* power conversion */
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+/* trace object allocation/free, with or without feature flags set on the object */
+
+#define BCM_OBJDBG_ADD           1
+#define BCM_OBJDBG_REMOVE        2
+#define BCM_OBJDBG_ADD_PKT       3
+
+/* object feature: set or clear flags */
+#define BCM_OBJECT_FEATURE_FLAG       1
+#define BCM_OBJECT_FEATURE_PKT_STATE  2
+/* object feature: flag bits */
+#define BCM_OBJECT_FEATURE_0     (1 << 0)
+#define BCM_OBJECT_FEATURE_1     (1 << 1)
+#define BCM_OBJECT_FEATURE_2     (1 << 2)
+/* object feature: clear flag bits field set with this flag */
+#define BCM_OBJECT_FEATURE_CLEAR (1 << 31)
+#ifdef BCM_OBJECT_TRACE
+#define bcm_pkt_validate_chk(obj)	do { \
+	void * pkttag; \
+	bcm_object_trace_chk(obj, 0, 0, \
+		__FUNCTION__, __LINE__); \
+	if ((pkttag = PKTTAG(obj))) { \
+		bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \
+			__FUNCTION__, __LINE__); \
+	} \
+} while (0)
+extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line);
+extern void bcm_object_trace_upd(void *obj, void *obj_new);
+extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+	const char *caller, int line);
+extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value);
+extern int  bcm_object_feature_get(void *obj, uint32 type, uint32 value);
+extern void bcm_object_trace_init(void);
+extern void bcm_object_trace_deinit(void);
+#else
+#define bcm_pkt_validate_chk(obj)
+#define bcm_object_trace_opr(a, b, c, d)
+#define bcm_object_trace_upd(a, b)
+#define bcm_object_trace_chk(a, b, c, d, e)
+#define bcm_object_feature_set(a, b, c)
+#define bcm_object_feature_get(a, b, c)
+#define bcm_object_trace_init()
+#define bcm_object_trace_deinit()
+#endif /* BCM_OBJECT_TRACE */
+
+/* calculate a * b + c */
+extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
+/* calculate a / b */
+extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+
+/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
+
+/* Table driven count set bits. */
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+#	define B2(n)    n,     n + 1,     n + 1,     n + 2
+#	define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+#	define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+	B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32arg)
+{
+	/* function local scope declaration of const _CSBTBL[] */
+	const uint8 * p = (const uint8 *)&u32arg;
+	return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
+
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32arg)
+{
+	int shifts = 0;
+	while (u32arg) {
+		shifts++; u32arg >>= 1;
+	}
+	return (32U - shifts);
+}
+
+#ifdef BCM_ASLR_HEAP
+
+#define BCM_NVRAM_OFFSET_TCM	4
+#define BCM_NVRAM_IMG_COMPRS_FACTOR	4
+#define BCM_RNG_SIGNATURE	0xFEEDC0DE
+
+typedef struct bcm_rand_metadata {
+	uint32 signature;	/* host fills it in, FW verifies before reading rand */
+	uint32 count;	/* number of 4-byte-wide random numbers */
+} bcm_rand_metadata_t;
+#endif /* BCM_ASLR_HEAP */
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz"	: MIPS, ARM
+ * "cntlzw"	: PowerPC
+ * "BSF"	: x86
+ * "lzcnt"	: AMD, SPARC
+ */
+
+#if defined(__arm__)
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32arg)
+{
+#if defined(__USE_ASM_CLZ__)
+	int zeros;
+	__asm__ volatile("clz    %0, %1 \n" : "=r" (zeros) : "r"  (u32arg));
+	return zeros;
+#else	/* C equivalent */
+	return C_bcm_count_leading_zeros(u32arg);
+#endif  /* C equivalent */
+}
+
+/*
+ * Macro to count leading zeroes
+ *
+ */
+#if defined(__GNUC__)
+#define CLZ(x) __builtin_clzl(x)
+#elif defined(__arm__)
+#define CLZ(x) __clz(x)
+#else
+#define CLZ(x) bcm_count_leading_zeros(x)
+#endif /* __GNUC__ */
+
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap;	/* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL	((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX	((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is inuse or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
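+
+/* Usage sketch (illustrative only; 'osh' is assumed to be a valid osl_t handle):
+ *
+ *     struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 128);
+ *     if (map != BCM_MWBMAP_INVALID_HDL) {
+ *         uint32 idx = bcm_mwbmap_alloc(map);
+ *         if (idx != BCM_MWBMAP_INVALID_IDX)
+ *             bcm_mwbmap_free(map, idx);
+ *         bcm_mwbmap_fini(osh, map);
+ *     }
+ */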
+/* End - Multiword bitmap based small Id allocator. */
+
+
+/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
+
+#define ID8_INVALID     0xFFu
+#define ID16_INVALID    0xFFFFu
+#define ID32_INVALID    0xFFFFFFFFu
+#define ID16_UNDEFINED              ID16_INVALID
+
+/*
+ * Construct a 16bit id allocator, managing 16bit ids in the range:
+ *    [start_val16 .. start_val16+total_ids)
+ * Note: start_val16 is inclusive.
+ * Returns an opaque handle to the 16bit id allocator.
+ */
+extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16);
+extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl);
+extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16);
+
+/* Allocate a unique 16bit id */
+extern uint16 id16_map_alloc(void * id16_map_hndl);
+
+/* Free a 16bit id value into the id16 allocator */
+extern void id16_map_free(void * id16_map_hndl, uint16 val16);
+
+/* Get the number of failures encountered during id allocation. */
+extern uint32 id16_map_failures(void * id16_map_hndl);
+
+/* Audit the 16bit id allocator state. */
+extern bool id16_map_audit(void * id16_map_hndl);
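+
+/* Usage sketch (illustrative only; 'osh' is assumed to be a valid osl_t handle):
+ *
+ *     void *idmap = id16_map_init(osh, 32, 0x100);	manages ids 0x100..0x11F
+ *     uint16 id = id16_map_alloc(idmap);
+ *     if (id != ID16_INVALID)
+ *         id16_map_free(idmap, id);
+ *     idmap = id16_map_fini(osh, idmap);
+ */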
+/* End - Simple 16bit Id Allocator. */
+#endif /* BCMDRIVER */
+
+extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+uint64 fp_mult_64(uint64 val1, uint64 val2, uint8 nf1, uint8 nf2, uint8 nf_res);
+uint8 fp_div_64(uint64 num, uint32 den, uint8 nf_num, uint8 nf_den, uint32 *div_out);
+uint8 fp_calc_head_room_64(uint64 num);
+uint8 fp_calc_head_room_32(uint32 num);
+uint32 fp_round_64(uint64 num, uint8 rnd_pos);
+uint32 fp_round_32(uint32 num, uint8 rnd_pos);
+uint32 fp_floor_64(uint64 num, uint8 floor_pos);
+uint32 fp_floor_32(uint32 num, uint8 floor_pos);
+uint32 fp_ceil_64(uint64 num, uint8 ceil_pos);
+uint64 bcm_shl_64(uint64 input, uint8 shift_amt);
+uint64 bcm_shr_64(uint64 input, uint8 shift_amt);
+
+#define MASK_32_BITS	(~0)
+#define MASK_8_BITS	((1 << 8) - 1)
+
+#define EXTRACT_LOW32(num)	(uint32)((num) & MASK_32_BITS)
+#define EXTRACT_HIGH32(num)	(uint32)(((uint64)(num) >> 32) & MASK_32_BITS)
+
+#define MAXIMUM(a, b) (((a) > (b)) ? (a) : (b))
+#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))
+#define LIMIT(x, min, max) ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum);
+
+#ifndef _dll_t_
+#define _dll_t_
+/*
+ * -----------------------------------------------------------------------------
+ *                      Double Linked List Macros
+ * -----------------------------------------------------------------------------
+ *
+ * All dll operations must be performed on a pre-initialized node.
+ * Inserting an uninitialized node into a list effectively initializes it.
+ *
+ * When a node is deleted from a list, you may initialize it to avoid corruption
+ * incurred by double deletion. You may skip initialization if the node is
+ * immediately inserted into another list.
+ *
+ * By placing a dll_t element at the start of a struct, you may cast a dll_t *
+ * to the struct or vice versa.
+ *
+ * Example of declaring and initializing someList and inserting nodeA, nodeB, nodeC
+ *
+ *     typedef struct item {
+ *         dll_t node;
+ *         int someData;
+ *     } Item_t;
+ *     Item_t nodeA, nodeB, nodeC;
+ *     nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333;
+ *
+ *     dll_t someList;
+ *     dll_init(&someList);
+ *
+ *     dll_append(&someList, (dll_t *) &nodeA);
+ *     dll_prepend(&someList, &nodeB.node);
+ *     dll_insert((dll_t *)&nodeC, &nodeA.node);
+ *
+ *     dll_delete((dll_t *) &nodeB);
+ *
+ * Example of a for loop to walk someList of node_p
+ *
+ *   extern void mydisplay(Item_t * item_p);
+ *
+ *   dll_t * item_p, * next_p;
+ *   for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p);
+ *        item_p = next_p)
+ *   {
+ *       next_p = dll_next_p(item_p);
+ *       ... use item_p at will, including removing it from list ...
+ *       mydisplay((Item_t *)item_p);
+ *   }
+ *
+ * -----------------------------------------------------------------------------
+ */
+typedef struct dll {
+	struct dll * next_p;
+	struct dll * prev_p;
+} dll_t;
+
+static INLINE void
+dll_init(dll_t *node_p)
+{
+	node_p->next_p = node_p;
+	node_p->prev_p = node_p;
+}
+/* dll accessors returning a pointer to dll_t */
+
+static INLINE dll_t *
+dll_head_p(dll_t *list_p)
+{
+	return list_p->next_p;
+}
+
+
+static INLINE dll_t *
+dll_tail_p(dll_t *list_p)
+{
+	return (list_p)->prev_p;
+}
+
+
+static INLINE dll_t *
+dll_next_p(dll_t *node_p)
+{
+	return (node_p)->next_p;
+}
+
+
+static INLINE dll_t *
+dll_prev_p(dll_t *node_p)
+{
+	return (node_p)->prev_p;
+}
+
+
+static INLINE bool
+dll_empty(dll_t *list_p)
+{
+	return ((list_p)->next_p == (list_p));
+}
+
+
+static INLINE bool
+dll_end(dll_t *list_p, dll_t * node_p)
+{
+	return (list_p == node_p);
+}
+
+
+/* inserts the node new_p "after" the node at_p */
+static INLINE void
+dll_insert(dll_t *new_p, dll_t * at_p)
+{
+	new_p->next_p = at_p->next_p;
+	new_p->prev_p = at_p;
+	at_p->next_p = new_p;
+	(new_p->next_p)->prev_p = new_p;
+}
+
+static INLINE void
+dll_append(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, dll_tail_p(list_p));
+}
+
+static INLINE void
+dll_prepend(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, list_p);
+}
+
+
+/* deletes a node from any list that it "may" be in, if at all. */
+static INLINE void
+dll_delete(dll_t *node_p)
+{
+	node_p->prev_p->next_p = node_p->next_p;
+	node_p->next_p->prev_p = node_p->prev_p;
+}
+#endif  /* ! defined(_dll_t_) */
+
+/* Elements managed in a double linked list */
+
+typedef struct dll_pool {
+	dll_t       free_list;
+	uint16      free_count;
+	uint16      elems_max;
+	uint16      elem_size;
+	dll_t       elements[1];
+} dll_pool_t;
+
+dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size);
+void * dll_pool_alloc(dll_pool_t * dll_pool_p);
+void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p);
+void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p);
+typedef void (* dll_elem_dump)(void * elem_p);
+void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size);
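+
+/* Usage sketch (illustrative only; my_elem_t is a hypothetical element type whose
+ * first member is a dll_t node, and 'osh' is assumed to be a valid handle):
+ *
+ *     dll_pool_t *pool = dll_pool_init(osh, 16, sizeof(my_elem_t));
+ *     my_elem_t *elem = (my_elem_t *)dll_pool_alloc(pool);
+ *     if (elem != NULL)
+ *         dll_pool_free(pool, elem);
+ *     dll_pool_detach(osh, pool, 16, sizeof(my_elem_t));
+ */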
+
+/* calculate IPv4 header checksum
+ * - input ip points to IP header in network order
+ * - output cksum is in network order
+ */
+uint16 ipv4_hdr_cksum(uint8 *ip, int ip_len);
+
+/* calculate IPv4 TCP header checksum
+ * - input ip and tcp points to IP and TCP header in network order
+ * - output cksum is in network order
+ */
+uint16 ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len);
+
+/* calculate IPv6 TCP header checksum
+ * - input ipv6 and tcp points to IPv6 and TCP header in network order
+ * - output cksum is in network order
+ */
+uint16 ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len);
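+
+/* Usage sketch (illustrative only; 'ip' is assumed to point at a 20-byte IPv4 header
+ * in network order, immediately followed by the TCP header):
+ *
+ *     uint16 ip_sum  = ipv4_hdr_cksum(ip, 20);
+ *     uint16 tcp_sum = ipv4_tcp_hdr_cksum(ip, ip + 20, tcp_len);
+ *
+ * Per the notes above, both results are returned in network order.
+ */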
+
+#ifdef __cplusplus
+	}
+#endif
+
+/* #define DEBUG_COUNTER */
+#ifdef DEBUG_COUNTER
+#define CNTR_TBL_MAX 10
+typedef struct _counter_tbl_t {
+	char name[16];				/* name of this counter table */
+	uint32 prev_log_print;		/* Internal use. Timestamp of the previous log print */
+	uint log_print_interval;	/* Desired interval to print logs in ms */
+	uint needed_cnt;			/* How many counters need to be used */
+	uint32 cnt[CNTR_TBL_MAX];		/* Counting entries to increase at desired places */
+	bool enabled;				/* Whether to enable printing log */
+} counter_tbl_t;
+
+
+void counter_printlog(counter_tbl_t *ctr_tbl);
+#endif /* DEBUG_COUNTER */
+
+#if defined(__GNUC__)
+#define CALL_SITE __builtin_return_address(0)
+#else
+#define CALL_SITE ((void*) 0)
+#endif
+#ifdef SHOW_LOGTRACE
+#define TRACE_LOG_BUF_MAX_SIZE 1500
+#define BUF_NOT_AVAILABLE	0
+#define NEXT_BUF_NOT_AVAIL	1
+#define NEXT_BUF_AVAIL		2
+
+typedef struct trace_buf_info {
+	int availability;
+	int size;
+	char buf[TRACE_LOG_BUF_MAX_SIZE];
+} trace_buf_info_t;
+#endif /* SHOW_LOGTRACE */
+
+#endif	/* _bcmutils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
new file mode 100644
index 0000000..84bfdda
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
@@ -0,0 +1,69 @@
+/*
+ * Definitions for nl80211 vendor command/event access to host driver
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: brcm_nl80211.h 601873 2015-11-24 11:04:28Z $
+ *
+ */
+
+#ifndef _brcm_nl80211_h_
+#define _brcm_nl80211_h_
+
+#define OUI_BRCM  0x001018
+#define OUI_GOOGLE  0x001A11
+
+enum wl_vendor_subcmd {
+	BRCM_VENDOR_SCMD_UNSPEC,
+	BRCM_VENDOR_SCMD_PRIV_STR,
+	BRCM_VENDOR_SCMD_BCM_STR
+};
+
+
+struct bcm_nlmsg_hdr {
+	uint cmd;	/* common ioctl definition */
+	int len;	/* expected return buffer length */
+	uint offset;	/* user buffer offset */
+	uint set;	/* get or set request optional */
+	uint magic;	/* magic number for verification */
+};
+
+enum bcmnl_attrs {
+	BCM_NLATTR_UNSPEC,
+
+	BCM_NLATTR_LEN,
+	BCM_NLATTR_DATA,
+
+	__BCM_NLATTR_AFTER_LAST,
+	BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1
+};
+
+struct nl_prv_data {
+	int err;			/* return result */
+	void *data;			/* ioctl return buffer pointer */
+	uint len;			/* ioctl return buffer length */
+	struct bcm_nlmsg_hdr *nlioc;	/* bcm_nlmsg_hdr header pointer */
+};
+
+#endif /* _brcm_nl80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/dbus.h b/drivers/net/wireless/bcmdhd/include/dbus.h
new file mode 100644
index 0000000..f4dec0d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dbus.h
@@ -0,0 +1,600 @@
+/*
+ * Dongle BUS interface Abstraction layer
+ *   target serial buses like USB, SDIO, SPI, etc.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus.h 596371 2015-10-30 22:43:47Z $
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+#include <dhd_linux.h>
+
+extern uint dbus_msglevel;
+#define DBUS_ERROR_VAL	0x0001
+#define DBUS_TRACE_VAL	0x0002
+#define DBUS_INFO_VAL	0x0004
+
+#if defined(DHD_DEBUG)
+#define DBUSERR(args)		do {if (dbus_msglevel & DBUS_ERROR_VAL) printf args;} while (0)
+#define DBUSTRACE(args)		do {if (dbus_msglevel & DBUS_TRACE_VAL) printf args;} while (0)
+#define DBUSINFO(args)		do {if (dbus_msglevel & DBUS_INFO_VAL) printf args;} while (0)
+#else /* defined(DHD_DEBUG) */
+#define DBUSERR(args)
+#define DBUSTRACE(args)
+#define DBUSINFO(args)
+#endif
+
+enum {
+	DBUS_OK = 0,
+	DBUS_ERR = -200,
+	DBUS_ERR_TIMEOUT,
+	DBUS_ERR_DISCONNECT,
+	DBUS_ERR_NODEVICE,
+	DBUS_ERR_UNSUPPORTED,
+	DBUS_ERR_PENDING,
+	DBUS_ERR_NOMEM,
+	DBUS_ERR_TXFAIL,
+	DBUS_ERR_TXTIMEOUT,
+	DBUS_ERR_TXDROP,
+	DBUS_ERR_RXFAIL,
+	DBUS_ERR_RXDROP,
+	DBUS_ERR_TXCTLFAIL,
+	DBUS_ERR_RXCTLFAIL,
+	DBUS_ERR_REG_PARAM,
+	DBUS_STATUS_CANCELLED,
+	DBUS_ERR_NVRAM,
+	DBUS_JUMBO_NOMATCH,
+	DBUS_JUMBO_BAD_FORMAT,
+	DBUS_NVRAM_NONTXT,
+	DBUS_ERR_RXZLP
+};
+
+#define BCM_OTP_SIZE_43236  84	/* number of 16 bit values */
+#define BCM_OTP_SW_RGN_43236	24  /* start offset of SW config region */
+#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */
+
+#define ERR_CBMASK_TXFAIL		0x00000001
+#define ERR_CBMASK_RXFAIL		0x00000002
+#define ERR_CBMASK_ALL			0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE			0
+#define DBUS_CBCTL_READ				1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL			2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT		3		/* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL	250		/* timeout for txirb complete, in ms */
+
+#define DBUS_BUFFER_SIZE_TX	32000
+#define DBUS_BUFFER_SIZE_RX	24000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG	2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG	2048
+
+/** DBUS types */
+enum {
+	DBUS_USB,
+	DBUS_SDIO,
+	DBUS_SPI,
+	DBUS_UNKNOWN
+};
+
+enum dbus_state {
+	DBUS_STATE_DL_PENDING,
+	DBUS_STATE_DL_DONE,
+	DBUS_STATE_UP,
+	DBUS_STATE_DOWN,
+	DBUS_STATE_PNP_FWDL,
+	DBUS_STATE_DISCONNECT,
+	DBUS_STATE_SLEEP,
+	DBUS_STATE_DL_NEEDED
+};
+
+enum dbus_pnp_state {
+	DBUS_PNP_DISCONNECT,
+	DBUS_PNP_SLEEP,
+	DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+    DBUS_FIRMWARE,
+    DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+	INVALID_SPEED = -1,
+	LOW_SPEED     =  1,	/**< USB 1.1: 1.5 Mbps */
+	FULL_SPEED,     	/**< USB 1.1: 12  Mbps */
+	HIGH_SPEED,		/**< USB 2.0: 480 Mbps */
+	SUPER_SPEED,		/**< USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
+typedef struct {
+	int bustype;
+	int vid;
+	int pid;
+	int devid;
+	int chiprev; /**< chip revision number */
+	int mtu;
+	int nchan; /**< Data Channels */
+	int has_2nd_bulk_in_ep;
+} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for packets/bytes
+ */
+typedef struct {
+	uint32 rx_errors;
+	uint32 tx_errors;
+	uint32 rx_dropped;
+	uint32 tx_dropped;
+} dbus_stats_t;
+
+/**
+ * Configurable BUS parameters
+ */
+enum {
+	DBUS_CONFIG_ID_RXCTL_DEFERRES = 1,
+	DBUS_CONFIG_ID_AGGR_LIMIT,
+	DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET
+};
+
+typedef struct {
+	uint32 config_id;
+	union {
+		uint32 general_param;
+		bool rxctl_deferrespok;
+		struct {
+			int maxrxsf;
+			int maxrxsize;
+			int maxtxsf;
+			int maxtxsize;
+		} aggr_param;
+	};
+} dbus_config_t;
+
+/**
+ * External Download Info
+ */
+typedef struct dbus_extdl {
+	uint8 *fw;
+	int fwlen;
+	uint8 *vars;
+	int varslen;
+} dbus_extdl_t;
+
+struct dbus_callbacks;
+struct exec_parms;
+
+typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype,
+	uint16 bus_no, uint16 slot, uint32 hdrlen);
+typedef void (*disconnect_cb_t)(void *arg);
+typedef void *(*exec_cb_t)(struct exec_parms *args);
+
+/** Client callbacks registered during dbus_attach() */
+typedef struct dbus_callbacks {
+	void (*send_complete)(void *cbarg, void *info, int status);
+	void (*recv_buf)(void *cbarg, uint8 *buf, int len);
+	void (*recv_pkt)(void *cbarg, void *pkt);
+	void (*txflowcontrol)(void *cbarg, bool onoff);
+	void (*errhandler)(void *cbarg, int err);
+	void (*ctl_complete)(void *cbarg, int type, int status);
+	void (*state_change)(void *cbarg, int state);
+	void *(*pktget)(void *cbarg, uint len, bool send);
+	void (*pktfree)(void *cbarg, void *p, bool send);
+} dbus_callbacks_t;
+
+struct dbus_pub;
+struct bcmstrbuf;
+struct dbus_irb;
+struct dbus_irb_rx;
+struct dbus_irb_tx;
+struct dbus_intf_callbacks;
+
+typedef struct {
+	void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs);
+	void (*detach)(struct dbus_pub *pub, void *bus);
+
+	int (*up)(void *bus);
+	int (*down)(void *bus);
+	int (*send_irb)(void *bus, struct dbus_irb_tx *txirb);
+	int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb);
+	int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb);
+	int (*send_ctl)(void *bus, uint8 *buf, int len);
+	int (*recv_ctl)(void *bus, uint8 *buf, int len);
+	int (*get_stats)(void *bus, dbus_stats_t *stats);
+	int (*get_attrib)(void *bus, dbus_attrib_t *attrib);
+
+	int (*pnp)(void *bus, int evnt);
+	int (*remove)(void *bus);
+	int (*resume)(void *bus);
+	int (*suspend)(void *bus);
+	int (*stop)(void *bus);
+	int (*reset)(void *bus);
+
+	/* Access to bus buffers directly */
+	void *(*pktget)(void *bus, int len);
+	void (*pktfree)(void *bus, void *pkt);
+
+	int  (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len,
+		bool set);
+	void (*dump)(void *bus, struct bcmstrbuf *strbuf);
+	int  (*set_config)(void *bus, dbus_config_t *config);
+	int  (*get_config)(void *bus, dbus_config_t *config);
+
+	bool (*device_exists)(void *bus);
+	int (*dlneeded)(void *bus);
+	int  (*dlstart)(void *bus, uint8 *fw, int len);
+	int  (*dlrun)(void *bus);
+	bool (*recv_needed)(void *bus);
+
+	void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+	void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+
+	int (*tx_timer_init)(void *bus);
+	int (*tx_timer_start)(void *bus, uint timeout);
+	int (*tx_timer_stop)(void *bus);
+
+	int (*sched_dpc)(void *bus);
+	int (*lock)(void *bus);
+	int (*unlock)(void *bus);
+	int (*sched_probe_cb)(void *bus);
+
+	int (*shutdown)(void *bus);
+
+	int (*recv_stop)(void *bus);
+	int (*recv_resume)(void *bus);
+
+	int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx);
+
+	int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value);
+
+	/* Add from the bottom */
+} dbus_intf_t;
+
+typedef struct dbus_pub {
+	struct osl_info *osh;
+	dbus_stats_t stats;
+	dbus_attrib_t attrib;
+	enum dbus_state busstate;
+	DEVICE_SPEED device_speed;
+	int ntxq, nrxq, rxsize;
+	void *bus;
+	struct shared_info *sh;
+    void *dev_info;
+} dbus_pub_t;
+
+#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
+
+#define	ALIGNED_LOCAL_VARIABLE(var, align)					\
+	uint8	buffer[SDALIGN+64];						\
+	uint8	*var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align;
+
+/*
+ * Public Bus Function Interface
+ */
+
+/*
+ * FIX: Is there a better way to pass OS/Host handles to DBUS while still
+ *      maintaining a common interface for all OSes?
+ * Under NDIS, param1 needs to be MiniportHandle
+ *  For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL;
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+	void *param1, void *param2);
+extern int dbus_deregister(void);
+
+//extern int dbus_download_firmware(dbus_pub_t *pub);
+//extern int dbus_up(struct dhd_bus *pub);
+extern int dbus_down(dbus_pub_t *pub);
+//extern int dbus_stop(struct dhd_bus *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+//extern int dbus_send_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+//extern int dbus_recv_ctl(struct dhd_bus *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void * dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+//extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+//	void *params, int plen, void *arg, int len, bool set);
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
+/*
+ * Private Common Bus Interface
+ */
+
+/** IO Request Block (IRB) */
+typedef struct dbus_irb {
+	struct dbus_irb *next;	/**< cast from a dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+	struct dbus_irb irb; /* Must be first */
+	uint8 *buf;
+	int buf_len;
+	int actual_len;
+	void *pkt;
+	void *info;
+	void *arg;
+} dbus_irb_rx_t;
+
+typedef struct dbus_irb_tx {
+	struct dbus_irb irb;	/**< Must be first */
+	uint8 *buf;		/**< mutually exclusive with struct member 'pkt' */
+	int len;		/**< length of field 'buf' */
+	void *pkt;		/**< mutually exclusive with struct member 'buf' */
+	int retry_count;
+	void *info;
+	void *arg;
+	void *send_buf;		/**< linear buffer for LINUX when aggregation is enabled */
+} dbus_irb_tx_t;
+
+/**
+ * DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to upper layer
+ */
+typedef struct dbus_intf_callbacks {
+	void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+	void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+	void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+	void (*errhandler)(void *cbarg, int err);
+	void (*ctl_complete)(void *cbarg, int type, int status);
+	void (*state_change)(void *cbarg, int state);
+	bool (*isr)(void *cbarg, bool *wantdpc);
+	bool (*dpc)(void *cbarg, bool bounded);
+	void (*watchdog)(void *cbarg);
+	void *(*pktget)(void *cbarg, uint len, bool send);
+	void (*pktfree)(void *cbarg, void *p, bool send);
+	struct dbus_irb* (*getirb)(void *cbarg, bool send);
+	void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support new bus, port these functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+	dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type,
+  uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif  /* #if defined(BCM_REQUEST_FW) */
+
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	/* Backward compatibility */
+	typedef unsigned int gfp_t;
+
+	#define dma_pool pci_pool
+	#define dma_pool_create(name, dev, size, align, alloc) \
+		pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+	#define dma_pool_destroy(pool) pci_pool_destroy(pool)
+	#define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+	#define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+	#define dma_map_single(dev, addr, size, dir)	pci_map_single(dev, addr, size, dir)
+	#define dma_unmap_single(dev, hnd, size, dir)	pci_unmap_single(dev, hnd, size, dir)
+	#define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+	#define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+	#define hc32_to_cpu(x)	le32_to_cpu(x)
+	#define cpu_to_hc32(x)	cpu_to_le32(x)
+	typedef unsigned int __hc32;
+#else
+	#error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH		0x31
+#define	EHCI_SET_EP_BYPASS	EHCI_FASTPATH
+#define	EHCI_SET_BYPASS_CB	(EHCI_FASTPATH + 1)
+#define	EHCI_SET_BYPASS_DEV	(EHCI_FASTPATH + 2)
+#define	EHCI_DUMP_STATE		(EHCI_FASTPATH + 3)
+#define	EHCI_SET_BYPASS_POOL	(EHCI_FASTPATH + 4)
+#define	EHCI_CLR_EP_BYPASS	(EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that it does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS       5
+#define EHCI_QTD_ALIGN  	32
+#define EHCI_BULK_PACKET_SIZE	512
+#define EHCI_QTD_XACTERR_MAX	32
+
+struct ehci_qtd {
+	/* Hardware map */
+	volatile uint32_t	qtd_next;
+	volatile uint32_t	qtd_altnext;
+	volatile uint32_t	qtd_status;
+#define	EHCI_QTD_GET_BYTES(x)	(((x)>>16) & 0x7fff)
+#define	EHCI_QTD_IOC            0x00008000
+#define	EHCI_QTD_GET_CERR(x)	(((x)>>10) & 0x3)
+#define EHCI_QTD_SET_CERR(x)    ((x) << 10)
+#define	EHCI_QTD_GET_PID(x)	(((x)>>8) & 0x3)
+#define EHCI_QTD_SET_PID(x)     ((x) <<  8)
+#define EHCI_QTD_ACTIVE         0x80
+#define EHCI_QTD_HALTED         0x40
+#define EHCI_QTD_BUFERR         0x20
+#define EHCI_QTD_BABBLE         0x10
+#define EHCI_QTD_XACTERR        0x08
+#define EHCI_QTD_MISSEDMICRO    0x04
+	volatile uint32_t 	qtd_buffer[EHCI_QTD_NBUFFERS];
+	volatile uint32_t 	qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+
+	/* Implementation extension */
+	dma_addr_t		qtd_self;		/**< own hardware address */
+	struct ehci_qtd		*obj_next;		/**< software link to the next QTD */
+	void			*rpc;			/**< pointer to the rpc buffer */
+	size_t			length;			/**< length of the data in the buffer */
+	void			*buff;			/**< pointer to the reassembly buffer */
+	int			xacterrs;		/**< retry counter for qtd xact error */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define	EHCI_NULL	__constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
+
+/**
+ * Queue Head
+ * NOTE: This structure is slightly different from the one in the kernel, but it needs to
+ * stay compatible with it.
+ */
+struct ehci_qh {
+	/* Hardware map */
+	volatile uint32_t 	qh_link;
+	volatile uint32_t 	qh_endp;
+	volatile uint32_t 	qh_endphub;
+	volatile uint32_t 	qh_curqtd;
+
+	/* QTD overlay */
+	volatile uint32_t	ow_next;
+	volatile uint32_t	ow_altnext;
+	volatile uint32_t	ow_status;
+	volatile uint32_t	ow_buffer [EHCI_QTD_NBUFFERS];
+	volatile uint32_t	ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+	/* Extension (should match the kernel layout) */
+	dma_addr_t		unused0;
+	void 			*unused1;
+	struct list_head	unused2;
+	struct ehci_qtd		*dummy;
+	struct ehci_qh		*unused3;
+
+	struct ehci_hcd		*unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct kref		unused5;
+	unsigned		unused6;
+
+	uint8_t			unused7;
+
+	/* periodic schedule info */
+	uint8_t			unused8;
+	uint8_t			unused9;
+	uint8_t			unused10;
+	uint16_t		unused11;
+	uint16_t		unused12;
+	uint16_t		unused13;
+	struct usb_device	*unused14;
+#else
+	unsigned		unused5;
+
+	u8			unused6;
+
+	/* periodic schedule info */
+	u8			unused7;
+	u8			unused8;
+	u8			unused9;
+	unsigned short		unused10;
+	unsigned short		unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+	struct usb_device	*unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+	struct ehci_qtd		*first_qtd;
+		/* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+		/* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/** The corresponding structure in the kernel is used to get the QH */
+struct hcd_dev {	/* usb_device.hcpriv points to this */
+	struct list_head	unused0;
+	struct list_head	unused1;
+
+	/* array of QH pointers */
+	void			*ep[32];
+};
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub,  int epn, struct ehci_qtd *qtd, void *rpc,
+	int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+	int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void  dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/dhd_daemon.h b/drivers/net/wireless/bcmdhd/include/dhd_daemon.h
new file mode 100644
index 0000000..3a5141a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhd_daemon.h
@@ -0,0 +1,62 @@
+/*
+ * Header file for DHD daemon to handle timeouts
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_daemon.h 671464 2016-11-22 06:15:32Z $
+ */
+
+#ifndef __BCM_DHDD_H__
+#define __BCM_DHDD_H__
+
+/**
+ * To maintain compatibility when the dhd driver and dhd daemon are taken from different
+ * branches, make sure to keep this file the same across the dhd driver branch and the
+ * dhd apps branch.
+ * TODO: Make this file as shared between apps and dhd.ko
+ */
+
+#define BCM_TO_MAGIC 0x600DB055
+#define NO_TRAP 0
+#define DO_TRAP	1
+
+#define BCM_NL_USER 31
+
+typedef enum notify_dhd_daemon_reason {
+	REASON_COMMAND_TO,
+	REASON_OQS_TO,
+	REASON_SCAN_TO,
+	REASON_JOIN_TO,
+	REASON_DAEMON_STARTED,
+	REASON_DEVICE_TX_STUCK_WARNING,
+	REASON_DEVICE_TX_STUCK,
+	REASON_UNKOWN
+} notify_dhd_daemon_reason_t;
+
+typedef struct bcm_to_info {
+	int magic;
+	int reason;
+	int trap;
+} bcm_to_info_t;
+
+#endif /* __BCM_DHDD_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
new file mode 100644
index 0000000..8a139dc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -0,0 +1,153 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhdioctl.h 675190 2016-12-14 15:27:52Z $
+ */
+
+#ifndef _dhdioctl_h_
+#define	_dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+	uint32 cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint32 len;	/* length of user buffer */
+	uint32 set;	/* get or set request boolean (optional) */
+	uint32 used;	/* bytes read or written (optional) */
+	uint32 needed;	/* bytes needed (optional) */
+	uint32 driver;	/* to identify target driver */
+} dhd_ioctl_t;
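+
+/* Usage sketch (illustrative only; the values below show a typical GET_VAR request,
+ * where 'buf' is assumed to hold the NUL-terminated iovar name on entry and receives
+ * the result):
+ *
+ *     dhd_ioctl_t ioc;
+ *     memset(&ioc, 0, sizeof(ioc));
+ *     ioc.cmd = DHD_GET_VAR;
+ *     ioc.buf = buf;
+ *     ioc.len = buflen;
+ *     ioc.set = FALSE;
+ *     ioc.driver = DHD_IOCTL_MAGIC;
+ */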
+
+/* Underlying BUS definition */
+enum {
+	BUS_TYPE_USB = 0, /* for USB dongles */
+	BUS_TYPE_SDIO, /* for SDIO dongles */
+	BUS_TYPE_PCIE /* for PCIE dongles */
+};
+
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC		0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION	1
+
+/*
+ * Increase the DHD_IOCTL_MAXLEN to 16K for supporting download of NVRAM files of size
+ * > 8K. In the existing implementation when NVRAM is to be downloaded via the "vars"
+ * DHD IOVAR, the NVRAM is copied to the DHD Driver memory. Later on when "dwnldstate" is
+ * invoked with FALSE option, the NVRAM gets copied from the DHD driver to the Dongle
+ * memory. The simple way to support this feature without modifying the DHD application,
+ * driver logic is to increase the DHD_IOCTL_MAXLEN size. This macro defines the "size"
+ * of the buffer in which data is exchanged between the DHD App and DHD driver.
+ */
+#define	DHD_IOCTL_MAXLEN	(16384)	/* max length ioctl buffer required */
+#define	DHD_IOCTL_SMLEN		256		/* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC				0
+#define DHD_GET_VERSION				1
+#define DHD_GET_VAR				2
+#define DHD_SET_VAR				3
+
+/* message levels */
+#define DHD_ERROR_VAL	0x0001
+#define DHD_TRACE_VAL	0x0002
+#define DHD_INFO_VAL	0x0004
+#define DHD_DATA_VAL	0x0008
+#define DHD_CTL_VAL	0x0010
+#define DHD_TIMER_VAL	0x0020
+#define DHD_HDRS_VAL	0x0040
+#define DHD_BYTES_VAL	0x0080
+#define DHD_INTR_VAL	0x0100
+#define DHD_LOG_VAL	0x0200
+#define DHD_GLOM_VAL	0x0400
+#define DHD_EVENT_VAL	0x0800
+#define DHD_BTA_VAL	0x1000
+#define DHD_ISCAN_VAL	0x2000
+#define DHD_ARPOE_VAL	0x4000
+#define DHD_REORDER_VAL	0x8000
+#define DHD_NOCHECKDIED_VAL		0x20000 /* UTF WAR */
+#define DHD_PNO_VAL		0x80000
+#define DHD_RTT_VAL		0x100000
+#define DHD_MSGTRACE_VAL	0x200000
+#define DHD_FWLOG_VAL		0x400000
+#define DHD_DBGIF_VAL		0x800000
+#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
+#define DHD_RPM_VAL		0x1000000
+#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#define DHD_PKT_MON_VAL		0x2000000
+#define DHD_PKT_MON_DUMP_VAL	0x4000000
+#define DHD_ERROR_MEM_VAL	0x8000000
+#define DHD_ANDROID_VAL	0x10000
+#define DHD_IW_VAL	0x20000
+#define DHD_CFG_VAL	0x40000
+#define DHD_CONFIG_VAL	0x80000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+	uint32 version;		/* To allow structure change tracking */
+	uint32 freq;		/* Max ticks between tx/rx attempts */
+	uint32 count;		/* Test packets to send/rcv each attempt */
+	uint32 print;		/* Print counts every <print> attempts */
+	uint32 total;		/* Total packets (or bursts) */
+	uint32 minlen;		/* Minimum length of packets to send */
+	uint32 maxlen;		/* Maximum length of packets to send */
+	uint32 numsent;		/* Count of test packets sent */
+	uint32 numrcvd;		/* Count of test packets received */
+	uint32 numfail;		/* Count of test send failures */
+	uint32 mode;		/* Test mode (type of test packets) */
+	uint32 stop;		/* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO		1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 	2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST	3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV		4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE	(-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE	0	/* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP   (-1)	/* Request SD clock be stopped (and use SD1 mode) */
+
+
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/dnglevent.h b/drivers/net/wireless/bcmdhd/include/dnglevent.h
new file mode 100644
index 0000000..40a0047
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dnglevent.h
@@ -0,0 +1,120 @@
+/*
+ * Broadcom Event  protocol definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Dependencies: bcmeth.h
+ *
+ * $Id: dnglevent.h $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * -----------------------------------------------------------------------------
+ *
+ */
+
+/*
+ * Broadcom dngl Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _DNGLEVENT_H_
+#define _DNGLEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <bcmeth.h>
+#include <ethernet.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+#define BCM_DNGL_EVENT_MSG_VERSION		1
+#define DNGL_E_RSRVD_1				0x0
+#define DNGL_E_RSRVD_2				0x1
+#define DNGL_E_SOCRAM_IND			0x2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16  version; /* Current version is 1 */
+	uint16  reserved; /* reserved for any future extension */
+	uint16  event_type; /* DNGL_E_SOCRAM_IND */
+	uint16  datalen; /* Length of the event payload */
+} BWL_POST_PACKED_STRUCT bcm_dngl_event_msg_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_event {
+	struct ether_header eth;
+	bcmeth_hdr_t        bcm_hdr;
+	bcm_dngl_event_msg_t      dngl_event;
+	/* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_dngl_event_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_socramind {
+	uint16			tag;	/* data tag */
+	uint16			length; /* data length */
+	uint8			value[1]; /* data value with variable length specified by length */
+} BWL_POST_PACKED_STRUCT bcm_dngl_socramind_t;
+
+/* SOCRAM_IND type tags */
+#define SOCRAM_IND_ASSERT_TAG		0x1
+#define SOCRAM_IND_TAG_HEALTH_CHECK	0x2
+/* Health check top level module tags */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_healthcheck {
+	uint16			top_module_tag;	/* top level module tag */
+	uint16			top_module_len; /* top level module length */
+	uint8			value[1]; /* data value with variable length specified by length */
+} BWL_POST_PACKED_STRUCT bcm_dngl_healthcheck_t;
+
+/* Health check top level module tags */
+#define HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE 1
+#define HEALTH_CHECK_PCIEDEV_VERSION_1	1
+#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT	0
+#define HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT		1
+#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT		2
+#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT			3
+#define HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT			4
+#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3	(1 << HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_AER	(1 << HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN	(1 << HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT	(1 << HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT)
+#define HEALTH_CHECK_PCIEDEV_FLAG_NODS	(1 << HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT)
+/* PCIE Module TAGs */
+#define HEALTH_CHECK_PCIEDEV_INDUCED_IND	0x1
+#define HEALTH_CHECK_PCIEDEV_H2D_DMA_IND	0x2
+#define HEALTH_CHECK_PCIEDEV_D2H_DMA_IND	0x3
+#define HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND	0x4
+#define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND	0x5
+#define HEALTH_CHECK_PCIEDEV_NODS_IND	0x6
+#define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND	0x7
+
+#define HC_PCIEDEV_CONFIG_REGLIST_MAX	20
+typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc {
+	uint16			version; /* HEALTH_CHECK_PCIEDEV_VERSION_1 */
+	uint16			reserved;
+	uint16			pcie_err_ind_type; /* PCIE Module TAGs */
+	uint16			pcie_flag;
+	uint32			pcie_control_reg;
+	uint32			pcie_config_regs[HC_PCIEDEV_CONFIG_REGLIST_MAX];
+} BWL_POST_PACKED_STRUCT bcm_dngl_pcie_hc_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _DNGLEVENT_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/eapol.h b/drivers/net/wireless/bcmdhd/include/eapol.h
new file mode 100644
index 0000000..ef917ab
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/eapol.h
@@ -0,0 +1,218 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: eapol.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#if !defined(BCMCRYPTO_COMPONENT)
+#include <bcmcrypto/aeskeywrap.h>
+#endif /* !BCMCRYPTO_COMPONENT */
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_header eth;	/* 802.3/Ethernet header */
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+	unsigned char body[1];		/* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION	2
+#define WPA_EAPOL_VERSION	1
+#define LEAP_EAPOL_VERSION	1
+#define SES_EAPOL_VERSION	1
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN	8
+#define EAPOL_KEY_IV_LEN	16
+#define EAPOL_KEY_SIG_LEN	16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;			/* Key Descriptor Type */
+	unsigned short length;			/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char iv[EAPOL_KEY_IV_LEN];		/* Key IV */
+	unsigned char index;				/* Key Flags & Index */
+	unsigned char signature[EAPOL_KEY_SIG_LEN];	/* Key Signature */
+	unsigned char key[1];				/* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 	44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK	0x80
+#define EAPOL_KEY_BROADCAST	0
+#define EAPOL_KEY_UNICAST	0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK	0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_AKW_BLOCK_LEN 8
+#define EAPOL_WPA_KEY_REPLAY_LEN	8
+#define EAPOL_WPA_KEY_NONCE_LEN		32
+#define EAPOL_WPA_KEY_IV_LEN		16
+#define EAPOL_WPA_KEY_RSC_LEN		8
+#define EAPOL_WPA_KEY_ID_LEN		8
+#define EAPOL_WPA_KEY_MIC_LEN		16
+#define EAPOL_WPA_KEY_DATA_LEN		(EAPOL_WPA_MAX_KEY_SIZE + EAPOL_AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE		32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;		/* Key Descriptor Type */
+	unsigned short key_info;	/* Key Information (unaligned) */
+	unsigned short key_len;		/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN];	/* Nonce */
+	unsigned char iv[EAPOL_WPA_KEY_IV_LEN];		/* Key IV */
+	unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN];	/* Key RSC */
+	unsigned char id[EAPOL_WPA_KEY_ID_LEN];		/* WPA:Key ID, 802.11i/WPA2: Reserved */
+	unsigned char mic[EAPOL_WPA_KEY_MIC_LEN];	/* Key MIC */
+	unsigned short data_len;			/* Key Data Length */
+	unsigned char data[EAPOL_WPA_KEY_DATA_LEN];	/* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 		95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_OSEN	0x0
+#define WPA_KEY_DESC_V1		0x01
+#define WPA_KEY_DESC_V2		0x02
+#define WPA_KEY_DESC_V3		0x03
+#define WPA_KEY_PAIRWISE	0x08
+#define WPA_KEY_INSTALL		0x40
+#define WPA_KEY_ACK		0x80
+#define WPA_KEY_MIC		0x100
+#define WPA_KEY_SECURE		0x200
+#define WPA_KEY_ERROR		0x400
+#define WPA_KEY_REQ		0x800
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0		0x00
+#define WPA_KEY_INDEX_1		0x10
+#define WPA_KEY_INDEX_2		0x20
+#define WPA_KEY_INDEX_3		0x30
+#define WPA_KEY_INDEX_MASK	0x30
+#define WPA_KEY_INDEX_SHIFT	0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA	0x1000
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 type;
+	uint8 length;
+	uint8 oui[3];
+	uint8 subtype;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 	6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK	1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY	2
+#define WPA2_KEY_DATA_SUBTYPE_MAC	3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID	4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK	9
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	flags;
+	uint8	reserved;
+	uint8	gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 	2
+
+#define WPA2_GTK_INDEX_MASK	0x03
+#define WPA2_GTK_INDEX_SHIFT	0x00
+
+#define WPA2_GTK_TRANSMIT	0x04
+
+/* IGTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	key_id;
+	uint8	ipn[6];
+	uint8	key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 	8
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	reserved[2];
+	uint8	mac[ETHER_ADDR_LEN];
+	uint8	stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD	0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h
new file mode 100644
index 0000000..c61797b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: epivers.h.in 596126 2015-10-29 19:53:48Z $
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define	EPI_MAJOR_VERSION	1
+
+#define	EPI_MINOR_VERSION	579
+
+#define	EPI_RC_NUMBER		77
+
+#define	EPI_INCREMENTAL_NUMBER	41
+
+#define	EPI_BUILD_NUMBER	0
+
+#define	EPI_VERSION		1, 579, 77, 41
+
+#define	EPI_VERSION_NUM		0x012434d29
+
+#define EPI_VERSION_DEV		1.579.77.41
+
+/* Driver Version String, ASCII, 32 chars max */
+#define	EPI_VERSION_STR		"1.579.77.41.11 (r)"
+
+#endif /* _epivers_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/ethernet.h b/drivers/net/wireless/bcmdhd/include/ethernet.h
new file mode 100644
index 0000000..2e33867
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/ethernet.h
@@ -0,0 +1,227 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: ethernet.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _NET_ETHERNET_H_	/* use native BSD ethernet.h when available */
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define	ETHER_ADDR_LEN		6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define	ETHER_TYPE_LEN		2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define	ETHER_CRC_LEN		4
+
+/*
+ * The length of the combined header.
+ */
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define	ETHER_MIN_LEN		64
+
+/*
+ * The minimum packet user data length.
+ */
+#define	ETHER_MIN_DATA		46
+
+/*
+ * The maximum packet length.
+ */
+#define	ETHER_MAX_LEN		1518
+
+/*
+ * The maximum packet user data length.
+ */
+#define	ETHER_MAX_DATA		1500
+
+/* ether types */
+#define ETHER_TYPE_MIN		0x0600		/* Anything less than MIN is a length */
+#define	ETHER_TYPE_IP		0x0800		/* IP */
+#define ETHER_TYPE_ARP		0x0806		/* ARP */
+#define ETHER_TYPE_8021Q	0x8100		/* 802.1Q */
+#define	ETHER_TYPE_IPV6		0x86dd		/* IPv6 */
+#define	ETHER_TYPE_BRCM		0x886c		/* Broadcom Corp. */
+#define	ETHER_TYPE_802_1X	0x888e		/* 802.1x */
+#define	ETHER_TYPE_802_1X_PREAUTH 0x88c7	/* 802.1x preauthentication */
+#define ETHER_TYPE_WAI		0x88b4		/* WAI */
+#define ETHER_TYPE_89_0D	0x890d		/* 89-0d frame for TDLS */
+#define ETHER_TYPE_RRB		ETHER_TYPE_89_0D  /* RRB 802.11r 2008 */
+
+#define ETHER_TYPE_PPP_SES	0x8864		/* PPPoE Session */
+
+#define ETHER_TYPE_IAPP_L2_UPDATE	0x6	/* IAPP L2 update frame */
+
+/* Broadcom subtype follows ethertype; first 2 bytes are reserved, next 2 are subtype */
+#define	ETHER_BRCM_SUBTYPE_LEN	4	/* Broadcom 4 byte subtype */
+
+/* ether header */
+#define ETHER_DEST_OFFSET	(0 * ETHER_ADDR_LEN)	/* dest address offset */
+#define ETHER_SRC_OFFSET	(1 * ETHER_ADDR_LEN)	/* src address offset */
+#define ETHER_TYPE_OFFSET	(2 * ETHER_ADDR_LEN)	/* ether type offset */
+
+/*
+ * A macro to validate an Ethernet frame length.
+ */
+#define	ETHER_IS_VALID_LEN(foo)	\
+	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) {		\
+		((uint8 *)ea)[0] = 0x01;			\
+		((uint8 *)ea)[1] = 0x00;			\
+		((uint8 *)ea)[2] = 0x5e;			\
+		((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f;	\
+		((uint8 *)ea)[4] = ((mgrp_ip) >>  8) & 0xff;	\
+		((uint8 *)ea)[5] = ((mgrp_ip) >>  0) & 0xff;	\
+}
+
+#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+BWL_PRE_PACKED_STRUCT struct ether_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+BWL_PRE_PACKED_STRUCT struct	ether_addr {
+	uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif	/* !__INCif_etherh Quick and ugly hack for VxWorks */
+
+/*
+ * Take a pointer; set, test, clear, or toggle the locally administered
+ * address bit in the 48-bit Ethernet address.
+ */
+#define ETHER_SET_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) 	(((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+/* Takes a pointer; clears the multicast bit so the MAC address is unicast */
+#define ETHER_SET_UNICAST(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+/*
+ * Takes a pointer; returns true if it is a 48-bit multicast address
+ * (including broadcast, since that is all ones).
+ */
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
+#define eacmp(a, b)	((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+	                 (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+	                 (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]))
+
+#define	ether_cmp(a, b)	eacmp(a, b)
+
+/* copy an ethernet address - assumes the pointers can be referenced as shorts */
+#define eacopy(s, d) \
+do { \
+	((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
+	((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \
+	((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \
+} while (0)
+
+#define	ether_copy(s, d) eacopy(s, d)
+
+/* Copy an ethernet address in reverse order */
+#define	ether_rcopy(s, d) \
+do { \
+	((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \
+	((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \
+	((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \
+} while (0)
+
+/* Copy 14B ethernet header: 32bit aligned source and destination. */
+#define ehcopy32(s, d) \
+do { \
+	((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \
+	((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \
+	((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \
+	((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
+} while (0)
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
+
+#define ETHER_ISBCAST(ea)	((((const uint8 *)(ea))[0] &		\
+	                          ((const uint8 *)(ea))[1] &		\
+				  ((const uint8 *)(ea))[2] &		\
+				  ((const uint8 *)(ea))[3] &		\
+				  ((const uint8 *)(ea))[4] &		\
+				  ((const uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea)	((((const uint8 *)(ea))[0] |		\
+				  ((const uint8 *)(ea))[1] |		\
+				  ((const uint8 *)(ea))[2] |		\
+				  ((const uint8 *)(ea))[3] |		\
+				  ((const uint8 *)(ea))[4] |		\
+				  ((const uint8 *)(ea))[5]) == 0)
+
+#define ETHER_ISNULLDEST(da)	((((const uint16 *)(da))[0] |           \
+				  ((const uint16 *)(da))[1] |           \
+				  ((const uint16 *)(da))[2]) == 0)
+#define ETHER_ISNULLSRC(sa)	ETHER_ISNULLDEST(sa)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+	struct ether_header t; \
+	t = *(struct ether_header *)(s); \
+	*(struct ether_header *)(d) = t; \
+} while (0)
+
+#define  ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
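+/* Illustrative usage sketch, not part of the original Broadcom sources:
+ * mapping an IPv4 multicast group address (host byte order) to the
+ * corresponding 01:00:5e Ethernet address with ETHER_FILL_MCAST_ADDR_FROM_IP
+ * and sanity-checking it with the predicates above. Disabled by default.
+ */
+#if 0
+static int
+example_ether_mcast_from_group(uint32 mgrp_ip, struct ether_addr *ea)
+{
+	ETHER_FILL_MCAST_ADDR_FROM_IP(ea->octet, mgrp_ip);
+	/* A mapped group address is multicast but never broadcast */
+	return ETHER_ISMULTI(ea->octet) && !ETHER_ISBCAST(ea->octet);
+}
+#endif /* illustrative sketch */
+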
+#endif /* _NET_ETHERNET_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/event_log.h b/drivers/net/wireless/bcmdhd/include/event_log.h
new file mode 100644
index 0000000..0c2120e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/event_log.h
@@ -0,0 +1,388 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_log.h 711908 2017-07-20 10:37:34Z $
+ */
+
+#ifndef _EVENT_LOG_H_
+#define _EVENT_LOG_H_
+
+#include <typedefs.h>
+#include <event_log_set.h>
+#include <event_log_tag.h>
+#include <event_log_payload.h>
+#include <osl_decl.h>
+
+/* logstrs header */
+#define LOGSTRS_MAGIC   0x4C4F4753
+#define LOGSTRS_VERSION 0x1
+
+/* We make sure that the block size will fit in a single packet
+ * (allowing for a bit of overhead on each packet).
+ */
+#define EVENT_LOG_MAX_BLOCK_SIZE	1400
+#define EVENT_LOG_WL_BLOCK_SIZE		0x200
+#define EVENT_LOG_PSM_BLOCK_SIZE	0x200
+#define EVENT_LOG_BUS_BLOCK_SIZE	0x200
+#define EVENT_LOG_ERROR_BLOCK_SIZE	0x200
+/* Maximum event log record payload size = 1024 bytes or 256 words. */
+#define EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE	256
+
+/*
+ * There are multiple levels of objects defined here:
+ *   event_log_set - a set of buffers
+ *   event log groups - every event log call is part of just one.  All
+ *                      event log calls in a group are handled the
+ *                      same way.  Each event log group is associated
+ *                      with an event log set or is off.
+ */
+
+#ifndef __ASSEMBLER__
+
+/* On the external system where the dumper runs, we need to make sure
+ * that these types are the same size as they are on the ARM that
+ * produced them.
+ */
+#ifdef EVENT_LOG_DUMPER
+#define _EL_BLOCK_PTR uint32
+#define _EL_TYPE_PTR uint32
+#define _EL_SET_PTR uint32
+#define _EL_TOP_PTR uint32
+#else
+#define _EL_BLOCK_PTR struct event_log_block *
+#define _EL_TYPE_PTR uint32 *
+#define _EL_SET_PTR struct event_log_set **
+#define _EL_TOP_PTR struct event_log_top *
+#endif /* EVENT_LOG_DUMPER */
+
+/* Event log sets (a logical circular buffer) consist of one or more
+ * event_log_blocks.  The blocks themselves form a logical circular
+ * list.  The log entries are placed in each event_log_block until it
+ * is full.  Logging continues with the next event_log_block in the
+ * event_set until the last event_log_block is reached and then
+ * logging starts over with the first event_log_block in the
+ * event_set.
+ */
+typedef struct event_log_block {
+	_EL_BLOCK_PTR next_block;
+	_EL_BLOCK_PTR prev_block;
+	_EL_TYPE_PTR end_ptr;
+
+	/* Start of packet sent for log tracing */
+	uint16 pktlen;			/* Size of rest of block */
+	uint16 count;			/* Logtrace counter */
+	uint32 extra_hdr_info;		/* LSB: 6 bits set id. MSB 24 bits reserved */
+	uint32 event_logs;
+} event_log_block_t;
+#define EVENT_LOG_BLOCK_HDRLEN		8 /* pktlen 2 + count 2 + extra_hdr_info 4 */
+#define NAN_EVENT_LOG_MIN_LENGTH	2 /* Minimum length of Nan event */
+
+typedef enum {
+	SET_DESTINATION_INVALID = -1,
+	SET_DESTINATION_HOST = 0,
+	SET_DESTINATION_NONE = 1,
+	SET_DESTINATION_MAX
+} event_log_set_destination_t;
+
+/* There can be multiple event_sets, each logging a set of
+ * associated events (e.g., "fast" and "slow" events).
+ */
+typedef struct event_log_set {
+	_EL_BLOCK_PTR first_block; 	/* Pointer to first event_log block */
+	_EL_BLOCK_PTR last_block; 	/* Pointer to last event_log block */
+	_EL_BLOCK_PTR logtrace_block;	/* next block traced */
+	_EL_BLOCK_PTR cur_block;   	/* Pointer to current event_log block */
+	_EL_TYPE_PTR cur_ptr;      	/* Current event_log pointer */
+	uint32 blockcount;		/* Number of blocks */
+	uint16 logtrace_count;		/* Last count for logtrace */
+	uint16 blockfill_count;		/* Fill count for logtrace */
+	uint32 timestamp;		/* Last timestamp event */
+	uint32 cyclecount;		/* Cycles at last timestamp event */
+	event_log_set_destination_t destination;
+	uint16 size;			/* same size for all buffers in one  set */
+} event_log_set_t;
+
+/* Top data structure for access to everything else */
+typedef struct event_log_top {
+	uint32 magic;
+#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */
+	uint32 version;
+#define EVENT_LOG_VERSION 1
+	uint32 num_sets;
+	uint32 logstrs_size;		/* Size of lognums + logstrs area */
+	uint32 timestamp;		/* Last timestamp event */
+	uint32 cyclecount;		/* Cycles at last timestamp event */
+	_EL_SET_PTR sets; 		/* Ptr to array of <num_sets> set ptrs */
+} event_log_top_t;
+
+/* Data structure for keeping the header from logstrs.bin */
+typedef struct {
+	uint32 logstrs_size;    /* Size of the file */
+	uint32 rom_lognums_offset; /* Offset to the ROM lognum */
+	uint32 ram_lognums_offset; /* Offset to the RAM lognum */
+	uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
+	uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
+	/* Keep version and magic last since "header" is appended to the end of logstrs file. */
+	uint32 version;            /* Header version */
+	uint32 log_magic;       /* MAGIC number for verification 'LOGS' */
+} logstr_header_t;
+
+/*
+ * Use the following macros for generating log events.
+ *
+ * The FAST versions check whether the tag is enabled before evaluating the arguments and calling the
+ * event_log function.  This adds 5 instructions.  The COMPACT versions evaluate the arguments
+ * and call the event_log function unconditionally.  The event_log function will then skip logging
+ * if this tag is disabled.
+ *
+ * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are
+ * two variants of these macros to help.
+ *
+ * First there are the CAST versions.  The event_log function normally logs uint32 values or else
+ * they have to be cast to uint32.  The CAST versions blindly cast for you so you don't have to edit
+ * any existing code.
+ *
+ * Second there are the PAREN_ARGS versions.  These expect the logging format string and arguments
+ * to be enclosed in parentheses.  This allows us to make the following mapping of an existing
+ * msglevel macro:
+ *  #define WL_ERROR(args)   EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
+ *
+ * The versions of the macros without FAST or COMPACT in their name are just synonyms for the
+ * COMPACT versions.
+ *
+ * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic
+ * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed.
+ * Use the FAST macro in performance sensitive paths. The key concept here is that you should be
+ * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely.
+ *
+ */
+
+#if !defined(EVENT_LOG_DUMPER) && !defined(DHD_EFI)
+
+#ifndef EVENT_LOG_COMPILE
+
+/* Null define if no tracing */
+#define EVENT_LOG(format, ...)
+#define EVENT_LOG_FAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT(tag, fmt, ...)
+
+#define EVENT_LOG_CAST(tag, fmt, ...)
+#define EVENT_LOG_FAST_CAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...)
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs)
+
+#define EVENT_LOG_IS_ON(tag)		0
+#define EVENT_LOG_IS_LOG_ON(tag)	0
+
+#define EVENT_LOG_BUFFER(tag, buf, size)
+
+#else  /* EVENT_LOG_COMPILE */
+
+/* The first few are special because they can be done more efficiently
+ * this way and they are the common case.  Once there are too many
+ * parameters the code size starts to be an issue and a loop is better
+ */
+#define _EVENT_LOG0(tag, fmt_num) 			\
+	event_log0(tag, fmt_num)
+#define _EVENT_LOG1(tag, fmt_num, t1) 			\
+	event_log1(tag, fmt_num, t1)
+#define _EVENT_LOG2(tag, fmt_num, t1, t2) 		\
+	event_log2(tag, fmt_num, t1, t2)
+#define _EVENT_LOG3(tag, fmt_num, t1, t2, t3) 		\
+	event_log3(tag, fmt_num, t1, t2, t3)
+#define _EVENT_LOG4(tag, fmt_num, t1, t2, t3, t4) 	\
+	event_log4(tag, fmt_num, t1, t2, t3, t4)
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG5(tag, fmt_num, ...) event_logn(5, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG6(tag, fmt_num, ...) event_logn(6, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG7(tag, fmt_num, ...) event_logn(7, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG8(tag, fmt_num, ...) event_logn(8, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG9(tag, fmt_num, ...) event_logn(9, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGA(tag, fmt_num, ...) event_logn(10, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGB(tag, fmt_num, ...) event_logn(11, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGC(tag, fmt_num, ...) event_logn(12, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGD(tag, fmt_num, ...) event_logn(13, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__)
+
+
+/* Casting  low level macros */
+#define _EVENT_LOG_CAST0(tag, fmt_num)			\
+	event_log0(tag, fmt_num)
+#define _EVENT_LOG_CAST1(tag, fmt_num, t1)		\
+	event_log1(tag, fmt_num, (uint32)(t1))
+#define _EVENT_LOG_CAST2(tag, fmt_num, t1, t2)		\
+	event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2))
+#define _EVENT_LOG_CAST3(tag, fmt_num, t1, t2, t3)	\
+	event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3))
+#define _EVENT_LOG_CAST4(tag, fmt_num, t1, t2, t3, t4)	\
+	event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4))
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTA(tag, fmt_num, ...) _EVENT_LOGA(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__)
+
+/* Hack to make the proper routine call when variadic macros get
+ * passed.  Note the max of 15 arguments.  More than that can't be
+ * handled by the event_log entries anyway, so best to catch it at compile
+ * time.
+ */
+
+#define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9,	\
+			       _A, _B, _C, _D, _E, _F, N, ...) F ## N
+
+/* cast = _EVENT_LOG for no casting
+ * cast = _EVENT_LOG_CAST for casting of fmt arguments to uint32.
+ *        Only the first 4 arguments are cast to uint32. event_logn() is called
+ *        if more than 4 arguments are present. This function internally assumes
+ *        all arguments are uint32.
+ */
+#define _EVENT_LOG(cast, tag, fmt, ...)					\
+	static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \
+	static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \
+	_EVENT_LOG_VA_NUM_ARGS(cast, ##__VA_ARGS__,			\
+			       F, E, D, C, B, A, 9, 8,			\
+			       7, 6, 5, 4, 3, 2, 1, 0)			\
+	(tag, (int) &fmtnum , ## __VA_ARGS__)
+
+
+#define EVENT_LOG_FAST(tag, fmt, ...)					\
+	do {								\
+		if (event_log_tag_sets != NULL) {			\
+			uint8 tag_flag = *(event_log_tag_sets + tag);	\
+			if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) {		\
+				_EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__);	\
+			}						\
+		}							\
+	} while (0)
+
+#define EVENT_LOG_COMPACT(tag, fmt, ...)				\
+	do {								\
+		_EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__);	\
+	} while (0)
+
+/* Event log macro with casting to uint32 of arguments */
+#define EVENT_LOG_FAST_CAST(tag, fmt, ...)				\
+	do {								\
+		if (event_log_tag_sets != NULL) {			\
+			uint8 tag_flag = *(event_log_tag_sets + tag);	\
+			if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) {		\
+				_EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__);	\
+			}						\
+		}							\
+	} while (0)
+
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...)				\
+	do {								\
+		_EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__);	\
+	} while (0)
+
+
+#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__)
+
+#define EVENT_LOG_CAST(tag, fmt, ...) EVENT_LOG_COMPACT_CAST(tag, fmt , ## __VA_ARGS__)
+
+#define _EVENT_LOG_REMOVE_PAREN(...) __VA_ARGS__
+#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs)				\
+		EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs)			\
+		EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs)			\
+		EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+/* Minimal event logging. Event log internally calls event_logx() to
+ * log the return address of the caller.
+ * Note that the if(0){..} below is to avoid compiler warnings
+ * due to unused variables caused by this macro
+ */
+#define EVENT_LOG_RA(tag, args)						\
+	do {								\
+		if (0) {						\
+			EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, args);	\
+		}							\
+		event_log_caller_return_address(tag);			\
+	} while (0)
+
+#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG)
+
+#define EVENT_DUMP	event_log_buffer
+
+extern uint8 *event_log_tag_sets;
+
+extern int event_log_init(osl_t *osh);
+extern int event_log_set_init(osl_t *osh, int set_num, int size);
+extern int event_log_set_expand(osl_t *osh, int set_num, int size);
+extern int event_log_set_shrink(osl_t *osh, int set_num, int size);
+
+extern int event_log_tag_start(int tag, int set_num, int flags);
+extern int event_log_tag_stop(int tag);
+
+typedef void (*event_log_logtrace_trigger_fn_t)(void *ctx);
+void event_log_set_logtrace_trigger_fn(event_log_logtrace_trigger_fn_t fn, void *ctx);
+
+event_log_top_t *event_log_get_top(void);
+
+extern int event_log_get(int set_num, int buflen, void *buf);
+
+extern uint8 *event_log_next_logtrace(int set_num);
+
+extern void event_log0(int tag, int fmtNum);
+extern void event_log1(int tag, int fmtNum, uint32 t1);
+extern void event_log2(int tag, int fmtNum, uint32 t1, uint32 t2);
+extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3);
+extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4);
+extern void event_logn(int num_args, int tag, int fmtNum, ...);
+
+extern void event_log_time_sync(uint32 ms);
+extern void event_log_buffer(int tag, uint8 *buf, int size);
+extern void event_log_caller_return_address(int tag);
+extern int event_log_set_destination_set(int set, event_log_set_destination_t dest);
+extern event_log_set_destination_t event_log_set_destination_get(int set);
+
+#endif /* EVENT_LOG_DUMPER */
+
+#endif /* EVENT_LOG_COMPILE */
+
+#endif /* __ASSEMBLER__ */
+
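+/* Illustrative usage sketch, not part of the original Broadcom sources:
+ * the msglevel-style mapping described in the macro comment above, plus a
+ * FAST variant for a performance-sensitive path. Assumes an
+ * EVENT_LOG_COMPILE build with tags taken from event_log_tag.h.
+ * Disabled by default.
+ */
+#if 0
+#define WL_ERROR(args)	EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
+
+static void
+example_event_log_usage(int unit, int err)
+{
+	/* PAREN_ARGS form: format string and arguments wrapped in parentheses */
+	WL_ERROR(("wl%d: request failed, err %d\n", unit, err));
+
+	/* FAST variant: the tag enable is checked before the args are evaluated */
+	EVENT_LOG_FAST_CAST(EVENT_LOG_TAG_TRACE_WL_INFO, "wl%d: err %d", unit, err);
+}
+#endif /* illustrative sketch */
+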
+#endif /* _EVENT_LOG_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/event_log_payload.h b/drivers/net/wireless/bcmdhd/include/event_log_payload.h
new file mode 100644
index 0000000..d01264c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/event_log_payload.h
@@ -0,0 +1,673 @@
+/*
+ * EVENT_LOG System Definitions
+ *
+ * This file describes the payloads of event log entries that are data buffers
+ * rather than formatted string entries. The contents are generally XTLVs.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_log_payload.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _EVENT_LOG_PAYLOAD_H_
+#define _EVENT_LOG_PAYLOAD_H_
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <ethernet.h>
+#include <event_log_tag.h>
+
+#define EVENT_LOG_XTLV_ID_STR                   0  /**< XTLV ID for a string */
+#define EVENT_LOG_XTLV_ID_TXQ_SUM               1  /**< XTLV ID for txq_summary_t */
+#define EVENT_LOG_XTLV_ID_SCBDATA_SUM           2  /**< XTLV ID for cb_subq_summary_t */
+#define EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM  3  /**< XTLV ID for scb_ampdu_tx_summary_t */
+#define EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM        4  /**< XTLV ID for bsscfg_q_summary_t */
+#define EVENT_LOG_XTLV_ID_UCTXSTATUS            5  /**< XTLV ID for ucode TxStatus array */
+#define EVENT_LOG_XTLV_ID_TXQ_SUM_V2            6  /**< XTLV ID for txq_summary_v2_t */
+
+/**
+ * An XTLV holding a string
+ * String is not null terminated, length is the XTLV len.
+ */
+typedef struct xtlv_string {
+	uint16 id;              /* XTLV ID: EVENT_LOG_XTLV_ID_STR */
+	uint16 len;             /* XTLV Len (String length) */
+	char   str[1];          /* var len array characters */
+} xtlv_string_t;
+
+#define XTLV_STRING_FULL_LEN(str_len)     (BCM_XTLV_HDR_SIZE + (str_len) * sizeof(char))
+
+/**
+ * Summary for a single TxQ context
+ * Two of these will be used per TxQ context---one for the high TxQ, and one for
+ * the low txq that contains DMA prepared pkts. The high TxQ is a full multi-precedence
+ * queue and also has a BSSCFG map to identify the BSSCFGS associated with the queue context.
+ * The low txq counterpart does not populate the BSSCFG map.
+ * The excursion queue will have no bsscfgs associated and is the first queue dumped.
+ */
+typedef struct txq_summary {
+	uint16 id;              /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM */
+	uint16 len;             /* XTLV Len */
+	uint32 bsscfg_map;      /* bitmap of bsscfg indexes associated with this queue */
+	uint32 stopped;         /* flow control bitmap */
+	uint8  prec_count;      /* count of precedences/fifos and len of following array */
+	uint8  pad;
+	uint16 plen[1];         /* var len array of lengths of each prec/fifo in the queue */
+} txq_summary_t;
+
+#define TXQ_SUMMARY_LEN                   (OFFSETOF(txq_summary_t, plen))
+#define TXQ_SUMMARY_FULL_LEN(num_q)       (TXQ_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+typedef struct txq_summary_v2 {
+	uint16 id;              /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM_V2 */
+	uint16 len;             /* XTLV Len */
+	uint32 bsscfg_map;      /* bitmap of bsscfg indexes associated with this queue */
+	uint32 stopped;         /* flow control bitmap */
+	uint32 hw_stopped;      /* flow control bitmap */
+	uint8  prec_count;      /* count of precedences/fifos and len of following array */
+	uint8  pad;
+	uint16 plen[1];         /* var len array of lengths of each prec/fifo in the queue */
+} txq_summary_v2_t;
+
+#define TXQ_SUMMARY_V2_LEN                (OFFSETOF(txq_summary_v2_t, plen))
+#define TXQ_SUMMARY_V2_FULL_LEN(num_q)    (TXQ_SUMMARY_V2_LEN + (num_q) * sizeof(uint16))
+
+/**
+ * Summary for tx datapath of an SCB cubby
+ * This is a generic summary structure (one size fits all) with
+ * a cubby ID and sub-ID to differentiate SCB cubby types and possible sub-queues.
+ */
+typedef struct scb_subq_summary {
+	uint16 id;             /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_SUM */
+	uint16 len;            /* XTLV Len */
+	uint32 flags;          /* cubby-specific flags */
+	uint8  cubby_id;       /* ID registered for cubby */
+	uint8  sub_id;         /* sub ID if a cubby has more than one queue */
+	uint8  prec_count;     /* count of precedences/fifos and len of following array */
+	uint8  pad;
+	uint16 plen[1];        /* var len array of lengths of each prec/fifo in the queue */
+} scb_subq_summary_t;
+
+#define SCB_SUBQ_SUMMARY_LEN              (OFFSETOF(scb_subq_summary_t, plen))
+#define SCB_SUBQ_SUMMARY_FULL_LEN(num_q)  (SCB_SUBQ_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+/* scb_subq_summary_t.flags for APPS */
+#define SCBDATA_APPS_F_PS               0x00000001
+#define SCBDATA_APPS_F_PSPEND           0x00000002
+#define SCBDATA_APPS_F_INPVB            0x00000004
+#define SCBDATA_APPS_F_APSD_USP         0x00000008
+#define SCBDATA_APPS_F_TXBLOCK          0x00000010
+#define SCBDATA_APPS_F_APSD_HPKT_TMR    0x00000020
+#define SCBDATA_APPS_F_APSD_TX_PEND     0x00000040
+#define SCBDATA_APPS_F_INTRANS          0x00000080
+#define SCBDATA_APPS_F_OFF_PEND         0x00000100
+#define SCBDATA_APPS_F_OFF_BLOCKED      0x00000200
+#define SCBDATA_APPS_F_OFF_IN_PROG      0x00000400
+
+
+/**
+ * Summary for tx datapath AMPDU SCB cubby
+ * This is a specific data structure to describe the AMPDU datapath state for an SCB
+ * used instead of scb_subq_summary_t.
+ * Info is for one TID, so one will be dumped per BA TID active for an SCB.
+ */
+typedef struct scb_ampdu_tx_summary {
+	uint16 id;              /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM */
+	uint16 len;             /* XTLV Len */
+	uint32 flags;           /* misc flags */
+	uint8  tid;             /* initiator TID (priority) */
+	uint8  ba_state;        /* internal BA state */
+	uint8  bar_cnt;         /* number of bars sent with no progress */
+	uint8  retry_bar;       /* reason code if bar to be retried at watchdog */
+	uint16 barpending_seq;  /* seqnum for bar */
+	uint16 bar_ackpending_seq; /* seqnum of bar for which ack is pending */
+	uint16 start_seq;       /* seqnum of the first unacknowledged packet */
+	uint16 max_seq;         /* max unacknowledged seqnum sent */
+	uint32 released_bytes_inflight; /* Number of released bytes still in flight */
+	uint32 released_bytes_target;
+} scb_ampdu_tx_summary_t;
+
+/* scb_ampdu_tx_summary.flags defs */
+#define SCBDATA_AMPDU_TX_F_BAR_ACKPEND          0x00000001 /* bar_ackpending */
+
+/** XTLV struct to summarize a BSSCFG's packet queue */
+typedef struct bsscfg_q_summary {
+	uint16 id;               /* XTLV ID: EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM */
+	uint16 len;              /* XTLV Len */
+	struct ether_addr BSSID; /* BSSID */
+	uint8  bsscfg_idx;       /* bsscfg index */
+	uint8  type;             /* bsscfg type enumeration: BSSCFG_TYPE_XXX */
+	uint8  subtype;          /* bsscfg subtype enumeration: BSSCFG_SUBTYPE_XXX */
+	uint8  prec_count;       /* count of precedences/fifos and len of following array */
+	uint16 plen[1];          /* var len array of lengths of each prec/fifo in the queue */
+} bsscfg_q_summary_t;
+
+#define BSSCFG_Q_SUMMARY_LEN              (OFFSETOF(bsscfg_q_summary_t, plen))
+#define BSSCFG_Q_SUMMARY_FULL_LEN(num_q)  (BSSCFG_Q_SUMMARY_LEN + (num_q) * sizeof(uint16))
+
+/**
+ * An XTLV holding a TxStats array
+ * TxStatus entries are 8 or 16 bytes, size in words (2 or 4) given in
+ * entry_size field.
+ * Array is uint32 words
+ */
+typedef struct xtlv_uc_txs {
+	uint16 id;              /* XTLV ID: EVENT_LOG_XTLV_ID_UCTXSTATUS */
+	uint16 len;             /* XTLV Len */
+	uint8  entry_size;      /* num uint32 words per entry */
+	uint8  pad[3];          /* reserved, zero */
+	uint32 w[1];            /* var len array of words */
+} xtlv_uc_txs_t;
+
+#define XTLV_UCTXSTATUS_LEN                (OFFSETOF(xtlv_uc_txs_t, w))
+#define XTLV_UCTXSTATUS_FULL_LEN(words)    (XTLV_UCTXSTATUS_LEN + (words) * sizeof(uint32))
+
+#define SCAN_SUMMARY_VERSION	1
+/* Scan flags */
+#define SCAN_SUM_CHAN_INFO	0x1
+/* Scan_sum flags */
+#define BAND5G_SIB_ENAB	0x2
+#define BAND2G_SIB_ENAB	0x4
+#define PARALLEL_SCAN	0x8
+#define SCAN_ABORT	0x10
+
+/* scan_channel_info flags */
+#define ACTIVE_SCAN_SCN_SUM	0x2
+#define SCAN_SUM_WLC_CORE0	0x4
+#define SCAN_SUM_WLC_CORE1	0x8
+#define HOME_CHAN	0x10
+
+typedef struct wl_scan_ssid_info
+{
+	uint8		ssid_len;	/* the length of SSID */
+	uint8		ssid[32];	/* SSID string */
+} wl_scan_ssid_info_t;
+
+typedef struct wl_scan_channel_info {
+	uint16 chanspec;	/* chanspec scanned */
+	uint16 reserv;
+	uint32 start_time;		/* Scan start time in
+				* milliseconds for the chanspec
+				* or home_dwell time start
+				*/
+	uint32 end_time;		/* Scan end time in
+				* milliseconds for the chanspec
+				* or home_dwell time end
+				*/
+	uint16 probe_count;	/* No of probes sent out. For future use
+				*/
+	uint16 scn_res_count;	/* Count of scan_results found per
+				* channel. For future use
+				*/
+} wl_scan_channel_info_t;
+
+typedef struct wl_scan_summary_info {
+	uint32 total_chan_num;	/* Total number of channels scanned */
+	uint32 scan_start_time;	/* Scan start time in milliseconds */
+	uint32 scan_end_time;	/* Scan end time in milliseconds */
+	wl_scan_ssid_info_t ssid[1];	/* SSID being scanned in current
+				* channel. For future use
+				*/
+} wl_scan_summary_info_t;
+
+struct wl_scan_summary {
+	uint8 version;		/* Version */
+	uint8 reserved;
+	uint16 len;		/* Length of the data buffer including SSID
+				 * list.
+				 */
+	uint16 sync_id;		/* Scan Sync ID */
+	uint16 scan_flags;		/* flags [0] or SCAN_SUM_CHAN_INFO = */
+				/* channel_info, if not set */
+				/* it is scan_summary_info */
+				/* when channel_info is used, */
+				/* the following flag bits are overridden: */
+				/* flags[1] or ACTIVE_SCAN_SCN_SUM = active channel if set */
+				/* passive if not set */
+				/* flags[2] or WLC_CORE0 = if set, represents wlc_core0 */
+				/* flags[3] or WLC_CORE1 = if set, represents wlc_core1 */
+				/* flags[4] or HOME_CHAN = if set, represents home-channel */
+				/* flags[5:15] = reserved */
+				/* when scan_summary_info is used, */
+				/* the following flag bits are used: */
+				/* flags[1] or BAND5G_SIB_ENAB = */
+				/* allowSIBParallelPassiveScan on 5G band */
+				/* flags[2] or BAND2G_SIB_ENAB = */
+				/* allowSIBParallelPassiveScan on 2G band */
+				/* flags[3] or PARALLEL_SCAN = Parallel scan enabled or not */
+				/* flags[4] or SCAN_ABORT = SCAN_ABORTED scenario */
+				/* flags[5:15] = reserved */
+	union {
+		wl_scan_channel_info_t scan_chan_info;	/* scan related information
+							* for each channel scanned
+							*/
+		wl_scan_summary_info_t scan_sum_info;	/* Cumulative scan related
+							* information.
+							*/
+	} u;
+};
+
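+/* Illustrative usage sketch, not part of the original Broadcom sources:
+ * interpreting struct wl_scan_summary according to the scan_flags comment
+ * above; printf stands in for whatever logging primitive the build provides.
+ * Disabled by default.
+ */
+#if 0
+static void
+example_scan_summary_dump(const struct wl_scan_summary *s)
+{
+	if (s->scan_flags & SCAN_SUM_CHAN_INFO) {
+		/* Per-channel record */
+		const wl_scan_channel_info_t *ci = &s->u.scan_chan_info;
+		printf("chanspec 0x%04x dwell %u ms\n", ci->chanspec,
+			ci->end_time - ci->start_time);
+	} else {
+		/* Cumulative record */
+		const wl_scan_summary_info_t *si = &s->u.scan_sum_info;
+		printf("scanned %u channels in %u ms\n", si->total_chan_num,
+			si->scan_end_time - si->scan_start_time);
+	}
+}
+#endif /* illustrative sketch */
+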
+/* Channel switch log record structure
+ * Host may map the following structure on channel switch event log record
+ * received from dongle. Note that all payload entries in event log record are
+ * uint32/int32.
+ */
+typedef struct wl_chansw_event_log_record {
+	uint32 time;			/* Time in us */
+	uint32 old_chanspec;		/* Old channel spec */
+	uint32 new_chanspec;		/* New channel spec */
+	uint32 chansw_reason;		/* Reason for channel change */
+	int32 dwell_time;
+} wl_chansw_event_log_record_t;
+
+/* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */
+#define WL_AMPDU_STATS_TYPE_RXMCSx1	0	/* RX MCS rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_RXMCSx2	1
+#define WL_AMPDU_STATS_TYPE_RXMCSx3	2
+#define WL_AMPDU_STATS_TYPE_RXMCSx4	3
+#define WL_AMPDU_STATS_TYPE_RXVHTx1	4	/* RX VHT rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_RXVHTx2	5
+#define WL_AMPDU_STATS_TYPE_RXVHTx3	6
+#define WL_AMPDU_STATS_TYPE_RXVHTx4	7
+#define WL_AMPDU_STATS_TYPE_TXMCSx1	8	/* TX MCS rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_TXMCSx2	9
+#define WL_AMPDU_STATS_TYPE_TXMCSx3	10
+#define WL_AMPDU_STATS_TYPE_TXMCSx4	11
+#define WL_AMPDU_STATS_TYPE_TXVHTx1	12	/* TX VHT rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_TXVHTx2	13
+#define WL_AMPDU_STATS_TYPE_TXVHTx3	14
+#define WL_AMPDU_STATS_TYPE_TXVHTx4	15
+#define WL_AMPDU_STATS_TYPE_RXMCSSGI	16	/* RX SGI usage (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_TXMCSSGI	17	/* TX SGI usage (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_RXVHTSGI	18	/* RX SGI usage (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_TXVHTSGI	19	/* TX SGI usage (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_RXMCSPER	20	/* RX PER (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_TXMCSPER	21	/* TX PER (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_RXVHTPER	22	/* RX PER (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_TXVHTPER	23	/* TX PER (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_RXDENS	24	/* RX AMPDU density */
+#define WL_AMPDU_STATS_TYPE_TXDENS	25	/* TX AMPDU density */
+#define WL_AMPDU_STATS_TYPE_RXMCSOK	26	/* RX all MCS rates */
+#define WL_AMPDU_STATS_TYPE_RXVHTOK	27	/* RX all VHT rates */
+#define WL_AMPDU_STATS_TYPE_TXMCSALL	28	/* TX all MCS rates */
+#define WL_AMPDU_STATS_TYPE_TXVHTALL	29	/* TX all VHT rates */
+#define WL_AMPDU_STATS_TYPE_TXMCSOK	30	/* TX all MCS rates */
+#define WL_AMPDU_STATS_TYPE_TXVHTOK	31	/* TX all VHT rates */
+
+#define WL_AMPDU_STATS_MAX_CNTS		64
+
+typedef struct {
+	uint16	type;		/* AMPDU statistics sub-type */
+	uint16	len;		/* Number of 32-bit counters */
+	uint32	counters[WL_AMPDU_STATS_MAX_CNTS];
+} wl_ampdu_stats_generic_t;
+
+typedef struct {
+	uint16	type;		/* AMPDU statistics sub-type */
+	uint16	len;		/* Number of 32-bit counters + 2 */
+	uint32	total_ampdu;
+	uint32	total_mpdu;
+	uint32	aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1];
+} wl_ampdu_stats_aggrsz_t;
+
+/* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */
+#define WL_MSCH_PROFILER_START		0	/* start event check */
+#define WL_MSCH_PROFILER_EXIT		1	/* exit event check */
+#define WL_MSCH_PROFILER_REQ		2	/* request event */
+#define WL_MSCH_PROFILER_CALLBACK	3	/* call back event */
+#define WL_MSCH_PROFILER_MESSAGE	4	/* message event */
+#define WL_MSCH_PROFILER_PROFILE_START	5
+#define WL_MSCH_PROFILER_PROFILE_END	6
+#define WL_MSCH_PROFILER_REQ_HANDLE	7
+#define WL_MSCH_PROFILER_REQ_ENTITY	8
+#define WL_MSCH_PROFILER_CHAN_CTXT	9
+#define WL_MSCH_PROFILER_EVENT_LOG	10
+#define WL_MSCH_PROFILER_REQ_TIMING	11
+#define WL_MSCH_PROFILER_TYPE_MASK	0x00ff
+#define WL_MSCH_PROFILER_WLINDEX_SHIFT	8
+#define WL_MSCH_PROFILER_WLINDEX_MASK	0x0f00
+#define WL_MSCH_PROFILER_VER_SHIFT	12
+#define WL_MSCH_PROFILER_VER_MASK	0xf000
+
+/* MSCH Event data current version */
+#define WL_MSCH_PROFILER_VER		2
+
+/* msch version history */
+#define WL_MSCH_PROFILER_RSDB_VER	1
+#define WL_MSCH_PROFILER_REPORT_VER	2
+
+/* msch collect header size */
+#define WL_MSCH_PROFILE_HEAD_SIZE	OFFSETOF(msch_collect_tlv_t, value)
+
+/* msch event log header size */
+#define WL_MSCH_EVENT_LOG_HEAD_SIZE	OFFSETOF(msch_event_log_profiler_event_data_t, data)
+
+/* MSCH data buffer size */
+#define WL_MSCH_PROFILER_BUFFER_SIZE	512
+
+/* request type used in wlc_msch_req_param_t struct */
+#define WL_MSCH_RT_BOTH_FIXED	0	/* both start and end time is fixed */
+#define WL_MSCH_RT_START_FLEX	1	/* start time is flexible and duration is fixed */
+#define WL_MSCH_RT_DUR_FLEX	2	/* start time is fixed and end time is flexible */
+#define WL_MSCH_RT_BOTH_FLEX	3	/* Both start and duration is flexible */
+
+/* Flags used in wlc_msch_req_param_t struct */
+#define WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS  (1 << 0) /* Don't break up channels in chanspec_list */
+#define WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS (1 << 1)  /* No slot end if slots are continuous */
+#define WL_MSCH_REQ_FLAGS_PREMTABLE        (1 << 2) /* Req can be pre-empted by PREMT_CURTS req */
+#define WL_MSCH_REQ_FLAGS_PREMT_CURTS      (1 << 3) /* Pre-empt request at the end of curts */
+#define WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE  (1 << 4) /* Pre-empt cur_ts immediately */
+
+/* Requested slot Callback states
+ * req->pend_slot/cur_slot->flags
+ */
+#define WL_MSCH_RC_FLAGS_ONCHAN_FIRE		(1 << 0)
+#define WL_MSCH_RC_FLAGS_START_FIRE_DONE	(1 << 1)
+#define WL_MSCH_RC_FLAGS_END_FIRE_DONE		(1 << 2)
+#define WL_MSCH_RC_FLAGS_ONFIRE_DONE		(1 << 3)
+#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_START	(1 << 4)
+#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_END		(1 << 5)
+#define WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE	(1 << 6)
+
+/* Request entity flags */
+#define WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE	(1 << 0)
+
+/* Request Handle flags */
+#define WL_MSCH_REQ_HDL_FLAGS_NEW_REQ		(1 << 0) /* req_start callback */
+
+/* MSCH state flags (msch_info->flags) */
+#define	WL_MSCH_STATE_IN_TIEMR_CTXT		0x1
+#define WL_MSCH_STATE_SCHD_PENDING		0x2
+
+/* MSCH callback type */
+#define	WL_MSCH_CT_REQ_START		0x1
+#define	WL_MSCH_CT_ON_CHAN		0x2
+#define	WL_MSCH_CT_SLOT_START		0x4
+#define	WL_MSCH_CT_SLOT_END		0x8
+#define	WL_MSCH_CT_SLOT_SKIP		0x10
+#define	WL_MSCH_CT_OFF_CHAN		0x20
+#define WL_MSCH_CT_OFF_CHAN_DONE	0x40
+#define	WL_MSCH_CT_REQ_END		0x80
+#define	WL_MSCH_CT_PARTIAL		0x100
+#define	WL_MSCH_CT_PRE_ONCHAN		0x200
+#define	WL_MSCH_CT_PRE_REQ_START	0x400
+
+/* MSCH command bits */
+#define WL_MSCH_CMD_ENABLE_BIT		0x01
+#define WL_MSCH_CMD_PROFILE_BIT		0x02
+#define WL_MSCH_CMD_CALLBACK_BIT	0x04
+#define WL_MSCH_CMD_REGISTER_BIT	0x08
+#define WL_MSCH_CMD_ERROR_BIT		0x10
+#define WL_MSCH_CMD_DEBUG_BIT		0x20
+#define WL_MSCH_CMD_INFOM_BIT		0x40
+#define WL_MSCH_CMD_TRACE_BIT		0x80
+#define WL_MSCH_CMD_ALL_BITS		0xfe
+#define WL_MSCH_CMD_SIZE_MASK		0x00ff0000
+#define WL_MSCH_CMD_SIZE_SHIFT		16
+#define WL_MSCH_CMD_VER_MASK		0xff000000
+#define WL_MSCH_CMD_VER_SHIFT		24
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_MSCH_NUMCHANNELS		64
+
+typedef struct msch_collect_tlv {
+	uint16	type;
+	uint16	size;
+	char	value[1];
+} msch_collect_tlv_t;
+
+typedef struct msch_profiler_event_data {
+	uint32	time_lo;		/* Request time */
+	uint32	time_hi;
+} msch_profiler_event_data_t;
+
+typedef struct msch_start_profiler_event_data {
+	uint32	time_lo;		/* Request time */
+	uint32	time_hi;
+	uint32	status;
+} msch_start_profiler_event_data_t;
+
+typedef struct msch_message_profiler_event_data {
+	uint32	time_lo;		/* Request time */
+	uint32	time_hi;
+	char	message[1];		/* message */
+} msch_message_profiler_event_data_t;
+
+typedef struct msch_event_log_profiler_event_data {
+	uint32	time_lo;		/* Request time */
+	uint32	time_hi;
+	event_log_hdr_t hdr;		/* event log header */
+	uint32	data[9];		/* event data */
+} msch_event_log_profiler_event_data_t;
+
+typedef struct msch_req_param_profiler_event_data {
+	uint16  flags;			/* Describe various request properties */
+	uint8	req_type;		/* Describe start and end time flexibility */
+	uint8	priority;		/* Define the request priority */
+	uint32	start_time_l;		/* Requested start time offset in us unit */
+	uint32	start_time_h;
+	uint32	duration;		/* Requested duration in us unit */
+	uint32	interval;		/* Requested periodic interval in us unit,
+					 * 0 means non-periodic
+					 */
+	union {
+		uint32	dur_flex;	/* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */
+		struct {
+			uint32 min_dur;	/* min duration for traffic, maps to home_time */
+			uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */
+			uint32 hi_prio_time_l;
+			uint32 hi_prio_time_h;
+			uint32 hi_prio_interval; /* repeated high priority interval */
+		} bf;
+	} flex;
+} msch_req_param_profiler_event_data_t;
+
+typedef struct msch_req_timing_profiler_event_data {
+	uint32 p_req_timing;
+	uint32 p_prev;
+	uint32 p_next;
+	uint16 flags;
+	uint16 timeslot_ptr;
+	uint32 fire_time_l;
+	uint32 fire_time_h;
+	uint32 pre_start_time_l;
+	uint32 pre_start_time_h;
+	uint32 start_time_l;
+	uint32 start_time_h;
+	uint32 end_time_l;
+	uint32 end_time_h;
+	uint32 p_timeslot;
+} msch_req_timing_profiler_event_data_t;
+
+typedef struct msch_chan_ctxt_profiler_event_data {
+	uint32 p_chan_ctxt;
+	uint32 p_prev;
+	uint32 p_next;
+	uint16 chanspec;
+	uint16 bf_sch_pending;
+	uint32 bf_link_prev;
+	uint32 bf_link_next;
+	uint32 onchan_time_l;
+	uint32 onchan_time_h;
+	uint32 actual_onchan_dur_l;
+	uint32 actual_onchan_dur_h;
+	uint32 pend_onchan_dur_l;
+	uint32 pend_onchan_dur_h;
+	uint16 req_entity_list_cnt;
+	uint16 req_entity_list_ptr;
+	uint16 bf_entity_list_cnt;
+	uint16 bf_entity_list_ptr;
+	uint32 bf_skipped_count;
+} msch_chan_ctxt_profiler_event_data_t;
+
+typedef struct msch_req_entity_profiler_event_data {
+	uint32 p_req_entity;
+	uint32 req_hdl_link_prev;
+	uint32 req_hdl_link_next;
+	uint32 chan_ctxt_link_prev;
+	uint32 chan_ctxt_link_next;
+	uint32 rt_specific_link_prev;
+	uint32 rt_specific_link_next;
+	uint32 start_fixed_link_prev;
+	uint32 start_fixed_link_next;
+	uint32 both_flex_list_prev;
+	uint32 both_flex_list_next;
+	uint16 chanspec;
+	uint16 priority;
+	uint16 cur_slot_ptr;
+	uint16 pend_slot_ptr;
+	uint16 pad;
+	uint16 chan_ctxt_ptr;
+	uint32 p_chan_ctxt;
+	uint32 p_req_hdl;
+	uint32 bf_last_serv_time_l;
+	uint32 bf_last_serv_time_h;
+	uint16 onchan_chn_idx;
+	uint16 cur_chn_idx;
+	uint32 flags;
+	uint32 actual_start_time_l;
+	uint32 actual_start_time_h;
+	uint32 curts_fire_time_l;
+	uint32 curts_fire_time_h;
+} msch_req_entity_profiler_event_data_t;
+
+typedef struct msch_req_handle_profiler_event_data {
+	uint32 p_req_handle;
+	uint32 p_prev;
+	uint32 p_next;
+	uint32 cb_func;
+	uint32 cb_ctxt;
+	uint16 req_param_ptr;
+	uint16 req_entity_list_cnt;
+	uint16 req_entity_list_ptr;
+	uint16 chan_cnt;
+	uint32 flags;
+	uint16 chanspec_list;
+	uint16 chanspec_cnt;
+	uint16 chan_idx;
+	uint16 last_chan_idx;
+	uint32 req_time_l;
+	uint32 req_time_h;
+} msch_req_handle_profiler_event_data_t;
+
+typedef struct msch_profiler_profiler_event_data {
+	uint32 time_lo;			/* Request time */
+	uint32 time_hi;
+	uint32 free_req_hdl_list;
+	uint32 free_req_entity_list;
+	uint32 free_chan_ctxt_list;
+	uint32 free_chanspec_list;
+	uint16 cur_msch_timeslot_ptr;
+	uint16 next_timeslot_ptr;
+	uint32 p_cur_msch_timeslot;
+	uint32 p_next_timeslot;
+	uint32 cur_armed_timeslot;
+	uint32 flags;
+	uint32 ts_id;
+	uint32 service_interval;
+	uint32 max_lo_prio_interval;
+	uint16 flex_list_cnt;
+	uint16 msch_chanspec_alloc_cnt;
+	uint16 msch_req_entity_alloc_cnt;
+	uint16 msch_req_hdl_alloc_cnt;
+	uint16 msch_chan_ctxt_alloc_cnt;
+	uint16 msch_timeslot_alloc_cnt;
+	uint16 msch_req_hdl_list_cnt;
+	uint16 msch_req_hdl_list_ptr;
+	uint16 msch_chan_ctxt_list_cnt;
+	uint16 msch_chan_ctxt_list_ptr;
+	uint16 msch_req_timing_list_cnt;
+	uint16 msch_req_timing_list_ptr;
+	uint16 msch_start_fixed_list_cnt;
+	uint16 msch_start_fixed_list_ptr;
+	uint16 msch_both_flex_req_entity_list_cnt;
+	uint16 msch_both_flex_req_entity_list_ptr;
+	uint16 msch_start_flex_list_cnt;
+	uint16 msch_start_flex_list_ptr;
+	uint16 msch_both_flex_list_cnt;
+	uint16 msch_both_flex_list_ptr;
+	uint32 slotskip_flag;
+} msch_profiler_profiler_event_data_t;
+
+typedef struct msch_req_profiler_event_data {
+	uint32 time_lo;			/* Request time */
+	uint32 time_hi;
+	uint16 chanspec_cnt;
+	uint16 chanspec_ptr;
+	uint16 req_param_ptr;
+	uint16 pad;
+} msch_req_profiler_event_data_t;
+
+typedef struct msch_callback_profiler_event_data {
+	uint32 time_lo;			/* Request time */
+	uint32 time_hi;
+	uint16 type;			/* callback type */
+	uint16 chanspec;		/* actual chanspec, may differ from the requested one */
+	uint32 start_time_l;		/* time slot start time low 32bit */
+	uint32 start_time_h;		/* time slot start time high 32bit */
+	uint32 end_time_l;		/* time slot end time low 32 bit */
+	uint32 end_time_h;		/* time slot end time high 32 bit */
+	uint32 timeslot_id;		/* unique time slot id */
+	uint32 p_req_hdl;
+	uint32 onchan_idx;		/* Current channel index */
+	uint32 cur_chan_seq_start_time_l; /* start time of current sequence */
+	uint32 cur_chan_seq_start_time_h;
+} msch_callback_profiler_event_data_t;
+
+typedef struct msch_timeslot_profiler_event_data {
+	uint32 p_timeslot;
+	uint32 timeslot_id;
+	uint32 pre_start_time_l;
+	uint32 pre_start_time_h;
+	uint32 end_time_l;
+	uint32 end_time_h;
+	uint32 sch_dur_l;
+	uint32 sch_dur_h;
+	uint32 p_chan_ctxt;
+	uint32 fire_time_l;
+	uint32 fire_time_h;
+	uint32 state;
+} msch_timeslot_profiler_event_data_t;
+
+typedef struct msch_register_params	{
+	uint16 wlc_index;		/* Optional wlc index */
+	uint16 flags;			/* Describe various request properties */
+	uint32 req_type;		/* Describe start and end time flexibility */
+	uint16 id;			/* register id */
+	uint16 priority;		/* Define the request priority */
+	uint32 start_time;		/* Requested start time offset in ms unit */
+	uint32 duration;		/* Requested duration in ms unit */
+	uint32 interval;		/* Requested periodic interval in ms unit,
+					 * 0 means non-periodic
+					 */
+	uint32 dur_flex;		/* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */
+	uint32 min_dur;			/* min duration for traffic, maps to home_time */
+	uint32 max_away_dur;		/* max acceptable away dur, maps to home_away_time */
+	uint32 hi_prio_time;
+	uint32 hi_prio_interval;	/* repeated high priority interval */
+	uint32 chanspec_cnt;
+	uint16 chanspec_list[WL_MSCH_NUMCHANNELS];
+} msch_register_params_t;
+
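+/* Illustrative usage sketch, not part of the original Broadcom sources:
+ * sizing and filling a txq_summary_t XTLV for a queue with nprec
+ * precedences. Assumes the XTLV len field excludes the id/len header
+ * (BCM_XTLV_HDR_SIZE, from bcmutils.h). Disabled by default.
+ */
+#if 0
+static void
+example_txq_summary_fill(txq_summary_t *sum, uint8 nprec, const uint16 *qlen)
+{
+	int i;
+
+	sum->id = EVENT_LOG_XTLV_ID_TXQ_SUM;
+	sum->len = (uint16)(TXQ_SUMMARY_FULL_LEN(nprec) - BCM_XTLV_HDR_SIZE);
+	sum->bsscfg_map = 0;
+	sum->stopped = 0;
+	sum->prec_count = nprec;
+	sum->pad = 0;
+	for (i = 0; i < nprec; i++)
+		sum->plen[i] = qlen[i];	/* per-prec/fifo queue lengths */
+}
+#endif /* illustrative sketch */
+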
+#endif /* _EVENT_LOG_PAYLOAD_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/event_log_set.h b/drivers/net/wireless/bcmdhd/include/event_log_set.h
new file mode 100644
index 0000000..db28360
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/event_log_set.h
@@ -0,0 +1,53 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_log_set.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _EVENT_LOG_SET_H_
+#define _EVENT_LOG_SET_H_
+
+/* Set a maximum number of sets here.  It is not dynamic for
+ *  efficiency of the EVENT_LOG calls.
+ */
+#define NUM_EVENT_LOG_SETS 8
+
+/* Define new event log sets here */
+#define EVENT_LOG_SET_BUS	0
+#define EVENT_LOG_SET_WL	1
+#define EVENT_LOG_SET_PSM	2
+#define EVENT_LOG_SET_ERROR	3
+#define EVENT_LOG_SET_MEM_API	4
+/* Share the set with MEM_API for now to limit ROM invalidation.
+ * The above set is used in dingo only
+ * On trunk, MSCH should move to a different set.
+ */
+#define EVENT_LOG_SET_MSCH_PROFILER	4
+#define EVENT_LOG_SET_ECOUNTERS 5	/* Host to instantiate this for ecounters. */
+#define EVENT_LOG_SET_6	6	/* Instantiated by host for channel switch logs */
+#define EVENT_LOG_SET_7	7	/* Instantiated by host for AMPDU stats */
+
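+/* Illustrative usage sketch, not part of the original Broadcom sources:
+ * binding a tag to one of the sets above via event_log_tag_start(), which is
+ * declared in event_log.h (assumed to be included along with event_log_tag.h).
+ * Disabled by default.
+ */
+#if 0
+static int
+example_route_scan_errors(void)
+{
+	/* Log EVENT_LOG_TAG_SCAN_ERROR records into the WL set */
+	return event_log_tag_start(EVENT_LOG_TAG_SCAN_ERROR, EVENT_LOG_SET_WL,
+		EVENT_LOG_TAG_FLAG_LOG);
+}
+#endif /* illustrative sketch */
+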
+#endif /* _EVENT_LOG_SET_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/event_log_tag.h b/drivers/net/wireless/bcmdhd/include/event_log_tag.h
new file mode 100644
index 0000000..41332e7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/event_log_tag.h
@@ -0,0 +1,222 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_log_tag.h 700681 2017-05-20 16:37:38Z $
+ */
+
+#ifndef _EVENT_LOG_TAG_H_
+#define _EVENT_LOG_TAG_H_
+
+#include <typedefs.h>
+
+/* Define new event log tags here */
+#define EVENT_LOG_TAG_NULL	0	/* Special null tag */
+#define EVENT_LOG_TAG_TS	1	/* Special timestamp tag */
+#define EVENT_LOG_TAG_BUS_OOB	2
+#define EVENT_LOG_TAG_BUS_STATE	3
+#define EVENT_LOG_TAG_BUS_PROTO	4
+#define EVENT_LOG_TAG_BUS_CTL	5
+#define EVENT_LOG_TAG_BUS_EVENT	6
+#define EVENT_LOG_TAG_BUS_PKT	7
+#define EVENT_LOG_TAG_BUS_FRAME	8
+#define EVENT_LOG_TAG_BUS_DESC	9
+#define EVENT_LOG_TAG_BUS_SETUP	10
+#define EVENT_LOG_TAG_BUS_MISC	11
+#define EVENT_LOG_TAG_SRSCAN		22
+#define EVENT_LOG_TAG_PWRSTATS_INFO	23
+#define EVENT_LOG_TAG_UCODE_WATCHDOG 26
+#define EVENT_LOG_TAG_UCODE_FIFO 27
+#define EVENT_LOG_TAG_SCAN_TRACE_LOW	28
+#define EVENT_LOG_TAG_SCAN_TRACE_HIGH	29
+#define EVENT_LOG_TAG_SCAN_ERROR	30
+#define EVENT_LOG_TAG_SCAN_WARN	31
+#define EVENT_LOG_TAG_MPF_ERR	32
+#define EVENT_LOG_TAG_MPF_WARN	33
+#define EVENT_LOG_TAG_MPF_INFO	34
+#define EVENT_LOG_TAG_MPF_DEBUG	35
+#define EVENT_LOG_TAG_EVENT_INFO	36
+#define EVENT_LOG_TAG_EVENT_ERR	37
+#define EVENT_LOG_TAG_PWRSTATS_ERROR	38
+#define EVENT_LOG_TAG_EXCESS_PM_ERROR	39
+#define EVENT_LOG_TAG_IOCTL_LOG			40
+#define EVENT_LOG_TAG_PFN_ERR	41
+#define EVENT_LOG_TAG_PFN_WARN	42
+#define EVENT_LOG_TAG_PFN_INFO	43
+#define EVENT_LOG_TAG_PFN_DEBUG	44
+#define EVENT_LOG_TAG_BEACON_LOG	45
+#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46
+#define EVENT_LOG_TAG_TRACE_CHANSW 47
+#define EVENT_LOG_TAG_PCI_ERROR	48
+#define EVENT_LOG_TAG_PCI_TRACE	49
+#define EVENT_LOG_TAG_PCI_WARN	50
+#define EVENT_LOG_TAG_PCI_INFO	51
+#define EVENT_LOG_TAG_PCI_DBG	52
+#define EVENT_LOG_TAG_PCI_DATA  53
+#define EVENT_LOG_TAG_PCI_RING	54
+/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after the wlc_ranging merge from IGUANA;
+ * it is kept here to avoid compilation errors on trunk
+ */
+#define EVENT_LOG_TAG_AWDL_TRACE_RANGING	55
+#define EVENT_LOG_TAG_RANGING_TRACE	55
+#define EVENT_LOG_TAG_WL_ERROR		56
+#define EVENT_LOG_TAG_PHY_ERROR		57
+#define EVENT_LOG_TAG_OTP_ERROR		58
+#define EVENT_LOG_TAG_NOTIF_ERROR	59
+#define EVENT_LOG_TAG_MPOOL_ERROR	60
+#define EVENT_LOG_TAG_OBJR_ERROR	61
+#define EVENT_LOG_TAG_DMA_ERROR		62
+#define EVENT_LOG_TAG_PMU_ERROR		63
+#define EVENT_LOG_TAG_BSROM_ERROR	64
+#define EVENT_LOG_TAG_SI_ERROR		65
+#define EVENT_LOG_TAG_ROM_PRINTF	66
+#define EVENT_LOG_TAG_RATE_CNT		67
+#define EVENT_LOG_TAG_CTL_MGT_CNT	68
+#define EVENT_LOG_TAG_AMPDU_DUMP	69
+#define EVENT_LOG_TAG_MEM_ALLOC_SUCC	70
+#define EVENT_LOG_TAG_MEM_ALLOC_FAIL	71
+#define EVENT_LOG_TAG_MEM_FREE		72
+#define EVENT_LOG_TAG_WL_ASSOC_LOG	73
+#define EVENT_LOG_TAG_WL_PS_LOG		74
+#define EVENT_LOG_TAG_WL_ROAM_LOG	75
+#define EVENT_LOG_TAG_WL_MPC_LOG	76
+#define EVENT_LOG_TAG_WL_WSEC_LOG	77
+#define EVENT_LOG_TAG_WL_WSEC_DUMP	78
+#define EVENT_LOG_TAG_WL_MCNX_LOG	79
+#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR 80
+#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR 81
+#define EVENT_LOG_TAG_ECOUNTERS_ERROR	82
+#define EVENT_LOG_TAG_WL_COUNTERS	83
+#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS	84
+#define EVENT_LOG_TAG_WL_P2P_LOG            85
+#define EVENT_LOG_TAG_SDIO_ERROR		86
+#define EVENT_LOG_TAG_SDIO_TRACE		87
+#define EVENT_LOG_TAG_SDIO_DBG          88
+#define EVENT_LOG_TAG_SDIO_PRHDRS		89
+#define EVENT_LOG_TAG_SDIO_PRPKT		90
+#define EVENT_LOG_TAG_SDIO_INFORM		91
+#define EVENT_LOG_TAG_MIMO_PS_ERROR	92
+#define EVENT_LOG_TAG_MIMO_PS_TRACE	93
+#define EVENT_LOG_TAG_MIMO_PS_INFO	94
+#define EVENT_LOG_TAG_BTCX_STATS	95
+#define EVENT_LOG_TAG_LEAKY_AP_STATS	96
+#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION	97
+#define EVENT_LOG_TAG_MIMO_PS_STATS	98
+#define EVENT_LOG_TAG_PWRSTATS_PHY	99
+#define EVENT_LOG_TAG_PWRSTATS_SCAN	100
+#define EVENT_LOG_TAG_PWRSTATS_AWDL	101
+#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2	102
+#define EVENT_LOG_TAG_LQM		103
+#define EVENT_LOG_TAG_TRACE_WL_INFO	104
+#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO	105
+#define EVENT_LOG_TAG_ECOUNTERS_TIME_DATA	106
+#define EVENT_LOG_TAG_NAN_ERROR		107
+#define EVENT_LOG_TAG_NAN_INFO		108
+#define EVENT_LOG_TAG_NAN_DBG		109
+#define EVENT_LOG_TAG_STF_ARBITRATOR_ERROR	110
+#define EVENT_LOG_TAG_STF_ARBITRATOR_TRACE	111
+#define EVENT_LOG_TAG_STF_ARBITRATOR_WARN	112
+#define EVENT_LOG_TAG_SCAN_SUMMARY		113
+#define EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT	114
+#define EVENT_LOG_TAG_OCL_INFO			115
+#define EVENT_LOG_TAG_RSDB_PMGR_DEBUG		116
+#define EVENT_LOG_TAG_RSDB_PMGR_ERR		117
+#define EVENT_LOG_TAG_NAT_ERR                   118
+#define EVENT_LOG_TAG_NAT_WARN                  119
+#define EVENT_LOG_TAG_NAT_INFO                  120
+#define EVENT_LOG_TAG_NAT_DEBUG                 121
+#define EVENT_LOG_TAG_STA_INFO			122
+#define EVENT_LOG_TAG_PROXD_ERROR		123
+#define EVENT_LOG_TAG_PROXD_TRACE		124
+#define EVENT_LOG_TAG_PROXD_INFO		125
+#define EVENT_LOG_TAG_IE_ERROR			126
+#define EVENT_LOG_TAG_ASSOC_ERROR		127
+#define EVENT_LOG_TAG_SCAN_ERR			128
+#define EVENT_LOG_TAG_AMSDU_ERROR		129
+#define EVENT_LOG_TAG_AMPDU_ERROR		130
+#define EVENT_LOG_TAG_KM_ERROR			131
+#define EVENT_LOG_TAG_DFS			132
+#define EVENT_LOG_TAG_REGULATORY		133
+#define EVENT_LOG_TAG_CSA			134
+#define EVENT_LOG_TAG_WNM_BSSTRANS_ERR		135
+#define EVENT_LOG_TAG_SUP_INFO			136
+#define EVENT_LOG_TAG_SUP_ERROR			137
+#define EVENT_LOG_TAG_CHANCTXT_TRACE		138
+#define EVENT_LOG_TAG_CHANCTXT_INFO		139
+#define EVENT_LOG_TAG_CHANCTXT_ERROR		140
+#define EVENT_LOG_TAG_CHANCTXT_WARN		141
+#define EVENT_LOG_TAG_MSCHPROFILE		142
+#define EVENT_LOG_TAG_4WAYHANDSHAKE		143
+#define EVENT_LOG_TAG_MSCHPROFILE_TLV		144
+#define EVENT_LOG_TAG_ADPS			145
+#define EVENT_LOG_TAG_MBO_DBG			146
+#define EVENT_LOG_TAG_MBO_INFO			147
+#define EVENT_LOG_TAG_MBO_ERR			148
+#define EVENT_LOG_TAG_TXDELAY			149
+#define EVENT_LOG_TAG_BCNTRIM_INFO		150
+#define EVENT_LOG_TAG_BCNTRIM_TRACE		151
+#define EVENT_LOG_TAG_OPS_INFO			152
+#define EVENT_LOG_TAG_STATS			153
+#define EVENT_LOG_TAG_BAM			154
+#define EVENT_LOG_TAG_TXFAIL			155
+#define EVENT_LOG_TAG_AWDL_CONFIG_DBG		156
+#define EVENT_LOG_TAG_AWDL_SYNC_DBG		157
+#define EVENT_LOG_TAG_AWDL_PEER_DBG		158
+#define EVENT_LOG_TAG_RANDMAC_INFO		159
+#define EVENT_LOG_TAG_RANDMAC_DBG		160
+#define EVENT_LOG_TAG_RANDMAC_ERR		161
+#define EVENT_LOG_TAG_AWDL_DFSP_DBG		162
+#define EVENT_LOG_TAG_TPA_ERR			163
+#define EVENT_LOG_TAG_TPA_INFO			164
+
+/* EVENT_LOG_TAG_MAX = set to the same value as the last tag, not last tag + 1 */
+#define EVENT_LOG_TAG_MAX			164
+/* Note: New events should be added/reserved in trunk before being added to branches */
+
+
+#define	SD_PRHDRS(i, s, h, p, n, l)
+#define	SD_PRPKT(m, b, n)
+#define	SD_INFORM(args)
+
+/* Flags for tag control */
+#define EVENT_LOG_TAG_FLAG_NONE		0
+#define EVENT_LOG_TAG_FLAG_LOG		0x80
+#define EVENT_LOG_TAG_FLAG_PRINT	0x40
+#define EVENT_LOG_TAG_FLAG_SET_MASK	0x3f
+
+/* Each event log entry has a type.  The type is the LAST word of the
+ * event log.  The printing code walks the event entries in reverse
+ * order to find the first entry.
+ */
+typedef union event_log_hdr {
+	struct {
+		uint8 tag;		/* Event_log entry tag */
+		uint8 count;		/* Count of 4-byte entries */
+		uint16 fmt_num;		/* Format number */
+	};
+	uint32 t;			/* Type cheat */
+} event_log_hdr_t;
+
+#endif /* _EVENT_LOG_TAG_H_ */
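
A minimal, illustrative sketch (not part of this patch) of how a host-side tool might decode the event_log_hdr union defined above: the header is the last 32-bit word of each entry, so a reader starts at the end of a filled buffer and walks backwards. Buffer layout, helper names, and the use of stdint types instead of the firmware typedefs are assumptions for this example.

#include <stdint.h>
#include <stdio.h>

typedef union {
	struct {
		uint8_t  tag;      /* event log tag, e.g. EVENT_LOG_TAG_TRACE_CHANSW */
		uint8_t  count;    /* number of 4-byte payload words preceding the header */
		uint16_t fmt_num;  /* format string number */
	};
	uint32_t t;
} example_event_log_hdr_t;

/* Walk a filled log region backwards: 'end' points one word past the last header word. */
static void example_walk(const uint32_t *start, const uint32_t *end)
{
	const uint32_t *p = end;

	while (p > start) {
		example_event_log_hdr_t hdr;

		hdr.t = *(p - 1);	/* header is the LAST word of the entry */
		if ((uint32_t)(p - start) < (uint32_t)hdr.count + 1)
			break;		/* truncated entry */
		p -= (uint32_t)hdr.count + 1;	/* step back over payload + header */
		printf("tag=%u fmt=%u count=%u words\n",
		       (unsigned)hdr.tag, (unsigned)hdr.fmt_num, (unsigned)hdr.count);
	}
}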
diff --git a/drivers/net/wireless/bcmdhd/include/event_trace.h b/drivers/net/wireless/bcmdhd/include/event_trace.h
new file mode 100644
index 0000000..cd24bdf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/event_trace.h
@@ -0,0 +1,123 @@
+/*
+ * Trace log blocks sent over HBUS
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_trace.h 645268 2016-06-23 08:39:17Z $
+ */
+
+/**
+ * @file
+ * @brief
+ * Define the trace event ID and tag ID
+ */
+
+#ifndef	_WL_DIAG_H
+#define	_WL_DIAG_H
+
+#define DIAG_MAJOR_VERSION      1	/* 4 bits */
+#define DIAG_MINOR_VERSION      0	/* 4 bits */
+#define DIAG_MICRO_VERSION      0	/* 4 bits */
+
+#define DIAG_VERSION		\
+	((DIAG_MICRO_VERSION&0xF) | (DIAG_MINOR_VERSION&0xF)<<4 | \
+	(DIAG_MAJOR_VERSION&0xF)<<8)
+					/* bit[11:8] major ver */
+					/* bit[7:4] minor ver */
+					/* bit[3:0] micro ver */
+
+/* event ID for trace purpose only, to avoid the conflict with future new
+* WLC_E_ , starting from 0x8000
+*/
+#define TRACE_FW_AUTH_STARTED			0x8000
+#define TRACE_FW_ASSOC_STARTED			0x8001
+#define TRACE_FW_RE_ASSOC_STARTED		0x8002
+#define TRACE_G_SCAN_STARTED			0x8003
+#define TRACE_ROAM_SCAN_STARTED			0x8004
+#define TRACE_ROAM_SCAN_COMPLETE		0x8005
+#define TRACE_FW_EAPOL_FRAME_TRANSMIT_START	0x8006
+#define TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP	0x8007
+#define TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE	0x8008	/* protocol status */
+#define TRACE_BT_COEX_BT_SCO_START		0x8009
+#define TRACE_BT_COEX_BT_SCO_STOP		0x800a
+#define TRACE_BT_COEX_BT_SCAN_START		0x800b
+#define TRACE_BT_COEX_BT_SCAN_STOP		0x800c
+#define TRACE_BT_COEX_BT_HID_START		0x800d
+#define TRACE_BT_COEX_BT_HID_STOP		0x800e
+#define TRACE_ROAM_AUTH_STARTED			0x800f
+/* Event ID for NAN, start from 0x9000 */
+#define TRACE_NAN_CLUSTER_STARTED               0x9000
+#define TRACE_NAN_CLUSTER_JOINED                0x9001
+#define TRACE_NAN_CLUSTER_MERGED                0x9002
+#define TRACE_NAN_ROLE_CHANGED                  0x9003
+#define TRACE_NAN_SCAN_COMPLETE                 0x9004
+#define TRACE_NAN_STATUS_CHNG                   0x9005
+
+/* Parameters of wifi logger events are TLVs */
+/* Event parameters tags are defined as: */
+#define TRACE_TAG_VENDOR_SPECIFIC	0 /* takes a byte stream as parameter */
+#define TRACE_TAG_BSSID			1 /* takes a 6-byte MAC address as parameter */
+#define TRACE_TAG_ADDR			2 /* takes a 6-byte MAC address as parameter */
+#define TRACE_TAG_SSID			3 /* takes a 32-byte SSID as parameter */
+#define TRACE_TAG_STATUS		4 /* takes an integer as parameter */
+#define TRACE_TAG_CHANNEL_SPEC		5 /* takes one or more wifi_channel_spec as */
+					  /* parameter */
+#define TRACE_TAG_WAKE_LOCK_EVENT	6 /* takes a wake_lock_event struct as parameter */
+#define TRACE_TAG_ADDR1			7 /* takes a 6-byte MAC address as parameter */
+#define TRACE_TAG_ADDR2			8 /* takes a 6-byte MAC address as parameter */
+#define TRACE_TAG_ADDR3			9 /* takes a 6-byte MAC address as parameter */
+#define TRACE_TAG_ADDR4			10 /* takes a 6-byte MAC address as parameter */
+#define TRACE_TAG_TSF			11 /* takes a 64-bit TSF value as parameter */
+#define TRACE_TAG_IE			12 /* takes one or more specific 802.11 IEs as */
+					   /* parameter; IEs are in turn indicated in */
+					   /* TLV format as per the 802.11 spec */
+#define TRACE_TAG_INTERFACE		13 /* takes an interface name as parameter */
+#define TRACE_TAG_REASON_CODE		14 /* takes a reason code as per 802.11 */
+					   /* as parameter */
+#define TRACE_TAG_RATE_MBPS		15 /* takes a wifi rate in units of 0.5 Mbps */
+#define TRACE_TAG_REQUEST_ID		16 /* take an integer as parameter */
+#define TRACE_TAG_BUCKET_ID		17 /* take an integer as parameter */
+#define TRACE_TAG_GSCAN_PARAMS		18 /* takes a wifi_scan_cmd_params struct as parameter */
+#define TRACE_TAG_GSCAN_CAPABILITIES	19 /* takes a wifi_gscan_capabilities struct as parameter */
+#define TRACE_TAG_SCAN_ID		20 /* take an integer as parameter */
+#define TRACE_TAG_RSSI			21 /* take an integer as parameter */
+#define TRACE_TAG_CHANNEL		22 /* take an integer as parameter */
+#define TRACE_TAG_LINK_ID		23 /* take an integer as parameter */
+#define TRACE_TAG_LINK_ROLE		24 /* take an integer as parameter */
+#define TRACE_TAG_LINK_STATE		25 /* take an integer as parameter */
+#define TRACE_TAG_LINK_TYPE		26 /* take an integer as parameter */
+#define TRACE_TAG_TSCO			27 /* take an integer as parameter */
+#define TRACE_TAG_RSCO			28 /* take an integer as parameter */
+#define TRACE_TAG_EAPOL_MESSAGE_TYPE	29 /* take an integer as parameter */
+					   /* M1-1, M2-2, M3-3, M4-4 */
+
+typedef union {
+	struct {
+		uint16 event:	16;
+		uint16 version:	16;
+	};
+	uint32 t;
+} wl_event_log_id_ver_t;
+
+#endif	/* _WL_DIAG_H */
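
A short sketch (not part of this patch) checking how the DIAG_VERSION macro above packs the three 4-bit version fields: with major=1, minor=0, micro=0 the packed value is (1 << 8) | (0 << 4) | 0 = 0x100. The EX_ macro name is an assumption for this example.

#include <assert.h>

#define EX_DIAG_VERSION(major, minor, micro) \
	(((micro) & 0xF) | (((minor) & 0xF) << 4) | (((major) & 0xF) << 8))

static inline void example_diag_version_check(void)
{
	assert(EX_DIAG_VERSION(1, 0, 0) == 0x100);	/* matches DIAG_VERSION above */
}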
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
new file mode 100644
index 0000000..e7c005c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
@@ -0,0 +1,90 @@
+/*
+ * HND arm trap handling.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_armtrap.h 545867 2015-04-01 22:45:19Z $
+ */
+
+#ifndef	_hnd_armtrap_h_
+#define	_hnd_armtrap_h_
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define	TRAP_STRIDE	4
+#define FIRST_TRAP	TR_RST
+#define LAST_TRAP	(TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_7M__)
+#define	MAX_TRAP_TYPE	(TR_ISR + ARMCM3_NUMINTS)
+#endif	/* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define	TR_TYPE		0x00
+#define	TR_EPC		0x04
+#define	TR_CPSR		0x08
+#define	TR_SPSR		0x0c
+#define	TR_REGS		0x10
+#define	TR_REG(n)	(TR_REGS + (n) * 4)
+#define	TR_SP		TR_REG(13)
+#define	TR_LR		TR_REG(14)
+#define	TR_PC		TR_REG(15)
+
+#define	TRAP_T_SIZE	80
+#define ASSERT_TRAP_SVC_NUMBER	255
+
+#ifndef	_LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+	uint32		type;
+	uint32		epc;
+	uint32		cpsr;
+	uint32		spsr;
+	uint32		r0;	/* a1 */
+	uint32		r1;	/* a2 */
+	uint32		r2;	/* a3 */
+	uint32		r3;	/* a4 */
+	uint32		r4;	/* v1 */
+	uint32		r5;	/* v2 */
+	uint32		r6;	/* v3 */
+	uint32		r7;	/* v4 */
+	uint32		r8;	/* v5 */
+	uint32		r9;	/* sb/v6 */
+	uint32		r10;	/* sl/v7 */
+	uint32		r11;	/* fp/v8 */
+	uint32		r12;	/* ip */
+	uint32		r13;	/* sp */
+	uint32		r14;	/* lr */
+	uint32		pc;	/* r15 */
+} trap_t;
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+#endif	/* _hnd_armtrap_h_ */
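
An illustrative check (not part of this patch) that the assembly offsets above line up with the C layout of trap_t: TR_SP = TR_REGS + 13 * 4 = 0x44, which is the byte offset of r13 once the four leading 32-bit fields (type/epc/cpsr/spsr) are accounted for. The struct and function names are assumptions for this example.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct example_trap {
	uint32_t type, epc, cpsr, spsr;
	uint32_t r[15];		/* r0..r14 */
	uint32_t pc;		/* r15 */
};

static inline void example_trap_offset_check(void)
{
	assert(offsetof(struct example_trap, r[13]) == 0x10 + 13 * 4);	/* TR_SP = 0x44 */
	assert(offsetof(struct example_trap, pc) == 0x10 + 15 * 4);	/* TR_PC = 0x4c */
	assert(sizeof(struct example_trap) == 80);			/* TRAP_T_SIZE */
}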
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_cons.h b/drivers/net/wireless/bcmdhd/include/hnd_cons.h
new file mode 100644
index 0000000..3470d6a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_cons.h
@@ -0,0 +1,84 @@
+/*
+ * Console support for RTE - for host use only.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_cons.h 568961 2015-07-06 18:14:49Z $
+ */
+#ifndef	_hnd_cons_h_
+#define	_hnd_cons_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+#define CBUF_LEN	(128)
+
+#if defined(BCM_BIG_LOG)
+#define LOG_BUF_LEN	(16 * 1024)
+#else
+#define LOG_BUF_LEN	1024
+#endif
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+#undef RWL_MAX_DATA_LEN
+#undef CBUF_LEN
+#undef LOG_BUF_LEN
+#define RWL_MAX_DATA_LEN (4 * 1024 + 8)
+#define CBUF_LEN	(RWL_MAX_DATA_LEN + 64)
+#define LOG_BUF_LEN (16 * 1024)
+#endif
+
+typedef struct {
+	uint32		buf;		/* Can't be pointer on (64-bit) hosts */
+	uint		buf_size;
+	uint		idx;
+	uint		out_idx;	/* output index */
+} hnd_log_t;
+
+typedef struct {
+	/* Virtual UART
+	 *   When there is no UART (e.g. Quickturn), the host should write a complete
+	 *   input line directly into cbuf and then write the length into vcons_in.
+	 *   This may also be used when there is a real UART (at risk of conflicting with
+	 *   the real UART).  vcons_out is currently unused.
+	 */
+	volatile uint	vcons_in;
+	volatile uint	vcons_out;
+
+	/* Output (logging) buffer
+	 *   Console output is written to a ring buffer log_buf at index log_idx.
+	 *   The host may read the output when it sees log_idx advance.
+	 *   Output will be lost if the output wraps around faster than the host polls.
+	 */
+	hnd_log_t	log;
+
+	/* Console input line buffer
+	 *   Characters are read one at a time into cbuf until <CR> is received, then
+	 *   the buffer is processed as a command line.  Also used for virtual UART.
+	 */
+	uint		cbuf_idx;
+	char		cbuf[CBUF_LEN];
+} hnd_cons_t;
+
+#endif /* _hnd_cons_h_ */
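
A hedged sketch (not part of this patch) of how a host-side debugger might drain the console ring buffer described by hnd_log_t: it consumes bytes until its read index catches up with the dongle's write index, wrapping at buf_size. The read_dongle_mem() accessor and the struct copy are assumptions; real DHD code reads these fields over the bus.

#include <stdint.h>
#include <stdio.h>

struct example_hnd_log {
	uint32_t buf;       /* dongle address of the ring buffer */
	uint32_t buf_size;  /* size of the ring buffer in bytes */
	uint32_t idx;       /* write index advanced by the dongle */
	uint32_t out_idx;   /* last index consumed by the host */
};

extern int read_dongle_mem(uint32_t addr, void *dst, uint32_t len); /* assumed accessor */

static void example_drain_console(struct example_hnd_log *log)
{
	char c;

	/* Consume bytes until we catch up with the dongle's write index. */
	while (log->out_idx != log->idx) {
		if (read_dongle_mem(log->buf + log->out_idx, &c, 1) != 0)
			break;
		putchar(c);
		log->out_idx = (log->out_idx + 1) % log->buf_size;	/* wrap around */
	}
}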
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_debug.h b/drivers/net/wireless/bcmdhd/include/hnd_debug.h
new file mode 100644
index 0000000..239e596
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_debug.h
@@ -0,0 +1,206 @@
+/*
+ * HND Run Time Environment debug info area
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_debug.h 678890 2017-01-11 11:48:36Z $
+ */
+
+#ifndef	_HND_DEBUG_H
+#define	_HND_DEBUG_H
+
+/* Magic number at a magic location to find HND_DEBUG pointers */
+#define HND_DEBUG_PTR_PTR_MAGIC 0x50504244  	/* DBPP */
+
+/* Magic number at a magic location to find RAM size */
+#define HND_RAMSIZE_PTR_MAGIC	0x534d4152	/* RAMS */
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* Includes only when building dongle code */
+
+
+/* We use explicit sizes here since this gets included from different
+ * systems.  The sizes must be the size of the creating system
+ * (currently 32-bit ARM) since this is gleaned from a dump.
+ */
+
+#ifdef FWID
+extern uint32 gFWID;
+#endif
+
+/* Define pointers for use on other systems */
+#define _HD_EVLOG_P	uint32
+#define _HD_CONS_P	uint32
+#define _HD_TRAP_P	uint32
+
+/* This struct is placed at a well-defined location, and contains a pointer to hnd_debug. */
+typedef struct hnd_debug_ptr {
+	uint32	magic;
+
+	/* RAM address of 'hnd_debug'. For legacy versions of this struct, it is a 0-indexed
+	 * offset instead.
+	 */
+	uint32	hnd_debug_addr;
+
+	/* Base address of RAM. This field does not exist for legacy versions of this struct.  */
+	uint32	ram_base_addr;
+
+} hnd_debug_ptr_t;
+
+/* This struct is placed at a well-defined location. */
+typedef struct hnd_ramsize_ptr {
+	uint32	magic;			/* 'RAMS' */
+
+	/* RAM size information. */
+	uint32	ram_size;
+} hnd_ramsize_ptr_t;
+
+#define  HND_DEBUG_EPIVERS_MAX_STR_LEN	32
+#define  HND_DEBUG_BUILD_SIGNATURE_FWID_LEN	17
+#define  HND_DEBUG_BUILD_SIGNATURE_VER_LEN	22
+typedef struct hnd_debug {
+	uint32	magic;
+#define HND_DEBUG_MAGIC 0x47424544	/* 'DEBG' */
+
+	uint32	version;		/* Debug struct version */
+#define HND_DEBUG_VERSION 1
+
+	uint32	fwid;			/* 4 bytes of fw info */
+	char	epivers[HND_DEBUG_EPIVERS_MAX_STR_LEN];
+
+	_HD_TRAP_P trap_ptr;		/* trap_t data struct */
+	_HD_CONS_P console;		/* Console  */
+
+	uint32	ram_base;
+	uint32	ram_size;
+
+	uint32	rom_base;
+	uint32	rom_size;
+
+	_HD_EVLOG_P event_log_top;
+
+	/* To populate the fields below,
+	 * INCLUDE_BUILD_SIGNATURE_IN_SOCRAM needs to be enabled
+	 */
+	char fwid_signature[HND_DEBUG_BUILD_SIGNATURE_FWID_LEN]; /* fwid=<FWID> */
+	char ver_signature[HND_DEBUG_BUILD_SIGNATURE_VER_LEN]; /* ver=abc.abc.abc.abc */
+
+} hnd_debug_t;
+
+/*
+ * timeval_t and prstatus_t are copies of the Linux structures.
+ * Included here because we need the definitions for the target processor
+ * (32 bits) and not the definition on the host this is running on
+ * (which could be 64 bits).
+ */
+
+typedef struct {		/* Time value with microsecond resolution */
+	uint32 tv_sec;	/* Seconds                                   */
+	uint32 tv_usec;	/* Microseconds                              */
+} timeval_t;
+
+
+/* Linux/ARM 32 prstatus for notes section */
+typedef struct prstatus {
+	  int32 si_signo; 	/* Signal number */
+	  int32 si_code; 	/* Extra code */
+	  int32 si_errno; 	/* Errno */
+	  uint16 pr_cursig; 	/* Current signal.  */
+	  uint16 unused;
+	  uint32 pr_sigpend;	/* Set of pending signals.  */
+	  uint32 pr_sighold;	/* Set of held signals.  */
+	  uint32 pr_pid;
+	  uint32 pr_ppid;
+	  uint32 pr_pgrp;
+	  uint32 pr_sid;
+	  timeval_t pr_utime;	/* User time.  */
+	  timeval_t pr_stime;	/* System time.  */
+	  timeval_t pr_cutime;	/* Cumulative user time.  */
+	  timeval_t pr_cstime;	/* Cumulative system time.  */
+	  uint32 uregs[18];
+	  int32 pr_fpvalid;	/* True if math co-processor is being used.  */
+} prstatus_t;
+
+/* for mkcore and other utilities use */
+#define DUMP_INFO_PTR_PTR_0   0x74
+#define DUMP_INFO_PTR_PTR_1   0x78
+#define DUMP_INFO_PTR_PTR_2   0xf0
+#define DUMP_INFO_PTR_PTR_3   0xf8
+#define DUMP_INFO_PTR_PTR_4   0x874
+#define DUMP_INFO_PTR_PTR_5   0x878
+#define DUMP_INFO_PTR_PTR_END 0xffffffff
+#define DUMP_INFO_PTR_PTR_LIST	DUMP_INFO_PTR_PTR_0, \
+		DUMP_INFO_PTR_PTR_1,					\
+		DUMP_INFO_PTR_PTR_2,					\
+		DUMP_INFO_PTR_PTR_3,					\
+		DUMP_INFO_PTR_PTR_4,					\
+		DUMP_INFO_PTR_PTR_5,					\
+		DUMP_INFO_PTR_PTR_END
+
+/* for DHD driver to get dongle ram size info. */
+#define RAMSIZE_PTR_PTR_0	0x6c
+#define RAMSIZE_PTR_PTR_END	0xffffffff
+#define RAMSIZE_PTR_PTR_LIST	RAMSIZE_PTR_PTR_0, \
+				RAMSIZE_PTR_PTR_END
+
+typedef struct hnd_ext_trap_hdr {
+	uint8 version;    /* Extended trap version info */
+	uint8 reserved;   /* currently unused */
+	uint16 len;       /* Length of data excluding this header */
+	uint8 data[];     /* TLV data */
+} hnd_ext_trap_hdr_t;
+
+#define TAG_TRAP_SIGNATURE       1  /* Processor register dumps */
+#define TAG_TRAP_STACK           2  /* Processor stack dump (possible code locations) */
+#define TAG_TRAP_MEMORY          3  /* Memory subsystem dump */
+#define TAG_TRAP_DEEPSLEEP       4  /* Deep sleep health check failures */
+#define TAG_TRAP_PSM_WD          5  /* PSM watchdog information */
+#define TAG_TRAP_PHY             6  /* Phy related issues */
+#define TAG_TRAP_BUS             7  /* Bus level issues */
+#define TAG_TRAP_MAC             8  /* Mac level issues */
+#define TAG_TRAP_BACKPLANE       9  /* Backplane related errors */
+
+typedef struct hnd_ext_trap_bp_err
+{
+	uint32 error;
+	uint32 coreid;
+	uint32 baseaddr;
+	uint32 ioctrl;
+	uint32 iostatus;
+	uint32 resetctrl;
+	uint32 resetstatus;
+	uint32 errlogctrl;
+	uint32 errlogdone;
+	uint32 errlogstatus;
+	uint32 errlogaddrlo;
+	uint32 errlogaddrhi;
+	uint32 errlogid;
+	uint32 errloguser;
+	uint32 errlogflags;
+} hnd_ext_trap_bp_err_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _HND_DEBUG_H */
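
An illustrative sketch (not part of this patch) of how a dump utility might use the DUMP_INFO_PTR_PTR_* candidate offsets and HND_DEBUG_PTR_PTR_MAGIC above to locate the hnd_debug area in a SoC RAM image. The function, its arguments, and reading from an in-memory copy of the dump are assumptions for this example.

#include <stdint.h>
#include <string.h>

#define EX_HND_DEBUG_PTR_PTR_MAGIC 0x50504244u	/* 'DBPP' */

static uint32_t example_find_hnd_debug(const uint8_t *ram, uint32_t ram_len)
{
	static const uint32_t candidates[] = { 0x74, 0x78, 0xf0, 0xf8, 0x874, 0x878 };
	uint32_t i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		uint32_t off = candidates[i];
		uint32_t magic, addr;

		if (off + 8 > ram_len)
			continue;
		memcpy(&magic, ram + off, sizeof(magic));
		if (magic != EX_HND_DEBUG_PTR_PTR_MAGIC)
			continue;
		/* Second word of hnd_debug_ptr holds the RAM address of hnd_debug. */
		memcpy(&addr, ram + off + 4, sizeof(addr));
		return addr;
	}
	return 0;	/* not found */
}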
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
new file mode 100644
index 0000000..e5d0eaa
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
@@ -0,0 +1,225 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktpool.h 613891 2016-01-20 10:05:44Z $
+ */
+
+#ifndef _hnd_pktpool_h_
+#define _hnd_pktpool_h_
+
+#include <osl_ext.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* mutex macros for thread safety */
+#ifdef HND_PKTPOOL_THREAD_SAFE
+#define HND_PKTPOOL_MUTEX_DECL(mutex)		OSL_EXT_MUTEX_DECL(mutex)
+#else
+#define HND_PKTPOOL_MUTEX_DECL(mutex)
+#endif
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool)		((pool) && (pool)->inited)
+#else /* BCMPKTPOOL */
+#define POOL_ENAB(pool)		0
+#endif /* BCMPKTPOOL */
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX		40
+#endif /* PKTPOOL_LEN_MAX */
+#define PKTPOOL_CB_MAX		3
+#define PKTPOOL_CB_MAX_AVL	4
+
+
+/* forward declaration */
+struct pktpool;
+
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+	pktpool_cb_t cb;
+	void *arg;
+} pktpool_cbinfo_t;
+
+/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */
+typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2);
+typedef struct {
+	pktpool_cb_extn_t cb;
+	void *arg;
+} pktpool_cbextn_info_t;
+
+
+#ifdef BCMDBG_POOL
+/* pkt pool debug states */
+#define POOL_IDLE	0
+#define POOL_RXFILL	1
+#define POOL_RXDH	2
+#define POOL_RXD11	3
+#define POOL_TXDH	4
+#define POOL_TXD11	5
+#define POOL_AMPDU	6
+#define POOL_TXENQ	7
+
+typedef struct {
+	void *p;
+	uint32 cycles;
+	uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+	uint8 txdh;	/* tx to host */
+	uint8 txd11;	/* tx to d11 */
+	uint8 enq;	/* waiting in q */
+	uint8 rxdh;	/* rx from host */
+	uint8 rxd11;	/* rx from d11 */
+	uint8 rxfill;	/* dma_rxfill */
+	uint8 idle;	/* avail in pool */
+} pktpool_stats_t;
+#endif /* BCMDBG_POOL */
+
+typedef struct pktpool {
+	bool inited;            /**< pktpool_init was successful */
+	uint8 type;             /**< type of lbuf: basic, frag, etc */
+	uint8 id;               /**< pktpool ID:  index in registry */
+	bool istx;              /**< direction: transmit or receive data path */
+	HND_PKTPOOL_MUTEX_DECL(mutex)	/**< thread-safe mutex */
+
+	void * freelist;        /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
+	uint16 avail;           /**< number of packets in pool's free list */
+	uint16 len;             /**< number of packets managed by pool */
+	uint16 maxlen;          /**< maximum size of pool <= PKTPOOL_LEN_MAX */
+	uint16 plen;            /**< size of pkt buffer in [bytes], excluding lbuf|lbuf_frag */
+
+	bool empty;
+	uint8 cbtoggle;
+	uint8 cbcnt;
+	uint8 ecbcnt;
+	uint8 emptycb_disable;	/**< Value of type enum pktpool_empty_cb_state */
+	pktpool_cbinfo_t *availcb_excl;
+	pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL];
+	pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+	pktpool_cbextn_info_t cbext;	/**< PCIe SPLITRX related */
+	pktpool_cbextn_info_t rxcplidfn;
+#ifdef BCMDBG_POOL
+	uint8 dbg_cbcnt;
+	pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+	uint16 dbg_qlen;
+	pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+	pktpool_cbinfo_t dmarxfill;
+} pktpool_t;
+
+
+pktpool_t *get_pktpools_registry(int id);
+
+/* Incarnate a pktpool registry. On success returns total_pools. */
+extern int pktpool_attach(osl_t *osh, uint32 total_pools);
+extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1);
+extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg);
+extern void pktpool_invoke_dmarxfill(pktpool_t *pktp);
+extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+
+#define POOLPTR(pp)         ((pktpool_t *)(pp))
+#define POOLID(pp)          (POOLPTR(pp)->id)
+
+#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid))
+
+#define pktpool_len(pp)     (POOLPTR(pp)->len)   /**< returns packet length in [bytes] */
+#define pktpool_avail(pp)   (POOLPTR(pp)->avail)
+#define pktpool_plen(pp)    (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp)  (POOLPTR(pp)->maxlen)
+
+
+/*
+ * ----------------------------------------------------------------------------
+ * A pool ID is assigned with a pkt pool during pool initialization. This is
+ * done by maintaining a registry of all initialized pools, and the registry
+ * index at which the pool is registered is used as the pool's unique ID.
+ * ID 0 is reserved and is used to signify an invalid pool ID.
+ * All packets henceforth allocated from a pool will be tagged with the pool's
+ * unique ID. Packets allocated from the heap will use the reserved ID = 0.
+ * Packets with non-zero pool id signify that they were allocated from a pool.
+ * A maximum of 15 pools are supported, allowing a 4-bit pool ID to be used
+ * in place of a 32-bit pool pointer in each packet.
+ * ----------------------------------------------------------------------------
+ */
+#define PKTPOOL_INVALID_ID          (0)
+#define PKTPOOL_MAXIMUM_ID          (15)
+
+/* Registry of pktpool(s) */
+/* Pool ID to/from Pool Pointer converters */
+#define PKTPOOL_ID2PTR(id)          (get_pktpools_registry(id))
+#define PKTPOOL_PTR2ID(pp)          (POOLID(pp))
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif /* BCMDBG_POOL */
+
+#ifdef BCMPKTPOOL
+#define SHARED_POOL		(pktpool_shared)
+extern pktpool_t *pktpool_shared;
+#ifdef BCMFRAGPOOL
+#define SHARED_FRAG_POOL	(pktpool_shared_lfrag)
+extern pktpool_t *pktpool_shared_lfrag;
+#endif
+
+/** PCIe SPLITRX related */
+#define SHARED_RXFRAG_POOL	(pktpool_shared_rxlfrag)
+extern pktpool_t *pktpool_shared_rxlfrag;
+
+int hnd_pktpool_init(osl_t *osh);
+int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal);
+void hnd_pktpool_refill(bool minimal);
+#else /* BCMPKTPOOL */
+#define SHARED_POOL		((struct pktpool *)NULL)
+#endif /* BCMPKTPOOL */
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif /* _hnd_pktpool_h_ */
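
A self-contained sketch (not part of this patch) of the pool-ID scheme described in the registry comment above: a small table maps non-zero IDs to pool pointers, with ID 0 reserved as "invalid" (heap-allocated packets), mirroring PKTPOOL_INVALID_ID, PKTPOOL_ID2PTR and PKTPOOL_PTR2ID. All names here are assumptions for this example.

#include <stddef.h>

#define EX_MAX_POOLS 15	/* mirrors PKTPOOL_MAXIMUM_ID */

struct example_pool { int dummy; };

static struct example_pool *example_registry[EX_MAX_POOLS + 1];	/* slot 0 stays unused */

/* Register a pool and return its non-zero ID, or 0 if the registry is full. */
static int example_pool_register(struct example_pool *pp)
{
	int id;

	for (id = 1; id <= EX_MAX_POOLS; id++) {
		if (example_registry[id] == NULL) {
			example_registry[id] = pp;
			return id;	/* packets from this pool would carry this ID */
		}
	}
	return 0;	/* invalid ID: treated as "allocated from the heap" */
}

static struct example_pool *example_id2ptr(int id)
{
	return (id >= 1 && id <= EX_MAX_POOLS) ? example_registry[id] : NULL;
}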
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktq.h b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
new file mode 100644
index 0000000..ad778da
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
@@ -0,0 +1,284 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktq.h 641285 2016-06-02 02:33:55Z $
+ */
+
+#ifndef _hnd_pktq_h_
+#define _hnd_pktq_h_
+
+#include <osl_ext.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* mutex macros for thread safety */
+#ifdef HND_PKTQ_THREAD_SAFE
+#define HND_PKTQ_MUTEX_DECL(mutex)		OSL_EXT_MUTEX_DECL(mutex)
+#else
+#define HND_PKTQ_MUTEX_DECL(mutex)
+#endif
+
+/* osl multi-precedence packet queue */
+#define PKTQ_LEN_MAX            0xFFFF  /* Max uint16 65535 packets */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT        128	/* Max 128 packets */
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC           16	/* Maximum precedence levels */
+#endif
+
+typedef struct pktq_prec {
+	void *head;     /**< first packet to dequeue */
+	void *tail;     /**< last packet to dequeue */
+	uint16 len;     /**< number of queued packets */
+	uint16 max;     /**< maximum number of queued packets */
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+typedef struct {
+	uint32 requested;    /**< packets requested to be stored */
+	uint32 stored;	     /**< packets stored */
+	uint32 saved;	     /**< packets saved,
+	                            because a lower priority queue has given away one packet
+	                      */
+	uint32 selfsaved;    /**< packets saved,
+	                            because an older packet from the same queue has been dropped
+	                      */
+	uint32 full_dropped; /**< packets dropped,
+	                            because pktq is full with higher precedence packets
+	                      */
+	uint32 dropped;      /**< packets dropped because pktq per that precedence is full */
+	uint32 sacrificed;   /**< packets dropped,
+	                            in order to save one from a queue of higher priority
+	                      */
+	uint32 busy;         /**< packets dropped because of hardware/transmission error */
+	uint32 retry;        /**< packets re-sent because they were not received */
+	uint32 ps_retry;     /**< packets retried again prior to moving to power save mode */
+	uint32 suppress;     /**< packets which were suppressed and not transmitted */
+	uint32 retry_drop;   /**< packets finally dropped after retry limit */
+	uint32 max_avail;    /**< the high-water mark of the queue capacity for packets -
+	                            goes to zero as queue fills
+	                      */
+	uint32 max_used;     /**< the high-water mark of the queue utilisation for packets -
+						        increases with use ('inverse' of max_avail)
+				          */
+	uint32 queue_capacity; /**< the maximum capacity of the queue */
+	uint32 rtsfail;        /**< count of rts attempts that failed to receive cts */
+	uint32 acked;          /**< count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /**< running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /**< running total of primary phy rate of all packets */
+	uint32 throughput;     /**< actual data transferred successfully */
+	uint32 airtime;        /**< cumulative total medium access delay in microseconds */
+	uint32  _logtime;      /**< timestamp of last counter clear  */
+} pktq_counters_t;
+
+#define PKTQ_LOG_COMMON \
+	uint32			pps_time;	/**< time spent in ps pretend state */ \
+	uint32                  _prec_log;
+
+typedef struct {
+	PKTQ_LOG_COMMON
+	pktq_counters_t*        _prec_cnt[PKTQ_MAX_PREC];     /**< Counters per queue  */
+} pktq_log_t;
+#else
+typedef struct pktq_log pktq_log_t;
+#endif /* PKTQ_LOG */
+
+
+#define PKTQ_COMMON	\
+	HND_PKTQ_MUTEX_DECL(mutex)							\
+	pktq_log_t *pktqlog;								\
+	uint16 num_prec;        /**< number of precedences in use */			\
+	uint16 hi_prec;         /**< rapid dequeue hint (>= highest non-empty prec) */	\
+	uint16 max;             /**< total max packets */				\
+	uint16 len;             /**< total number of packets */
+
+/* multi-priority pkt queue */
+struct pktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+/* simple, non-priority pkt queue */
+struct spktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec)        for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+/* fn(pkt, arg).  return true if pkt belongs to bsscfg */
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+/*
+ * pktq filter support
+ */
+
+/* filter function return values */
+typedef enum {
+	PKT_FILTER_NOACTION = 0,    /**< restore the pkt to its position in the queue */
+	PKT_FILTER_DELETE = 1,      /**< delete the pkt */
+	PKT_FILTER_REMOVE = 2,      /**< do not restore the pkt to the queue,
+	                             *   filter fn has taken ownership of the pkt
+	                             */
+} pktq_filter_result_t;
+
+/**
+ * Caller supplied filter function to pktq_pfilter(), pktq_filter().
+ * Function filter(ctx, pkt) is called with its ctx pointer on each pkt in the
+ * pktq.  When the filter function is called, the supplied pkt will have been
+ * unlinked from the pktq.  The filter function returns a pktq_filter_result_t
+ * result specifying the action pktq_filter()/pktq_pfilter() should take for
+ * the pkt.
+ * Here are the actions taken by pktq_filter/pfilter() based on the supplied
+ * filter function's return value:
+ *
+ * PKT_FILTER_NOACTION - The filter will re-link the pkt at its
+ *     previous location.
+ *
+ * PKT_FILTER_DELETE - The filter will not relink the pkt and will
+ *     call the user supplied defer_free_pkt fn on the packet.
+ *
+ * PKT_FILTER_REMOVE - The filter will not relink the pkt. The supplied
+ *     filter fn took ownership (or deleted) the pkt.
+ *
+ * WARNING: pkts inserted by the user (in pkt_filter and/or flush callbacks
+ * and chains) in the prec queue will not be seen by the filter, and the prec
+ * queue will temporarily be removed from the pktq. As a side effect,
+ * pktq_len() on the queue will not reflect the correct number of packets
+ * while the filter is running.
+ */
+typedef pktq_filter_result_t (*pktq_filter_t)(void* ctx, void* pkt);
+
+/* The defer_free_pkt callback is invoked when the pktq_filter callback
+ * returns a PKT_FILTER_DELETE decision, which allows the user to dispose of
+ * the packet appropriately based on the situation (free the packet or
+ * save it in a temporary queue, etc.).
+ */
+typedef void (*defer_free_pkt_fn_t)(void *ctx, void *pkt);
+
+/* The flush_free_pkt callback is invoked when all packets in the pktq
+ * are processed.
+ */
+typedef void (*flush_free_pkt_fn_t)(void *ctx);
+
+/* filter a pktq, using the caller supplied filter/deposition/flush functions */
+extern void  pktq_filter(struct pktq *pq, pktq_filter_t fn, void* arg,
+	defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
+/* filter a particular precedence in pktq, using the caller supplied filter function */
+extern void  pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fn, void* arg,
+	defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
+
+/* operations on a specific precedence in packet queue */
+
+#define pktq_psetmax(pq, prec, _max)	((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec)		((pq)->q[prec].max)
+#define pktq_plen(pq, prec)		((pq)->q[prec].len)
+#define pktq_pempty(pq, prec)		((pq)->q[prec].len == 0)
+#define pktq_ppeek(pq, prec)		((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec)	((pq)->q[prec].tail)
+#ifdef HND_PKTQ_THREAD_SAFE
+extern int pktq_pavail(struct pktq *pq, int prec);
+extern bool pktq_pfull(struct pktq *pq, int prec);
+#else
+#define pktq_pavail(pq, prec)	((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec)	((pq)->q[prec].len >= (pq)->q[prec].max)
+#endif	/* HND_PKTQ_THREAD_SAFE */
+
+extern void  pktq_append(struct pktq *pq, int prec, struct spktq *list);
+extern void  pktq_prepend(struct pktq *pq, int prec, struct spktq *list);
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+/* Remove a specified packet from its queue */
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+/* operations on a set of precedences in packet queue */
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_len(pq)		((int)(pq)->len)
+#define pktq_max(pq)		((int)(pq)->max)
+#define pktq_empty(pq)		((pq)->len == 0)
+#ifdef HND_PKTQ_THREAD_SAFE
+extern int pktq_avail(struct pktq *pq);
+extern bool pktq_full(struct pktq *pq);
+#else
+#define pktq_avail(pq)		((int)((pq)->max - (pq)->len))
+#define pktq_full(pq)		((pq)->len >= (pq)->max)
+#endif	/* HND_PKTQ_THREAD_SAFE */
+
+/* operations for single precedence queues */
+#define pktenq(pq, p)		pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p)	pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq)		pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq)		pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqflush(osh, pq, dir)	pktq_pflush(osh, ((struct pktq *)(void *)pq), 0, dir)
+#define pktqinit(pq, len)	pktq_init(((struct pktq *)(void *)pq), 1, len)
+#define pktqdeinit(pq)		pktq_deinit((struct pktq *)(void *)pq)
+#define pktqavail(pq)		pktq_avail((struct pktq *)(void *)pq)
+#define pktqfull(pq)		pktq_full((struct pktq *)(void *)pq)
+#define pktqfilter(pq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \
+	pktq_pfilter((struct pktq *)pq, 0, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx)
+
+/* wrap macros for modules in components use */
+#define spktqinit(pq, max_pkts) pktqinit(pq, max_pkts)
+#define spktenq(pq, p)          pktenq(pq, p)
+#define spktdeq(pq)             pktdeq(pq)
+
+extern bool pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern bool pktq_deinit(struct pktq *pq);
+
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
+
+/* prec_out may be NULL if caller is not interested in return value */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+
+/* flush pktq */
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir);
+/* Empty the queue at a particular precedence level */
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _hnd_pktq_h_ */
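
A hedged sketch (not part of this patch) showing the shape of a pktq_filter_t callback as documented above: it inspects one unlinked packet and returns a result telling pktq_filter()/pktq_pfilter() whether to re-link, delete, or hand over the packet. The local enum mirrors pktq_filter_result_t; the example_pkt_ifidx() helper and context struct are assumptions for this example.

struct example_filter_ctx {
	int ifidx;	/* interface whose packets should be dropped */
};

enum example_filter_result {
	EX_FILTER_NOACTION = 0,	/* mirrors PKT_FILTER_NOACTION */
	EX_FILTER_DELETE   = 1,	/* mirrors PKT_FILTER_DELETE */
	EX_FILTER_REMOVE   = 2	/* mirrors PKT_FILTER_REMOVE */
};

extern int example_pkt_ifidx(void *pkt);	/* assumed helper */

/* Signature matches: typedef pktq_filter_result_t (*pktq_filter_t)(void *ctx, void *pkt); */
static enum example_filter_result example_if_filter(void *ctx, void *pkt)
{
	struct example_filter_ctx *fc = ctx;

	/* Delete packets for the target interface, leave everything else queued. */
	if (example_pkt_ifidx(pkt) == fc->ifidx)
		return EX_FILTER_DELETE;	/* defer_free_pkt callback will see it */
	return EX_FILTER_NOACTION;		/* packet is re-linked where it was */
}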
diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h
new file mode 100644
index 0000000..bfc9166
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -0,0 +1,56 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndpmu.h 657872 2016-09-02 22:17:34Z $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
+#include <sbchipc.h>
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
+extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh);
+extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag);
+extern uint32 si_pmu_dump_pmucap_binary(si_t *sih, uchar *p);
+extern uint32 si_pmu_dump_buf_size_pmucap(si_t *sih);
+extern int si_pmu_wait_for_steady_state(si_t *sih, osl_t *osh, pmuregs_t *pmu);
+#if defined(BCMULP)
+int si_pmu_ulp_register(si_t *sih);
+extern void si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period);
+#endif /* BCMULP */
+extern uint32 si_pmu_get_pmutimer(si_t *sih);
+extern void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask);
+extern bool si_pmu_cap_fast_lpo(si_t *sih);
+extern int si_pmu_fast_lpo_disable(si_t *sih);
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h
new file mode 100644
index 0000000..b35380a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -0,0 +1,327 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndsoc.h 613129 2016-01-17 09:25:52Z $
+ */
+
+#ifndef	_HNDSOC_H
+#define	_HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE		0x00000000	/* Physical SDRAM */
+#define SI_PCI_MEM		0x08000000	/* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ		(64 * 1024 * 1024)
+#define SI_PCI_CFG		0x0c000000	/* Host Mode sb2pcitranslation1 (64 MB) */
+#define	SI_SDRAM_SWAPPED	0x10000000	/* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2		0x80000000	/* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE		0x18000000	/* Enumeration space base */
+#define SI_WRAP_BASE		0x18100000	/* Wrapper space base */
+#define SI_CORE_SIZE		0x1000		/* each core gets 4Kbytes for registers */
+
+#ifndef SI_MAXCORES
+#define	SI_MAXCORES		32		/* NorthStar has more cores */
+#endif /* SI_MAXCORES */
+
+#define	SI_MAXBR		4		/* Max bridges (this is arbitrary, for software
+					 * convenience and could be changed if we
+					 * make any larger chips)
+					 */
+
+#define	SI_FASTRAM		0x19000000	/* On-chip RAM on chips that also have DDR */
+#define	SI_FASTRAM_SWAPPED	0x19800000
+
+#define	SI_FLASH2		0x1c000000	/* Flash Region 2 (region 1 shadowed here) */
+#define	SI_FLASH2_SZ		0x02000000	/* Size of Flash Region 2 */
+#define	SI_ARMCM3_ROM		0x1e000000	/* ARM Cortex-M3 ROM */
+#define	SI_FLASH1		0x1fc00000	/* MIPS Flash Region 1 */
+#define	SI_FLASH1_SZ		0x00400000	/* MIPS Size of Flash Region 1 */
+#define	SI_FLASH_WINDOW		0x01000000	/* Flash XIP Window */
+
+#define SI_NS_NANDFLASH		0x1c000000	/* NorthStar NAND flash base */
+#define SI_NS_NORFLASH		0x1e000000	/* NorthStar NOR flash base */
+#define SI_NS_ROM		0xfffd0000	/* NorthStar ROM */
+#define	SI_NS_FLASH_WINDOW	0x02000000	/* Flash XIP Window */
+
+#define	SI_ARM7S_ROM		0x20000000	/* ARM7TDMI-S ROM */
+#define	SI_ARMCR4_ROM		0x000f0000	/* ARM Cortex-R4 ROM */
+#define	SI_ARMCM3_SRAM2		0x60000000	/* ARM Cortex-M3 SRAM Region 2 */
+#define	SI_ARM7S_SRAM2		0x80000000	/* ARM7TDMI-S SRAM Region 2 */
+#define	SI_ARMCA7_ROM		0x00000000	/* ARM Cortex-A7 ROM */
+#define	SI_ARMCA7_RAM		0x00200000	/* ARM Cortex-A7 RAM */
+#define	SI_ARM_FLASH1		0xffff0000	/* ARM Flash Region 1 */
+#define	SI_ARM_FLASH1_SZ	0x00010000	/* ARM Size of Flash Region 1 */
+
+#define SI_SFLASH		0x14000000
+#define SI_PCI_DMA		0x40000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2		0x80000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ		0x40000000	/* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32		0x00000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), low 32 bits
+						 */
+#define SI_PCIE_DMA_H32		0x80000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+
+#define SI_BCM53573_NANDFLASH	0x30000000	/* 53573 NAND flash base */
+#define SI_BCM53573_NORFLASH	0x1c000000	/* 53573 NOR flash base */
+#define SI_BCM53573_FLASH2_SZ	0x04000000	/* 53573 NOR flash2 size */
+
+#define	SI_BCM53573_NORFLASH_WINDOW	0x01000000	/* only support 16M direct access for
+							 * 3-byte address modes in spi flash
+							 */
+#define	SI_BCM53573_BOOTDEV_MASK	0x3
+#define	SI_BCM53573_BOOTDEV_NOR		0x0
+
+#define SI_BCM53573_NAND_PRE_MASK	0x100	/* 53573 NAND present mask */
+
+#define	SI_BCM53573_DDRTYPE_MASK	0x10
+#define	SI_BCM53573_DDRTYPE_DDR3	0x10
+
+#define	SI_BCM47189_RGMII_VDD_MASK	0x3
+#define	SI_BCM47189_RGMII_VDD_SHIFT	21
+#define	SI_BCM47189_RGMII_VDD_3_3V	0
+#define	SI_BCM47189_RGMII_VDD_2_5V	1
+#define	SI_BCM47189_RGMII_VDD_1_5V	1
+
+#define	SI_BCM53573_LOCKED_CPUPLL	0x1
+
+/* APB bridge code */
+#define	APB_BRIDGE_ID		0x135		/* APB Bridge 0, 1, etc. */
+
+/* core codes */
+#define	NODEV_CORE_ID		0x700		/* Invalid coreid */
+#define	CC_CORE_ID		0x800		/* chipcommon core */
+#define	ILINE20_CORE_ID		0x801		/* iline20 core */
+#define	SRAM_CORE_ID		0x802		/* sram core */
+#define	SDRAM_CORE_ID		0x803		/* sdram core */
+#define	PCI_CORE_ID		0x804		/* pci core */
+#define	MIPS_CORE_ID		0x805		/* mips core */
+#define	ENET_CORE_ID		0x806		/* enet mac core */
+#define	CODEC_CORE_ID		0x807		/* v90 codec core */
+#define	USB_CORE_ID		0x808		/* usb 1.1 host/device core */
+#define	ADSL_CORE_ID		0x809		/* ADSL core */
+#define	ILINE100_CORE_ID	0x80a		/* iline100 core */
+#define	IPSEC_CORE_ID		0x80b		/* ipsec core */
+#define	UTOPIA_CORE_ID		0x80c		/* utopia core */
+#define	PCMCIA_CORE_ID		0x80d		/* pcmcia core */
+#define	SOCRAM_CORE_ID		0x80e		/* internal memory core */
+#define	MEMC_CORE_ID		0x80f		/* memc sdram core */
+#define	OFDM_CORE_ID		0x810		/* OFDM phy core */
+#define	EXTIF_CORE_ID		0x811		/* external interface core */
+#define	D11_CORE_ID		0x812		/* 802.11 MAC core */
+#define	APHY_CORE_ID		0x813		/* 802.11a phy core */
+#define	BPHY_CORE_ID		0x814		/* 802.11b phy core */
+#define	GPHY_CORE_ID		0x815		/* 802.11g phy core */
+#define	MIPS33_CORE_ID		0x816		/* mips3302 core */
+#define	USB11H_CORE_ID		0x817		/* usb 1.1 host core */
+#define	USB11D_CORE_ID		0x818		/* usb 1.1 device core */
+#define	USB20H_CORE_ID		0x819		/* usb 2.0 host core */
+#define	USB20D_CORE_ID		0x81a		/* usb 2.0 device core */
+#define	SDIOH_CORE_ID		0x81b		/* sdio host core */
+#define	ROBO_CORE_ID		0x81c		/* roboswitch core */
+#define	ATA100_CORE_ID		0x81d		/* parallel ATA core */
+#define	SATAXOR_CORE_ID		0x81e		/* serial ATA & XOR DMA core */
+#define	GIGETH_CORE_ID		0x81f		/* gigabit ethernet core */
+#define	PCIE_CORE_ID		0x820		/* pci express core */
+#define	NPHY_CORE_ID		0x821		/* 802.11n 2x2 phy core */
+#define	SRAMC_CORE_ID		0x822		/* SRAM controller core */
+#define	MINIMAC_CORE_ID		0x823		/* MINI MAC/phy core */
+#define	ARM11_CORE_ID		0x824		/* ARM 1176 core */
+#define	ARM7S_CORE_ID		0x825		/* ARM7tdmi-s core */
+#define	LPPHY_CORE_ID		0x826		/* 802.11a/b/g phy core */
+#define	PMU_CORE_ID		0x827		/* PMU core */
+#define	SSNPHY_CORE_ID		0x828		/* 802.11n single-stream phy core */
+#define	SDIOD_CORE_ID		0x829		/* SDIO device core */
+#define	ARMCM3_CORE_ID		0x82a		/* ARM Cortex M3 core */
+#define	HTPHY_CORE_ID		0x82b		/* 802.11n 4x4 phy core */
+#define	MIPS74K_CORE_ID		0x82c		/* mips 74k core */
+#define	GMAC_CORE_ID		0x82d		/* Gigabit MAC core */
+#define	DMEMC_CORE_ID		0x82e		/* DDR1/2 memory controller core */
+#define	PCIERC_CORE_ID		0x82f		/* PCIE Root Complex core */
+#define	OCP_CORE_ID		0x830		/* OCP2OCP bridge core */
+#define	SC_CORE_ID		0x831		/* shared common core */
+#define	AHB_CORE_ID		0x832		/* OCP2AHB bridge core */
+#define	SPIH_CORE_ID		0x833		/* SPI host core */
+#define	I2S_CORE_ID		0x834		/* I2S core */
+#define	DMEMS_CORE_ID		0x835		/* SDR/DDR1 memory controller core */
+#define	DEF_SHIM_COMP		0x837		/* SHIM component in ubus/6362 */
+
+#define ACPHY_CORE_ID		0x83b		/* Dot11 ACPHY */
+#define PCIE2_CORE_ID		0x83c		/* pci express Gen2 core */
+#define USB30D_CORE_ID		0x83d		/* usb 3.0 device core */
+#define ARMCR4_CORE_ID		0x83e		/* ARM CR4 CPU */
+#define GCI_CORE_ID		0x840		/* GCI Core */
+#define M2MDMA_CORE_ID          0x844           /* memory to memory dma */
+#define CMEM_CORE_ID		0x846		/* CNDS DDR2/3 memory controller */
+#define ARMCA7_CORE_ID		0x847		/* ARM CA7 CPU */
+#define SYSMEM_CORE_ID		0x849		/* System memory core */
+#define APB_BRIDGE_CORE_ID	0x135		/* APB bridge core ID */
+#define AXI_CORE_ID		0x301		/* AXI/GPV core ID */
+#define EROM_CORE_ID		0x366		/* EROM core ID */
+#define OOB_ROUTER_CORE_ID	0x367		/* OOB router core ID */
+#define DEF_AI_COMP		0xfff		/* Default component, in ai chips it maps all
+						 * unused address ranges
+						 */
+
+#define CC_4706_CORE_ID		0x500		/* chipcommon core */
+#define NS_PCIEG2_CORE_ID	0x501		/* PCIE Gen 2 core */
+#define NS_DMA_CORE_ID		0x502		/* DMA core */
+#define NS_SDIO3_CORE_ID	0x503		/* SDIO3 core */
+#define NS_USB20_CORE_ID	0x504		/* USB2.0 core */
+#define NS_USB30_CORE_ID	0x505		/* USB3.0 core */
+#define NS_A9JTAG_CORE_ID	0x506		/* ARM Cortex A9 JTAG core */
+#define NS_DDR23_CORE_ID	0x507		/* Denali DDR2/DDR3 memory controller */
+#define NS_ROM_CORE_ID		0x508		/* ROM core */
+#define NS_NAND_CORE_ID		0x509		/* NAND flash controller core */
+#define NS_QSPI_CORE_ID		0x50a		/* SPI flash controller core */
+#define NS_CCB_CORE_ID		0x50b		/* ChipcommonB core */
+#define SOCRAM_4706_CORE_ID	0x50e		/* internal memory core */
+#define NS_SOCRAM_CORE_ID	SOCRAM_4706_CORE_ID
+#define	ARMCA9_CORE_ID		0x510		/* ARM Cortex A9 core (ihost) */
+#define	NS_IHOST_CORE_ID	ARMCA9_CORE_ID	/* ARM Cortex A9 core (ihost) */
+#define GMAC_COMMON_4706_CORE_ID	0x5dc		/* Gigabit MAC core */
+#define GMAC_4706_CORE_ID	0x52d		/* Gigabit MAC core */
+#define AMEMC_CORE_ID		0x52e		/* DDR1/2 memory controller core */
+#define ALTA_CORE_ID		0x534		/* I2S core */
+#define DDR23_PHY_CORE_ID	0x5dd
+
+#define SI_PCI1_MEM     0x40000000  /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI1_CFG     0x44000000  /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_PCIE1_DMA_H32		0xc0000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+#define CC_4706B0_CORE_REV	0x8000001f		/* chipcommon core */
+#define SOCRAM_4706B0_CORE_REV	0x80000005		/* internal memory core */
+#define GMAC_4706B0_CORE_REV	0x80000000		/* Gigabit MAC core */
+#define NS_PCIEG2_CORE_REV_B0	0x7		/* NS-B0 PCIE Gen 2 core rev */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define	SI_CC_IDX		0
+/* SOC Interconnect types (aka chip types) */
+#define	SOCI_SB			0
+#define	SOCI_AI			1
+#define	SOCI_UBUS		2
+#define	SOCI_NAI		3
+
+/* Common core control flags */
+#define	SICF_BIST_EN		0x8000
+#define	SICF_PME_EN		0x4000
+#define	SICF_CORE_BITS		0x3ffc
+#define	SICF_FGC		0x0002
+#define	SICF_CLOCK_EN		0x0001
+
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+
+/* Norstar core status flags */
+#define SISF_NS_BOOTDEV_MASK	0x0003	/* ROM core */
+#define SISF_NS_BOOTDEV_NOR	0x0000	/* ROM core */
+#define SISF_NS_BOOTDEV_NAND	0x0001	/* ROM core */
+#define SISF_NS_BOOTDEV_ROM	0x0002	/* ROM core */
+#define SISF_NS_BOOTDEV_OFFLOAD	0x0003	/* ROM core */
+#define SISF_NS_SKUVEC_MASK	0x000c	/* ROM core */
+
+/* A register common to all cores, used to
+ * communicate with the PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST		0x1e0		/* clock control and status */
+#define SI_PWR_CTL_ST		0x1e8		/* For memory clock gating */
+
+/* clk_ctl_st register */
+#define	CCS_FORCEALP		0x00000001	/* force ALP request */
+#define	CCS_FORCEHT		0x00000002	/* force HT request */
+#define	CCS_FORCEILP		0x00000004	/* force ILP request */
+#define	CCS_ALPAREQ		0x00000008	/* ALP Avail Request */
+#define	CCS_HTAREQ		0x00000010	/* HT Avail Request */
+#define	CCS_FORCEHWREQOFF	0x00000020	/* Force HW Clock Request Off */
+#define CCS_HQCLKREQ		0x00000040	/* HQ Clock Required */
+#define CCS_USBCLKREQ		0x00000100	/* USB Clock Req */
+#define CCS_SECICLKREQ		0x00000100	/* SECI Clock Req */
+#define CCS_ARMFASTCLOCKREQ	0x00000100	/* ARM CR4/CA7 fast clock request */
+#define CCS_AVBCLKREQ		0x00000400	/* AVB Clock enable request */
+#define CCS_ERSRC_REQ_MASK	0x00000700	/* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT	8
+#define	CCS_ALPAVAIL		0x00010000	/* ALP is available */
+#define	CCS_HTAVAIL		0x00020000	/* HT is available */
+#define CCS_BP_ON_APL		0x00040000	/* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT		0x00080000	/* RO: Backplane is running on HT clock */
+#define CCS_ARMFASTCLOCKSTATUS	0x01000000	/* Fast CPU clock is running */
+#define CCS_ERSRC_STS_MASK	0x07000000	/* external resource status */
+#define CCS_ERSRC_STS_SHIFT	24
+#define CCS_SECI_AVAIL		0x01000000	/* RO: SECI is available  */
+
+#define	CCS0_HTAVAIL		0x00010000	/* HT avail in chipc and pcmcia on 4328a0 */
+#define	CCS0_ALPAVAIL		0x00020000	/* ALP avail in chipc and pcmcia on 4328a0 */
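+
+/* Illustrative sketch, not part of the original header: a typical HT clock
+ * request against clk_ctl_st using the bits above.  It assumes "cc_regs" is
+ * an ioremap()ed chipcommon base, the <asm/io.h> accessors, and the uint32
+ * type from <typedefs.h>; real code would go through the si or osl register
+ * accessors rather than raw readl()/writel().
+ */
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+static inline int example_request_ht_clock(void __iomem *cc_regs)
+{
+	int spins = 1000;
+	uint32 v = readl(cc_regs + SI_CLK_CTL_ST);
+
+	writel(v | CCS_HTAREQ, cc_regs + SI_CLK_CTL_ST);	/* ask for an HT clock */
+	while (spins-- && !(readl(cc_regs + SI_CLK_CTL_ST) & CCS_HTAVAIL))
+		;		/* bounded busy-wait; real code would back off or delay */
+	return (readl(cc_regs + SI_CLK_CTL_ST) & CCS_HTAVAIL) ? 0 : -1;
+}
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */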
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size  */
+#define	BISZ_OFFSET		0x3e0		/* At this offset into the binary */
+#define	BISZ_MAGIC		0x4249535a	/* Marked with this value: 'BISZ' */
+#define	BISZ_MAGIC_IDX		0		/* Word 0: magic */
+#define	BISZ_TXTST_IDX		1		/*	1: text start */
+#define	BISZ_TXTEND_IDX		2		/*	2: text end */
+#define	BISZ_DATAST_IDX		3		/*	3: data start */
+#define	BISZ_DATAEND_IDX	4		/*	4: data end */
+#define	BISZ_BSSST_IDX		5		/*	5: bss start */
+#define	BISZ_BSSEND_IDX		6		/*	6: bss end */
+#define BISZ_SIZE		7		/* descriptor size in 32-bit integers */
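+
+/* Illustrative sketch, not part of the original header: locating the BISZ
+ * descriptor in a boot image.  "image" is assumed to point at the start of
+ * the binary in memory, and uint32/uint8 come from <typedefs.h>; the seven
+ * words laid out per the indexes above are only trusted once the magic word
+ * matches.
+ */
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+static inline int example_parse_bisz(const void *image, uint32 *text_start, uint32 *bss_end)
+{
+	const uint32 *bisz = (const uint32 *)((const uint8 *)image + BISZ_OFFSET);
+
+	if (bisz[BISZ_MAGIC_IDX] != BISZ_MAGIC)
+		return -1;			/* no size descriptor embedded */
+	*text_start = bisz[BISZ_TXTST_IDX];
+	*bss_end = bisz[BISZ_BSSEND_IDX];
+	return 0;
+}
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */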
+
+/* Boot/Kernel related definitions and functions */
+#define	SOC_BOOTDEV_ROM		0x00000001
+#define	SOC_BOOTDEV_PFLASH	0x00000002
+#define	SOC_BOOTDEV_SFLASH	0x00000004
+#define	SOC_BOOTDEV_NANDFLASH	0x00000008
+
+#define	SOC_KNLDEV_NORFLASH	0x00000002
+#define	SOC_KNLDEV_NANDFLASH	0x00000004
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+int soc_boot_dev(void *sih);
+int soc_knl_dev(void *sih);
+#endif	/* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */
+
+#define PMU_BASE_OFFSET	0x00012000	/* PMU offset is changed for ccrev >= 56 */
+#endif /* _HNDSOC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h
new file mode 100644
index 0000000..5556bc8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -0,0 +1,1162 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: linux_osl.h 672413 2016-11-28 11:13:23Z $
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+#define DECLSPEC_ALIGN(x)	__attribute__ ((aligned(x)))
+
+/* Linux Kernel: File Operations: start */
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCMDRIVER
+
+/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+#endif /* SHARED_OSL_CMN */
+
+extern void osl_detach(osl_t *osh);
+extern int osl_static_mem_init(osl_t *osh, void *adapter);
+extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
+extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
+extern void* osl_get_bus_handle(osl_t *osh);
+
+/* Global ASSERT type */
+extern uint32 g_assert_type;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRI_FMT_x       "llx"
+#define PRI_FMT_X       "llX"
+#define PRI_FMT_o       "llo"
+#define PRI_FMT_d       "lld"
+#else
+#define PRI_FMT_x       "x"
+#define PRI_FMT_X       "X"
+#define PRI_FMT_o       "o"
+#define PRI_FMT_d       "d"
+#endif /* CONFIG_PHYS_ADDR_T_64BIT */
+/* ASSERT */
+#if defined(BCMASSERT_LOG)
+	#define ASSERT(exp) \
+	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+	#ifdef __GNUC__
+		#define GCC_VERSION \
+			(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+		#if GCC_VERSION > 30100
+			#define ASSERT(exp)	do {} while (0)
+		#else
+			/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
+			#define ASSERT(exp)
+		#endif /* GCC_VERSION > 30100 */
+	#endif /* __GNUC__ */
+#endif 
+
+/* bcm_prefetch_32B */
+static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
+{
+#if (defined(STB) && defined(__arm__)) && (__LINUX_ARM_ARCH__ >= 5)
+	switch (cachelines_32B) {
+		case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc");
+		case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc");
+		case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
+		case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr +  0) : "cc");
+	}
+#endif 
+}
+
+/* microsecond delay */
+#define	OSL_DELAY(usec)		osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_SLEEP(ms)			osl_sleep(ms)
+extern void osl_sleep(uint ms);
+
+#define	OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define	OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+/* PCI configuration space access macros */
+#define	OSL_PCI_READ_CONFIG(osh, offset, size) \
+	osl_pci_read_config((osh), (offset), (size))
+#define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+	osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
+#define OSL_PCIE_DOMAIN(osh)	osl_pcie_domain(osh)
+#define OSL_PCIE_BUS(osh)	osl_pcie_bus(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern uint osl_pcie_domain(osl_t *osh);
+extern uint osl_pcie_bus(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+
+#define OSL_ACP_COHERENCE		(1<<1L)
+#define OSL_FWDERBUF			(1<<2L)
+
+/* Pkttag flag should be part of public information */
+typedef struct {
+	bool pkttag;
+	bool mmbus;		/**< Bus supports memory-mapped register accesses */
+	pktfree_cb_fn_t tx_fn;  /**< Callback function for PKTFREE */
+	void *tx_ctx;		/**< Context to the callback function */
+	void	*unused[3];
+} osl_pubinfo_t;
+
+extern void osl_flag_set(osl_t *osh, uint32 mask);
+extern void osl_flag_clr(osl_t *osh, uint32 mask);
+extern bool osl_is_flag_set(osl_t *osh, uint32 mask);
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx)		\
+	do {						\
+	   ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn;	\
+	   ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx;	\
+	} while (0)
+
+
+/* host/bus architecture-specific byte swap */
+#define BUS_SWAP32(v)		(v)
+	#define MALLOC(osh, size)	osl_malloc((osh), (size))
+	#define MALLOCZ(osh, size)	osl_mallocz((osh), (size))
+	#define MFREE(osh, addr, size)	osl_mfree((osh), (addr), (size))
+	#define VMALLOC(osh, size)	osl_vmalloc((osh), (size))
+	#define VMALLOCZ(osh, size)	osl_vmallocz((osh), (size))
+	#define VMFREE(osh, addr, size)	osl_vmfree((osh), (addr), (size))
+	#define MALLOCED(osh)		osl_malloced((osh))
+	#define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
+	extern void *osl_malloc(osl_t *osh, uint size);
+	extern void *osl_mallocz(osl_t *osh, uint size);
+	extern void osl_mfree(osl_t *osh, void *addr, uint size);
+	extern void *osl_vmalloc(osl_t *osh, uint size);
+	extern void *osl_vmallocz(osl_t *osh, uint size);
+	extern void osl_vmfree(osl_t *osh, void *addr, uint size);
+	extern uint osl_malloced(osl_t *osh);
+	extern uint osl_check_memleak(osl_t *osh);
+
+#define	MALLOC_FAILED(osh)	osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
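+
+/* Illustrative sketch, not part of the original header: the usual
+ * allocate/check/free pattern with the OSL memory macros above.  The
+ * function name and "len" are placeholders.
+ */
+static inline void *example_alloc_buf(osl_t *osh, uint len)
+{
+	void *buf = MALLOCZ(osh, len);	/* zeroed allocation, accounted against osh */
+
+	/* on failure, the OSL failure counter is visible via MALLOC_FAILED(osh);
+	 * the caller releases the buffer with MFREE(osh, buf, len).
+	 */
+	return buf;
+}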
+
+/* allocate/free shared (dma-able) consistent memory */
+#define	DMA_CONSISTENT_ALIGN	osl_dma_consistent_align()
+#define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+#define	DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
+	uint *tot, dmaaddr_t *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
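+
+/* Illustrative sketch, not part of the original header: allocating a
+ * DMA-consistent descriptor ring with the macros above.  "ring_sz" and the
+ * osh handle are placeholders; "alloced" receives the size actually reserved
+ * once alignment is applied, and the ring is later released with
+ * DMA_FREE_CONSISTENT(osh, va, *alloced, *pa, NULL).
+ */
+static inline void *example_alloc_ring(osl_t *osh, uint ring_sz, uint *alloced, dmaaddr_t *pa)
+{
+	return DMA_ALLOC_CONSISTENT(osh, ring_sz, DMA_CONSISTENT_ALIGN,
+		alloced, pa, NULL);
+}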
+
+/* map/unmap direction */
+#define DMA_NO	0	/* Used to skip cache op */
+#define	DMA_TX	1	/* TX direction for DMA */
+#define	DMA_RX	2	/* RX direction for DMA */
+
+/* map/unmap shared (dma-able) memory */
+#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+	osl_dma_unmap((osh), (pa), (size), (direction))
+extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *txp_dmah);
+extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction);
+
+/* API for DMA addressing capability */
+#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
+
+#define OSL_SMP_WMB()	smp_wmb()
+
+/* API for CPU relax */
+extern void osl_cpu_relax(void);
+#define OSL_CPU_RELAX() osl_cpu_relax()
+
+extern void osl_preempt_disable(osl_t *osh);
+extern void osl_preempt_enable(osl_t *osh);
+#define OSL_DISABLE_PREEMPTION(osh)	osl_preempt_disable(osh)
+#define OSL_ENABLE_PREEMPTION(osh)	osl_preempt_enable(osh)
+
+#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \
+	(defined(STBLINUX) && defined(__ARM_ARCH_7A__))
+	extern void osl_cache_flush(void *va, uint size);
+	extern void osl_cache_inv(void *va, uint size);
+	extern void osl_prefetch(const void *ptr);
+	#define OSL_CACHE_FLUSH(va, len)	osl_cache_flush((void *)(va), len)
+	#define OSL_CACHE_INV(va, len)		osl_cache_inv((void *)(va), len)
+	#define OSL_PREFETCH(ptr)			osl_prefetch(ptr)
+#if defined(__ARM_ARCH_7A__)
+	extern int osl_arch_is_coherent(void);
+	#define OSL_ARCH_IS_COHERENT()		osl_arch_is_coherent()
+	extern int osl_acp_war_enab(void);
+	#define OSL_ACP_WAR_ENAB()			osl_acp_war_enab()
+#else  /* !__ARM_ARCH_7A__ */
+	#define OSL_ARCH_IS_COHERENT()		NULL
+	#define OSL_ACP_WAR_ENAB()			NULL
+#endif /* !__ARM_ARCH_7A__ */
+#else  /* !__mips__ && !__ARM_ARCH_7A__ */
+	#define OSL_CACHE_FLUSH(va, len)	BCM_REFERENCE(va)
+	#define OSL_CACHE_INV(va, len)		BCM_REFERENCE(va)
+	#define OSL_PREFETCH(ptr)		BCM_REFERENCE(ptr)
+
+	#define OSL_ARCH_IS_COHERENT()		NULL
+	#define OSL_ACP_WAR_ENAB()			NULL
+#endif 
+
+/* register access macros */
+#if defined(BCMSDIO)
+	#include <bcmsdh.h>
+	#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r)), (v)))
+	#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r))))
+#elif (defined(STB) && defined(__arm__))
+extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);
+
+#define OSL_READ_REG(osh, r) \
+	({\
+		__typeof(*(r)) __osl_v; \
+		osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
+		__osl_v; \
+	})
+#endif 
+
+#if (defined(STB) && defined(__arm__))
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
+#else /* !BCM47XX_CA9 */
+#if defined(BCMSDIO)
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+		mmap_op else bus_op
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+		mmap_op : bus_op
+#else
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+#endif 
+#endif 
+
+#define OSL_ERROR(bcmerror)	osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+/* the largest reasonable packet buffer the driver uses for ethernet MTU, in bytes */
+#define	PKTBUFSZ	2048   /* largest reasonable packet buffer, driver uses for ethernet MTU */
+
+#define OSH_NULL   NULL
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c.
+ */
+#include <linuxver.h>           /* use current 2.4.x calling conventions */
+#include <linux/kernel.h>       /* for vsn/printf's */
+#include <linux/string.h>       /* for mem*, str* */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
+extern uint64 osl_sysuptime_us(void);
+#define OSL_SYSUPTIME()		((uint32)jiffies_to_msecs(jiffies))
+#define OSL_SYSUPTIME_US()	osl_sysuptime_us()
+#else
+#define OSL_SYSUPTIME()		((uint32)jiffies * (1000 / HZ))
+#error "OSL_SYSUPTIME_US() may need to be defined"
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
+#define	printf(fmt, args...)	printk(fmt , ## args)
+#include <linux/kernel.h>	/* for vsn/printf's */
+#include <linux/string.h>	/* for mem*, str* */
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+/* register access macros */
+
+#ifdef CONFIG_64BIT
+/* readq is defined only for 64 bit platform */
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v = 0; \
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)(r)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)(r)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+				case sizeof(uint64):	__osl_v = \
+					readq((volatile uint64*)(r)); break; \
+			} \
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+#else /* !CONFIG_64BIT */
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v = 0; \
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)(r)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)(r)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+			} \
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_64BIT
+/* writeq is defined only for 64 bit platform */
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh, \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+			case sizeof(uint64):	writeq((uint64)(v), (volatile uint64*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+
+#else /* !CONFIG_64BIT */
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh, \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+#endif /* CONFIG_64BIT */
+
+#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
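+
+/* Illustrative sketch, not part of the original header: read-modify-write on
+ * a device register with the composed macros above.  "clk_ctl_st" is a
+ * hypothetical mapped register pointer, and CCS_FORCEHT comes from hndsoc.h,
+ * which is assumed to have been included.
+ */
+static inline void example_toggle_force_ht(osl_t *osh, volatile uint32 *clk_ctl_st)
+{
+	OR_REG(osh, clk_ctl_st, CCS_FORCEHT);	/* set the force-HT request bit */
+	AND_REG(osh, clk_ctl_st, ~CCS_FORCEHT);	/* and clear it again */
+}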
+
+/* bcopy, bcmp, and bzero functions */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va)	((void *)va)
+#define OSL_CACHED(va)		((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
+#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)
+
+/* get processor cycle count */
+#if defined(__i386__)
+#define	OSL_GETCYCLES(x)	rdtscl((x))
+#else
+#define OSL_GETCYCLES(x)	((x) = 0)
+#endif 
+
+/* dereference an address that may cause a bus exception */
+#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })
+
+/* map/unmap physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define	REG_MAP(pa, size)	ioremap((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size)       (void *)(0)
+#endif /* !defined(CONFIG_MMC_MSM7X00A) */
+#define	REG_UNMAP(va)		iounmap((va))
+
+/* shared (dma-able) memory access macros */
+#define	R_SM(r)			*(r)
+#define	W_SM(r, v)		(*(r) = (v))
+#define	BZERO_SM(r, len)	memset((r), '\0', (len))
+
+/* Because the non-BINOSL implementation of the PKT OSL routines is macro-based (for
+ * performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h>		/* use current 2.4.x calling conventions */
+
+/* packet primitives */
+#ifdef BCMDBG_CTRACE
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len), __LINE__, __FILE__)
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#else
+#ifdef BCM_OBJECT_TRACE
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len), __LINE__, __FUNCTION__)
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
+#else
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb))
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf)		BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#if defined(BCM_OBJECT_TRACE)
+#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
+#else
+#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
+#endif /* BCM_OBJECT_TRACE */
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define	PKTGET_STATIC(osh, len, send)		osl_pktget_static((osh), (len))
+#define	PKTFREE_STATIC(osh, skb, send)		osl_pktfree_static((osh), (skb), (send))
+#else
+#define	PKTGET_STATIC	PKTGET
+#define	PKTFREE_STATIC	PKTFREE
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+#define	PKTDATA(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define	PKTLEN(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
+#define PKTHEADROOM(osh, skb)		(PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTEXPHEADROOM(osh, skb, b)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+	 })
+#define PKTTAILROOM(osh, skb)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_tailroom((struct sk_buff*)(skb)); \
+	 })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pad((struct sk_buff*)(skb), (padlen)); \
+	 })
+#define	PKTNEXT(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define	PKTSETNEXT(osh, skb, x)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+	 })
+#define	PKTSETLEN(osh, skb, len)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 __skb_trim((struct sk_buff*)(skb), (len)); \
+	 })
+#define	PKTPUSH(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_push((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTPULL(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pull((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTTAG(skb)			((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTSETPOOL(osh, skb, x, y)	BCM_REFERENCE(osh)
+#define	PKTPOOL(osh, skb)		({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb)        PKTLINK(skb)
+#define PKTSETFREELIST(skb, x)  PKTSETLINK((skb), (x))
+#define PKTPTR(skb)             (skb)
+#define PKTID(skb)              ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id)       ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTSHRINK(osh, m)		({BCM_REFERENCE(osh); m;})
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
+#define PKTORPHAN(skb, tsq)          osl_pkt_orphan_partial(skb, tsq)
+extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);
+#else
+#define PKTORPHAN(skb, tsq)          ({BCM_REFERENCE(skb); 0;})
+#endif /* LINUX VERSION >= 3.6 */
+
+
+#ifdef BCMDBG_CTRACE
+#define	DEL_CTRACE(zosh, zskb) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_del(&(zskb)->ctrace_list); \
+	(zosh)->ctrace_num--; \
+	(zskb)->ctrace_start = 0; \
+	(zskb)->ctrace_count = 0; \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define	UPDATE_CTRACE(zskb, zfile, zline) { \
+	struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
+	if (_zskb->ctrace_count < CTRACE_NUM) { \
+		_zskb->func[_zskb->ctrace_count] = zfile; \
+		_zskb->line[_zskb->ctrace_count] = zline; \
+		_zskb->ctrace_count++; \
+	} \
+	else { \
+		_zskb->func[_zskb->ctrace_start] = zfile; \
+		_zskb->line[_zskb->ctrace_start] = zline; \
+		_zskb->ctrace_start++; \
+		if (_zskb->ctrace_start >= CTRACE_NUM) \
+			_zskb->ctrace_start = 0; \
+	} \
+}
+
+#define	ADD_CTRACE(zosh, zskb, zfile, zline) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
+	(zosh)->ctrace_num++; \
+	UPDATE_CTRACE(zskb, zfile, zline); \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define PKTCALLER(zskb)	UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
+#endif /* BCMDBG_CTRACE */
+
+#ifdef CTFPOOL
+#define	CTFPOOL_REFILL_THRESH	3
+typedef struct ctfpool {
+	void		*head;
+	spinlock_t	lock;
+	osl_t		*osh;
+	uint		max_obj;
+	uint		curr_obj;
+	uint		obj_size;
+	uint		refills;
+	uint		fast_allocs;
+	uint 		fast_frees;
+	uint 		slow_allocs;
+} ctfpool_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->pktc_flags)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	FASTBUF	(1 << 16)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->mac_len)
+#else
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->__unused)
+#endif /* 2.6.22 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->ctfpool)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
+#else
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->sk)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+#endif
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#else /* CTFPOOL */
+#define	PKTSETFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#endif /* CTFPOOL */
+
+#define	PKTSETCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#ifdef HNDCTF
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->pktc_flags & CHAINED)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	SKIPCT	(1 << 18)
+#define	CHAINED	(1 << 19)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->mac_len & CHAINED)
+#else /* 2.6.22 */
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->__unused & CHAINED)
+#endif /* 2.6.22 */
+typedef struct ctf_mark {
+	uint32	value;
+}	ctf_mark_t;
+#define CTF_MARK(m)				(m.value)
+#else /* HNDCTF */
+#define	PKTSETSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define CTF_MARK(m)		({BCM_REFERENCE(m); 0;})
+#endif /* HNDCTF */
+
+#if defined(BCM_GMAC3)
+
+/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */
+
+/* Account for packets delivered to downstream forwarder by GMAC interface. */
+extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTTOFWDER(osh, skbs, skb_cnt)  \
+	osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+
+/* Account for packets received from downstream forwarder. */
+#if defined(BCMDBG_CTRACE) /* pkt logging */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt,
+                             int line, char *file);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \
+	                 __LINE__, __FILE__)
+#else  /* ! (BCMDBG_PKT || BCMDBG_CTRACE) */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+#endif 
+
+
+/** GMAC Forwarded packet tagging for reduced cache flush/invalidate.
+ * In FWDERBUF tagged packet, only FWDER_PKTMAPSZ amount of data would have
+ * been accessed in the GMAC forwarder. This may be used to limit the number of
+ * cachelines that need to be flushed or invalidated.
+ * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF.
+ * DHD may clear the FWDERBUF tag, if more than FWDER_PKTMAPSZ was accessed.
+ * Likewise, a debug print of a packet payload in, say, the ethernet driver needs
+ * to be accompanied by a clear of the FWDERBUF tag.
+ */
+
+/** Forwarded packets, have a GMAC_FWDER_HWRXOFF sized rx header (etc.h) */
+#define FWDER_HWRXOFF       (18)
+
+/** Maximum amount of packet data that a downstream forwarder (GMAC) may have
+ * read into the L1 cache (not dirty). This may be used in reduced cache ops.
+ *
+ * Max 44: ET HWRXOFF[18] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4]
+ * Min 32: GMAC_FWDER_HWRXOFF[18] + EtherHdr[14]
+ */
+#define FWDER_MINMAPSZ      (FWDER_HWRXOFF + 14)
+#define FWDER_MAXMAPSZ      (FWDER_HWRXOFF + 4 + 14 + 4 + 4)
+#define FWDER_PKTMAPSZ      (FWDER_MINMAPSZ)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+
+#define FWDERBUF            (1 << 4)
+#define PKTSETFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \
+	 })
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+
+#define FWDERBUF	        (1 << 20)
+#define PKTSETFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \
+	 })
+
+#else /* 2.6.22 */
+
+#define FWDERBUF            (1 << 4)
+#define PKTSETFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused & FWDERBUF); \
+	 })
+
+#endif /* 2.6.22 */
+
+#else  /* ! BCM_GMAC3 */
+
+#define PKTSETFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTCLRFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTISFWDERBUF(osh, skb)   ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#endif /* ! BCM_GMAC3 */
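+
+/* Illustrative sketch, not part of the original header: using the FWDERBUF
+ * tag to bound a receive-path cache invalidate.  It assumes a BCM_GMAC3
+ * build (so FWDER_PKTMAPSZ exists) and that "maplen" is the length that was
+ * DMA-mapped for the packet.
+ */
+#if defined(BCM_GMAC3)
+static inline void example_rx_cache_inv(osl_t *osh, void *skb, uint maplen)
+{
+	uint len = maplen;
+
+	if (PKTISFWDERBUF(osh, skb) && maplen > FWDER_PKTMAPSZ)
+		len = FWDER_PKTMAPSZ;		/* forwarder only touched this much */
+	OSL_CACHE_INV(PKTDATA(osh, skb), len);
+}
+#endif /* BCM_GMAC3 */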
+
+
+#ifdef HNDCTF
+/* For broadstream iqos */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	TOBR		(1 << 5)
+#define	PKTSETTOBR(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \
+	 })
+#define	PKTCLRTOBR(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \
+	 })
+#define	PKTISTOBR(skb)	(((struct sk_buff*)(skb))->pktc_flags & TOBR)
+#define	PKTSETCTFIPCTXIF(skb, ifp)	(((struct sk_buff*)(skb))->ctf_ipc_txif = ifp)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#else /* 2.6.22 */
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#endif /* 2.6.22 */
+#else /* HNDCTF */
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#endif /* HNDCTF */
+
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx)	(((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx)	({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_HW_HASH */
+#define PKTGETFAHIDX(skb)	(((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp)	(((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb)	(((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define	AUX_TCP_FIN_RST	(1 << 0)
+#define	AUX_FREED	(1 << 1)
+#define PKTSETFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define	PKTCLRFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define	PKTISFAAUX(skb)		(((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define	PKTCLRFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define	PKTISFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define	PKTISFABRIDGED(skb)	PKTISFAAUX(skb)
+#else
+#define	PKTISFAAUX(skb)		({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFABRIDGED(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFAFREED(skb)	({BCM_REFERENCE(skb); FALSE;})
+
+#define	PKTCLRFAAUX(skb)	BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb)	BCM_REFERENCE(skb)
+#define	PKTCLRFAFREED(skb)	BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
+#if defined(BCM_OBJECT_TRACE)
+extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
+#else
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void osl_pktclone(osl_t *osh, void **pkt);
+
+#ifdef BCMDBG_CTRACE
+#define PKT_CTRACE_DUMP(osh, b)	osl_ctrace_dump((osh), (b))
+extern void *osl_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+struct bcmstrbuf;
+extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
+#else
+#ifdef BCM_OBJECT_TRACE
+extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
+#else
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+#endif /* BCMDBG_CTRACE */
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#ifdef BCMDBG_CTRACE
+#define PKTFRMNATIVE(osh, skb)  osl_pkt_frmnative(((osl_t *)osh), \
+				(struct sk_buff*)(skb), __LINE__, __FILE__)
+#define	PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
+#else
+#define PKTFRMNATIVE(osh, skb)	osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTTONATIVE(osh, pkt)		osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define	PKTLINK(skb)			(((struct sk_buff*)(skb))->prev)
+#define	PKTSETLINK(skb, x)		(((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define	PKTPRIO(skb)			(((struct sk_buff*)(skb))->priority)
+#define	PKTSETPRIO(skb, x)		(((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb)		(((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff*)(skb))->ip_summed = \
+						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
+#define PKTSHARED(skb)                  (((struct sk_buff*)(skb))->cloned)
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->mark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->mark = (m)
+#else /* !2.6.0 */
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->nfmark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->nfmark = (m)
+#endif /* 2.6.0 */
+#else /* CONFIG_NF_CONNTRACK_MARK */
+#define PKTMARK(p)                     0
+#define PKTSETMARK(p, m)
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#define PKTALLOCED(osh)		osl_pktalloced(osh)
+extern uint osl_pktalloced(osl_t *osh);
+
+#define OSL_RAND()		osl_rand()
+extern uint32 osl_rand(void);
+
+#if !defined(BCM_SECURE_DMA)
+#define	DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
+#endif /* !(defined(BCM_SECURE_DMA)) */
+
+#ifdef PKTC
+/* Use 8 bytes of the skb tstamp field to store the info below */
+struct chain_node {
+	struct sk_buff	*link;
+	unsigned int	flags:3, pkts:9, bytes:20;
+};
+
+#define CHAIN_NODE(skb)		((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb))
+
+#define	PKTCSETATTR(s, f, p, b)	({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \
+	                         CHAIN_NODE(s)->bytes = (b);})
+#define	PKTCCLRATTR(s)		({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \
+	                         CHAIN_NODE(s)->bytes = 0;})
+#define	PKTCGETATTR(s)		(CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \
+	                         CHAIN_NODE(s)->bytes)
+#define	PKTCCNT(skb)		(CHAIN_NODE(skb)->pkts)
+#define	PKTCLEN(skb)		(CHAIN_NODE(skb)->bytes)
+#define	PKTCGETFLAGS(skb)	(CHAIN_NODE(skb)->flags)
+#define	PKTCSETFLAGS(skb, f)	(CHAIN_NODE(skb)->flags = (f))
+#define	PKTCCLRFLAGS(skb)	(CHAIN_NODE(skb)->flags = 0)
+#define	PKTCFLAGS(skb)		(CHAIN_NODE(skb)->flags)
+#define	PKTCSETCNT(skb, c)	(CHAIN_NODE(skb)->pkts = (c))
+#define	PKTCINCRCNT(skb)	(CHAIN_NODE(skb)->pkts++)
+#define	PKTCADDCNT(skb, c)	(CHAIN_NODE(skb)->pkts += (c))
+#define	PKTCSETLEN(skb, l)	(CHAIN_NODE(skb)->bytes = (l))
+#define	PKTCADDLEN(skb, l)	(CHAIN_NODE(skb)->bytes += (l))
+#define	PKTCSETFLAG(skb, fb)	(CHAIN_NODE(skb)->flags |= (fb))
+#define	PKTCCLRFLAG(skb, fb)	(CHAIN_NODE(skb)->flags &= ~(fb))
+#define	PKTCLINK(skb)		(CHAIN_NODE(skb)->link)
+#define	PKTSETCLINK(skb, x)	(CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for (; (skb) != NULL; (skb) = (nskb)) \
+		if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
+		    PKTSETCLINK((skb), NULL), 1)
+#define	PKTCFREE(osh, skb, send) \
+do { \
+	void *nskb; \
+	ASSERT((skb) != NULL); \
+	FOREACH_CHAINED_PKT((skb), nskb) { \
+		PKTCLRCHAINED((osh), (skb)); \
+		PKTCCLRFLAGS((skb)); \
+		PKTFREE((osh), (skb), (send)); \
+	} \
+} while (0)
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} else { \
+		PKTSETCLINK((t), (p)); \
+		(t) = (p); \
+	} \
+} while (0)
+#endif /* PKTC */
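+
+/* Illustrative sketch, not part of the original header: walking a PKTC chain
+ * with FOREACH_CHAINED_PKT.  It assumes HNDCTF (for PKTISCHAINED /
+ * PKTCLRCHAINED) as well as PKTC; the iterator unlinks each packet, so the
+ * body must consume "skb"; here each one is counted and freed.
+ */
+#if defined(PKTC) && defined(HNDCTF)
+static inline uint example_count_and_free_chain(osl_t *osh, void *head)
+{
+	void *skb = head, *nskb;
+	uint n = 0;
+
+	FOREACH_CHAINED_PKT(skb, nskb) {
+		PKTCLRCHAINED(osh, skb);	/* drop the chained marker before freeing */
+		n++;
+		PKTFREE(osh, skb, FALSE);
+	}
+	return n;
+}
+#endif /* PKTC && HNDCTF */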
+
+#else /* ! BCMDRIVER */
+
+
+/* ASSERT */
+	#define ASSERT(exp)	do {} while (0)
+
+/* MALLOC and MFREE */
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+/* str* and mem* functions */
+#include <string.h>
+
+/* *printf functions */
+#include <stdio.h>
+
+/* bcopy, bcmp, and bzero */
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif /* ! BCMDRIVER */
+
+/* The current STB 7445D1 doesn't use ACP and is non-coherent.
+ * These dummy values are added for the build only;
+ * they need to be changed when this is revisited.
+ */
+#if defined(STBLINUX)
+
+#if defined(__ARM_ARCH_7A__)
+#define ACP_WAR_ENAB() 0
+#define ACP_WIN_LIMIT 1
+#define arch_is_coherent() 0
+#endif /* __ARM_ARCH_7A__ */
+
+#endif /* STBLINUX */
+
+#ifdef BCM_SECURE_DMA
+
+#define	SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \
+	osl_sec_dma_map((osh), (va), (size), (direction), (p), (dmah), (pcma), (offset))
+#define	SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) \
+	osl_sec_dma_dd_map((osh), (va), (size), (direction), (p), (dmah))
+#define	SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \
+	osl_sec_dma_map_txmeta((osh), (va), (size), (direction), (p), (dmah), (pcma))
+#define	SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \
+	osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset))
+#define	SECURE_DMA_UNMAP_ALL(osh, pcma) \
+	osl_sec_dma_unmap_all((osh), (pcma))
+
+#define DMA_MAP(osh, va, size, direction, p, dmah)
+
+typedef struct sec_cma_info {
+	struct sec_mem_elem *sec_alloc_list;
+	struct sec_mem_elem *sec_alloc_list_tail;
+} sec_cma_info_t;
+
+#if defined(__ARM_ARCH_7A__)
+#define CMA_BUFSIZE_4K	4096
+#define CMA_BUFSIZE_2K	2048
+#define CMA_BUFSIZE_512	512
+
+#define	CMA_BUFNUM		2048
+#define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */
+#define SEC_CMA_COHERENT_MAX 278
+#define CMA_DMA_DESC_MEMBLOCK	(SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX)
+#define CMA_DMA_DATA_MEMBLOCK	(CMA_BUFSIZE_4K*CMA_BUFNUM)
+#define	CMA_MEMBLOCK		(CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK)
+#define CONT_REGION	0x02		/* Region CMA */
+#else
+#define CONT_REGION	0x00		/* To access the MIPs mem, Not yet... */
+#endif /* defined(__ARM_ARCH_7A__) */
+
+#define SEC_DMA_ALIGN	(1<<16)
+typedef struct sec_mem_elem {
+	size_t			size;
+	int				direction;
+	phys_addr_t		pa_cma;     /**< physical  address */
+	void			*va;        /**< virtual address of driver pkt */
+	dma_addr_t		dma_handle; /**< bus address assign by linux */
+	void			*vac;       /**< virtual address of cma buffer */
+	struct page *pa_cma_page;	/* phys to page address */
+	struct	sec_mem_elem	*next;
+} sec_mem_elem_t;
+
+extern dma_addr_t osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset);
+extern dma_addr_t osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *dmah);
+extern dma_addr_t osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size,
+  int direction, void *p, hnddma_seg_map_t *dmah, void *ptr_cma_info);
+extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
+	void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset);
+extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info);
+
+#endif /* BCM_SECURE_DMA */
+
+typedef struct sk_buff_head PKT_LIST;
+#define PKTLIST_INIT(x)		skb_queue_head_init((x))
+#define PKTLIST_ENQ(x, y)	skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y))
+#define PKTLIST_DEQ(x)		skb_dequeue((struct sk_buff_head *)(x))
+#define PKTLIST_UNLINK(x, y)	skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
+#define PKTLIST_FINI(x)		skb_queue_purge((struct sk_buff_head *)(x))
+
+#ifdef REPORT_FATAL_TIMEOUTS
+typedef struct osl_timer {
+	struct timer_list *timer;
+	bool   set;
+} osl_timer_t;
+
+typedef void (*linux_timer_fn)(ulong arg);
+
+extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg);
+extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
+extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
+extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
+#endif
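+
+/* Illustrative sketch, not part of the original header: the expected
+ * lifecycle of the osl_timer wrappers above in a REPORT_FATAL_TIMEOUTS
+ * build.  The callback, the "example" name and the 1000 ms period are
+ * placeholders; the timer is later re-armed with osl_timer_update() and
+ * stopped with osl_timer_del().
+ */
+#ifdef REPORT_FATAL_TIMEOUTS
+static inline void example_timeout_cb(void *arg)
+{
+	(void)arg;			/* report the stalled operation here */
+}
+
+static inline osl_timer_t *example_start_watchdog(osl_t *osh, void *ctx)
+{
+	osl_timer_t *t = osl_timer_init(osh, "example", example_timeout_cb, ctx);
+
+	if (t != NULL)
+		osl_timer_add(osh, t, 1000, TRUE);	/* periodic, 1000 ms */
+	return t;
+}
+#endif /* REPORT_FATAL_TIMEOUTS */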
+
+#endif	/* _linux_osl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h
new file mode 100644
index 0000000..b0dbfb7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -0,0 +1,770 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: linuxver.h 646721 2016-06-30 12:36:41Z $
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
+#endif
+
+#include <typedefs.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#include <linux/kconfig.h>
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+/* linux/malloc.h is deprecated, use linux/slab.h instead. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
+#else
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
+#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
+	(RHEL_MAJOR == 5))
+/* Exclude RHEL 5 */
+typedef void (*work_func_t)(void *work);
+#endif
+#endif	/* >= 2.6.19 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* Some distributions have their own 2.6.x compatibility layers */
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED	SA_SHIRQ
+#endif /* < 2.6.18 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef	CONFIG_NET_RADIO
+#define	CONFIG_WIRELESS_EXT
+#endif
+#endif	/* < 2.6.17 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#include <linux/sched/rt.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#  if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#    define __devinit	__init
+#  else
+/* All devices are hotpluggable since linux 3.8.0 */
+#    define __devinit
+#  endif
+#endif /* !__devinit */
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x)	x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev)		(dev)->sysdata
+#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
+
+/*
+ * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
+ */
+
+struct pci_device_id {
+	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
+	unsigned int subvendor, subdevice;	/* Subsystem IDs or PCI_ANY_ID */
+	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
+	unsigned long driver_data;		/* Data private to the driver */
+};
+
+struct pci_driver {
+	struct list_head node;
+	char *name;
+	const struct pci_device_id *id_table;	/* NULL if wants all devices */
+	int (*probe)(struct pci_dev *dev,
+	             const struct pci_device_id *id); /* New device inserted */
+	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
+						 * capable driver)
+						 */
+	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
+	void (*resume)(struct pci_dev *dev);	/* Device woken up */
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+/* compatpci.c */
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif /* PCI registration */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x)	__initcall(x);
+#define module_exit(x)	__exitcall(x);
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
+#define WL_CONFIG_RFKILL
+#else
+#undef WL_CONFIG_RFKILL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+/*
+ * DMA mapping
+ *
+ * See linux/Documentation/DMA-mapping.txt
+ */
+
+#ifndef PCI_DMA_TODEVICE
+#define	PCI_DMA_TODEVICE	1
+#define	PCI_DMA_FROMDEVICE	2
+#endif
+
+typedef u32 dma_addr_t;
+
+/* Pure 2^n version of get_order */
+static inline int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                                         dma_addr_t *dma_handle)
+{
+	void *ret;
+	int gfp = GFP_ATOMIC | GFP_DMA;
+
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_bus(ret);
+	}
+	return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                                       void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif /* DMA mapping */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
+#define netif_down(dev)			do { (dev)->start = 0; } while (0)
+
+/* pcmcia-cs provides its own netdevice compatibility layer */
+#ifndef _COMPAT_NETDEVICE_H
+
+/*
+ * SoftNet
+ *
+ * For pre-softnet kernels we need to tell the upper layer not to
+ * re-enter start_xmit() while we are in there. However softnet
+ * guarantees not to enter while we are in there so there is no need
+ * to do the netif_stop_queue() dance unless the transmit queue really
+ * gets stuck. This should also improve performance according to tests
+ * done by Aman Singla.
+ */
+
+#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+}
+
+#define netif_queue_stopped(dev)	(dev)->tbusy
+#define netif_running(dev)		(dev)->start
+
+#endif /* _COMPAT_NETDEVICE_H */
+
+#define netif_device_attach(dev)	netif_start_queue(dev)
+#define netif_device_detach(dev)	netif_stop_queue(dev)
+
+/* 2.4.x renamed bottom halves to tasklets */
+#define tasklet_struct				tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+	queue_task(tasklet, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+                                void (*func)(unsigned long),
+                                unsigned long data)
+{
+	tasklet->next = NULL;
+	tasklet->sync = 0;
+	tasklet->routine = (void (*)(void *))func;
+	tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet)	{ do {} while (0); }
+
+/* 2.4.x introduced del_timer_sync() */
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif /* SoftNet */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+/*
+ * Emit code to initialise a tq_struct's routine and data pointers
+ */
+#define PREPARE_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		(_tq)->routine = _routine;			\
+		(_tq)->data = _data;				\
+	} while (0)
+
+/*
+ * Emit code to initialise all of a tq_struct
+ */
+#define INIT_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		INIT_LIST_HEAD(&(_tq)->list);			\
+		(_tq)->sync = 0;				\
+		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
+	} while (0)
+
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
+
+/* Power management related macro & routines */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
+#else
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_read_config_dword(dev, i * 4, &buffer[i]);
+	}
+	return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_write_config_dword(dev, i * 4, buffer[i]);
+	}
+	/*
+	 * otherwise, write the context information we know from bootup.
+	 * This works around a problem where warm-booting from Windows
+	 * combined with a D3(hot)->D0 transition causes PCI config
+	 * header data to be forgotten.
+	 */
+	else {
+		for (i = 0; i < 6; i ++)
+			pci_write_config_dword(dev,
+			                       PCI_BASE_ADDRESS_0 + (i * 4),
+			                       pci_resource_start(dev, i));
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	}
+	return 0;
+}
+#endif /* PCI power management */
+
+/* Old cp0 access macros deprecated in 2.4.19 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+/* Module refcount handled internally in 2.6.x */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT		do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
+#endif
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT			do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT			do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev)		kfree(dev)
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* struct packet_type redefined in 2.6.x */
+#define af_packet_priv			data
+#endif
+
+/* suspend args */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW	CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+	void	*parent;  /* the external entity that the thread is supposed to work for */
+	char	*proc_name;
+	struct	task_struct *p_task;
+	long	thr_pid;
+	int		prio; /* priority */
+	struct	semaphore sema;
+	int	terminated;
+	struct	completion completed;
+	spinlock_t	spinlock;
+	int		up_cnt;
+} tsk_ctl_t;
+
+
+/* requires a tsk_ctl_t argument; the caller's priv data is passed in the owner ptr */
+/* note: this macro assumes there is only one context waiting on the thread's completion */
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+static inline bool binary_sema_down(tsk_ctl_t *tsk)
+{
+	if (down_interruptible(&tsk->sema) == 0) {
+		unsigned long flags = 0;
+		spin_lock_irqsave(&tsk->spinlock, flags);
+		if (tsk->up_cnt == 1)
+			tsk->up_cnt--;
+		else {
+			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
+		}
+		spin_unlock_irqrestore(&tsk->spinlock, flags);
+		return false;
+	} else
+		return true;
+}
+
+static inline bool binary_sema_up(tsk_ctl_t *tsk)
+{
+	bool sem_up = false;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&tsk->spinlock, flags);
+	if (tsk->up_cnt == 0) {
+		tsk->up_cnt++;
+		sem_up = true;
+	} else if (tsk->up_cnt == 1) {
+		/* dhd_sched_dpc: dpc is already up! */
+	} else
+		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
+
+	spin_unlock_irqrestore(&tsk->spinlock, flags);
+
+	if (sem_up)
+		up(&tsk->sema);
+
+	return sem_up;
+}
+
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
+{ \
+	sema_init(&((tsk_ctl)->sema), 0); \
+	init_completion(&((tsk_ctl)->completed)); \
+	(tsk_ctl)->parent = owner; \
+	(tsk_ctl)->proc_name = name;  \
+	(tsk_ctl)->terminated = FALSE; \
+	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
+	if (IS_ERR((tsk_ctl)->p_task)) { \
+		(tsk_ctl)->thr_pid = DHD_PID_KT_INVALID; \
+		DBG_THR(("%s(): thread:%s:%lx failed\n", __FUNCTION__, \
+			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+	} else { \
+		(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
+		spin_lock_init(&((tsk_ctl)->spinlock)); \
+		DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
+			(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+	}; \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+	(tsk_ctl)->terminated = TRUE; \
+	smp_wmb(); \
+	up(&((tsk_ctl)->sema));	\
+	wait_for_completion(&((tsk_ctl)->completed)); \
+	DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+	(tsk_ctl)->thr_pid = -1; \
+}
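+
+/*
+ * Example (illustrative sketch; my_thread, my_dev and thr_ctl are
+ * hypothetical): a worker thread driven through tsk_ctl_t with
+ * PROC_START()/PROC_STOP() and the binary semaphore helpers above.
+ *
+ *	static int my_thread(void *data)
+ *	{
+ *		tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ *
+ *		while (!binary_sema_down(tsk)) {
+ *			smp_rmb();
+ *			if (tsk->terminated)
+ *				break;
+ *			... do the actual work for tsk->parent here ...
+ *		}
+ *		complete_and_exit(&tsk->completed, 0);
+ *	}
+ *
+ *	PROC_START(my_thread, my_dev, &my_dev->thr_ctl, 0, "my_thread");
+ *	binary_sema_up(&my_dev->thr_ctl);	... wake the thread ...
+ *	PROC_STOP(&my_dev->thr_ctl);		... set terminated and wait ...
+ */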
+
+/*  ----------------------- */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid;    \
+pid = find_get_pid((pid_t)nr);    \
+tsk = pid_task(pid, PIDTYPE_PID);    \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+	struct task_struct *tsk; \
+	tsk = find_task_by_vpid(pid); \
+	if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+	kill_proc(pid, sig, 1); \
+}
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret)		\
+do {									\
+	wait_queue_t __wait;						\
+	init_waitqueue_entry(&__wait, current);				\
+									\
+	add_wait_queue(&wq, &__wait);					\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			ret = schedule_timeout(ret);			\
+			if (!ret)					\
+				break;					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	current->state = TASK_RUNNING;					\
+	remove_wait_queue(&wq, &__wait);				\
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!(condition))						\
+		__wait_event_interruptible_timeout(wq, condition, __ret); \
+	__ret;								\
+})
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
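+
+/*
+ * Example (illustrative sketch; wq and flag are hypothetical): the compat
+ * definition above preserves the usual calling convention, so callers can
+ * be written once for old and new kernels alike.
+ *
+ *	DECLARE_WAIT_QUEUE_HEAD(wq);
+ *	int flag = 0;
+ *
+ *	... waiter: 0 on timeout, -ERESTARTSYS on signal, otherwise the
+ *	    remaining timeout once the condition became true ...
+ *	long left = wait_event_interruptible_timeout(wq, flag != 0, HZ);
+ *
+ *	... waker ...
+ *	flag = 1;
+ *	wake_up_interruptible(&wq);
+ */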
+
+/*
+For < 2.6.24, wl creates its own netdev but doesn't
+align the priv area like the genuine alloc_netdev().
+Since netdev_priv() always gives us the aligned address, it will
+not match our unaligned address for < 2.6.24
+*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev)	(dev->priv)
+#else
+#define DEV_PRIV(dev)	netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p)         wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
+#endif  /* < 2.6.20 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
+#else
+#define CAN_SLEEP()	(FALSE)
+#endif
+
+#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
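+
+/*
+ * Example (illustrative sketch): KMALLOC_FLAG selects GFP_KERNEL only when
+ * the caller is allowed to sleep and falls back to GFP_ATOMIC in atomic or
+ * IRQ-disabled context, so one allocation call works from either path.
+ *
+ *	void *buf = kmalloc(len, KMALLOC_FLAG);
+ *	if (buf == NULL)
+ *		return -ENOMEM;
+ */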
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define RANDOM32	prandom_u32
+#else
+#define RANDOM32	random32
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define SRANDOM32(entropy)	prandom_seed(entropy)
+#else
+#define SRANDOM32(entropy)	srandom32(entropy)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+/*
+ * Override newer kfifo functions with
+ * older versions to work on older kernels
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
+#define kfifo_esize(a)				1
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&	!defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
+#define kfifo_esize(a)				1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic pop
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+static inline struct inode *file_inode(const struct file *f)
+{
+	return f->f_dentry->d_inode;
+}
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
+
+#endif /* _linuxver_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h
new file mode 100644
index 0000000..6722351
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -0,0 +1,83 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: miniopt.h 514727 2014-11-12 03:02:48Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max key length */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;		/* name for prompt in error strings */
+	const char* flags;		/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int  val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
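+
+/*
+ * Example (illustrative sketch): a typical parsing loop. It assumes
+ * miniopt() returns -1 once argv is exhausted and a positive value on a
+ * parse error, which is how the companion wl utility drives it; that
+ * contract is not spelled out in this header.
+ *
+ *	miniopt_t opt;
+ *	int err;
+ *
+ *	miniopt_init(&opt, "mytool", NULL, FALSE);
+ *	while ((err = miniopt(&opt, argv)) != -1) {
+ *		if (err == 1)
+ *			return -1;
+ *		argv += opt.consumed;
+ *		if (opt.opt == 'i' && opt.good_int)
+ *			index = opt.val;
+ *		else if (opt.positional)
+ *			fname = opt.valstr;
+ *	}
+ */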
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif  /* MINI_OPT_H  */
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 0000000..a2da1d3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,62 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: msgtrace.h 542902 2015-03-22 23:29:48Z $
+ */
+
+#ifndef	_MSGTRACE_H
+#define	_MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8	version;
+	uint8   trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+	uint16	len;	/* Len of the trace */
+	uint32	seqnum;	/* Sequence number of message. Useful if the message has been lost
+			 * because of DMA error or a bus reset (ex: SDIO Func2)
+			 */
+	/* Msgtrace type  only */
+	uint32  discarded_bytes;  /* Number of discarded bytes because of trace overflow  */
+	uint32  discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN 	sizeof(msgtrace_hdr_t)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/nan.h b/drivers/net/wireless/bcmdhd/include/nan.h
new file mode 100644
index 0000000..4534414
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/nan.h
@@ -0,0 +1,1468 @@
+/*
+ * Fundamental types and constants relating to WFA NAN
+ * (Neighbor Awareness Networking)
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: nan.h 700076 2017-05-17 14:42:22Z $
+ */
+#ifndef _NAN_H_
+#define _NAN_H_
+
+#include <typedefs.h>
+#include <802.11.h>
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* WiFi NAN OUI values */
+#define NAN_OUI			WFA_OUI     /* WiFi OUI */
+/* For oui_type field identifying the type and version of the NAN IE. */
+#define NAN_OUI_TYPE		0x13        /* Type/Version */
+#define NAN_AF_OUI_TYPE		0x18        /* Type/Version */
+/* IEEE 802.11 vendor specific information element. (Same as P2P_IE_ID.) */
+#define NAN_IE_ID		0xdd
+
+/* Same as P2P_PUB_AF_CATEGORY and DOT11_ACTION_CAT_PUBLIC */
+#define NAN_PUB_AF_CATEGORY	DOT11_ACTION_CAT_PUBLIC
+/* Protected dual public action frame category */
+#define NAN_PROT_DUAL_PUB_AF_CATEGORY	DOT11_ACTION_CAT_PDPA
+/* IEEE 802.11 Public Action Frame Vendor Specific. (Same as P2P_PUB_AF_ACTION.) */
+#define NAN_PUB_AF_ACTION	DOT11_PUB_ACTION_VENDOR_SPEC
+/* Number of octets in hash of service name. (Same as P2P_WFDS_HASH_LEN.) */
+#define NAN_SVC_HASH_LEN	6
+/* Size of fixed length part of nan_pub_act_frame_t before attributes. */
+#define NAN_PUB_ACT_FRAME_FIXED_LEN 6
+/* Number of octets in master rank value. */
+#define NAN_MASTER_RANK_LEN     8
+/* NAN public action frame header size */
+#define NAN_PUB_ACT_FRAME_HDR_SIZE (OFFSETOF(nan_pub_act_frame_t, data))
+/* NAN network ID */
+#define NAN_NETWORK_ID		"\x51\x6F\x9A\x01\x00\x00"
+/* Service Control Type length */
+#define NAN_SVC_CONTROL_TYPE_LEN	2
+/* Binding Bitmap length */
+#define NAN_BINDING_BITMAP_LEN		2
+/* Service Response Filter (SRF) control field masks */
+#define NAN_SRF_BLOOM_MASK		0x01
+#define NAN_SRF_INCLUDE_MASK		0x02
+#define NAN_SRF_INDEX_MASK		0x0C
+/* SRF Bloom Filter index shift */
+#define NAN_SRF_BLOOM_SHIFT	2
+#define NAN_SRF_INCLUDE_SHIFT	1
+/* Mask for CRC32 output, used in hash function for NAN bloom filter */
+#define NAN_BLOOM_CRC32_MASK	0xFFFF
+
+/* Attribute TLV header size */
+#define NAN_ATTR_ID_OFF		0
+#define NAN_ATTR_LEN_OFF	1
+#define NAN_ATTR_DATA_OFF	3
+
+#define NAN_ATTR_ID_LEN		 1u	/* ID field length */
+#define NAN_ATTR_LEN_LEN	 2u	/* Length field length */
+#define NAN_ATTR_HDR_LEN	 (NAN_ATTR_ID_LEN + NAN_ATTR_LEN_LEN)
+#define NAN_ENTRY_CTRL_LEN       1      /* Entry control field length from FAM attribute */
+#define NAN_MAP_ID_LEN           1	/* MAP ID length to signify band */
+#define NAN_OPERATING_CLASS_LEN  1	/* operating class field length from NAN FAM */
+#define NAN_CHANNEL_NUM_LEN      1	/* channel number field length 1 byte */
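+
+/*
+ * Example (illustrative parsing sketch): NAN attributes are TLVs with a
+ * 1-byte ID followed by a 2-byte little-endian length, so a buffer of
+ * attributes can be walked with the offsets above. Reading the length as
+ * little-endian follows the WFA NAN spec; buf and buf_len are hypothetical.
+ *
+ *	const uint8 *attr = buf;
+ *	uint remaining = buf_len;
+ *
+ *	while (remaining >= NAN_ATTR_HDR_LEN) {
+ *		uint8 id = attr[NAN_ATTR_ID_OFF];
+ *		uint16 len = attr[NAN_ATTR_LEN_OFF] |
+ *			(attr[NAN_ATTR_LEN_OFF + 1] << 8);
+ *		if ((uint)(len + NAN_ATTR_HDR_LEN) > remaining)
+ *			break;			... truncated attribute ...
+ *		... dispatch on id, payload starts at attr[NAN_ATTR_DATA_OFF] ...
+ *		attr += NAN_ATTR_HDR_LEN + len;
+ *		remaining -= NAN_ATTR_HDR_LEN + len;
+ *	}
+ */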
+
+/* NAN slot duration / period */
+#define NAN_MIN_TU		16
+#define NAN_TU_PER_DW		512
+#define NAN_MAX_DW		16
+#define NAN_MAX_TU		(NAN_MAX_DW * NAN_TU_PER_DW)
+
+#define NAN_SLOT_DUR_0TU	0
+#define NAN_SLOT_DUR_16TU	16
+#define NAN_SLOT_DUR_32TU	32
+#define NAN_SLOT_DUR_64TU	64
+#define NAN_SLOT_DUR_128TU	128
+#define NAN_SLOT_DUR_256TU	256
+#define NAN_SLOT_DUR_512TU	512
+#define NAN_SLOT_DUR_1024TU	1024
+#define NAN_SLOT_DUR_2048TU	2048
+#define NAN_SLOT_DUR_4096TU	4096
+#define NAN_SLOT_DUR_8192TU	8192
+
+#define NAN_MAP_ID_2G   2  /* NAN Further Avail Map ID for band 2.4G */
+#define NAN_MAP_ID_5G   5  /* NAN Further Avail Map ID for band 5G */
+#define NAN_MAP_NUM_IDS 2  /* Max number of NAN Further Avail Map IDs supported */
+
+/* size of ndc id */
+#define NAN_DATA_NDC_ID_SIZE 6
+
+#define NAN_AVAIL_ENTRY_LEN_RES0 7      /* Avail entry len in FAM attribute for resolution 16TU */
+#define NAN_AVAIL_ENTRY_LEN_RES1 5      /* Avail entry len in FAM attribute for resolution 32TU */
+#define NAN_AVAIL_ENTRY_LEN_RES2 4      /* Avail entry len in FAM attribute for resolution 64TU */
+
+/* Vendor-specific public action frame for NAN */
+typedef BWL_PRE_PACKED_STRUCT struct nan_pub_act_frame_s {
+	/* NAN_PUB_AF_CATEGORY 0x04 */
+	uint8 category_id;
+	/* NAN_PUB_AF_ACTION 0x09 */
+	uint8 action_field;
+	/* NAN_OUI 0x50-6F-9A */
+	uint8 oui[DOT11_OUI_LEN];
+	/* NAN_OUI_TYPE 0x13 */
+	uint8 oui_type;
+	/* One or more NAN Attributes follow */
+	uint8 data[];
+} BWL_POST_PACKED_STRUCT nan_pub_act_frame_t;
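+
+/*
+ * Example (illustrative sketch): identifying a vendor-specific NAN public
+ * action frame from its fixed header before handing the trailing data[] to
+ * an attribute parser; body and body_len are the action frame body and length.
+ *
+ *	static bool is_nan_pub_act_frame(const uint8 *body, uint body_len)
+ *	{
+ *		const nan_pub_act_frame_t *af = (const nan_pub_act_frame_t *)body;
+ *
+ *		return body_len >= NAN_PUB_ACT_FRAME_HDR_SIZE &&
+ *			af->category_id == NAN_PUB_AF_CATEGORY &&
+ *			af->action_field == NAN_PUB_AF_ACTION &&
+ *			memcmp(af->oui, NAN_OUI, DOT11_OUI_LEN) == 0 &&
+ *			af->oui_type == NAN_OUI_TYPE;
+ *	}
+ */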
+
+/* NAN attributes as defined in the nan spec */
+enum {
+	NAN_ATTR_MASTER_IND	= 0,
+	NAN_ATTR_CLUSTER	= 1,
+	NAN_ATTR_SVC_ID_LIST    = 2,
+	NAN_ATTR_SVC_DESCRIPTOR = 3,
+	NAN_ATTR_CONN_CAP       = 4,
+	NAN_ATTR_INFRA		= 5,
+	NAN_ATTR_P2P		= 6,
+	NAN_ATTR_IBSS		= 7,
+	NAN_ATTR_MESH		= 8,
+	NAN_ATTR_FURTHER_NAN_SD = 9,
+	NAN_ATTR_FURTHER_AVAIL	= 10,
+	NAN_ATTR_COUNTRY_CODE	= 11,
+	NAN_ATTR_RANGING	= 12,
+	NAN_ATTR_CLUSTER_DISC	= 13,
+	/* nan 2.0 */
+	NAN_ATTR_SVC_DESC_EXTENSION = 14,
+	NAN_ATTR_NAN_DEV_CAP = 15,
+	NAN_ATTR_NAN_NDP = 16,
+	NAN_ATTR_NAN_NMSG = 17,
+	NAN_ATTR_NAN_AVAIL = 18,
+	NAN_ATTR_NAN_NDC = 19,
+	NAN_ATTR_NAN_NDL = 20,
+	NAN_ATTR_NAN_NDL_QOS = 21,
+	NAN_ATTR_MCAST_SCHED = 22,
+	NAN_ATTR_UNALIGN_SCHED = 23,
+	NAN_ATTR_PAGING_UCAST = 24,
+	NAN_ATTR_PAGING_MCAST = 25,
+	NAN_ATTR_RANGING_INFO = 26,
+	NAN_ATTR_RANGING_SETUP = 27,
+	NAN_ATTR_FTM_RANGE_REPORT = 28,
+	NAN_ATTR_ELEMENT_CONTAINER = 29,
+	NAN_ATTR_WLAN_INFRA_EXT = 30,
+	NAN_ATTR_EXT_P2P_OPER = 31,
+	NAN_ATTR_EXT_IBSS = 32,
+	NAN_ATTR_EXT_MESH = 33,
+	NAN_ATTR_CIPHER_SUITE_INFO = 34,
+	NAN_ATTR_SEC_CTX_ID_INFO = 35,
+	NAN_ATTR_SHARED_KEY_DESC = 36,
+	NAN_ATTR_MCAST_SCHED_CHANGE = 37,
+	NAN_ATTR_MCAST_SCHED_OWNER_CHANGE = 38,
+	NAN_ATTR_PUBLIC_AVAILABILITY = 39,
+	NAN_ATTR_SUB_SVC_ID_LIST = 40,
+	/* change NAN_ATTR_MAX_ID to max ids + 1, excluding NAN_ATTR_VENDOR_SPECIFIC.
+	 * This is used in nan_parse.c
+	 */
+	NAN_ATTR_MAX_ID		= NAN_ATTR_SUB_SVC_ID_LIST + 1,
+
+	NAN_ATTR_VENDOR_SPECIFIC = 221
+};
+
+enum wifi_nan_avail_resolution {
+	NAN_AVAIL_RES_16_TU = 0,
+	NAN_AVAIL_RES_32_TU = 1,
+	NAN_AVAIL_RES_64_TU = 2,
+	NAN_AVAIL_RES_INVALID = 255
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ie_s {
+	uint8	id;		/* IE ID: NAN_IE_ID 0xDD */
+	uint8	len;		/* IE length */
+	uint8	oui[DOT11_OUI_LEN]; /* NAN_OUI 50:6F:9A */
+	uint8	oui_type;	/* NAN_OUI_TYPE 0x13 */
+	uint8	attr[];	/* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_nan_ie_t;
+
+#define NAN_IE_HDR_SIZE	(OFFSETOF(wifi_nan_ie_t, attr))
+
+/* master indication record  */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_master_ind_attr_s {
+	uint8	id;
+	uint16	len;
+	uint8	master_preference;
+	uint8	random_factor;
+} BWL_POST_PACKED_STRUCT wifi_nan_master_ind_attr_t;
+
+/* cluster attr record  */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_cluster_attr_s {
+	uint8	id;
+	uint16	len;
+	uint8   amr[NAN_MASTER_RANK_LEN];
+	uint8   hop_count;
+	/* Anchor Master Beacon Transmission Time */
+	uint32  ambtt;
+} BWL_POST_PACKED_STRUCT wifi_nan_cluster_attr_t;
+
+/*  container for service ID records  */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_id_attr_s {
+	uint8	id;
+	uint16	len;
+	uint8	svcid[0]; /* 6*len of service IDs */
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_id_attr_t;
+
+/* service_control bitmap for wifi_nan_svc_descriptor_attr_t below */
+#define NAN_SC_PUBLISH 0x0
+#define NAN_SC_SUBSCRIBE 0x1
+#define NAN_SC_FOLLOWUP 0x2
+/* Set to 1 if a Matching Filter field is included in descriptors. */
+#define NAN_SC_MATCHING_FILTER_PRESENT 0x4
+/* Set to 1 if a Service Response Filter field is included in descriptors. */
+#define NAN_SC_SR_FILTER_PRESENT 0x8
+/* Set to 1 if a Service Info field is included in descriptors. */
+#define NAN_SC_SVC_INFO_PRESENT 0x10
+/* range is close proximity only */
+#define NAN_SC_RANGE_LIMITED 0x20
+/* Set to 1 if binding bitmap is present in descriptors */
+#define NAN_SC_BINDING_BITMAP_PRESENT 0x40
+
+/* Service descriptor */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_descriptor_attr_s {
+	/* Attribute ID - 0x03. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* Hash of the Service Name */
+	uint8 svc_hash[NAN_SVC_HASH_LEN];
+	/* Publish or subscribe instance id */
+	uint8 instance_id;
+	/* Requestor Instance ID */
+	uint8 requestor_id;
+	/* Service Control Bitmask. Also determines what data follows. */
+	uint8 svc_control;
+	/* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_descriptor_attr_t;
+
+/* IBSS attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ibss_attr_s {
+	/* Attribute ID - 0x07. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* BSSID of the ibss */
+	struct ether_addr bssid;
+	/*
+	 map control bits:
+	 [0-3]: id of the associated further avail map attribute
+	 [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	 [6]  : repeat: 0 - applies to the next DW, 1 - 16 intervals max (exact semantics unclear)
+	 [7]  : reserved
+	*/
+	uint8 map_ctrl;
+	/* avail. intervals bitmap, var len  */
+	uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t;
+
+/* Further Availability MAP attr  */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s {
+	/* Attribute ID - 0x0A. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* MAP id: val [0..15], values[16-255] reserved */
+	uint8 map_id;
+	/* availability entry, var len */
+	uint8 avil_entry[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_favail_attr_t;
+
+/* Availability entry within the Further Availability MAP attr */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_s {
+	/*
+	 entry control
+	 [0-1]: avail interval duration: 0:16ms; 1:32ms; 2:64ms;
+	 [2:7] reserved
+	*/
+	uint8 entry_ctrl;
+	/* operating class: freq band etc IEEE 802.11 */
+	uint8 opclass;
+	/* channel number */
+	uint8 chan;
+	/*  avail bmp, var len */
+	uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_t;
+
+/* Map control Field */
+#define NAN_MAPCTRL_IDMASK	0x7
+#define NAN_MAPCTRL_DURSHIFT	4
+#define NAN_MAPCTRL_DURMASK	0x30
+#define NAN_MAPCTRL_REPEAT	0x40
+#define NAN_MAPCTRL_REPEATSHIFT	6
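+
+/*
+ * Example (illustrative sketch): decoding a map_ctrl byte (as carried in
+ * the IBSS attribute above and the P2P/ranging attributes below) with the
+ * mask/shift values just defined.
+ *
+ *	uint8 map_id = map_ctrl & NAN_MAPCTRL_IDMASK;
+ *	uint8 dur    = (map_ctrl & NAN_MAPCTRL_DURMASK) >> NAN_MAPCTRL_DURSHIFT;
+ *	bool  repeat = (map_ctrl & NAN_MAPCTRL_REPEAT) != 0;
+ */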
+
+#define NAN_VENDOR_TYPE_RTT	0
+#define NAN_VENDOR_TYPE_P2P	1
+
+/* Vendor Specific Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vendor_attr_s {
+	uint8	id;			/* 0xDD */
+	uint16	len;		/* IE length */
+	uint8	oui[DOT11_OUI_LEN]; /* 00-90-4C */
+	uint8	type;		/* attribute type */
+	uint8	attr[1];	/* var len attributes */
+} BWL_POST_PACKED_STRUCT wifi_nan_vendor_attr_t;
+
+#define NAN_VENDOR_HDR_SIZE	(OFFSETOF(wifi_nan_vendor_attr_t, attr))
+
+/* p2p operation attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_p2p_op_attr_s {
+	/* Attribute ID - 0x06. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* P2P device role */
+	uint8 dev_role;
+	/* BSSID of the ibss */
+	struct ether_addr p2p_dev_addr;
+	/*
+	map control bits:
+	[0-3]: id of the associated further avail map attribute
+	[4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	[6] : repeat: 0 - applies to the next DW, 1 - 16 intervals max (exact semantics unclear)
+	[7] : reserved
+	*/
+	uint8 map_ctrl;
+	/* avail. intervals bitmap */
+	uint8 avail_bmp[1];
+} BWL_POST_PACKED_STRUCT wifi_nan_p2p_op_attr_t;
+
+/* ranging attribute */
+#define NAN_RANGING_MAP_CTRL_ID_SHIFT 0
+#define NAN_RANGING_MAP_CTRL_ID_MASK 0x0F
+#define NAN_RANGING_MAP_CTRL_DUR_SHIFT 4
+#define NAN_RANGING_MAP_CTRL_DUR_MASK 0x30
+#define NAN_RANGING_MAP_CTRL_REPEAT_SHIFT 6
+#define NAN_RANGING_MAP_CTRL_REPEAT_MASK 0x40
+#define NAN_RANGING_MAP_CTRL_REPEAT_DW(_ctrl) (((_ctrl) & \
+	NAN_RANGING_MAP_CTRL_DUR_MASK) ? 16 : 1)
+#define NAN_RANGING_MAP_CTRL(_id, _dur, _repeat) (\
+	(((_id) << NAN_RANGING_MAP_CTRL_ID_SHIFT) & \
+		 NAN_RANGING_MAP_CTRL_ID_MASK) | \
+	(((_dur) << NAN_RANGING_MAP_CTRL_DUR_SHIFT) & \
+		NAN_RANGING_MAP_CTRL_DUR_MASK) | \
+	(((_repeat) << NAN_RANGING_MAP_CTRL_REPEAT_SHIFT) & \
+		 NAN_RANGING_MAP_CTRL_REPEAT_MASK))
+
+enum {
+	NAN_RANGING_PROTO_FTM = 0
+};
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_attr_s {
+	uint8 id;			/* 0x0C */
+	uint16 len;			/* length that follows */
+	struct ether_addr dev_addr;	/* device mac address */
+
+	/*
+	map control bits:
+	[0-3]: id of the associated further avail map attribute
+	[4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved
+	[6] : repeat: 0 - applies to the next DW, 1 - 16 intervals max (exact semantics unclear)
+	[7] : reserved
+	*/
+	uint8 map_ctrl;
+
+	uint8 protocol;					/* FTM = 0 */
+	uint32 avail_bmp;				/* avail interval bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_attr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_info_attr_s {
+	uint8 id;			/* 0x1A */
+	uint16 len;			/* length that follows */
+	/*
+	location info availability bit map
+	0: LCI Local Coordinates
+	1: Geospatial LCI WGS84
+	2: Civi Location
+	3: Last Movement Indication
+	[4-7]: reserved
+	*/
+	uint8 lc_info_avail;
+	/*
+	Last movement indication
+	present if bit 3 is set in lc_info_avail
+	cluster TSF[29:14] at the last detected platform movement
+	*/
+	uint16 last_movement;
+
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_info_attr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_hdr_s {
+	uint8 id;			/* 0x1B */
+	uint16 len;			/* length that follows */
+	uint8 dialog_token;	/* Identify req and resp */
+	uint8 type_status;	/* bits 0-3 type, 4-7 status */
+	/* reason code
+	i. when frm type = response & status = reject
+	ii. frm type = termination
+	*/
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_s {
+
+	wifi_nan_ranging_setup_attr_hdr_t setup_attr_hdr;
+	/* Below fields not required when frm type = termination */
+	uint8 ranging_ctrl; /* Bit 0: ranging report required or not */
+	uint8 ftm_params[3];
+	uint8 data[];	/* schedule entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_t;
+
+#define NAN_RANGE_SETUP_ATTR_OFFSET_TBM_INFO (OFFSETOF(wifi_nan_ranging_setup_attr_t, data))
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_report_attr_s {
+	uint8 id;			/* 0x1C */
+	uint16 len;			/* length that follows */
+	/* FTM report format in spec.
+	See definition in 9.4.2.22.18 in 802.11mc D5.0
+	*/
+	uint8 entry_count;
+	uint8 data[2]; /* includes pad */
+	/*
+	dot11_ftm_range_entry_t entries[entry_count];
+	uint8 error_count;
+	dot11_ftm_error_entry_t errors[error_count];
+	 */
+} BWL_POST_PACKED_STRUCT wifi_nan_ranging_report_attr_t;
+
+/* Ranging control flags */
+#define NAN_RNG_REPORT_REQUIRED		0x01
+#define NAN_RNG_FTM_PARAMS_PRESENT	0x02
+#define NAN_RNG_SCHED_ENTRY_PRESENT	0X04
+
+/* Location info flags */
+#define NAN_RNG_LOCATION_FLAGS_LOCAL_CORD		0x1
+#define NAN_RNG_LOCATION_FLAGS_GEO_SPATIAL		0x2
+#define NAN_RNG_LOCATION_FLAGS_CIVIC			0x4
+#define NAN_RNG_LOCATION_FLAGS_LAST_MVMT		0x8
+
+/* Last movement mask and shift value */
+#define NAN_RNG_LOCATION_MASK_LAST_MVT_TSF	0x3FFFC000
+#define NAN_RNG_LOCATION_SHIFT_LAST_MVT_TSF	14
+
+/* FTM params shift values  */
+#define NAN_FTM_MAX_BURST_DUR_SHIFT	0
+#define NAN_FTM_MIN_FTM_DELTA_SHIFT	4
+#define NAN_FTM_NUM_FTM_SHIFT		10
+#define NAN_FTM_FORMAT_BW_SHIFT		15
+
+/* FTM params mask  */
+#define NAN_FTM_MAX_BURST_DUR_MASK	0x00000F
+#define NAN_FTM_MIN_FTM_DELTA_MASK	0x00003F
+#define NAN_FTM_NUM_FTM_MASK		0x00001F
+#define NAN_FTM_FORMAT_BW_MASK		0x00003F
+
+#define FTM_PARAMS_BURSTTMO_FACTOR 250
+
+/* set values into the uint32 */
+#define NAN_FTM_SET_BURST_DUR(ftm, dur) (ftm |= (((dur + 2) & NAN_FTM_MAX_BURST_DUR_MASK) <<\
+	NAN_FTM_MAX_BURST_DUR_SHIFT))
+#define NAN_FTM_SET_FTM_DELTA(ftm, delta) (ftm |= (((delta/100) & NAN_FTM_MIN_FTM_DELTA_MASK) <<\
+	NAN_FTM_MIN_FTM_DELTA_SHIFT))
+#define NAN_FTM_SET_NUM_FTM(ftm, delta) (ftm |= ((delta & NAN_FTM_NUM_FTM_MASK) <<\
+	NAN_FTM_NUM_FTM_SHIFT))
+#define NAN_FTM_SET_FORMAT_BW(ftm, delta) (ftm |= ((delta & NAN_FTM_FORMAT_BW_MASK) <<\
+	NAN_FTM_FORMAT_BW_SHIFT))
+/* set uint32 to attribute */
+#define NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm_u32, ftm_attr) {ftm_attr[0] = ftm_u32 & 0xFF; \
+			ftm_attr[1] = (ftm_u32 >> 8) & 0xFF; ftm_attr[2] = (ftm_u32 >> 16) & 0xFF;}
+
+/* get attribute into uint32 */
+#define NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_p, ftm_u32) (ftm_u32 = ftm_p[0] | ftm_p[1] << 8 | \
+	ftm_p[2] << 16)
+/* get param values from uint32 */
+#define NAN_FTM_GET_BURST_DUR(ftm) (((ftm >> NAN_FTM_MAX_BURST_DUR_SHIFT) &\
+	NAN_FTM_MAX_BURST_DUR_MASK))
+#define NAN_FTM_GET_BURST_DUR_USEC(_val) ((1 << ((_val)-2)) * FTM_PARAMS_BURSTTMO_FACTOR)
+#define NAN_FTM_GET_FTM_DELTA(ftm) (((ftm >> NAN_FTM_MIN_FTM_DELTA_SHIFT) &\
+	NAN_FTM_MIN_FTM_DELTA_MASK)*100)
+#define NAN_FTM_GET_NUM_FTM(ftm) ((ftm >> NAN_FTM_NUM_FTM_SHIFT) &\
+	NAN_FTM_NUM_FTM_MASK)
+#define NAN_FTM_GET_FORMAT_BW(ftm) ((ftm >> NAN_FTM_FORMAT_BW_SHIFT) &\
+	NAN_FTM_FORMAT_BW_MASK)
+
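+/*
+ * Example (illustrative round trip; the numeric values are arbitrary):
+ * building the 3-byte ftm_params field of wifi_nan_ranging_setup_attr_t
+ * from individual values and reading it back. Note that the burst-duration
+ * setter stores (dur + 2), so it is not symmetric with its getter.
+ *
+ *	uint32 ftm = 0;
+ *	uint8 ftm_bytes[3];
+ *
+ *	NAN_FTM_SET_BURST_DUR(ftm, 6);
+ *	NAN_FTM_SET_FTM_DELTA(ftm, 200);	... units of 100 us ...
+ *	NAN_FTM_SET_NUM_FTM(ftm, 8);
+ *	NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm, ftm_bytes);
+ *
+ *	NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_bytes, ftm);
+ *	... NAN_FTM_GET_NUM_FTM(ftm) == 8, NAN_FTM_GET_FTM_DELTA(ftm) == 200 ...
+ */
+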
+#define NAN_CONN_CAPABILITY_WFD		0x0001
+#define NAN_CONN_CAPABILITY_WFDS	0x0002
+#define NAN_CONN_CAPABILITY_TDLS	0x0004
+#define NAN_CONN_CAPABILITY_INFRA	0x0008
+#define NAN_CONN_CAPABILITY_IBSS	0x0010
+#define NAN_CONN_CAPABILITY_MESH	0x0020
+
+#define NAN_DEFAULT_MAP_ID		0	/* nan default map id */
+#define NAN_DEFAULT_MAP_CTRL		0	/* nan default map control */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_conn_cap_attr_s {
+	/* Attribute ID - 0x04. */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16	len;
+	uint16	conn_cap_bmp;	/* Connection capability bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_conn_cap_attr_t;
+
+/* NAN Element container Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_container_attr_s {
+	uint8 id;	/* id - 0x20 */
+	uint16 len;	/* Total length of following IEs */
+	uint8 map_id;	/* map id */
+	uint8 data[1];	/* Data pointing to one or more IEs */
+} BWL_POST_PACKED_STRUCT wifi_nan_container_attr_t;
+
+/* NAN 2.0 NAN avail attribute */
+
+/* Availability Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_attr_s {
+	uint8 id;	/* id - 0x12 */
+	uint16 len;	/* total length */
+	uint8 seqid;	/* sequence id */
+	uint16 ctrl;	/* attribute control */
+	uint8 entry[1];	/* availability entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_attr_t;
+
+/* for processing/building time bitmap info in nan_avail_entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_time_bitmap_s {
+	uint16 ctrl;		/* Time bitmap control */
+	uint8 len;		/* Time bitmap length */
+	uint8 bitmap[];	/* Time bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_time_bitmap_t;
+
+/* Availability Entry format */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_attr_s {
+	uint16 len;		/* Length */
+	uint16 entry_cntrl;	/* Entry Control */
+	uint8 var[];		/* Time bitmap and channel entry list */
+} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_attr_t;
+
+/* FAC Channel Entry  (section 10.7.19.1.5) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_chan_entry_s {
+	uint8 oper_class;		/* Operating Class */
+	uint16 chan_bitmap;		/* Channel Bitmap */
+	uint8 primary_chan_bmp;		/* Primary Channel Bitmap */
+	uint8 aux_chan[0];			/* Auxiliary Channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_chan_entry_t;
+
+/* Channel entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_s {
+	uint8 opclass;		/* Operating class */
+	uint16 chan_bitmap;	/* Channel bitmap */
+	uint8 prim_bitmap;	/* Primary channel bitmap */
+	uint16 aux_bitmap;	/* Auxiliary channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_t;
+
+/* Type of  Availability: committed */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL_MASK	0x1
+/* Type of  Availability: potential */
+#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL_MASK	0x2
+/* Type of  Availability: conditional */
+#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL_MASK	0x4
+
+#define NAN_AVAIL_CTRL_MAP_ID_MASK		0x000F
+#define NAN_AVAIL_CTRL_MAP_ID(_ctrl)		((_ctrl) & NAN_AVAIL_CTRL_MAP_ID_MASK)
+#define NAN_AVAIL_CTRL_COMM_CHANGED_MASK	0x0010
+#define NAN_AVAIL_CTRL_COMM_CHANGED(_ctrl)	((_ctrl) & NAN_AVAIL_CTRL_COMM_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_POTEN_CHANGED_MASK	0x0020
+#define NAN_AVAIL_CTRL_POTEN_CHANGED(_ctrl)	((_ctrl) & NAN_AVAIL_CTRL_POTEN_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK	0x0040
+#define NAN_AVAIL_CTRL_PUBLIC_CHANGED(_ctrl)	((_ctrl) & NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_NDC_CHANGED_MASK		0x0080
+#define NAN_AVAIL_CTRL_NDC_CHANGED(_ctrl)	((_ctrl) & NAN_AVAIL_CTRL_NDC_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_MCAST_CHANGED_MASK	0x0100
+#define NAN_AVAIL_CTRL_MCAST_CHANGED(_ctrl)	((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK	0x0200
+#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED(_ctrl)	((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK)
+#define NAN_AVAIL_CTRL_CHANGED_FLAGS_MASK	0x03f0
+
+#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK 0x07
+#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE(_flags) ((_flags) & NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK)
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_MASK 0x18
+#define NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT 3
+#define NAN_AVAIL_ENTRY_CTRL_USAGE(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_USAGE_MASK) \
+	>> NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_UTIL_MASK 0x1E0
+#define NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT 5
+#define NAN_AVAIL_ENTRY_CTRL_UTIL(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_UTIL_MASK) \
+	>> NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK 0xF00
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT 8
+#define NAN_AVAIL_ENTRY_CTRL_RX_NSS(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK) \
+	>> NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT)
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK 0x1000
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT 12
+#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT(_flags) (((_flags) & \
+	NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK) >> NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT)
+
+#define NAN_TIME_BMAP_CTRL_BITDUR_MASK 0x07
+#define NAN_TIME_BMAP_CTRL_BITDUR(_flags) ((_flags) & NAN_TIME_BMAP_CTRL_BITDUR_MASK)
+#define NAN_TIME_BMAP_CTRL_PERIOD_MASK 0x38
+#define NAN_TIME_BMAP_CTRL_PERIOD_SHIFT 3
+#define NAN_TIME_BMAP_CTRL_PERIOD(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_PERIOD_MASK) \
+	>> NAN_TIME_BMAP_CTRL_PERIOD_SHIFT)
+#define NAN_TIME_BMAP_CTRL_OFFSET_MASK 0x7FC0
+#define NAN_TIME_BMAP_CTRL_OFFSET_SHIFT 6
+#define NAN_TIME_BMAP_CTRL_OFFSET(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_OFFSET_MASK) \
+	>> NAN_TIME_BMAP_CTRL_OFFSET_SHIFT)
+#define NAN_TIME_BMAP_LEN(avail_entry)	\
+	(*(uint8 *)(((wifi_nan_avail_entry_attr_t *)avail_entry)->var + 2))
+
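+/*
+ * Example (illustrative sketch; tb is a hypothetical pointer to the
+ * wifi_nan_time_bitmap_t defined above): splitting the 16-bit time bitmap
+ * control word that precedes the bitmap in an availability entry.
+ *
+ *	uint16 ctrl = tb->ctrl;
+ *	uint bit_dur = NAN_TIME_BMAP_CTRL_BITDUR(ctrl);
+ *	uint period  = NAN_TIME_BMAP_CTRL_PERIOD(ctrl);
+ *	uint offset  = NAN_TIME_BMAP_CTRL_OFFSET(ctrl);
+ *	... tb->bitmap[] then holds tb->len bytes of time bitmap ...
+ */
+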
+#define NAN_AVAIL_CHAN_LIST_HDR_LEN 1
+#define NAN_AVAIL_CHAN_LIST_TYPE_CHANNEL	0x01
+#define NAN_AVAIL_CHAN_LIST_NON_CONTIG_BW	0x02
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK	0xF0
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT	4
+#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES(_ctrl) (((_ctrl) & NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK) \
+	>> NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT)
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_list_s {
+	uint8 chan_info;
+	uint8 var[0];
+} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_list_t;
+
+/* define for chan_info */
+#define NAN_CHAN_OP_CLASS_MASK 0x01
+#define NAN_CHAN_NON_CONT_BW_MASK 0x02
+#define NAN_CHAN_RSVD_MASK 0x03
+#define NAN_CHAN_NUM_ENTRIES_MASK 0xF0
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s {
+	uint8 band[0];
+} BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t;
+
+/* Type of  Availability: committed */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL	        0x1
+/* Type of  Availability: potential */
+#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL	0x2
+/* Type of  Availability: conditional */
+#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL	        0x4
+/* Committed + Potential */
+#define NAN_ENTRY_CNTRL_TYPE_COMM_POTEN \
+	(NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL)
+/* Conditional + Potential */
+#define NAN_ENTRY_CNTRL_TYPE_COND_POTEN \
+	(NAN_ENTRY_CNTRL_TYPE_COND_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL)
+
+/* Type of  Availability */
+#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_MASK	0x07
+#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_SHIFT	0
+/* Usage Preference */
+#define NAN_ENTRY_CNTRL_USAGE_PREF_MASK		0x18
+#define NAN_ENTRY_CNTRL_USAGE_PREF_SHIFT	3
+/* Utilization */
+#define NAN_ENTRY_CNTRL_UTIL_MASK		0x1E0
+#define NAN_ENTRY_CNTRL_UTIL_SHIFT		5
+
+/* Time Bitmap Control field (section 5.7.18.2.3) */
+
+/* Reserved */
+#define NAN_TIME_BMP_CNTRL_RSVD_MASK	0x01
+#define NAN_TIME_BMP_CNTRL_RSVD_SHIFT	0
+/* Bitmap Len */
+#define NAN_TIME_BMP_CNTRL_BMP_LEN_MASK	0x7E
+#define NAN_TIME_BMP_CNTRL_BMP_LEN_SHIFT 1
+/* Bit Duration */
+#define NAN_TIME_BMP_CNTRL_BIT_DUR_MASK	0x380
+#define NAN_TIME_BMP_CNTRL_BIT_DUR_SHIFT	7
+/* Repeat Period */
+#define NAN_TIME_BMP_CNTRL_PERIOD_MASK	0x1C00
+#define NAN_TIME_BMP_CNTRL_PERIOD_SHIFT	10
+/* Start Offset */
+#define NAN_TIME_BMP_CNTRL_START_OFFSET_MASK	0x3FE000
+#define NAN_TIME_BMP_CNTRL_START_OFFSET_SHIFT	13
+/* Reserved */
+#define NAN_TIME_BMP_CNTRL_RESERVED_MASK	0xC00000
+#define NAN_TIME_BMP_CNTRL_RESERVED_SHIFT	22
+
+/* Time Bitmap Control field: Period */
+typedef enum
+{
+	NAN_TIME_BMP_CTRL_PERIOD_128TU = 1,
+	NAN_TIME_BMP_CTRL_PERIOD_256TU = 2,
+	NAN_TIME_BMP_CTRL_PERIOD_512TU = 3,
+	NAN_TIME_BMP_CTRL_PERIOD_1024TU = 4,
+	NAN_TIME_BMP_CTRL_PERIOD_2048U = 5,
+	NAN_TIME_BMP_CTRL_PERIOD_4096U = 6,
+	NAN_TIME_BMP_CTRL_PERIOD_8192U = 7
+} nan_time_bmp_ctrl_repeat_interval_t;
+
+enum
+{
+	NAN_TIME_BMP_BIT_DUR_16TU_IDX = 0,
+	NAN_TIME_BMP_BIT_DUR_32TU_IDX = 1,
+	NAN_TIME_BMP_BIT_DUR_64TU_IDX = 2,
+	NAN_TIME_BMP_BIT_DUR_128TU_IDX = 3
+};
+
+enum
+{
+	NAN_TIME_BMP_BIT_DUR_IDX_0 = 16,
+	NAN_TIME_BMP_BIT_DUR_IDX_1 = 32,
+	NAN_TIME_BMP_BIT_DUR_IDX_2 = 64,
+	NAN_TIME_BMP_BIT_DUR_IDX_3 = 128
+};
+
+enum
+{
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_1 = 128,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_2 = 256,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_3 = 512,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_4 = 1024,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_5 = 2048,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_6 = 4096,
+	NAN_TIME_BMP_CTRL_PERIOD_IDX_7 = 8192
+};
+
+/* Channel Entries List field */
+
+/* Type */
+#define NAN_CHAN_ENTRY_TYPE_MASK	0x01
+#define NAN_CHAN_ENTRY_TYPE_SHIFT	0
+/* Channel Entry Length Indication */
+#define NAN_CHAN_ENTRY_LEN_IND_MASK	0x02
+#define NAN_CHAN_ENTRY_LEN_IND_SHIFT	1
+/* Reserved */
+#define NAN_CHAN_ENTRY_RESERVED_MASK	0x0C
+#define NAN_CHAN_ENTRY_RESERVED_SHIFT	2
+/* Number of FAC Band or Channel Entries  */
+#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_MASK	0xF0
+#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_SHIFT	4
+
+#define NAN_CHAN_ENTRY_TYPE_BANDS	0
+#define NAN_CHAN_ENTRY_TYPE_OPCLASS_CHANS	1
+
+#define NAN_CHAN_ENTRY_BW_LT_80MHZ	0
+#define NAN_CHAN_ENTRY_BW_EQ_160MHZ	1
+
+/*
+ * NDL Attribute WFA Tech. Spec ver 1.0.r12 (section 10.7.19.2)
+ */
+#define NDL_ATTR_IM_MAP_ID_LEN		1
+#define NDL_ATTR_IM_TIME_BMP_CTRL_LEN	2
+#define NDL_ATTR_IM_TIME_BMP_LEN_LEN	1
+
+/*
+ * NDL Control field - Table xx
+ */
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT_MASK		0x01
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT		0
+#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_MASK		0x02
+#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT	1
+#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_MASK		0x04
+#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT	2
+#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_MASK		0x08
+#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT	3
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_MASK		0x10	/* max idle period */
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT	4
+#define NDL_ATTR_CTRL_NDL_TYPE_MASK				0x20	/* NDL type */
+#define NDL_ATTR_CTRL_NDL_TYPE_SHIFT			5
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_MASK		0xC0	/* NDL Setup Reason */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_SHIFT	6
+
+/* NDL setup Reason */
+#define NDL_ATTR_CTRL_NDL_TYPE_S_NDL	0x0	/* S-NDL */
+#define NDL_ATTR_CTRL_NDL_TYPE_P_NDL	0x1	/* P-NDL */
+
+/* NDL setup Reason */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_NDP_RANG	0x0	/* NDP or Ranging */
+#define NDL_ATTR_CTRL_NDL_SETUP_REASON_FSD_GAS	0x1	/* FSD using GAS */
+
+#define NAN_NDL_TYPE_MASK				0x0F
+#define NDL_ATTR_TYPE_STATUS_REQUEST	0x00
+#define NDL_ATTR_TYPE_STATUS_RESPONSE	0x01
+#define NDL_ATTR_TYPE_STATUS_CONFIRM	0x02
+#define NDL_ATTR_TYPE_STATUS_CONTINUED	0x00
+#define NDL_ATTR_TYPE_STATUS_ACCEPTED	0x10
+#define NDL_ATTR_TYPE_STATUS_REJECTED	0x20
+
+#define NAN_NDL_TYPE_CHECK(_ndl, x)	(((_ndl)->type_status & NAN_NDL_TYPE_MASK) == (x))
+#define NAN_NDL_REQUEST(_ndl)		(((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+								NDL_ATTR_TYPE_STATUS_REQUEST)
+#define NAN_NDL_RESPONSE(_ndl)		(((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+								NDL_ATTR_TYPE_STATUS_RESPONSE)
+#define NAN_NDL_CONFIRM(_ndl)		(((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
+								NDL_ATTR_TYPE_STATUS_CONFIRM)
+
+
+#define NAN_NDL_STATUS_SHIFT	4
+#define NAN_NDL_STATUS_MASK	0xF0
+#define NAN_NDL_CONT(_ndl)	(((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+								NDL_ATTR_TYPE_STATUS_CONTINUED)
+#define NAN_NDL_ACCEPT(_ndl)	(((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+								NDL_ATTR_TYPE_STATUS_ACCEPTED)
+#define NAN_NDL_REJECT(_ndl)	(((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
+								NDL_ATTR_TYPE_STATUS_REJECTED)
+
+#define NDL_ATTR_CTRL_NONE				0
+#define NDL_ATTR_CTRL_PEER_ID_PRESENT	(1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_IMSCHED_PRESENT	(1 << NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_NDC_PRESENT		(1 << NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_NDL_QOS_PRESENT	(1 << NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT)
+#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT	(1 << NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT)
+
+#define NA_NDL_IS_IMMUT_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_IMSCHED_PRESENT)
+#define NA_NDL_IS_PEER_ID_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_PEER_ID_PRESENT)
+#define NA_NDL_IS_MAX_IDLE_PER_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT)
+
+#define NDL_ATTR_PEERID_LEN				1
+#define NDL_ATTR_MAX_IDLE_PERIOD_LEN	2
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_attr_s {
+	uint8 id;		/* NAN_ATTR_NAN_NDL = 0x17 */
+	uint16 len;		/* Length of the fields in the attribute */
+	uint8 dialog_token;	/* Identify req and resp */
+	uint8 type_status;		/* Bits[3-0] type subfield, Bits[7-4] status subfield */
+	uint8 reason;		/* Identifies reject reason */
+	uint8 ndl_ctrl;		/* NDL control field */
+	uint8 var[];		/* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndl_attr_t;
+
+/*
+ * NDL QoS Attribute  WFA Tech. Spec ver r26
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_qos_attr_s {
+	uint8 id;		/* NAN_ATTR_NAN_NDL_QOS = 24 */
+	uint16 len;		/* Length of the attribute field following */
+	uint8 min_slots;	/* Min. number of FAW slots needed per DW interval */
+	uint16 max_latency;  /* Max interval between non-cont FAW */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndl_qos_attr_t;
+
+/* no preference to min time slots */
+#define NAN_NDL_QOS_MIN_SLOT_NO_PREF	0
+/* no preference to no. of slots between two non-contiguous slots */
+#define NAN_NDL_QOS_MAX_LAT_NO_PREF		0xFFFF
+
+/* Device Capability Attribute */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_dev_cap_s {
+	uint8 id;		/* 0x0F */
+	uint16 len;		/* Length */
+	uint8 map_id;		/* map id */
+	uint16 commit_dw_info;	/* Committed DW Info */
+	uint8 bands_supported;	/* Supported Bands */
+	uint8 op_mode;		/* Operation Mode */
+	uint8 num_antennas;	/* Bit 0-3 tx, 4-7 rx */
+	uint16 chan_switch_time;	/* Max channel switch time in us */
+	uint8 capabilities;	/* DFS Master, Extended key id etc */
+} BWL_POST_PACKED_STRUCT wifi_nan_dev_cap_t;
+
+/* Awake DW Info field format */
+
+/* 2.4GHz DW */
+#define NAN_DEV_CAP_AWAKE_DW_2G_MASK	0x07
+/* 5GHz DW */
+#define NAN_DEV_CAP_AWAKE_DW_5G_MASK	0x38
+/* Reserved */
+#define NAN_DEV_CAP_AWAKE_DW_RSVD_MASK	0xC0
+
+/* bit shift for dev cap */
+#define NAN_DEV_CAP_AWAKE_DW_2G_SHIFT	0
+#define NAN_DEV_CAP_AWAKE_DW_5G_SHIFT	3
+
+/* Device Capability Attribute Format */
+
+/* Committed DW Info field format */
+/* 2.4GHz DW */
+#define NAN_DEV_CAP_COMMIT_DW_2G_MASK	0x07
+#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_MASK	0x3C0
+/* 5GHz DW */
+#define NAN_DEV_CAP_COMMIT_DW_5G_MASK	0x38
+#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_MASK 0x3C00
+/* Reserved */
+#define NAN_DEV_CAP_COMMIT_DW_RSVD_MASK	0xC000
+/* Committed DW bit shift for dev cap */
+#define NAN_DEV_CAP_COMMIT_DW_2G_SHIFT	0
+#define NAN_DEV_CAP_COMMIT_DW_5G_SHIFT	3
+#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_SHIFT	6
+#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_SHIFT	10
+/* Operation Mode */
+#define NAN_DEV_CAP_OP_PHY_MODE_HT_ONLY		0x00
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT		0x01
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT_8080	0x02
+#define NAN_DEV_CAP_OP_PHY_MODE_VHT_160		0x04
+#define NAN_DEV_CAP_OP_PAGING_NDL		0x08
+
+#define NAN_DEV_CAP_OP_MODE_VHT_MASK		0x01
+#define NAN_DEV_CAP_OP_MODE_VHT8080_MASK	0x03
+#define NAN_DEV_CAP_OP_MODE_VHT160_MASK		0x05
+#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_MASK	0x08
+
+#define NAN_DEV_CAP_RX_ANT_SHIFT		4
+#define NAN_DEV_CAP_TX_ANT_MASK			0x0F
+#define NAN_DEV_CAP_RX_ANT_MASK			0xF0
+
+/* Device capabilities */
+
+/* DFS master capability */
+#define NAN_DEV_CAP_DFS_MASTER_MASK		0x01
+#define NAN_DEV_CAP_DFS_MASTER_SHIFT	0
+/* extended iv cap */
+#define NAN_DEV_CAP_EXT_KEYID_MASK		0x02
+#define NAN_DEV_CAP_EXT_KEYID_SHIFT		1
+
+/* Band IDs */
+enum {
+	NAN_BAND_ID_TVWS		= 0,
+	NAN_BAND_ID_SIG			= 1,	/* Sub 1 GHz */
+	NAN_BAND_ID_2G			= 2,	/* 2.4 GHz */
+	NAN_BAND_ID_3G			= 3,	/* 3.6 GHz */
+	NAN_BAND_ID_5G			= 4,	/* 4.9 & 5 GHz */
+	NAN_BAND_ID_60G			= 5
+};
+typedef uint8 nan_band_id_t;
+
+/*
+ * Unaligned schedule attribute section 10.7.19.6 spec. ver r15
+ */
+#define NAN_ULW_ATTR_CTRL_SCHED_ID_MASK 0x000F
+#define NAN_ULW_ATTR_CTRL_SCHED_ID_SHIFT 0
+#define NAN_ULW_ATTR_CTRL_SEQ_ID_MASK 0xFF00
+#define NAN_ULW_ATTR_CTRL_SEQ_ID_SHIFT 8
+
+#define NAN_ULW_OVWR_ALL_MASK 0x01
+#define NAN_ULW_OVWR_ALL_SHIFT 0
+#define NAN_ULW_OVWR_MAP_ID_MASK 0x1E
+#define NAN_ULW_OVWR_MAP_ID_SHIFT 1
+
+#define NAN_ULW_CTRL_TYPE_MASK 0x03
+#define NAN_ULW_CTRL_TYPE_SHIFT 0
+#define NAN_ULW_CTRL_TYPE(ctrl)	(ctrl & NAN_ULW_CTRL_TYPE_MASK)
+#define NAN_ULW_CTRL_CHAN_AVAIL_MASK 0x04
+#define NAN_ULW_CTRL_CHAN_AVAIL_SHIFT 2
+#define NAN_ULW_CTRL_CHAN_AVAIL(ctrl)	((ctrl & NAN_ULW_CTRL_CHAN_AVAIL_MASK) \
+	>> NAN_ULW_CTRL_CHAN_AVAIL_SHIFT)
+#define NAN_ULW_CTRL_RX_NSS_MASK 0x78
+#define NAN_ULW_CTRL_RX_NSS_SHIFT 3
+
+#define NAN_ULW_CTRL_TYPE_BAND		0
+#define NAN_ULW_CTRL_TYPE_CHAN_NOAUX	1
+#define NAN_ULW_CTRL_TYPE_CHAN_AUX	2
+
+#define NAN_ULW_CNT_DOWN_NO_EXPIRE	0xFF	/* ULWs don't end until next sched update */
+#define NAN_ULW_CNT_DOWN_CANCEL		0x0		/* cancel remaining ulws */
+
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ulw_attr_s {
+	uint8 id;
+	uint16 len;
+	uint16 ctrl;
+	uint32 start; /* low 32 bits of tsf */
+	uint32 dur;
+	uint32 period;
+	uint8 count_down;
+	uint8 overwrite;
+	/*
+	 * ulw[0] == optional field ULW control when present.
+	 * band ID or channel follows
+	 */
+	uint8 ulw_entry[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ulw_attr_t;
+
+/* NAN2 Management Frame (section 5.6) */
+
+/* Public action frame for NAN2 */
+typedef BWL_PRE_PACKED_STRUCT struct nan2_pub_act_frame_s {
+	/* NAN_PUB_AF_CATEGORY 0x04 */
+	uint8 category_id;
+	/* NAN_PUB_AF_ACTION 0x09 */
+	uint8 action_field;
+	/* NAN_OUI 0x50-6F-9A */
+	uint8 oui[DOT11_OUI_LEN];
+	/* NAN_OUI_TYPE TBD */
+	uint8 oui_type;
+	/* NAN_OUI_SUB_TYPE TBD */
+	uint8 oui_sub_type;
+	/* One or more NAN Attributes follow */
+	uint8 data[];
+} BWL_POST_PACKED_STRUCT nan2_pub_act_frame_t;
+
+#define NAN2_PUB_ACT_FRM_SIZE	(OFFSETOF(nan2_pub_act_frame_t, data))
+
+/* NAN Action Frame Subtypes */
+/* Subtype-0 is Reserved */
+#define NAN_MGMT_FRM_SUBTYPE_RESERVED		0
+/* NAN Ranging Request */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_REQ	1
+/* NAN Ranging Response */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_RESP	2
+/* NAN Ranging Termination */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_TERM	3
+/* NAN Ranging Report */
+#define NAN_MGMT_FRM_SUBTYPE_RANGING_RPT	4
+/* NDP Request */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_REQ		5
+/* NDP Response */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_RESP		6
+/* NDP Confirm */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_CONFIRM	7
+/* NDP Key Installment */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_KEY_INST	8
+/* NDP Termination */
+#define NAN_MGMT_FRM_SUBTYPE_NDP_END		9
+/* Schedule Request */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_REQ		10
+/* Schedule Response */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_RESP		11
+/* Schedule Confirm */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_CONF		12
+/* Schedule Update */
+#define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD		13
+
+/* Reason code defines */
+#define NAN_REASON_RESERVED			0x0
+#define NAN_REASON_UNSPECIFIED			0x1
+#define NAN_REASON_RESOURCE_LIMIT		0x2
+#define NAN_REASON_INVALID_PARAMS		0x3
+#define NAN_REASON_FTM_PARAM_INCAP		0x4
+#define NAN_REASON_NO_MOVEMENT			0x5
+#define NAN_REASON_INVALID_AVAIL		0x6
+#define NAN_REASON_IMMUT_UNACCEPT		0x7
+#define NAN_REASON_SEC_POLICY			0x8
+#define NAN_REASON_QOS_UNACCEPT			0x9
+#define NAN_REASON_NDP_REJECT			0xa
+#define NAN_REASON_NDL_UNACCEPTABLE		0xb
+
+/* nan 2.0 qos (not attribute) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_qos_s {
+	uint8 tid;		/* traffic identifier */
+	uint16 pkt_size;	/* service data pkt size */
+	uint8 data_rate;	/* mean data rate */
+	uint8 svc_interval;	/* max service interval */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndp_qos_t;
+
+/* NDP control bitmap defines */
+#define NAN_NDP_CTRL_CONFIRM_REQUIRED		0x01
+#define NAN_NDP_CTRL_SECURTIY_PRESENT		0x04
+#define NAN_NDP_CTRL_PUB_ID_PRESENT		0x08
+#define NAN_NDP_CTRL_RESP_NDI_PRESENT		0x10
+#define NAN_NDP_CTRL_SPEC_INFO_PRESENT		0x20
+#define NAN_NDP_CTRL_RESERVED			0xA0
+
+/* NDP Attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_attr_s {
+	uint8 id;		/* 0x10 */
+	uint16 len;		/* length */
+	uint8 dialog_token;	/* dialog token */
+	uint8 type_status;	/* bits 0-3 type, 4-7 status */
+	uint8 reason;		/* reason code */
+	struct ether_addr init_ndi;	/* ndp initiator's data interface address */
+	uint8 ndp_id;		/* ndp identifier (created by initiator) */
+	uint8 control;		/* ndp control field */
+	uint8 var[];		/* Optional fields follow */
+} BWL_POST_PACKED_STRUCT wifi_nan_ndp_attr_t;
+/* NDP attribute type and status macros */
+#define NAN_NDP_TYPE_MASK	0x0F
+#define NAN_NDP_TYPE_REQUEST	0x0
+#define NAN_NDP_TYPE_RESPONSE	0x1
+#define NAN_NDP_TYPE_CONFIRM	0x2
+#define NAN_NDP_TYPE_SECURITY	0x3
+#define NAN_NDP_TYPE_TERMINATE	0x4
+#define NAN_NDP_REQUEST(_ndp)	(((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_REQUEST)
+#define NAN_NDP_RESPONSE(_ndp)	(((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_RESPONSE)
+#define NAN_NDP_CONFIRM(_ndp)	(((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_CONFIRM)
+#define NAN_NDP_SECURITY_INST(_ndp)	(((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \
+									NAN_NDP_TYPE_SECURITY)
+#define NAN_NDP_TERMINATE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \
+									NAN_NDP_TYPE_TERMINATE)
+#define NAN_NDP_STATUS_SHIFT	4
+#define NAN_NDP_STATUS_MASK	0xF0
+#define NAN_NDP_STATUS_CONT	(0 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_STATUS_ACCEPT	(1 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_STATUS_REJECT	(2 << NAN_NDP_STATUS_SHIFT)
+#define NAN_NDP_CONT(_ndp)	(((_ndp)->type_status & NAN_NDP_STATUS_MASK) == NAN_NDP_STATUS_CONT)
+#define NAN_NDP_ACCEPT(_ndp)	(((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
+									NAN_NDP_STATUS_ACCEPT)
+#define NAN_NDP_REJECT(_ndp)	(((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
+									NAN_NDP_STATUS_REJECT)
+/* NDP Setup Status */
+#define NAN_NDP_SETUP_STATUS_OK		1
+#define NAN_NDP_SETUP_STATUS_FAIL	0
+#define NAN_NDP_SETUP_STATUS_REJECT	2
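+
+/*
+ * Illustrative sketch (not part of the original header): the type/status
+ * accessor macros above are typically applied to a received NDP attribute,
+ * e.g. to branch on the peer's answer. attr_buf is a hypothetical pointer
+ * to the start of a length-validated NDP attribute.
+ *
+ *	wifi_nan_ndp_attr_t *ndp = (wifi_nan_ndp_attr_t *)attr_buf;
+ *	if (NAN_NDP_RESPONSE(ndp) && NAN_NDP_ACCEPT(ndp)) {
+ *		// peer accepted the data path request
+ *	} else if (NAN_NDP_REJECT(ndp)) {
+ *		// consult ndp->reason for the NAN_REASON_* code
+ *	}
+ */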
+
+/* Rng setup attribute type and status macros */
+#define NAN_RNG_TYPE_MASK	0x0F
+#define NAN_RNG_TYPE_REQUEST	0x0
+#define NAN_RNG_TYPE_RESPONSE	0x1
+#define NAN_RNG_TYPE_TERMINATE	0x2
+
+#define NAN_RNG_STATUS_SHIFT	4
+#define NAN_RNG_STATUS_MASK	0xF0
+#define NAN_RNG_STATUS_ACCEPT	(0 << NAN_RNG_STATUS_SHIFT)
+#define NAN_RNG_STATUS_REJECT	(1 << NAN_RNG_STATUS_SHIFT)
+
+#define NAN_RNG_ACCEPT(_rsua)	(((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \
+									NAN_RNG_STATUS_ACCEPT)
+#define NAN_RNG_REJECT(_rsua)	(((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \
+									NAN_RNG_STATUS_REJECT)
+
+/* schedule entry */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sched_entry_s {
+	uint8 map_id;		/* map id */
+	uint16 tbmp_ctrl;	/* time bitmap control */
+	uint8 tbmp_len;		/* time bitmap len */
+	uint8 tbmp[];		/* time bitmap - Optional */
+} BWL_POST_PACKED_STRUCT wifi_nan_sched_entry_t;
+
+#define NAN_SCHED_ENTRY_MIN_SIZE	OFFSETOF(wifi_nan_sched_entry_t, tbmp)
+#define NAN_SCHED_ENTRY_SIZE(_entry)	(NAN_SCHED_ENTRY_MIN_SIZE + (_entry)->tbmp_len)
+
+/* for dev cap, element container etc. */
+#define NAN_DEV_ELE_MAPID_CTRL_MASK		0x1
+#define NAN_DEV_ELE_MAPID_CTRL_SHIFT	0
+#define NAN_DEV_ELE_MAPID_MASK		0x1E
+#define NAN_DEV_ELE_MAPID_SHIFT		1
+
+#define NAN_DEV_ELE_MAPID_CTRL_SET(_mapid_field, value) \
+	do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_CTRL_MASK; \
+		(_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_CTRL_SHIFT) & \
+		NAN_DEV_ELE_MAPID_CTRL_MASK); \
+	} while (0);
+
+#define NAN_DEV_ELE_MAPID_CTRL_GET(_mapid_field) \
+	(((_mapid_field) & NAN_DEV_ELE_MAPID_CTRL_MASK) >> \
+	NAN_DEV_ELE_MAPID_CTRL_SHIFT)
+
+#define NAN_DEV_ELE_MAPID_SET(_mapid_field, value) \
+	do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_MASK; \
+		(_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_SHIFT) & \
+		NAN_DEV_ELE_MAPID_MASK); \
+	} while (0);
+
+#define NAN_DEV_ELE_MAPID_GET(_mapid_field) \
+	(((_mapid_field) & NAN_DEV_ELE_MAPID_MASK) >> \
+	NAN_DEV_ELE_MAPID_SHIFT)
+
+/* schedule entry map id handling */
+#define NAN_SCHED_ENTRY_MAPID_MASK		0x0F
+#define NAN_SCHED_ENTRY_MAPID_SHIFT		0
+
+#define NAN_SCHED_ENTRY_MAPID_SET(_mapid_field, value) \
+	do {(_mapid_field) &= ~NAN_SCHED_ENTRY_MAPID_MASK; \
+		(_mapid_field) |= ((value << NAN_SCHED_ENTRY_MAPID_SHIFT) & \
+		NAN_SCHED_ENTRY_MAPID_MASK); \
+	} while (0);
+
+#define NAN_SCHED_ENTRY_MAPID_GET(_mapid_field) \
+	(((_mapid_field) & NAN_SCHED_ENTRY_MAPID_MASK) >> \
+	NAN_SCHED_ENTRY_MAPID_SHIFT)
+
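+/*
+ * Illustrative sketch (not part of the original header): the SET/GET helpers
+ * above read-modify-write the map id bits of a schedule entry, e.g. (entry is
+ * a hypothetical wifi_nan_sched_entry_t pointer):
+ *
+ *	NAN_SCHED_ENTRY_MAPID_SET(entry->map_id, 2);
+ *	uint8 mapid = NAN_SCHED_ENTRY_MAPID_GET(entry->map_id);
+ */
+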
+/* NDC attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndc_attr_s {
+	uint8 id;
+	uint16 len;
+	uint8 ndc_id[NAN_DATA_NDC_ID_SIZE];
+	uint8 attr_cntrl;
+	uint8 var[];
+} BWL_POST_PACKED_STRUCT wifi_nan_ndc_attr_t;
+
+/* Attribute control subfield of NDC attr */
+/* Proposed NDC */
+#define NAN_NDC_ATTR_PROPOSED_NDC_MASK	0x1
+#define NAN_NDC_ATTR_PROPOSED_NDC_SHIFT 0
+
+/* get & set */
+#define NAN_NDC_GET_PROPOSED_FLAG(_attr)	\
+	(((_attr)->attr_cntrl & NAN_NDC_ATTR_PROPOSED_NDC_MASK) >>	\
+	NAN_NDC_ATTR_PROPOSED_NDC_SHIFT)
+#define NAN_NDC_SET_PROPOSED_FLAG(_attr, value) \
+	do {((_attr)->attr_cntrl &= ~NAN_NDC_ATTR_PROPOSED_NDC_MASK); \
+		((_attr)->attr_cntrl |=	\
+		(((value) << NAN_NDC_ATTR_PROPOSED_NDC_SHIFT) & NAN_NDC_ATTR_PROPOSED_NDC_MASK)); \
+	} while (0)
+
+/* Service descriptor extension attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_desc_ext_attr_s {
+	/* Attribute ID - 0x11 */
+	uint8 id;
+	/* Length of the following fields in the attribute */
+	uint16 len;
+	/* Instance id of associated service descriptor attribute */
+	uint8 instance_id;
+	/* SDE control field */
+	uint16 control;
+	/* range limit, svc upd indicator etc. */
+	uint8 var[];
+} BWL_POST_PACKED_STRUCT wifi_nan_svc_desc_ext_attr_t;
+
+#define NAN_SDE_ATTR_MIN_LEN OFFSETOF(wifi_nan_svc_desc_ext_attr_t, var)
+
+/* SDEA control field bit definitions and access macros */
+#define NAN_SDE_CF_FSD_REQUIRED		(1 << 0)
+#define NAN_SDE_CF_FSD_GAS		(1 << 1)
+#define NAN_SDE_CF_DP_REQUIRED		(1 << 2)
+#define NAN_SDE_CF_DP_TYPE		(1 << 3)
+#define NAN_SDE_CF_MULTICAST_TYPE	(1 << 4)
+#define NAN_SDE_CF_QOS_REQUIRED		(1 << 5)
+#define NAN_SDE_CF_SECURITY_REQUIRED	(1 << 6)
+#define NAN_SDE_CF_RANGING_REQUIRED	(1 << 7)
+#define NAN_SDE_CF_RANGE_PRESENT	(1 << 8)
+#define NAN_SDE_CF_SVC_UPD_IND_PRESENT	(1 << 9)
+#define NAN_SDE_FSD_REQUIRED(_sde)	((_sde)->control & NAN_SDE_CF_FSD_REQUIRED)
+#define NAN_SDE_FSD_GAS(_sde)		((_sde)->control & NAN_SDE_CF_FSD_GAS)
+#define NAN_SDE_DP_REQUIRED(_sde)	((_sde)->control & NAN_SDE_CF_DP_REQUIRED)
+#define NAN_SDE_DP_MULTICAST(_sde)	((_sde)->control & NAN_SDE_CF_DP_TYPE)
+#define NAN_SDE_MULTICAST_M_TO_M(_sde)	((_sde)->control & NAN_SDE_CF_MULTICAST_TYPE)
+#define NAN_SDE_QOS_REQUIRED(_sde)	((_sde)->control & NAN_SDE_CF_QOS_REQUIRED)
+#define NAN_SDE_SECURITY_REQUIRED(_sde)	((_sde)->control & NAN_SDE_CF_SECURITY_REQUIRED)
+#define NAN_SDE_RANGING_REQUIRED(_sde)	((_sde)->control & NAN_SDE_CF_RANGING_REQUIRED)
+#define NAN_SDE_RANGE_PRESENT(_sde)	((_sde)->control & NAN_SDE_CF_RANGE_PRESENT)
+#define NAN_SDE_SVC_UPD_IND_PRESENT(_sde)	((_sde)->control & NAN_SDE_CF_SVC_UPD_IND_PRESENT)
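+
+/*
+ * Illustrative sketch (not part of the original header): a parser would
+ * typically consult these flag helpers after length-validating the attribute.
+ * buf is a hypothetical pointer to the start of the SDEA.
+ *
+ *	wifi_nan_svc_desc_ext_attr_t *sde = (wifi_nan_svc_desc_ext_attr_t *)buf;
+ *	if (NAN_SDE_SECURITY_REQUIRED(sde)) {
+ *		// a cipher suite must be negotiated before NDP setup
+ *	}
+ *	if (NAN_SDE_RANGE_PRESENT(sde)) {
+ *		// range limit fields follow in sde->var[]
+ *	}
+ */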
+
+/* nan2 security */
+
+/*
+ * Cipher suite information Attribute.
+ * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.2)
+ */
+#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_4     0
+#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_16    (1 << 0)
+
+/* enum security algo.
+*/
+enum nan_sec_csid {
+	NAN_SEC_ALGO_NONE = 0,
+	NAN_SEC_ALGO_NCS_SK_CCM_128 = 1,     /* CCMP 128 */
+	NAN_SEC_ALGO_NCS_SK_GCM_256 = 2,     /* GCMP 256 */
+	NAN_SEC_ALGO_LAST = 3
+};
+typedef int8 nan_sec_csid_e;
+
+/* nan2 cipher suite attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_field_s {
+	uint8 cipher_suite_id;
+	uint8 inst_id;	/* Instance Id */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_field_t;
+
+/* nan2 cipher suite information attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_info_attr_s {
+	uint8 attr_id;	/* 0x22 - NAN_ATTR_CIPHER_SUITE_INFO */
+	uint16 len;
+	uint8 capabilities;
+	uint8 var[];	/* cipher suite list */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_info_attr_t;
+
+/*
+ * Security context identifier attribute
+ * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.4)
+ */
+
+#define NAN_SEC_CTX_ID_TYPE_PMKID   (1 << 0)
+
+/* nan2 security context identifier attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_field_s {
+	uint16 sec_ctx_id_type_len;	/* length of security ctx identifier */
+	uint8 sec_ctx_id_type;
+	uint8 inst_id;	/* Instance Id */
+	uint8 var[];	/* security ctx identifier */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_field_t;
+
+/* nan2 security context identifier info attribute field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_info_attr_s {
+	uint8 attr_id;	/* 0x23 - NAN_ATTR_SEC_CTX_ID_INFO */
+	uint16 len;
+	uint8 var[];	/* security context identifier  list */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_info_attr_t;
+
+/*
+ * Nan shared key descriptor attribute
+ * WFA Tech. Spec ver 23
+ */
+
+#define NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN	8
+#define NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN	32
+
+/* nan shared key descriptor attr field */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ncssk_key_desc_attr_s {
+	uint8 attr_id;	/* 0x24 - NAN_ATTR_SHARED_KEY_DESC */
+	uint16 len;
+	uint8 inst_id;  /* Publish service instance ID */
+	uint8 desc_type;
+	uint16 key_info;
+	uint16 key_len;
+	uint8 key_replay_cntr[NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN];
+	uint8 key_nonce[NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN];
+	uint8 reserved[32];	/* EAPOL IV + Key RSC + Rsvd fields in EAPOL Key */
+	uint8 mic[];  /* mic + key data len + key data */
+} BWL_POST_PACKED_STRUCT wifi_nan_sec_ncssk_key_desc_attr_t;
+
+/* Key Info fields */
+#define NAN_SEC_NCSSK_DESC_MASK			0x7
+#define NAN_SEC_NCSSK_DESC_SHIFT		0
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK	0x8
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT	3
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK	0x40
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT	6
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_MASK		0x80
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT	7
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_MASK		0x100
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT	8
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_MASK		0x200
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT	9
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_MASK		0x400
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT	10
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_MASK		0x800
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT	11
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK	0x1000
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT	12
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK	0x2000
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT	13
+
+/* Key Info get & set macros */
+#define NAN_SEC_NCSSK_KEY_DESC_VER_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_MASK) >> NAN_SEC_NCSSK_DESC_SHIFT)
+#define NAN_SEC_NCSSK_KEY_DESC_VER_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK) >> NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK) >> \
+	NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ACK_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ACK_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ACK_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_ACK_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_MIC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_MIC_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_MIC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SEC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SEC_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_SEC_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ERR_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ERR_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ERR_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_ERR_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_REQ_MASK) >> NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_REQ_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_REQ_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_REQ_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK) >> \
+	NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK);} while (0)
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_GET(_key_info)	\
+	(((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK) >> \
+	NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT)
+#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SET(_val, _key_info)	\
+	do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK; \
+		(_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT) & \
+		NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK);} while (0)
+
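+/*
+ * Illustrative sketch (not part of the original header): when building a key
+ * descriptor, key_info is composed with the SET macros above. Note the macros
+ * take the value first and the destination field second.
+ *
+ *	uint16 key_info = 0;
+ *	NAN_SEC_NCSSK_KEY_DESC_VER_SET(NAN_SEC_NCSSK_KEY_DESC_VER, key_info);
+ *	NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE, key_info);
+ *	NAN_SEC_NCSSK_DESC_KEY_MIC_SET(1, key_info);	// MIC present
+ */
+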
+#define NAN_SEC_NCSSK_IEEE80211_KDESC_TYPE	2	/* IEEE 802.11 Key Descriptor Type */
+#define NAN_SEC_NCSSK_KEY_DESC_VER			0	/* NCSSK-128/256 */
+#define NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE		1	/* Pairwise */
+#define NAN_SEC_NCSSK_LIFETIME_KDE			7	/* Lifetime KDE type */
+
+/* TODO include MTK related attributes */
+
+/* NAN Multicast service group(NMSG) definitions */
+/* Length of NMSG_ID -- (NDI * 2^16 + pub_id * 2^8 + Random_factor) */
+#define NAN_NMSG_ID_LEN                         8
+
+#define NAN_NMSG_TYPE_MASK			0x0F
+#define NMSG_ATTR_TYPE_STATUS_REQUEST		0x00
+#define NMSG_ATTR_TYPE_STATUS_RESPONSE		0x01
+#define NMSG_ATTR_TYPE_STATUS_CONFIRM		0x02
+#define NMSG_ATTR_TYPE_STATUS_SEC_INSTALL	0x03
+#define NMSG_ATTR_TYPE_STATUS_TERMINATE		0x04
+#define NMSG_ATTR_TYPE_STATUS_IMPLICIT_ENROL	0x05
+
+#define NMSG_ATTR_TYPE_STATUS_CONTINUED	        0x00
+#define NMSG_ATTR_TYPE_STATUS_ACCEPTED	        0x10
+#define NMSG_ATTR_TYPE_STATUS_REJECTED	        0x20
+
+#define NMSG_CTRL_PUB_ID_PRESENT                0x0001
+#define NMSG_CTRL_NMSG_ID_PRESENT               0x0002
+#define NMSG_CTRL_SECURITY_PRESENT              0x0004
+#define NMSG_CTRL_MANY_TO_MANY_PRESENT          0x0008
+#define NMSG_CTRL_SVC_INFO_PRESENT              0x0010
+
+/* NMSG attribute */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_nmsg_attr_s {
+	uint8 id; /* Attribute ID - 0x11 */
+	uint16 len; /* Length including pubid, NMSGID and svc info */
+	uint8 dialog_token;
+	uint8 type_status; /* Type and Status field byte */
+	uint8 reason_code;
+	uint8 mc_id; /* Multicast id similar to NDPID */
+	uint8 nmsg_ctrl; /* NMSG control field */
+	/* Optional publish id, NMSGID and svc info are included in var[] */
+	uint8 var[0];
+} BWL_POST_PACKED_STRUCT wifi_nan_nmsg_attr_t;
+
+#define NMSG_ATTR_MCAST_SCHED_MAP_ID_MASK     0x1E
+#define NMSG_ATTR_MCAST_SCHED_MAP_ID_SHIFT    1
+#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_MASK   0x20
+#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_SHIFT  5
+
+/* NAN Multicast Schedule attribute structure */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_mcast_sched_attr_s {
+	uint8 id; /* 0x16 */
+	uint16 len;
+	uint8 nmsg_id[NAN_NMSG_ID_LEN];
+	uint8 attr_cntrl;
+	uint8 sched_own[ETHER_ADDR_LEN];
+	uint8 var[]; /* multicast sched entry list (schedule_entry_list) */
+} BWL_POST_PACKED_STRUCT wifi_nan_mcast_sched_attr_t;
+
+
+/* FAC Channel Entry  (section 10.7.19.1.5) */
+typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_fac_chan_entry_s {
+	uint8 oper_class;		/* Operating Class */
+	uint16 chan_bitmap;		/* Channel Bitmap */
+	uint8 primary_chan_bmp;		/* Primary Channel Bitmap */
+	uint16 aux_chan;			/* Auxiliary Channel bitmap */
+} BWL_POST_PACKED_STRUCT wifi_nan_fac_chan_entry_t;
+
+/* TODO move this from nan.h */
+#define NAN_ALL_NAN_MGMT_FRAMES (NAN_FRM_SCHED_AF | \
+	NAN_FRM_NDP_AF | NAN_FRM_NDL_AF | \
+	NAN_FRM_DISC_BCN | NAN_FRM_SYNC_BCN | \
+	NAN_FRM_SVC_DISC | NAN_FRM_RNG_REQ_AF | \
+	NAN_FRM_RNG_RESP_AF | NAN_FRM_RNG_REPORT_AF | \
+	NAN_FRM_RNG_TERM_AF)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NAN_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 0000000..082b301
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,260 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: osl.h 642189 2016-06-07 21:12:50Z $
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+#include <osl_decl.h>
+
+#define OSL_PKTTAG_SZ	32 /* Size of PktTag */
+
+/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+/* Drivers use REGOPSSET() to register register read/write functions */
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void  (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
+
+#if defined(WL_UNITTEST)
+#include <utest_osl.h>
+#else
+#include <linux_osl.h>
+#endif 
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#endif
+
+#define PKTCTFMAP(osh, p)		BCM_REFERENCE(osh)
+
+/* --------------------------------------------------------------------------
+** Register manipulation macros.
+*/
+
+#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif   /* !AND_REG */
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif   /* !OR_REG */
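+
+/*
+ * Illustrative sketch (not part of the original header): these helpers wrap
+ * the OSL R_REG/W_REG primitives for read-modify-write register access.
+ * regs, CTRL_* and mode are hypothetical names used only for illustration.
+ *
+ *	OR_REG(osh, &regs->control, CTRL_ENABLE);		// set bits
+ *	AND_REG(osh, &regs->control, ~CTRL_RESET);		// clear bits
+ *	SET_REG(osh, &regs->control, CTRL_MODE_MASK, mode);	// replace a field
+ */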
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif /* OSL_SYSUPTIME */
+
+#if !defined(OSL_SYSUPTIME_US)
+#define OSL_SYSUPTIME_US() (0)
+#endif /* OSL_SYSUPTIME_US */
+
+#ifndef OSL_SYS_HALT
+#define OSL_SYS_HALT()	do {} while (0)
+#endif
+
+#ifndef OSL_MEM_AVAIL
+#define OSL_MEM_AVAIL()	(0xffffffff)
+#endif
+
+#if !(defined(PKTC) || defined(PKTC_DONGLE))
+
+#ifndef OSL_OBFUSCATE_BUF
+/* For security reasons, printing raw pointers is not allowed.
+ * Some OSLs map OSL_OBFUSCATE_BUF() to an OS-specific obfuscation API.
+ * If OSL_OBFUSCATE_BUF() is not implemented by the OSL, default to
+ * printing the input pointer.
+ */
+#define OSL_OBFUSCATE_BUF(x) (x)
+#endif /* OSL_OBFUSCATE_BUF */
+
+#define	PKTCGETATTR(skb)	(0)
+#define	PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
+#define	PKTCCLRATTR(skb)	BCM_REFERENCE(skb)
+#define	PKTCCNT(skb)		(1)
+#define	PKTCLEN(skb)		PKTLEN(NULL, skb)
+#define	PKTCGETFLAGS(skb)	(0)
+#define	PKTCSETFLAGS(skb, f)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAGS(skb)	BCM_REFERENCE(skb)
+#define	PKTCFLAGS(skb)		(0)
+#define	PKTCSETCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCINCRCNT(skb)	BCM_REFERENCE(skb)
+#define	PKTCADDCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCSETLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCADDLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCSETFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCLINK(skb)		NULL
+#define	PKTSETCLINK(skb, x)	BCM_REFERENCE(skb)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define	PKTCFREE		PKTFREE
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} \
+} while (0)
+#endif /* !(PKTC || PKTC_DONGLE) */
+
+#if !(defined(HNDCTF) || defined(PKTC_TX_DONGLE) || defined(PKTC))
+#define PKTSETCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTCLRCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTISCHAINED(skb)		FALSE
+#endif
+
+/* Lbuf with fraglist */
+#ifndef PKTFRAGPKTID
+#define PKTFRAGPKTID(osh, lb)		(0)
+#endif
+#ifndef PKTSETFRAGPKTID
+#define PKTSETFRAGPKTID(osh, lb, id)	BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGTOTNUM
+#define PKTFRAGTOTNUM(osh, lb)		(0)
+#endif
+#ifndef PKTSETFRAGTOTNUM
+#define PKTSETFRAGTOTNUM(osh, lb, tot)	BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGTOTLEN
+#define PKTFRAGTOTLEN(osh, lb)		(0)
+#endif
+#ifndef PKTSETFRAGTOTLEN
+#define PKTSETFRAGTOTLEN(osh, lb, len)	BCM_REFERENCE(osh)
+#endif
+#ifndef PKTIFINDEX
+#define PKTIFINDEX(osh, lb)		(0)
+#endif
+#ifndef PKTSETIFINDEX
+#define PKTSETIFINDEX(osh, lb, idx)	BCM_REFERENCE(osh)
+#endif
+#ifndef PKTGETLF
+#define	PKTGETLF(osh, len, send, lbuf_type)	(0)
+#endif
+
+/* in rx path, reuse totlen as used len */
+#ifndef PKTFRAGUSEDLEN
+#define PKTFRAGUSEDLEN(osh, lb)			(0)
+#endif
+#ifndef PKTSETFRAGUSEDLEN
+#define PKTSETFRAGUSEDLEN(osh, lb, len)		BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGLEN
+#define PKTFRAGLEN(osh, lb, ix)			(0)
+#endif
+#ifndef PKTSETFRAGLEN
+#define PKTSETFRAGLEN(osh, lb, ix, len)		BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGDATA_LO
+#define PKTFRAGDATA_LO(osh, lb, ix)		(0)
+#endif
+#ifndef PKTSETFRAGDATA_LO
+#define PKTSETFRAGDATA_LO(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+#endif
+#ifndef PKTFRAGDATA_HI
+#define PKTFRAGDATA_HI(osh, lb, ix)		(0)
+#endif
+#ifndef PKTSETFRAGDATA_HI
+#define PKTSETFRAGDATA_HI(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+#endif
+
+/* RX FRAG */
+#ifndef PKTISRXFRAG
+#define PKTISRXFRAG(osh, lb)    	(0)
+#endif
+#ifndef PKTSETRXFRAG
+#define PKTSETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+#endif
+#ifndef PKTRESETRXFRAG
+#define PKTRESETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+#endif
+
+/* TX FRAG */
+#ifndef PKTISTXFRAG
+#define PKTISTXFRAG(osh, lb)		(0)
+#endif
+#ifndef PKTSETTXFRAG
+#define PKTSETTXFRAG(osh, lb)		BCM_REFERENCE(osh)
+#endif
+
+/* Need Rx completion used for AMPDU reordering */
+#ifndef PKTNEEDRXCPL
+#define PKTNEEDRXCPL(osh, lb)           (TRUE)
+#endif
+#ifndef PKTSETNORXCPL
+#define PKTSETNORXCPL(osh, lb)          BCM_REFERENCE(osh)
+#endif
+#ifndef PKTRESETNORXCPL
+#define PKTRESETNORXCPL(osh, lb)        BCM_REFERENCE(osh)
+#endif
+#ifndef PKTISFRAG
+#define PKTISFRAG(osh, lb)		(0)
+#endif
+#ifndef PKTFRAGISCHAINED
+#define PKTFRAGISCHAINED(osh, i)	(0)
+#endif
+/* TRIM Tail bytes from lfrag */
+#ifndef PKTFRAG_TRIM_TAILBYTES
+#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type)	PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+#endif
+#ifndef PKTISHDRCONVTD
+#define PKTISHDRCONVTD(osh, lb)		(0)
+#endif
+
+#ifdef BCM_SECURE_DMA
+#define SECURE_DMA_ENAB(osh) (1)
+#else
+
+#define SECURE_DMA_ENAB(osh) (0)
+#ifndef BCMDMA64OSL
+#define	SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) ((0)))
+#else
+#define	SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) {(0)})
+#endif
+#define	SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) 0
+#ifndef BCMDMA64OSL
+#define	SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) ((0)))
+#else
+#define	SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) {(0)})
+#endif
+#define	SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset)
+#define	SECURE_DMA_UNMAP_ALL(osh, pcma)
+
+#endif /* BCM_SECURE_DMA */
+
+
+#ifndef ROMMABLE_ASSERT
+#define ROMMABLE_ASSERT(exp) ASSERT(exp)
+#endif /* ROMMABLE_ASSERT */
+
+#endif	/* _osl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/osl_decl.h b/drivers/net/wireless/bcmdhd/include/osl_decl.h
new file mode 100644
index 0000000..977a1ca
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl_decl.h
@@ -0,0 +1,37 @@
+/*
+ * osl forward declarations
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: osl_decl.h 596126 2015-10-29 19:53:48Z $
+ */
+
+#ifndef _osl_decl_h_
+#define _osl_decl_h_
+
+/* osl handle type forward declaration */
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+extern unsigned int lmtest; /* low memory test */
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/osl_ext.h b/drivers/net/wireless/bcmdhd/include/osl_ext.h
new file mode 100644
index 0000000..2503f6a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl_ext.h
@@ -0,0 +1,697 @@
+/*
+ * OS Abstraction Layer Extension - the APIs defined by the "extension" API
+ * are only supported by a subset of all operating systems.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: osl_ext.h 611959 2016-01-12 15:23:56Z $
+ */
+
+#ifndef _osl_ext_h_
+#define _osl_ext_h_
+
+
+/* ---- Include Files ---------------------------------------------------- */
+
+#if defined(TARGETOS_symbian)
+	#include <e32def.h>
+	#include <symbian_osl_ext.h>
+#elif defined(THREADX)
+	#include <threadx_osl_ext.h>
+#else
+	#define OSL_EXT_DISABLED
+#endif
+
+/* Include base operating system abstraction. */
+#include <osl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+/* -----------------------------------------------------------------------
+ * Generic OS types.
+ */
+typedef enum osl_ext_status_t
+{
+	OSL_EXT_SUCCESS,
+	OSL_EXT_ERROR,
+	OSL_EXT_TIMEOUT
+
+} osl_ext_status_t;
+#define OSL_EXT_STATUS_DECL(status)	osl_ext_status_t status;
+
+#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1))
+typedef unsigned int osl_ext_time_ms_t;
+
+typedef unsigned int osl_ext_event_bits_t;
+
+typedef unsigned int osl_ext_interrupt_state_t;
+
+/* -----------------------------------------------------------------------
+ * Timers.
+ */
+typedef enum
+{
+	/* One-shot timer. */
+	OSL_EXT_TIMER_MODE_ONCE,
+
+	/* Periodic timer. */
+	OSL_EXT_TIMER_MODE_REPEAT
+
+} osl_ext_timer_mode_t;
+
+/* User registered callback and parameter to invoke when timer expires. */
+typedef void* osl_ext_timer_arg_t;
+typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg);
+
+
+/* -----------------------------------------------------------------------
+ * Tasks.
+ */
+
+/* Task entry argument. */
+typedef void* osl_ext_task_arg_t;
+
+/* Task entry function. */
+typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg);
+
+/* Abstract task priority levels. */
+typedef enum
+{
+	OSL_EXT_TASK_IDLE_PRIORITY,
+	OSL_EXT_TASK_LOW_PRIORITY,
+	OSL_EXT_TASK_LOW_NORMAL_PRIORITY,
+	OSL_EXT_TASK_NORMAL_PRIORITY,
+	OSL_EXT_TASK_HIGH_NORMAL_PRIORITY,
+	OSL_EXT_TASK_HIGHEST_PRIORITY,
+	OSL_EXT_TASK_TIME_CRITICAL_PRIORITY,
+
+	/* This must be last. */
+	OSL_EXT_TASK_NUM_PRIORITES
+} osl_ext_task_priority_t;
+
+
+#ifndef OSL_EXT_DISABLED
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+/* --------------------------------------------------------------------------
+** Semaphore
+*/
+
+/****************************************************************************
+* Function:   osl_ext_sem_create
+*
+* Purpose:    Creates a counting semaphore object, which can subsequently be
+*             used for thread notification.
+*
+* Parameters: name     (in)  Name to assign to the semaphore (must be unique).
+*             init_cnt (in)  Initial count that the semaphore should have.
+*             sem      (out) Newly created semaphore.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was created successfully, or an
+*             error code if the semaphore could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function:   osl_ext_sem_delete
+*
+* Purpose:    Destroys a previously created semaphore object.
+*
+* Parameters: sem (mod) Semaphore object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an
+*             error code if the semaphore could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function:   osl_ext_sem_give
+*
+* Purpose:    Increments the count associated with the semaphore. This will
+*             cause one thread blocked on a take to wake up.
+*
+* Parameters: sem (mod) Semaphore object to give.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was given successfully, or an
+*             error code if the semaphore could not be given.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function:   osl_ext_sem_take
+*
+* Purpose:    Decrements the count associated with the semaphore. If the count
+*             is less than zero, then the calling task will become blocked until
+*             another thread does a give on the semaphore. This function will only
+*             block the calling thread for timeout_msec milliseconds, before
+*             returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: sem          (mod) Semaphore object to take.
+*             timeout_msec (in)  Number of milliseconds to wait for the
+*                                semaphore to enter a state where it can be
+*                                taken.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was taken successfully, or an
+*             error code if the semaphore could not be taken.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec);
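+
+/*
+ * Illustrative usage sketch (not part of the original header): a simple
+ * producer/consumer hand-off with the semaphore API above. work_sem and
+ * WORK_TIMEOUT_MS are hypothetical names.
+ *
+ *	osl_ext_sem_t work_sem;
+ *	osl_ext_sem_create("work", 0, &work_sem);	// start with count 0
+ *
+ *	// producer: signal that work is available
+ *	osl_ext_sem_give(&work_sem);
+ *
+ *	// consumer: block up to WORK_TIMEOUT_MS waiting for work
+ *	if (osl_ext_sem_take(&work_sem, WORK_TIMEOUT_MS) == OSL_EXT_TIMEOUT) {
+ *		// no work arrived in time
+ *	}
+ */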
+
+
+/* --------------------------------------------------------------------------
+** Mutex
+*/
+
+/****************************************************************************
+* Function:   osl_ext_mutex_create
+*
+* Purpose:    Creates a mutex object, which can subsequently be used to control
+*             mutual exclusion of resources.
+*
+* Parameters: name  (in)  Name to assign to the mutex (must be unique).
+*             mutex (out) Mutex object to initialize.
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was created successfully, or an
+*             error code if the mutex could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function:   osl_ext_mutex_delete
+*
+* Purpose:    Destroys a previously created mutex object.
+*
+* Parameters: mutex (mod) Mutex object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was deleted successfully, or an
+*             error code if the mutex could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function:   osl_ext_mutex_acquire
+*
+* Purpose:    Acquires the indicated mutual exclusion object. If the object is
+*             currently acquired by another task, then this function will wait
+*             for timeout_msec milli-seconds before returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: mutex        (mod) Mutex object to acquire.
+*             timeout_msec (in)  Number of milliseconds to wait for the mutex.
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was acquired successfully, or an
+*             error code if the mutex could not be acquired.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec);
+
+/****************************************************************************
+* Function:   osl_ext_mutex_release
+*
+* Purpose:    Releases the indicated mutual exclusion object. This makes it
+*             available for another task to acquire.
+*
+* Parameters: mutex (mod) Mutex object to release.
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was released successfully, or an
+*             error code if the mutex could not be released.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex);
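+
+/*
+ * Illustrative usage sketch (not part of the original header): guarding shared
+ * state with the mutex API above (cfg_mutex is a hypothetical osl_ext_mutex_t).
+ *
+ *	if (osl_ext_mutex_acquire(&cfg_mutex, OSL_EXT_TIME_FOREVER) ==
+ *	    OSL_EXT_SUCCESS) {
+ *		// touch the shared state
+ *		osl_ext_mutex_release(&cfg_mutex);
+ *	}
+ */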
+
+
+/* --------------------------------------------------------------------------
+** Timers
+*/
+
+/****************************************************************************
+* Function:   osl_ext_timer_create
+*
+* Purpose:    Creates a timer object.
+*
+* Parameters: name (in)         Name of timer.
+*             timeout_msec (in) Invoke callback after this number of milliseconds.
+*             mode (in)         One-shot or periodic timer.
+*             func (in)         Callback function to invoke on timer expiry.
+*             arg (in)          Argument to callback function.
+*             timer (out)       Timer object to create.
+*
+* Note: The callback function runs in interrupt context. The application is
+*       responsible for providing a context switch for the callback if required.
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was created successfully, or an
+*             error code if the timer could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode,
+                 osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function:   osl_ext_timer_delete
+*
+* Purpose:    Destroys a previously created timer object.
+*
+* Parameters: timer (mod) Timer object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was deleted successfully, or an
+*             error code if the timer could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function:   osl_ext_timer_start
+*
+* Purpose:    Start a previously created timer object.
+*
+* Parameters: timer (in)        Timer object.
+*             timeout_msec (in) Invoke callback after this number of milliseconds.
+*             mode (in)         One-shot or periodic timer.
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was started successfully, or an
+*             error code if the timer could not be started.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_start(osl_ext_timer_t *timer,
+	osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode);
+
+/****************************************************************************
+* Function:   osl_ext_timer_stop
+*
+* Purpose:    Stop a previously created timer object.
+*
+* Parameters: timer (in)        Timer object.
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was stopped successfully, or an
+*             error code if the timer could not be stopped.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_stop(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function:   osl_ext_time_get
+*
+* Purpose:    Returns incrementing time counter.
+*
+* Parameters: None.
+*
+* Returns:    Returns incrementing time counter in msec.
+*****************************************************************************
+*/
+osl_ext_time_ms_t osl_ext_time_get(void);
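+
+/*
+ * Illustrative usage sketch (not part of the original header): arming a
+ * one-shot watchdog with the timer API above. wd_timer, wd_expired and
+ * WD_TIMEOUT_MS are hypothetical; as noted for osl_ext_timer_create(), the
+ * callback runs in interrupt context.
+ *
+ *	static void wd_expired(osl_ext_timer_arg_t arg) { ... }
+ *
+ *	osl_ext_timer_t wd_timer;
+ *	osl_ext_timer_create("wd", WD_TIMEOUT_MS, OSL_EXT_TIMER_MODE_ONCE,
+ *		wd_expired, NULL, &wd_timer);
+ *	...
+ *	osl_ext_timer_stop(&wd_timer);		// cancel before expiry
+ *	osl_ext_timer_delete(&wd_timer);
+ */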
+
+/* --------------------------------------------------------------------------
+** Tasks
+*/
+
+/****************************************************************************
+* Function:   osl_ext_task_create
+*
+* Purpose:    Create a task.
+*
+* Parameters: name       (in)  Pointer to task string descriptor.
+*             stack      (in)  Pointer to stack. NULL to allocate.
+*             stack_size (in)  Stack size - in bytes.
+*             priority   (in)  Abstract task priority.
+*             func       (in)  A pointer to the task entry point function.
+*             arg        (in)  Value passed into task entry point function.
+*             task       (out) Task to create.
+*
+* Returns:    OSL_EXT_SUCCESS if the task was created successfully, or an
+*             error code if the task could not be created.
+*****************************************************************************
+*/
+
+#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
+	   osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \
+	   (arg), (task))
+
+osl_ext_status_t osl_ext_task_create_ex(char* name,
+	void *stack, unsigned int stack_size, osl_ext_task_priority_t priority,
+	osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg,
+	osl_ext_task_t *task);
+
+/****************************************************************************
+* Function:   osl_ext_task_delete
+*
+* Purpose:    Destroy a task.
+*
+* Parameters: task (mod) Task to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the task was deleted successfully, or an
+*             error code if the task could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task);
+
+
+/****************************************************************************
+* Function:   osl_ext_task_current
+*
+* Purpose:    Returns the currently running task.
+*
+* Parameters: None.
+*
+* Returns:    osl_ext_task_t of current running task.
+*****************************************************************************
+*/
+osl_ext_task_t *osl_ext_task_current(void);
+
+
+/****************************************************************************
+* Function:   osl_ext_task_yield
+*
+* Purpose:    Yield the CPU to other tasks of the same priority that are
+*             ready-to-run.
+*
+* Parameters: None.
+*
+* Returns:    OSL_EXT_SUCCESS if successful, else error code.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_yield(void);
+
+
+/****************************************************************************
+* Function:   osl_ext_task_enable_stack_check
+*
+* Purpose:    Enable task stack checking.
+*
+* Parameters: None.
+*
+* Returns:    OSL_EXT_SUCCESS if successful, else error code.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_enable_stack_check(void);
+
+
+/* --------------------------------------------------------------------------
+** Queue
+*/
+
+/****************************************************************************
+* Function:   osl_ext_queue_create
+*
+* Purpose:    Create a queue.
+*
+* Parameters: name     (in)  Name to assign to the queue (must be unique).
+*             buffer   (in)  Queue buffer. NULL to allocate.
+*             size     (in)  Size of the queue.
+*             queue    (out) Newly created queue.
+*
+* Returns:    OSL_EXT_SUCCESS if the queue was created successfully, or an
+*             error code if the queue could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_create(char *name,
+	void *queue_buffer, unsigned int queue_size,
+	osl_ext_queue_t *queue);
+
+/****************************************************************************
+* Function:   osl_ext_queue_delete
+*
+* Purpose:    Destroys a previously created queue object.
+*
+* Parameters: queue    (mod) Queue object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the queue was deleted successfully, or an
+*             error code if the queue could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue);
+
+/****************************************************************************
+* Function:   osl_ext_queue_send
+*
+* Purpose:    Send/add data to the queue. This function will not block the
+*             calling thread if the queue is full.
+*
+* Parameters: queue    (mod) Queue object.
+*             data     (in)  Data pointer to be queued.
+*
+* Returns:    OSL_EXT_SUCCESS if the data was queued successfully, or an
+*             error code if the data could not be queued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data);
+
+/****************************************************************************
+* Function:   osl_ext_queue_send_synchronous
+*
+* Purpose:    Send/add data to the queue. This function will block the
+*             calling thread until the data is dequeued.
+*
+* Parameters: queue    (mod) Queue object.
+*             data     (in)  Data pointer to be queued.
+*
+* Returns:    OSL_EXT_SUCCESS if the data was queued successfully, or an
+*             error code if the data could not be queued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data);
+
+/****************************************************************************
+* Function:   osl_ext_queue_receive
+*
+* Purpose:    Receive/remove data from the queue. This function will only
+*             block the calling thread for timeout_msec milliseconds, before
+*             returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: queue        (mod) Queue object.
+*             timeout_msec (in)  Number of milliseconds to wait for the
+*                                data from the queue.
+*             data         (out) Data pointer received/removed from the queue.
+*
+* Returns:    OSL_EXT_SUCCESS if the data was dequeued successfully, or an
+*             error code if the data could not be dequeued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue,
+                 osl_ext_time_ms_t timeout_msec, void **data);
+
+/****************************************************************************
+* Function:   osl_ext_queue_count
+*
+* Purpose:    Returns the number of items in the queue.
+*
+* Parameters: queue        (mod) Queue object.
+*             count        (out) Number of items currently in the queue.
+*
+* Returns:    OSL_EXT_SUCCESS if the count was returned successfully, or an
+*             error code if the count is invalid.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count);
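+
+/*
+ * Illustrative usage sketch (not part of the original header): handing work
+ * items between tasks with the queue API above. evt_queue and msg are
+ * hypothetical.
+ *
+ *	// sender task: enqueue without blocking
+ *	osl_ext_queue_send(&evt_queue, msg);
+ *
+ *	// receiver task: wait up to 100 ms for the next item
+ *	void *data;
+ *	if (osl_ext_queue_receive(&evt_queue, 100, &data) == OSL_EXT_SUCCESS) {
+ *		// process data
+ *	}
+ */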
+
+
+/* --------------------------------------------------------------------------
+** Event
+*/
+
+/****************************************************************************
+* Function:   osl_ext_event_create
+*
+* Purpose:    Creates an event object, which can subsequently be used to
+*             notify and trigger tasks.
+*
+* Parameters: name  (in)  Name to assign to the event (must be unique).
+*             event (out) Event object to initialize.
+*
+* Returns:    OSL_EXT_SUCCESS if the event was created successfully, or an
+*             error code if the event could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event);
+
+/****************************************************************************
+* Function:   osl_ext_event_delete
+*
+* Purpose:    Destroys a previously created event object.
+*
+* Parameters: event (mod) Event object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the event was deleted successfully, or an
+*             error code if the event could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event);
+
+/****************************************************************************
+* Function:   osl_ext_event_get
+*
+* Purpose:    Get event from specified event object.
+*
+* Parameters: event        (mod) Event object to get.
+*             requested    (in)  Requested event to get.
+*             timeout_msec (in)  Number of milliseconds to wait for the event.
+*             event_bits   (out) Event bits retrieved.
+*
+* Returns:    OSL_EXT_SUCCESS if the requested event bits were retrieved, or an
+*             error code if they could not be retrieved within the timeout.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event,
+	osl_ext_event_bits_t requested,	osl_ext_time_ms_t timeout_msec,
+	osl_ext_event_bits_t *event_bits);
+
+/****************************************************************************
+* Function:   osl_ext_event_set
+*
+* Purpose:    Set event of specified event object.
+*
+* Parameters: event      (mod) Event object to set.
+*             event_bits (in)  Event bits to set.
+*
+* Returns:    OSL_EXT_SUCCESS if the event was set successfully, or an
+*             error code if the event could not be set.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event,
+	osl_ext_event_bits_t event_bits);
+
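+/*
+ * Illustrative usage sketch (not part of the original header): waiting on two
+ * hypothetical event bits with the event API above (drv_event, EVT_RX and
+ * EVT_STOP are illustrative names).
+ *
+ *	osl_ext_event_bits_t bits;
+ *	if (osl_ext_event_get(&drv_event, EVT_RX | EVT_STOP, 500, &bits) ==
+ *	    OSL_EXT_SUCCESS && (bits & EVT_STOP)) {
+ *		// another task called osl_ext_event_set(&drv_event, EVT_STOP)
+ *	}
+ */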
+
+/* --------------------------------------------------------------------------
+** Interrupt
+*/
+
+/****************************************************************************
+* Function:   osl_ext_interrupt_disable
+*
+* Purpose:    Disable CPU interrupt.
+*
+* Parameters: None.
+*
+* Returns:    The interrupt state before disable for restoring interrupt.
+*****************************************************************************
+*/
+osl_ext_interrupt_state_t osl_ext_interrupt_disable(void);
+
+
+/****************************************************************************
+* Function:   osl_ext_interrupt_restore
+*
+* Purpose:    Restore CPU interrupt state.
+*
+* Parameters: state (in)  Interrupt state to restore returned from
+*                         osl_ext_interrupt_disable().
+*
+* Returns:   None.
+*****************************************************************************
+*/
+void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state);
+
+#else
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+/* Semaphore. */
+#define osl_ext_sem_t
+#define OSL_EXT_SEM_DECL(sem)
+
+/* Mutex. */
+#define osl_ext_mutex_t
+#define OSL_EXT_MUTEX_DECL(mutex)
+
+/* Timer. */
+#define osl_ext_timer_t
+#define OSL_EXT_TIMER_DECL(timer)
+
+/* Task. */
+#define osl_ext_task_t void
+#define OSL_EXT_TASK_DECL(task)
+
+/* Queue. */
+#define osl_ext_queue_t
+#define OSL_EXT_QUEUE_DECL(queue)
+
+/* Event. */
+#define osl_ext_event_t
+#define OSL_EXT_EVENT_DECL(event)
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+#define osl_ext_sem_create(name, init_cnt, sem)		(OSL_EXT_SUCCESS)
+#define osl_ext_sem_delete(sem)				(OSL_EXT_SUCCESS)
+#define osl_ext_sem_give(sem)				(OSL_EXT_SUCCESS)
+#define osl_ext_sem_take(sem, timeout_msec)		(OSL_EXT_SUCCESS)
+
+#define osl_ext_mutex_create(name, mutex)		(OSL_EXT_SUCCESS)
+#define osl_ext_mutex_delete(mutex)			(OSL_EXT_SUCCESS)
+#define osl_ext_mutex_acquire(mutex, timeout_msec)	(OSL_EXT_SUCCESS)
+#define osl_ext_mutex_release(mutex)			(OSL_EXT_SUCCESS)
+
+#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \
+	(OSL_EXT_SUCCESS)
+#define osl_ext_timer_delete(timer)			(OSL_EXT_SUCCESS)
+#define osl_ext_timer_start(timer, timeout_msec, mode)	(OSL_EXT_SUCCESS)
+#define osl_ext_timer_stop(timer)			(OSL_EXT_SUCCESS)
+#define osl_ext_time_get()				(0)
+
+#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
+	(OSL_EXT_SUCCESS)
+#define osl_ext_task_delete(task)			(OSL_EXT_SUCCESS)
+#define osl_ext_task_current()				(NULL)
+#define osl_ext_task_yield()				(OSL_EXT_SUCCESS)
+#define osl_ext_task_enable_stack_check()		(OSL_EXT_SUCCESS)
+
+#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \
+	(OSL_EXT_SUCCESS)
+#define osl_ext_queue_delete(queue)			(OSL_EXT_SUCCESS)
+#define osl_ext_queue_send(queue, data)			(OSL_EXT_SUCCESS)
+#define osl_ext_queue_send_synchronous(queue, data)	(OSL_EXT_SUCCESS)
+#define osl_ext_queue_receive(queue, timeout_msec, data) \
+	(OSL_EXT_SUCCESS)
+#define osl_ext_queue_count(queue, count)		(OSL_EXT_SUCCESS)
+
+#define osl_ext_event_create(name, event)		(OSL_EXT_SUCCESS)
+#define osl_ext_event_delete(event)			(OSL_EXT_SUCCESS)
+#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \
+	(OSL_EXT_SUCCESS)
+#define osl_ext_event_set(event, event_bits)		(OSL_EXT_SUCCESS)
+
+#define osl_ext_interrupt_disable(void)
+#define osl_ext_interrupt_restore(state)
+
+#endif	/* OSL_EXT_DISABLED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _osl_ext_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/p2p.h b/drivers/net/wireless/bcmdhd/include/p2p.h
new file mode 100644
index 0000000..cc7aa68
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/p2p.h
@@ -0,0 +1,713 @@
+/*
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: p2p.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <802.11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WiFi P2P OUI values */
+#define P2P_OUI			WFA_OUI			/* WiFi P2P OUI */
+#define P2P_VER			WFA_OUI_TYPE_P2P	/* P2P version: 9=WiFi P2P v1.0 */
+
+#define P2P_IE_ID		0xdd			/* P2P IE element ID */
+
+/* WiFi P2P IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+	uint8	id;		/* IE ID: 0xDD */
+	uint8	len;		/* IE length */
+	uint8	OUI[3];		/* WiFi P2P specific OUI: P2P_OUI */
+	uint8	oui_type;	/* Identifies P2P version: P2P_VER */
+	uint8	subelts[1];	/* variable length subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN	6
+
+#define P2P_ATTR_ID_OFF		0
+#define P2P_ATTR_LEN_OFF	1
+#define P2P_ATTR_DATA_OFF	3
+
+#define P2P_ATTR_ID_LEN		1	/* ID field length */
+#define P2P_ATTR_LEN_LEN	2	/* length field length */
+#define P2P_ATTR_HDR_LEN	3 /* ID + 2-byte length field spec 1.02 */
+
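+/* Illustrative sketch (not part of the original header): walking the TLV
+ * subelements that follow the fixed portion of a P2P IE, using the offsets
+ * above.  Each attribute is a 1-byte ID, a 2-byte little-endian length and
+ * then the body.  Assuming "ie" points at a validated wifi_p2p_ie_t:
+ *
+ *   const uint8 *attr = ie->subelts;
+ *   const uint8 *end  = &ie->OUI[0] + ie->len;      (len counts from OUI onwards)
+ *   while (attr + P2P_ATTR_HDR_LEN <= end) {
+ *       uint8  id   = attr[P2P_ATTR_ID_OFF];
+ *       uint16 alen = attr[P2P_ATTR_LEN_OFF] | (attr[P2P_ATTR_LEN_OFF + 1] << 8);
+ *       ... attr[P2P_ATTR_DATA_OFF] onwards holds alen bytes of body;
+ *           a real parser would also bound-check alen against end ...
+ *       attr += P2P_ATTR_HDR_LEN + alen;
+ *   }
+ */
+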
+#define P2P_WFDS_HASH_LEN		6
+#define P2P_WFDS_MAX_SVC_NAME_LEN	32
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS			0	/* Status */
+#define P2P_SEID_MINOR_RC		1	/* Minor Reason Code */
+#define P2P_SEID_P2P_INFO		2	/* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID			3	/* P2P Device ID */
+#define P2P_SEID_INTENT			4	/* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT		5	/* Configuration Timeout */
+#define P2P_SEID_CHANNEL		6	/* Listen channel */
+#define P2P_SEID_GRP_BSSID		7	/* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING		8	/* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR		9	/* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY		10	/* P2P Manageability */
+#define P2P_SEID_CHAN_LIST		11	/* Channel List */
+#define P2P_SEID_ABSENCE		12	/* Notice of Absence */
+#define P2P_SEID_DEV_INFO		13	/* Device Info */
+#define P2P_SEID_GROUP_INFO		14	/* Group Info */
+#define P2P_SEID_GROUP_ID		15	/* Group ID */
+#define P2P_SEID_P2P_IF			16	/* P2P Interface */
+#define P2P_SEID_OP_CHANNEL		17	/* Operating Channel */
+#define P2P_SEID_INVITE_FLAGS		18	/* Invitation Flags */
+#define P2P_SEID_SERVICE_HASH		21	/* Service hash */
+#define P2P_SEID_SESSION		22	/* Session information */
+#define P2P_SEID_CONNECT_CAP		23	/* Connection capability */
+#define P2P_SEID_ADVERTISE_ID		24	/* Advertisement ID */
+#define P2P_SEID_ADVERTISE_SERVICE	25	/* Advertised service */
+#define P2P_SEID_SESSION_ID		26	/* Session ID */
+#define P2P_SEID_FEATURE_CAP		27	/* Feature capability */
+#define	P2P_SEID_PERSISTENT_GROUP	28	/* Persistent group */
+#define P2P_SEID_SESSION_INFO_RESP	29	/* Session Information Response */
+#define P2P_SEID_VNDR			221	/* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES	0x1b
+
+
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_INFO */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	dev;		/* Device Capability Bitmap */
+	uint8	group;		/* Group Capability Bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS	0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS	0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT	0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN		0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT			0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC		0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER			0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP		0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT			0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS		0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT		0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT	0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION		0x40 /* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTENT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	intent;		/* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CFG_TIMEOUT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	go_tmo;		/* GO config timeout in units of 10 ms */
+	uint8	client_tmo;	/* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+/* WiFi P2P IE subelement: Listen Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+/* WiFi P2P IE subelement: P2P Group BSSID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GRP_BSSID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P group bssid */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+/* WiFi P2P IE subelement: P2P Group ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GROUP_ID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ssid[1];	/* ssid. device id. variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+/* WiFi P2P IE subelement: P2P Interface */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_IF */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ifaddrs;	/* P2P Interface Address count */
+	uint8	ifaddr[1][6];	/* P2P Interface Address list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_STATUS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	status;		/* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS			0
+				/* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL	1
+				/* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP			P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+				/* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS		2
+				/* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED		3
+				/* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS		4
+				/* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM		5
+				/* Failed, unable to accommodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR		6
+				/* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN		7
+				/* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP		8
+				/* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT			9
+				/* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS		10
+				/* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT		11
+				/* Failed, rejected by user */
+#define P2P_STATSE_SUCCESS_USER_ACCEPT		12
+				/* Success, accepted by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+	uint8	eltId;		/* ID: P2P_SEID_XT_TIMING */
+	uint8	len[2];		/* length not including eltId, len fields */
+	uint8	avail[2];	/* availability period */
+	uint8	interval[2];	/* availability interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN	10	/* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTINTADDR */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	band;		/* Regulatory Class (band) */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+	uint8	band;						/* Regulatory Class (band) */
+	uint8	num_channels;				/* # of channels in the channel list */
+	uint8	channels[WL_NUMCHANNELS];	/* Channel List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+/* WiFi P2P IE subelement: Channel List */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHAN_LIST */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	num_entries;	/* # of channel entries */
+	wifi_p2p_chanlist_entry_t	entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+						/* Channel Entry List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+/* WiFi Primary Device Type structure */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
+	uint16	cat_id;		/* Category ID */
+	uint8	OUI[3];		/* WFA OUI: 0x0050F2 */
+	uint8	oui_type;	/* WPS_OUI_TYPE */
+	uint16	sub_cat_id;	/* Sub Category ID */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
+
+/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category
+ * maximum values for each category
+ */
+#define P2P_DISE_SUBCATEGORY_MINVAL		1
+#define P2P_DISE_CATEGORY_COMPUTER		1
+#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL		8
+#define P2P_DISE_CATEGORY_INPUT_DEVICE		2
+#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL	9
+#define P2P_DISE_CATEGORY_PRINTER		3
+#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL		5
+#define P2P_DISE_CATEGORY_CAMERA		4
+#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL		4
+#define P2P_DISE_CATEGORY_STORAGE		5
+#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL		1
+#define P2P_DISE_CATEGORY_NETWORK_INFRA		6
+#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL	4
+#define P2P_DISE_CATEGORY_DISPLAY		7
+#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL		4
+#define P2P_DISE_CATEGORY_MULTIMEDIA		8
+#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL		6
+#define P2P_DISE_CATEGORY_GAMING		9
+#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL		5
+#define P2P_DISE_CATEGORY_TELEPHONE		10
+#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL		5
+#define P2P_DISE_CATEGORY_AUDIO			11
+#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL		6
+
+/* WiFi P2P IE's Device Info subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_DEV_INFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+	uint8	mac[6];			/* P2P Device MAC address */
+	uint16	wps_cfg_meths;		/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pri_devtype[8];		/* Primary Device Type */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN	8
+
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+	uint8	len;
+	uint8	devaddr[ETHER_ADDR_LEN];	/* P2P Device Address */
+	uint8	ifaddr[ETHER_ADDR_LEN];		/* P2P Interface Address */
+	uint8	devcap;				/* Device Capability */
+	uint8	cfg_meths[2];			/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pridt[P2P_DEV_TYPE_LEN];	/* Primary Device Type */
+	uint8	secdts;				/* Number of Secondary Device Types */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+/* WiFi P2P IE's Device ID subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+	uint8	eltId;
+	uint8	len[2];
+	struct ether_addr	addr;			/* P2P Device MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+/* WiFi P2P IE subelement: P2P Manageability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_MGBTY */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mg_bitmap;	/* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG   0x1 /* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_GROUP_INFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+/* WiFi IE subelement: Operating Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_OP_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+/* WiFi IE subelement: INVITATION FLAGS */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INVITE_FLAGS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	flags;		/* Flags */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+/* WiFi P2P IE subelement: Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SERVICE_HASH */
+	uint8	len[2];			/* SE length not including eltId, len fields;
+					 * a multiple of 6 bytes
+					*/
+	uint8	hash[1];		/* Variable length - SHA256 hashes of
+					 * service names (may contain more than one hash)
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t;
+
+/* WiFi P2P IE subelement: Service Instance Data */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SESSION */
+	uint8	len[2];			/* SE length not including eltId, len */
+	uint8	ssn_info[1];		/* Variable length - Session information as specified by
+					 * the service layer, type matches serv. name
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t;
+
+
+/* WiFi P2P IE subelement: Connection capability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_CONNECT_CAP */
+	uint8	len[2];			/* SE length not including eltId, len */
+	uint8	conn_cap;		/* 1byte capability as specified by the
+					 * service layer, valid bitmask/values
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t;
+
+
+/* WiFi P2P IE subelement: Advertisement ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_ADVERTISE_ID */
+	uint8	len[2];			/* SE length not including eltId, len fixed 4 Bytes */
+	uint8	advt_id[4];		/* 4byte Advertisement ID of the peer device sent in
+					 * PROV Disc in Network byte order
+					*/
+	uint8	advt_mac[6];			/* P2P device address of the service advertiser */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t;
+
+
+/* WiFi P2P IE subelement: Advertise Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s {
+	uint8	advt_id[4];		/* SE Advertise ID for the service */
+	uint16	nw_cfg_method;	/* SE Network Config method for the service */
+	uint8	serv_name_len;	/* SE length of the service name */
+	uint8	serv_name[1];	/* Variable length service name field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t;
+
+
+/* WiFi P2P IE subelement: Advertise Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_ADVERTISE_SERVICE */
+	uint8	len[2];			/* SE length not including eltId, len fields;
+					 * a multiple of the wifi_p2p_adv_serv_info_t entry length
+					*/
+	wifi_p2p_adv_serv_info_t	p_advt_serv_info[1]; /* Variable length
+								of multiple instances
+								of the advertise service info
+								*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t;
+
+
+/* WiFi P2P IE subelement: Session ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SESSION_ID */
+	uint8	len[2];			/* SE length not including eltId, len fixed 4 Bytes */
+	uint8	ssn_id[4];		/* 4byte Session ID of the peer device sent in
+							 * PROV Disc in Network byte order
+							 */
+	uint8	ssn_mac[6];		/* P2P device address of the seeker - session mac */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t;
+
+
+#define P2P_ADVT_SERV_SE_FIXED_LEN	3	/* Includes only the element ID and len */
+#define P2P_ADVT_SERV_INFO_FIXED_LEN	7	/* Per ADV Service Instance advt_id +
+						 * nw_config_method + serv_name_len
+						 */
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+	uint8	category;	/* P2P_AF_CATEGORY */
+	uint8	OUI[3];		/* OUI - P2P_OUI */
+	uint8	type;		/* OUI Type - P2P_VER */
+	uint8	subtype;	/* OUI Subtype - P2P_AF_* */
+	uint8	dialog_token;	/* nonzero, identifies req/resp transaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY		0x7f
+
+#define P2P_AF_FIXED_LEN	7
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE	0	/* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ		1	/* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP		2	/* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ		3	/* GO Discoverability Request */
+
+
+/* WiFi P2P Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+	uint8	category;	/* P2P_PUB_AF_CATEGORY */
+	uint8	action;		/* P2P_PUB_AF_ACTION */
+	uint8	oui[3];		/* P2P_OUI */
+	uint8	oui_type;	/* OUI type - P2P_VER */
+	uint8	subtype;	/* OUI subtype - P2P_TYPE_* */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN	8
+#define P2P_PUB_AF_CATEGORY	0x04
+#define P2P_PUB_AF_ACTION	0x09
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ		0	/* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP		1	/* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF	2	/* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ	3	/* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP	4	/* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ	5	/* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP	6	/* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ	7	/* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP	8	/* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID	255	/* Invalid Subtype */
+
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
+#define P2P_TYPE_MNREQ		P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP		P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF		P2P_PAF_GON_CONF
+
+/* WiFi P2P IE subelement: Notice of Absence */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+	uint8	cnt_type;	/* Count/Type */
+	uint32	duration;	/* Duration */
+	uint32	interval;	/* Interval */
+	uint32	start;		/* Start Time */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+	uint8	eltId;		/* Subelement ID */
+	uint8	len[2];		/* Length */
+	uint8	index;		/* Index */
+	uint8	ops_ctw_parms;	/* CTWindow and OppPS Parameters */
+	wifi_p2p_noa_desc_t	desc[1];	/* Notice of Absence Descriptor(s) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN	5
+
+#define P2P_NOA_SE_MAX_DESC	2	/* max NoA descriptors in presence request */
+
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED	0	/* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT		255	/* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED	1	/* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE	2	/* acceptable limits */
+
+/* ctw_ops_parms field values */
+#define P2P_NOA_CTW_MASK	0x7f
+#define P2P_NOA_OPS_MASK	0x80
+#define P2P_NOA_OPS_SHIFT	7
+
+#define P2P_CTW_MIN	10	/* minimum 10TU */
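+
+/* Illustrative sketch (not part of the original header): unpacking the
+ * CTWindow / Opportunistic Power Save parameters of a Notice of Absence
+ * subelement with the masks above, assuming "se" points at a
+ * wifi_p2p_noa_se_t:
+ *
+ *   uint8 ctwindow = se->ops_ctw_parms & P2P_NOA_CTW_MASK;
+ *   uint8 opps_en  = (se->ops_ctw_parms & P2P_NOA_OPS_MASK) >> P2P_NOA_OPS_SHIFT;
+ */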
+
+/*
+ * P2P Service Discovery related
+ */
+#define	P2PSD_ACTION_CATEGORY		0x04
+				/* Public action frame */
+#define	P2PSD_ACTION_ID_GAS_IREQ	0x0a
+				/* Action value for GAS Initial Request AF */
+#define	P2PSD_ACTION_ID_GAS_IRESP	0x0b
+				/* Action value for GAS Initial Response AF */
+#define	P2PSD_ACTION_ID_GAS_CREQ	0x0c
+				/* Action value for GAS Comeback Request AF */
+#define	P2PSD_ACTION_ID_GAS_CRESP	0x0d
+				/* Action value for GAS Comeback Response AF */
+#define P2PSD_AD_EID				0x6c
+				/* Advertisement Protocol IE ID */
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI	0x00
+				/* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
+#define P2PSD_ADP_PROTO_ID			0x00
+				/* Advertisement Protocol ID. Always 0 for P2P SD */
+#define P2PSD_GAS_OUI				P2P_OUI
+				/* WFA OUI */
+#define P2PSD_GAS_OUI_SUBTYPE		P2P_VER
+				/* OUI Subtype for GAS IE */
+#define P2PSD_GAS_NQP_INFOID		0xDDDD
+				/* NQP Query Info ID: 56797 */
+#define P2PSD_GAS_COMEBACKDEALY		0x00
+				/* Not used in the Native GAS protocol */
+
+/* Service Protocol Type */
+typedef enum p2psd_svc_protype {
+	SVC_RPOTYPE_ALL = 0,
+	SVC_RPOTYPE_BONJOUR = 1,
+	SVC_RPOTYPE_UPNP = 2,
+	SVC_RPOTYPE_WSD = 3,
+	SVC_RPOTYPE_WFDS = 11,
+	SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+/* Service Discovery response status code */
+typedef enum {
+	P2PSD_RESP_STATUS_SUCCESS = 0,
+	P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+	P2PSD_RESP_STATUS_DATA_NA = 2,
+	P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+/* Advertisement Protocol IE tuple field */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+	uint8	llm_pamebi;	/* Query Response Length Limit bit 0-6, set to 0 plus
+				* Pre-Associated Message Exchange BSSID Independent bit 7, set to 0
+				*/
+	uint8	adp_id;		/* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+/* Advertisement Protocol IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+	uint8	id;		/* IE ID: 0x6c - 108 */
+	uint8	len;	/* IE length */
+	wifi_p2psd_adp_tpl_t adp_tpl;  /* Advertisement Protocol Tuple field. Only one
+				* tuple is defined for P2P Service Discovery
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+/* NQP Vendor-specific Content */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+	uint8	oui_subtype;	/* OUI Subtype: 0x09 */
+	uint16	svc_updi;		/* Service Update Indicator */
+	uint8	svc_tlvs[1];	/* wifi_p2psd_qreq_tlv_t type for service request,
+				* wifi_p2psd_qresp_tlv_t type for service response
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+/* Service Request TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+	uint16	len;			/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;		/* Service Protocol Type */
+	uint8	svc_tscid;		/* Service Transaction ID */
+	uint8	query_data[1];	/* Query Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+/* Query Request Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Length of service request TLV, 5 plus the size of request data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+	wifi_p2psd_adp_ie_t		adp_ie;		/* Advertisement Protocol IE */
+	uint16					qreq_len;	/* Query Request Length */
+	uint8	qreq_frm[1];	/* Query Request Frame wifi_p2psd_qreq_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+/* Service Response TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+	uint16	len;				/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;			/* Service Protocol Type */
+	uint8	svc_tscid;			/* Service Transaction ID */
+	uint8	status;				/* Value defined in Table 57 of P2P spec. */
+	uint8	query_data[1];		/* Response Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+/* Query Response Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Length of service response TLV, 6 plus the size of resp data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16		qresp_len;	/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint8	fragment_id;	/* Fragmentation ID */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16	qresp_len;		/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+/* Wi-Fi GAS Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+	uint8	category;		/* 0x04 Public Action Frame */
+	uint8	action;			/* 0x6c Advertisement Protocol */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	query_data[1];	/* Query Data. wifi_p2psd_gas_ireq_frame_t
+					 * or wifi_p2psd_gas_iresp_frame_t format
+					 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
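+/* Illustrative note (not part of the original header): for P2P service
+ * discovery the structures above nest as follows.  A GAS public action frame
+ * (wifi_p2psd_gas_pub_act_frame_t) carries a GAS Initial Request body
+ * (wifi_p2psd_gas_ireq_frame_t) or an Initial/Comeback Response body in
+ * query_data; the request body carries a Query Request Frame
+ * (wifi_p2psd_qreq_frame_t), whose vendor-specific content
+ * (wifi_p2psd_nqp_query_vsc_t) holds the Service Request TLVs
+ * (wifi_p2psd_qreq_tlv_t).  Responses mirror this with the *_qresp_*
+ * structures.
+ */
+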
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _P2P_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 0000000..4827c70
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,63 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: packed_section_end.h 514727 2014-11-12 03:02:48Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is NOT defined at this
+ * point, then there is a missing include of packed_section_start.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#undef BWL_PACKED_SECTION
+#else
+	#error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+/* Compiler-specific directives for structure packing are declared in
+ * packed_section_start.h. This marks the end of the structure packing section,
+ * so, undef them here.
+ */
+#undef	BWL_PRE_PACKED_STRUCT
+#undef	BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 0000000..9beb45d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,67 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: packed_section_start.h 514727 2014-11-12 03:02:48Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is already defined at this
+ * point, then there is a missing include of packed_section_end.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#error "BWL_PACKED_SECTION is already defined!"
+#else
+	#define BWL_PACKED_SECTION
+#endif
+
+
+
+
+/* Declare compiler-specific directives for structure packing. */
+#if defined(__GNUC__) || defined(__lint)
+	#define	BWL_PRE_PACKED_STRUCT
+	#define	BWL_POST_PACKED_STRUCT	__attribute__ ((packed))
+#elif defined(__CC_ARM)
+	#define	BWL_PRE_PACKED_STRUCT	__packed
+	#define	BWL_POST_PACKED_STRUCT
+#else
+	#error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 0000000..98f22c3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,330 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: pcicfg.h 621340 2016-02-25 12:26:40Z $
+ */
+
+#ifndef	_h_pcicfg_
+#define	_h_pcicfg_
+
+
+/* pci config status reg has a bit to indicate that capability ptr is present */
+
+#define PCI_CAPPTR_PRESENT	0x0010
+
+/* A structure for the config registers is nice, but in most
+ * systems the config space is not memory mapped, so we need
+ * field offsets. :-(
+ */
+#define	PCI_CFG_VID		0
+#define	PCI_CFG_DID		2
+#define	PCI_CFG_CMD		4
+#define	PCI_CFG_STAT		6
+#define	PCI_CFG_REV		8
+#define	PCI_CFG_PROGIF		9
+#define	PCI_CFG_SUBCL		0xa
+#define	PCI_CFG_BASECL		0xb
+#define	PCI_CFG_CLSZ		0xc
+#define	PCI_CFG_LATTIM		0xd
+#define	PCI_CFG_HDR		0xe
+#define	PCI_CFG_BIST		0xf
+#define	PCI_CFG_BAR0		0x10
+/*
+* TODO: PCI_CFG_BAR1 is wrongly defined to be 0x14 whereas it should be
+* 0x18 as per the PCIe full dongle spec. Need to modify the values below
+* correctly at a later point in time
+*/
+#ifdef DHD_EFI
+#define	PCI_CFG_BAR1		0x18
+#else
+#define	PCI_CFG_BAR1		0x14
+#endif /* DHD_EFI */
+#define	PCI_CFG_BAR2		0x18
+#define	PCI_CFG_BAR3		0x1c
+#define	PCI_CFG_BAR4		0x20
+#define	PCI_CFG_BAR5		0x24
+#define	PCI_CFG_CIS		0x28
+#define	PCI_CFG_SVID		0x2c
+#define	PCI_CFG_SSID		0x2e
+#define	PCI_CFG_ROMBAR		0x30
+#define PCI_CFG_CAPPTR		0x34
+#define	PCI_CFG_INT		0x3c
+#define	PCI_CFG_PIN		0x3d
+#define	PCI_CFG_MINGNT		0x3e
+#define	PCI_CFG_MAXLAT		0x3f
+#define	PCI_CFG_DEVCTRL		0xd8
+#define PCI_CFG_TLCNTRL_5	0x814
+
+
+/* PCI CAPABILITY DEFINES */
+#define PCI_CAP_POWERMGMTCAP_ID		0x01
+#define PCI_CAP_MSICAP_ID		0x05
+#define PCI_CAP_VENDSPEC_ID		0x09
+#define PCI_CAP_PCIECAP_ID		0x10
+
+/* Data structure to define the Message Signalled Interrupt facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_msi {
+	uint8	capID;
+	uint8	nextptr;
+	uint16	msgctrl;
+	uint32	msgaddr;
+} pciconfig_cap_msi;
+#define MSI_ENABLE	0x1		/* bit 0 of msgctrl */
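+/* Illustrative only: with "msi" pointing at the capability structure above,
+ * message-signalled interrupts are enabled by setting bit 0 of msgctrl,
+ * e.g.  msi->msgctrl |= MSI_ENABLE;
+ */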
+
+/* Data structure to define the Power management facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_pwrmgmt {
+	uint8	capID;
+	uint8	nextptr;
+	uint16	pme_cap;
+	uint16	pme_sts_ctrl;
+	uint8	pme_bridge_ext;
+	uint8	data;
+} pciconfig_cap_pwrmgmt;
+
+#define PME_CAP_PM_STATES (0x1f << 27)	/* Bits 31:27 states that can generate PME */
+#define PME_CSR_OFFSET	    0x4		/* 4-bytes offset */
+#define PME_CSR_PME_EN	  (1 << 8)	/* Bit 8 Enable generating of PME */
+#define PME_CSR_PME_STAT  (1 << 15)	/* Bit 15 PME got asserted */
+
+/* Data structure to define the PCIE capability */
+typedef struct _pciconfig_cap_pcie {
+	uint8	capID;
+	uint8	nextptr;
+	uint16	pcie_cap;
+	uint32	dev_cap;
+	uint16	dev_ctrl;
+	uint16	dev_status;
+	uint32	link_cap;
+	uint16	link_ctrl;
+	uint16	link_status;
+	uint32	slot_cap;
+	uint16	slot_ctrl;
+	uint16	slot_status;
+	uint16	root_ctrl;
+	uint16	root_cap;
+	uint32	root_status;
+} pciconfig_cap_pcie;
+
+/* PCIE Enhanced CAPABILITY DEFINES */
+#define PCIE_EXTCFG_OFFSET	0x100
+#define PCIE_ADVERRREP_CAPID	0x0001
+#define PCIE_VC_CAPID		0x0002
+#define PCIE_DEVSNUM_CAPID	0x0003
+#define PCIE_PWRBUDGET_CAPID	0x0004
+
+/* PCIE Extended configuration */
+#define PCIE_ADV_CORR_ERR_MASK	0x114
+#define CORR_ERR_RE	(1 << 0) /* Receiver  */
+#define CORR_ERR_BT 	(1 << 6) /* Bad TLP  */
+#define CORR_ERR_BD	(1 << 7) /* Bad DLLP */
+#define CORR_ERR_RR	(1 << 8) /* REPLAY_NUM rollover */
+#define CORR_ERR_RT	(1 << 12) /* Reply timer timeout */
+#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
+			 CORR_ERR_RR | CORR_ERR_RT)
+
+/* PCIE Root Control Register bits (Host mode only) */
+#define	PCIE_RC_CORR_SERR_EN		0x0001
+#define	PCIE_RC_NONFATAL_SERR_EN	0x0002
+#define	PCIE_RC_FATAL_SERR_EN		0x0004
+#define	PCIE_RC_PME_INT_EN		0x0008
+#define	PCIE_RC_CRS_EN			0x0010
+
+/* PCIE Root Capability Register bits (Host mode only) */
+#define	PCIE_RC_CRS_VISIBILITY		0x0001
+
+/* Header to define the PCIE specific capabilities in the extended config space */
+typedef struct _pcie_enhanced_caphdr {
+	uint16	capID;
+	uint16	cap_ver : 4;
+	uint16	next_ptr : 12;
+} pcie_enhanced_caphdr;
+
+
+#define	PCI_BAR0_WIN		0x80	/* backplane address space accessed by BAR0 */
+#define	PCI_BAR1_WIN		0x84	/* backplane address space accessed by BAR1 */
+#define	PCI_SPROM_CONTROL	0x88	/* sprom property control */
+#define	PCI_BAR1_CONTROL	0x8c	/* BAR1 region burst control */
+#define	PCI_INT_STATUS		0x90	/* PCI and other cores interrupts */
+#define	PCI_INT_MASK		0x94	/* mask of PCI and other cores interrupts */
+#define PCI_TO_SB_MB		0x98	/* signal backplane interrupts */
+#define PCI_BACKPLANE_ADDR	0xa0	/* address an arbitrary location on the system backplane */
+#define PCI_BACKPLANE_DATA	0xa4	/* data at the location specified by above address */
+#define	PCI_CLK_CTL_ST		0xa8	/* pci config space clock control/status (>=rev14) */
+#define	PCI_BAR0_WIN2		0xac	/* backplane address space accessed by second 4KB of BAR0 */
+#define	PCI_GPIO_IN		0xb0	/* pci config space gpio input (>=rev3) */
+#define	PCI_GPIO_OUT		0xb4	/* pci config space gpio output (>=rev3) */
+#define PCIE_CFG_DEVICE_CONTROL 0xb4    /* 0xb4 is used as device control in PCIE devices */
+#define PCIE_DC_AER_CORR_EN		(1u << 0u)
+#define PCIE_DC_AER_NON_FATAL_EN	(1u << 1u)
+#define PCIE_DC_AER_FATAL_EN		(1u << 2u)
+#define PCIE_DC_AER_UNSUP_EN		(1u << 3u)
+
+#define PCI_BAR0_WIN2_OFFSET		0x1000u
+#define PCIE2_BAR0_CORE2_WIN2_OFFSET	0x5000u
+
+#define	PCI_GPIO_OUTEN		0xb8	/* pci config space gpio output enable (>=rev3) */
+#define	PCI_L1SS_CTRL2		0x24c	/* The L1 PM Substates Control register */
+
+/* Private Registers */
+#define	PCI_STAT_CTRL		0xa80
+#define	PCI_L0_EVENTCNT		0xa84
+#define	PCI_L0_STATETMR		0xa88
+#define	PCI_L1_EVENTCNT		0xa8c
+#define	PCI_L1_STATETMR		0xa90
+#define	PCI_L1_1_EVENTCNT	0xa94
+#define	PCI_L1_1_STATETMR	0xa98
+#define	PCI_L1_2_EVENTCNT	0xa9c
+#define	PCI_L1_2_STATETMR	0xaa0
+#define	PCI_L2_EVENTCNT		0xaa4
+#define	PCI_L2_STATETMR		0xaa8
+
+#define	PCI_LINK_STATUS		0x4dc
+#define	PCI_LINK_SPEED_MASK	(15u << 0u)
+#define	PCI_LINK_SPEED_SHIFT	(0)
+#define PCIE_LNK_SPEED_GEN1		0x1
+#define PCIE_LNK_SPEED_GEN2		0x2
+#define PCIE_LNK_SPEED_GEN3		0x3
+
+#define	PCI_PL_SPARE	0x1808	/* Config to Increase external clkreq deasserted minimum time */
+#define	PCI_CONFIG_EXT_CLK_MIN_TIME_MASK	(1u << 31u)
+#define	PCI_CONFIG_EXT_CLK_MIN_TIME_SHIFT	(31)
+
+#define	PCI_PMCR_REFUP		0x1814	/* Trefup time */
+#define	PCI_PMCR_REFUP_EXT	0x1818	/* Trefup extend Max */
+#define PCI_TPOWER_SCALE_MASK 0x3
+#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */
+
+
+#define	PCI_BAR0_SHADOW_OFFSET	(2 * 1024)	/* bar0 + 2K accesses sprom shadow (in pci core) */
+#define	PCI_BAR0_SPROM_OFFSET	(4 * 1024)	/* bar0 + 4K accesses external sprom */
+#define	PCI_BAR0_PCIREGS_OFFSET	(6 * 1024)	/* bar0 + 6K accesses pci core registers */
+#define	PCI_BAR0_PCISBR_OFFSET	(4 * 1024)	/* pci core SB registers are at the end of the
+						 * 8KB window, so their address is the "regular"
+						 * address plus 4K
+						 */
+/*
+ * PCIE GEN2 changed some of the above locations for
+ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase
+ * BAR0 maps 32K of register space
+*/
+#define PCIE2_BAR0_WIN2		0x70 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN	0x74 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN2	0x78 /* backplane address space accessed by second 4KB of BAR0 */
+
+#define PCI_BAR0_WIN2_OFFSET		0x1000u
+#define PCI_CORE_ENUM_OFFSET		0x2000u
+#define PCI_CC_CORE_ENUM_OFFSET		0x3000u
+#define PCI_SEC_BAR0_WIN_OFFSET		0x4000u
+#define PCI_SEC_BAR0_WRAP_OFFSET	0x5000u
+#define PCI_CORE_ENUM2_OFFSET		0x6000u
+#define PCI_CC_CORE_ENUM2_OFFSET	0x7000u
+#define PCI_LAST_OFFSET			0x8000u
+
+#define PCI_BAR0_WINSZ		(16 * 1024)	/* bar0 window size Match with corerev 13 */
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+#define	PCI_16KB0_PCIREGS_OFFSET (8 * 1024)	/* bar0 + 8K accesses pci/pcie core registers */
+#define	PCI_16KB0_CCREGS_OFFSET	(12 * 1024)	/* bar0 + 12K accesses chipc core registers */
+#define PCI_16KBB0_WINSZ	(16 * 1024)	/* bar0 window size */
+#define PCI_SECOND_BAR0_OFFSET	(16 * 1024)	/* secondary  bar 0 window */
+
+/* On AI chips we have a second window to map DMP regs are mapped: */
+#define	PCI_16KB0_WIN2_OFFSET	(4 * 1024)	/* bar0 + 4K is "Window 2" */
+
+/* PCI_INT_STATUS */
+#define	PCI_SBIM_STATUS_SERR	0x4	/* backplane SBErr interrupt status */
+
+/* PCI_INT_MASK */
+#define	PCI_SBIM_SHIFT		8	/* backplane core interrupt mask bits offset */
+#define	PCI_SBIM_MASK		0xff00	/* backplane core interrupt mask */
+#define	PCI_SBIM_MASK_SERR	0x4	/* backplane SBErr interrupt mask */
+#define	PCI_CTO_INT_SHIFT	16	/* backplane SBErr interrupt mask */
+#define	PCI_CTO_INT_MASK	(1 << PCI_CTO_INT_SHIFT)	/* backplane SBErr interrupt mask */
+
+/* PCI_SPROM_CONTROL */
+#define SPROM_SZ_MSK		0x02	/* SPROM Size Mask */
+#define SPROM_LOCKED		0x08	/* SPROM Locked */
+#define	SPROM_BLANK		0x04	/* indicating a blank SPROM */
+#define SPROM_WRITEEN		0x10	/* SPROM write enable */
+#define SPROM_BOOTROM_WE	0x20	/* external bootrom write enable */
+#define SPROM_BACKPLANE_EN	0x40	/* Enable indirect backplane access */
+#define SPROM_OTPIN_USE		0x80	/* device OTP In use */
+#define SPROM_CFG_TO_SB_RST	0x400	/* backplane reset */
+
+/* Bits in PCI command and status regs */
+#define PCI_CMD_IO		0x00000001	/* I/O enable */
+#define PCI_CMD_MEMORY		0x00000002	/* Memory enable */
+#define PCI_CMD_MASTER		0x00000004	/* Master enable */
+#define PCI_CMD_SPECIAL		0x00000008	/* Special cycles enable */
+#define PCI_CMD_INVALIDATE	0x00000010	/* Memory write and invalidate enable */
+#define PCI_CMD_VGA_PAL		0x00000040	/* VGA palette snoop */
+#define PCI_STAT_TA		0x08000000	/* target abort status */
+
+/* Header types */
+#define	PCI_HEADER_MULTI	0x80
+#define	PCI_HEADER_MASK		0x7f
+typedef enum {
+	PCI_HEADER_NORMAL,
+	PCI_HEADER_BRIDGE,
+	PCI_HEADER_CARDBUS
+} pci_header_types;
+
+#define PCI_CONFIG_SPACE_SIZE	256
+
+#define DWORD_ALIGN(x)  (x & ~(0x03))
+#define BYTE_POS(x) (x & 0x3)
+#define WORD_POS(x) (x & 0x1)
+
+#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
+#define WORD_SHIFT(x)  (16 * WORD_POS(x))
+
+#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
+#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
+
+#define read_pci_cfg_byte(a) \
+	(BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff)
+
+#define read_pci_cfg_word(a) \
+	(WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff)
+
+#define write_pci_cfg_byte(a, val) do { \
+	uint32 tmpval; \
+	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFF << BYTE_SHIFT(a))) | \
+	        ((val) << BYTE_SHIFT(a)); \
+	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+	} while (0)
+
+#define write_pci_cfg_word(a, val) do { \
+	uint32 tmpval; \
+	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFFFF << WORD_SHIFT(a))) | \
+	        ((val) << WORD_SHIFT(a)); \
+	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+	} while (0)
+
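+/* Illustrative sketch (not part of the original header): the accessors above
+ * read a full 32-bit config dword and extract the addressed byte or word, so
+ * unaligned offsets work.  With an osl handle "osh" in scope:
+ *
+ *   uint8  rev = read_pci_cfg_byte(PCI_CFG_REV);
+ *   uint16 vid = read_pci_cfg_word(PCI_CFG_VID);
+ */
+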
+#endif	/* _h_pcicfg_ */
diff --git a/drivers/net/wireless/bcmdhd/include/pcie_core.h b/drivers/net/wireless/bcmdhd/include/pcie_core.h
new file mode 100644
index 0000000..463561d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcie_core.h
@@ -0,0 +1,959 @@
+/*
+ * BCM43XX PCIE core hardware definitions.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: pcie_core.h 673814 2016-12-05 06:10:24Z $
+ */
+#ifndef	_PCIE_CORE_H
+#define	_PCIE_CORE_H
+
+#include <sbhnddma.h>
+#include <siutils.h>
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+/* PCIE Enumeration space offsets */
+#define  PCIE_CORE_CONFIG_OFFSET	0x0
+#define  PCIE_FUNC0_CONFIG_OFFSET	0x400
+#define  PCIE_FUNC1_CONFIG_OFFSET	0x500
+#define  PCIE_FUNC2_CONFIG_OFFSET	0x600
+#define  PCIE_FUNC3_CONFIG_OFFSET	0x700
+#define  PCIE_SPROM_SHADOW_OFFSET	0x800
+#define  PCIE_SBCONFIG_OFFSET		0xE00
+
+
+#define PCIEDEV_MAX_DMAS			4
+
+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
+#define PCIE_DEV_BAR0_SIZE		0x4000
+#define PCIE_BAR0_WINMAPCORE_OFFSET	0x0
+#define PCIE_BAR0_EXTSPROM_OFFSET	0x1000
+#define PCIE_BAR0_PCIECORE_OFFSET	0x2000
+#define PCIE_BAR0_CCCOREREG_OFFSET	0x3000
+
+/* different register spaces accessible through pcie indirect access */
+#define PCIE_CONFIGREGS		1		/* Access to config space */
+#define PCIE_PCIEREGS		2		/* Access to pcie registers */
+
+#define PCIEDEV_HOSTADDR_MAP_BASE     0x8000000
+#define PCIEDEV_HOSTADDR_MAP_WIN_MASK 0xFC000000
+
+/* dma regs to control the flow between host2dev and dev2host  */
+typedef volatile struct pcie_devdmaregs {
+	dma64regs_t	tx;
+	uint32		PAD[2];
+	dma64regs_t	rx;
+	uint32		PAD[2];
+} pcie_devdmaregs_t;
+
+#define PCIE_DB_HOST2DEV_0		0x1
+#define PCIE_DB_HOST2DEV_1		0x2
+#define PCIE_DB_DEV2HOST_0		0x3
+#define PCIE_DB_DEV2HOST_1		0x4
+
+/* door bell register sets */
+typedef struct pcie_doorbell {
+	uint32		host2dev_0;
+	uint32		host2dev_1;
+	uint32		dev2host_0;
+	uint32		dev2host_1;
+} pcie_doorbell_t;
+
+/* Flow Ring Manager */
+#define IFRM_FR_IDX_MAX	256
+#define IFRM_FR_GID_MAX 4
+#define IFRM_FR_DEV_MAX 8
+#define IFRM_FR_TID_MAX 8
+#define IFRM_FR_DEV_VALID 2
+
+#define IFRM_VEC_REG_BITS	32
+
+#define IFRM_FR_PER_VECREG			4
+#define IFRM_FR_PER_VECREG_SHIFT	2
+#define IFRM_FR_PER_VECREG_MASK		((0x1 << IFRM_FR_PER_VECREG_SHIFT) - 1)
+
+#define IFRM_VEC_BITS_PER_FR	(IFRM_VEC_REG_BITS/IFRM_FR_PER_VECREG)
+
+/* IFRM_DEV_0 : d11AC, IFRM_DEV_1 : d11AD */
+#define IFRM_DEV_0	0
+#define IFRM_DEV_1	1
+
+#define IFRM_FR_GID_0 0
+#define IFRM_FR_GID_1 1
+#define IFRM_FR_GID_2 2
+#define IFRM_FR_GID_3 3
+
+#define IFRM_TIDMASK 0xffffffff
+
+/* ifrm_ctrlst register */
+#define IFRM_EN (1<<0)
+#define IFRM_BUFF_INIT_DONE (1<<1)
+#define IFRM_COMPARE_EN0 (1<<4)
+#define IFRM_COMPARE_EN1 (1<<5)
+#define IFRM_COMPARE_EN2 (1<<6)
+#define IFRM_COMPARE_EN3 (1<<7)
+#define IFRM_INIT_DV0 (1<<8)
+#define IFRM_INIT_DV1 (1<<9)
+#define IFRM_INIT_DV2 (1<<10)
+#define IFRM_INIT_DV3 (1<<11)
+
+/* ifrm_msk_arr.addr, ifrm_tid_arr.addr register */
+#define IFRM_ADDR_SHIFT 0
+#define IFRM_FRG_ID_SHIFT 8
+
+/* ifrm_vec.diff_lat register */
+#define IFRM_DV_LAT			(1<<0)
+#define IFRM_DV_LAT_DONE	(1<<1)
+#define IFRM_SDV_OFFSET_SHIFT	4
+#define IFRM_SDV_FRGID_SHIFT	8
+#define IFRM_VECSTAT_MASK		0x3
+#define IFRM_VEC_MASK			0xff
+
+/* idma frm array */
+typedef struct pcie_ifrm_array {
+	uint32		addr;
+	uint32		data;
+} pcie_ifrm_array_t;
+
+/* idma frm vector */
+typedef struct pcie_ifrm_vector {
+	uint32		diff_lat;
+	uint32		sav_tid;
+	uint32		sav_diff;
+	uint32		PAD[1];
+} pcie_ifrm_vector_t;
+
+/* idma frm interrupt */
+typedef struct pcie_ifrm_intr {
+	uint32		intstat;
+	uint32		intmask;
+} pcie_ifrm_intr_t;
+
+/* SB side: PCIE core and host control registers */
+typedef volatile struct sbpcieregs {
+	uint32 control;		/* host mode only */
+	uint32 iocstatus;	/* PCIE2: iostatus */
+	uint32 PAD[1];
+	uint32 biststatus;	/* bist Status: 0x00C */
+	uint32 gpiosel;		/* PCIE gpio sel: 0x010 */
+	uint32 gpioouten;	/* PCIE gpio outen: 0x14 */
+	uint32 PAD[2];
+	uint32 intstatus;	/* Interrupt status: 0x20 */
+	uint32 intmask;		/* Interrupt mask: 0x24 */
+	uint32 sbtopcimailbox;	/* sb to pcie mailbox: 0x028 */
+	uint32 obffcontrol;	/* PCIE2: 0x2C */
+	uint32 obffintstatus;	/* PCIE2: 0x30 */
+	uint32 obffdatastatus;	/* PCIE2: 0x34 */
+	uint32 PAD[1];
+	uint32 ctoctrl;		/* PCIE2: 0x3C */
+	uint32 errlog;		/* PCIE2: 0x40 */
+	uint32 errlogaddr;	/* PCIE2: 0x44 */
+	uint32 mailboxint;	/* PCIE2: 0x48 */
+	uint32 mailboxintmsk; /* PCIE2: 0x4c */
+	uint32 ltrspacing;	/* PCIE2: 0x50 */
+	uint32 ltrhysteresiscnt;	/* PCIE2: 0x54 */
+	uint32 msivectorassign;	/* PCIE2: 0x58 */
+	uint32 intmask2;	/* PCIE2: 0x5C */
+	uint32 PAD[40];
+	uint32 sbtopcie0;	/* sb to pcie translation 0: 0x100 */
+	uint32 sbtopcie1;	/* sb to pcie translation 1: 0x104 */
+	uint32 sbtopcie2;	/* sb to pcie translation 2: 0x108 */
+	uint32 PAD[5];
+
+	/* pcie core supports in direct access to config space */
+	uint32 configaddr;	/* pcie config space access: Address field: 0x120 */
+	uint32 configdata;	/* pcie config space access: Data field: 0x124 */
+	union {
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiodata;	/* Data to the mdio access: 0x12c */
+			/* pcie protocol phy/dllp/tlp register indirect access mechanism */
+			uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */
+			uint32 pcieinddata;	/* Data to/from the internal register: 0x134 */
+			uint32 clkreqenctrl;	/* >= rev 6, Clkreq rdma control : 0x138 */
+			uint32 PAD[177];
+			/* 0x400 - 0x7FF, PCIE Cfg Space, note: not used anymore in PcieGen2 */
+			uint32 pciecfg[4][64];
+		} pcie1;
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiowrdata;	/* write data to mdio 0x12C */
+			uint32 mdiorddata;	/* read data to mdio 0x130 */
+			uint32	PAD[3]; 	/* 0x134-0x138-0x13c */
+			/* door bell registers available from gen2 rev5 onwards */
+			pcie_doorbell_t	   dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */
+			uint32	dataintf;	/* 0x180 */
+			uint32  PAD[1];		/* 0x184 */
+			uint32	d2h_intrlazy_0; /* 0x188 */
+			uint32	h2d_intrlazy_0; /* 0x18c */
+			uint32  h2d_intstat_0;  /* 0x190 */
+			uint32  h2d_intmask_0;	/* 0x194 */
+			uint32  d2h_intstat_0;  /* 0x198 */
+			uint32  d2h_intmask_0;  /* 0x19c */
+			uint32	ltr_state;	/* 0x1A0 */
+			uint32	pwr_int_status;	/* 0x1A4 */
+			uint32	pwr_int_mask;	/* 0x1A8 */
+			uint32	pme_source; /* 0x1AC */
+			uint32	err_hdr_logreg1; /* 0x1B0 */
+			uint32	err_hdr_logreg2; /* 0x1B4 */
+			uint32	err_hdr_logreg3; /* 0x1B8 */
+			uint32	err_hdr_logreg4; /* 0x1BC */
+			uint32	err_code_logreg; /* 0x1C0 */
+			uint32  PAD[7]; /* 0x1C4 - 0x1DF */
+			uint32  clk_ctl_st;	/* 0x1E0 */
+			uint32  PAD[1];		/* 0x1E4 */
+			uint32	powerctl;	/* 0x1E8 */
+			uint32  PAD[5];		/* 0x1EC - 0x1FF */
+			pcie_devdmaregs_t  h2d0_dmaregs; /* 0x200 - 0x23c */
+			pcie_devdmaregs_t  d2h0_dmaregs; /* 0x240 - 0x27c */
+			pcie_devdmaregs_t  h2d1_dmaregs; /* 0x280 - 0x2bc */
+			pcie_devdmaregs_t  d2h1_dmaregs; /* 0x2c0 - 0x2fc */
+			pcie_devdmaregs_t  h2d2_dmaregs; /* 0x300 - 0x33c */
+			pcie_devdmaregs_t  d2h2_dmaregs; /* 0x340 - 0x37c */
+			pcie_devdmaregs_t  h2d3_dmaregs; /* 0x380 - 0x3bc */
+			pcie_devdmaregs_t  d2h3_dmaregs; /* 0x3c0 - 0x3fc */
+			uint32	d2h_intrlazy_1; /* 0x400 */
+			uint32	h2d_intrlazy_1; /* 0x404 */
+			uint32	h2d_intstat_1;	/* 0x408 */
+			uint32	h2d_intmask_1;	/* 0x40c */
+			uint32	d2h_intstat_1;	/* 0x410 */
+			uint32	d2h_intmask_1;	/* 0x414 */
+			uint32	PAD[2];			/* 0x418 - 0x41C */
+			uint32	d2h_intrlazy_2; /* 0x420 */
+			uint32	h2d_intrlazy_2; /* 0x424 */
+			uint32	h2d_intstat_2;	/* 0x428 */
+			uint32	h2d_intmask_2;	/* 0x42c */
+			uint32	d2h_intstat_2;	/* 0x430 */
+			uint32	d2h_intmask_2;	/* 0x434 */
+			uint32	PAD[10];		/* 0x438 - 0x45F */
+			uint32	ifrm_ctrlst;	/* 0x460 */
+			uint32	PAD[1];			/* 0x464 */
+			pcie_ifrm_array_t	ifrm_msk_arr;		/* 0x468 - 0x46F */
+			pcie_ifrm_array_t	ifrm_tid_arr[IFRM_FR_DEV_VALID];
+				/* 0x470 - 0x47F */
+			pcie_ifrm_vector_t	ifrm_vec[IFRM_FR_DEV_MAX];
+				/* 0x480 - 0x4FF */
+			pcie_ifrm_intr_t	ifrm_intr[IFRM_FR_DEV_MAX];
+				/* 0x500 - 0x53F */
+			uint32	PAD[48];		/* 0x540 - 0x5FF */
+			uint32	PAD[2][64];		/* 0x600 - 0x7FF */
+		} pcie2;
+	} u;
+	uint16	sprom[64];		/* SPROM shadow Area : 0x800 - 0x880 */
+	uint32	PAD[96];		/* 0x880 - 0x9FF */
+	/* direct memory access (pcie2 rev19 and after) : 0xA00 - 0xAFF */
+	uint32		PAD[16];	/* 0xA00 - 0xA3F */
+	uint32		dm_errlog;	/* 0xA40 */
+	uint32		dm_erraddr;	/* 0xA44 */
+	uint32		PAD[37];	/* 0xA48 - 0xADC */
+	uint32		dm_clk_ctl_st;	/* 0xAE0 */
+	uint32		PAD[1];		/* 0xAE4 */
+	uint32		dm_powerctl;	/* 0xAE8 */
+} sbpcieregs_t;
+
+#define PCIE_CFG_DA_OFFSET 0x400	/* direct access register offset for configuration space */
+
+/* PCI control */
+#define PCIE_RST_OE	0x01	/* When set, drives PCI_RESET out to pin */
+#define PCIE_RST	0x02	/* Value driven out to pin */
+#define PCIE_SPERST	0x04	/* SurvivePeRst */
+#define PCIE_FORCECFGCLKON_ALP	0x08
+#define PCIE_DISABLE_L1CLK_GATING	0x10
+#define PCIE_DLYPERST	0x100	/* Delay PeRst to CoE Core */
+#define PCIE_DISSPROMLD	0x200	/* DisableSpromLoadOnPerst */
+#define PCIE_WakeModeL2	0x1000	/* Wake on L2 */
+#define PCIE_MULTIMSI_EN		0x2000	/* enable multi-vector MSI messages */
+#define PCIE_PipeIddqDisable0	0x8000	/* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */
+#define PCIE_PipeIddqDisable1	0x10000	/* Disable assertion of pcie_pipe_iddq during L2 */
+#define PCIE_MSI_B2B_EN		0x100000	/* enable back-to-back MSI messages */
+#define PCIE_MSI_FIFO_CLEAR	0x200000	/* reset MSI FIFO */
+#define PCIE_IDMA_MODE_EN		0x800000	/* implicit M2M DMA mode */
+
+#define	PCIE_CFGADDR	0x120	/* offsetof(configaddr) */
+#define	PCIE_CFGDATA	0x124	/* offsetof(configdata) */
+#define PCIE_SWPME_FN0	0x10000
+#define PCIE_SWPME_FN0_SHF 16
+
+/* Interrupt status/mask */
+#define PCIE_INTA	0x01	/* PCIE INTA message is received */
+#define PCIE_INTB	0x02	/* PCIE INTB message is received */
+#define PCIE_INTFATAL	0x04	/* PCIE INTFATAL message is received */
+#define PCIE_INTNFATAL	0x08	/* PCIE INTNONFATAL message is received */
+#define PCIE_INTCORR	0x10	/* PCIE INTCORR message is received */
+#define PCIE_INTPME	0x20	/* PCIE INTPME message is received */
+#define PCIE_PERST	0x40	/* PCIE Reset Interrupt */
+
+#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */
+#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */
+#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */
+#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */
+#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */
+#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */
+#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */
+#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */
+
+/* PCIE MSI Vector Assignment register */
+#define MSIVEC_MB_0	(0x1 << 1) /* MSI Vector offset for mailbox0 is 2 */
+#define MSIVEC_MB_1	(0x1 << 2) /* MSI Vector offset for mailbox1 is 3 */
+#define MSIVEC_D2H0_DB0	(0x1 << 3) /* MSI Vector offset for interface0 door bell 0 is 4 */
+#define MSIVEC_D2H0_DB1	(0x1 << 4) /* MSI Vector offset for interface0 door bell 1 is 5 */
+
+/* PCIE MailboxInt/MailboxIntMask register */
+#define PCIE_MB_TOSB_FN0_0   	0x0001 /* write to assert PCIEtoSB Mailbox interrupt */
+#define PCIE_MB_TOSB_FN0_1   	0x0002
+#define PCIE_MB_TOSB_FN1_0   	0x0004
+#define PCIE_MB_TOSB_FN1_1   	0x0008
+#define PCIE_MB_TOSB_FN2_0   	0x0010
+#define PCIE_MB_TOSB_FN2_1   	0x0020
+#define PCIE_MB_TOSB_FN3_0   	0x0040
+#define PCIE_MB_TOSB_FN3_1   	0x0080
+#define PCIE_MB_TOPCIE_FN0_0 	0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */
+#define PCIE_MB_TOPCIE_FN0_1 	0x0200
+#define PCIE_MB_TOPCIE_FN1_0 	0x0400
+#define PCIE_MB_TOPCIE_FN1_1 	0x0800
+#define PCIE_MB_TOPCIE_FN2_0 	0x1000
+#define PCIE_MB_TOPCIE_FN2_1 	0x2000
+#define PCIE_MB_TOPCIE_FN3_0 	0x4000
+#define PCIE_MB_TOPCIE_FN3_1 	0x8000
+#define	PCIE_MB_TOPCIE_D2H0_DB0	0x10000
+#define	PCIE_MB_TOPCIE_D2H0_DB1	0x20000
+#define	PCIE_MB_TOPCIE_D2H1_DB0	0x40000
+#define	PCIE_MB_TOPCIE_D2H1_DB1	0x80000
+#define	PCIE_MB_TOPCIE_D2H2_DB0	0x100000
+#define	PCIE_MB_TOPCIE_D2H2_DB1	0x200000
+#define	PCIE_MB_TOPCIE_D2H3_DB0	0x400000
+#define	PCIE_MB_TOPCIE_D2H3_DB1	0x800000
+
+#define PCIE_MB_D2H_MB_MASK		\
+	(PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 |	\
+	PCIE_MB_TOPCIE_D2H1_DB0  | PCIE_MB_TOPCIE_D2H1_DB1 |	\
+	PCIE_MB_TOPCIE_D2H2_DB0  | PCIE_MB_TOPCIE_D2H2_DB1 |	\
+	PCIE_MB_TOPCIE_D2H3_DB0  | PCIE_MB_TOPCIE_D2H3_DB1)
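+
+/* Illustrative sketch (added for clarity, not part of the original Broadcom header):
+ * a host-side handler acknowledging device-to-host doorbell bits collected in the
+ * mailboxint register. This assumes the usual write-one-to-clear semantics for that
+ * register; plain volatile accesses stand in for the driver's OSL/bus wrappers.
+ */
+#if 0	/* example only */
+static uint32
+pcie_ack_d2h_doorbells(sbpcieregs_t *pcieregs)
+{
+	uint32 mbint = pcieregs->mailboxint & PCIE_MB_D2H_MB_MASK;
+
+	if (mbint)
+		pcieregs->mailboxint = mbint;	/* write the set bits back to clear the doorbells */
+	return mbint;
+}
+#endif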
+
+#define SBTOPCIE0_BASE 0x08000000
+#define SBTOPCIE1_BASE 0x0c000000
+
+/* On chips with CCI-400, the small pcie 128 MB region base has shifted */
+#define CCI400_SBTOPCIE0_BASE  0x20000000
+#define CCI400_SBTOPCIE1_BASE  0x24000000
+
+/* SB to PCIE translation masks */
+#define SBTOPCIE0_MASK	0xfc000000
+#define SBTOPCIE1_MASK	0xfc000000
+#define SBTOPCIE2_MASK	0xc0000000
+
+/* Access type bits (0:1) */
+#define SBTOPCIE_MEM	0
+#define SBTOPCIE_IO	1
+#define SBTOPCIE_CFG0	2
+#define SBTOPCIE_CFG1	3
+
+/* Prefetch enable bit 2 */
+#define SBTOPCIE_PF		4
+
+/* Write Burst enable for memory write bit 3 */
+#define SBTOPCIE_WR_BURST	8
+
+/* config access */
+#define CONFIGADDR_FUNC_MASK	0x7000
+#define CONFIGADDR_FUNC_SHF	12
+#define CONFIGADDR_REG_MASK	0x0FFF
+#define CONFIGADDR_REG_SHF	0
+
+#define PCIE_CONFIG_INDADDR(f, r)	((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \
+			                 (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF))
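+
+/* Illustrative sketch (added for clarity, not part of the original Broadcom header):
+ * reading a config-space register through the configaddr/configdata window of the
+ * core, with the function/register encoding taken from the CONFIGADDR_* fields above.
+ * Plain volatile accesses stand in for the driver's OSL R_REG/W_REG wrappers.
+ */
+#if 0	/* example only */
+static uint32
+pcie_cfg_indirect_read(sbpcieregs_t *pcieregs, uint32 func, uint32 regoff)
+{
+	pcieregs->configaddr = PCIE_CONFIG_INDADDR(func, regoff);
+	(void)pcieregs->configaddr;	/* read back so the address write has landed */
+	return pcieregs->configdata;
+}
+#endif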
+
+/* PCIE protocol regs Indirect Address */
+#define PCIEADDR_PROT_MASK	0x300
+#define PCIEADDR_PROT_SHF	8
+#define PCIEADDR_PL_TLP		0
+#define PCIEADDR_PL_DLLP	1
+#define PCIEADDR_PL_PLP		2
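+
+/* Illustrative sketch (added for clarity, not part of the original Broadcom header):
+ * reading one of the PLP/DLLP/TLP diagnostic registers below on a Gen1 core through
+ * the pcieindaddr/pcieinddata pair. The diagnostic register offsets below line up
+ * with the protocol-layer select in bits 9:8 (PCIEADDR_PROT_MASK), so the offset can
+ * be written as-is; plain volatile accesses stand in for the driver's OSL wrappers.
+ */
+#if 0	/* example only */
+static uint32
+pcie_proto_read(sbpcieregs_t *pcieregs, uint32 protregoff)
+{
+	pcieregs->u.pcie1.pcieindaddr = protregoff;
+	return pcieregs->u.pcie1.pcieinddata;
+}
+/* e.g. pcie_proto_read(pcieregs, PCIE_DLLP_LSREG) & PCIE_DLLP_LSREG_LINKUP */
+#endif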
+
+#define	PCIE_CORE_REG_CONTROL		0x00u   /* Control     */
+#define	PCIE_CORE_REG_IOSTATUS		0x04u   /* IO status   */
+#define	PCIE_CORE_REG_BITSTATUS		0x0Cu   /* bitstatus   */
+#define	PCIE_CORE_REG_GPIO_SEL		0x10u   /* gpio sel    */
+#define	PCIE_CORE_REG_GPIO_OUT_EN	0x14u   /* gpio out en */
+#define	PCIE_CORE_REG_INT_STATUS	0x20u   /* int status  */
+#define	PCIE_CORE_REG_INT_MASK		0x24u   /* int mask    */
+#define	PCIE_CORE_REG_SB_PCIE_MB	0x28u   /* sbpcie mb   */
+#define	PCIE_CORE_REG_ERRLOG		0x40u   /* errlog      */
+#define	PCIE_CORE_REG_ERR_ADDR		0x44u   /* errlog addr */
+#define	PCIE_CORE_REG_MB_INTR		0x48u   /* MB intr     */
+#define	PCIE_CORE_REG_SB_PCIE_0		0x100u  /* sbpcie0 map */
+#define	PCIE_CORE_REG_SB_PCIE_1		0x104u  /* sbpcie1 map */
+#define	PCIE_CORE_REG_SB_PCIE_2		0x108u  /* sbpcie2 map */
+
+/* PCIE Config registers */
+#define	PCIE_CFG_DEV_STS_CTRL_2		0x0d4u	/* dev_sts_control_2   */
+#define	PCIE_CFG_ADV_ERR_CAP		0x100u	/* adv_err_cap         */
+#define	PCIE_CFG_UC_ERR_STS		0x104u	/* uc_err_status       */
+#define	PCIE_CFG_UC_ERR_MASK		0x108u	/* ucorr_err_mask      */
+#define	PCIE_CFG_UNCOR_ERR_SERV		0x10cu	/* ucorr_err_sevr      */
+#define	PCIE_CFG_CORR_ERR_STS		0x110u	/* corr_err_status     */
+#define	PCIE_CFG_CORR_ERR_MASK		0x114u	/* corr_err_mask       */
+#define	PCIE_CFG_ADV_ERR_CTRL		0x118u	/* adv_err_cap_control */
+#define	PCIE_CFG_HDR_LOG1		0x11Cu	/* header_log1         */
+#define	PCIE_CFG_HDR_LOG2		0x120u	/* header_log2         */
+#define	PCIE_CFG_HDR_LOG3		0x124u	/* header_log3         */
+#define	PCIE_CFG_HDR_LOG4		0x128u	/* header_log4         */
+#define	PCIE_CFG_PML1_SUB_CAP_ID	0x240u	/* PML1sub_capID       */
+#define	PCIE_CFG_PML1_SUB_CAP_REG	0x244u	/* PML1_sub_Cap_reg    */
+#define	PCIE_CFG_PML1_SUB_CTRL1		0x248u	/* PML1_sub_control1   */
+#define	PCIE_CFG_PML1_SUB_CTRL3		0x24Cu	/* PML1_sub_control2   */
+#define	PCIE_CFG_TL_CTRL_5		0x814u	/* tl_control_5        */
+#define	PCIE_CFG_PHY_ERR_ATT_VEC	0x1820u	/* phy_err_attn_vec    */
+#define	PCIE_CFG_PHY_ERR_ATT_MASK	0x1824u	/* phy_err_attn_mask   */
+
+/* PCIE protocol PHY diagnostic registers */
+#define	PCIE_PLP_MODEREG		0x200u /* Mode */
+#define	PCIE_PLP_STATUSREG		0x204u /* Status */
+#define PCIE_PLP_LTSSMCTRLREG		0x208u /* LTSSM control */
+#define PCIE_PLP_LTLINKNUMREG		0x20cu /* Link Training Link number */
+#define PCIE_PLP_LTLANENUMREG		0x210u /* Link Training Lane number */
+#define PCIE_PLP_LTNFTSREG		0x214u /* Link Training N_FTS */
+#define PCIE_PLP_ATTNREG		0x218u /* Attention */
+#define PCIE_PLP_ATTNMASKREG		0x21Cu /* Attention Mask */
+#define PCIE_PLP_RXERRCTR		0x220u /* Rx Error */
+#define PCIE_PLP_RXFRMERRCTR		0x224u /* Rx Framing Error */
+#define PCIE_PLP_RXERRTHRESHREG		0x228u /* Rx Error threshold */
+#define PCIE_PLP_TESTCTRLREG		0x22Cu /* Test Control reg */
+#define PCIE_PLP_SERDESCTRLOVRDREG	0x230u /* SERDES Control Override */
+#define PCIE_PLP_TIMINGOVRDREG		0x234u /* Timing param override */
+#define PCIE_PLP_RXTXSMDIAGREG		0x238u /* RXTX State Machine Diag */
+#define PCIE_PLP_LTSSMDIAGREG		0x23Cu /* LTSSM State Machine Diag */
+
+/* PCIE protocol DLLP diagnostic registers */
+#define PCIE_DLLP_LCREG			0x100u /* Link Control */
+#define PCIE_DLLP_LSREG			0x104u /* Link Status */
+#define PCIE_DLLP_LAREG			0x108u /* Link Attention */
+#define PCIE_DLLP_LAMASKREG		0x10Cu /* Link Attention Mask */
+#define PCIE_DLLP_NEXTTXSEQNUMREG	0x110u /* Next Tx Seq Num */
+#define PCIE_DLLP_ACKEDTXSEQNUMREG	0x114u /* Acked Tx Seq Num */
+#define PCIE_DLLP_PURGEDTXSEQNUMREG	0x118u /* Purged Tx Seq Num */
+#define PCIE_DLLP_RXSEQNUMREG		0x11Cu /* Rx Sequence Number */
+#define PCIE_DLLP_LRREG			0x120u /* Link Replay */
+#define PCIE_DLLP_LACKTOREG		0x124u /* Link Ack Timeout */
+#define PCIE_DLLP_PMTHRESHREG		0x128u /* Power Management Threshold */
+#define PCIE_DLLP_RTRYWPREG		0x12Cu /* Retry buffer write ptr */
+#define PCIE_DLLP_RTRYRPREG		0x130u /* Retry buffer Read ptr */
+#define PCIE_DLLP_RTRYPPREG		0x134u /* Retry buffer Purged ptr */
+#define PCIE_DLLP_RTRRWREG		0x138u /* Retry buffer Read/Write */
+#define PCIE_DLLP_ECTHRESHREG		0x13Cu /* Error Count Threshold */
+#define PCIE_DLLP_TLPERRCTRREG		0x140u /* TLP Error Counter */
+#define PCIE_DLLP_ERRCTRREG		0x144u /* Error Counter */
+#define PCIE_DLLP_NAKRXCTRREG		0x148u /* NAK Received Counter */
+#define PCIE_DLLP_TESTREG		0x14Cu /* Test */
+#define PCIE_DLLP_PKTBIST		0x150u /* Packet BIST */
+#define PCIE_DLLP_PCIE11		0x154u /* DLLP PCIE 1.1 reg */
+
+#define PCIE_DLLP_LSREG_LINKUP		(1u << 16u)
+
+/* PCIE protocol TLP diagnostic registers */
+#define PCIE_TLP_CONFIGREG		0x000u /* Configuration */
+#define PCIE_TLP_WORKAROUNDSREG		0x004u /* TLP Workarounds */
+#define PCIE_TLP_WRDMAUPPER		0x010u /* Write DMA Upper Address */
+#define PCIE_TLP_WRDMALOWER		0x014u /* Write DMA Lower Address */
+#define PCIE_TLP_WRDMAREQ_LBEREG	0x018u /* Write DMA Len/ByteEn Req */
+#define PCIE_TLP_RDDMAUPPER		0x01Cu /* Read DMA Upper Address */
+#define PCIE_TLP_RDDMALOWER		0x020u /* Read DMA Lower Address */
+#define PCIE_TLP_RDDMALENREG		0x024u /* Read DMA Len Req */
+#define PCIE_TLP_MSIDMAUPPER		0x028u /* MSI DMA Upper Address */
+#define PCIE_TLP_MSIDMALOWER		0x02Cu /* MSI DMA Lower Address */
+#define PCIE_TLP_MSIDMALENREG		0x030u /* MSI DMA Len Req */
+#define PCIE_TLP_SLVREQLENREG		0x034u /* Slave Request Len */
+#define PCIE_TLP_FCINPUTSREQ		0x038u /* Flow Control Inputs */
+#define PCIE_TLP_TXSMGRSREQ		0x03Cu /* Tx StateMachine and Gated Req */
+#define PCIE_TLP_ADRACKCNTARBLEN	0x040u /* Address Ack XferCnt and ARB Len */
+#define PCIE_TLP_DMACPLHDR0		0x044u /* DMA Completion Hdr 0 */
+#define PCIE_TLP_DMACPLHDR1		0x048u /* DMA Completion Hdr 1 */
+#define PCIE_TLP_DMACPLHDR2		0x04Cu /* DMA Completion Hdr 2 */
+#define PCIE_TLP_DMACPLMISC0		0x050u /* DMA Completion Misc0 */
+#define PCIE_TLP_DMACPLMISC1		0x054u /* DMA Completion Misc1 */
+#define PCIE_TLP_DMACPLMISC2		0x058u /* DMA Completion Misc2 */
+#define PCIE_TLP_SPTCTRLLEN		0x05Cu /* Split Controller Req len */
+#define PCIE_TLP_SPTCTRLMSIC0		0x060u /* Split Controller Misc 0 */
+#define PCIE_TLP_SPTCTRLMSIC1		0x064u /* Split Controller Misc 1 */
+#define PCIE_TLP_BUSDEVFUNC		0x068u /* Bus/Device/Func */
+#define PCIE_TLP_RESETCTR		0x06Cu /* Reset Counter */
+#define PCIE_TLP_RTRYBUF		0x070u /* Retry Buffer value */
+#define PCIE_TLP_TGTDEBUG1		0x074u /* Target Debug Reg1 */
+#define PCIE_TLP_TGTDEBUG2		0x078u /* Target Debug Reg2 */
+#define PCIE_TLP_TGTDEBUG3		0x07Cu /* Target Debug Reg3 */
+#define PCIE_TLP_TGTDEBUG4		0x080u /* Target Debug Reg4 */
+
+/* PCIE2 MDIO register offsets */
+#define PCIE2_MDIO_CONTROL    0x128
+#define PCIE2_MDIO_WR_DATA    0x12C
+#define PCIE2_MDIO_RD_DATA    0x130
+
+
+/* MDIO control */
+#define MDIOCTL_DIVISOR_MASK		0x7fu	/* clock to be used on MDIO */
+#define MDIOCTL_DIVISOR_VAL		0x2u
+#define MDIOCTL_PREAM_EN		0x80u	/* Enable preamble sequence */
+#define MDIOCTL_ACCESS_DONE		0x100u   /* Transaction complete */
+
+/* MDIO Data */
+#define MDIODATA_MASK			0x0000ffff	/* data 2 bytes */
+#define MDIODATA_TA			0x00020000	/* Turnaround */
+#define MDIODATA_REGADDR_SHF_OLD	18		/* Regaddr shift (rev < 10) */
+#define MDIODATA_REGADDR_MASK_OLD	0x003c0000	/* Regaddr Mask (rev < 10) */
+#define MDIODATA_DEVADDR_SHF_OLD	22		/* Physmedia devaddr shift (rev < 10) */
+#define MDIODATA_DEVADDR_MASK_OLD	0x0fc00000	/* Physmedia devaddr Mask (rev < 10) */
+#define MDIODATA_REGADDR_SHF		18		/* Regaddr shift */
+#define MDIODATA_REGADDR_MASK		0x007c0000	/* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF		23		/* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK		0x0f800000	/* Physmedia devaddr Mask */
+#define MDIODATA_WRITE			0x10000000	/* write Transaction */
+#define MDIODATA_READ			0x20000000	/* Read Transaction */
+#define MDIODATA_START			0x40000000	/* start of Transaction */
+
+#define MDIODATA_DEV_ADDR		0x0		/* dev address for serdes */
+#define	MDIODATA_BLK_ADDR		0x1F		/* blk address for serdes */
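+
+/* Illustrative sketch (added for clarity, not part of the original Broadcom header):
+ * a Gen1 core MDIO write to a SERDES register, composed from the MDIODATA_* fields
+ * above (current, non-_OLD layout) and polled for MDIOCTL_ACCESS_DONE. Real code
+ * typically also selects the SERDES block (MDIODATA_BLK_ADDR, MDIO_DEV_* below)
+ * before addressing a register; the spin count here is an arbitrary example value
+ * and plain volatile accesses stand in for the driver's OSL wrappers.
+ */
+#if 0	/* example only */
+static int
+pcie_serdes_mdio_write(sbpcieregs_t *pcieregs, uint32 devaddr, uint32 regaddr, uint32 val)
+{
+	int spins = 200;
+
+	/* enable MDIO access with preamble and the default clock divisor */
+	pcieregs->u.pcie1.mdiocontrol = MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL;
+
+	pcieregs->u.pcie1.mdiodata = MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
+		(devaddr << MDIODATA_DEVADDR_SHF) | (regaddr << MDIODATA_REGADDR_SHF) |
+		(val & MDIODATA_MASK);
+
+	while (spins-- > 0) {
+		if (pcieregs->u.pcie1.mdiocontrol & MDIOCTL_ACCESS_DONE)
+			return 0;
+	}
+	return -1;	/* transaction never completed */
+}
+#endif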
+
+/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */
+#define MDIOCTL2_DIVISOR_MASK		0x7f	/* clock to be used on MDIO */
+#define MDIOCTL2_DIVISOR_VAL		0x2
+#define MDIOCTL2_REGADDR_SHF		8		/* Regaddr shift */
+#define MDIOCTL2_REGADDR_MASK		0x00FFFF00	/* Regaddr Mask */
+#define MDIOCTL2_DEVADDR_SHF		24		/* Physmedia devaddr shift */
+#define MDIOCTL2_DEVADDR_MASK		0x0f000000	/* Physmedia devaddr Mask */
+#define MDIOCTL2_SLAVE_BYPASS		0x10000000	/* IP slave bypass */
+#define MDIOCTL2_READ			0x20000000	/* Read transaction */
+
+#define MDIODATA2_DONE			0x80000000u	/* rd/wr transaction done */
+#define MDIODATA2_MASK			0x7FFFFFFF	/* rd/wr transaction data */
+#define MDIODATA2_DEVADDR_SHF		4		/* Physmedia devaddr shift */
+
+
+/* MDIO devices (SERDES modules)
+ *  Unlike old pcie cores (rev < 10), the rev 10 pcie serdes organizes its registers
+ *  into a few blocks, so a two-layer mapping (block idx, register offset) is required.
+ */
+#define MDIO_DEV_IEEE0		0x000
+#define MDIO_DEV_IEEE1		0x001
+#define MDIO_DEV_BLK0		0x800
+#define MDIO_DEV_BLK1		0x801
+#define MDIO_DEV_BLK2		0x802
+#define MDIO_DEV_BLK3		0x803
+#define MDIO_DEV_BLK4		0x804
+#define MDIO_DEV_TXPLL		0x808	/* TXPLL register block idx */
+#define MDIO_DEV_TXCTRL0	0x820
+#define MDIO_DEV_SERDESID	0x831
+#define MDIO_DEV_RXCTRL0	0x840
+
+
+/* XgxsBlk1_A Register Offsets */
+#define BLK1_PWR_MGMT0		0x16
+#define BLK1_PWR_MGMT1		0x17
+#define BLK1_PWR_MGMT2		0x18
+#define BLK1_PWR_MGMT3		0x19
+#define BLK1_PWR_MGMT4		0x1A
+
+/* serdes regs (rev < 10) */
+#define MDIODATA_DEV_PLL       		0x1d	/* SERDES PLL Dev */
+#define MDIODATA_DEV_TX        		0x1e	/* SERDES TX Dev */
+#define MDIODATA_DEV_RX        		0x1f	/* SERDES RX Dev */
+	/* SERDES RX registers */
+#define SERDES_RX_CTRL			1	/* Rx cntrl */
+#define SERDES_RX_TIMER1		2	/* Rx Timer1 */
+#define SERDES_RX_CDR			6	/* CDR */
+#define SERDES_RX_CDRBW			7	/* CDR BW */
+
+	/* SERDES RX control register */
+#define SERDES_RX_CTRL_FORCE		0x80	/* rxpolarity_force */
+#define SERDES_RX_CTRL_POLARITY		0x40	/* rxpolarity_value */
+
+	/* SERDES PLL registers */
+#define SERDES_PLL_CTRL                 1       /* PLL control reg */
+#define PLL_CTRL_FREQDET_EN             0x4000  /* bit 14 is FREQDET on */
+
+/* Power management threshold */
+#define PCIE_L0THRESHOLDTIME_MASK       0xFF00u	/* bits 0 - 7 */
+#define PCIE_L1THRESHOLDTIME_MASK       0xFF00u	/* bits 8 - 15 */
+#define PCIE_L1THRESHOLDTIME_SHIFT      8	/* PCIE_L1THRESHOLDTIME_SHIFT */
+#define PCIE_L1THRESHOLD_WARVAL         0x72	/* WAR value */
+#define PCIE_ASPMTIMER_EXTEND		0x01000000	/* > rev7: enable extend ASPM timer */
+
+/* SPROM offsets */
+#define SRSH_ASPM_OFFSET		4	/* word 4 */
+#define SRSH_ASPM_ENB			0x18	/* bit 3, 4 */
+#define SRSH_ASPM_L1_ENB		0x10	/* bit 4 */
+#define SRSH_ASPM_L0s_ENB		0x8	/* bit 3 */
+#define SRSH_PCIE_MISC_CONFIG		5	/* word 5 */
+#define SRSH_L23READY_EXIT_NOPERST	0x8000u	/* bit 15 */
+#define SRSH_CLKREQ_OFFSET_REV5		20	/* word 20 for srom rev <= 5 */
+#define SRSH_CLKREQ_OFFSET_REV8		52	/* word 52 for srom rev 8 */
+#define SRSH_CLKREQ_ENB			0x0800	/* bit 11 */
+#define SRSH_BD_OFFSET                  6       /* word 6 */
+#define SRSH_AUTOINIT_OFFSET            18      /* auto initialization enable */
+
+/* PCI Capability ID's
+ * Reference include/linux/pci_regs.h
+ * #define  PCI_CAP_LIST_ID	0       // Capability ID
+ * #define  PCI_CAP_ID_PM		0x01    // Power Management
+ * #define  PCI_CAP_ID_AGP		0x02    // Accelerated Graphics Port
+ * #define  PCI_CAP_ID_VPD		0x03    // Vital Product Data
+ * #define  PCI_CAP_ID_SLOTID	0x04    // Slot Identification
+ * #define  PCI_CAP_ID_MSI		0x05    // Message Signalled Interrupts
+ * #define  PCI_CAP_ID_CHSWP       0x06    // CompactPCI HotSwap
+ * #define  PCI_CAP_ID_PCIX        0x07    // PCI-X
+ * #define  PCI_CAP_ID_HT          0x08    // HyperTransport
+ * #define  PCI_CAP_ID_VNDR        0x09    // Vendor-Specific
+ * #define  PCI_CAP_ID_DBG         0x0A    // Debug port
+ * #define  PCI_CAP_ID_CCRC        0x0B    // CompactPCI Central Resource Control
+ * #define  PCI_CAP_ID_SHPC        0x0C    // PCI Standard Hot-Plug Controller
+ * #define  PCI_CAP_ID_SSVID       0x0D    // Bridge subsystem vendor/device ID
+ * #define  PCI_CAP_ID_AGP3        0x0E    // AGP Target PCI-PCI bridge
+ * #define  PCI_CAP_ID_SECDEV      0x0F    // Secure Device
+ * #define  PCI_CAP_ID_MSIX        0x11    // MSI-X
+ * #define  PCI_CAP_ID_SATA        0x12    // SATA Data/Index Conf.
+ * #define  PCI_CAP_ID_AF          0x13    // PCI Advanced Features
+ * #define  PCI_CAP_ID_EA          0x14    // PCI Enhanced Allocation
+ * #define  PCI_CAP_ID_MAX         PCI_CAP_ID_EA
+ */
+
+#define  PCIE_CAP_ID_EXP         0x10    // PCI Express
+
+/* PCIe Capabilities Offsets
+ * Reference include/linux/pci_regs.h
+ * #define PCIE_CAP_FLAGS           2       // Capabilities register
+ * #define PCIE_CAP_DEVCAP          4       // Device capabilities
+ * #define PCIE_CAP_DEVCTL          8       // Device Control
+ * #define PCIE_CAP_DEVSTA          10      // Device Status
+ * #define PCIE_CAP_LNKCAP          12      // Link Capabilities
+ * #define PCIE_CAP_LNKCTL          16      // Link Control
+ * #define PCIE_CAP_LNKSTA          18      // Link Status
+ * #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1  20      // v1 endpoints end here
+ * #define PCIE_CAP_SLTCAP          20      // Slot Capabilities
+ * #define PCIE_CAP_SLTCTL          24      // Slot Control
+ * #define PCIE_CAP_SLTSTA          26      // Slot Status
+ * #define PCIE_CAP_RTCTL           28      // Root Control
+ * #define PCIE_CAP_RTCAP           30      // Root Capabilities
+ * #define PCIE_CAP_RTSTA           32      // Root Status
+ */
+
+
+/* Linkcapability reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCAP_OFFSET         12      /* linkcap offset in pcie cap */
+#define PCIE_CAP_LINKCAP_LNKSPEED_MASK	0xf     /* Supported Link Speeds */
+#define PCIE_CAP_LINKCAP_GEN2           0x2     /* Value for GEN2 */
+
+/* Uc_Err reg offset in AER Cap */
+#define PCIE_EXTCAP_ID_ERR		0x01	/* Advanced Error Reporting */
+#define PCIE_EXTCAP_AER_UCERR_OFFSET	4	/* Uc_Err reg offset in AER Cap */
+
+/* Linkcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCTRL_OFFSET	16	/* linkctrl offset in pcie cap */
+#define PCIE_CAP_LCREG_ASPML0s		0x01	/* ASPM L0s in linkctrl */
+#define PCIE_CAP_LCREG_ASPML1		0x02	/* ASPM L1 in linkctrl */
+#define PCIE_CLKREQ_ENAB		0x100	/* CLKREQ Enab in linkctrl */
+#define PCIE_LINKSPEED_MASK		0xF0000u	/* bits 0 - 3 of high word */
+#define PCIE_LINKSPEED_SHIFT		16	/* PCIE_LINKSPEED_SHIFT */
+
+/* Devcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL_OFFSET		8	/* devctrl offset in pcie cap */
+#define PCIE_CAP_DEVCTRL_MRRS_MASK	0x7000	/* Max read request size mask */
+#define PCIE_CAP_DEVCTRL_MRRS_SHIFT	12	/* Max read request size shift */
+#define PCIE_CAP_DEVCTRL_MRRS_128B	0	/* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_256B	1	/* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_512B	2	/* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_1024B	3	/* 1024 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_MASK	0x00e0	/* Max payload size mask */
+#define PCIE_CAP_DEVCTRL_MPS_SHIFT	5	/* Max payload size shift */
+#define PCIE_CAP_DEVCTRL_MPS_128B	0	/* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_256B	1	/* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_512B	2	/* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_1024B	3	/* 1024 Byte */
+
+#define PCIE_ASPM_ENAB			3	/* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L1_ENAB		2	/* ASPM L1 only in linkctrl */
+#define PCIE_ASPM_L0s_ENAB		1	/* ASPM L0s only in linkctrl */
+#define PCIE_ASPM_DISAB			0	/* ASPM L0s & L1 disabled in linkctrl */
+
+#define PCIE_ASPM_L11_ENAB		8	/* ASPM L1.1 in PML1_sub_control2 */
+#define PCIE_ASPM_L12_ENAB		4	/* ASPM L1.2 in PML1_sub_control2 */
+
+/* NumMsg and NumMsgEn in PCIE MSI Cap */
+#define MSICAP_NUM_MSG_SHF		17
+#define MSICAP_NUM_MSG_MASK		(0x7 << MSICAP_NUM_MSG_SHF)
+#define MSICAP_NUM_MSG_EN_SHF	20
+#define MSICAP_NUM_MSG_EN_MASK	(0x7 << MSICAP_NUM_MSG_EN_SHF)
+
+/* Devcontrol2 reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL2_OFFSET	0x28	/* devctrl2 offset in pcie cap */
+#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK	0x400	/* Latency Tolerance Reporting Enable */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13	/* Enable OBFF mechanism, select signaling method */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000	/* Enable OBFF mechanism, select signaling method */
+
+/* LTR registers in PCIE Cap */
+#define PCIE_LTR0_REG_OFFSET	0x844u	/* ltr0_reg offset in pcie cap */
+#define PCIE_LTR1_REG_OFFSET	0x848u	/* ltr1_reg offset in pcie cap */
+#define PCIE_LTR2_REG_OFFSET	0x84cu	/* ltr2_reg offset in pcie cap */
+#define PCIE_LTR0_REG_DEFAULT_60	0x883c883cu	/* active latency default to 60usec */
+#define PCIE_LTR0_REG_DEFAULT_150	0x88968896u	/* active latency default to 150usec */
+#define PCIE_LTR1_REG_DEFAULT		0x88648864u	/* idle latency default to 100usec */
+#define PCIE_LTR2_REG_DEFAULT		0x90039003u	/* sleep latency default to 3msec */
+
+/* Status reg PCIE_PLP_STATUSREG */
+#define PCIE_PLP_POLARITYINV_STAT	0x10
+
+
+/* PCIE BRCM Vendor CAP REVID reg  bits */
+#define BRCMCAP_PCIEREV_CT_MASK			0xF00u
+#define BRCMCAP_PCIEREV_CT_SHIFT		8u
+#define BRCMCAP_PCIEREV_REVID_MASK		0xFFu
+#define BRCMCAP_PCIEREV_REVID_SHIFT		0
+
+#define PCIE_REVREG_CT_PCIE1		0
+#define PCIE_REVREG_CT_PCIE2		1
+
+/* PCIE GEN2 specific defines */
+/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */
+#define PCIE2R0_BRCMCAP_REVID_OFFSET		4
+#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET	8
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET	12
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET	16
+#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET		20
+#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET		24
+#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET	28
+#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET		32
+#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET	36
+#define PCIE2R0_BRCMCAP_INTMASK_OFFSET		40
+#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET	44
+#define PCIE2R0_BRCMCAP_BPADDR_OFFSET		48
+#define PCIE2R0_BRCMCAP_BPDATA_OFFSET		52
+#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET	56
+
+/* definition of configuration space registers of PCIe gen2
+ * http://hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/CurrentPcieGen2ProgramGuide/pcie_ep.htm
+ */
+#define PCIECFGREG_STATUS_CMD		0x4
+#define PCIECFGREG_PM_CSR		0x4C
+#define PCIECFGREG_MSI_CAP		0x58
+#define PCIECFGREG_MSI_ADDR_L		0x5C
+#define PCIECFGREG_MSI_ADDR_H		0x60
+#define PCIECFGREG_MSI_DATA		0x64
+#define PCIECFGREG_LINK_STATUS_CTRL	0xBCu
+#define PCIECFGREG_DEV_STATUS_CTRL 0xB4u
+#define PCIECFGGEN_DEV_STATUS_CTRL2	0xD4
+#define PCIECFGREG_LINK_STATUS_CTRL2	0xDCu
+#define PCIECFGREG_RBAR_CTRL		0x228
+#define PCIECFGREG_PML1_SUB_CTRL1	0x248
+#define PCIECFGREG_PML1_SUB_CTRL2	0x24C
+#define PCIECFGREG_REG_BAR2_CONFIG	0x4E0
+#define PCIECFGREG_REG_BAR3_CONFIG	0x4F4
+#define PCIECFGREG_PDL_CTRL1		0x1004
+#define PCIECFGREG_PDL_IDDQ		0x1814
+#define PCIECFGREG_REG_PHY_CTL7		0x181c
+#define PCIECFGREG_PHY_DBG_CLKREQ0		0x1E10
+#define PCIECFGREG_PHY_DBG_CLKREQ1		0x1E14
+#define PCIECFGREG_PHY_DBG_CLKREQ2		0x1E18
+#define PCIECFGREG_PHY_DBG_CLKREQ3		0x1E1C
+
+/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
+#define PCI_PM_L1_2_ENA_MASK		0x00000001	/* PCI-PM L1.2 Enabled */
+#define PCI_PM_L1_1_ENA_MASK		0x00000002	/* PCI-PM L1.1 Enabled */
+#define ASPM_L1_2_ENA_MASK		0x00000004	/* ASPM L1.2 Enabled */
+#define ASPM_L1_1_ENA_MASK		0x00000008	/* ASPM L1.1 Enabled */
+
+/* PCIe gen2 mailbox interrupt masks */
+#define I_MB    0x3
+#define I_BIT0  0x1
+#define I_BIT1  0x2
+
+/* PCIE gen2 config regs */
+#define PCIIntstatus	0x090
+#define PCIIntmask	0x094
+#define PCISBMbx	0x98
+
+/* enumeration Core regs */
+#define PCIH2D_MailBox  0x140
+#define PCIH2D_DB1		0x144
+#define PCID2H_MailBox  0x148
+#define PCIH2D_MailBox_1	0x150  /* for dma channel1 */
+#define PCIH2D_DB1_1		0x154
+#define PCID2H_MailBox_1	0x158
+#define PCIH2D_MailBox_2	0x160  /* for dma channel2 which will be used for Implicit DMA */
+#define PCIH2D_DB1_2		0x164
+#define PCID2H_MailBox_2	0x168
+
+#define PCIMailBoxInt	0x48
+#define PCIMailBoxMask	0x4C
+#define PCIMSIVecAssign	0x58
+
+#define I_F0_B0         (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */
+#define I_F0_B1         (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
+
+#define PCIECFGREG_DEVCONTROL	0xB4
+#define PCIECFGREG_BASEADDR0	0x10
+#define PCIECFGREG_DEVCONTROL_MRRS_SHFT	12
+#define PCIECFGREG_DEVCONTROL_MRRS_MASK	(0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT)
+#define PCIECFGREG_DEVCTRL_MPS_SHFT	5
+#define PCIECFGREG_DEVCTRL_MPS_MASK (0x7 << PCIECFGREG_DEVCTRL_MPS_SHFT)
+#define PCIECFGREG_PM_CSR_STATE_MASK 0x00000003
+#define PCIECFGREG_PM_CSR_STATE_D0 0
+#define PCIECFGREG_PM_CSR_STATE_D1 1
+#define PCIECFGREG_PM_CSR_STATE_D2 2
+#define PCIECFGREG_PM_CSR_STATE_D3_HOT 3
+#define PCIECFGREG_PM_CSR_STATE_D3_COLD 4
+
+
+/* SROM hardware region */
+#define SROM_OFFSET_BAR1_CTRL  52
+
+#define BAR1_ENC_SIZE_MASK	0x000e
+#define BAR1_ENC_SIZE_SHIFT	1
+
+#define BAR1_ENC_SIZE_1M	0
+#define BAR1_ENC_SIZE_2M	1
+#define BAR1_ENC_SIZE_4M	2
+
+#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET		0xD4
+#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB	0x400
+
+/*
+ * Latency Tolerance Reporting (LTR) states
+ * Active has the least tolerant latency requirement
+ * Sleep is most tolerant
+ */
+#define LTR_ACTIVE				2
+#define LTR_ACTIVE_IDLE				1
+#define LTR_SLEEP				0
+#define LTR_FINAL_MASK				0x300
+#define LTR_FINAL_SHIFT				8
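+
+/* Illustrative sketch (added for clarity, not part of the original Broadcom header):
+ * requesting the least tolerant ("active") latency before starting traffic by
+ * writing LTR_ACTIVE to the Gen2 ltr_state register, assuming LTR reporting has
+ * been enabled through PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK in device control 2.
+ */
+#if 0	/* example only */
+static void
+pcie_request_ltr_active(sbpcieregs_t *pcieregs)
+{
+	pcieregs->u.pcie2.ltr_state = LTR_ACTIVE;
+}
+#endif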
+
+/* pwrinstatus, pwrintmask regs */
+#define PCIEGEN2_PWRINT_D0_STATE_SHIFT		0
+#define PCIEGEN2_PWRINT_D1_STATE_SHIFT		1
+#define PCIEGEN2_PWRINT_D2_STATE_SHIFT		2
+#define PCIEGEN2_PWRINT_D3_STATE_SHIFT		3
+#define PCIEGEN2_PWRINT_L0_LINK_SHIFT		4
+#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT		5
+#define PCIEGEN2_PWRINT_L1_LINK_SHIFT		6
+#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT	7
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT	8
+
+#define PCIEGEN2_PWRINT_D0_STATE_MASK		(1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D1_STATE_MASK		(1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D2_STATE_MASK		(1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D3_STATE_MASK		(1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_L0_LINK_MASK		(1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L0s_LINK_MASK		(1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L1_LINK_MASK		(1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK		(1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK	(1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT)
+
+/* sbtopcie mail box */
+#define SBTOPCIE_MB_FUNC0_SHIFT 8
+#define SBTOPCIE_MB_FUNC1_SHIFT 10
+#define SBTOPCIE_MB_FUNC2_SHIFT 12
+#define SBTOPCIE_MB_FUNC3_SHIFT 14
+
+#define SBTOPCIE_MB1_FUNC0_SHIFT 9
+#define SBTOPCIE_MB1_FUNC1_SHIFT 11
+#define SBTOPCIE_MB1_FUNC2_SHIFT 13
+#define SBTOPCIE_MB1_FUNC3_SHIFT 15
+
+/* pcieiocstatus */
+#define PCIEGEN2_IOC_D0_STATE_SHIFT		8
+#define PCIEGEN2_IOC_D1_STATE_SHIFT		9
+#define PCIEGEN2_IOC_D2_STATE_SHIFT		10
+#define PCIEGEN2_IOC_D3_STATE_SHIFT		11
+#define PCIEGEN2_IOC_L0_LINK_SHIFT		12
+#define PCIEGEN2_IOC_L1_LINK_SHIFT		13
+#define PCIEGEN2_IOC_L1L2_LINK_SHIFT		14
+#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT		15
+
+#define PCIEGEN2_IOC_D0_STATE_MASK		(1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
+#define PCIEGEN2_IOC_D1_STATE_MASK		(1 << PCIEGEN2_IOC_D1_STATE_SHIFT)
+#define PCIEGEN2_IOC_D2_STATE_MASK		(1 << PCIEGEN2_IOC_D2_STATE_SHIFT)
+#define PCIEGEN2_IOC_D3_STATE_MASK		(1 << PCIEGEN2_IOC_D3_STATE_SHIFT)
+#define PCIEGEN2_IOC_L0_LINK_MASK		(1 << PCIEGEN2_IOC_L0_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1_LINK_MASK		(1 << PCIEGEN2_IOC_L1_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1L2_LINK_MASK		(1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
+#define PCIEGEN2_IOC_L2_L3_LINK_MASK		(1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
+
+/* stat_ctrl */
+#define PCIE_STAT_CTRL_RESET		0x1
+#define PCIE_STAT_CTRL_ENABLE		0x2
+#define PCIE_STAT_CTRL_INTENABLE	0x4
+#define PCIE_STAT_CTRL_INTSTATUS	0x8
+
+/* cpl_timeout_ctrl_reg */
+#define PCIE_CTO_TO_THRESHOLD_SHIFT	0
+#define PCIE_CTO_TO_THRESHHOLD_MASK	(0xfffff << PCIE_CTO_TO_THRESHOLD_SHIFT)
+
+#define PCIE_CTO_CLKCHKCNT_SHIFT		24
+#define PCIE_CTO_CLKCHKCNT_MASK		(0xf << PCIE_CTO_CLKCHKCNT_SHIFT)
+
+#define PCIE_CTO_ENAB_SHIFT			31
+#define PCIE_CTO_ENAB_MASK			(0x1 << PCIE_CTO_ENAB_SHIFT)
+
+#define PCIE_CTO_TO_THRESH_DEFAULT	0x58000
+#define PCIE_CTO_CLKCHKCNT_VAL		0xA
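+
+/* Illustrative sketch (added for clarity, not part of the original Broadcom header):
+ * composing a completion-timeout control value from the fields above (default
+ * threshold, example clock-check count, enable bit) for the ctoctrl register
+ * declared in sbpcieregs_t. Plain volatile access stands in for the OSL wrappers.
+ */
+#if 0	/* example only */
+static void
+pcie_cto_enable(sbpcieregs_t *pcieregs)
+{
+	pcieregs->ctoctrl =
+		((PCIE_CTO_TO_THRESH_DEFAULT << PCIE_CTO_TO_THRESHOLD_SHIFT) &
+		 PCIE_CTO_TO_THRESHHOLD_MASK) |
+		((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
+		 PCIE_CTO_CLKCHKCNT_MASK) |
+		PCIE_CTO_ENAB_MASK;
+}
+#endif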
+
+/* ErrLog */
+#define PCIE_SROMRD_ERR_SHIFT			5
+#define PCIE_SROMRD_ERR_MASK			(0x1 << PCIE_SROMRD_ERR_SHIFT)
+
+#define PCIE_CTO_ERR_SHIFT			8
+#define PCIE_CTO_ERR_MASK				(0x1 << PCIE_CTO_ERR_SHIFT)
+
+#define PCIE_CTO_ERR_CODE_SHIFT		9
+#define PCIE_CTO_ERR_CODE_MASK		(0x3 << PCIE_CTO_ERR_CODE_SHIFT)
+
+#define PCIE_BP_CLK_OFF_ERR_SHIFT		12
+#define PCIE_BP_CLK_OFF_ERR_MASK		(0x1 << PCIE_BP_CLK_OFF_ERR_SHIFT)
+
+#define PCIE_BP_IN_RESET_ERR_SHIFT	13
+#define PCIE_BP_IN_RESET_ERR_MASK		(0x1 << PCIE_BP_IN_RESET_ERR_SHIFT)
+
+#ifdef BCMDRIVER
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+void pcie_set_trefup_time_100us(si_t *sih);
+#endif /* BCMDRIVER */
+
+/* DMA intstatus and intmask */
+#define	I_PC		(1 << 10)	/* pci descriptor error */
+#define	I_PD		(1 << 11)	/* pci data error */
+#define	I_DE		(1 << 12)	/* descriptor protocol error */
+#define	I_RU		(1 << 13)	/* receive descriptor underflow */
+#define	I_RO		(1 << 14)	/* receive fifo overflow */
+#define	I_XU		(1 << 15)	/* transmit fifo underflow */
+#define	I_RI		(1 << 16)	/* receive interrupt */
+#define	I_XI		(1 << 24)	/* transmit interrupt */
+
+#endif	/* _PCIE_CORE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/rte_ioctl.h b/drivers/net/wireless/bcmdhd/include/rte_ioctl.h
new file mode 100644
index 0000000..f4c8c80
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/rte_ioctl.h
@@ -0,0 +1,87 @@
+/*
+ * HND Run Time Environment ioctl.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: rte_ioctl.h 615249 2016-01-27 02:04:07Z $
+ */
+
+#ifndef _rte_ioctl_h_
+#define _rte_ioctl_h_
+
+/* RTE IOCTL definitions for generic ether devices */
+#define RTEGHWADDR		0x8901
+#define RTESHWADDR		0x8902
+#define RTEGMTU			0x8903
+#define RTEGSTATS		0x8904
+#define RTEGALLMULTI		0x8905
+#define RTESALLMULTI		0x8906
+#define RTEGPROMISC		0x8907
+#define RTESPROMISC		0x8908
+#define RTESMULTILIST	0x8909
+#define RTEGUP			0x890A
+#define RTEGPERMADDR		0x890B
+#define RTEDEVPWRSTCHG		0x890C	/* Device pwr state change for PCIedev */
+#define RTEDEVPMETOGGLE		0x890D	/* Toggle PME# to wake up the host */
+
+#define RTE_IOCTL_QUERY			0x00
+#define RTE_IOCTL_SET			0x01
+#define RTE_IOCTL_OVL_IDX_MASK	0x1e
+#define RTE_IOCTL_OVL_RSV		0x20
+#define RTE_IOCTL_OVL			0x40
+#define RTE_IOCTL_OVL_IDX_SHIFT	1
+
+enum hnd_ioctl_cmd {
+	HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */
+
+	/* PCIEDEV specific wl <--> bus ioctls */
+	BUS_GET_VAR = 2,
+	BUS_SET_VAR = 3,
+	BUS_FLUSH_RXREORDER_Q = 4,
+	BUS_SET_LTR_STATE = 5,
+	BUS_FLUSH_CHAINED_PKTS = 6,
+	BUS_SET_COPY_COUNT = 7,
+	BUS_UPDATE_FLOW_PKTS_MAX = 8,
+	BUS_UPDATE_EXTRA_TXLFRAGS = 9
+};
+
+#define SDPCMDEV_SET_MAXTXPKTGLOM	1
+
+typedef struct memuse_info {
+	uint16 ver;			/* version of this struct */
+	uint16 len;			/* length in bytes of this structure */
+	uint32 tot;			/* Total memory */
+	uint32 text_len;	/* Size of Text segment memory */
+	uint32 data_len;	/* Size of Data segment memory */
+	uint32 bss_len;		/* Size of BSS segment memory */
+
+	uint32 arena_size;	/* Total Heap size */
+	uint32 arena_free;	/* Heap memory available or free */
+	uint32 inuse_size;	/* Heap memory currently in use */
+	uint32 inuse_hwm;	/* High watermark of memory - reclaimed memory */
+	uint32 inuse_overhead;	/* tally of allocated mem_t blocks */
+	uint32 inuse_total;	/* Heap in-use + Heap overhead memory  */
+} memuse_info_t;
+
+#endif /* _rte_ioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h
new file mode 100644
index 0000000..cbc75b2f5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -0,0 +1,4417 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h 657872 2016-09-02 22:17:34Z $
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ */
+
+#ifndef	_SBCHIPC_H
+#define	_SBCHIPC_H
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
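+
+/* Worked example (added for clarity, not part of the original Broadcom header):
+ * PAD expands through _XSTR/_PADLINE so that __LINE__ is substituted before the
+ * token paste. A member written on line 123 as
+ *
+ *	uint32	PAD[2];
+ *
+ * therefore becomes
+ *
+ *	uint32	pad123[2];
+ *
+ * which is why the register structs below can repeat PAD members without
+ * duplicate-field-name errors.
+ */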
+
+/**
+ * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the
+ * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to
+ * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead
+ * be assigned their respective chipc-specific address space and connected to the Always On
+ * Backplane via the APB interface.
+ */
+typedef volatile struct {
+	uint32  PAD[384];
+	uint32	pmucontrol;		/* 0x600 */
+	uint32	pmucapabilities; /* 0x604 */
+	uint32	pmustatus;	/* 0x608 */
+	uint32	res_state;	/* 0x60C */
+	uint32	res_pending;	/* 0x610 */
+	uint32	pmutimer;	/* 0x614 */
+	uint32	min_res_mask;	/* 0x618 */
+	uint32	max_res_mask;	/* 0x61C */
+	uint32	res_table_sel;	/* 0x620 */
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		/* 0x638, rev >= 1 */
+	uint32	gpioenable;		/* 0x63c, rev >= 1 */
+	uint32	res_req_timer_sel;	/* 0x640 */
+	uint32	res_req_timer;		/* 0x644 */
+	uint32	res_req_mask;		/* 0x648 */
+	uint32	core_cap_ext;		/* 0x64C */
+	uint32	chipcontrol_addr;	/* 0x650 */
+	uint32	chipcontrol_data;	/* 0x654 */
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		/* 0x668, corerev >= 28 */
+	uint32	pmu_xtalfreq;		/* 0x66C, pmurev >= 10 */
+	uint32  retention_ctl;		/* 0x670 */
+	uint32	ILPPeriod;		/* 0x674 */
+	uint32  PAD[2];
+	uint32  retention_grpidx;	/* 0x680 */
+	uint32  retention_grpctl;	/* 0x684 */
+	uint32  mac_res_req_timer;	/* 0x688 */
+	uint32  mac_res_req_mask;	/* 0x68c */
+	uint32  PAD[18];
+	uint32	pmucontrol_ext;		/* 0x6d8 */
+	uint32	slowclkperiod;		/* 0x6dc */
+	uint32	PAD[8];
+	uint32	pmuintmask0;		/* 0x700 */
+	uint32	pmuintmask1;		/* 0x704 */
+	uint32  PAD[14];
+	uint32  pmuintstatus;		/* 0x740 */
+	uint32	extwakeupstatus;	/* 0x744 */
+	uint32  watchdog_res_mask;	/* 0x748 */
+	uint32	PAD[1];		/* 0x74C */
+	uint32	swscratch;		/* 0x750 */
+	uint32	PAD[3];		/* 0x754-0x75C */
+	uint32	extwakemask[2]; /* 0x760-0x764 */
+	uint32	PAD[2];		/* 0x768-0x76C */
+	uint32	extwakereqmask[2]; /* 0x770-0x774 */
+	uint32	PAD[2];		/* 0x778-0x77C */
+	uint32	pmuintctrl0;	/* 0x780 */
+	uint32	pmuintctrl1;	/* 0x784 */
+	uint32	PAD[2];
+	uint32	extwakectrl[2];	/* 0x790 */
+} pmuregs_t;
+
+typedef struct eci_prerev35 {
+	uint32	eci_output;
+	uint32	eci_control;
+	uint32	eci_inputlo;
+	uint32	eci_inputmi;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolaritymi;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskmi;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventmi;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskmi;
+	uint32	eci_eventmaskhi;
+	uint32	PAD[3];
+} eci_prerev35_t;
+
+typedef struct eci_rev35 {
+	uint32	eci_outputlo;
+	uint32	eci_outputhi;
+	uint32	eci_controllo;
+	uint32	eci_controlhi;
+	uint32	eci_inputlo;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskhi;
+	uint32	eci_auxtx;
+	uint32	eci_auxrx;
+	uint32	eci_datatag;
+	uint32	eci_uartescvalue;
+	uint32	eci_autobaudctr;
+	uint32	eci_uartfifolevel;
+} eci_rev35_t;
+
+typedef struct flash_config {
+	uint32	PAD[19];
+	/* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */
+	uint32 flashstrconfig;
+} flash_config_t;
+
+typedef volatile struct {
+	uint32	chipid;			/* 0x0 */
+	uint32	capabilities;
+	uint32	corecontrol;		/* corerev >= 1 */
+	uint32	bist;
+
+	/* OTP */
+	uint32	otpstatus;		/* 0x10, corerev >= 10 */
+	uint32	otpcontrol;
+	uint32	otpprog;
+	uint32	otplayout;		/* corerev >= 23 */
+
+	/* Interrupt control */
+	uint32	intstatus;		/* 0x20 */
+	uint32	intmask;
+
+	/* Chip specific regs */
+	uint32	chipcontrol;		/* 0x28, rev >= 11 */
+	uint32	chipstatus;		/* 0x2c, rev >= 11 */
+
+	/* Jtag Master */
+	uint32	jtagcmd;		/* 0x30, rev >= 10 */
+	uint32	jtagir;
+	uint32	jtagdr;
+	uint32	jtagctrl;
+
+	/* serial flash interface registers */
+	uint32	flashcontrol;		/* 0x40 */
+	uint32	flashaddress;
+	uint32	flashdata;
+	uint32	otplayoutextension;	/* rev >= 35 */
+
+	/* Silicon backplane configuration broadcast control */
+	uint32	broadcastaddress;	/* 0x50 */
+	uint32	broadcastdata;
+
+	/* gpio - cleared only by power-on-reset */
+	uint32	gpiopullup;		/* 0x58, corerev >= 20 */
+	uint32	gpiopulldown;		/* 0x5c, corerev >= 20 */
+	uint32	gpioin;			/* 0x60 */
+	uint32	gpioout;		/* 0x64 */
+	uint32	gpioouten;		/* 0x68 */
+	uint32	gpiocontrol;		/* 0x6C */
+	uint32	gpiointpolarity;	/* 0x70 */
+	uint32	gpiointmask;		/* 0x74 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioevent;
+	uint32	gpioeventintmask;
+
+	/* Watchdog timer */
+	uint32	watchdog;		/* 0x80 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioeventintpolarity;
+
+	/* GPIO based LED powersave registers corerev >= 16 */
+	uint32  gpiotimerval;		/* 0x88 */
+	uint32  gpiotimeroutmask;
+
+	/* clock control */
+	uint32	clockcontrol_n;		/* 0x90 */
+	uint32	clockcontrol_sb;	/* aka m0 */
+	uint32	clockcontrol_pci;	/* aka m1 */
+	uint32	clockcontrol_m2;	/* mii/uart/mipsref */
+	uint32	clockcontrol_m3;	/* cpu */
+	uint32	clkdiv;			/* corerev >= 3 */
+	uint32	gpiodebugsel;		/* corerev >= 28 */
+	uint32	capabilities_ext;               	/* 0xac  */
+
+	/* pll delay registers (corerev >= 4) */
+	uint32	pll_on_delay;		/* 0xb0 */
+	uint32	fref_sel_delay;
+	uint32	slow_clk_ctl;		/* 5 < corerev < 10 */
+	uint32	PAD;
+
+	/* Instaclock registers (corerev >= 10) */
+	uint32	system_clk_ctl;		/* 0xc0 */
+	uint32	clkstatestretch;
+	uint32	PAD[2];
+
+	/* Indirect backplane access (corerev >= 22) */
+	uint32	bp_addrlow;		/* 0xd0 */
+	uint32	bp_addrhigh;
+	uint32	bp_data;
+	uint32	PAD;
+	uint32	bp_indaccess;
+	/* SPI registers, corerev >= 37 */
+	uint32	gsioctrl;
+	uint32	gsioaddress;
+	uint32	gsiodata;
+
+	/* More clock dividers (corerev >= 32) */
+	uint32	clkdiv2;
+	/* FAB ID (corerev >= 40) */
+	uint32	otpcontrol1;
+	uint32	fabid;			/* 0xf8 */
+
+	/* In AI chips, pointer to erom */
+	uint32	eromptr;		/* 0xfc */
+
+	/* ExtBus control registers (corerev >= 3) */
+	uint32	pcmcia_config;		/* 0x100 */
+	uint32	pcmcia_memwait;
+	uint32	pcmcia_attrwait;
+	uint32	pcmcia_iowait;
+	uint32	ide_config;
+	uint32	ide_memwait;
+	uint32	ide_attrwait;
+	uint32	ide_iowait;
+	uint32	prog_config;
+	uint32	prog_waitcount;
+	uint32	flash_config;
+	uint32	flash_waitcount;
+	uint32  SECI_config;		/* 0x130 SECI configuration */
+	uint32	SECI_status;
+	uint32	SECI_statusmask;
+	uint32	SECI_rxnibchanged;
+
+	uint32	PAD[20];
+
+	/* SROM interface (corerev >= 32) */
+	uint32	sromcontrol;		/* 0x190 */
+	uint32	sromaddress;
+	uint32	sromdata;
+	uint32	PAD[1];				/* 0x19C */
+	/* NAND flash registers for BCM4706 (corerev = 31) */
+	uint32  nflashctrl;         /* 0x1a0 */
+	uint32  nflashconf;
+	uint32  nflashcoladdr;
+	uint32  nflashrowaddr;
+	uint32  nflashdata;
+	uint32  nflashwaitcnt0;		/* 0x1b4 */
+	uint32  PAD[2];
+
+	uint32  seci_uart_data;		/* 0x1C0 */
+	uint32  seci_uart_bauddiv;
+	uint32  seci_uart_fcr;
+	uint32  seci_uart_lcr;
+	uint32  seci_uart_mcr;
+	uint32  seci_uart_lsr;
+	uint32  seci_uart_msr;
+	uint32  seci_uart_baudadj;
+	/* Clock control and hardware workarounds (corerev >= 20) */
+	uint32	clk_ctl_st;		/* 0x1e0 */
+	uint32	hw_war;
+	uint32  powerctl;		/* 0x1e8 */
+	uint32  PAD[69];
+
+	/* UARTs */
+	uint8	uart0data;		/* 0x300 */
+	uint8	uart0imr;
+	uint8	uart0fcr;
+	uint8	uart0lcr;
+	uint8	uart0mcr;
+	uint8	uart0lsr;
+	uint8	uart0msr;
+	uint8	uart0scratch;
+	uint8	PAD[248];		/* corerev >= 1 */
+
+	uint8	uart1data;		/* 0x400 */
+	uint8	uart1imr;
+	uint8	uart1fcr;
+	uint8	uart1lcr;
+	uint8	uart1mcr;
+	uint8	uart1lsr;
+	uint8	uart1msr;
+	uint8	uart1scratch;		/* 0x407 */
+	uint32	PAD[62];
+
+	/* save/restore, corerev >= 48 */
+	uint32	sr_capability;		/* 0x500 */
+	uint32	sr_control0;		/* 0x504 */
+	uint32	sr_control1;		/* 0x508 */
+	uint32  gpio_control;		/* 0x50C */
+	uint32	PAD[29];
+	/* 2 SR engines case */
+	uint32	sr1_control0;		/* 0x584 */
+	uint32	sr1_control1;		/* 0x588 */
+	uint32	PAD[29];
+	/* PMU registers (corerev >= 20) */
+	/* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP.
+	 * The CPU must read them twice, compare, and retry if different
+	 * (see the read-twice sketch after this struct).
+	 */
+	uint32	pmucontrol;		/* 0x600 */
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		/* 0x638, rev >= 1 */
+	uint32	gpioenable;		/* 0x63c, rev >= 1 */
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	core_cap_ext;		/* 0x64c */
+	uint32	chipcontrol_addr;	/* 0x650 */
+	uint32	chipcontrol_data;	/* 0x654 */
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		/* 0x668, corerev >= 28 */
+	uint32	pmu_xtalfreq;		/* 0x66C, pmurev >= 10 */
+	uint32  retention_ctl;		/* 0x670 */
+	uint32  PAD[3];
+	uint32  retention_grpidx;	/* 0x680 */
+	uint32  retention_grpctl;	/* 0x684 */
+	uint32  PAD[20];
+	uint32	pmucontrol_ext;		/* 0x6d8 */
+	uint32	slowclkperiod;		/* 0x6dc */
+	uint32	PAD[8];
+	uint32	pmuintmask0;		/* 0x700 */
+	uint32	pmuintmask1;		/* 0x704 */
+	uint32  PAD[14];
+	uint32  pmuintstatus;		/* 0x740 */
+	uint32	PAD[15];
+	uint32  pmuintctrl0;		/* 0x780 */
+	uint32  PAD[31];
+	uint16	sromotp[512];		/* 0x800 */
+#ifdef CCNFLASH_SUPPORT
+	/* Nand flash MLC controller registers (corerev >= 38) */
+	uint32	nand_revision;		/* 0xC00 */
+	uint32	nand_cmd_start;
+	uint32	nand_cmd_addr_x;
+	uint32	nand_cmd_addr;
+	uint32	nand_cmd_end_addr;
+	uint32	nand_cs_nand_select;
+	uint32	nand_cs_nand_xor;
+	uint32	PAD;
+	uint32	nand_spare_rd0;
+	uint32	nand_spare_rd4;
+	uint32	nand_spare_rd8;
+	uint32	nand_spare_rd12;
+	uint32	nand_spare_wr0;
+	uint32	nand_spare_wr4;
+	uint32	nand_spare_wr8;
+	uint32	nand_spare_wr12;
+	uint32	nand_acc_control;
+	uint32	PAD;
+	uint32	nand_config;
+	uint32	PAD;
+	uint32	nand_timing_1;
+	uint32	nand_timing_2;
+	uint32	nand_semaphore;
+	uint32	PAD;
+	uint32	nand_devid;
+	uint32	nand_devid_x;
+	uint32	nand_block_lock_status;
+	uint32	nand_intfc_status;
+	uint32	nand_ecc_corr_addr_x;
+	uint32	nand_ecc_corr_addr;
+	uint32	nand_ecc_unc_addr_x;
+	uint32	nand_ecc_unc_addr;
+	uint32	nand_read_error_count;
+	uint32	nand_corr_stat_threshold;
+	uint32	PAD[2];
+	uint32	nand_read_addr_x;
+	uint32	nand_read_addr;
+	uint32	nand_page_program_addr_x;
+	uint32	nand_page_program_addr;
+	uint32	nand_copy_back_addr_x;
+	uint32	nand_copy_back_addr;
+	uint32	nand_block_erase_addr_x;
+	uint32	nand_block_erase_addr;
+	uint32	nand_inv_read_addr_x;
+	uint32	nand_inv_read_addr;
+	uint32	PAD[2];
+	uint32	nand_blk_wr_protect;
+	uint32	PAD[3];
+	uint32	nand_acc_control_cs1;
+	uint32	nand_config_cs1;
+	uint32	nand_timing_1_cs1;
+	uint32	nand_timing_2_cs1;
+	uint32	PAD[20];
+	uint32	nand_spare_rd16;
+	uint32	nand_spare_rd20;
+	uint32	nand_spare_rd24;
+	uint32	nand_spare_rd28;
+	uint32	nand_cache_addr;
+	uint32	nand_cache_data;
+	uint32	nand_ctrl_config;
+	uint32	nand_ctrl_status;
+#endif /* CCNFLASH_SUPPORT */
+	uint32  gci_corecaps0; /* GCI starting at 0xC00 */
+	uint32  gci_corecaps1;
+	uint32  gci_corecaps2;
+	uint32  gci_corectrl;
+	uint32  gci_corestat; /* 0xC10 */
+	uint32  gci_intstat; /* 0xC14 */
+	uint32  gci_intmask; /* 0xC18 */
+	uint32  gci_wakemask; /* 0xC1C */
+	uint32  gci_levelintstat; /* 0xC20 */
+	uint32  gci_eventintstat; /* 0xC24 */
+	uint32  PAD[6];
+	uint32  gci_indirect_addr; /* 0xC40 */
+	uint32  gci_gpioctl; /* 0xC44 */
+	uint32	gci_gpiostatus;
+	uint32  gci_gpiomask; /* 0xC4C */
+	uint32  PAD;
+	uint32  gci_miscctl; /* 0xC54 */
+	uint32	gci_gpiointmask;
+	uint32	gci_gpiowakemask;
+	uint32  gci_input[32]; /* C60 */
+	uint32  gci_event[32]; /* CE0 */
+	uint32  gci_output[4]; /* D60 */
+	uint32  gci_control_0; /* 0xD70 */
+	uint32  gci_control_1; /* 0xD74 */
+	uint32  gci_intpolreg; /* 0xD78 */
+	uint32  gci_levelintmask; /* 0xD7C */
+	uint32  gci_eventintmask; /* 0xD80 */
+	uint32  PAD[3];
+	uint32  gci_inbandlevelintmask; /* 0xD90 */
+	uint32  gci_inbandeventintmask; /* 0xD94 */
+	uint32  PAD[2];
+	uint32  gci_seciauxtx; /* 0xDA0 */
+	uint32  gci_seciauxrx; /* 0xDA4 */
+	uint32  gci_secitx_datatag; /* 0xDA8 */
+	uint32  gci_secirx_datatag; /* 0xDAC */
+	uint32  gci_secitx_datamask; /* 0xDB0 */
+	uint32  gci_seciusef0tx_reg; /* 0xDB4 */
+	uint32  gci_secif0tx_offset; /* 0xDB8 */
+	uint32  gci_secif0rx_offset; /* 0xDBC */
+	uint32  gci_secif1tx_offset; /* 0xDC0 */
+	uint32	gci_rxfifo_common_ctrl; /* 0xDC4 */
+	uint32	gci_rxfifoctrl; /* 0xDC8 */
+	uint32	gci_uartreadid; /* DCC */
+	uint32  gci_seciuartescval; /* DD0 */
+	uint32	PAD;
+	uint32	gci_secififolevel; /* DD8 */
+	uint32	gci_seciuartdata; /* DDC */
+	uint32  gci_secibauddiv; /* DE0 */
+	uint32  gci_secifcr; /* DE4 */
+	uint32  gci_secilcr; /* DE8 */
+	uint32  gci_secimcr; /* DEC */
+	uint32	gci_secilsr; /* DF0 */
+	uint32	gci_secimsr; /* DF4 */
+	uint32  gci_baudadj; /* DF8 */
+	uint32  PAD;
+	uint32  gci_chipctrl; /* 0xE00 */
+	uint32  gci_chipsts; /* 0xE04 */
+	uint32	gci_gpioout; /* 0xE08 */
+	uint32	gci_gpioout_read; /* 0xE0C */
+	uint32	gci_mpwaketx; /* 0xE10 */
+	uint32	gci_mpwakedetect; /* 0xE14 */
+	uint32	gci_seciin_ctrl; /* 0xE18 */
+	uint32	gci_seciout_ctrl; /* 0xE1C */
+	uint32	gci_seciin_auxfifo_en; /* 0xE20 */
+	uint32	gci_seciout_txen_txbr; /* 0xE24 */
+	uint32	gci_seciin_rxbrstatus; /* 0xE28 */
+	uint32	gci_seciin_rxerrstatus; /* 0xE2C */
+	uint32	gci_seciin_fcstatus; /* 0xE30 */
+	uint32	gci_seciout_txstatus; /* 0xE34 */
+	uint32	gci_seciout_txbrstatus; /* 0xE38 */
+} chipcregs_t;
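+
+/* Illustrative sketch (added for clarity, not part of the original Broadcom header):
+ * the read-twice-and-compare access noted for the ILP-driven PMU timers above,
+ * shown here for pmutimer. Plain volatile accesses stand in for the OSL wrappers.
+ */
+#if 0	/* example only */
+static uint32
+chipc_read_pmutimer(chipcregs_t *cc)
+{
+	uint32 prev, cur;
+
+	cur = cc->pmutimer;
+	do {
+		prev = cur;
+		cur = cc->pmutimer;
+	} while (cur != prev);		/* retry until two consecutive reads agree */
+
+	return cur;
+}
+#endif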
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+
+#define	CC_CHIPID		0
+#define	CC_CAPABILITIES		4
+#define	CC_CHIPST		0x2c
+#define	CC_EROMPTR		0xfc
+
+#define	CC_OTPST		0x10
+#define	CC_INTSTATUS		0x20
+#define	CC_INTMASK		0x24
+#define	CC_JTAGCMD		0x30
+#define	CC_JTAGIR		0x34
+#define	CC_JTAGDR		0x38
+#define	CC_JTAGCTRL		0x3c
+#define	CC_GPIOPU		0x58
+#define	CC_GPIOPD		0x5c
+#define	CC_GPIOIN		0x60
+#define	CC_GPIOOUT		0x64
+#define	CC_GPIOOUTEN		0x68
+#define	CC_GPIOCTRL		0x6c
+#define	CC_GPIOPOL		0x70
+#define	CC_GPIOINTM		0x74
+#define	CC_GPIOEVENT		0x78
+#define	CC_GPIOEVENTMASK	0x7c
+#define	CC_WATCHDOG		0x80
+#define	CC_GPIOEVENTPOL		0x84
+#define	CC_CLKC_N		0x90
+#define	CC_CLKC_M0		0x94
+#define	CC_CLKC_M1		0x98
+#define	CC_CLKC_M2		0x9c
+#define	CC_CLKC_M3		0xa0
+#define	CC_CLKDIV		0xa4
+#define	CC_CAP_EXT		0xac
+#define	CC_SYS_CLK_CTL		0xc0
+#define	CC_CLKDIV2		0xf0
+#define	CC_CLK_CTL_ST		SI_CLK_CTL_ST
+#define	PMU_CTL			0x600
+#define	PMU_CAP			0x604
+#define	PMU_ST			0x608
+#define PMU_RES_STATE		0x60c
+#define PMU_RES_PENDING		0x610
+#define PMU_TIMER		0x614
+#define	PMU_MIN_RES_MASK	0x618
+#define	PMU_MAX_RES_MASK	0x61c
+#define CC_CHIPCTL_ADDR         0x650
+#define CC_CHIPCTL_DATA         0x654
+#define PMU_REG_CONTROL_ADDR	0x658
+#define PMU_REG_CONTROL_DATA	0x65C
+#define PMU_PLL_CONTROL_ADDR	0x660
+#define PMU_PLL_CONTROL_DATA	0x664
+
+#define CC_SROM_CTRL		0x190
+#ifdef SROM16K_4364_ADDRSPACE
+#define	CC_SROM_OTP		0xa000		/* SROM/OTP address space */
+#else
+#define	CC_SROM_OTP		0x0800
+#endif
+#define CC_GCI_INDIRECT_ADDR_REG	0xC40
+#define CC_GCI_CHIP_CTRL_REG	0xE00
+#define CC_GCI_CC_OFFSET_2	2
+#define CC_GCI_CC_OFFSET_5	5
+#define CC_SWD_CTRL		0x380
+#define CC_SWD_REQACK		0x384
+#define CC_SWD_DATA		0x388
+
+#define CHIPCTRLREG0 0x0
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define CHIPCTRLREG6 0x6
+#define REGCTRLREG4 0x4
+#define REGCTRLREG5 0x5
+#define REGCTRLREG6 0x6
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define PMU_RES_DEP_MASK 0x624
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+#define PMUREG_RESREQ_TIMER 0x688
+#define PMUREG_RESREQ_MASK1 0x6f4
+#define PMUREG_RESREQ_TIMER1 0x6f0
+#define EXT_LPO_AVAIL 0x100
+#define LPO_SEL					(1 << 0)
+#define CC_EXT_LPO_PU 0x200000
+#define GC_EXT_LPO_PU 0x2
+#define CC_INT_LPO_PU 0x100000
+#define GC_INT_LPO_PU 0x1
+#define EXT_LPO_SEL 0x8
+#define INT_LPO_SEL 0x4
+#define ENABLE_FINE_CBUCK_CTRL 			(1 << 30)
+#define REGCTRL5_PWM_AUTO_CTRL_MASK 		0x007e0000
+#define REGCTRL5_PWM_AUTO_CTRL_SHIFT		17
+#define REGCTRL6_PWM_AUTO_CTRL_MASK 		0x3fff0000
+#define REGCTRL6_PWM_AUTO_CTRL_SHIFT		16
+#define CC_BP_IND_ACCESS_START_SHIFT		9
+#define CC_BP_IND_ACCESS_START_MASK		(1 << CC_BP_IND_ACCESS_START_SHIFT)
+#define CC_BP_IND_ACCESS_RDWR_SHIFT		8
+#define CC_BP_IND_ACCESS_RDWR_MASK		(1 << CC_BP_IND_ACCESS_RDWR_SHIFT)
+#define CC_BP_IND_ACCESS_ERROR_SHIFT		10
+#define CC_BP_IND_ACCESS_ERROR_MASK		(1 << CC_BP_IND_ACCESS_ERROR_SHIFT)
+
+#ifdef SR_DEBUG
+#define SUBCORE_POWER_ON 0x0001
+#define PHY_POWER_ON 0x0010
+#define VDDM_POWER_ON 0x0100
+#define MEMLPLDO_POWER_ON 0x1000
+#define SUBCORE_POWER_ON_CHK 0x00040000
+#define PHY_POWER_ON_CHK 0x00080000
+#define VDDM_POWER_ON_CHK 0x00100000
+#define MEMLPLDO_POWER_ON_CHK 0x00200000
+#endif /* SR_DEBUG */
+
+#ifdef CCNFLASH_SUPPORT
+/* NAND flash support */
+#define CC_NAND_REVISION	0xC00
+#define CC_NAND_CMD_START	0xC04
+#define CC_NAND_CMD_ADDR	0xC0C
+#define CC_NAND_SPARE_RD_0	0xC20
+#define CC_NAND_SPARE_RD_4	0xC24
+#define CC_NAND_SPARE_RD_8	0xC28
+#define CC_NAND_SPARE_RD_C	0xC2C
+#define CC_NAND_CONFIG		0xC48
+#define CC_NAND_DEVID		0xC60
+#define CC_NAND_DEVID_EXT	0xC64
+#define CC_NAND_INTFC_STATUS	0xC6C
+#endif /* CCNFLASH_SUPPORT */
+
+/* chipid */
+#define	CID_ID_MASK		0x0000ffff	/**< Chip Id mask */
+#define	CID_REV_MASK		0x000f0000	/**< Chip Revision mask */
+#define	CID_REV_SHIFT		16		/**< Chip Revision shift */
+#define	CID_PKG_MASK		0x00f00000	/**< Package Option mask */
+#define	CID_PKG_SHIFT		20		/**< Package Option shift */
+#define	CID_CC_MASK		0x0f000000	/**< CoreCount (corerev >= 4) */
+#define CID_CC_SHIFT		24
+#define	CID_TYPE_MASK		0xf0000000	/**< Chip Type */
+#define CID_TYPE_SHIFT		28
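+
+/*
+ * Illustrative sketch (not part of the original header): decoding a chipid
+ * word with the masks/shifts above.  "chipid" is assumed to hold the raw
+ * 32-bit value read from the chipcommon chipid register.
+ *
+ *	chip = chipid & CID_ID_MASK;
+ *	rev  = (chipid & CID_REV_MASK) >> CID_REV_SHIFT;
+ *	pkg  = (chipid & CID_PKG_MASK) >> CID_PKG_SHIFT;
+ */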
+
+/* capabilities */
+#define	CC_CAP_UARTS_MASK	0x00000003	/**< Number of UARTs */
+#define CC_CAP_MIPSEB		0x00000004	/**< MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL		0x00000018	/**< UARTs clock select */
+#define CC_CAP_UINTCLK		0x00000008	/**< UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO		0x00000020	/**< UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK	0x000000c0	/**< External bus mask */
+#define CC_CAP_EXTBUS_NONE	0x00000000	/**< No ExtBus present */
+#define CC_CAP_EXTBUS_FULL	0x00000040	/**< ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG	0x00000080	/**< ExtBus: ProgIf only */
+#define	CC_CAP_FLASH_MASK	0x00000700	/**< Type of flash */
+#define	CC_CAP_PLL_MASK		0x00038000	/**< Type of PLL */
+#define CC_CAP_PWR_CTL		0x00040000	/**< Power control */
+#define CC_CAP_OTPSIZE		0x00380000	/**< OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT	19		/**< OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE	5		/**< OTP Size base */
+#define CC_CAP_JTAGP		0x00400000	/**< JTAG Master Present */
+#define CC_CAP_ROM		0x00800000	/**< Internal boot rom active */
+#define CC_CAP_BKPLN64		0x08000000	/**< 64-bit backplane */
+#define	CC_CAP_PMU		0x10000000	/**< PMU Present, rev >= 20 */
+#define	CC_CAP_ECI		0x20000000	/**< ECI Present, rev >= 21 */
+#define	CC_CAP_SROM		0x40000000	/**< Srom Present, rev >= 32 */
+#define	CC_CAP_NFLASH		0x80000000	/**< Nand flash present, rev >= 35 */
+
+#define	CC_CAP2_SECI		0x00000001	/**< SECI Present, rev >= 36 */
+#define	CC_CAP2_GSIO		0x00000002	/**< GSIO (spi/i2c) present, rev >= 37 */
+
+/* capabilities extension */
+#define CC_CAP_EXT_SECI_PRESENT				0x00000001	/**< SECI present */
+#define CC_CAP_EXT_GSIO_PRESENT				0x00000002	/**< GSIO present */
+#define CC_CAP_EXT_GCI_PRESENT  			0x00000004	/**< GCI present */
+#define CC_CAP_EXT_SECI_PUART_PRESENT		0x00000008  /**< UART present */
+#define CC_CAP_EXT_AOB_PRESENT  			0x00000040	/**< AOB present */
+#define CC_CAP_EXT_SWD_PRESENT  			0x00000400	/**< SWD present */
+
+/* WL Channel Info to BT via GCI - bits 40 - 47 */
+#define GCI_WL_CHN_INFO_MASK	(0xFF00)
+/* WL indication of MCHAN enabled/disabled to BT in awdl mode- bit 36 */
+#define GCI_WL_MCHAN_BIT_MASK	(0x0010)
+/* WL Strobe to BT */
+#define GCI_WL_STROBE_BIT_MASK	(0x0020)
+/* bits [51:48] - reserved for wlan TX pwr index */
+/* bits [55:52] btc mode indication */
+#define GCI_WL_BTC_MODE_SHIFT	(20)
+#define GCI_WL_BTC_MODE_MASK	(0xF << GCI_WL_BTC_MODE_SHIFT)
+#define GCI_WL_ANT_BIT_MASK	(0x00c0)
+#define GCI_WL_ANT_SHIFT_BITS	(6)
+/* PLL type */
+#define PLL_NONE		0x00000000
+#define PLL_TYPE1		0x00010000	/**< 48MHz base, 3 dividers */
+#define PLL_TYPE2		0x00020000	/**< 48MHz, 4 dividers */
+#define PLL_TYPE3		0x00030000	/**< 25MHz, 2 dividers */
+#define PLL_TYPE4		0x00008000	/**< 48MHz, 4 dividers */
+#define PLL_TYPE5		0x00018000	/**< 25MHz, 4 dividers */
+#define PLL_TYPE6		0x00028000	/**< 100/200 or 120/240 only */
+#define PLL_TYPE7		0x00038000	/**< 25MHz, 4 dividers */
+
+/* ILP clock */
+#define	ILP_CLOCK		32000
+
+/* ALP clock on pre-PMU chips */
+#define	ALP_CLOCK		20000000
+
+#ifdef CFG_SIM
+#define NS_ALP_CLOCK		84922
+#define NS_SLOW_ALP_CLOCK	84922
+#define NS_CPU_CLOCK		534500
+#define NS_SLOW_CPU_CLOCK	534500
+#define NS_SI_CLOCK		271750
+#define NS_SLOW_SI_CLOCK	271750
+#define NS_FAST_MEM_CLOCK	271750
+#define NS_MEM_CLOCK		271750
+#define NS_SLOW_MEM_CLOCK	271750
+#else
+#define NS_ALP_CLOCK		125000000
+#define NS_SLOW_ALP_CLOCK	100000000
+#define NS_CPU_CLOCK		1000000000
+#define NS_SLOW_CPU_CLOCK	800000000
+#define NS_SI_CLOCK		250000000
+#define NS_SLOW_SI_CLOCK	200000000
+#define NS_FAST_MEM_CLOCK	800000000
+#define NS_MEM_CLOCK		533000000
+#define NS_SLOW_MEM_CLOCK	400000000
+#endif /* CFG_SIM */
+
+#define ALP_CLOCK_53573		40000000
+
+/* HT clock */
+#define	HT_CLOCK		80000000
+
+/* corecontrol */
+#define CC_UARTCLKO		0x00000001	/**< Drive UART with internal clock */
+#define	CC_SE			0x00000002	/**< sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO	0x00000004	/**< 1=generate GPIO interrupt without backplane clock */
+#define CC_UARTCLKEN		0x00000008	/**< enable UART Clock (corerev >= 21) */
+
+/* retention_ctl */
+#define RCTL_MEM_RET_SLEEP_LOG_SHIFT	29
+#define RCTL_MEM_RET_SLEEP_LOG_MASK	(1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT)
+
+/* 4321 chipcontrol */
+#define CHIPCTRL_4321A0_DEFAULT	0x3a4
+#define CHIPCTRL_4321A1_DEFAULT	0x0a4
+#define CHIPCTRL_4321_PLL_DOWN	0x800000	/**< serdes PLL down override */
+
+/* Fields in the otpstatus register in rev >= 21 */
+#define OTPS_OL_MASK		0x000000ff
+#define OTPS_OL_MFG		0x00000001	/**< manuf row is locked */
+#define OTPS_OL_OR1		0x00000002	/**< otp redundancy row 1 is locked */
+#define OTPS_OL_OR2		0x00000004	/**< otp redundancy row 2 is locked */
+#define OTPS_OL_GU		0x00000008	/**< general use region is locked */
+#define OTPS_GUP_MASK		0x00000f00
+#define OTPS_GUP_SHIFT		8
+#define OTPS_GUP_HW		0x00000100	/**< h/w subregion is programmed */
+#define OTPS_GUP_SW		0x00000200	/**< s/w subregion is programmed */
+#define OTPS_GUP_CI		0x00000400	/**< chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE		0x00000800	/**< fuse subregion is programmed */
+#define OTPS_READY		0x00001000
+#define OTPS_RV(x)		(1 << (16 + (x)))	/**< redundancy entry valid */
+#define OTPS_RV_MASK		0x0fff0000
+#define OTPS_PROGOK     0x40000000
+
+/* Fields in the otpcontrol register in rev >= 21 */
+#define OTPC_PROGSEL		0x00000001
+#define OTPC_PCOUNT_MASK	0x0000000e
+#define OTPC_PCOUNT_SHIFT	1
+#define OTPC_VSEL_MASK		0x000000f0
+#define OTPC_VSEL_SHIFT		4
+#define OTPC_TMM_MASK		0x00000700
+#define OTPC_TMM_SHIFT		8
+#define OTPC_ODM		0x00000800
+#define OTPC_PROGEN		0x80000000
+
+/* Fields in the 40nm otpcontrol register in rev >= 40 */
+#define OTPC_40NM_PROGSEL_SHIFT	0
+#define OTPC_40NM_PCOUNT_SHIFT	1
+#define OTPC_40NM_PCOUNT_WR	0xA
+#define OTPC_40NM_PCOUNT_V1X	0xB
+#define OTPC_40NM_REGCSEL_SHIFT	5
+#define OTPC_40NM_REGCSEL_DEF	0x4
+#define OTPC_40NM_PROGIN_SHIFT	8
+#define OTPC_40NM_R2X_SHIFT	10
+#define OTPC_40NM_ODM_SHIFT	11
+#define OTPC_40NM_DF_SHIFT	15
+#define OTPC_40NM_VSEL_SHIFT	16
+#define OTPC_40NM_VSEL_WR	0xA
+#define OTPC_40NM_VSEL_V1X	0xA
+#define OTPC_40NM_VSEL_R1X	0x5
+#define OTPC_40NM_COFAIL_SHIFT	30
+
+#define OTPC1_CPCSEL_SHIFT	0
+#define OTPC1_CPCSEL_DEF	6
+#define OTPC1_TM_SHIFT		8
+#define OTPC1_TM_WR		0x84
+#define OTPC1_TM_V1X		0x84
+#define OTPC1_TM_R1X		0x4
+#define OTPC1_CLK_EN_MASK	0x00020000
+#define OTPC1_CLK_DIV_MASK	0x00FC0000
+
+/* Fields in otpprog in rev >= 21 and HND OTP */
+#define OTPP_COL_MASK		0x000000ff
+#define OTPP_COL_SHIFT		0
+#define OTPP_ROW_MASK		0x0000ff00
+#define OTPP_ROW_MASK9		0x0001ff00		/* for ccrev >= 49 */
+#define OTPP_ROW_SHIFT		8
+#define OTPP_OC_MASK		0x0f000000
+#define OTPP_OC_SHIFT		24
+#define OTPP_READERR		0x10000000
+#define OTPP_VALUE_MASK		0x20000000
+#define OTPP_VALUE_SHIFT	29
+#define OTPP_START_BUSY		0x80000000
+#define	OTPP_READ		0x40000000	/* HND OTP */
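+
+/*
+ * Illustrative sketch (an assumption, not from the original sources): an OTP
+ * read command word is typically composed from the otpprog fields above and
+ * then polled until the start/busy bit clears.
+ *
+ *	cmd = OTPP_START_BUSY |
+ *	      ((OTPPOC_READ << OTPP_OC_SHIFT) & OTPP_OC_MASK) |
+ *	      ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) |
+ *	      (col & OTPP_COL_MASK);
+ */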
+
+/* Fields in otplayout register */
+#define OTPL_HWRGN_OFF_MASK	0x00000FFF
+#define OTPL_HWRGN_OFF_SHIFT	0
+#define OTPL_WRAP_REVID_MASK	0x00F80000
+#define OTPL_WRAP_REVID_SHIFT	19
+#define OTPL_WRAP_TYPE_MASK	0x00070000
+#define OTPL_WRAP_TYPE_SHIFT	16
+#define OTPL_WRAP_TYPE_65NM	0
+#define OTPL_WRAP_TYPE_40NM	1
+#define OTPL_WRAP_TYPE_28NM	2
+#define OTPL_ROW_SIZE_MASK	0x0000F000
+#define OTPL_ROW_SIZE_SHIFT	12
+
+/* otplayout reg corerev >= 36 */
+#define OTP_CISFORMAT_NEW	0x80000000
+
+/* Opcodes for OTPP_OC field */
+#define OTPPOC_READ		0
+#define OTPPOC_BIT_PROG		1
+#define OTPPOC_VERIFY		3
+#define OTPPOC_INIT		4
+#define OTPPOC_SET		5
+#define OTPPOC_RESET		6
+#define OTPPOC_OCST		7
+#define OTPPOC_ROW_LOCK		8
+#define OTPPOC_PRESCN_TEST	9
+
+/* Opcodes for OTPP_OC field (40NM) */
+#define OTPPOC_READ_40NM	0
+#define OTPPOC_PROG_ENABLE_40NM 1
+#define OTPPOC_PROG_DISABLE_40NM	2
+#define OTPPOC_VERIFY_40NM	3
+#define OTPPOC_WORD_VERIFY_1_40NM	4
+#define OTPPOC_ROW_LOCK_40NM	5
+#define OTPPOC_STBY_40NM	6
+#define OTPPOC_WAKEUP_40NM	7
+#define OTPPOC_WORD_VERIFY_0_40NM	8
+#define OTPPOC_PRESCN_TEST_40NM 9
+#define OTPPOC_BIT_PROG_40NM	10
+#define OTPPOC_WORDPROG_40NM	11
+#define OTPPOC_BURNIN_40NM	12
+#define OTPPOC_AUTORELOAD_40NM	13
+#define OTPPOC_OVST_READ_40NM	14
+#define OTPPOC_OVST_PROG_40NM	15
+
+/* Opcodes for OTPP_OC field (28NM) */
+#define OTPPOC_READ_28NM	0
+#define OTPPOC_READBURST_28NM	1
+#define OTPPOC_PROG_ENABLE_28NM 2
+#define OTPPOC_PROG_DISABLE_28NM	3
+#define OTPPOC_PRESCREEN_28NM	4
+#define OTPPOC_PRESCREEN_RP_28NM	5
+#define OTPPOC_FLUSH_28NM	6
+#define OTPPOC_NOP_28NM	7
+#define OTPPOC_PROG_ECC_28NM	8
+#define OTPPOC_PROG_ECC_READ_28NM	9
+#define OTPPOC_PROG_28NM	10
+#define OTPPOC_PROGRAM_RP_28NM	11
+#define OTPPOC_PROGRAM_OVST_28NM	12
+#define OTPPOC_RELOAD_28NM	13
+#define OTPPOC_ERASE_28NM	14
+#define OTPPOC_LOAD_RF_28NM	15
+#define OTPPOC_CTRL_WR_28NM 16
+#define OTPPOC_CTRL_RD_28NM	17
+#define OTPPOC_READ_HP_28NM	18
+#define OTPPOC_READ_OVST_28NM	19
+#define OTPPOC_READ_VERIFY0_28NM	20
+#define OTPPOC_READ_VERIFY1_28NM	21
+#define OTPPOC_READ_FORCE0_28NM	22
+#define OTPPOC_READ_FORCE1_28NM	23
+#define OTPPOC_BURNIN_28NM	24
+#define OTPPOC_PROGRAM_LOCK_28NM	25
+#define OTPPOC_PROGRAM_TESTCOL_28NM	26
+#define OTPPOC_READ_TESTCOL_28NM	27
+#define OTPPOC_READ_FOUT_28NM	28
+#define OTPPOC_SFT_RESET_28NM	29
+
+#define OTPP_OC_MASK_28NM		0x0f800000
+#define OTPP_OC_SHIFT_28NM		23
+#define OTPC_PROGEN_28NM		0x8
+#define OTPC_DBLERRCLR		0x20
+#define OTPC_CLK_EN_MASK	0x00000040
+#define OTPC_CLK_DIV_MASK	0x00000F80
+
+/* Fields in otplayoutextension */
+#define OTPLAYOUTEXT_FUSE_MASK	0x3FF
+
+
+/* Jtagm characteristics that appeared at a given corerev */
+#define	JTAGM_CREV_OLD		10	/**< Old command set, 16bit max IR */
+#define	JTAGM_CREV_IRP		22	/**< Able to do pause-ir */
+#define	JTAGM_CREV_RTI		28	/**< Able to do return-to-idle */
+
+/* jtagcmd */
+#define JCMD_START		0x80000000
+#define JCMD_BUSY		0x80000000
+#define JCMD_STATE_MASK		0x60000000
+#define JCMD_STATE_TLR		0x00000000	/**< Test-logic-reset */
+#define JCMD_STATE_PIR		0x20000000	/**< Pause IR */
+#define JCMD_STATE_PDR		0x40000000	/**< Pause DR */
+#define JCMD_STATE_RTI		0x60000000	/**< Run-test-idle */
+#define JCMD0_ACC_MASK		0x0000f000
+#define JCMD0_ACC_IRDR		0x00000000
+#define JCMD0_ACC_DR		0x00001000
+#define JCMD0_ACC_IR		0x00002000
+#define JCMD0_ACC_RESET		0x00003000
+#define JCMD0_ACC_IRPDR		0x00004000
+#define JCMD0_ACC_PDR		0x00005000
+#define JCMD0_IRW_MASK		0x00000f00
+#define JCMD_ACC_MASK		0x000f0000	/**< Changes for corerev 11 */
+#define JCMD_ACC_IRDR		0x00000000
+#define JCMD_ACC_DR		0x00010000
+#define JCMD_ACC_IR		0x00020000
+#define JCMD_ACC_RESET		0x00030000
+#define JCMD_ACC_IRPDR		0x00040000
+#define JCMD_ACC_PDR		0x00050000
+#define JCMD_ACC_PIR		0x00060000
+#define JCMD_ACC_IRDR_I		0x00070000	/**< rev 28: return to run-test-idle */
+#define JCMD_ACC_DR_I		0x00080000	/**< rev 28: return to run-test-idle */
+#define JCMD_IRW_MASK		0x00001f00
+#define JCMD_IRW_SHIFT		8
+#define JCMD_DRW_MASK		0x0000003f
+
+/* jtagctrl */
+#define JCTRL_FORCE_CLK		4		/**< Force clock */
+#define JCTRL_EXT_EN		2		/**< Enable external targets */
+#define JCTRL_EN		1		/**< Enable Jtag master */
+#define JCTRL_TAPSEL_BIT	0x00000008	/**< JtagMasterCtrl tap_sel bit */
+
+/* swdmasterctrl */
+#define SWDCTRL_INT_EN		8		/**< Enable internal targets */
+#define SWDCTRL_FORCE_CLK	4		/**< Force clock */
+#define SWDCTRL_OVJTAG		2		/**< Enable shared SWD/JTAG pins */
+#define SWDCTRL_EN		1		/**< Enable Jtag master */
+
+/* Fields in clkdiv */
+#define	CLKD_SFLASH		0x1f000000
+#define	CLKD_SFLASH_SHIFT	24
+#define	CLKD_OTP		0x000f0000
+#define	CLKD_OTP_SHIFT		16
+#define	CLKD_JTAG		0x00000f00
+#define	CLKD_JTAG_SHIFT		8
+#define	CLKD_UART		0x000000ff
+
+#define	CLKD2_SROM		0x00000003
+#define	CLKD2_SWD		0xf8000000
+#define	CLKD2_SWD_SHIFT		27
+
+/* intstatus/intmask */
+#define	CI_GPIO			0x00000001	/**< gpio intr */
+#define	CI_EI			0x00000002	/**< extif intr (corerev >= 3) */
+#define	CI_TEMP			0x00000004	/**< temp. ctrl intr (corerev >= 15) */
+#define	CI_SIRQ			0x00000008	/**< serial IRQ intr (corerev >= 15) */
+#define	CI_ECI			0x00000010	/**< eci intr (corerev >= 21) */
+#define	CI_PMU			0x00000020	/**< pmu intr (corerev >= 21) */
+#define	CI_UART			0x00000040	/**< uart intr (corerev >= 21) */
+#define	CI_WDRESET		0x80000000	/**< watchdog reset occurred */
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK		0x00000007	/**< slow clock source mask */
+#define	SCC_SS_LPO		0x00000000	/**< source of slow clock is LPO */
+#define	SCC_SS_XTAL		0x00000001	/**< source of slow clock is crystal */
+#define	SCC_SS_PCI		0x00000002	/**< source of slow clock is PCI */
+#define SCC_LF			0x00000200	/**< LPOFreqSel, 1: 160Khz, 0: 32KHz */
+#define SCC_LP			0x00000400	/**< LPOPowerDown, 1: LPO is disabled,
+						 * 0: LPO is enabled
+						 */
+#define SCC_FS			0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock,
+						 * 0: power logic control
+						 */
+#define SCC_IP			0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors
+						 * PLL clock disable requests from core
+						 */
+#define SCC_XC			0x00002000	/**< XtalControlEn, 1/0: power logic does/doesn't
+						 * disable crystal when appropriate
+						 */
+#define SCC_XP			0x00004000	/**< XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK		0xffff0000	/**< ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_SHIFT		16
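+
+/*
+ * Illustrative sketch following the divider comment above (an assumption, not
+ * from the original code): "scc" is taken to hold the slow_clk_ctl register
+ * value and "base_hz" the undivided source clock.
+ *
+ *	div = (scc & SCC_CD_MASK) >> SCC_CD_SHIFT;
+ *	slow_hz = base_hz / (4 + div);
+ */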
+
+/* system_clk_ctl */
+#define	SYCC_IE			0x00000001	/**< ILPen: Enable Idle Low Power */
+#define	SYCC_AE			0x00000002	/**< ALPen: Enable Active Low Power */
+#define	SYCC_FP			0x00000004	/**< ForcePLLOn */
+#define	SYCC_AR			0x00000008	/**< Force ALP (or HT if ALPen is not set) */
+#define	SYCC_HR			0x00000010	/**< Force HT */
+#define SYCC_CD_MASK		0xffff0000	/**< ClkDiv (ILP = 1/(4 * (divisor + 1))) */
+#define SYCC_CD_SHIFT		16
+
+/* Indirect backplane access */
+#define	BPIA_BYTEEN		0x0000000f
+#define	BPIA_SZ1		0x00000001
+#define	BPIA_SZ2		0x00000003
+#define	BPIA_SZ4		0x00000007
+#define	BPIA_SZ8		0x0000000f
+#define	BPIA_WRITE		0x00000100
+#define	BPIA_START		0x00000200
+#define	BPIA_BUSY		0x00000200
+#define	BPIA_ERROR		0x00000400
+
+/* pcmcia/prog/flash_config */
+#define	CF_EN			0x00000001	/**< enable */
+#define	CF_EM_MASK		0x0000000e	/**< mode */
+#define	CF_EM_SHIFT		1
+#define	CF_EM_FLASH		0		/**< flash/asynchronous mode */
+#define	CF_EM_SYNC		2		/**< synchronous mode */
+#define	CF_EM_PCMCIA		4		/**< pcmcia mode */
+#define	CF_DS			0x00000010	/**< destsize:  0=8bit, 1=16bit */
+#define	CF_BS			0x00000020	/**< byteswap */
+#define	CF_CD_MASK		0x000000c0	/**< clock divider */
+#define	CF_CD_SHIFT		6
+#define	CF_CD_DIV2		0x00000000	/**< backplane/2 */
+#define	CF_CD_DIV3		0x00000040	/**< backplane/3 */
+#define	CF_CD_DIV4		0x00000080	/**< backplane/4 */
+#define	CF_CE			0x00000100	/**< clock enable */
+#define	CF_SB			0x00000200	/**< size/bytestrobe (synch only) */
+
+/* pcmcia_memwait */
+#define	PM_W0_MASK		0x0000003f	/**< waitcount0 */
+#define	PM_W1_MASK		0x00001f00	/**< waitcount1 */
+#define	PM_W1_SHIFT		8
+#define	PM_W2_MASK		0x001f0000	/**< waitcount2 */
+#define	PM_W2_SHIFT		16
+#define	PM_W3_MASK		0x1f000000	/**< waitcount3 */
+#define	PM_W3_SHIFT		24
+
+/* pcmcia_attrwait */
+#define	PA_W0_MASK		0x0000003f	/**< waitcount0 */
+#define	PA_W1_MASK		0x00001f00	/**< waitcount1 */
+#define	PA_W1_SHIFT		8
+#define	PA_W2_MASK		0x001f0000	/**< waitcount2 */
+#define	PA_W2_SHIFT		16
+#define	PA_W3_MASK		0x1f000000	/**< waitcount3 */
+#define	PA_W3_SHIFT		24
+
+/* pcmcia_iowait */
+#define	PI_W0_MASK		0x0000003f	/**< waitcount0 */
+#define	PI_W1_MASK		0x00001f00	/**< waitcount1 */
+#define	PI_W1_SHIFT		8
+#define	PI_W2_MASK		0x001f0000	/**< waitcount2 */
+#define	PI_W2_SHIFT		16
+#define	PI_W3_MASK		0x1f000000	/**< waitcount3 */
+#define	PI_W3_SHIFT		24
+
+/* prog_waitcount */
+#define	PW_W0_MASK		0x0000001f	/**< waitcount0 */
+#define	PW_W1_MASK		0x00001f00	/**< waitcount1 */
+#define	PW_W1_SHIFT		8
+#define	PW_W2_MASK		0x001f0000	/**< waitcount2 */
+#define	PW_W2_SHIFT		16
+#define	PW_W3_MASK		0x1f000000	/**< waitcount3 */
+#define	PW_W3_SHIFT		24
+
+#define PW_W0       		0x0000000c
+#define PW_W1       		0x00000a00
+#define PW_W2       		0x00020000
+#define PW_W3       		0x01000000
+
+/* flash_waitcount */
+#define	FW_W0_MASK		0x0000003f	/**< waitcount0 */
+#define	FW_W1_MASK		0x00001f00	/**< waitcount1 */
+#define	FW_W1_SHIFT		8
+#define	FW_W2_MASK		0x001f0000	/**< waitcount2 */
+#define	FW_W2_SHIFT		16
+#define	FW_W3_MASK		0x1f000000	/**< waitcount3 */
+#define	FW_W3_SHIFT		24
+
+/* When Srom support present, fields in sromcontrol */
+#define	SRC_START		0x80000000
+#define	SRC_BUSY		0x80000000
+#define	SRC_OPCODE		0x60000000
+#define	SRC_OP_READ		0x00000000
+#define	SRC_OP_WRITE		0x20000000
+#define	SRC_OP_WRDIS		0x40000000
+#define	SRC_OP_WREN		0x60000000
+#define	SRC_OTPSEL		0x00000010
+#define SRC_OTPPRESENT		0x00000020
+#define	SRC_LOCK		0x00000008
+#define	SRC_SIZE_MASK		0x00000006
+#define	SRC_SIZE_1K		0x00000000
+#define	SRC_SIZE_4K		0x00000002
+#define	SRC_SIZE_16K		0x00000004
+#define	SRC_SIZE_SHIFT		1
+#define	SRC_PRESENT		0x00000001
+
+/* Fields in pmucontrol */
+#define	PCTL_ILP_DIV_MASK	0xffff0000
+#define	PCTL_ILP_DIV_SHIFT	16
+#define PCTL_LQ_REQ_EN		0x00008000
+#define PCTL_PLL_PLLCTL_UPD	0x00000400	/**< rev 2 */
+#define PCTL_NOILP_ON_WAIT	0x00000200	/**< rev 1 */
+#define	PCTL_HT_REQ_EN		0x00000100
+#define	PCTL_ALP_REQ_EN		0x00000080
+#define	PCTL_XTALFREQ_MASK	0x0000007c
+#define	PCTL_XTALFREQ_SHIFT	2
+#define	PCTL_ILP_DIV_EN		0x00000002
+#define	PCTL_LPO_SEL		0x00000001
+
+/* Fields in pmucontrol_ext */
+#define PCTL_EXT_FASTLPO_ENAB		0x00000080
+#define PCTL_EXT_FASTLPO_SWENAB	0x00000200
+#define PCTL_EXT_FASTLPO_PCIE_SWENAB	0x00004000  /**< rev33 for FLL1M */
+
+#define DEFAULT_43012_MIN_RES_MASK		0x0f8bfe77
+
+/*  Retention Control */
+#define PMU_RCTL_CLK_DIV_SHIFT		0
+#define PMU_RCTL_CHAIN_LEN_SHIFT	12
+#define PMU_RCTL_MACPHY_DISABLE_SHIFT	26
+#define PMU_RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define PMU_RCTL_LOGIC_DISABLE_SHIFT	27
+#define PMU_RCTL_LOGIC_DISABLE_MASK	(1 << 27)
+#define PMU_RCTL_MEMSLP_LOG_SHIFT	28
+#define PMU_RCTL_MEMSLP_LOG_MASK	(1 << 28)
+#define PMU_RCTL_MEMRETSLP_LOG_SHIFT	29
+#define PMU_RCTL_MEMRETSLP_LOG_MASK	(1 << 29)
+
+/*  Retention Group Control */
+#define PMU_RCTLGRP_CHAIN_LEN_SHIFT	0
+#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT	14
+#define PMU_RCTLGRP_RMODE_ENABLE_MASK	(1 << 14)
+#define PMU_RCTLGRP_DFT_ENABLE_SHIFT	15
+#define PMU_RCTLGRP_DFT_ENABLE_MASK	(1 << 15)
+#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT	16
+#define PMU_RCTLGRP_NSRST_DISABLE_MASK	(1 << 16)
+/*  Retention Group Control special for 4334 */
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0	338
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1	315
+/*  Retention Group Control special for 43341 */
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0	366
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1	330
+
+/* Fields in clkstretch */
+#define CSTRETCH_HT		0xffff0000
+#define CSTRETCH_ALP		0x0000ffff
+
+/* gpiotimerval */
+#define GPIO_ONTIME_SHIFT	16
+
+/* clockcontrol_n */
+#define	CN_N1_MASK		0x3f		/**< n1 control */
+#define	CN_N2_MASK		0x3f00		/**< n2 control */
+#define	CN_N2_SHIFT		8
+#define	CN_PLLC_MASK		0xf0000		/**< pll control */
+#define	CN_PLLC_SHIFT		16
+
+/* clockcontrol_sb/pci/uart */
+#define	CC_M1_MASK		0x3f		/**< m1 control */
+#define	CC_M2_MASK		0x3f00		/**< m2 control */
+#define	CC_M2_SHIFT		8
+#define	CC_M3_MASK		0x3f0000	/**< m3 control */
+#define	CC_M3_SHIFT		16
+#define	CC_MC_MASK		0x1f000000	/**< mux control */
+#define	CC_MC_SHIFT		24
+
+/* N3M Clock control magic field values */
+#define	CC_F6_2			0x02		/**< A factor of 2 in */
+#define	CC_F6_3			0x03		/**< 6-bit fields like */
+#define	CC_F6_4			0x05		/**< N1, M1 or M3 */
+#define	CC_F6_5			0x09
+#define	CC_F6_6			0x11
+#define	CC_F6_7			0x21
+
+#define	CC_F5_BIAS		5		/**< 5-bit fields get this added */
+
+#define	CC_MC_BYPASS		0x08
+#define	CC_MC_M1		0x04
+#define	CC_MC_M1M2		0x02
+#define	CC_MC_M1M2M3		0x01
+#define	CC_MC_M1M3		0x11
+
+/* Type 2 Clock control magic field values */
+#define	CC_T2_BIAS		2		/**< n1, n2, m1 & m3 bias */
+#define	CC_T2M2_BIAS		3		/**< m2 bias */
+
+#define	CC_T2MC_M1BYP		1
+#define	CC_T2MC_M2BYP		2
+#define	CC_T2MC_M3BYP		4
+
+/* Type 6 Clock control magic field values */
+#define	CC_T6_MMASK		1		/**< bits of interest in m */
+#define	CC_T6_M0		120000000	/**< sb clock for m = 0 */
+#define	CC_T6_M1		100000000	/**< sb clock for m = 1 */
+#define	SB2MIPS_T6(sb)		(2 * (sb))
+
+/* Common clock base */
+#define	CC_CLOCK_BASE1		24000000	/**< Half the clock freq */
+#define CC_CLOCK_BASE2		12500000	/**< Alternate crystal on some PLLs */
+
+/* Clock control values for 200MHz in 5350 */
+#define	CLKC_5350_N		0x0311
+#define	CLKC_5350_M		0x04020009
+
+/* Flash types in the chipcommon capabilities register */
+#define FLASH_NONE		0x000		/**< No flash */
+#define SFLASH_ST		0x100		/**< ST serial flash */
+#define SFLASH_AT		0x200		/**< Atmel serial flash */
+#define NFLASH			0x300
+#define	PFLASH			0x700		/**< Parallel flash */
+#define QSPIFLASH_ST		0x800
+#define QSPIFLASH_AT		0x900
+
+/* Bits in the ExtBus config registers */
+#define	CC_CFG_EN		0x0001		/**< Enable */
+#define	CC_CFG_EM_MASK		0x000e		/**< Extif Mode */
+#define	CC_CFG_EM_ASYNC		0x0000		/**<   Async/Parallel flash */
+#define	CC_CFG_EM_SYNC		0x0002		/**<   Synchronous */
+#define	CC_CFG_EM_PCMCIA	0x0004		/**<   PCMCIA */
+#define	CC_CFG_EM_IDE		0x0006		/**<   IDE */
+#define	CC_CFG_DS		0x0010		/**< Data size, 0=8bit, 1=16bit */
+#define	CC_CFG_CD_MASK		0x00e0		/**< Sync: Clock divisor, rev >= 20 */
+#define	CC_CFG_CE		0x0100		/**< Sync: Clock enable, rev >= 20 */
+#define	CC_CFG_SB		0x0200		/**< Sync: Size/Bytestrobe, rev >= 20 */
+#define	CC_CFG_IS		0x0400		/**< Extif Sync Clk Select, rev >= 20 */
+
+/* ExtBus address space */
+#define	CC_EB_BASE		0x1a000000	/**< Chipc ExtBus base address */
+#define	CC_EB_PCMCIA_MEM	0x1a000000	/**< PCMCIA 0 memory base address */
+#define	CC_EB_PCMCIA_IO		0x1a200000	/**< PCMCIA 0 I/O base address */
+#define	CC_EB_PCMCIA_CFG	0x1a400000	/**< PCMCIA 0 config base address */
+#define	CC_EB_IDE		0x1a800000	/**< IDE memory base */
+#define	CC_EB_PCMCIA1_MEM	0x1a800000	/**< PCMCIA 1 memory base address */
+#define	CC_EB_PCMCIA1_IO	0x1aa00000	/**< PCMCIA 1 I/O base address */
+#define	CC_EB_PCMCIA1_CFG	0x1ac00000	/**< PCMCIA 1 config base address */
+#define	CC_EB_PROGIF		0x1b000000	/**< ProgIF Async/Sync base address */
+
+
+/* Start/busy bit in flashcontrol */
+#define SFLASH_OPCODE		0x000000ff
+#define SFLASH_ACTION		0x00000700
+#define	SFLASH_CS_ACTIVE	0x00001000	/**< Chip Select Active, rev >= 20 */
+#define SFLASH_START		0x80000000
+#define SFLASH_BUSY		SFLASH_START
+
+/* flashcontrol action codes */
+#define	SFLASH_ACT_OPONLY	0x0000		/**< Issue opcode only */
+#define	SFLASH_ACT_OP1D		0x0100		/**< opcode + 1 data byte */
+#define	SFLASH_ACT_OP3A		0x0200		/**< opcode + 3 addr bytes */
+#define	SFLASH_ACT_OP3A1D	0x0300		/**< opcode + 3 addr & 1 data bytes */
+#define	SFLASH_ACT_OP3A4D	0x0400		/**< opcode + 3 addr & 4 data bytes */
+#define	SFLASH_ACT_OP3A4X4D	0x0500		/**< opcode + 3 addr, 4 don't care & 4 data bytes */
+#define	SFLASH_ACT_OP3A1X4D	0x0700		/**< opcode + 3 addr, 1 don't care & 4 data bytes */
+
+/* flashcontrol action+opcodes for ST flashes */
+#define SFLASH_ST_WREN		0x0006		/**< Write Enable */
+#define SFLASH_ST_WRDIS		0x0004		/**< Write Disable */
+#define SFLASH_ST_RDSR		0x0105		/**< Read Status Register */
+#define SFLASH_ST_WRSR		0x0101		/**< Write Status Register */
+#define SFLASH_ST_READ		0x0303		/**< Read Data Bytes */
+#define SFLASH_ST_PP		0x0302		/**< Page Program */
+#define SFLASH_ST_SE		0x02d8		/**< Sector Erase */
+#define SFLASH_ST_BE		0x00c7		/**< Bulk Erase */
+#define SFLASH_ST_DP		0x00b9		/**< Deep Power-down */
+#define SFLASH_ST_RES		0x03ab		/**< Read Electronic Signature */
+#define SFLASH_ST_CSA		0x1000		/**< Keep chip select asserted */
+#define SFLASH_ST_SSE		0x0220		/**< Sub-sector Erase */
+
+#define SFLASH_ST_READ4B	0x6313		/* Read Data Bytes in 4Byte address */
+#define SFLASH_ST_PP4B		0x6312		/* Page Program in 4Byte address */
+#define SFLASH_ST_SE4B		0x62dc		/* Sector Erase in 4Byte address */
+#define SFLASH_ST_SSE4B		0x6221		/* Sub-sector Erase */
+
+#define SFLASH_MXIC_RDID	0x0390		/* Read Manufacture ID */
+#define SFLASH_MXIC_MFID	0xc2		/* MXIC Manufacture ID */
+
+/* Status register bits for ST flashes */
+#define SFLASH_ST_WIP		0x01		/**< Write In Progress */
+#define SFLASH_ST_WEL		0x02		/**< Write Enable Latch */
+#define SFLASH_ST_BP_MASK	0x1c		/**< Block Protect */
+#define SFLASH_ST_BP_SHIFT	2
+#define SFLASH_ST_SRWD		0x80		/**< Status Register Write Disable */
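+
+/*
+ * Illustrative sketch (an assumption, not from the original sources): issuing
+ * an ST "Read Status Register" command through flashcontrol and waiting for
+ * completion.  cc_write32()/cc_read32() are hypothetical chipcommon register
+ * accessors; "flashcontrol"/"flashdata" stand in for the register offsets.
+ *
+ *	cc_write32(flashcontrol, SFLASH_START | SFLASH_ST_RDSR);
+ *	while (cc_read32(flashcontrol) & SFLASH_BUSY)
+ *		;
+ *	status = cc_read32(flashdata);
+ *	busy = status & SFLASH_ST_WIP;
+ */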
+
+/* flashcontrol action+opcodes for Atmel flashes */
+#define SFLASH_AT_READ				0x07e8
+#define SFLASH_AT_PAGE_READ			0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS			0x01d7
+#define SFLASH_AT_BUF1_WRITE			0x0384
+#define SFLASH_AT_BUF2_WRITE			0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM		0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM		0x0286
+#define SFLASH_AT_BUF1_PROGRAM			0x0288
+#define SFLASH_AT_BUF2_PROGRAM			0x0289
+#define SFLASH_AT_PAGE_ERASE			0x0281
+#define SFLASH_AT_BLOCK_ERASE			0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM	0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM	0x0385
+#define SFLASH_AT_BUF1_LOAD			0x0253
+#define SFLASH_AT_BUF2_LOAD			0x0255
+#define SFLASH_AT_BUF1_COMPARE			0x0260
+#define SFLASH_AT_BUF2_COMPARE			0x0261
+#define SFLASH_AT_BUF1_REPROGRAM		0x0258
+#define SFLASH_AT_BUF2_REPROGRAM		0x0259
+
+/* Status register bits for Atmel flashes */
+#define SFLASH_AT_READY				0x80
+#define SFLASH_AT_MISMATCH			0x40
+#define SFLASH_AT_ID_MASK			0x38
+#define SFLASH_AT_ID_SHIFT			3
+
+/* SPI register bits, corerev >= 37 */
+#define GSIO_START			0x80000000
+#define GSIO_BUSY			GSIO_START
+
+/*
+ * These are the UART port assignments, expressed as offsets from the base
+ * register.  These assignments should hold for any serial port based on
+ * a 8250, 16450, or 16550(A).
+ */
+
+#define UART_RX		0	/**< In:  Receive buffer (DLAB=0) */
+#define UART_TX		0	/**< Out: Transmit buffer (DLAB=0) */
+#define UART_DLL	0	/**< Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER	1	/**< In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM	1	/**< Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR	2	/**< In: Interrupt Identity Register  */
+#define UART_FCR	2	/**< Out: FIFO Control Register */
+#define UART_LCR	3	/**< Out: Line Control Register */
+#define UART_MCR	4	/**< Out: Modem Control Register */
+#define UART_LSR	5	/**< In:  Line Status Register */
+#define UART_MSR	6	/**< In:  Modem Status Register */
+#define UART_SCR	7	/**< I/O: Scratch Register */
+#define UART_LCR_DLAB	0x80	/**< Divisor latch access bit */
+#define UART_LCR_WLEN8	0x03	/**< Word length: 8 bits */
+#define UART_MCR_OUT2	0x08	/**< MCR GPIO out 2 */
+#define UART_MCR_LOOP	0x10	/**< Enable loopback test mode */
+#define UART_LSR_RX_FIFO 	0x80	/**< Receive FIFO error */
+#define UART_LSR_TDHR		0x40	/**< Data-hold-register empty */
+#define UART_LSR_THRE		0x20	/**< Transmit-hold-register empty */
+#define UART_LSR_BREAK		0x10	/**< Break interrupt */
+#define UART_LSR_FRAMING	0x08	/**< Framing error */
+#define UART_LSR_PARITY		0x04	/**< Parity error */
+#define UART_LSR_OVERRUN	0x02	/**< Overrun error */
+#define UART_LSR_RXRDY		0x01	/**< Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1	/**< FIFO control register bit controlling FIFO enable/disable */
+
+/* Interrupt Identity Register (IIR) bits */
+#define UART_IIR_FIFO_MASK	0xc0	/**< IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK	0xf	/**< IIR interrupt ID source */
+#define UART_IIR_MDM_CHG	0x0	/**< Modem status changed */
+#define UART_IIR_NOINT		0x1	/**< No interrupt pending */
+#define UART_IIR_THRE		0x2	/**< THR empty */
+#define UART_IIR_RCVD_DATA	0x4	/**< Received data available */
+#define UART_IIR_RCVR_STATUS 	0x6	/**< Receiver status */
+#define UART_IIR_CHAR_TIME 	0xc	/**< Character time */
+
+/* Interrupt Enable Register (IER) bits */
+#define UART_IER_PTIME	128	/**< Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI	8	/**< enable modem status interrupt */
+#define UART_IER_ELSI	4	/**< enable receiver line status interrupt */
+#define UART_IER_ETBEI  2	/**< enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI	1	/**< enable data available interrupt */
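+
+/*
+ * Illustrative sketch (an assumption, not from the original sources): a
+ * polled transmit using the 8250/16550-style offsets above.
+ * uart_read8()/uart_write8() are hypothetical accessors relative to the
+ * UART base address.
+ *
+ *	while (!(uart_read8(UART_LSR) & UART_LSR_THRE))
+ *		;				wait for THR empty
+ *	uart_write8(UART_TX, c);		then write the character
+ */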
+
+/* pmustatus */
+#define PST_SLOW_WR_PENDING 0x0400
+#define PST_EXTLPOAVAIL	0x0100
+#define PST_WDRESET	0x0080
+#define	PST_INTPEND	0x0040
+#define	PST_SBCLKST	0x0030
+#define	PST_SBCLKST_ILP	0x0010
+#define	PST_SBCLKST_ALP	0x0020
+#define	PST_SBCLKST_HT	0x0030
+#define	PST_ALPAVAIL	0x0008
+#define	PST_HTAVAIL	0x0004
+#define	PST_RESINIT	0x0003
+#define	PST_ILPFASTLPO	0x00010000
+
+/* pmucapabilities */
+#define PCAP_REV_MASK	0x000000ff
+#define PCAP_RC_MASK	0x00001f00
+#define PCAP_RC_SHIFT	8
+#define PCAP_TC_MASK	0x0001e000
+#define PCAP_TC_SHIFT	13
+#define PCAP_PC_MASK	0x001e0000
+#define PCAP_PC_SHIFT	17
+#define PCAP_VC_MASK	0x01e00000
+#define PCAP_VC_SHIFT	21
+#define PCAP_CC_MASK	0x1e000000
+#define PCAP_CC_SHIFT	25
+#define PCAP5_PC_MASK	0x003e0000	/**< PMU corerev >= 5 */
+#define PCAP5_PC_SHIFT	17
+#define PCAP5_VC_MASK	0x07c00000
+#define PCAP5_VC_SHIFT	22
+#define PCAP5_CC_MASK	0xf8000000
+#define PCAP5_CC_SHIFT	27
+
+/* CoreCapabilitiesExtension */
+#define PCAP_EXT_USE_MUXED_ILP_CLK_MASK	0x04000000
+
+/* PMU Resource Request Timer registers */
+/* This is based on PmuRev0 */
+#define	PRRT_TIME_MASK	0x03ff
+#define	PRRT_INTEN	0x0400
+#define	PRRT_REQ_ACTIVE	0x0800
+#define	PRRT_ALP_REQ	0x1000
+#define	PRRT_HT_REQ	0x2000
+#define PRRT_HQ_REQ 0x4000
+
+/* PMU Int Control register bits */
+#define PMU_INTC_ALP_REQ	0x1
+#define PMU_INTC_HT_REQ		0x2
+#define PMU_INTC_HQ_REQ		0x4
+
+/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
+#define RSRC_INTR_MASK_TIMER_INT_0 1
+
+/* PMU resource bit position */
+#define PMURES_BIT(bit)	(1 << (bit))
+
+/* PMU resource number limit */
+#define PMURES_MAX_RESNUM	30
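+
+/*
+ * Illustrative sketch (an assumption, not from the original sources): PMU
+ * min/max resource masks are built by OR-ing PMURES_BIT() of the per-chip
+ * resource numbers defined further below, e.g. for a 4328:
+ *
+ *	min_mask = PMURES_BIT(RES4328_XTAL_PU) | PMURES_BIT(RES4328_BB_PLL_PU);
+ */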
+
+/* PMU chip control0 register */
+#define	PMU_CHIPCTL0		0
+#define PMU43143_CC0_SDIO_DRSTR_OVR	(1 << 31) /* sdio drive strength override enable */
+
+/* clock req types */
+#define PMU_CC1_CLKREQ_TYPE_SHIFT	19
+#define PMU_CC1_CLKREQ_TYPE_MASK	(1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN		0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL		1
+
+/* PMU chip control1 register */
+#define	PMU_CHIPCTL1			1
+#define	PMU_CC1_RXC_DLL_BYPASS		0x00010000
+#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN	0x00000010
+
+#define PMU_CC1_IF_TYPE_MASK   		0x00000030
+#define PMU_CC1_IF_TYPE_RMII    	0x00000000
+#define PMU_CC1_IF_TYPE_MII     	0x00000010
+#define PMU_CC1_IF_TYPE_RGMII   	0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK    	0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY    	0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 	0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII	0x00000080
+#define PMU_CC1_SW_TYPE_RGMII   	0x000000c0
+
+#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080
+#define PMU_CC1_ENABLE_CLOSED_LOOP      0x00000000
+
+/* PMU chip control2 register */
+#define PMU_CC2_RFLDO3P3_PU_FORCE_ON		(1 << 15)
+#define PMU_CC2_RFLDO3P3_PU_CLEAR		0x00000000
+
+#define PMU_CC2_WL2CDIG_I_PMU_SLEEP		(1 << 16)
+#define	PMU_CHIPCTL2		2
+#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON	(1 << 18)
+#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON		(1 << 19)
+#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON	(1 << 20)
+#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON	(1 << 21)
+#define PMU_CC2_MASK_WL_DEV_WAKE             (1 << 22)
+#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE   (1 << 25)
+#define PMU_CC2_GCI2_WAKE                    (1 << 31)
+
+/* PMU chip control3 register */
+#define	PMU_CHIPCTL3		3
+#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT  19
+#define PMU_CC3_ENABLE_RF_SHIFT           22
+#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT   23
+
+/* PMU chip control4 register */
+#define PMU_CHIPCTL4                    4
+
+/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */
+#define PMU_CC4_IF_TYPE_MASK		0x00003000
+#define PMU_CC4_IF_TYPE_RMII		0x00000000
+#define PMU_CC4_IF_TYPE_MII		0x00001000
+#define PMU_CC4_IF_TYPE_RGMII		0x00002000
+
+#define PMU_CC4_SW_TYPE_MASK		0x0000c000
+#define PMU_CC4_SW_TYPE_EPHY		0x00000000
+#define PMU_CC4_SW_TYPE_EPHYMII		0x00004000
+#define PMU_CC4_SW_TYPE_EPHYRMII	0x00008000
+#define PMU_CC4_SW_TYPE_RGMII		0x0000c000
+#define PMU_CC4_DISABLE_LQ_AVAIL	(1<<27)
+
+/* PMU chip control5 register */
+#define PMU_CHIPCTL5                    5
+
+/* PMU chip control6 register */
+#define PMU_CHIPCTL6                    6
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP    (1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP   (1 << 6)
+
+/* PMU chip control7 register */
+#define PMU_CHIPCTL7				7
+#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN	(1 << 25)
+#define PMU_CC7_ENABLE_MDIO_RESET_WAR		(1 << 27)
+/* 53537 series have gmac1 gmac_if_type in cc7 [7:6] (default 0b01) */
+#define PMU_CC7_IF_TYPE_MASK		0x000000c0
+#define PMU_CC7_IF_TYPE_RMII		0x00000000
+#define PMU_CC7_IF_TYPE_MII		0x00000040
+#define PMU_CC7_IF_TYPE_RGMII		0x00000080
+
+#define PMU_CHIPCTL8			8
+#define PMU_CHIPCTL9			9
+
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers */
+/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */
+#define	PMU0_PLL0_PLLCTL0		0
+#define	PMU0_PLL0_PC0_PDIV_MASK		1
+#define	PMU0_PLL0_PC0_PDIV_FREQ		25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK	0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT	3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE	8
+
+/* PC0_DIV_ARM for PLLOUT_ARM */
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ	0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ	1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ	2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ	3 /* Default */
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ	4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ	5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ	6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ	7
+
+/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */
+#define	PMU0_PLL0_PLLCTL1		1
+#define	PMU0_PLL0_PC1_WILD_INT_MASK	0xf0000000
+#define	PMU0_PLL0_PC1_WILD_INT_SHIFT	28
+#define	PMU0_PLL0_PC1_WILD_FRAC_MASK	0x0fffff00
+#define	PMU0_PLL0_PC1_WILD_FRAC_SHIFT	8
+#define	PMU0_PLL0_PC1_STOP_MOD		0x00000040
+
+/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */
+#define	PMU0_PLL0_PLLCTL2		2
+#define	PMU0_PLL0_PC2_WILD_INT_MASK	0xf
+#define	PMU0_PLL0_PC2_WILD_INT_SHIFT	4
+
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU1_PLL0_PLLCTL0		0
+#define PMU1_PLL0_PC0_P1DIV_MASK	0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT	20
+#define PMU1_PLL0_PC0_P2DIV_MASK	0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU1_PLL0_PLLCTL1		1
+#define PMU1_PLL0_PC1_M1DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT	0
+#define PMU1_PLL0_PC1_M2DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT	8
+#define PMU1_PLL0_PC1_M3DIV_MASK	0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT	16
+#define PMU1_PLL0_PC1_M4DIV_MASK	0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT	24
+#define PMU1_PLL0_PC1_M4DIV_BY_9	9
+#define PMU1_PLL0_PC1_M4DIV_BY_18	0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36	0x24
+#define PMU1_PLL0_PC1_M4DIV_BY_60	0x3C
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL  (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU1_PLL0_PLLCTL2		2
+#define PMU1_PLL0_PC2_M5DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT	0
+#define PMU1_PLL0_PC2_M5DIV_BY_12	0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_31	0x1f
+#define PMU1_PLL0_PC2_M5DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_M5DIV_BY_42	0x2a
+#define PMU1_PLL0_PC2_M5DIV_BY_60	0x3c
+#define PMU1_PLL0_PC2_M6DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT	8
+#define PMU1_PLL0_PC2_M6DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT	17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH	1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB	2	/**< recommended for 4319 */
+#define PMU1_PLL0_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU1_PLL0_PLLCTL3		3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT	0
+
+/* pll_ctrl */
+#define PMU1_PLL0_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU1_PLL0_PLLCTL5		5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 	0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 	8
+#define PMU1_PLL0_PC5_ASSERT_CH_MASK 	0x3f000000
+#define PMU1_PLL0_PC5_ASSERT_CH_SHIFT 	24
+#define PMU1_PLL0_PC5_DEASSERT_CH_MASK 	0xff000000
+
+#define PMU1_PLL0_PLLCTL6		6
+#define PMU1_PLL0_PLLCTL7		7
+#define PMU1_PLL0_PLLCTL8		8
+
+#define PMU1_PLLCTL8_OPENLOOP_MASK	(1 << 1)
+#define PMU_PLL4350_OPENLOOP_MASK	(1 << 7)
+
+#define PMU1_PLL0_PLLCTL9		9
+
+#define PMU1_PLL0_PLLCTL10		10
+
+/* PMU rev 2 control words */
+#define PMU2_PHY_PLL_PLLCTL		4
+#define PMU2_SI_PLL_PLLCTL		10
+
+/* PMU rev 2 */
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU2_PLL_PLLCTL0		0
+#define PMU2_PLL_PC0_P1DIV_MASK 	0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT	20
+#define PMU2_PLL_PC0_P2DIV_MASK 	0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU2_PLL_PLLCTL1		1
+#define PMU2_PLL_PC1_M1DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT	0
+#define PMU2_PLL_PC1_M2DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT	8
+#define PMU2_PLL_PC1_M3DIV_MASK 	0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT	16
+#define PMU2_PLL_PC1_M4DIV_MASK 	0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT	24
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU2_PLL_PLLCTL2		2
+#define PMU2_PLL_PC2_M5DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT	0
+#define PMU2_PLL_PC2_M6DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT	8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT	17
+#define PMU2_PLL_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU2_PLL_PLLCTL3		3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT	0
+
+/* pll_ctrl */
+#define PMU2_PLL_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU2_PLL_PLLCTL5		5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK	0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT	8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK	0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT	12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK	0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT	16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK	0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT	20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK	0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT	24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK	0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT	28
+
+/* PMU rev 5 (& 6) */
+#define	PMU5_PLL_P1P2_OFF		0
+#define	PMU5_PLL_P1_MASK		0x0f000000
+#define	PMU5_PLL_P1_SHIFT		24
+#define	PMU5_PLL_P2_MASK		0x00f00000
+#define	PMU5_PLL_P2_SHIFT		20
+#define	PMU5_PLL_M14_OFF		1
+#define	PMU5_PLL_MDIV_MASK		0x000000ff
+#define	PMU5_PLL_MDIV_WIDTH		8
+#define	PMU5_PLL_NM5_OFF		2
+#define	PMU5_PLL_NDIV_MASK		0xfff00000
+#define	PMU5_PLL_NDIV_SHIFT		20
+#define	PMU5_PLL_NDIV_MODE_MASK		0x000e0000
+#define	PMU5_PLL_NDIV_MODE_SHIFT	17
+#define	PMU5_PLL_FMAB_OFF		3
+#define	PMU5_PLL_MRAT_MASK		0xf0000000
+#define	PMU5_PLL_MRAT_SHIFT		28
+#define	PMU5_PLL_ABRAT_MASK		0x08000000
+#define	PMU5_PLL_ABRAT_SHIFT		27
+#define	PMU5_PLL_FDIV_MASK		0x07ffffff
+#define	PMU5_PLL_PLLCTL_OFF		4
+#define	PMU5_PLL_PCHI_OFF		5
+#define	PMU5_PLL_PCHI_MASK		0x0000003f
+
+/* pmu XtalFreqRatio */
+#define	PMU_XTALFREQ_REG_ILPCTR_MASK	0x00001FFF
+#define	PMU_XTALFREQ_REG_MEASURE_MASK	0x80000000
+#define	PMU_XTALFREQ_REG_MEASURE_SHIFT	31
+
+/* Divider allocation in 4716/47162/5356/5357 */
+#define	PMU5_MAINPLL_CPU		1
+#define	PMU5_MAINPLL_MEM		2
+#define	PMU5_MAINPLL_SI			3
+
+/* 4706 PMU */
+#define PMU4706_MAINPLL_PLL0	0
+#define PMU6_4706_PROCPLL_OFF	4	/**< The CPU PLL */
+#define PMU6_4706_PROC_P2DIV_MASK		0x000f0000
+#define PMU6_4706_PROC_P2DIV_SHIFT	16
+#define PMU6_4706_PROC_P1DIV_MASK		0x0000f000
+#define PMU6_4706_PROC_P1DIV_SHIFT	12
+#define PMU6_4706_PROC_NDIV_INT_MASK	0x00000ff8
+#define PMU6_4706_PROC_NDIV_INT_SHIFT	3
+#define PMU6_4706_PROC_NDIV_MODE_MASK		0x00000007
+#define PMU6_4706_PROC_NDIV_MODE_SHIFT	0
+
+#define PMU7_PLL_PLLCTL7                7
+#define PMU7_PLL_CTL7_M4DIV_MASK	0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 	24
+#define PMU7_PLL_CTL7_M4DIV_BY_6	6
+#define PMU7_PLL_CTL7_M4DIV_BY_12	0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL8                8
+#define PMU7_PLL_CTL8_M5DIV_MASK	0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT	0
+#define PMU7_PLL_CTL8_M5DIV_BY_8	8
+#define PMU7_PLL_CTL8_M5DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24	0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK	0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT	8
+#define PMU7_PLL_CTL8_M6DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL11		11
+#define PMU7_PLL_PLLCTL11_MASK		0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL		0x22222200
+
+/* PMU rev 15 */
+#define PMU15_PLL_PLLCTL0		0
+#define PMU15_PLL_PC0_CLKSEL_MASK	0x00000003
+#define PMU15_PLL_PC0_CLKSEL_SHIFT	0
+#define PMU15_PLL_PC0_FREQTGT_MASK	0x003FFFFC
+#define PMU15_PLL_PC0_FREQTGT_SHIFT	2
+#define PMU15_PLL_PC0_PRESCALE_MASK	0x00C00000
+#define PMU15_PLL_PC0_PRESCALE_SHIFT	22
+#define PMU15_PLL_PC0_KPCTRL_MASK	0x07000000
+#define PMU15_PLL_PC0_KPCTRL_SHIFT	24
+#define PMU15_PLL_PC0_FCNTCTRL_MASK	0x38000000
+#define PMU15_PLL_PC0_FCNTCTRL_SHIFT	27
+#define PMU15_PLL_PC0_FDCMODE_MASK	0x40000000
+#define PMU15_PLL_PC0_FDCMODE_SHIFT	30
+#define PMU15_PLL_PC0_CTRLBIAS_MASK	0x80000000
+#define PMU15_PLL_PC0_CTRLBIAS_SHIFT	31
+
+#define PMU15_PLL_PLLCTL1			1
+#define PMU15_PLL_PC1_BIAS_CTLM_MASK		0x00000060
+#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT		5
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK	0x00000040
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT	6
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK		0x0001FF80
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT	7
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK	0x03FE0000
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT	17
+#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK		0x0C000000
+#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT	26
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK	0x10000000
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT	28
+#define PMU15_PLL_PC1_OPENLP_EN_MASK		0x40000000
+#define PMU15_PLL_PC1_OPENLP_EN_SHIFT		30
+
+#define PMU15_PLL_PLLCTL2			2
+#define PMU15_PLL_PC2_CTEN_MASK			0x00000001
+#define PMU15_PLL_PC2_CTEN_SHIFT		0
+
+#define PMU15_PLL_PLLCTL3			3
+#define PMU15_PLL_PC3_DITHER_EN_MASK		0x00000001
+#define PMU15_PLL_PC3_DITHER_EN_SHIFT		0
+#define PMU15_PLL_PC3_DCOCTLSP_MASK		0xFE000000
+#define PMU15_PLL_PC3_DCOCTLSP_SHIFT		25
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK	0x01
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT	0
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK	0x02
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT	1
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK	0x04
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT	2
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK	0x18
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT	3
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK	0x60
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT	5
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1	0
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2	1
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3	2
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5	3
+
+#define PMU15_PLL_PLLCTL4			4
+#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK		0x00000007
+#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT		0
+#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK		0x00000038
+#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT		3
+#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK		0x000001C0
+#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT		6
+#define PMU15_PLL_PC4_DBGMODE_MASK		0x00000E00
+#define PMU15_PLL_PC4_DBGMODE_SHIFT		9
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK	0x00001000
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT	12
+#define PMU15_PLL_PC4_FLL480_CTLSP_MASK		0x000FE000
+#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT	13
+#define PMU15_PLL_PC4_DINPOL_MASK		0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT		20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK		0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT		21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK		0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT		22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK		0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT		23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK		0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT		24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK		0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT		25
+#define PMU15_PLL_PC4_TEST_EN_MASK		0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT		26
+
+#define PMU15_PLL_PLLCTL5			5
+#define PMU15_PLL_PC5_FREQTGT_MASK		0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT		0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK		0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT		20
+#define PMU15_PLL_PC5_PRESCALE_MASK		0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT		27
+
+#define PMU15_PLL_PLLCTL6		6
+#define PMU15_PLL_PC6_FREQTGT_MASK	0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT	0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK	0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT	20
+#define PMU15_PLL_PC6_PRESCALE_MASK	0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT	27
+
+#define PMU15_FREQTGT_480_DEFAULT	0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT	0x1A4F5
+#define PMU15_ARM_96MHZ			96000000	/**< 96 MHz */
+#define PMU15_ARM_98MHZ			98400000	/**< 98.4 MHz */
+#define PMU15_ARM_97MHZ			97000000	/**< 97 MHz */
+
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK		0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT		4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT		0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8		1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111		2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8	3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN		0
+#define PMU17_PLLCTL0_BBPLL_DRST		3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK		8
+
+/* PLL usage in 4716/47162 */
+#define	PMU4716_MAINPLL_PLL0		12
+
+/* PLL usage in 4335 */
+#define PMU4335_PLL0_PC2_P1DIV_MASK			0x000f0000
+#define PMU4335_PLL0_PC2_P1DIV_SHIFT		16
+#define PMU4335_PLL0_PC2_NDIV_INT_MASK		0xff800000
+#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT		23
+#define PMU4335_PLL0_PC1_MDIV2_MASK			0x0000ff00
+#define PMU4335_PLL0_PC1_MDIV2_SHIFT		8
+
+/* PLL usage in 4347 */
+#define PMU4347_PLL0_PC2_P1DIV_MASK		0x000f0000
+#define PMU4347_PLL0_PC2_P1DIV_SHIFT		16
+#define PMU4347_PLL0_PC2_NDIV_INT_MASK		0x3ff00000
+#define PMU4347_PLL0_PC2_NDIV_INT_SHIFT		20
+#define PMU4347_PLL0_PC3_NDIV_FRAC_MASK		0x000fffff
+#define PMU4347_PLL0_PC3_NDIV_FRAC_SHIFT		0
+#define PMU4347_PLL1_PC5_P1DIV_MASK		0xc0000000
+#define PMU4347_PLL1_PC5_P1DIV_SHIFT		30
+#define PMU4347_PLL1_PC6_P1DIV_MASK		0x00000003
+#define PMU4347_PLL1_PC6_P1DIV_SHIFT		0
+#define PMU4347_PLL1_PC6_NDIV_INT_MASK		0x00000ffc
+#define PMU4347_PLL1_PC6_NDIV_INT_SHIFT		2
+#define PMU4347_PLL1_PC6_NDIV_FRAC_MASK		0xfffff000
+#define PMU4347_PLL1_PC6_NDIV_FRAC_SHIFT	12
+
+/* PLL usage in 5356/5357 */
+#define	PMU5356_MAINPLL_PLL0		0
+#define	PMU5357_MAINPLL_PLL0		0
+
+/* 4716/47162 resources */
+#define RES4716_PROC_PLL_ON		0x00000040
+#define RES4716_PROC_HT_AVAIL		0x00000080
+
+/* 4716/4717/4718 Chip specific ChipControl register bits */
+#define CCTRL_471X_I2S_PINS_ENABLE	0x0080 /* I2S pins off by default, shared w/ pflash */
+
+/* 5357 Chip specific ChipControl register bits */
+/* 2nd - 32-bit reg */
+#define CCTRL_5357_I2S_PINS_ENABLE	0x00040000 /* I2S pins enable */
+#define CCTRL_5357_I2CSPI_PINS_ENABLE	0x00080000 /* I2C/SPI pins enable */
+
+/* 5354 resources */
+#define RES5354_EXT_SWITCHER_PWM	0	/**< 0x00001 */
+#define RES5354_BB_SWITCHER_PWM		1	/**< 0x00002 */
+#define RES5354_BB_SWITCHER_BURST	2	/**< 0x00004 */
+#define RES5354_BB_EXT_SWITCHER_BURST	3	/**< 0x00008 */
+#define RES5354_ILP_REQUEST		4	/**< 0x00010 */
+#define RES5354_RADIO_SWITCHER_PWM	5	/**< 0x00020 */
+#define RES5354_RADIO_SWITCHER_BURST	6	/**< 0x00040 */
+#define RES5354_ROM_SWITCH		7	/**< 0x00080 */
+#define RES5354_PA_REF_LDO		8	/**< 0x00100 */
+#define RES5354_RADIO_LDO		9	/**< 0x00200 */
+#define RES5354_AFE_LDO			10	/**< 0x00400 */
+#define RES5354_PLL_LDO			11	/**< 0x00800 */
+#define RES5354_BG_FILTBYP		12	/**< 0x01000 */
+#define RES5354_TX_FILTBYP		13	/**< 0x02000 */
+#define RES5354_RX_FILTBYP		14	/**< 0x04000 */
+#define RES5354_XTAL_PU			15	/**< 0x08000 */
+#define RES5354_XTAL_EN			16	/**< 0x10000 */
+#define RES5354_BB_PLL_FILTBYP		17	/**< 0x20000 */
+#define RES5354_RF_PLL_FILTBYP		18	/**< 0x40000 */
+#define RES5354_BB_PLL_PU		19	/**< 0x80000 */
+
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA                 (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3		(1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_NFLASH		(1<<16) /* Nandflash in ChipControl 1, bit 16 */
+
+/* 43217 Chip specific ChipControl register bits */
+#define CCTRL43217_EXTPA_C0             (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
+#define CCTRL43217_EXTPA_C1             (1<<8)  /* core1 extPA in ChipControl 1, bit 8 */
+
+/* 43228 Chip specific ChipControl register bits */
+#define CCTRL43228_EXTPA_C0             (1<<14) /* core1 extPA in ChipControl 1, bit 14 */
+#define CCTRL43228_EXTPA_C1             (1<<9)  /* core0 extPA in ChipControl 1, bit 9 */
+
+/* 4328 resources */
+#define RES4328_EXT_SWITCHER_PWM	0	/**< 0x00001 */
+#define RES4328_BB_SWITCHER_PWM		1	/**< 0x00002 */
+#define RES4328_BB_SWITCHER_BURST	2	/**< 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST	3	/**< 0x00008 */
+#define RES4328_ILP_REQUEST		4	/**< 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM	5	/**< 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST	6	/**< 0x00040 */
+#define RES4328_ROM_SWITCH		7	/**< 0x00080 */
+#define RES4328_PA_REF_LDO		8	/**< 0x00100 */
+#define RES4328_RADIO_LDO		9	/**< 0x00200 */
+#define RES4328_AFE_LDO			10	/**< 0x00400 */
+#define RES4328_PLL_LDO			11	/**< 0x00800 */
+#define RES4328_BG_FILTBYP		12	/**< 0x01000 */
+#define RES4328_TX_FILTBYP		13	/**< 0x02000 */
+#define RES4328_RX_FILTBYP		14	/**< 0x04000 */
+#define RES4328_XTAL_PU			15	/**< 0x08000 */
+#define RES4328_XTAL_EN			16	/**< 0x10000 */
+#define RES4328_BB_PLL_FILTBYP		17	/**< 0x20000 */
+#define RES4328_RF_PLL_FILTBYP		18	/**< 0x40000 */
+#define RES4328_BB_PLL_PU		19	/**< 0x80000 */
+
+/* 4325 A0/A1 resources */
+#define RES4325_BUCK_BOOST_BURST	0	/**< 0x00000001 */
+#define RES4325_CBUCK_BURST		1	/**< 0x00000002 */
+#define RES4325_CBUCK_PWM		2	/**< 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST	3	/**< 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM		4	/**< 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM		5	/**< 0x00000020 */
+#define RES4325_ILP_REQUEST		6	/**< 0x00000040 */
+#define RES4325_ABUCK_BURST		7	/**< 0x00000080 */
+#define RES4325_ABUCK_PWM		8	/**< 0x00000100 */
+#define RES4325_LNLDO1_PU		9	/**< 0x00000200 */
+#define RES4325_OTP_PU			10	/**< 0x00000400 */
+#define RES4325_LNLDO3_PU		11	/**< 0x00000800 */
+#define RES4325_LNLDO4_PU		12	/**< 0x00001000 */
+#define RES4325_XTAL_PU			13	/**< 0x00002000 */
+#define RES4325_ALP_AVAIL		14	/**< 0x00004000 */
+#define RES4325_RX_PWRSW_PU		15	/**< 0x00008000 */
+#define RES4325_TX_PWRSW_PU		16	/**< 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU		17	/**< 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU		18	/**< 0x00040000 */
+#define RES4325_AFE_PWRSW_PU		19	/**< 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU		20	/**< 0x00100000 */
+#define RES4325_HT_AVAIL		21	/**< 0x00200000 */
+
+/* 4325 B0/C0 resources */
+#define RES4325B0_CBUCK_LPOM		1	/**< 0x00000002 */
+#define RES4325B0_CBUCK_BURST		2	/**< 0x00000004 */
+#define RES4325B0_CBUCK_PWM		3	/**< 0x00000008 */
+#define RES4325B0_CLDO_PU		4	/**< 0x00000010 */
+
+/* 4325 C1 resources */
+#define RES4325C1_LNLDO2_PU		12	/**< 0x00001000 */
+
+/* 4325 chip-specific ChipStatus register bits */
+#define CST4325_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4325_DEFCIS_SEL		0	/**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL		1	/**< OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL			2	/**< OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN		3	/**< OTP is powered down, SPROM is present */
+#define CST4325_SDIO_USB_MODE_MASK	0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT	2
+#define CST4325_RCAL_VALID_MASK		0x00000008
+#define CST4325_RCAL_VALID_SHIFT	3
+#define CST4325_RCAL_VALUE_MASK		0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT	4
+#define CST4325_PMUTOP_2B_MASK 		0x00000200	/**< 1 for 2b, 0 for 2a */
+#define CST4325_PMUTOP_2B_SHIFT   	9
+
+#define RES4329_RESERVED0		0	/**< 0x00000001 */
+#define RES4329_CBUCK_LPOM		1	/**< 0x00000002 */
+#define RES4329_CBUCK_BURST		2	/**< 0x00000004 */
+#define RES4329_CBUCK_PWM		3	/**< 0x00000008 */
+#define RES4329_CLDO_PU			4	/**< 0x00000010 */
+#define RES4329_PALDO_PU		5	/**< 0x00000020 */
+#define RES4329_ILP_REQUEST		6	/**< 0x00000040 */
+#define RES4329_RESERVED7		7	/**< 0x00000080 */
+#define RES4329_RESERVED8		8	/**< 0x00000100 */
+#define RES4329_LNLDO1_PU		9	/**< 0x00000200 */
+#define RES4329_OTP_PU			10	/**< 0x00000400 */
+#define RES4329_RESERVED11		11	/**< 0x00000800 */
+#define RES4329_LNLDO2_PU		12	/**< 0x00001000 */
+#define RES4329_XTAL_PU			13	/**< 0x00002000 */
+#define RES4329_ALP_AVAIL		14	/**< 0x00004000 */
+#define RES4329_RX_PWRSW_PU		15	/**< 0x00008000 */
+#define RES4329_TX_PWRSW_PU		16	/**< 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU		17	/**< 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU		18	/**< 0x00040000 */
+#define RES4329_AFE_PWRSW_PU		19	/**< 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU		20	/**< 0x00100000 */
+#define RES4329_HT_AVAIL		21	/**< 0x00200000 */
+
+#define CST4329_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4329_DEFCIS_SEL		0	/**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL		1	/**< OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL			2	/**< OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN		3	/**< OTP is powered down, SPROM is present */
+#define CST4329_SPI_SDIO_MODE_MASK	0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT	2
+
+/* 4312 chip-specific ChipStatus register bits */
+#define CST4312_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4312_DEFCIS_SEL		0	/**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL		1	/**< OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL			2	/**< OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD			3	/**< OTP is broken, SPROM is present */
+
+/* 4312 resources (all PMU chips with little memory constraint) */
+#define RES4312_SWITCHER_BURST		0	/**< 0x00000001 */
+#define RES4312_SWITCHER_PWM    	1	/**< 0x00000002 */
+#define RES4312_PA_REF_LDO		2	/**< 0x00000004 */
+#define RES4312_CORE_LDO_BURST		3	/**< 0x00000008 */
+#define RES4312_CORE_LDO_PWM		4	/**< 0x00000010 */
+#define RES4312_RADIO_LDO		5	/**< 0x00000020 */
+#define RES4312_ILP_REQUEST		6	/**< 0x00000040 */
+#define RES4312_BG_FILTBYP		7	/**< 0x00000080 */
+#define RES4312_TX_FILTBYP		8	/**< 0x00000100 */
+#define RES4312_RX_FILTBYP		9	/**< 0x00000200 */
+#define RES4312_XTAL_PU			10	/**< 0x00000400 */
+#define RES4312_ALP_AVAIL		11	/**< 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP		12	/**< 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP		13	/**< 0x00002000 */
+#define RES4312_HT_AVAIL		14	/**< 0x00004000 */
+
+/* 4322 resources */
+#define RES4322_RF_LDO			0
+#define RES4322_ILP_REQUEST		1
+#define RES4322_XTAL_PU			2
+#define RES4322_ALP_AVAIL		3
+#define RES4322_SI_PLL_ON		4
+#define RES4322_HT_SI_AVAIL		5
+#define RES4322_PHY_PLL_ON		6
+#define RES4322_HT_PHY_AVAIL		7
+#define RES4322_OTP_PU			8
+
+/* 4322 chip-specific ChipStatus register bits */
+#define CST4322_XTAL_FREQ_20_40MHZ	0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK	0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT	6
+#define CST4322_NO_SPROM_OTP		0	/**< no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT		1	/**< SPROM is present */
+#define CST4322_OTP_PRESENT		2	/**< OTP is present */
+#define CST4322_PCI_OR_USB		0x00000100
+#define CST4322_BOOT_MASK		0x00000600
+#define CST4322_BOOT_SHIFT		9
+#define CST4322_BOOT_FROM_SRAM		0	/**< boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM		1	/**< boot from ROM */
+#define CST4322_BOOT_FROM_FLASH		2	/**< boot from FLASH */
+#define CST4322_BOOT_FROM_INVALID	3
+#define CST4322_ILP_DIV_EN		0x00000800
+#define CST4322_FLASH_TYPE_MASK		0x00001000
+#define CST4322_FLASH_TYPE_SHIFT	12
+#define CST4322_FLASH_TYPE_SHIFT_ST	0	/**< ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL	1	/**< ATMEL flash */
+#define CST4322_ARM_TAP_SEL		0x00002000
+#define CST4322_RES_INIT_MODE_MASK	0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT	14
+#define CST4322_RES_INIT_MODE_ILPAVAIL	0	/**< resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ	1	/**< resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL	2	/**< resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL	3	/**< resinitmode: HT available */
+#define CST4322_PCIPLLCLK_GATING	0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP	0x00020000
+#define CST4322_PCI_CARDBUS_MODE	0x00040000
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE          0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define CCTRL_43224A0_12MA_LED_DRIVE    0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE    0xF0    /* 12 mA drive strength for later 43224s */
+
+/* 43236 resources */
+#define RES43236_REGULATOR		0
+#define RES43236_ILP_REQUEST		1
+#define RES43236_XTAL_PU		2
+#define RES43236_ALP_AVAIL		3
+#define RES43236_SI_PLL_ON		4
+#define RES43236_HT_SI_AVAIL		5
+
+/* 43236 chip-specific ChipControl register bits */
+#define CCTRL43236_BT_COEXIST		(1<<0)	/**< 0 disable */
+#define CCTRL43236_SECI			(1<<1)	/**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL43236_EXT_LNA		(1<<2)	/**< 0 disable */
+#define CCTRL43236_ANT_MUX_2o3          (1<<3)	/**< 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43236_GSIO			(1<<4)	/**< 0 disable */
+
+/* 43236 Chip specific ChipStatus register bits */
+#define CST43236_SFLASH_MASK		0x00000040
+#define CST43236_OTP_SEL_MASK		0x00000080
+#define CST43236_OTP_SEL_SHIFT		7
+#define CST43236_HSIC_MASK		0x00000100	/**< USB/HSIC */
+#define CST43236_BP_CLK			0x00000200	/**< 120/96Mbps */
+#define CST43236_BOOT_MASK		0x00001800
+#define CST43236_BOOT_SHIFT		11
+#define CST43236_BOOT_FROM_SRAM		0	/**< boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM		1	/**< boot from ROM */
+#define CST43236_BOOT_FROM_FLASH	2	/**< boot from FLASH */
+#define CST43236_BOOT_FROM_INVALID	3
+
+/* 43237 resources */
+#define RES43237_REGULATOR		0
+#define RES43237_ILP_REQUEST		1
+#define RES43237_XTAL_PU		2
+#define RES43237_ALP_AVAIL		3
+#define RES43237_SI_PLL_ON		4
+#define RES43237_HT_SI_AVAIL		5
+
+/* 43237 chip-specific ChipControl register bits */
+#define CCTRL43237_BT_COEXIST		(1<<0)	/**< 0 disable */
+#define CCTRL43237_SECI			(1<<1)	/**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL43237_EXT_LNA		(1<<2)	/**< 0 disable */
+#define CCTRL43237_ANT_MUX_2o3          (1<<3)	/**< 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO			(1<<4)	/**< 0 disable */
+
+/* 43237 Chip specific ChipStatus register bits */
+#define CST43237_SFLASH_MASK		0x00000040
+#define CST43237_OTP_SEL_MASK		0x00000080
+#define CST43237_OTP_SEL_SHIFT		7
+#define CST43237_HSIC_MASK		0x00000100	/**< USB/HSIC */
+#define CST43237_BP_CLK			0x00000200	/**< 120/96Mbps */
+#define CST43237_BOOT_MASK		0x00001800
+#define CST43237_BOOT_SHIFT		11
+#define CST43237_BOOT_FROM_SRAM		0	/**< boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM		1	/**< boot from ROM */
+#define CST43237_BOOT_FROM_FLASH	2	/**< boot from FLASH */
+#define CST43237_BOOT_FROM_INVALID	3
+
+/* 43239 resources */
+#define RES43239_OTP_PU			9
+#define RES43239_MACPHY_CLKAVAIL	23
+#define RES43239_HT_AVAIL		24
+
+/* 43239 Chip specific ChipStatus register bits */
+#define CST43239_SPROM_MASK			0x00000002
+#define CST43239_SFLASH_MASK		0x00000004
+#define	CST43239_RES_INIT_MODE_SHIFT	7
+#define	CST43239_RES_INIT_MODE_MASK		0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs)	((cs) & (1 << 15))	/**< SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs)	(~(cs) & (1 << 15))	/**< USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs)	(((cs) & (1 << 0)) == 0)	/**< SDIO */
+#define CST43239_CHIPMODE_GSPI(cs)	(((cs) & (1 << 0)) == (1 << 0))	/**< gSPI */
+
+/* 4324 resources */
+/* 43242 uses the same PMU as 4324 */
+#define RES4324_LPLDO_PU			0
+#define RES4324_RESET_PULLDN_DIS		1
+#define RES4324_PMU_BG_PU			2
+#define RES4324_HSIC_LDO_PU			3
+#define RES4324_CBUCK_LPOM_PU			4
+#define RES4324_CBUCK_PFM_PU			5
+#define RES4324_CLDO_PU				6
+#define RES4324_LPLDO2_LVM			7
+#define RES4324_LNLDO1_PU			8
+#define RES4324_LNLDO2_PU			9
+#define RES4324_LDO3P3_PU			10
+#define RES4324_OTP_PU				11
+#define RES4324_XTAL_PU				12
+#define RES4324_BBPLL_PU			13
+#define RES4324_LQ_AVAIL			14
+#define RES4324_WL_CORE_READY			17
+#define RES4324_ILP_REQ				18
+#define RES4324_ALP_AVAIL			19
+#define RES4324_PALDO_PU			20
+#define RES4324_RADIO_PU			21
+#define RES4324_SR_CLK_STABLE			22
+#define RES4324_SR_SAVE_RESTORE			23
+#define RES4324_SR_PHY_PWRSW			24
+#define RES4324_SR_PHY_PIC			25
+#define RES4324_SR_SUBCORE_PWRSW		26
+#define RES4324_SR_SUBCORE_PIC			27
+#define RES4324_SR_MEM_PM0			28
+#define RES4324_HT_AVAIL			29
+#define RES4324_MACPHY_CLKAVAIL			30
+
+/* 4324 Chip specific ChipStatus register bits */
+#define CST4324_SPROM_MASK			0x00000080
+#define CST4324_SFLASH_MASK			0x00400000
+#define	CST4324_RES_INIT_MODE_SHIFT	10
+#define	CST4324_RES_INIT_MODE_MASK	0x00000c00
+#define CST4324_CHIPMODE_MASK		0x7
+#define CST4324_CHIPMODE_SDIOD(cs)	((~(cs)) & (1 << 2))	/**< SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs)	(((cs) & CST4324_CHIPMODE_MASK) == 0x6)	/**< USB || USBDA */
+
+/* 43242 Chip specific ChipStatus register bits */
+#define CST43242_SFLASH_MASK                    0x00000008
+#define CST43242_SR_HALT			(1<<25)
+#define CST43242_SR_CHIP_STATUS_2		27 /* bit 27 */
+
+/* 4331 resources */
+#define RES4331_REGULATOR		0
+#define RES4331_ILP_REQUEST		1
+#define RES4331_XTAL_PU			2
+#define RES4331_ALP_AVAIL		3
+#define RES4331_SI_PLL_ON		4
+#define RES4331_HT_SI_AVAIL		5
+
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST		(1<<0)	/**< 0 disable */
+#define CCTRL4331_SECI			(1<<1)	/**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA_G		(1<<2)	/**< 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15       (1<<3)	/**< sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN		(1<<4)	/**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS	(1<<5)	/**< set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS	(1<<6)	/**< use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5	(1<<7)	/* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN	(1<<8)	/**< override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN	(1<<9)	/**< override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN		(1<<10)	/**< pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN	(1<<11)	/**< pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2		(1<<12)	/**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A		(1<<13)	/**< 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4	(1<<16)	/**< enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5	(1<<17)	/**< enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN		(1<<24)	/**< 0 ext pa disable, 1 ext pa enabled */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define	CST4331_XTAL_FREQ		0x00000001	/**< crystal frequency 20/40MHz */
+#define	CST4331_SPROM_OTP_SEL_MASK	0x00000006
+#define	CST4331_SPROM_OTP_SEL_SHIFT	1
+#define	CST4331_SPROM_PRESENT		0x00000002
+#define	CST4331_OTP_PRESENT		0x00000004
+#define	CST4331_LDO_RF			0x00000008
+#define	CST4331_LDO_PAR			0x00000010
+
+/* 4315 resource */
+#define RES4315_CBUCK_LPOM		1	/**< 0x00000002 */
+#define RES4315_CBUCK_BURST		2	/**< 0x00000004 */
+#define RES4315_CBUCK_PWM		3	/**< 0x00000008 */
+#define RES4315_CLDO_PU			4	/**< 0x00000010 */
+#define RES4315_PALDO_PU		5	/**< 0x00000020 */
+#define RES4315_ILP_REQUEST		6	/**< 0x00000040 */
+#define RES4315_LNLDO1_PU		9	/**< 0x00000200 */
+#define RES4315_OTP_PU			10	/**< 0x00000400 */
+#define RES4315_LNLDO2_PU		12	/**< 0x00001000 */
+#define RES4315_XTAL_PU			13	/**< 0x00002000 */
+#define RES4315_ALP_AVAIL		14	/**< 0x00004000 */
+#define RES4315_RX_PWRSW_PU		15	/**< 0x00008000 */
+#define RES4315_TX_PWRSW_PU		16	/**< 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU		17	/**< 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU		18	/**< 0x00040000 */
+#define RES4315_AFE_PWRSW_PU		19	/**< 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU		20	/**< 0x00100000 */
+#define RES4315_HT_AVAIL		21	/**< 0x00200000 */
+
+/* 4315 chip-specific ChipStatus register bits */
+#define CST4315_SPROM_OTP_SEL_MASK	0x00000003	/**< gpio [7:6], SDIO CIS selection */
+#define CST4315_DEFCIS_SEL		0x00000000	/**< use default CIS, OTP is powered up */
+#define CST4315_SPROM_SEL		0x00000001	/**< use SPROM, OTP is powered up */
+#define CST4315_OTP_SEL			0x00000002	/**< use OTP, OTP is powered up */
+#define CST4315_OTP_PWRDN		0x00000003	/**< use SPROM, OTP is powered down */
+#define CST4315_SDIO_MODE		0x00000004	/**< gpio [8], sdio/usb mode */
+#define CST4315_RCAL_VALID		0x00000008
+#define CST4315_RCAL_VALUE_MASK		0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT	4
+#define CST4315_PALDO_EXTPNP		0x00000200 /**< PALDO is configured with external PNP */
+#define CST4315_CBUCK_MODE_MASK		0x00000c00
+#define CST4315_CBUCK_MODE_BURST	0x00000400
+#define CST4315_CBUCK_MODE_LPBURST	0x00000c00
+
+/* 4319 resources */
+#define RES4319_CBUCK_LPOM		1	/**< 0x00000002 */
+#define RES4319_CBUCK_BURST		2	/**< 0x00000004 */
+#define RES4319_CBUCK_PWM		3	/**< 0x00000008 */
+#define RES4319_CLDO_PU			4	/**< 0x00000010 */
+#define RES4319_PALDO_PU		5	/**< 0x00000020 */
+#define RES4319_ILP_REQUEST		6	/**< 0x00000040 */
+#define RES4319_LNLDO1_PU		9	/**< 0x00000200 */
+#define RES4319_OTP_PU			10	/**< 0x00000400 */
+#define RES4319_LNLDO2_PU		12	/**< 0x00001000 */
+#define RES4319_XTAL_PU			13	/**< 0x00002000 */
+#define RES4319_ALP_AVAIL		14	/**< 0x00004000 */
+#define RES4319_RX_PWRSW_PU		15	/**< 0x00008000 */
+#define RES4319_TX_PWRSW_PU		16	/**< 0x00010000 */
+#define RES4319_RFPLL_PWRSW_PU		17	/**< 0x00020000 */
+#define RES4319_LOGEN_PWRSW_PU		18	/**< 0x00040000 */
+#define RES4319_AFE_PWRSW_PU		19	/**< 0x00080000 */
+#define RES4319_BBPLL_PWRSW_PU		20	/**< 0x00100000 */
+#define RES4319_HT_AVAIL		21	/**< 0x00200000 */
+
+/* 4319 chip-specific ChipStatus register bits */
+#define	CST4319_SPI_CPULESSUSB		0x00000001
+#define	CST4319_SPI_CLK_POL		0x00000002
+#define	CST4319_SPI_CLK_PH		0x00000008
+#define	CST4319_SPROM_OTP_SEL_MASK	0x000000c0	/**< gpio [7:6], SDIO CIS selection */
+#define	CST4319_SPROM_OTP_SEL_SHIFT	6
+#define	CST4319_DEFCIS_SEL		0x00000000	/**< use default CIS, OTP is powered up */
+#define	CST4319_SPROM_SEL		0x00000040	/**< use SPROM, OTP is powered up */
+#define	CST4319_OTP_SEL			0x00000080      /* use OTP, OTP is powered up */
+#define	CST4319_OTP_PWRDN		0x000000c0      /* use SPROM, OTP is powered down */
+#define	CST4319_SDIO_USB_MODE		0x00000100	/**< gpio [8], sdio/usb mode */
+#define	CST4319_REMAP_SEL_MASK		0x00000600
+#define	CST4319_ILPDIV_EN		0x00000800
+#define	CST4319_XTAL_PD_POL		0x00001000
+#define	CST4319_LPO_SEL			0x00002000
+#define	CST4319_RES_INIT_MODE		0x0000c000
+#define	CST4319_PALDO_EXTPNP		0x00010000 /**< PALDO is configured with external PNP */
+#define	CST4319_CBUCK_MODE_MASK		0x00060000
+#define CST4319_CBUCK_MODE_BURST	0x00020000
+#define CST4319_CBUCK_MODE_LPBURST	0x00060000
+#define	CST4319_RCAL_VALID		0x01000000
+#define	CST4319_RCAL_VALUE_MASK		0x3e000000
+#define	CST4319_RCAL_VALUE_SHIFT	25
+
+#define PMU1_PLL0_CHIPCTL0		0
+#define PMU1_PLL0_CHIPCTL1		1
+#define PMU1_PLL0_CHIPCTL2		2
+#define CCTL_4319USB_XTAL_SEL_MASK	0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT	19
+#define CCTL_4319USB_48MHZ_PLL_SEL	1
+#define CCTL_4319USB_24MHZ_PLL_SEL	2
+
+/* PMU resources for 4336 */
+#define	RES4336_CBUCK_LPOM		0
+#define	RES4336_CBUCK_BURST		1
+#define	RES4336_CBUCK_LP_PWM		2
+#define	RES4336_CBUCK_PWM		3
+#define	RES4336_CLDO_PU			4
+#define	RES4336_DIS_INT_RESET_PD	5
+#define	RES4336_ILP_REQUEST		6
+#define	RES4336_LNLDO_PU		7
+#define	RES4336_LDO3P3_PU		8
+#define	RES4336_OTP_PU			9
+#define	RES4336_XTAL_PU			10
+#define	RES4336_ALP_AVAIL		11
+#define	RES4336_RADIO_PU		12
+#define	RES4336_BG_PU			13
+#define	RES4336_VREG1p4_PU_PU		14
+#define	RES4336_AFE_PWRSW_PU		15
+#define	RES4336_RX_PWRSW_PU		16
+#define	RES4336_TX_PWRSW_PU		17
+#define	RES4336_BB_PWRSW_PU		18
+#define	RES4336_SYNTH_PWRSW_PU		19
+#define	RES4336_MISC_PWRSW_PU		20
+#define	RES4336_LOGEN_PWRSW_PU		21
+#define	RES4336_BBPLL_PWRSW_PU		22
+#define	RES4336_MACPHY_CLKAVAIL		23
+#define	RES4336_HT_AVAIL		24
+#define	RES4336_RSVD			25
+
+/* 4336 chip-specific ChipStatus register bits */
+#define	CST4336_SPI_MODE_MASK		0x00000001
+#define	CST4336_SPROM_PRESENT		0x00000002
+#define	CST4336_OTP_PRESENT		0x00000004
+#define	CST4336_ARMREMAP_0		0x00000008
+#define	CST4336_ILPDIV_EN_MASK		0x00000010
+#define	CST4336_ILPDIV_EN_SHIFT		4
+#define	CST4336_XTAL_PD_POL_MASK	0x00000020
+#define	CST4336_XTAL_PD_POL_SHIFT	5
+#define	CST4336_LPO_SEL_MASK		0x00000040
+#define	CST4336_LPO_SEL_SHIFT		6
+#define	CST4336_RES_INIT_MODE_MASK	0x00000180
+#define	CST4336_RES_INIT_MODE_SHIFT	7
+#define	CST4336_CBUCK_MODE_MASK		0x00000600
+#define	CST4336_CBUCK_MODE_SHIFT	9
+
+/* 4336 Chip specific PMU ChipControl register bits */
+#define PCTL_4336_SERIAL_ENAB	(1  << 24)
+
+/* 4330 resources */
+#define	RES4330_CBUCK_LPOM		0
+#define	RES4330_CBUCK_BURST		1
+#define	RES4330_CBUCK_LP_PWM		2
+#define	RES4330_CBUCK_PWM		3
+#define	RES4330_CLDO_PU			4
+#define	RES4330_DIS_INT_RESET_PD	5
+#define	RES4330_ILP_REQUEST		6
+#define	RES4330_LNLDO_PU		7
+#define	RES4330_LDO3P3_PU		8
+#define	RES4330_OTP_PU			9
+#define	RES4330_XTAL_PU			10
+#define	RES4330_ALP_AVAIL		11
+#define	RES4330_RADIO_PU		12
+#define	RES4330_BG_PU			13
+#define	RES4330_VREG1p4_PU_PU		14
+#define	RES4330_AFE_PWRSW_PU		15
+#define	RES4330_RX_PWRSW_PU		16
+#define	RES4330_TX_PWRSW_PU		17
+#define	RES4330_BB_PWRSW_PU		18
+#define	RES4330_SYNTH_PWRSW_PU		19
+#define	RES4330_MISC_PWRSW_PU		20
+#define	RES4330_LOGEN_PWRSW_PU		21
+#define	RES4330_BBPLL_PWRSW_PU		22
+#define	RES4330_MACPHY_CLKAVAIL		23
+#define	RES4330_HT_AVAIL		24
+#define	RES4330_5gRX_PWRSW_PU		25
+#define	RES4330_5gTX_PWRSW_PU		26
+#define	RES4330_5g_LOGEN_PWRSW_PU	27
+
+/* 4330 chip-specific ChipStatus register bits */
+#define CST4330_CHIPMODE_SDIOD(cs)	(((cs) & 0x7) < 6)	/**< SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs)	(((cs) & 0x7) >= 6)	/**< USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs)	(((cs) & 0x4) == 0)	/**< SDIO */
+#define CST4330_CHIPMODE_GSPI(cs)	(((cs) & 0x6) == 4)	/**< gSPI */
+#define CST4330_CHIPMODE_USB(cs)	(((cs) & 0x7) == 6)	/**< USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs)	(((cs) & 0x7) == 7)	/**< USB Direct Access */
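+/*
+ * Decode summary (derived from the macros above): with the 3-bit strap field
+ * (cs & 0x7), values 0-3 select SDIO, 4-5 select gSPI, 6 selects USB and
+ * 7 selects USB Direct Access; CST4330_CHIPMODE_SDIOD() therefore covers
+ * 0-5 and CST4330_CHIPMODE_USB20D() covers 6-7.
+ */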
+#define	CST4330_OTP_PRESENT		0x00000010
+#define	CST4330_LPO_AUTODET_EN		0x00000020
+#define	CST4330_ARMREMAP_0		0x00000040
+#define	CST4330_SPROM_PRESENT		0x00000080	/**< takes priority over OTP if both set */
+#define	CST4330_ILPDIV_EN		0x00000100
+#define	CST4330_LPO_SEL			0x00000200
+#define	CST4330_RES_INIT_MODE_SHIFT	10
+#define	CST4330_RES_INIT_MODE_MASK	0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT	12
+#define CST4330_CBUCK_MODE_MASK		0x00003000
+#define	CST4330_CBUCK_POWER_OK		0x00004000
+#define	CST4330_BB_PLL_LOCKED		0x00008000
+#define SOCDEVRAM_BP_ADDR		0x1E000000
+#define SOCDEVRAM_ARM_ADDR		0x00800000
+
+/* 4330 Chip specific PMU ChipControl register bits */
+#define PCTL_4330_SERIAL_ENAB	(1  << 24)
+
+/* 4330 Chip specific ChipControl register bits */
+#define CCTRL_4330_GPIO_SEL		0x00000001    /* 1=select GPIOs to be muxed out */
+#define CCTRL_4330_ERCX_SEL		0x00000002    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL_4330_SDIO_HOST_WAKE	0x00000004    /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL_4330_JTAG_DISABLE	0x00000008    /* 1=disable JTAG interface on mux'd pins */
+
+#define PMU_VREG0_ADDR				0
+#define PMU_VREG0_I_SR_CNTL_EN_SHIFT		0
+#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT	2
+#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT	3
+#define PMU_VREG0_CBUCKFSW_ADJ_SHIFT		7
+#define PMU_VREG0_CBUCKFSW_ADJ_MASK			0x1F
+#define PMU_VREG0_RAMP_SEL_SHIFT			13
+#define PMU_VREG0_RAMP_SEL_MASK				0x7
+#define PMU_VREG0_VFB_RSEL_SHIFT			17
+#define PMU_VREG0_VFB_RSEL_MASK				3
+
+#define PMU_VREG4_ADDR			4
+
+#define PMU_VREG4_CLDO_PWM_SHIFT	4
+#define PMU_VREG4_CLDO_PWM_MASK		0x7
+
+#define PMU_VREG4_LPLDO1_SHIFT		15
+#define PMU_VREG4_LPLDO1_MASK		0x7
+#define PMU_VREG4_LPLDO1_1p20V		0
+#define PMU_VREG4_LPLDO1_1p15V		1
+#define PMU_VREG4_LPLDO1_1p10V		2
+#define PMU_VREG4_LPLDO1_1p25V		3
+#define PMU_VREG4_LPLDO1_1p05V		4
+#define PMU_VREG4_LPLDO1_1p00V		5
+#define PMU_VREG4_LPLDO1_0p95V		6
+#define PMU_VREG4_LPLDO1_0p90V		7
+
+/* 4350/4345 VREG4 settings */
+#define PMU4350_VREG4_LPLDO1_1p10V	0
+#define PMU4350_VREG4_LPLDO1_1p15V	1
+#define PMU4350_VREG4_LPLDO1_1p21V	2
+#define PMU4350_VREG4_LPLDO1_1p24V	3
+#define PMU4350_VREG4_LPLDO1_0p90V	4
+#define PMU4350_VREG4_LPLDO1_0p96V	5
+#define PMU4350_VREG4_LPLDO1_1p01V	6
+#define PMU4350_VREG4_LPLDO1_1p04V	7
+
+#define PMU_VREG4_LPLDO2_LVM_SHIFT	18
+#define PMU_VREG4_LPLDO2_LVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_HVM_SHIFT	21
+#define PMU_VREG4_LPLDO2_HVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_LVM_HVM_MASK	0x3f
+#define PMU_VREG4_LPLDO2_1p00V		0
+#define PMU_VREG4_LPLDO2_1p15V		1
+#define PMU_VREG4_LPLDO2_1p20V		2
+#define PMU_VREG4_LPLDO2_1p10V		3
+#define PMU_VREG4_LPLDO2_0p90V		4	/**< 4 - 7 is 0.90V */
+
+#define PMU_VREG4_HSICLDO_BYPASS_SHIFT	27
+#define PMU_VREG4_HSICLDO_BYPASS_MASK	0x1
+
+#define PMU_VREG5_ADDR			5
+#define PMU_VREG5_HSICAVDD_PD_SHIFT	6
+#define PMU_VREG5_HSICAVDD_PD_MASK	0x1
+#define PMU_VREG5_HSICDVDD_PD_SHIFT	11
+#define PMU_VREG5_HSICDVDD_PD_MASK	0x1
+
+/* 4334 resources */
+#define RES4334_LPLDO_PU		0
+#define RES4334_RESET_PULLDN_DIS	1
+#define RES4334_PMU_BG_PU		2
+#define RES4334_HSIC_LDO_PU		3
+#define RES4334_CBUCK_LPOM_PU		4
+#define RES4334_CBUCK_PFM_PU		5
+#define RES4334_CLDO_PU			6
+#define RES4334_LPLDO2_LVM		7
+#define RES4334_LNLDO_PU		8
+#define RES4334_LDO3P3_PU		9
+#define RES4334_OTP_PU			10
+#define RES4334_XTAL_PU			11
+#define RES4334_WL_PWRSW_PU		12
+#define RES4334_LQ_AVAIL		13
+#define RES4334_LOGIC_RET		14
+#define RES4334_MEM_SLEEP		15
+#define RES4334_MACPHY_RET		16
+#define RES4334_WL_CORE_READY		17
+#define RES4334_ILP_REQ			18
+#define RES4334_ALP_AVAIL		19
+#define RES4334_MISC_PWRSW_PU		20
+#define RES4334_SYNTH_PWRSW_PU		21
+#define RES4334_RX_PWRSW_PU		22
+#define RES4334_RADIO_PU		23
+#define RES4334_WL_PMU_PU		24
+#define RES4334_VCO_LDO_PU		25
+#define RES4334_AFE_LDO_PU		26
+#define RES4334_RX_LDO_PU		27
+#define RES4334_TX_LDO_PU		28
+#define RES4334_HT_AVAIL		29
+#define RES4334_MACPHY_CLK_AVAIL	30
+
+/* 4334 chip-specific ChipStatus register bits */
+#define CST4334_CHIPMODE_MASK		7
+#define CST4334_SDIO_MODE		0x00000000
+#define CST4334_SPI_MODE		0x00000004
+#define CST4334_HSIC_MODE		0x00000006
+#define CST4334_BLUSB_MODE		0x00000007
+#define CST4334_CHIPMODE_HSIC(cs)	(((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE)
+#define CST4334_OTP_PRESENT		0x00000010
+#define CST4334_LPO_AUTODET_EN		0x00000020
+#define CST4334_ARMREMAP_0		0x00000040
+#define CST4334_SPROM_PRESENT		0x00000080
+#define CST4334_ILPDIV_EN_MASK		0x00000100
+#define CST4334_ILPDIV_EN_SHIFT		8
+#define CST4334_LPO_SEL_MASK		0x00000200
+#define CST4334_LPO_SEL_SHIFT		9
+#define CST4334_RES_INIT_MODE_MASK	0x00000C00
+#define CST4334_RES_INIT_MODE_SHIFT	10
+
+/* 4334 Chip specific PMU ChipControl register bits */
+#define PCTL_4334_GPIO3_ENAB    (1  << 3)
+
+/* 4334 Chip control */
+#define CCTRL4334_PMU_WAKEUP_GPIO1	(1  << 0)
+#define CCTRL4334_PMU_WAKEUP_HSIC	(1  << 1)
+#define CCTRL4334_PMU_WAKEUP_AOS	(1  << 2)
+#define CCTRL4334_HSIC_WAKE_MODE	(1  << 3)
+#define CCTRL4334_HSIC_INBAND_GPIO1	(1  << 4)
+#define CCTRL4334_HSIC_LDO_PU		(1  << 23)
+
+/* 4334 Chip control 3 */
+#define CCTRL4334_BLOCK_EXTRNL_WAKE		(1  << 4)
+#define CCTRL4334_SAVERESTORE_FIX		(1  << 5)
+
+/* 43341 Chip control 3 */
+#define CCTRL43341_BLOCK_EXTRNL_WAKE		(1  << 13)
+#define CCTRL43341_SAVERESTORE_FIX		(1  << 14)
+#define CCTRL43341_BT_ISO_SEL			(1  << 16)
+
+/* 4334 Chip specific ChipControl1 register bits */
+#define CCTRL1_4334_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4334_ERCX_SEL		(1 << 1)    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL1_4334_JTAG_DISABLE	(1 << 3)    /* 1=disable JTAG interface on mux'd pins */
+#define CCTRL1_4334_UART_ON_4_5	(1 << 28) /**< 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
+
+/* 4324 Chip specific ChipControl1 register bits */
+#define CCTRL1_4324_GPIO_SEL            (1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */
+/* register contains strap values sampled during POR */
+#define CST43143_REMAP_TO_ROM	 (3 << 0)    /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */
+#define CST43143_SDIO_EN	 (1 << 2)    /* 0 = USB Enab, SDIO pins are GPIO or I2S */
+#define CST43143_SDIO_ISO	 (1 << 3)    /* 1 = SDIO isolated */
+#define CST43143_USB_CPU_LESS	 (1 << 4)   /* 1 = CPULess mode Enabled */
+#define CST43143_CBUCK_MODE	 (3 << 6)   /* Indicates what controller mode CBUCK is in */
+#define CST43143_POK_CBUCK	 (1 << 8)   /* 1 = 1.2V CBUCK voltage ready */
+#define CST43143_PMU_OVRSPIKE	 (1 << 9)
+#define CST43143_PMU_OVRTEMP	 (0xF << 10)
+#define CST43143_SR_FLL_CAL_DONE (1 << 14)
+#define CST43143_USB_PLL_LOCKDET (1 << 15)
+#define CST43143_PMU_PLL_LOCKDET (1 << 16)
+#define CST43143_CHIPMODE_SDIOD(cs)	(((cs) & CST43143_SDIO_EN) != 0) /* SDIO */
+
+/* 43143 Chip specific ChipControl register bits */
+/* 00: SECI is disabled (JTAG functional), 01: 2 wire, 10: 4 wire */
+#define CCTRL_43143_SECI		(1<<0)
+#define CCTRL_43143_BT_LEGACY		(1<<1)
+#define CCTRL_43143_I2S_MODE		(1<<2)	/**< 0: SDIO enabled */
+#define CCTRL_43143_I2S_MASTER		(1<<3)	/**< 0: I2S MCLK input disabled */
+#define CCTRL_43143_I2S_FULL		(1<<4)	/**< 0: I2S SDIN and SPDIF_TX inputs disabled */
+#define CCTRL_43143_GSIO		(1<<5)	/**< 0: sFlash enabled */
+#define CCTRL_43143_RF_SWCTRL_MASK	(7<<6)	/**< 0: disabled */
+#define CCTRL_43143_RF_SWCTRL_0		(1<<6)
+#define CCTRL_43143_RF_SWCTRL_1		(2<<6)
+#define CCTRL_43143_RF_SWCTRL_2		(4<<6)
+#define CCTRL_43143_RF_XSWCTRL		(1<<9)	/**< 0: UART enabled */
+#define CCTRL_43143_HOST_WAKE0		(1<<11)	/**< 1: SDIO separate interrupt output from GPIO4 */
+#define CCTRL_43143_HOST_WAKE1		(1<<12)	/* 1: SDIO separate interrupt output from GPIO16 */
+
+/* 43143 resources, based on pmu_params.xls V1.19 */
+#define RES43143_EXT_SWITCHER_PWM	0	/**< 0x00001 */
+#define RES43143_XTAL_PU		1	/**< 0x00002 */
+#define RES43143_ILP_REQUEST		2	/**< 0x00004 */
+#define RES43143_ALP_AVAIL		3	/**< 0x00008 */
+#define RES43143_WL_CORE_READY		4	/**< 0x00010 */
+#define RES43143_BBPLL_PWRSW_PU		5	/**< 0x00020 */
+#define RES43143_HT_AVAIL		6	/**< 0x00040 */
+#define RES43143_RADIO_PU		7	/**< 0x00080 */
+#define RES43143_MACPHY_CLK_AVAIL	8	/**< 0x00100 */
+#define RES43143_OTP_PU			9	/**< 0x00200 */
+#define RES43143_LQ_AVAIL		10	/**< 0x00400 */
+
+#define PMU43143_XTAL_CORE_SIZE_MASK	0x3F
+
+/* 4313 resources */
+#define	RES4313_BB_PU_RSRC		0
+#define	RES4313_ILP_REQ_RSRC		1
+#define	RES4313_XTAL_PU_RSRC		2
+#define	RES4313_ALP_AVAIL_RSRC		3
+#define	RES4313_RADIO_PU_RSRC		4
+#define	RES4313_BG_PU_RSRC		5
+#define	RES4313_VREG1P4_PU_RSRC		6
+#define	RES4313_AFE_PWRSW_RSRC		7
+#define	RES4313_RX_PWRSW_RSRC		8
+#define	RES4313_TX_PWRSW_RSRC		9
+#define	RES4313_BB_PWRSW_RSRC		10
+#define	RES4313_SYNTH_PWRSW_RSRC	11
+#define	RES4313_MISC_PWRSW_RSRC		12
+#define	RES4313_BB_PLL_PWRSW_RSRC	13
+#define	RES4313_HT_AVAIL_RSRC		14
+#define	RES4313_MACPHY_CLK_AVAIL_RSRC	15
+
+/* 4313 chip-specific ChipStatus register bits */
+#define	CST4313_SPROM_PRESENT			1
+#define	CST4313_OTP_PRESENT			2
+#define	CST4313_SPROM_OTP_SEL_MASK		0x00000002
+#define	CST4313_SPROM_OTP_SEL_SHIFT		0
+
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE    0x00000007    /* 12 mA drive strength for later 4313 */
+
+/* PMU resources for 4314 */
+#define RES4314_LPLDO_PU		0
+#define RES4314_PMU_SLEEP_DIS		1
+#define RES4314_PMU_BG_PU		2
+#define RES4314_CBUCK_LPOM_PU		3
+#define RES4314_CBUCK_PFM_PU		4
+#define RES4314_CLDO_PU			5
+#define RES4314_LPLDO2_LVM		6
+#define RES4314_WL_PMU_PU		7
+#define RES4314_LNLDO_PU		8
+#define RES4314_LDO3P3_PU		9
+#define RES4314_OTP_PU			10
+#define RES4314_XTAL_PU			11
+#define RES4314_WL_PWRSW_PU		12
+#define RES4314_LQ_AVAIL		13
+#define RES4314_LOGIC_RET		14
+#define RES4314_MEM_SLEEP		15
+#define RES4314_MACPHY_RET		16
+#define RES4314_WL_CORE_READY		17
+#define RES4314_ILP_REQ			18
+#define RES4314_ALP_AVAIL		19
+#define RES4314_MISC_PWRSW_PU		20
+#define RES4314_SYNTH_PWRSW_PU		21
+#define RES4314_RX_PWRSW_PU		22
+#define RES4314_RADIO_PU		23
+#define RES4314_VCO_LDO_PU		24
+#define RES4314_AFE_LDO_PU		25
+#define RES4314_RX_LDO_PU		26
+#define RES4314_TX_LDO_PU		27
+#define RES4314_HT_AVAIL		28
+#define RES4314_MACPHY_CLK_AVAIL	29
+
+/* 4314 chip-specific ChipStatus register bits */
+#define CST4314_OTP_ENABLED		0x00200000
+
+/* 43228 resources */
+#define RES43228_NOT_USED		0
+#define RES43228_ILP_REQUEST		1
+#define RES43228_XTAL_PU		2
+#define RES43228_ALP_AVAIL		3
+#define RES43228_PLL_EN			4
+#define RES43228_HT_PHY_AVAIL		5
+
+/* 43228 chipstatus  reg bits */
+#define CST43228_ILP_DIV_EN		0x1
+#define	CST43228_OTP_PRESENT		0x2
+#define	CST43228_SERDES_REFCLK_PADSEL	0x4
+#define	CST43228_SDIO_MODE		0x8
+#define	CST43228_SDIO_OTP_PRESENT	0x10
+#define	CST43228_SDIO_RESET		0x20
+
+/* 4706 chipstatus reg bits */
+#define	CST4706_PKG_OPTION		(1<<0) /* 0: full-featured package, 1: low-cost package */
+#define	CST4706_SFLASH_PRESENT	(1<<1) /* 0: parallel, 1: serial flash is present */
+#define	CST4706_SFLASH_TYPE		(1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define	CST4706_MIPS_BENDIAN	(1<<3) /* 0: little, 1: big endian */
+#define	CST4706_PCIE1_DISABLE	(1<<5) /* PCIE1 enable strap pin */
+
+/* 4706 flashstrconfig reg bits */
+#define FLSTRCF4706_MASK		0x000000ff
+#define FLSTRCF4706_SF1			0x00000001	/**< 2nd serial flash present */
+#define FLSTRCF4706_PF1			0x00000002	/**< 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE	0x00000004	/**< 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1			0x00000008	/**< 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK		0x000000f0	/**< Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB		0x00000010	/**< 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB		0x00000020	/**< 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB		0x00000030	/**< 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB		0x00000040	/**< 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB		0x00000050	/**< 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB		0x00000060	/**< 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB		0x00000070	/**< 256MB */
+
+/* 4360 Chip specific ChipControl register bits */
+#define CCTRL4360_I2C_MODE			(1 << 0)
+#define CCTRL4360_UART_MODE			(1 << 1)
+#define CCTRL4360_SECI_MODE			(1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE			(1 << 3)
+#define CCTRL4360_DISCRETE_FEMCTRL_MODE		(1 << 4)
+#define CCTRL4360_DIGITAL_PACTRL_MODE		(1 << 5)
+#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT	(1 << 6)
+#define CCTRL4360_EXTRA_GPIO_MODE		(1 << 7)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE		(1 << 8)
+#define CCTRL4360_BT_LGCY_MODE			(1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON		(1 << 21)
+#define CCTRL4360_SECI_ON_GPIO01		(1 << 24)
+
+/* 4360 Chip specific Regulator Control register bits */
+#define RCTRL4360_RFLDO_PWR_DOWN		(1 << 1)
+
+/* 4360 PMU resources and chip status bits */
+#define RES4360_REGULATOR          0
+#define RES4360_ILP_AVAIL          1
+#define RES4360_ILP_REQ            2
+#define RES4360_XTAL_LDO_PU        3
+#define RES4360_XTAL_PU            4
+#define RES4360_ALP_AVAIL          5
+#define RES4360_BBPLLPWRSW_PU      6
+#define RES4360_HT_AVAIL           7
+#define RES4360_OTP_PU             8
+#define RES4360_AVB_PLL_PWRSW_PU   9
+#define RES4360_PCIE_TL_CLK_AVAIL  10
+
+#define CST4360_XTAL_40MZ                  0x00000001
+#define CST4360_SFLASH                     0x00000002
+#define CST4360_SPROM_PRESENT              0x00000004
+#define CST4360_SFLASH_TYPE                0x00000004
+#define CST4360_OTP_ENABLED                0x00000008
+#define CST4360_REMAP_ROM                  0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK        0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT       5
+#define CST4360_ILP_DIVEN                  0x00000080
+#define CST4360_MODE_USB                   0x00000100
+#define CST4360_SPROM_SIZE_MASK            0x00000600
+#define CST4360_SPROM_SIZE_SHIFT           9
+#define CST4360_BBPLL_LOCK                 0x00000800
+#define CST4360_AVBBPLL_LOCK               0x00001000
+#define CST4360_USBBBPLL_LOCK              0x00002000
+#define CST4360_RSRC_INIT_MODE(cs)	((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
+					CST4360_RSRC_INIT_MODE_SHIFT)
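+/*
+ * Example (sketch): CST4360_RSRC_INIT_MODE() extracts the 2-bit resource
+ * init mode field, e.g. a chipstatus value of 0x00000040 yields
+ * (0x40 & 0x60) >> 5 == 2.
+ */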
+
+#define CCTRL_4360_UART_SEL	0x2
+
+#define PMU4360_CC1_GPIO7_OVRD	           (1<<23) /* GPIO7 override */
+
+
+/* 43602 PMU resources based on pmu_params.xls version v0.95 */
+#define RES43602_LPLDO_PU		0
+#define RES43602_REGULATOR		1
+#define RES43602_PMU_SLEEP		2
+#define RES43602_RSVD_3			3
+#define RES43602_XTALLDO_PU		4
+#define RES43602_SERDES_PU		5
+#define RES43602_BBPLL_PWRSW_PU		6
+#define RES43602_SR_CLK_START		7
+#define RES43602_SR_PHY_PWRSW		8
+#define RES43602_SR_SUBCORE_PWRSW	9
+#define RES43602_XTAL_PU		10
+#define	RES43602_PERST_OVR		11
+#define RES43602_SR_CLK_STABLE		12
+#define RES43602_SR_SAVE_RESTORE	13
+#define RES43602_SR_SLEEP		14
+#define RES43602_LQ_START		15
+#define RES43602_LQ_AVAIL		16
+#define RES43602_WL_CORE_RDY		17
+#define RES43602_ILP_REQ		18
+#define RES43602_ALP_AVAIL		19
+#define RES43602_RADIO_PU		20
+#define RES43602_RFLDO_PU		21
+#define RES43602_HT_START		22
+#define RES43602_HT_AVAIL		23
+#define RES43602_MACPHY_CLKAVAIL	24
+#define RES43602_PARLDO_PU		25
+#define RES43602_RSVD_26		26
+
+/* 43602 chip status bits */
+#define CST43602_SPROM_PRESENT             (1<<1)
+#define CST43602_SPROM_SIZE                (1<<10) /* 0 = 16K, 1 = 4K */
+#define CST43602_BBPLL_LOCK                (1<<11)
+#define CST43602_RF_LDO_OUT_OK             (1<<15) /* RF LDO output OK */
+
+#define PMU43602_CC1_GPIO12_OVRD           (1<<28) /* GPIO12 override */
+
+#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1)  /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN  (1<<2)  /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3)
+#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5)  /* enable pmu_wakeup to request for ALP_AVAIL */
+#define PMU43602_CC2_PERST_L_EXTEND_EN     (1<<9)  /* extend perst_l until rsc PERST_OVR comes up */
+#define PMU43602_CC2_FORCE_EXT_LPO         (1<<19) /* 1=ext LPO clock is the final LPO clock */
+#define PMU43602_CC2_XTAL32_SEL            (1<<30) /* 0=ext_clock, 1=xtal */
+
+#define CC_SR1_43602_SR_ASM_ADDR	(0x0)
+
+/* PLL CTL register values for open loop, used during S/R operation */
+#define PMU43602_PLL_CTL6_VAL		0x68000528
+#define PMU43602_PLL_CTL7_VAL		0x6
+
+#define PMU43602_CC3_ARMCR4_DBG_CLK	(1 << 29)
+
+/* 4365 PMU resources */
+#define RES4365_REGULATOR_PU			0
+#define RES4365_XTALLDO_PU			1
+#define RES4365_XTAL_PU				2
+#define RES4365_CPU_PLLLDO_PU			3
+#define RES4365_CPU_PLL_PU			4
+#define RES4365_WL_CORE_RDY			5
+#define RES4365_ILP_REQ				6
+#define RES4365_ALP_AVAIL			7
+#define RES4365_HT_AVAIL			8
+#define RES4365_BB_PLLLDO_PU			9
+#define RES4365_BB_PLL_PU			10
+#define RES4365_MINIMU_PU			11
+#define RES4365_RADIO_PU			12
+#define RES4365_MACPHY_CLK_AVAIL		13
+
+/* 4349 related */
+#define RES4349_LPLDO_PU			0
+#define RES4349_BG_PU				1
+#define RES4349_PMU_SLEEP			2
+#define RES4349_PALDO3P3_PU			3
+#define RES4349_CBUCK_LPOM_PU		4
+#define RES4349_CBUCK_PFM_PU		5
+#define RES4349_COLD_START_WAIT		6
+#define RES4349_RSVD_7				7
+#define RES4349_LNLDO_PU			8
+#define RES4349_XTALLDO_PU			9
+#define RES4349_LDO3P3_PU			10
+#define RES4349_OTP_PU				11
+#define RES4349_XTAL_PU				12
+#define RES4349_SR_CLK_START		13
+#define RES4349_LQ_AVAIL			14
+#define RES4349_LQ_START			15
+#define RES4349_PERST_OVR			16
+#define RES4349_WL_CORE_RDY			17
+#define RES4349_ILP_REQ				18
+#define RES4349_ALP_AVAIL			19
+#define RES4349_MINI_PMU			20
+#define RES4349_RADIO_PU			21
+#define RES4349_SR_CLK_STABLE		22
+#define RES4349_SR_SAVE_RESTORE		23
+#define RES4349_SR_PHY_PWRSW		24
+#define RES4349_SR_VDDM_PWRSW		25
+#define RES4349_SR_SUBCORE_PWRSW	26
+#define RES4349_SR_SLEEP			27
+#define RES4349_HT_START			28
+#define RES4349_HT_AVAIL			29
+#define RES4349_MACPHY_CLKAVAIL		30
+
+/* SR Control0 bits */
+#define CC_SR0_4349_SR_ENG_EN_MASK             	0x1
+#define CC_SR0_4349_SR_ENG_EN_SHIFT             0
+#define CC_SR0_4349_SR_ENG_CLK_EN		(1 << 1)
+#define CC_SR0_4349_SR_RSRC_TRIGGER		(0xC << 2)
+#define CC_SR0_4349_SR_WD_MEM_MIN_DIV		(0x3 << 6)
+#define CC_SR0_4349_SR_MEM_STBY_ALLOW_MSK	(1 << 16)
+#define CC_SR0_4349_SR_MEM_STBY_ALLOW_SHIFT	16
+#define CC_SR0_4349_SR_ENABLE_ILP		(1 << 17)
+#define CC_SR0_4349_SR_ENABLE_ALP		(1 << 18)
+#define CC_SR0_4349_SR_ENABLE_HT		(1 << 19)
+#define CC_SR0_4349_SR_ALLOW_PIC		(3 << 20)
+#define CC_SR0_4349_SR_PMU_MEM_DISABLE		(1 << 30)
+#define CC_SR0_4349_SR_MEM_STBY_ALLOW		(1 << 16)
+
+/* SR binary offset is at 8K */
+#define CC_SR1_4349_SR_ASM_ADDR		(0x10)
+
+#define CST4349_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 6)) != 0)	/* SDIO */
+#define CST4349_CHIPMODE_PCIE(cs)	(((cs) & (1 << 7)) != 0)	/* PCIE */
+
+#define CST4349_SPROM_PRESENT		0x00000010
+
+#define	VREG4_4349_MEMLPLDO_PWRUP_MASK		(1 << 31)
+#define	VREG4_4349_MEMLPLDO_PWRUP_SHIFT		(31)
+#define VREG4_4349_LPLDO1_OUTPUT_VOLT_ADJ_MASK	(0x7 << 15)
+#define VREG4_4349_LPLDO1_OUTPUT_VOLT_ADJ_SHIFT	(15)
+#define CC2_4349_PHY_PWRSE_RST_CNT_MASK		(0xF << 0)
+#define CC2_4349_PHY_PWRSE_RST_CNT_SHIFT	(0)
+#define CC2_4349_VDDM_PWRSW_EN_MASK		(1 << 20)
+#define CC2_4349_VDDM_PWRSW_EN_SHIFT		(20)
+#define CC2_4349_MEMLPLDO_PWRSW_EN_MASK		(1 << 21)
+#define CC2_4349_MEMLPLDO_PWRSW_EN_SHIFT	(21)
+#define CC2_4349_SDIO_AOS_WAKEUP_MASK		(1 << 24)
+#define CC2_4349_SDIO_AOS_WAKEUP_SHIFT		(24)
+#define CC2_4349_PMUWAKE_EN_MASK		(1 << 31)
+#define CC2_4349_PMUWAKE_EN_SHIFT		(31)
+
+#define CC5_4349_MAC_PHY_CLK_8_DIV              (1 << 27)
+
+#define CC6_4349_PCIE_CLKREQ_WAKEUP_MASK	(1 << 4)
+#define CC6_4349_PCIE_CLKREQ_WAKEUP_SHIFT	(4)
+#define CC6_4349_PMU_WAKEUP_ALPAVAIL_MASK	(1 << 6)
+#define CC6_4349_PMU_WAKEUP_ALPAVAIL_SHIFT	(6)
+#define CC6_4349_PMU_EN_EXT_PERST_MASK		(1 << 13)
+#define CC6_4349_PMU_EN_L2_DEASSERT_MASK	(1 << 14)
+#define CC6_4349_PMU_EN_L2_DEASSERT_SHIF	(14)
+#define CC6_4349_PMU_ENABLE_L2REFCLKPAD_PWRDWN	(1 << 15)
+#define CC6_4349_PMU_EN_MDIO_MASK		(1 << 16)
+#define CC6_4349_PMU_EN_ASSERT_L2_MASK		(1 << 25)
+
+
+/* 4349 GCI function sel values */
+/*
+ * Reference
+ * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/ToplevelArchitecture4349B0#Function_Sel
+ */
+#define CC4349_FNSEL_HWDEF		(0)
+#define CC4349_FNSEL_SAMEASPIN		(1)
+#define CC4349_FNSEL_GPIO		(2)
+#define CC4349_FNSEL_FAST_UART		(3)
+#define CC4349_FNSEL_GCI0		(4)
+#define CC4349_FNSEL_GCI1		(5)
+#define CC4349_FNSEL_DGB_UART		(6)
+#define CC4349_FNSEL_I2C		(7)
+#define CC4349_FNSEL_SPROM		(8)
+#define CC4349_FNSEL_MISC0		(9)
+#define CC4349_FNSEL_MISC1		(10)
+#define CC4349_FNSEL_MISC2		(11)
+#define CC4349_FNSEL_IND		(12)
+#define CC4349_FNSEL_PDN		(13)
+#define CC4349_FNSEL_PUP		(14)
+#define CC4349_FNSEL_TRISTATE		(15)
+
+/* 4364 related */
+#define RES4364_LPLDO_PU				0
+#define RES4364_BG_PU					1
+#define RES4364_MEMLPLDO_PU				2
+#define RES4364_PALDO3P3_PU				3
+#define RES4364_CBUCK_1P2				4
+#define RES4364_CBUCK_1V8				5
+#define RES4364_COLD_START_WAIT				6
+#define RES4364_SR_3x3_VDDM_PWRSW			7
+#define RES4364_3x3_MACPHY_CLKAVAIL			8
+#define RES4364_XTALLDO_PU				9
+#define RES4364_LDO3P3_PU				10
+#define RES4364_OTP_PU					11
+#define RES4364_XTAL_PU					12
+#define RES4364_SR_CLK_START				13
+#define RES4364_3x3_RADIO_PU				14
+#define RES4364_RF_LDO					15
+#define RES4364_PERST_OVR				16
+#define RES4364_WL_CORE_RDY				17
+#define RES4364_ILP_REQ					18
+#define RES4364_ALP_AVAIL				19
+#define RES4364_1x1_MINI_PMU				20
+#define RES4364_1x1_RADIO_PU				21
+#define RES4364_SR_CLK_STABLE				22
+#define RES4364_SR_SAVE_RESTORE				23
+#define RES4364_SR_PHY_PWRSW				24
+#define RES4364_SR_VDDM_PWRSW				25
+#define RES4364_SR_SUBCORE_PWRSW			26
+#define RES4364_SR_SLEEP				27
+#define RES4364_HT_START				28
+#define RES4364_HT_AVAIL				29
+#define RES4364_MACPHY_CLKAVAIL				30
+
+/* 4349 GPIO */
+#define CC4349_PIN_GPIO_00		(0)
+#define CC4349_PIN_GPIO_01		(1)
+#define CC4349_PIN_GPIO_02		(2)
+#define CC4349_PIN_GPIO_03		(3)
+#define CC4349_PIN_GPIO_04		(4)
+#define CC4349_PIN_GPIO_05		(5)
+#define CC4349_PIN_GPIO_06		(6)
+#define CC4349_PIN_GPIO_07		(7)
+#define CC4349_PIN_GPIO_08		(8)
+#define CC4349_PIN_GPIO_09		(9)
+#define CC4349_PIN_GPIO_10		(10)
+#define CC4349_PIN_GPIO_11		(11)
+#define CC4349_PIN_GPIO_12		(12)
+#define CC4349_PIN_GPIO_13		(13)
+#define CC4349_PIN_GPIO_14		(14)
+#define CC4349_PIN_GPIO_15		(15)
+#define CC4349_PIN_GPIO_16		(16)
+#define CC4349_PIN_GPIO_17		(17)
+#define CC4349_PIN_GPIO_18		(18)
+#define CC4349_PIN_GPIO_19		(19)
+
+/* Mask used to decide whether the HOSTWAKE mux is to be performed or not */
+#define MUXENAB4349_HOSTWAKE_MASK	(0x000000f0) /* configure GPIO for SDIO host_wake */
+#define MUXENAB4349_HOSTWAKE_SHIFT	4
+#define MUXENAB4349_GETIX(val, name) \
+	((((val) & MUXENAB4349_ ## name ## _MASK) >> MUXENAB4349_ ## name ## _SHIFT) - 1)
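+/*
+ * Usage sketch (assumption: the HOSTWAKE field is 1-based, with 0 meaning
+ * no host-wake GPIO muxed): MUXENAB4349_GETIX(0x50, HOSTWAKE) evaluates to
+ * ((0x50 & 0x000000f0) >> 4) - 1 == 4, i.e. GPIO 4 is used for SDIO host wake.
+ */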
+
+#define CR4_4364_RAM_BASE			(0x160000)
+
+/* SR binary offset is at 8K */
+#define CC_SR1_4364_SR_CORE0_ASM_ADDR			(0x10)
+#define CC_SR1_4364_SR_CORE1_ASM_ADDR			(0x10)
+
+#define CC_SR0_4364_SR_ENG_EN_MASK			0x1
+#define CC_SR0_4364_SR_ENG_EN_SHIFT			0
+#define CC_SR0_4364_SR_ENG_CLK_EN			(1 << 1)
+#define CC_SR0_4364_SR_RSRC_TRIGGER			(0xC << 2)
+#define CC_SR0_4364_SR_WD_MEM_MIN_DIV			(0x3 << 6)
+#define CC_SR0_4364_SR_MEM_STBY_ALLOW_MSK		(1 << 16)
+#define CC_SR0_4364_SR_MEM_STBY_ALLOW_SHIFT		16
+#define CC_SR0_4364_SR_ENABLE_ILP			(1 << 17)
+#define CC_SR0_4364_SR_ENABLE_ALP			(1 << 18)
+#define CC_SR0_4364_SR_ENABLE_HT			(1 << 19)
+#define CC_SR0_4364_SR_ALLOW_PIC			(3 << 20)
+#define CC_SR0_4364_SR_PMU_MEM_DISABLE			(1 << 30)
+
+#define PMU_4364_CC1_ENABLE_BBPLL_PWR_DWN		(0x1 << 4)
+#define PMU_4364_CC1_BBPLL_ARESET_LQ_TIME		(0x1 << 8)
+#define PMU_4364_CC1_BBPLL_ARESET_HT_UPTIME		(0x1 << 10)
+#define PMU_4364_CC1_BBPLL_DRESET_LQ_UPTIME		(0x1 << 12)
+#define PMU_4364_CC1_BBPLL_DRESET_HT_UPTIME		(0x4 << 16)
+#define PMU_4364_CC1_SUBCORE_PWRSW_UP_DELAY		(0x8 << 20)
+#define PMU_4364_CC1_SUBCORE_PWRSW_RESET_CNT		(0x4 << 24)
+
+#define PMU_4364_CC2_PHY_PWRSW_RESET_CNT		(0x2 << 0)
+#define PMU_4364_CC2_PHY_PWRSW_RESET_MASK		(0x7)
+#define PMU_4364_CC2_SEL_CHIPC_IF_FOR_SR		(1 << 21)
+
+#define PMU_4364_CC3_MEMLPLDO3x3_PWRSW_FORCE_MASK	(1 << 23)
+#define PMU_4364_CC3_MEMLPLDO1x1_PWRSW_FORCE_MASK	(1 << 24)
+#define PMU_4364_CC3_CBUCK1P2_PU_SR_VDDM_REQ_ON		(1 << 25)
+#define PMU_4364_CC3_MEMLPLDO3x3_PWRSW_FORCE_OFF	(0)
+#define PMU_4364_CC3_MEMLPLDO1x1_PWRSW_FORCE_OFF	(0)
+
+
+#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2_MASK	(1 << 26)
+#define PMU_4364_CC5_ENABLE_ARMCR4_DEBUG_CLK_MASK	(1 << 4)
+#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2		(1 << 26)
+#define PMU_4364_CC5_ENABLE_ARMCR4_DEBUG_CLK_OFF	(0)
+
+#define PMU_4364_CC6_MDI_RESET_MASK			(1 << 16)
+#define PMU_4364_CC6_USE_CLK_REQ_MASK			(1 << 18)
+#define PMU_4364_CC6_HIGHER_CLK_REQ_ALP_MASK		(1 << 20)
+#define PMU_4364_CC6_HT_AVAIL_REQ_ALP_AVAIL_MASK	(1 << 21)
+#define PMU_4364_CC6_PHY_CLK_REQUESTS_ALP_AVAIL_MASK	(1 << 22)
+#define PMU_4364_CC6_MDI_RESET				(1 << 16)
+#define PMU_4364_CC6_USE_CLK_REQ			(1 << 18)
+
+#define PMU_4364_CC6_HIGHER_CLK_REQ_ALP			(1 << 20)
+#define PMU_4364_CC6_HT_AVAIL_REQ_ALP_AVAIL		(1 << 21)
+#define PMU_4364_CC6_PHY_CLK_REQUESTS_ALP_AVAIL		(1 << 22)
+
+#define PMU_4364_VREG0_DISABLE_BT_PULL_DOWN		(1 << 2)
+#define PMU_4364_VREG1_DISABLE_WL_PULL_DOWN		(1 << 2)
+
+#define PMU_VREG_0					(0x0)
+#define PMU_VREG_1					(0x1)
+#define PMU_VREG_3					(0x3)
+#define PMU_VREG_4					(0x4)
+#define PMU_VREG_5					(0x5)
+#define PMU_VREG_6					(0x6)
+
+#define PMU_4364_VREG3_DISABLE_WPT_REG_ON_PULL_DOWN	(1 << 11)
+
+#define PMU_4364_VREG4_MEMLPLDO_PU_ON			(1 << 31)
+#define PMU_4364_VREG4_LPLPDO_ADJ			(3 << 16)
+#define PMU_4364_VREG4_LPLPDO_ADJ_MASK			(3 << 16)
+#define PMU_4364_VREG5_MAC_CLK_1x1_AUTO			(0x1 << 18)
+#define PMU_4364_VREG5_SR_AUTO				(0x1 << 20)
+#define PMU_4364_VREG5_BT_PWM_MASK			(0x1 << 21)
+#define PMU_4364_VREG5_BT_AUTO				(0x1 << 22)
+#define PMU_4364_VREG5_WL2CLB_DVFS_EN_MASK		(0x1 << 23)
+#define PMU_4364_VREG5_BT_PWMK				(0)
+#define PMU_4364_VREG5_WL2CLB_DVFS_EN			(0)
+
+#define PMU_4364_VREG6_BBPLL_AUTO			(0x1 << 17)
+#define PMU_4364_VREG6_MINI_PMU_PWM			(0x1 << 18)
+#define PMU_4364_VREG6_LNLDO_AUTO			(0x1 << 21)
+#define PMU_4364_VREG6_PCIE_PWRDN_0_AUTO		(0x1 << 23)
+#define PMU_4364_VREG6_PCIE_PWRDN_1_AUTO		(0x1 << 25)
+#define PMU_4364_VREG6_MAC_CLK_3x3_PWM			(0x1 << 27)
+#define PMU_4364_VREG6_ENABLE_FINE_CTRL			(0x1 << 30)
+
+#define PMU_4364_PLL0_DISABLE_CHANNEL6			(0x1 << 18)
+
+#define CC_GCI1_REG					(0x1)
+#define CC_GCI1_4364_IND_STATE_FOR_GPIO9_11		(0x0ccccccc)
+#define CC2_4364_SDIO_AOS_WAKEUP_MASK			(1 << 24)
+#define CC2_4364_SDIO_AOS_WAKEUP_SHIFT			(24)
+
+#define CC6_4364_PCIE_CLKREQ_WAKEUP_MASK		(1 << 4)
+#define CC6_4364_PCIE_CLKREQ_WAKEUP_SHIFT		(4)
+#define CC6_4364_PMU_WAKEUP_ALPAVAIL_MASK		(1 << 6)
+#define CC6_4364_PMU_WAKEUP_ALPAVAIL_SHIFT		(6)
+
+#define CST4364_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 6)) != 0)	/* SDIO */
+#define CST4364_CHIPMODE_PCIE(cs)	(((cs) & (1 << 7)) != 0)	/* PCIE */
+#define CST4364_SPROM_PRESENT		0x00000010
+
+#define PMU_4364_MACCORE_0_RES_REQ_MASK			0x3FCBF7FF
+#define PMU_4364_MACCORE_1_RES_REQ_MASK			0x7FFB3647
+
+
+#define PMU1_PLL0_SWITCH_MACCLOCK_120MHZ			(0)
+#define PMU1_PLL0_SWITCH_MACCLOCK_160MHZ			(1)
+#define TSF_CLK_FRAC_L_4364_120MHZ					0x8889
+#define TSF_CLK_FRAC_H_4364_120MHZ					0x8
+#define TSF_CLK_FRAC_L_4364_160MHZ					0x6666
+#define TSF_CLK_FRAC_H_4364_160MHZ					0x6
+#define PMU1_PLL0_PC1_M2DIV_VALUE_120MHZ			8
+#define PMU1_PLL0_PC1_M2DIV_VALUE_160MHZ			6
+
+#define CST4347_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 6)) != 0)	/* SDIO */
+#define CST4347_CHIPMODE_PCIE(cs)	(((cs) & (1 << 7)) != 0)	/* PCIE */
+#define CST4347_SPROM_PRESENT		0x00000010
+
+/* 43430 PMU resources based on pmu_params.xls */
+#define RES43430_LPLDO_PU				0
+#define RES43430_BG_PU					1
+#define RES43430_PMU_SLEEP				2
+#define RES43430_RSVD_3					3
+#define RES43430_CBUCK_LPOM_PU			4
+#define RES43430_CBUCK_PFM_PU			5
+#define RES43430_COLD_START_WAIT		6
+#define RES43430_RSVD_7					7
+#define RES43430_LNLDO_PU				8
+#define RES43430_RSVD_9					9
+#define RES43430_LDO3P3_PU				10
+#define RES43430_OTP_PU					11
+#define RES43430_XTAL_PU				12
+#define RES43430_SR_CLK_START			13
+#define RES43430_LQ_AVAIL				14
+#define RES43430_LQ_START				15
+#define RES43430_RSVD_16				16
+#define RES43430_WL_CORE_RDY			17
+#define RES43430_ILP_REQ				18
+#define RES43430_ALP_AVAIL				19
+#define RES43430_MINI_PMU				20
+#define RES43430_RADIO_PU				21
+#define RES43430_SR_CLK_STABLE			22
+#define RES43430_SR_SAVE_RESTORE		23
+#define RES43430_SR_PHY_PWRSW			24
+#define RES43430_SR_VDDM_PWRSW			25
+#define RES43430_SR_SUBCORE_PWRSW		26
+#define RES43430_SR_SLEEP				27
+#define RES43430_HT_START				28
+#define RES43430_HT_AVAIL				29
+#define RES43430_MACPHY_CLK_AVAIL		30
+
+/* 43430 chip status bits */
+#define CST43430_SDIO_MODE				0x00000001
+#define CST43430_GSPI_MODE				0x00000002
+#define CST43430_RSRC_INIT_MODE_0		0x00000080
+#define CST43430_RSRC_INIT_MODE_1		0x00000100
+#define CST43430_SEL0_SDIO				0x00000200
+#define CST43430_SEL1_SDIO				0x00000400
+#define CST43430_SEL2_SDIO				0x00000800
+#define CST43430_BBPLL_LOCKED			0x00001000
+#define CST43430_DBG_INST_DETECT		0x00004000
+#define CST43430_CLB2WL_BT_READY		0x00020000
+#define CST43430_JTAG_MODE				0x00100000
+#define CST43430_HOST_IFACE				0x00400000
+#define CST43430_TRIM_EN				0x00800000
+#define CST43430_DIN_PACKAGE_OPTION		0x10000000
+
+#define PMU43430_PLL0_PC2_P1DIV_MASK	0x0000000f
+#define PMU43430_PLL0_PC2_P1DIV_SHIFT	0
+#define PMU43430_PLL0_PC2_NDIV_INT_MASK	0x0000ff80
+#define PMU43430_PLL0_PC2_NDIV_INT_SHIFT	7
+#define PMU43430_PLL0_PC4_MDIV2_MASK	0x0000ff00
+#define PMU43430_PLL0_PC4_MDIV2_SHIFT	8
+
+/* 43430 chip SR definitions */
+#define SRAM_43430_SR_ASM_ADDR			0x7f800
+#define CC_SR1_43430_SR_ASM_ADDR		((SRAM_43430_SR_ASM_ADDR - 0x60000) >> 8)
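+/*
+ * Worked value (derived from the defines above): with SRAM_43430_SR_ASM_ADDR
+ * at 0x7f800, CC_SR1_43430_SR_ASM_ADDR is (0x7f800 - 0x60000) >> 8 == 0x1f8.
+ */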
+
+/* 43430 PMU Chip Control bits */
+#define CC2_43430_SDIO_AOS_WAKEUP_MASK			(1 << 24)
+#define CC2_43430_SDIO_AOS_WAKEUP_SHIFT			(24)
+
+
+#define PMU_MACCORE_0_RES_REQ_TIMER		0x1d000000
+#define PMU_MACCORE_0_RES_REQ_MASK		0x5FF2364F
+
+#define PMU_MACCORE_1_RES_REQ_TIMER		0x1d000000
+#define PMU_MACCORE_1_RES_REQ_MASK		0x5FF2364F
+
+/* defines to detect active host interface in use */
+#define CHIP_HOSTIF_PCIEMODE	0x1
+#define CHIP_HOSTIF_USBMODE	0x2
+#define CHIP_HOSTIF_SDIOMODE	0x4
+#define CHIP_HOSTIF_PCIE(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE)
+#define CHIP_HOSTIF_USB(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE)
+#define CHIP_HOSTIF_SDIO(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE)
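+/*
+ * Usage sketch, assuming si_chip_hostif(sih) returns exactly one of the
+ * CHIP_HOSTIF_*MODE values above:
+ *
+ *	if (CHIP_HOSTIF_PCIE(sih))
+ *		setup_pcie();	-- PCIe-attached chip
+ *	else if (CHIP_HOSTIF_SDIO(sih))
+ *		setup_sdio();	-- SDIO-attached chip
+ *
+ * (setup_pcie/setup_sdio are placeholder names, not functions in this tree.)
+ */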
+
+/* 4335 resources */
+#define RES4335_LPLDO_PO           0
+#define RES4335_PMU_BG_PU          1
+#define RES4335_PMU_SLEEP          2
+#define RES4335_RSVD_3             3
+#define RES4335_CBUCK_LPOM_PU		4
+#define RES4335_CBUCK_PFM_PU		5
+#define RES4335_RSVD_6             6
+#define RES4335_RSVD_7             7
+#define RES4335_LNLDO_PU           8
+#define RES4335_XTALLDO_PU         9
+#define RES4335_LDO3P3_PU			10
+#define RES4335_OTP_PU				11
+#define RES4335_XTAL_PU				12
+#define RES4335_SR_CLK_START       13
+#define RES4335_LQ_AVAIL			14
+#define RES4335_LQ_START           15
+#define RES4335_RSVD_16            16
+#define RES4335_WL_CORE_RDY        17
+#define RES4335_ILP_REQ				18
+#define RES4335_ALP_AVAIL			19
+#define RES4335_MINI_PMU           20
+#define RES4335_RADIO_PU			21
+#define RES4335_SR_CLK_STABLE		22
+#define RES4335_SR_SAVE_RESTORE		23
+#define RES4335_SR_PHY_PWRSW		24
+#define RES4335_SR_VDDM_PWRSW      25
+#define RES4335_SR_SUBCORE_PWRSW	26
+#define RES4335_SR_SLEEP           27
+#define RES4335_HT_START           28
+#define RES4335_HT_AVAIL			29
+#define RES4335_MACPHY_CLKAVAIL		30
+
+/* 4335 Chip specific ChipStatus register bits */
+#define CST4335_SPROM_MASK			0x00000020
+#define CST4335_SFLASH_MASK			0x00000040
+#define	CST4335_RES_INIT_MODE_SHIFT	7
+#define	CST4335_RES_INIT_MODE_MASK	0x00000180
+#define CST4335_CHIPMODE_MASK		0xF
+#define CST4335_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4335_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4335_CHIPMODE_USB20D(cs)	(((cs) & (1 << 2)) != 0)	/**< HSIC || USBDA */
+#define CST4335_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+
+/* 4335 Chip specific ChipControl1 register bits */
+#define CCTRL1_4335_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 4335 Chip specific ChipControl2 register bits */
+#define CCTRL2_4335_AOSBLOCK		(1 << 30)
+#define CCTRL2_4335_PMUWAKE		(1 << 31)
+#define PATCHTBL_SIZE			(0x800)
+#define CR4_4335_RAM_BASE                    (0x180000)
+#define CR4_4345_LT_C0_RAM_BASE              (0x1b0000)
+#define CR4_4345_GE_C0_RAM_BASE              (0x198000)
+#define CR4_4349_RAM_BASE                    (0x180000)
+#define CR4_4349_RAM_BASE_FROM_REV_9         (0x160000)
+#define CR4_4350_RAM_BASE                    (0x180000)
+#define CR4_4360_RAM_BASE                    (0x0)
+#define CR4_43602_RAM_BASE                   (0x180000)
+#define CA7_4365_RAM_BASE                    (0x200000)
+
+#define CR4_4347_RAM_BASE                    (0x170000)
+#define CR4_4362_RAM_BASE                    (0x170000)
+
+/* 4335 chip OTP present & OTP select bits. */
+#define SPROM4335_OTP_SELECT	0x00000010
+#define SPROM4335_OTP_PRESENT	0x00000020
+
+/* 4335 GCI specific bits. */
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT	(1 << 24)
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE	25
+#define CC4335_GCI_FUNC_SEL_PAD_SDIO	0x00707770
+
+/* SFLASH clkdiv specific bits. */
+#define CC4335_SFLASH_CLKDIV_MASK	0x1F000000
+#define CC4335_SFLASH_CLKDIV_SHIFT	25
+
+/* 4335 OTP bits for SFLASH. */
+#define CC4335_SROM_OTP_SFLASH	40
+#define CC4335_SROM_OTP_SFLASH_PRESENT	0x1
+#define CC4335_SROM_OTP_SFLASH_TYPE	0x2
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK	0x003C
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT	2
+
+/* 4335 resources--END */
+
+/* 43012 PMU resources based on pmu_params.xls  - Start */
+#define RES43012_MEMLPLDO_PU			0
+#define RES43012_PMU_SLEEP			1
+#define RES43012_FAST_LPO			2
+#define RES43012_BTLPO_3P3			3
+#define RES43012_SR_POK				4
+#define RES43012_DUMMY_PWRSW			5
+#define RES43012_DUMMY_LDO3P3			6
+#define RES43012_DUMMY_BT_LDO3P3		7
+#define RES43012_DUMMY_RADIO			8
+#define RES43012_VDDB_VDDRET			9
+#define RES43012_HV_LDO3P3			10
+#define RES43012_OTP_PU				11
+#define RES43012_XTAL_PU			12
+#define RES43012_SR_CLK_START			13
+#define RES43012_XTAL_STABLE			14
+#define RES43012_FCBS				15
+#define RES43012_CBUCK_MODE			16
+#define RES43012_WL_CORE_RDY			17
+#define RES43012_ILP_REQ			18
+#define RES43012_ALP_AVAIL			19
+#define RES43012_RADIO_LDO			20
+#define RES43012_MINI_PMU			21
+#define RES43012_DUMMY				22
+#define RES43012_SR_SAVE_RESTORE		23
+#define RES43012_SR_PHY_PWRSW			24
+#define RES43012_SR_VDDB_CLDO			25
+#define RES43012_SR_SUBCORE_PWRSW		26
+#define RES43012_SR_SLEEP			27
+#define RES43012_HT_START			28
+#define RES43012_HT_AVAIL			29
+#define RES43012_MACPHY_CLK_AVAIL		30
+#define CST43012_SPROM_PRESENT        0x00000010
+
+/* PLL usage in 43012 */
+#define PMU43012_PLL0_PC0_NDIV_INT_MASK			0x0000003f
+#define PMU43012_PLL0_PC0_NDIV_INT_SHIFT		0
+#define PMU43012_PLL0_PC0_NDIV_FRAC_MASK		0xfffffc00
+#define PMU43012_PLL0_PC0_NDIV_FRAC_SHIFT		10
+#define PMU43012_PLL0_PC3_PDIV_MASK			0x00003c00
+#define PMU43012_PLL0_PC3_PDIV_SHIFT			10
+#define PMU43012_PLL_NDIV_FRAC_BITS			20
+#define PMU43012_PLL_P_DIV_SCALE_BITS			10
+
+#define CCTL_43012_ARM_OFFCOUNT_MASK			0x00000003
+#define CCTL_43012_ARM_OFFCOUNT_SHIFT			0
+#define CCTL_43012_ARM_ONCOUNT_MASK			0x0000000c
+#define CCTL_43012_ARM_ONCOUNT_SHIFT			2
+
+/* PMU Rev >= 30 */
+#define PMU30_ALPCLK_ONEMHZ_ENAB			0x80000000
+
+/* 43012 PMU Chip Control Registers */
+#define PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON		0x00000010
+#define PMUCCTL02_43012_PHY_PWRSW_FORCE_ON		0x00000040
+#define PMUCCTL02_43012_LHL_TIMER_SELECT		0x00000800
+#define PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON		0x00008000
+#define PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB	0x00010000
+
+#define PMUCCTL04_43012_BBPLL_ENABLE_PWRDN			0x00100000
+#define PMUCCTL04_43012_BBPLL_ENABLE_PWROFF			0x00200000
+#define PMUCCTL04_43012_FORCE_BBPLL_ARESET			0x00400000
+#define PMUCCTL04_43012_FORCE_BBPLL_DRESET			0x00800000
+#define PMUCCTL04_43012_FORCE_BBPLL_PWRDN			0x01000000
+#define PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH			0x02000000
+#define PMUCCTL04_43012_FORCE_BBPLL_PWROFF			0x04000000
+#define PMUCCTL04_43012_DISABLE_LQ_AVAIL			0x08000000
+#define PMUCCTL04_43012_DISABLE_HT_AVAIL			0x10000000
+#define PMUCCTL04_43012_USE_LOCK				0x20000000
+#define PMUCCTL04_43012_OPEN_LOOP_ENABLE			0x40000000
+#define PMUCCTL04_43012_FORCE_OPEN_LOOP				0x80000000
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_MASK		0x00000FC0
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_SHIFT	6
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_MASK		0x00FC0000
+#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_SHIFT	18
+#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_MASK		0x07000000
+#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_SHIFT		24
+#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK	0x0003F000
+#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT	12
+#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_MASK	0x00000038
+#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_SHIFT	3
+#define PMUCCTL13_43012_FCBS_UP_TRIG_EN			0x00000400
+
+#define PMUCCTL14_43012_ARMCM3_RESET_INITVAL		0x00000001
+#define PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL		0x00000020
+#define PMUCCTL14_43012_SDIOD_RESET_INIVAL		0x00000400
+#define PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL	0x00001000
+#define PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL		0x00004000
+#define PMUCCTL14_43012_M2MDMA_RESET_INITVAL		0x00008000
+#define PMUCCTL14_43012_DISABLE_LQ_AVAIL		0x08000000
+
+
+/* 4345 Chip specific ChipStatus register bits */
+#define CST4345_SPROM_MASK		0x00000020
+#define CST4345_SFLASH_MASK		0x00000040
+#define CST4345_RES_INIT_MODE_SHIFT	7
+#define CST4345_RES_INIT_MODE_MASK	0x00000180
+#define CST4345_CHIPMODE_MASK		0x4000F
+#define CST4345_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4345_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4345_CHIPMODE_HSIC(cs)	(((cs) & (1 << 2)) != 0)	/* HSIC */
+#define CST4345_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+#define CST4345_CHIPMODE_USB20D(cs)	(((cs) & (1 << 18)) != 0)	/* USBDA */
+
+/* 4350 Chipcommon ChipStatus bits */
+#define CST4350_SDIO_MODE		0x00000001
+#define CST4350_HSIC20D_MODE		0x00000002
+#define CST4350_BP_ON_HSIC_CLK		0x00000004
+#define CST4350_PCIE_MODE		0x00000008
+#define CST4350_USB20D_MODE		0x00000010
+#define CST4350_USB30D_MODE		0x00000020
+#define CST4350_SPROM_PRESENT		0x00000040
+#define CST4350_RSRC_INIT_MODE_0	0x00000080
+#define CST4350_RSRC_INIT_MODE_1	0x00000100
+#define CST4350_SEL0_SDIO		0x00000200
+#define CST4350_SEL1_SDIO		0x00000400
+#define CST4350_SDIO_PAD_MODE		0x00000800
+#define CST4350_BBPLL_LOCKED		0x00001000
+#define CST4350_USBPLL_LOCKED		0x00002000
+#define CST4350_LINE_STATE		0x0000C000
+#define CST4350_SERDES_PIPE_PLLLOCK	0x00010000
+#define CST4350_BT_READY		0x00020000
+#define CST4350_SFLASH_PRESENT		0x00040000
+#define CST4350_CPULESS_ENABLE		0x00080000
+#define CST4350_STRAP_HOST_IFC_1	0x00100000
+#define CST4350_STRAP_HOST_IFC_2	0x00200000
+#define CST4350_STRAP_HOST_IFC_3	0x00400000
+#define CST4350_RAW_SPROM_PRESENT	0x00800000
+#define CST4350_APP_CLK_SWITCH_SEL_RDBACK	0x01000000
+#define CST4350_RAW_RSRC_INIT_MODE_0	0x02000000
+#define CST4350_SDIO_PAD_VDDIO		0x04000000
+#define CST4350_GSPI_MODE		0x08000000
+#define CST4350_PACKAGE_OPTION		0xF0000000
+#define CST4350_PACKAGE_SHIFT		28
+
+/* package option for 4350 */
+#define CST4350_PACKAGE_WLCSP		0x0
+#define CST4350_PACKAGE_PCIE		0x1
+#define CST4350_PACKAGE_WLBGA		0x2
+#define CST4350_PACKAGE_DBG		0x3
+#define CST4350_PACKAGE_USB		0x4
+#define CST4350_PACKAGE_USB_HSIC	0x4
+
+#define CST4350_PKG_MODE(cs)	((cs & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT)
+
+#define CST4350_PKG_WLCSP(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP))
+#define CST4350_PKG_PCIE(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE))
+#define CST4350_PKG_WLBGA(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA))
+#define CST4350_PKG_USB(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB))
+#define CST4350_PKG_USB_HSIC(cs)	(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC))
+
+/* 4350C0 USB PACKAGE using raw_sprom_present to indicate 40MHz xtal */
+#define CST4350_PKG_USB_40M(cs)		(cs & CST4350_RAW_SPROM_PRESENT)
+
+#define CST4350_CHIPMODE_SDIOD(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))
+#define CST4350_CHIPMODE_USB20D(cs)	((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D))
+#define CST4350_CHIPMODE_HSIC20D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D))
+#define CST4350_CHIPMODE_HSIC30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D))
+#define CST4350_CHIPMODE_USB30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D))
+#define CST4350_CHIPMODE_USB30D_WL(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL))
+#define CST4350_CHIPMODE_PCIE(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE))
+
+/* strap_host_ifc strap value */
+#define CST4350_HOST_IFC_MASK		0x00700000
+#define CST4350_HOST_IFC_SHIFT		20
+
+/* host_ifc raw mode */
+#define CST4350_IFC_MODE_SDIOD			0x0
+#define CST4350_IFC_MODE_HSIC20D		0x1
+#define CST4350_IFC_MODE_HSIC30D		0x2
+#define CST4350_IFC_MODE_PCIE			0x3
+#define CST4350_IFC_MODE_USB20D			0x4
+#define CST4350_IFC_MODE_USB30D			0x5
+#define CST4350_IFC_MODE_USB30D_WL		0x6
+#define CST4350_IFC_MODE_USB30D_BT		0x7
+
+#define CST4350_IFC_MODE(cs)	((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT)
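+/*
+ * Worked example (editor's illustration, not part of the original header):
+ * a chipstatus value with strap_host_ifc bits [22:20] set to 0x3 decodes as
+ *	CST4350_IFC_MODE(0x00300000) == CST4350_IFC_MODE_PCIE
+ * so CST4350_CHIPMODE_PCIE(0x00300000) evaluates to true.
+ */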
+
+/* 4350 PMU resources */
+#define RES4350_LPLDO_PU	0
+#define RES4350_PMU_BG_PU	1
+#define RES4350_PMU_SLEEP	2
+#define RES4350_RSVD_3		3
+#define RES4350_CBUCK_LPOM_PU	4
+#define RES4350_CBUCK_PFM_PU	5
+#define RES4350_COLD_START_WAIT	6
+#define RES4350_RSVD_7		7
+#define RES4350_LNLDO_PU	8
+#define RES4350_XTALLDO_PU	9
+#define RES4350_LDO3P3_PU	10
+#define RES4350_OTP_PU		11
+#define RES4350_XTAL_PU		12
+#define RES4350_SR_CLK_START	13
+#define RES4350_LQ_AVAIL	14
+#define RES4350_LQ_START	15
+#define RES4350_PERST_OVR	16
+#define RES4350_WL_CORE_RDY	17
+#define RES4350_ILP_REQ		18
+#define RES4350_ALP_AVAIL	19
+#define RES4350_MINI_PMU	20
+#define RES4350_RADIO_PU	21
+#define RES4350_SR_CLK_STABLE	22
+#define RES4350_SR_SAVE_RESTORE	23
+#define RES4350_SR_PHY_PWRSW	24
+#define RES4350_SR_VDDM_PWRSW	25
+#define RES4350_SR_SUBCORE_PWRSW	26
+#define RES4350_SR_SLEEP	27
+#define RES4350_HT_START	28
+#define RES4350_HT_AVAIL	29
+#define RES4350_MACPHY_CLKAVAIL	30
+
+#define MUXENAB4350_UART_MASK		(0x0000000f)
+#define MUXENAB4350_UART_SHIFT		0
+#define MUXENAB4350_HOSTWAKE_MASK	(0x000000f0)	/**< configure GPIO for host_wake */
+#define MUXENAB4350_HOSTWAKE_SHIFT	4
+#define MUXENAB4349_UART_MASK           (0xf)
+
+
+#define CC4350_GPIO_COUNT		16
+
+/* 4350 GCI function sel values */
+#define CC4350_FNSEL_HWDEF		(0)
+#define CC4350_FNSEL_SAMEASPIN		(1)
+#define CC4350_FNSEL_UART		(2)
+#define CC4350_FNSEL_SFLASH		(3)
+#define CC4350_FNSEL_SPROM		(4)
+#define CC4350_FNSEL_I2C		(5)
+#define CC4350_FNSEL_MISC0		(6)
+#define CC4350_FNSEL_GCI		(7)
+#define CC4350_FNSEL_MISC1		(8)
+#define CC4350_FNSEL_MISC2		(9)
+#define CC4350_FNSEL_PWDOG 		(10)
+#define CC4350_FNSEL_IND		(12)
+#define CC4350_FNSEL_PDN		(13)
+#define CC4350_FNSEL_PUP		(14)
+#define CC4350_FNSEL_TRISTATE		(15)
+#define CC4350C_FNSEL_UART		(3)
+
+
+/* 4350 GPIO */
+#define CC4350_PIN_GPIO_00		(0)
+#define CC4350_PIN_GPIO_01		(1)
+#define CC4350_PIN_GPIO_02		(2)
+#define CC4350_PIN_GPIO_03		(3)
+#define CC4350_PIN_GPIO_04		(4)
+#define CC4350_PIN_GPIO_05		(5)
+#define CC4350_PIN_GPIO_06		(6)
+#define CC4350_PIN_GPIO_07		(7)
+#define CC4350_PIN_GPIO_08		(8)
+#define CC4350_PIN_GPIO_09		(9)
+#define CC4350_PIN_GPIO_10		(10)
+#define CC4350_PIN_GPIO_11		(11)
+#define CC4350_PIN_GPIO_12		(12)
+#define CC4350_PIN_GPIO_13		(13)
+#define CC4350_PIN_GPIO_14		(14)
+#define CC4350_PIN_GPIO_15		(15)
+
+#define CC4350_RSVD_16_SHIFT		16
+
+#define CC2_4350_PHY_PWRSW_UPTIME_MASK		(0xf << 0)
+#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT		(0)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_MASK	(0xf << 4)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT	(4)
+#define CC2_4350_VDDM_PWRSW_UPTIME_MASK		(0xf << 8)
+#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT	(8)
+#define CC2_4350_SBC_PWRSW_DNDELAY_MASK		(0x3 << 12)
+#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT	(12)
+#define CC2_4350_PHY_PWRSW_DNDELAY_MASK		(0x3 << 14)
+#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT	(14)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK	(0x3 << 16)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT	(16)
+#define CC2_4350_VDDM_PWRSW_EN_MASK		(1 << 20)
+#define CC2_4350_VDDM_PWRSW_EN_SHIFT		(20)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK		(1 << 21)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT	(21)
+#define CC2_4350_SDIO_AOS_WAKEUP_MASK		(1 << 24)
+#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT		(24)
+
+/* Applies to 4335/4350/4345 */
+#define CC3_SR_CLK_SR_MEM_MASK			(1 << 0)
+#define CC3_SR_CLK_SR_MEM_SHIFT			(0)
+#define CC3_SR_BIT1_TBD_MASK			(1 << 1)
+#define CC3_SR_BIT1_TBD_SHIFT			(1)
+#define CC3_SR_ENGINE_ENABLE_MASK		(1 << 2)
+#define CC3_SR_ENGINE_ENABLE_SHIFT		(2)
+#define CC3_SR_BIT3_TBD_MASK			(1 << 3)
+#define CC3_SR_BIT3_TBD_SHIFT			(3)
+#define CC3_SR_MINDIV_FAST_CLK_MASK		(0xF << 4)
+#define CC3_SR_MINDIV_FAST_CLK_SHIFT		(4)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_MASK	(1 << 8)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_SHIFT	(8)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_MASK	(1 << 9)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_SHIFT	(9)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_MASK	(1 << 10)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_SHIFT	(10)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_MASK	(1 << 11)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_SHIFT	(11)
+#define CC3_SR_NUM_CLK_HIGH_MASK		(0x7 << 12)
+#define CC3_SR_NUM_CLK_HIGH_SHIFT		(12)
+#define CC3_SR_BIT15_TBD_MASK			(1 << 15)
+#define CC3_SR_BIT15_TBD_SHIFT			(15)
+#define CC3_SR_PHY_FUNC_PIC_MASK		(1 << 16)
+#define CC3_SR_PHY_FUNC_PIC_SHIFT		(16)
+#define CC3_SR_BIT17_19_TBD_MASK		(0x7 << 17)
+#define CC3_SR_BIT17_19_TBD_SHIFT		(17)
+#define CC3_SR_CHIP_TRIGGER_1_MASK		(1 << 20)
+#define CC3_SR_CHIP_TRIGGER_1_SHIFT		(20)
+#define CC3_SR_CHIP_TRIGGER_2_MASK		(1 << 21)
+#define CC3_SR_CHIP_TRIGGER_2_SHIFT		(21)
+#define CC3_SR_CHIP_TRIGGER_3_MASK		(1 << 22)
+#define CC3_SR_CHIP_TRIGGER_3_SHIFT		(22)
+#define CC3_SR_CHIP_TRIGGER_4_MASK		(1 << 23)
+#define CC3_SR_CHIP_TRIGGER_4_SHIFT		(23)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_MASK		(1 << 24)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_SHIFT		(24)
+#define CC3_SR_BIT25_26_TBD_MASK		(0x3 << 25)
+#define CC3_SR_BIT25_26_TBD_SHIFT		(25)
+#define CC3_SR_ALLOW_SBC_STBY_MASK		(1 << 27)
+#define CC3_SR_ALLOW_SBC_STBY_SHIFT		(27)
+#define CC3_SR_GPIO_MUX_MASK			(0xF << 28)
+#define CC3_SR_GPIO_MUX_SHIFT			(28)
+
+/* Applies to 4335/4350/4345 */
+#define CC4_SR_INIT_ADDR_MASK		(0x3FF0000)
+#define CC4_4350_SR_ASM_ADDR		(0x30)
+#define CC4_4350_C0_SR_ASM_ADDR		(0x0)
+#define CC4_4335_SR_ASM_ADDR		(0x48)
+#define CC4_4345_SR_ASM_ADDR		(0x48)
+#define CC4_SR_INIT_ADDR_SHIFT		(16)
+
+#define CC4_4350_EN_SR_CLK_ALP_MASK	(1 << 30)
+#define CC4_4350_EN_SR_CLK_ALP_SHIFT	(30)
+#define CC4_4350_EN_SR_CLK_HT_MASK	(1 << 31)
+#define CC4_4350_EN_SR_CLK_HT_SHIFT	(31)
+
+#define VREG4_4350_MEMLPDO_PU_MASK	(1 << 31)
+#define VREG4_4350_MEMLPDO_PU_SHIFT	31
+
+#define VREG6_4350_SR_EXT_CLKDIR_MASK	(1 << 20)
+#define VREG6_4350_SR_EXT_CLKDIR_SHIFT	20
+#define VREG6_4350_SR_EXT_CLKDIV_MASK	(0x3 << 21)
+#define VREG6_4350_SR_EXT_CLKDIV_SHIFT	21
+#define VREG6_4350_SR_EXT_CLKEN_MASK	(1 << 23)
+#define VREG6_4350_SR_EXT_CLKEN_SHIFT	23
+
+#define CC5_4350_PMU_EN_ASSERT_MASK	(1 << 13)
+#define CC5_4350_PMU_EN_ASSERT_SHIFT	(13)
+
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK	(1 << 4)
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT	(4)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK	(1 << 6)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT	(6)
+#define CC6_4350_PMU_EN_EXT_PERST_MASK		(1 << 17)
+#define CC6_4350_PMU_EN_EXT_PERST_SHIFT		(17)
+#define CC6_4350_PMU_EN_WAKEUP_MASK		(1 << 18)
+#define CC6_4350_PMU_EN_WAKEUP_SHIFT		(18)
+
+#define CC7_4350_PMU_EN_ASSERT_L2_MASK	(1 << 26)
+#define CC7_4350_PMU_EN_ASSERT_L2_SHIFT	(26)
+#define CC7_4350_PMU_EN_MDIO_MASK	(1 << 27)
+#define CC7_4350_PMU_EN_MDIO_SHIFT	(27)
+
+#define CC6_4345_PMU_EN_PERST_DEASSERT_MASK		(1 << 13)
+#define CC6_4345_PMU_EN_PERST_DEASSERT_SHIF		(13)
+#define CC6_4345_PMU_EN_L2_DEASSERT_MASK		(1 << 14)
+#define CC6_4345_PMU_EN_L2_DEASSERT_SHIF		(14)
+#define CC6_4345_PMU_EN_ASSERT_L2_MASK		(1 << 15)
+#define CC6_4345_PMU_EN_ASSERT_L2_SHIFT		(15)
+#define CC6_4345_PMU_EN_MDIO_MASK		(1 << 24)
+#define CC6_4345_PMU_EN_MDIO_SHIFT		(24)
+
+/* GCI chipcontrol register indices */
+#define CC_GCI_CHIPCTRL_00	(0)
+#define CC_GCI_CHIPCTRL_01	(1)
+#define CC_GCI_CHIPCTRL_02	(2)
+#define CC_GCI_CHIPCTRL_03	(3)
+#define CC_GCI_CHIPCTRL_04	(4)
+#define CC_GCI_CHIPCTRL_05	(5)
+#define CC_GCI_CHIPCTRL_06	(6)
+#define CC_GCI_CHIPCTRL_07	(7)
+#define CC_GCI_CHIPCTRL_08	(8)
+#define CC_GCI_CHIPCTRL_09	(9)
+#define CC_GCI_CHIPCTRL_10	(10)
+#define CC_GCI_CHIPCTRL_11	(11)
+#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
+
+#define CC_GCI_06_JTAG_SEL_SHIFT	4
+#define CC_GCI_06_JTAG_SEL_MASK		(1 << 4)
+
+#define CC_GCI_NUMCHIPCTRLREGS(cap1)	((cap1 & 0xF00) >> 8)
+
+/* GCI chipstatus register indices */
+#define GCI_CHIPSTATUS_00	(0)
+#define GCI_CHIPSTATUS_01	(1)
+#define GCI_CHIPSTATUS_02	(2)
+#define GCI_CHIPSTATUS_03	(3)
+#define GCI_CHIPSTATUS_04	(4)
+#define GCI_CHIPSTATUS_05	(5)
+#define GCI_CHIPSTATUS_06	(6)
+#define GCI_CHIPSTATUS_07	(7)
+#define GCI_CHIPSTATUS_08	(8)
+
+/* 43012 GCI chipstatus registers */
+#define GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK	(1 << 3)
+
+/* 4345 PMU resources */
+#define RES4345_LPLDO_PU		0
+#define RES4345_PMU_BG_PU		1
+#define RES4345_PMU_SLEEP		2
+#define RES4345_HSICLDO_PU		3
+#define RES4345_CBUCK_LPOM_PU		4
+#define RES4345_CBUCK_PFM_PU		5
+#define RES4345_COLD_START_WAIT		6
+#define RES4345_RSVD_7			7
+#define RES4345_LNLDO_PU		8
+#define RES4345_XTALLDO_PU		9
+#define RES4345_LDO3P3_PU		10
+#define RES4345_OTP_PU			11
+#define RES4345_XTAL_PU			12
+#define RES4345_SR_CLK_START		13
+#define RES4345_LQ_AVAIL		14
+#define RES4345_LQ_START		15
+#define RES4345_PERST_OVR		16
+#define RES4345_WL_CORE_RDY		17
+#define RES4345_ILP_REQ			18
+#define RES4345_ALP_AVAIL		19
+#define RES4345_MINI_PMU		20
+#define RES4345_RADIO_PU		21
+#define RES4345_SR_CLK_STABLE		22
+#define RES4345_SR_SAVE_RESTORE		23
+#define RES4345_SR_PHY_PWRSW		24
+#define RES4345_SR_VDDM_PWRSW		25
+#define RES4345_SR_SUBCORE_PWRSW	26
+#define RES4345_SR_SLEEP		27
+#define RES4345_HT_START		28
+#define RES4345_HT_AVAIL		29
+#define RES4345_MACPHY_CLK_AVAIL	30
+
+/* 43012 pins
+ * note: only the values set as default/used are added here.
+ */
+#define CC43012_PIN_GPIO_00		(0)
+#define CC43012_PIN_GPIO_01		(1)
+#define CC43012_PIN_GPIO_02		(2)
+#define CC43012_PIN_GPIO_03		(3)
+#define CC43012_PIN_GPIO_04		(4)
+#define CC43012_PIN_GPIO_05		(5)
+#define CC43012_PIN_GPIO_06		(6)
+#define CC43012_PIN_GPIO_07		(7)
+#define CC43012_PIN_GPIO_08		(8)
+#define CC43012_PIN_GPIO_09		(9)
+#define CC43012_PIN_GPIO_10		(10)
+#define CC43012_PIN_GPIO_11		(11)
+#define CC43012_PIN_GPIO_12		(12)
+#define CC43012_PIN_GPIO_13		(13)
+#define CC43012_PIN_GPIO_14		(14)
+#define CC43012_PIN_GPIO_15		(15)
+
+/* 43012 GCI function sel values */
+#define CC43012_FNSEL_HWDEF		(0)
+#define CC43012_FNSEL_SAMEASPIN	(1)
+#define CC43012_FNSEL_GPIO0		(2)
+#define CC43012_FNSEL_GPIO1		(3)
+#define CC43012_FNSEL_GCI0		(4)
+#define CC43012_FNSEL_GCI1		(5)
+#define CC43012_FNSEL_DBG_UART	(6)
+#define CC43012_FNSEL_I2C		(7)
+#define CC43012_FNSEL_BT_SFLASH	(8)
+#define CC43012_FNSEL_MISC0		(9)
+#define CC43012_FNSEL_MISC1		(10)
+#define CC43012_FNSEL_MISC2		(11)
+#define CC43012_FNSEL_IND		(12)
+#define CC43012_FNSEL_PDN		(13)
+#define CC43012_FNSEL_PUP		(14)
+#define CC43012_FNSEL_TRI		(15)
+
+/* 4335 pins
+* note: only the values set as default/used are added here.
+*/
+#define CC4335_PIN_GPIO_00		(0)
+#define CC4335_PIN_GPIO_01		(1)
+#define CC4335_PIN_GPIO_02		(2)
+#define CC4335_PIN_GPIO_03		(3)
+#define CC4335_PIN_GPIO_04		(4)
+#define CC4335_PIN_GPIO_05		(5)
+#define CC4335_PIN_GPIO_06		(6)
+#define CC4335_PIN_GPIO_07		(7)
+#define CC4335_PIN_GPIO_08		(8)
+#define CC4335_PIN_GPIO_09		(9)
+#define CC4335_PIN_GPIO_10		(10)
+#define CC4335_PIN_GPIO_11		(11)
+#define CC4335_PIN_GPIO_12		(12)
+#define CC4335_PIN_GPIO_13		(13)
+#define CC4335_PIN_GPIO_14		(14)
+#define CC4335_PIN_GPIO_15		(15)
+#define CC4335_PIN_SDIO_CLK		(16)
+#define CC4335_PIN_SDIO_CMD		(17)
+#define CC4335_PIN_SDIO_DATA0	(18)
+#define CC4335_PIN_SDIO_DATA1	(19)
+#define CC4335_PIN_SDIO_DATA2	(20)
+#define CC4335_PIN_SDIO_DATA3	(21)
+#define CC4335_PIN_RF_SW_CTRL_6	(22)
+#define CC4335_PIN_RF_SW_CTRL_7	(23)
+#define CC4335_PIN_RF_SW_CTRL_8	(24)
+#define CC4335_PIN_RF_SW_CTRL_9	(25)
+/* Last GPIO Pad */
+#define CC4335_PIN_GPIO_LAST	(31)
+
+/* 4335 GCI function sel values
+*/
+#define CC4335_FNSEL_HWDEF		(0)
+#define CC4335_FNSEL_SAMEASPIN	(1)
+#define CC4335_FNSEL_GPIO0		(2)
+#define CC4335_FNSEL_GPIO1		(3)
+#define CC4335_FNSEL_GCI0		(4)
+#define CC4335_FNSEL_GCI1		(5)
+#define CC4335_FNSEL_UART		(6)
+#define CC4335_FNSEL_SFLASH		(7)
+#define CC4335_FNSEL_SPROM		(8)
+#define CC4335_FNSEL_MISC0		(9)
+#define CC4335_FNSEL_MISC1		(10)
+#define CC4335_FNSEL_MISC2		(11)
+#define CC4335_FNSEL_IND		(12)
+#define CC4335_FNSEL_PDN		(13)
+#define CC4335_FNSEL_PUP		(14)
+#define CC4335_FNSEL_TRI		(15)
+
+/* GCI Core Control Reg */
+#define	GCI_CORECTRL_SR_MASK	(1 << 0)	/**< SECI block Reset */
+#define	GCI_CORECTRL_RSL_MASK	(1 << 1)	/**< ResetSECILogic */
+#define	GCI_CORECTRL_ES_MASK	(1 << 2)	/**< EnableSECI */
+#define	GCI_CORECTRL_FSL_MASK	(1 << 3)	/**< Force SECI Out Low */
+#define	GCI_CORECTRL_SOM_MASK	(7 << 4)	/**< SECI Op Mode */
+#define	GCI_CORECTRL_US_MASK	(1 << 7)	/**< Update SECI */
+#define	GCI_CORECTRL_BOS_MASK	(1 << 8)	/**< Break On Sleep */
+
+/* 4345 pins
+* note: only the values set as default/used are added here.
+*/
+#define CC4345_PIN_GPIO_00		(0)
+#define CC4345_PIN_GPIO_01		(1)
+#define CC4345_PIN_GPIO_02		(2)
+#define CC4345_PIN_GPIO_03		(3)
+#define CC4345_PIN_GPIO_04		(4)
+#define CC4345_PIN_GPIO_05		(5)
+#define CC4345_PIN_GPIO_06		(6)
+#define CC4345_PIN_GPIO_07		(7)
+#define CC4345_PIN_GPIO_08		(8)
+#define CC4345_PIN_GPIO_09		(9)
+#define CC4345_PIN_GPIO_10		(10)
+#define CC4345_PIN_GPIO_11		(11)
+#define CC4345_PIN_GPIO_12		(12)
+#define CC4345_PIN_GPIO_13		(13)
+#define CC4345_PIN_GPIO_14		(14)
+#define CC4345_PIN_GPIO_15		(15)
+#define CC4345_PIN_GPIO_16		(16)
+#define CC4345_PIN_SDIO_CLK		(17)
+#define CC4345_PIN_SDIO_CMD		(18)
+#define CC4345_PIN_SDIO_DATA0	(19)
+#define CC4345_PIN_SDIO_DATA1	(20)
+#define CC4345_PIN_SDIO_DATA2	(21)
+#define CC4345_PIN_SDIO_DATA3	(22)
+#define CC4345_PIN_RF_SW_CTRL_0	(23)
+#define CC4345_PIN_RF_SW_CTRL_1	(24)
+#define CC4345_PIN_RF_SW_CTRL_2	(25)
+#define CC4345_PIN_RF_SW_CTRL_3	(26)
+#define CC4345_PIN_RF_SW_CTRL_4	(27)
+#define CC4345_PIN_RF_SW_CTRL_5	(28)
+#define CC4345_PIN_RF_SW_CTRL_6	(29)
+#define CC4345_PIN_RF_SW_CTRL_7	(30)
+#define CC4345_PIN_RF_SW_CTRL_8	(31)
+#define CC4345_PIN_RF_SW_CTRL_9	(32)
+
+/* 4345 GCI function sel values
+*/
+#define CC4345_FNSEL_HWDEF		(0)
+#define CC4345_FNSEL_SAMEASPIN		(1)
+#define CC4345_FNSEL_GPIO0		(2)
+#define CC4345_FNSEL_GPIO1		(3)
+#define CC4345_FNSEL_GCI0		(4)
+#define CC4345_FNSEL_GCI1		(5)
+#define CC4345_FNSEL_UART		(6)
+#define CC4345_FNSEL_SFLASH		(7)
+#define CC4345_FNSEL_SPROM		(8)
+#define CC4345_FNSEL_MISC0		(9)
+#define CC4345_FNSEL_MISC1		(10)
+#define CC4345_FNSEL_MISC2		(11)
+#define CC4345_FNSEL_IND		(12)
+#define CC4345_FNSEL_PDN		(13)
+#define CC4345_FNSEL_PUP		(14)
+#define CC4345_FNSEL_TRI		(15)
+
+#define MUXENAB4345_UART_MASK		(0x0000000f)
+#define MUXENAB4345_UART_SHIFT		0
+#define MUXENAB4345_HOSTWAKE_MASK	(0x000000f0)
+#define MUXENAB4345_HOSTWAKE_SHIFT	4
+
+/* 4349 Group (4349, 4355, 4359) GCI AVS function sel values */
+#define CC4349_GRP_GCI_AVS_CTRL_MASK   (0xffe00000)
+#define CC4349_GRP_GCI_AVS_CTRL_SHIFT  (21)
+#define CC4349_GRP_GCI_AVS_CTRL_ENAB   (1 << 5)
+
+/* 4345 GCI AVS function sel values */
+#define CC4345_GCI_AVS_CTRL_MASK   (0xfc)
+#define CC4345_GCI_AVS_CTRL_SHIFT  (2)
+#define CC4345_GCI_AVS_CTRL_ENAB   (1 << 5)
+
+/* 43430 Pin */
+#define CC43430_PIN_GPIO_00		(0)
+#define CC43430_PIN_GPIO_01		(1)
+#define CC43430_PIN_GPIO_02		(2)
+#define CC43430_PIN_GPIO_07		(7)
+#define CC43430_PIN_GPIO_08		(8)
+#define CC43430_PIN_GPIO_09		(9)
+#define CC43430_PIN_GPIO_10		(10)
+
+#define CC43430_FNSEL_SDIO_INT		(2)
+#define CC43430_FNSEL_6_FAST_UART	(6)
+#define CC43430_FNSEL_10_FAST_UART	(10)
+
+#define MUXENAB43430_UART_MASK		(0x0000000f)
+#define MUXENAB43430_UART_SHIFT		0
+#define MUXENAB43430_HOSTWAKE_MASK	(0x000000f0)	/* configure GPIO for SDIO host_wake */
+#define MUXENAB43430_HOSTWAKE_SHIFT	4
+
+#define CC43430_FNSEL_SAMEASPIN		(1)
+#define CC43430_RFSWCTRL_EN_MASK   (0x7f8)
+#define CC43430_RFSWCTRL_EN_SHIFT  (3)
+
+/* GCI GPIO for function sel GCI-0/GCI-1 */
+#define CC_GCI_GPIO_0			(0)
+#define CC_GCI_GPIO_1			(1)
+#define CC_GCI_GPIO_2			(2)
+#define CC_GCI_GPIO_3			(3)
+#define CC_GCI_GPIO_4			(4)
+#define CC_GCI_GPIO_5			(5)
+#define CC_GCI_GPIO_6			(6)
+#define CC_GCI_GPIO_7			(7)
+#define CC_GCI_GPIO_8			(8)
+#define CC_GCI_GPIO_9			(9)
+#define CC_GCI_GPIO_10			(10)
+#define CC_GCI_GPIO_11			(11)
+#define CC_GCI_GPIO_12			(12)
+#define CC_GCI_GPIO_13			(13)
+#define CC_GCI_GPIO_14			(14)
+#define CC_GCI_GPIO_15			(15)
+
+
+/* indicates Invalid GPIO, e.g. when PAD GPIO doesn't map to GCI GPIO */
+#define CC_GCI_GPIO_INVALID		0xFF
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL(val, pos)  ((((uint32)val) << pos) & GCIMASK(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL(val, pos)	((val >> pos) & 0xF)
+
+
+/* find the 8 bit mask given the bit position */
+#define GCIMASK_8B(pos)  (((uint32)0xFF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_8B(val, pos)  ((((uint32)val) << pos) & GCIMASK_8B(pos))
+/* Extract byte from a given position */
+#define GCIGETNBL_8B(val, pos)	((val >> pos) & 0xFF)
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK_4B(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_4B(val, pos)  ((((uint32)val) << pos) & GCIMASK_4B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_4B(val, pos)	((val >> pos) & 0xF)
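+/*
+ * Illustrative usage (editor's sketch, not part of the original header):
+ * updating a 4-bit field at bit position pos of a chipcontrol value could
+ * look like
+ *	regval = (regval & ~GCIMASK_4B(pos)) | GCIPOSVAL_4B(val, pos);
+ * and the field can be read back with GCIGETNBL_4B(regval, pos).
+ */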
+
+
+/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */
+#define GCI_INTSTATUS_RBI	(1 << 0)	/**< Rx Break Interrupt */
+#define GCI_INTSTATUS_UB	(1 << 1)	/**< UART Break Interrupt */
+#define GCI_INTSTATUS_SPE	(1 << 2)	/**< SECI Parity Error Interrupt */
+#define GCI_INTSTATUS_SFE	(1 << 3)	/**< SECI Framing Error Interrupt */
+#define GCI_INTSTATUS_SRITI	(1 << 9)	/**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTSTATUS_STFF	(1 << 10)	/**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTSTATUS_STFAE	(1 << 11)	/**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTSTATUS_SRFAF	(1 << 12)	/**< SECI Rx FIFO Almost Full */
+#define GCI_INTSTATUS_SRFNE	(1 << 14)	/**< SECI Rx FIFO Not Empty */
+#define GCI_INTSTATUS_SRFOF	(1 << 15)	/**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTSTATUS_GPIOINT	(1 << 25)	/**< GCIGpioInt */
+#define GCI_INTSTATUS_GPIOWAKE	(1 << 26)	/**< GCIGpioWake */
+
+/* 4335 GCI IntMask Register bits. */
+#define GCI_INTMASK_RBI		(1 << 0)	/**< Rx Break Interrupt */
+#define GCI_INTMASK_UB		(1 << 1)	/**< UART Break Interrupt */
+#define GCI_INTMASK_SPE		(1 << 2)	/**< SECI Parity Error Interrupt */
+#define GCI_INTMASK_SFE		(1 << 3)	/**< SECI Framing Error Interrupt */
+#define GCI_INTMASK_SRITI	(1 << 9)	/**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTMASK_STFF	(1 << 10)	/**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTMASK_STFAE	(1 << 11)	/**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTMASK_SRFAF	(1 << 12)	/**< SECI Rx FIFO Almost Full */
+#define GCI_INTMASK_SRFNE	(1 << 14)	/**< SECI Rx FIFO Not Empty */
+#define GCI_INTMASK_SRFOF	(1 << 15)	/**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTMASK_GPIOINT	(1 << 25)	/**< GCIGpioInt */
+#define GCI_INTMASK_GPIOWAKE	(1 << 26)	/**< GCIGpioWake */
+
+/* 4335 GCI WakeMask Register bits. */
+#define GCI_WAKEMASK_RBI	(1 << 0)	/**< Rx Break Interrupt */
+#define GCI_WAKEMASK_UB		(1 << 1)	/**< UART Break Interrupt */
+#define GCI_WAKEMASK_SPE	(1 << 2)	/**< SECI Parity Error Interrupt */
+#define GCI_WAKEMASK_SFE	(1 << 3)	/**< SECI Framing Error Interrupt */
+#define GCI_WAKE_SRITI		(1 << 9)	/**< SECI Rx Idle Timer Interrupt */
+#define GCI_WAKEMASK_STFF	(1 << 10)	/**< SECI Tx FIFO Full Interrupt */
+#define GCI_WAKEMASK_STFAE	(1 << 11)	/**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_WAKEMASK_SRFAF	(1 << 12)	/**< SECI Rx FIFO Almost Full */
+#define GCI_WAKEMASK_SRFNE	(1 << 14)	/**< SECI Rx FIFO Not Empty */
+#define GCI_WAKEMASK_SRFOF	(1 << 15)	/**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_WAKEMASK_GPIOINT	(1 << 25)	/**< GCIGpioInt */
+#define GCI_WAKEMASK_GPIOWAKE	(1 << 26)	/**< GCIGpioWake */
+
+#define	GCI_WAKE_ON_GCI_GPIO1	1
+#define	GCI_WAKE_ON_GCI_GPIO2	2
+#define	GCI_WAKE_ON_GCI_GPIO3	3
+#define	GCI_WAKE_ON_GCI_GPIO4	4
+#define	GCI_WAKE_ON_GCI_GPIO5	5
+#define	GCI_WAKE_ON_GCI_GPIO6	6
+#define	GCI_WAKE_ON_GCI_GPIO7	7
+#define	GCI_WAKE_ON_GCI_GPIO8	8
+#define	GCI_WAKE_ON_GCI_SECI_IN	9
+
+/* 43012 ULB dividers */
+#define PMU43012_CC0_ULB_DIVMASK		0xfffffc00
+#define PMU43012_10MHZ_ULB_DIV			((1 << 0) | (1 << 5))
+#define PMU43012_5MHZ_ULB_DIV			((3 << 0) | (3 << 5))
+#define PMU43012_2P5MHZ_ULB_DIV			((7 << 0) | (7 << 5))
+#define PMU43012_ULB_NO_DIV				0
+
+/* 4335 MUX options. Each nibble belongs to a setting; a non-zero value selects
+* a logic function. For now only UART (for the bootloader) is used.
+*/
+#define MUXENAB4335_UART_MASK		(0x0000000f)
+
+#define MUXENAB4335_UART_SHIFT		0
+#define MUXENAB4335_HOSTWAKE_MASK	(0x000000f0)	/**< configure GPIO for SDIO host_wake */
+#define MUXENAB4335_HOSTWAKE_SHIFT	4
+#define MUXENAB4335_GETIX(val, name) \
+	((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)
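+/*
+ * Example (editor's illustration, not part of the original header): with a
+ * muxenab value of 0x21 the low nibble holds the UART setting and the next
+ * nibble the host_wake setting, so
+ *	MUXENAB4335_GETIX(0x21, UART)     == 0
+ *	MUXENAB4335_GETIX(0x21, HOSTWAKE) == 1
+ */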
+
+/* 43012 MUX options */
+#define MUXENAB43012_HOSTWAKE_MASK	(0x00000001)
+#define MUXENAB43012_GETIX(val, name) (val - 1)
+
+/*
+* Maximum delay for the PMU state transition in us.
+* This is an upper bound intended for spinwaits etc.
+*/
+#define PMU_MAX_TRANSITION_DLY	15000
+
+/* PMU resource up transition time in ILP cycles */
+#define PMURES_UP_TRANSITION	2
+
+/* 53573 PMU Resource */
+#define RES53573_REGULATOR_PU     0
+#define RES53573_XTALLDO_PU       1
+#define RES53573_XTAL_PU          2
+#define RES53573_MINI_PMU         3
+#define RES53573_RADIO_PU         4
+#define RES53573_ILP_REQ          5
+#define RES53573_ALP_AVAIL        6
+#define RES53573_CPUPLL_LDO_PU    7
+#define RES53573_CPU_PLL_PU       8
+#define RES53573_WLAN_BB_PLL_PU   9
+#define RES53573_MISCPLL_LDO_PU    10
+#define RES53573_MISCPLL_PU       11
+#define RES53573_AUDIOPLL_PU      12
+#define RES53573_PCIEPLL_LDO_PU   13
+#define RES53573_PCIEPLL_PU       14
+#define RES53573_DDRPLL_LDO_PU    15
+#define RES53573_DDRPLL_PU        16
+#define RES53573_HT_AVAIL         17
+#define RES53573_MACPHY_CLK_AVAIL 18
+#define RES53573_OTP_PU           19
+#define RES53573_RSVD20           20
+
+/* 53573 Chip status registers */
+#define CST53573_LOCK_CPUPLL          0x00000001
+#define CST53573_LOCK_MISCPLL         0x00000002
+#define CST53573_LOCK_DDRPLL          0x00000004
+#define CST53573_LOCK_PCIEPLL         0x00000008
+#define CST53573_EPHY_ENERGY_DET      0x00001f00
+#define CST53573_RAW_ENERGY           0x0003e000
+#define CST53573_BBPLL_LOCKED_O       0x00040000
+#define CST53573_SERDES_PIPE_PLLLOCK  0x00080000
+#define CST53573_STRAP_PCIE_EP_MODE   0x00100000
+#define CST53573_EPHY_PLL_LOCK        0x00200000
+#define CST53573_AUDIO_PLL_LOCKED_O   0x00400000
+#define CST53573_PCIE_LINK_IN_L11     0x01000000
+#define CST53573_PCIE_LINK_IN_L12     0x02000000
+#define CST53573_DIN_PACKAGEOPTION    0xf0000000
+
+/* 53573 Chip control registers macro definitions */
+#define PMU_53573_CHIPCTL1                      1
+#define PMU_53573_CC1_HT_CLK_REQ_CTRL_MASK      0x00000010
+#define PMU_53573_CC1_HT_CLK_REQ_CTRL           0x00000010
+
+#define PMU_53573_CHIPCTL3                      3
+#define PMU_53573_CC3_ENABLE_CLOSED_LOOP_MASK   0x00000010
+#define PMU_53573_CC3_ENABLE_CLOSED_LOOP        0x00000000
+#define PMU_53573_CC3_ENABLE_BBPLL_PWRDOWN_MASK 0x00000002
+#define PMU_53573_CC3_ENABLE_BBPLL_PWRDOWN      0x00000002
+
+#define CST53573_CHIPMODE_PCIE(cs)		FALSE
+
+
+/* SECI Status (0x134) & Mask (0x138) bits - Rev 35 */
+#define SECI_STAT_BI	(1 << 0)	/* Break Interrupt */
+#define SECI_STAT_SPE	(1 << 1)	/* Parity Error */
+#define SECI_STAT_SFE	(1 << 2)	/* Framing Error */
+#define SECI_STAT_SDU	(1 << 3)	/* Data Updated */
+#define SECI_STAT_SADU	(1 << 4)	/* Auxiliary Data Updated */
+#define SECI_STAT_SAS	(1 << 6)	/* AUX State */
+#define SECI_STAT_SAS2	(1 << 7)	/* AUX2 State */
+#define SECI_STAT_SRITI	(1 << 8)	/* Idle Timer Interrupt */
+#define SECI_STAT_STFF	(1 << 9)	/* Tx FIFO Full */
+#define SECI_STAT_STFAE	(1 << 10)	/* Tx FIFO Almost Empty */
+#define SECI_STAT_SRFE	(1 << 11)	/* Rx FIFO Empty */
+#define SECI_STAT_SRFAF	(1 << 12)	/* Rx FIFO Almost Full */
+#define SECI_STAT_SFCE	(1 << 13)	/* Flow Control Event */
+
+/* SECI configuration */
+#define SECI_MODE_UART			0x0
+#define SECI_MODE_SECI			0x1
+#define SECI_MODE_LEGACY_3WIRE_BT	0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN	0x3
+#define SECI_MODE_HALF_SECI		0x4
+
+#define SECI_RESET		(1 << 0)
+#define SECI_RESET_BAR_UART	(1 << 1)
+#define SECI_ENAB_SECI_ECI	(1 << 2)
+#define SECI_ENAB_SECIOUT_DIS	(1 << 3)
+#define SECI_MODE_MASK		0x7
+#define SECI_MODE_SHIFT		4 /* (bits 5, 6, 7) */
+#define SECI_UPD_SECI		(1 << 7)
+
+#define SECI_SLIP_ESC_CHAR	0xDB
+#define SECI_SIGNOFF_0		SECI_SLIP_ESC_CHAR
+#define SECI_SIGNOFF_1     0
+#define SECI_REFRESH_REQ	0xDA
+
+/* seci clk_ctl_st bits */
+#define CLKCTL_STS_HT_AVAIL_REQ		(1 << 4)
+#define CLKCTL_STS_SECI_CLK_REQ		(1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL	(1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE		(1 << 0)
+#define SECI_UART_MSR_RTS_STATE		(1 << 1)
+#define SECI_UART_SECI_IN_STATE		(1 << 2)
+#define SECI_UART_SECI_IN2_STATE	(1 << 3)
+
+/* GCI RX FIFO Control Register */
+#define	GCI_RXF_LVL_MASK	(0xFF << 0)
+#define	GCI_RXF_TIMEOUT_MASK	(0xFF << 8)
+
+/* GCI UART Registers' Bit definitions */
+/* Seci Fifo Level Register */
+#define	SECI_TXF_LVL_MASK	(0x3F << 8)
+#define	TXF_AE_LVL_DEFAULT	0x4
+#define	SECI_RXF_LVL_FC_MASK	(0x3F << 16)
+
+/* SeciUARTFCR Bit definitions */
+#define	SECI_UART_FCR_RFR		(1 << 0)
+#define	SECI_UART_FCR_TFR		(1 << 1)
+#define	SECI_UART_FCR_SR		(1 << 2)
+#define	SECI_UART_FCR_THP		(1 << 3)
+#define	SECI_UART_FCR_AB		(1 << 4)
+#define	SECI_UART_FCR_ATOE		(1 << 5)
+#define	SECI_UART_FCR_ARTSOE		(1 << 6)
+#define	SECI_UART_FCR_ABV		(1 << 7)
+#define	SECI_UART_FCR_ALM		(1 << 8)
+
+/* SECI UART LCR register bits */
+#define SECI_UART_LCR_STOP_BITS		(1 << 0) /* 0 - 1bit, 1 - 2bits */
+#define SECI_UART_LCR_PARITY_EN		(1 << 1)
+#define SECI_UART_LCR_PARITY		(1 << 2) /* 0 - odd, 1 - even */
+#define SECI_UART_LCR_RX_EN		(1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL		(1 << 4) /* 1 => SECI_OUT held low */
+#define SECI_UART_LCR_TXO_EN		(1 << 5)
+#define SECI_UART_LCR_RTSO_EN		(1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN	(1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK		(1 << 8)
+#define SECI_UART_LCR_TXCRC_INV		(1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF	(1 << 10)
+#define SECI_UART_LCR_TXCRC_EN		(1 << 11)
+#define	SECI_UART_LCR_RXSYNC_EN		(1 << 12)
+
+#define SECI_UART_MCR_TX_EN		(1 << 0)
+#define SECI_UART_MCR_PRTS		(1 << 1)
+#define SECI_UART_MCR_SWFLCTRL_EN	(1 << 2)
+#define SECI_UART_MCR_HIGHRATE_EN	(1 << 3)
+#define SECI_UART_MCR_LOOPBK_EN		(1 << 4)
+#define SECI_UART_MCR_AUTO_RTS		(1 << 5)
+#define SECI_UART_MCR_AUTO_TX_DIS	(1 << 6)
+#define SECI_UART_MCR_BAUD_ADJ_EN	(1 << 7)
+#define SECI_UART_MCR_XONOFF_RPT	(1 << 9)
+
+/* SeciUARTLSR Bit Mask */
+#define	SECI_UART_LSR_RXOVR_MASK	(1 << 0)
+#define	SECI_UART_LSR_RFF_MASK		(1 << 1)
+#define	SECI_UART_LSR_TFNE_MASK		(1 << 2)
+#define	SECI_UART_LSR_TI_MASK		(1 << 3)
+#define	SECI_UART_LSR_TPR_MASK		(1 << 4)
+#define	SECI_UART_LSR_TXHALT_MASK	(1 << 5)
+
+/* SeciUARTMSR Bit Mask */
+#define	SECI_UART_MSR_CTSS_MASK		(1 << 0)
+#define	SECI_UART_MSR_RTSS_MASK		(1 << 1)
+#define	SECI_UART_MSR_SIS_MASK		(1 << 2)
+#define	SECI_UART_MSR_SIS2_MASK		(1 << 3)
+
+/* SeciUARTData Bits */
+#define SECI_UART_DATA_RF_NOT_EMPTY_BIT	(1 << 12)
+#define SECI_UART_DATA_RF_FULL_BIT	(1 << 13)
+#define SECI_UART_DATA_RF_OVRFLOW_BIT	(1 << 14)
+#define	SECI_UART_DATA_FIFO_PTR_MASK	0xFF
+#define	SECI_UART_DATA_RF_RD_PTR_SHIFT	16
+#define	SECI_UART_DATA_RF_WR_PTR_SHIFT	24
+
+/* LTECX: ltecxmux */
+#define LTECX_EXTRACT_MUX(val, idx)	(getbit4(&(val), (idx)))
+
+/* LTECX: ltecxmux MODE */
+#define LTECX_MUX_MODE_IDX		0
+#define LTECX_MUX_MODE_WCI2		0x0
+#define LTECX_MUX_MODE_GPIO		0x1
+
+
+/* LTECX GPIO Information Index */
+#define LTECX_NVRAM_FSYNC_IDX	0
+#define LTECX_NVRAM_LTERX_IDX	1
+#define LTECX_NVRAM_LTETX_IDX	2
+#define LTECX_NVRAM_WLPRIO_IDX	3
+
+/* LTECX WCI2 Information Index */
+#define LTECX_NVRAM_WCI2IN_IDX	0
+#define LTECX_NVRAM_WCI2OUT_IDX	1
+
+/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */
+#define LTECX_EXTRACT_PADNUM(val, idx)	(getbit8(&(val), (idx)))
+#define LTECX_EXTRACT_FNSEL(val, idx)	(getbit4(&(val), (idx)))
+#define LTECX_EXTRACT_GCIGPIO(val, idx)	(getbit4(&(val), (idx)))
+
+/* WLAN channel numbers - used from wifi.h */
+
+/* WLAN BW */
+#define ECI_BW_20   0x0
+#define ECI_BW_25   0x1
+#define ECI_BW_30   0x2
+#define ECI_BW_35   0x3
+#define ECI_BW_40   0x4
+#define ECI_BW_45   0x5
+#define ECI_BW_50   0x6
+#define ECI_BW_ALL  0x7
+
+/* WLAN - number of antenna */
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+/* otpctrl1 0xF4 */
+#define OTPC_FORCE_PWR_OFF	0x02000000
+/* chipcommon s/r registers introduced with cc rev >= 48 */
+#define CC_SR_CTL0_ENABLE_MASK             0x1
+#define CC_SR_CTL0_ENABLE_SHIFT              0
+#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT       1 /* sr_clk to sr_memory enable */
+#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT        2 /* Rising edge resource trigger 0 to sr_engine  */
+#define CC_SR_CTL0_MIN_DIV_SHIFT             6 /* Min division value for fast clk in sr_engine */
+#define CC_SR_CTL0_EN_SBC_STBY_SHIFT        16 /* Allow Subcore mem StandBy? */
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT       19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT          20 /* Allow pic to separate power domains */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT  25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
+#define CC_SR_CTL1_SR_INIT_MASK             0x3FF
+#define CC_SR_CTL1_SR_INIT_SHIFT            0
+
+#define	ECI_INLO_PKTDUR_MASK	0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT	4
+
+/* gci chip control bits */
+#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT		0
+#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT		1
+#define GCI_GPIO_CHIPCTRL_INVERT_BIT		2
+#define GCI_GPIO_CHIPCTRL_PULLUP_BIT		3
+#define GCI_GPIO_CHIPCTRL_PULLDN_BIT		4
+#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT	5
+#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT	6
+#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT	7
+
+/* gci GPIO input status bits */
+#define GCI_GPIO_STS_VALUE_BIT			0
+#define GCI_GPIO_STS_POS_EDGE_BIT		1
+#define GCI_GPIO_STS_NEG_EDGE_BIT		2
+#define GCI_GPIO_STS_FAST_EDGE_BIT		3
+#define GCI_GPIO_STS_CLEAR			0xF
+
+#define GCI_GPIO_STS_VALUE	(1 << GCI_GPIO_STS_VALUE_BIT)
+
+/* SR Power Control */
+#define SRPWR_DMN0_PCIE			(0)				/* PCIE */
+#define SRPWR_DMN0_PCIE_SHIFT		(SRPWR_DMN0_PCIE)		/* PCIE */
+#define SRPWR_DMN0_PCIE_MASK		(1 << SRPWR_DMN0_PCIE_SHIFT)	/* PCIE */
+#define SRPWR_DMN1_ARMBPSD		(1)				/* ARM/BP/SDIO */
+#define SRPWR_DMN1_ARMBPSD_SHIFT	(SRPWR_DMN1_ARMBPSD)		/* ARM/BP/SDIO */
+#define SRPWR_DMN1_ARMBPSD_MASK		(1 << SRPWR_DMN1_ARMBPSD_SHIFT)	/* ARM/BP/SDIO */
+#define SRPWR_DMN2_MACAUX		(2)				/* MAC/Phy Aux */
+#define SRPWR_DMN2_MACAUX_SHIFT		(SRPWR_DMN2_MACAUX)		/* MAC/Phy Aux */
+#define SRPWR_DMN2_MACAUX_MASK		(1 << SRPWR_DMN2_MACAUX_SHIFT)	/* MAC/Phy Aux */
+#define SRPWR_DMN3_MACMAIN		(3)				/* MAC/Phy Main */
+#define SRPWR_DMN3_MACMAIN_SHIFT	(SRPWR_DMN3_MACMAIN)	/* MAC/Phy Main */
+#define SRPWR_DMN3_MACMAIN_MASK		(1 << SRPWR_DMN3_MACMAIN_SHIFT)	/* MAC/Phy Main */
+#define SRPWR_DMN_ALL_MASK		(0xF)
+
+#define SRPWR_REQON_SHIFT		(8)	/* PowerOnRequest[11:8] */
+#define SRPWR_REQON_MASK		(SRPWR_DMN_ALL_MASK << SRPWR_REQON_SHIFT)
+#define SRPWR_STATUS_SHIFT		(16)	/* ExtPwrStatus[19:16], RO */
+#define SRPWR_STATUS_MASK		(SRPWR_DMN_ALL_MASK << SRPWR_STATUS_SHIFT)
+#define SRPWR_DMN_SHIFT			(28)	/* PowerDomain[31:28], RO */
+#define SRPWR_DMN_MASK			(SRPWR_DMN_ALL_MASK << SRPWR_DMN_SHIFT)
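+/*
+ * Illustrative use (editor's sketch, not part of the original header):
+ * requesting power-on for the MAC/Phy Main domain sets bit 11 of the
+ * register, since
+ *	SRPWR_DMN3_MACMAIN_MASK << SRPWR_REQON_SHIFT == (1 << 3) << 8 == 0x800
+ */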
+
+#endif	/* _SBCHIPC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 0000000..ad9c408
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,285 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbconfig.h 530150 2015-01-29 08:43:40Z $
+ */
+
+#ifndef	_SBCONFIG_H
+#define	_SBCONFIG_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+/* enumeration in SB is based on the premise that cores are contiguous in the
+ * enumeration space.
+ */
+#define SB_BUS_SIZE		0x10000		/**< Each bus gets 64Kbytes for cores */
+#define SB_BUS_BASE(b)		(SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define	SB_BUS_MAXCORES		(SB_BUS_SIZE / SI_CORE_SIZE)	/**< Max cores per bus */
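+/*
+ * Example (editor's illustration, not part of the original header): bus 1 is
+ * enumerated 64 KB above bus 0, i.e. SB_BUS_BASE(1) == SI_ENUM_BASE + 0x10000.
+ */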
+
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define	SBCONFIGOFF		0xf00		/**< core sbconfig regs are top 256bytes of regs */
+#define	SBCONFIGSIZE		256		/**< sizeof (sbconfig_t) */
+
+#define SBIPSFLAG		0x08
+#define SBTPSFLAG		0x18
+#define	SBTMERRLOGA		0x48		/**< sonics >= 2.3 */
+#define	SBTMERRLOG		0x50		/**< sonics >= 2.3 */
+#define SBADMATCH3		0x60
+#define SBADMATCH2		0x68
+#define SBADMATCH1		0x70
+#define SBIMSTATE		0x90
+#define SBINTVEC		0x94
+#define SBTMSTATELOW		0x98
+#define SBTMSTATEHIGH		0x9c
+#define SBBWA0			0xa0
+#define SBIMCONFIGLOW		0xa8
+#define SBIMCONFIGHIGH		0xac
+#define SBADMATCH0		0xb0
+#define SBTMCONFIGLOW		0xb8
+#define SBTMCONFIGHIGH		0xbc
+#define SBBCONFIG		0xc0
+#define SBBSTATE		0xc8
+#define SBACTCNFG		0xd8
+#define	SBFLAGST		0xe8
+#define SBIDLOW			0xf8
+#define SBIDHIGH		0xfc
+
+/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
+ * a few registers *below* that line. I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
+ */
+
+#define SBIMERRLOGA		0xea8
+#define SBIMERRLOG		0xeb0
+#define SBTMPORTCONNID0		0xed8
+#define SBTMPORTLOCK0		0xef8
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _sbconfig {
+	uint32	PAD[2];
+	uint32	sbipsflag;		/**< initiator port ocp slave flag */
+	uint32	PAD[3];
+	uint32	sbtpsflag;		/**< target port ocp slave flag */
+	uint32	PAD[11];
+	uint32	sbtmerrloga;		/**< (sonics >= 2.3) */
+	uint32	PAD;
+	uint32	sbtmerrlog;		/**< (sonics >= 2.3) */
+	uint32	PAD[3];
+	uint32	sbadmatch3;		/**< address match3 */
+	uint32	PAD;
+	uint32	sbadmatch2;		/**< address match2 */
+	uint32	PAD;
+	uint32	sbadmatch1;		/**< address match1 */
+	uint32	PAD[7];
+	uint32	sbimstate;		/**< initiator agent state */
+	uint32	sbintvec;		/**< interrupt mask */
+	uint32	sbtmstatelow;		/**< target state */
+	uint32	sbtmstatehigh;		/**< target state */
+	uint32	sbbwa0;			/**< bandwidth allocation table0 */
+	uint32	PAD;
+	uint32	sbimconfiglow;		/**< initiator configuration */
+	uint32	sbimconfighigh;		/**< initiator configuration */
+	uint32	sbadmatch0;		/**< address match0 */
+	uint32	PAD;
+	uint32	sbtmconfiglow;		/**< target configuration */
+	uint32	sbtmconfighigh;		/**< target configuration */
+	uint32	sbbconfig;		/**< broadcast configuration */
+	uint32	PAD;
+	uint32	sbbstate;		/**< broadcast state */
+	uint32	PAD[3];
+	uint32	sbactcnfg;		/**< activate configuration */
+	uint32	PAD[3];
+	uint32	sbflagst;		/**< current sbflags */
+	uint32	PAD[3];
+	uint32	sbidlow;		/**< identification */
+	uint32	sbidhigh;		/**< identification */
+} sbconfig_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* sbipsflag */
+#define	SBIPS_INT1_MASK		0x3f		/**< which sbflags get routed to mips interrupt 1 */
+#define	SBIPS_INT1_SHIFT	0
+#define	SBIPS_INT2_MASK		0x3f00		/**< which sbflags get routed to mips interrupt 2 */
+#define	SBIPS_INT2_SHIFT	8
+#define	SBIPS_INT3_MASK		0x3f0000	/**< which sbflags get routed to mips interrupt 3 */
+#define	SBIPS_INT3_SHIFT	16
+#define	SBIPS_INT4_MASK		0x3f000000	/**< which sbflags get routed to mips interrupt 4 */
+#define	SBIPS_INT4_SHIFT	24
+
+/* sbtpsflag */
+#define	SBTPS_NUM0_MASK		0x3f		/**< interrupt sbFlag # generated by this core */
+#define	SBTPS_F0EN0		0x40		/**< interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define	SBTMEL_CM		0x00000007	/**< command */
+#define	SBTMEL_CI		0x0000ff00	/**< connection id */
+#define	SBTMEL_EC		0x0f000000	/**< error code */
+#define	SBTMEL_ME		0x80000000	/**< multiple error */
+
+/* sbimstate */
+#define	SBIM_PC			0xf		/**< pipecount */
+#define	SBIM_AP_MASK		0x30		/**< arbitration policy */
+#define	SBIM_AP_BOTH		0x00		/**< use both timeslices and token */
+#define	SBIM_AP_TS		0x10		/**< use timeslices only */
+#define	SBIM_AP_TK		0x20		/**< use token only */
+#define	SBIM_AP_RSV		0x30		/**< reserved */
+#define	SBIM_IBE		0x20000		/**< inbanderror */
+#define	SBIM_TO			0x40000		/**< timeout */
+#define	SBIM_BY			0x01800000	/**< busy (sonics >= 2.3) */
+#define	SBIM_RJ			0x02000000	/**< reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define	SBTML_RESET		0x0001		/**< reset */
+#define	SBTML_REJ_MASK		0x0006		/**< reject field */
+#define	SBTML_REJ		0x0002		/**< reject */
+#define	SBTML_TMPREJ		0x0004		/**< temporary reject, for error recovery */
+
+#define	SBTML_SICF_SHIFT	16	/**< Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define	SBTMH_SERR		0x0001		/**< serror */
+#define	SBTMH_INT		0x0002		/**< interrupt */
+#define	SBTMH_BUSY		0x0004		/**< busy */
+#define	SBTMH_TO		0x0020		/**< timeout (sonics >= 2.3) */
+
+#define	SBTMH_SISF_SHIFT	16		/**< Shift to locate the SI status flags in sbtmh */
+
+/* sbbwa0 */
+#define	SBBWA_TAB0_MASK		0xffff		/**< lookup table 0 */
+#define	SBBWA_TAB1_MASK		0xffff		/**< lookup table 1 */
+#define	SBBWA_TAB1_SHIFT	16
+
+/* sbimconfiglow */
+#define	SBIMCL_STO_MASK		0x7		/**< service timeout */
+#define	SBIMCL_RTO_MASK		0x70		/**< request timeout */
+#define	SBIMCL_RTO_SHIFT	4
+#define	SBIMCL_CID_MASK		0xff0000	/**< connection id */
+#define	SBIMCL_CID_SHIFT	16
+
+/* sbimconfighigh */
+#define	SBIMCH_IEM_MASK		0xc		/**< inband error mode */
+#define	SBIMCH_TEM_MASK		0x30		/**< timeout error mode */
+#define	SBIMCH_TEM_SHIFT	4
+#define	SBIMCH_BEM_MASK		0xc0		/**< bus error mode */
+#define	SBIMCH_BEM_SHIFT	6
+
+/* sbadmatch0 */
+#define	SBAM_TYPE_MASK		0x3		/**< address type */
+#define	SBAM_AD64		0x4		/**< reserved */
+#define	SBAM_ADINT0_MASK	0xf8		/**< type0 size */
+#define	SBAM_ADINT0_SHIFT	3
+#define	SBAM_ADINT1_MASK	0x1f8		/**< type1 size */
+#define	SBAM_ADINT1_SHIFT	3
+#define	SBAM_ADINT2_MASK	0x1f8		/**< type2 size */
+#define	SBAM_ADINT2_SHIFT	3
+#define	SBAM_ADEN		0x400		/**< enable */
+#define	SBAM_ADNEG		0x800		/**< negative decode */
+#define	SBAM_BASE0_MASK		0xffffff00	/**< type0 base address */
+#define	SBAM_BASE0_SHIFT	8
+#define	SBAM_BASE1_MASK		0xfffff000	/**< type1 base address for the core */
+#define	SBAM_BASE1_SHIFT	12
+#define	SBAM_BASE2_MASK		0xffff0000	/**< type2 base address for the core */
+#define	SBAM_BASE2_SHIFT	16
+
+/* sbtmconfiglow */
+#define	SBTMCL_CD_MASK		0xff		/**< clock divide */
+#define	SBTMCL_CO_MASK		0xf800		/**< clock offset */
+#define	SBTMCL_CO_SHIFT		11
+#define	SBTMCL_IF_MASK		0xfc0000	/**< interrupt flags */
+#define	SBTMCL_IF_SHIFT		18
+#define	SBTMCL_IM_MASK		0x3000000	/**< interrupt mode */
+#define	SBTMCL_IM_SHIFT		24
+
+/* sbtmconfighigh */
+#define	SBTMCH_BM_MASK		0x3		/**< busy mode */
+#define	SBTMCH_RM_MASK		0x3		/**< retry mode */
+#define	SBTMCH_RM_SHIFT		2
+#define	SBTMCH_SM_MASK		0x30		/**< stop mode */
+#define	SBTMCH_SM_SHIFT		4
+#define	SBTMCH_EM_MASK		0x300		/**< sb error mode */
+#define	SBTMCH_EM_SHIFT		8
+#define	SBTMCH_IM_MASK		0xc00		/**< int mode */
+#define	SBTMCH_IM_SHIFT		10
+
+/* sbbconfig */
+#define	SBBC_LAT_MASK		0x3		/**< sb latency */
+#define	SBBC_MAX0_MASK		0xf0000		/**< maxccntr0 */
+#define	SBBC_MAX0_SHIFT		16
+#define	SBBC_MAX1_MASK		0xf00000	/**< maxccntr1 */
+#define	SBBC_MAX1_SHIFT		20
+
+/* sbbstate */
+#define	SBBS_SRD		0x1		/**< st reg disable */
+#define	SBBS_HRD		0x2		/**< hold reg disable */
+
+/* sbidlow */
+#define	SBIDL_CS_MASK		0x3		/**< config space */
+#define	SBIDL_AR_MASK		0x38		/**< # address ranges supported */
+#define	SBIDL_AR_SHIFT		3
+#define	SBIDL_SYNCH		0x40		/**< sync */
+#define	SBIDL_INIT		0x80		/**< initiator */
+#define	SBIDL_MINLAT_MASK	0xf00		/**< minimum backplane latency */
+#define	SBIDL_MINLAT_SHIFT	8
+#define	SBIDL_MAXLAT		0xf000		/**< maximum backplane latency */
+#define	SBIDL_MAXLAT_SHIFT	12
+#define	SBIDL_FIRST		0x10000		/**< this initiator is first */
+#define	SBIDL_CW_MASK		0xc0000		/**< cycle counter width */
+#define	SBIDL_CW_SHIFT		18
+#define	SBIDL_TP_MASK		0xf00000	/**< target ports */
+#define	SBIDL_TP_SHIFT		20
+#define	SBIDL_IP_MASK		0xf000000	/**< initiator ports */
+#define	SBIDL_IP_SHIFT		24
+#define	SBIDL_RV_MASK		0xf0000000	/**< sonics backplane revision code */
+#define	SBIDL_RV_SHIFT		28
+#define	SBIDL_RV_2_2		0x00000000	/**< version 2.2 or earlier */
+#define	SBIDL_RV_2_3		0x10000000	/**< version 2.3 */
+
+/* sbidhigh */
+#define	SBIDH_RC_MASK		0x000f		/**< revision code */
+#define	SBIDH_RCE_MASK		0x7000		/**< revision code extension field */
+#define	SBIDH_RCE_SHIFT		8
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
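+/*
+ * Worked example (editor's illustration, not part of the original header):
+ * for sbidhigh == 0x2005 the extension field contributes 0x2000 >> 8 == 0x20
+ * and the base revision code is 0x5, so SBCOREREV(0x2005) == 0x25.
+ */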
+#define	SBIDH_CC_MASK		0x8ff0		/**< core code */
+#define	SBIDH_CC_SHIFT		4
+#define	SBIDH_VC_MASK		0xffff0000	/**< vendor code */
+#define	SBIDH_VC_SHIFT		16
+
+#define	SB_COMMIT		0xfd8		/**< update buffered registers value */
+
+/* vendor codes */
+#define	SB_VEND_BCM		0x4243		/**< Broadcom's SB vendor code */
+
+#endif	/* _SBCONFIG_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbgci.h b/drivers/net/wireless/bcmdhd/include/sbgci.h
new file mode 100644
index 0000000..f04232d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbgci.h
@@ -0,0 +1,248 @@
+/*
+ * SiliconBackplane GCI core hardware definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbgci.h 612498 2016-01-14 05:09:09Z $
+ */
+
+#ifndef _SBGCI_H
+#define _SBGCI_H
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+#define GCI_OFFSETOF(sih, reg) \
+	(AOB_ENAB(sih) ? OFFSETOF(gciregs_t, reg) : OFFSETOF(chipcregs_t, reg))
+#define GCI_CORE_IDX(sih) (AOB_ENAB(sih) ? si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX)
+
+typedef volatile struct {
+	uint32	gci_corecaps0;		/* 0x000 */
+	uint32	gci_corecaps1;		/* 0x004 */
+	uint32	gci_corecaps2;		/* 0x008 */
+	uint32	gci_corectrl;		/* 0x00c */
+	uint32	gci_corestat;		/* 0x010 */
+	uint32	gci_intstat;		/* 0x014 */
+	uint32	gci_intmask;		/* 0x018 */
+	uint32	gci_wakemask;		/* 0x01c */
+	uint32	gci_levelintstat;	/* 0x020 */
+	uint32	gci_eventintstat;	/* 0x024 */
+	uint32	gci_wakelevelintstat;	/* 0x028 */
+	uint32	gci_wakeeventintstat;	/* 0x02c */
+	uint32	semaphoreintstatus;	/* 0x030 */
+	uint32	semaphoreintmask;	/* 0x034 */
+	uint32	semaphorerequest;	/* 0x038 */
+	uint32	semaphorereserve;	/* 0x03c */
+	uint32	gci_indirect_addr;	/* 0x040 */
+	uint32	gci_gpioctl;		/* 0x044 */
+	uint32	gci_gpiostatus;		/* 0x048 */
+	uint32	gci_gpiomask;		/* 0x04c */
+	uint32	eventsummary;		/* 0x050 */
+	uint32	gci_miscctl;		/* 0x054 */
+	uint32	gci_gpiointmask;	/* 0x058 */
+	uint32	gci_gpiowakemask;	/* 0x05c */
+	uint32	gci_input[32];		/* 0x060 */
+	uint32	gci_event[32];		/* 0x0e0 */
+	uint32	gci_output[4];		/* 0x160 */
+	uint32	gci_control_0;		/* 0x170 */
+	uint32	gci_control_1;		/* 0x174 */
+	uint32	gci_intpolreg;		/* 0x178 */
+	uint32	gci_levelintmask;	/* 0x17c */
+	uint32	gci_eventintmask;	/* 0x180 */
+	uint32	wakelevelintmask;	/* 0x184 */
+	uint32	wakeeventintmask;	/* 0x188 */
+	uint32	hwmask;			/* 0x18c */
+	uint32	PAD;
+	uint32	gci_inbandeventintmask;	/* 0x194 */
+	uint32	PAD;
+	uint32	gci_inbandeventstatus;	/* 0x19c */
+	uint32	gci_seciauxtx;		/* 0x1a0 */
+	uint32	gci_seciauxrx;		/* 0x1a4 */
+	uint32	gci_secitx_datatag;	/* 0x1a8 */
+	uint32	gci_secirx_datatag;	/* 0x1ac */
+	uint32	gci_secitx_datamask;	/* 0x1b0 */
+	uint32	gci_seciusef0tx_reg;	/* 0x1b4 */
+	uint32	gci_secif0tx_offset;	/* 0x1b8 */
+	uint32	gci_secif0rx_offset;	/* 0x1bc */
+	uint32	gci_secif1tx_offset;	/* 0x1c0 */
+	uint32	gci_rxfifo_common_ctrl;	/* 0x1c4 */
+	uint32	gci_rxfifoctrl;		/* 0x1c8 */
+	uint32	gci_hw_sema_status;	/* 0x1cc */
+	uint32	gci_seciuartescval;	/* 0x1d0 */
+	uint32	gic_seciuartautobaudctr;	/* 0x1d4 */
+	uint32	gci_secififolevel;	/* 0x1d8 */
+	uint32	gci_seciuartdata;	/* 0x1dc */
+	uint32	gci_secibauddiv;	/* 0x1e0 */
+	uint32	gci_secifcr;		/* 0x1e4 */
+	uint32	gci_secilcr;		/* 0x1e8 */
+	uint32	gci_secimcr;		/* 0x1ec */
+	uint32	gci_secilsr;		/* 0x1f0 */
+	uint32	gci_secimsr;		/* 0x1f4 */
+	uint32	gci_baudadj;		/* 0x1f8 */
+	uint32	gci_inbandintmask;	/* 0x1fc */
+	uint32  gci_chipctrl;		/* 0x200 */
+	uint32  gci_chipsts; 		/* 0x204 */
+	uint32	gci_gpioout; 		/* 0x208 */
+	uint32	gci_gpioout_read; 	/* 0x20C */
+	uint32	gci_mpwaketx; 		/* 0x210 */
+	uint32	gci_mpwakedetect; 	/* 0x214 */
+	uint32	gci_seciin_ctrl; 	/* 0x218 */
+	uint32	gci_seciout_ctrl; 	/* 0x21C */
+	uint32	gci_seciin_auxfifo_en; 	/* 0x220 */
+	uint32	gci_seciout_txen_txbr; 	/* 0x224 */
+	uint32	gci_seciin_rxbrstatus; 	/* 0x228 */
+	uint32	gci_seciin_rxerrstatus; /* 0x22C */
+	uint32	gci_seciin_fcstatus; 	/* 0x230 */
+	uint32	gci_seciout_txstatus; 	/* 0x234 */
+	uint32	gci_seciout_txbrstatus; /* 0x238 */
+	uint32	wlan_mem_info;				/* 0x23C */
+	uint32	wlan_bankxinfo;				/* 0x240 */
+	uint32	bt_smem_select;				/* 0x244 */
+	uint32	bt_smem_stby;				/* 0x248 */
+	uint32	bt_smem_status;				/* 0x24C */
+	uint32	wlan_bankxactivepda;			/* 0x250 */
+	uint32	wlan_bankxsleeppda;			/* 0x254 */
+	uint32	wlan_bankxkill;				/* 0x258 */
+	uint32	PAD[41];
+	uint32	gci_chipid;		/* 0x300 */
+	uint32	PAD[3];
+	uint32	otpstatus;		/* 0x310 */
+	uint32	otpcontrol;		/* 0x314 */
+	uint32	otpprog;		/* 0x318 */
+	uint32	otplayout;		/* 0x31c */
+	uint32	otplayoutextension;	/* 0x320 */
+	uint32	otpcontrol1;		/* 0x324 */
+	uint32	otpprogdata;		/* 0x328 */
+	uint32	PAD[52];
+	uint32	otpECCstatus;		/* 0x3FC */
+	uint32	PAD[512];
+	uint32	lhl_core_capab_adr;			/* 0xC00 */
+	uint32	lhl_main_ctl_adr;			/* 0xC04 */
+	uint32	lhl_pmu_ctl_adr;			/* 0xC08 */
+	uint32	lhl_extlpo_ctl_adr;			/* 0xC0C */
+	uint32	lpo_ctl_adr;				/* 0xC10 */
+	uint32	lhl_lpo2_ctl_adr;			/* 0xC14 */
+	uint32	lhl_osc32k_ctl_adr;			/* 0xC18 */
+	uint32	lhl_clk_status_adr;			/* 0xC1C */
+	uint32	lhl_clk_det_ctl_adr;			/* 0xC20 */
+	uint32	lhl_clk_sel_adr;			/* 0xC24 */
+	uint32	hidoff_cnt_adr[2];			/* 0xC28-0xC2C */
+	uint32	lhl_autoclk_ctl_adr;			/* 0xC30 */
+	uint32	PAD;					/* reserved */
+	uint32	lhl_hibtim_adr;				/* 0xC38 */
+	uint32	lhl_wl_ilp_val_adr;			/* 0xC3C */
+	uint32	lhl_wl_armtim0_intrp_adr;		/* 0xC40 */
+	uint32	lhl_wl_armtim0_st_adr;			/* 0xC44 */
+	uint32	lhl_wl_armtim0_adr;			/* 0xC48 */
+	uint32	PAD[9];					/* 0xC4C-0xC6C */
+	uint32 lhl_wl_mactim0_intrp_adr;		/* 0xC70 */
+	uint32 lhl_wl_mactim0_st_adr;			/* 0xC74 */
+	uint32 lhl_wl_mactim_int0_adr;			/* 0xC78 */
+	uint32 lhl_wl_mactim_frac0_adr;			/* 0xC7C */
+	uint32 lhl_wl_mactim1_intrp_adr;		/* 0xC80 */
+	uint32 lhl_wl_mactim1_st_adr;			/* 0xC84 */
+	uint32 lhl_wl_mactim_int1_adr;			/* 0xC88 */
+	uint32 lhl_wl_mactim_frac1_adr;			/* 0xC8C */
+	uint32	PAD[8];					/* 0xC90-0xCAC */
+	uint32	gpio_int_en_port_adr[4];		/* 0xCB0-0xCBC */
+	uint32	gpio_int_st_port_adr[4];		/* 0xCC0-0xCCC */
+	uint32	gpio_ctrl_iocfg_p_adr[64];		/* 0xCD0-0xDCC */
+	uint32	gpio_gctrl_iocfg_p0_p39_adr;		/* 0xDD0 */
+	uint32	gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr;	/* 0xDD4 */
+	uint32	gpio_gdsctrl_iocfg_p26_p29_adr;		/* 0xDD8 */
+	uint32	PAD[8];					/* 0xDDC-0xDF8 */
+	uint32	lhl_gpio_din0_adr;			/* 0xDFC */
+	uint32	lhl_gpio_din1_adr;			/* 0xE00 */
+	uint32	lhl_wkup_status_adr;			/* 0xE04 */
+	uint32	lhl_ctl_adr;				/* 0xE08 */
+	uint32	lhl_adc_ctl_adr;			/* 0xE0C */
+	uint32	lhl_qdxyz_in_dly_adr;			/* 0xE10 */
+	uint32	lhl_optctl_adr;				/* 0xE14 */
+	uint32	lhl_optct2_adr;				/* 0xE18 */
+	uint32	lhl_scanp_cntr_init_val_adr;		/* 0xE1C */
+	uint32	lhl_opt_togg_val_adr[6];		/* 0xE20-0xE34 */
+	uint32	lhl_optx_smp_val_adr;			/* 0xE38 */
+	uint32	lhl_opty_smp_val_adr;			/* 0xE3C */
+	uint32	lhl_optz_smp_val_adr;			/* 0xE40 */
+	uint32	lhl_hidoff_keepstate_adr[3];		/* 0xE44-0xE4C */
+	uint32	lhl_bt_slmboot_ctl0_adr[4];		/* 0xE50-0xE5C */
+	uint32	lhl_wl_fw_ctl;				/* 0xE60 */
+	uint32	lhl_wl_hw_ctl_adr[2];			/* 0xE64-0xE68 */
+	uint32	lhl_bt_hw_ctl_adr;			/* 0xE6C */
+	uint32	lhl_top_pwrseq_en_adr;			/* 0xE70 */
+	uint32	lhl_top_pwrdn_ctl_adr;			/* 0xE74 */
+	uint32	lhl_top_pwrup_ctl_adr;			/* 0xE78 */
+	uint32	lhl_top_pwrseq_ctl_adr;			/* 0xE7C */
+	uint32	lhl_top_pwrdn2_ctl_adr;			/* 0xE80 */
+	uint32	lhl_top_pwrup2_ctl_adr;			/* 0xE84 */
+	uint32	wpt_regon_intrp_cfg_adr;		/* 0xE88 */
+	uint32	bt_regon_intrp_cfg_adr;			/* 0xE8C */
+	uint32	wl_regon_intrp_cfg_adr;			/* 0xE90 */
+	uint32	regon_intrp_st_adr;			/* 0xE94 */
+	uint32	regon_intrp_en_adr;			/* 0xE98 */
+} gciregs_t;
+
+#define	GCI_CAP0_REV_MASK	0x000000ff
+
+/* GCI Capabilities registers */
+#define GCI_CORE_CAP_0_COREREV_MASK			0xFF
+#define GCI_CORE_CAP_0_COREREV_SHIFT			0
+
+#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK		0x3F
+#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT		0
+#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK		0xF
+#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT	16
+
+#define WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK		0xFFFF
+
+#define WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK		0x1
+
+/* WLAN BankXInfo Register */
+#define WLAN_BANKXINFO_BANK_SIZE_MASK			0x00FFF000
+#define WLAN_BANKXINFO_BANK_SIZE_SHIFT			12
+
+/* WLAN Mem Info Register */
+#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK		0x000000FF
+#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT		0
+
+#define WLAN_MEM_INFO_REG_NUMD11MACBM_MASK		0x0000FF00
+#define WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT		8
+
+#define WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK		0x00FF0000
+#define WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT		16
+
+#define WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK		0xFF000000
+#define WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT		24
+
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+
+#endif	/* _SBGCI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
new file mode 100644
index 0000000..6d10d6a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -0,0 +1,429 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx .
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbhnddma.h 615537 2016-01-28 00:46:34Z $
+ */
+
+#ifndef	_sbhnddma_h_
+#define	_sbhnddma_h_
+
+/* DMA structure:
+ *  supports two DMA engines: 32-bit or 64-bit addressing
+ *  the basic DMA register set is per channel (transmit or receive)
+ *  a pair of channels is defined for convenience
+ */
+
+
+/* 32 bits addressing */
+
+/** dma registers per channel (xmt or rcv) */
+typedef volatile struct {
+	uint32	control;		/**< enable, et al */
+	uint32	addr;			/**< descriptor ring base address (4K aligned) */
+	uint32	ptr;			/**< last descriptor posted to chip */
+	uint32	status;			/**< current active descriptor, et al */
+} dma32regs_t;
+
+typedef volatile struct {
+	dma32regs_t	xmt;		/**< dma tx channel */
+	dma32regs_t	rcv;		/**< dma rx channel */
+} dma32regp_t;
+
+typedef volatile struct {	/* diag access */
+	uint32	fifoaddr;		/**< diag address */
+	uint32	fifodatalow;		/**< low 32bits of data */
+	uint32	fifodatahigh;		/**< high 32bits of data */
+	uint32	pad;			/**< reserved */
+} dma32diag_t;
+
+/**
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl;		/**< misc control bits & bufcount */
+	uint32	addr;		/**< data buffer address */
+} dma32dd_t;
+
+/** Each descriptor ring must be 4096-byte aligned and fit within a single 4096-byte page. */
+#define	D32RINGALIGN_BITS	12
+#define	D32MAXRINGSZ		(1 << D32RINGALIGN_BITS)
+#define	D32RINGALIGN		(1 << D32RINGALIGN_BITS)
+
+#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
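As a worked check of the arithmetic above: each 32-bit descriptor is two 32-bit words (8 bytes), so a 4096-byte ring holds exactly 512 of them. The standalone snippet below is only an illustration and mirrors dma32dd_t with its own local type.

#include <assert.h>
#include <stdint.h>

/* Standalone illustration (not part of the header): mirror dma32dd_t with a
 * local type and check the arithmetic behind D32MAXDD. */
struct dma32dd_example {
	uint32_t ctrl;		/* misc control bits & bufcount */
	uint32_t addr;		/* data buffer address */
};

int main(void)
{
	assert(sizeof(struct dma32dd_example) == 8);		/* two 32-bit words */
	assert(4096 / sizeof(struct dma32dd_example) == 512);	/* descriptors per 4 KB ring */
	return 0;
}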
+
+/* transmit channel control */
+#define	XC_XE		((uint32)1 << 0)	/**< transmit enable */
+#define	XC_SE		((uint32)1 << 1)	/**< transmit suspend request */
+#define	XC_LE		((uint32)1 << 2)	/**< loopback enable */
+#define	XC_FL		((uint32)1 << 4)	/**< flush request */
+#define XC_MR_MASK	0x000001C0		/**< Multiple outstanding reads */
+#define XC_MR_SHIFT	6
+#define	XC_PD		((uint32)1 << 11)	/**< parity check disable */
+#define	XC_AE		((uint32)3 << 16)	/**< address extension bits */
+#define	XC_AE_SHIFT	16
+#define XC_BL_MASK	0x001C0000		/**< BurstLen bits */
+#define XC_BL_SHIFT	18
+#define XC_PC_MASK	0x00E00000		/**< Prefetch control */
+#define XC_PC_SHIFT	21
+#define XC_PT_MASK	0x03000000		/**< Prefetch threshold */
+#define XC_PT_SHIFT	24
+
+/** Multiple outstanding reads */
+#define DMA_MR_1	0
+#define DMA_MR_2	1
+#define DMA_MR_4	2
+#define DMA_MR_8	3
+#define DMA_MR_12	4
+#define DMA_MR_16	5
+#define DMA_MR_20	6
+#define DMA_MR_32	7
+
+/** DMA Burst Length in bytes */
+#define DMA_BL_16	0
+#define DMA_BL_32	1
+#define DMA_BL_64	2
+#define DMA_BL_128	3
+#define DMA_BL_256	4
+#define DMA_BL_512	5
+#define DMA_BL_1024	6
+
+/** Prefetch control */
+#define DMA_PC_0	0
+#define DMA_PC_4	1
+#define DMA_PC_8	2
+#define DMA_PC_16	3
+/* others: reserved */
+
+/** Prefetch threshold */
+#define DMA_PT_1	0
+#define DMA_PT_2	1
+#define DMA_PT_4	2
+#define DMA_PT_8	3
+
+/** Channel Switch */
+#define DMA_CS_OFF	0
+#define DMA_CS_ON	1
+
+/* transmit descriptor table pointer */
+#define	XP_LD_MASK	0xfff			/**< last valid descriptor */
+
+/* transmit channel status */
+#define	XS_CD_MASK	0x0fff			/**< current descriptor pointer */
+#define	XS_XS_MASK	0xf000			/**< transmit state */
+#define	XS_XS_SHIFT	12
+#define	XS_XS_DISABLED	0x0000			/**< disabled */
+#define	XS_XS_ACTIVE	0x1000			/**< active */
+#define	XS_XS_IDLE	0x2000			/**< idle wait */
+#define	XS_XS_STOPPED	0x3000			/**< stopped */
+#define	XS_XS_SUSP	0x4000			/**< suspend pending */
+#define	XS_XE_MASK	0xf0000			/**< transmit errors */
+#define	XS_XE_SHIFT	16
+#define	XS_XE_NOERR	0x00000			/**< no error */
+#define	XS_XE_DPE	0x10000			/**< descriptor protocol error */
+#define	XS_XE_DFU	0x20000			/**< data fifo underrun */
+#define	XS_XE_BEBR	0x30000			/**< bus error on buffer read */
+#define	XS_XE_BEDA	0x40000			/**< bus error on descriptor access */
+#define	XS_AD_MASK	0xfff00000		/**< active descriptor */
+#define	XS_AD_SHIFT	20
+
+/* receive channel control */
+#define	RC_RE		((uint32)1 << 0)	/**< receive enable */
+#define	RC_RO_MASK	0xfe			/**< receive frame offset */
+#define	RC_RO_SHIFT	1
+#define	RC_FM		((uint32)1 << 8)	/**< direct fifo receive (pio) mode */
+#define	RC_SH		((uint32)1 << 9)	/**< separate rx header descriptor enable */
+#define	RC_OC		((uint32)1 << 10)	/**< overflow continue */
+#define	RC_PD		((uint32)1 << 11)	/**< parity check disable */
+#define	RC_AE		((uint32)3 << 16)	/**< address extension bits */
+#define	RC_AE_SHIFT	16
+#define RC_BL_MASK	0x001C0000		/**< BurstLen bits */
+#define RC_BL_SHIFT	18
+#define RC_PC_MASK	0x00E00000		/**< Prefetch control */
+#define RC_PC_SHIFT	21
+#define RC_PT_MASK	0x03000000		/**< Prefetch threshold */
+#define RC_PT_SHIFT	24
+#define RC_WAITCMP_MASK 0x00001000
+#define RC_WAITCMP_SHIFT 12
+/* receive descriptor table pointer */
+#define	RP_LD_MASK	0xfff			/**< last valid descriptor */
+
+/* receive channel status */
+#define	RS_CD_MASK	0x0fff			/**< current descriptor pointer */
+#define	RS_RS_MASK	0xf000			/**< receive state */
+#define	RS_RS_SHIFT	12
+#define	RS_RS_DISABLED	0x0000			/**< disabled */
+#define	RS_RS_ACTIVE	0x1000			/**< active */
+#define	RS_RS_IDLE	0x2000			/**< idle wait */
+#define	RS_RS_STOPPED	0x3000			/**< reserved */
+#define	RS_RE_MASK	0xf0000			/**< receive errors */
+#define	RS_RE_SHIFT	16
+#define	RS_RE_NOERR	0x00000			/**< no error */
+#define	RS_RE_DPE	0x10000			/**< descriptor protocol error */
+#define	RS_RE_DFO	0x20000			/**< data fifo overflow */
+#define	RS_RE_BEBW	0x30000			/**< bus error on buffer write */
+#define	RS_RE_BEDA	0x40000			/**< bus error on descriptor access */
+#define	RS_AD_MASK	0xfff00000		/**< active descriptor */
+#define	RS_AD_SHIFT	20
+
+/* fifoaddr */
+#define	FA_OFF_MASK	0xffff			/**< offset */
+#define	FA_SEL_MASK	0xf0000			/**< select */
+#define	FA_SEL_SHIFT	16
+#define	FA_SEL_XDD	0x00000			/**< transmit dma data */
+#define	FA_SEL_XDP	0x10000			/**< transmit dma pointers */
+#define	FA_SEL_RDD	0x40000			/**< receive dma data */
+#define	FA_SEL_RDP	0x50000			/**< receive dma pointers */
+#define	FA_SEL_XFD	0x80000			/**< transmit fifo data */
+#define	FA_SEL_XFP	0x90000			/**< transmit fifo pointers */
+#define	FA_SEL_RFD	0xc0000			/**< receive fifo data */
+#define	FA_SEL_RFP	0xd0000			/**< receive fifo pointers */
+#define	FA_SEL_RSD	0xe0000			/**< receive frame status data */
+#define	FA_SEL_RSP	0xf0000			/**< receive frame status pointers */
+
+/* descriptor control flags */
+#define	CTRL_BC_MASK	0x00001fff		/**< buffer byte count; real data len must be <= 4KB */
+#define	CTRL_AE		((uint32)3 << 16)	/**< address extension bits */
+#define	CTRL_AE_SHIFT	16
+#define	CTRL_PARITY	((uint32)3 << 18)	/**< parity bit */
+#define	CTRL_EOT	((uint32)1 << 28)	/**< end of descriptor table */
+#define	CTRL_IOC	((uint32)1 << 29)	/**< interrupt on completion */
+#define	CTRL_EOF	((uint32)1 << 30)	/**< end of frame */
+#define	CTRL_SOF	((uint32)1 << 31)	/**< start of frame */
+
+/** control flags in the range [27:20] are core-specific and not defined here */
+#define	CTRL_CORE_MASK	0x0ff00000
+
+/* 64 bits addressing */
+
+/** dma registers per channel (xmt or rcv) */
+typedef volatile struct {
+	uint32	control;	/**< enable, et al */
+	uint32	ptr;		/**< last descriptor posted to chip */
+	uint32	addrlow;	/**< descriptor ring base address low 32-bits (8K aligned) */
+	uint32	addrhigh;	/**< descriptor ring base address bits 63:32 (8K aligned) */
+	uint32	status0;	/**< current descriptor, xmt state */
+	uint32	status1;	/**< active descriptor, xmt error */
+} dma64regs_t;
+
+typedef volatile struct {
+	dma64regs_t	tx;		/**< dma64 tx channel */
+	dma64regs_t	rx;		/**< dma64 rx channel */
+} dma64regp_t;
+
+typedef volatile struct {		/**< diag access */
+	uint32	fifoaddr;		/**< diag address */
+	uint32	fifodatalow;		/**< low 32bits of data */
+	uint32	fifodatahigh;		/**< high 32bits of data */
+	uint32	pad;			/**< reserved */
+} dma64diag_t;
+
+/**
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl1;		/**< misc control bits */
+	uint32	ctrl2;		/**< buffer count and address extension */
+	uint32	addrlow;	/**< memory address of the data buffer, bits 31:0 */
+	uint32	addrhigh;	/**< memory address of the data buffer, bits 63:32 */
+} dma64dd_t;
+
+/**
+ * Each descriptor ring must be 8kB aligned and fit within a contiguous 8kB physical address range.
+ */
+#define D64RINGALIGN_BITS	13
+#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
+#define	D64RINGBOUNDARY		(1 << D64RINGALIGN_BITS)
+
+#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
+
+/** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
+#define	D64MAXDD_LARGE		((1 << 16) / sizeof (dma64dd_t))
+
+/**
+ * for cores with large descriptor ring support (4k descriptors), the descriptor ring cannot
+ * cross a 64K boundary
+ */
+#define	D64RINGBOUNDARY_LARGE	(1 << 16)
+
+/*
+ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
+ * When this field contains the value N, the burst length is 2**(N + 4) bytes.
+ */
+#define D64_DEF_USBBURSTLEN     2
+#define D64_DEF_SDIOBURSTLEN    1
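The defaults above follow the 2**(N + 4) encoding described in the comment, so N = 2 selects 64-byte bursts and N = 1 selects 32-byte bursts, consistent with DMA_BL_64 and DMA_BL_32 earlier in this file. A minimal standalone illustration:

#include <stdio.h>

/* Illustration of the 2**(N + 4) burst-length encoding described above. */
static unsigned int burstlen_bytes(unsigned int n)
{
	return 1u << (n + 4);
}

int main(void)
{
	printf("USB default (N=2):  %u bytes\n", burstlen_bytes(2));	/* 64 */
	printf("SDIO default (N=1): %u bytes\n", burstlen_bytes(1));	/* 32 */
	return 0;
}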
+
+
+#ifndef D64_USBBURSTLEN
+#define D64_USBBURSTLEN	DMA_BL_64
+#endif
+#ifndef D64_SDIOBURSTLEN
+#define D64_SDIOBURSTLEN	DMA_BL_32
+#endif
+
+/* transmit channel control */
+#define	D64_XC_XE		0x00000001	/**< transmit enable */
+#define	D64_XC_SE		0x00000002	/**< transmit suspend request */
+#define	D64_XC_LE		0x00000004	/**< loopback enable */
+#define	D64_XC_FL		0x00000010	/**< flush request */
+#define D64_XC_MR_MASK		0x000001C0	/**< Multiple outstanding reads */
+#define D64_XC_MR_SHIFT		6
+#define D64_XC_CS_SHIFT		9		/**< channel switch enable */
+#define D64_XC_CS_MASK		0x00000200      /**< channel switch enable */
+#define	D64_XC_PD		0x00000800	/**< parity check disable */
+#define	D64_XC_AE		0x00030000	/**< address extension bits */
+#define	D64_XC_AE_SHIFT		16
+#define D64_XC_BL_MASK		0x001C0000	/**< BurstLen bits */
+#define D64_XC_BL_SHIFT		18
+#define D64_XC_PC_MASK		0x00E00000		/**< Prefetch control */
+#define D64_XC_PC_SHIFT		21
+#define D64_XC_PT_MASK		0x03000000		/**< Prefetch threshold */
+#define D64_XC_PT_SHIFT		24
+
+/* transmit descriptor table pointer */
+#define	D64_XP_LD_MASK		0x00001fff	/**< last valid descriptor */
+
+/* transmit channel status */
+#define	D64_XS0_CD_MASK		(di->d64_xs0_cd_mask)	/**< current descriptor pointer */
+#define	D64_XS0_XS_MASK		0xf0000000     	/**< transmit state */
+#define	D64_XS0_XS_SHIFT		28
+#define	D64_XS0_XS_DISABLED	0x00000000	/**< disabled */
+#define	D64_XS0_XS_ACTIVE	0x10000000	/**< active */
+#define	D64_XS0_XS_IDLE		0x20000000	/**< idle wait */
+#define	D64_XS0_XS_STOPPED	0x30000000	/**< stopped */
+#define	D64_XS0_XS_SUSP		0x40000000	/**< suspend pending */
+
+#define	D64_XS1_AD_MASK		(di->d64_xs1_ad_mask)	/**< active descriptor */
+#define	D64_XS1_XE_MASK		0xf0000000     	/**< transmit errors */
+#define	D64_XS1_XE_SHIFT		28
+#define	D64_XS1_XE_NOERR	0x00000000	/**< no error */
+#define	D64_XS1_XE_DPE		0x10000000	/**< descriptor protocol error */
+#define	D64_XS1_XE_DFU		0x20000000	/**< data fifo underrun */
+#define	D64_XS1_XE_DTE		0x30000000	/**< data transfer error */
+#define	D64_XS1_XE_DESRE	0x40000000	/**< descriptor read error */
+#define	D64_XS1_XE_COREE	0x50000000	/**< core error */
+
+/* receive channel control */
+#define	D64_RC_RE		0x00000001	/**< receive enable */
+#define	D64_RC_RO_MASK		0x000000fe	/**< receive frame offset */
+#define	D64_RC_RO_SHIFT		1
+#define	D64_RC_FM		0x00000100	/**< direct fifo receive (pio) mode */
+#define	D64_RC_SH		0x00000200	/**< separate rx header descriptor enable */
+#define	D64_RC_SHIFT		9	/**< separate rx header descriptor enable */
+#define	D64_RC_OC		0x00000400	/**< overflow continue */
+#define	D64_RC_PD		0x00000800	/**< parity check disable */
+#define D64_RC_SA		0x00002000	/**< select active */
+#define D64_RC_GE		0x00004000	/**< Glom enable */
+#define	D64_RC_AE		0x00030000	/**< address extension bits */
+#define	D64_RC_AE_SHIFT		16
+#define D64_RC_BL_MASK		0x001C0000	/**< BurstLen bits */
+#define D64_RC_BL_SHIFT		18
+#define D64_RC_PC_MASK		0x00E00000	/**< Prefetch control */
+#define D64_RC_PC_SHIFT		21
+#define D64_RC_PT_MASK		0x03000000	/**< Prefetch threshold */
+#define D64_RC_PT_SHIFT		24
+#define D64_RC_WAITCMP_MASK	0x00001000
+#define D64_RC_WAITCMP_SHIFT	12
+
+/* flags for dma controller */
+#define DMA_CTRL_PEN		(1 << 0)	/**< parity enable */
+#define DMA_CTRL_ROC		(1 << 1)	/**< rx overflow continue */
+#define DMA_CTRL_RXMULTI	(1 << 2)	/**< allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED	(1 << 3)	/**< Unframed Rx/Tx data */
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)	/**< DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE	(1 << 6)	/**< always single buffer */
+#define DMA_CTRL_SDIO_RXGLOM	(1 << 7)	/**< DMA Rx glom is enabled */
+
+/* receive descriptor table pointer */
+#define	D64_RP_LD_MASK		0x00001fff	/**< last valid descriptor */
+
+/* receive channel status */
+#define	D64_RS0_CD_MASK		(di->d64_rs0_cd_mask)	/**< current descriptor pointer */
+#define	D64_RS0_RS_MASK		0xf0000000     	/**< receive state */
+#define	D64_RS0_RS_SHIFT		28
+#define	D64_RS0_RS_DISABLED	0x00000000	/**< disabled */
+#define	D64_RS0_RS_ACTIVE	0x10000000	/**< active */
+#define	D64_RS0_RS_IDLE		0x20000000	/**< idle wait */
+#define	D64_RS0_RS_STOPPED	0x30000000	/**< stopped */
+#define	D64_RS0_RS_SUSP		0x40000000	/**< suspend pending */
+
+#define	D64_RS1_AD_MASK		(di->d64_rs1_ad_mask)	/* active descriptor pointer */
+#define	D64_RS1_RE_MASK		0xf0000000	/* receive errors */
+#define	D64_RS1_RE_SHIFT		28
+#define	D64_RS1_RE_NOERR	0x00000000	/**< no error */
+#define	D64_RS1_RE_DPO		0x10000000	/**< descriptor protocol error */
+#define	D64_RS1_RE_DFU		0x20000000	/**< data fifo overflow */
+#define	D64_RS1_RE_DTE		0x30000000	/**< data transfer error */
+#define	D64_RS1_RE_DESRE	0x40000000	/**< descriptor read error */
+#define	D64_RS1_RE_COREE	0x50000000	/**< core error */
+
+/* fifoaddr */
+#define	D64_FA_OFF_MASK		0xffff		/**< offset */
+#define	D64_FA_SEL_MASK		0xf0000		/**< select */
+#define	D64_FA_SEL_SHIFT	16
+#define	D64_FA_SEL_XDD		0x00000		/**< transmit dma data */
+#define	D64_FA_SEL_XDP		0x10000		/**< transmit dma pointers */
+#define	D64_FA_SEL_RDD		0x40000		/**< receive dma data */
+#define	D64_FA_SEL_RDP		0x50000		/**< receive dma pointers */
+#define	D64_FA_SEL_XFD		0x80000		/**< transmit fifo data */
+#define	D64_FA_SEL_XFP		0x90000		/**< transmit fifo pointers */
+#define	D64_FA_SEL_RFD		0xc0000		/**< receive fifo data */
+#define	D64_FA_SEL_RFP		0xd0000		/**< receive fifo pointers */
+#define	D64_FA_SEL_RSD		0xe0000		/**< receive frame status data */
+#define	D64_FA_SEL_RSP		0xf0000		/**< receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS	0x0ff00000		/**< core specific flags */
+#define	D64_CTRL1_NOTPCIE	((uint32)1 << 18)	/**< burst size control */
+#define	D64_CTRL1_EOT		((uint32)1 << 28)	/**< end of descriptor table */
+#define	D64_CTRL1_IOC		((uint32)1 << 29)	/**< interrupt on completion */
+#define	D64_CTRL1_EOF		((uint32)1 << 30)	/**< end of frame */
+#define	D64_CTRL1_SOF		((uint32)1 << 31)	/**< start of frame */
+
+/* descriptor control flags 2 */
+#define	D64_CTRL2_BC_MASK	0x00007fff /**< buffer byte count; real data len must be <= 16KB */
+#define	D64_CTRL2_AE		0x00030000 /**< address extension bits */
+#define	D64_CTRL2_AE_SHIFT	16
+#define D64_CTRL2_PARITY	0x00040000      /* parity bit */
+
+/** control flags in the range [27:20] are core-specific and not defined here */
+#define	D64_CTRL_CORE_MASK	0x0ff00000
+
+#define D64_RX_FRM_STS_LEN	0x0000ffff	/**< frame length mask */
+#define D64_RX_FRM_STS_OVFL	0x00800000	/**< RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT	0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/**< core-dependent data type */
+
+/** receive frame status */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} dma_rxh_t;
+
+#endif	/* _sbhnddma_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 0000000..0ffc97b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,139 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbpcmcia.h 616054 2016-01-29 13:22:24Z $
+ */
+
+#ifndef	_SBPCMCIA_H
+#define	_SBPCMCIA_H
+
+/* All the addresses that are offsets in attribute space are divided
+ * by two to account for the fact that odd bytes are invalid in
+ * attribute space and our read/write routines make the space appear
+ * as if they didn't exist. Still we want to show the original numbers
+ * as documented in the hnd_pcmcia core manual.
+ */
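As a concrete instance of the divide-by-two convention just described: an offset documented as 0x700 in the hnd_pcmcia core manual is accessed at index 0x700 / 2 = 0x380, which is what PCMCIA_FCR below expands to. The helper is purely illustrative and not part of the header.

/* Illustrative helper (not part of the header): map a documented
 * attribute-space offset to the even-bytes-only index used below,
 * e.g. 0x700 -> 0x380. */
static inline unsigned int attr_index(unsigned int documented_offset)
{
	return documented_offset / 2;
}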
+
+/* PCMCIA Function Configuration Registers */
+#define	PCMCIA_FCR		(0x700 / 2)
+
+#define	FCR0_OFF		0
+#define	FCR1_OFF		(0x40 / 2)
+#define	FCR2_OFF		(0x80 / 2)
+#define	FCR3_OFF		(0xc0 / 2)
+
+#define	PCMCIA_FCR0		(0x700 / 2)
+#define	PCMCIA_FCR1		(0x740 / 2)
+#define	PCMCIA_FCR2		(0x780 / 2)
+#define	PCMCIA_FCR3		(0x7c0 / 2)
+
+/* Standard PCMCIA FCR registers */
+
+#define	PCMCIA_COR		0
+
+#define	COR_RST			0x80
+#define	COR_LEV			0x40
+#define	COR_IRQEN		0x04
+#define	COR_BLREN		0x01
+#define	COR_FUNEN		0x01
+
+
+#define	PCICIA_FCSR		(2 / 2)
+#define	PCICIA_PRR		(4 / 2)
+#define	PCICIA_SCR		(6 / 2)
+#define	PCICIA_ESR		(8 / 2)
+
+
+#define PCM_MEMOFF		0x0000
+#define F0_MEMOFF		0x1000
+#define F1_MEMOFF		0x2000
+#define F2_MEMOFF		0x3000
+#define F3_MEMOFF		0x4000
+
+/* Memory base in the function fcr's */
+#define MEM_ADDR0		(0x728 / 2)
+#define MEM_ADDR1		(0x72a / 2)
+#define MEM_ADDR2		(0x72c / 2)
+
+/* PCMCIA base plus Srom access in fcr0: */
+#define PCMCIA_ADDR0		(0x072e / 2)
+#define PCMCIA_ADDR1		(0x0730 / 2)
+#define PCMCIA_ADDR2		(0x0732 / 2)
+
+#define MEM_SEG			(0x0734 / 2)
+#define SROM_CS			(0x0736 / 2)
+#define SROM_DATAL		(0x0738 / 2)
+#define SROM_DATAH		(0x073a / 2)
+#define SROM_ADDRL		(0x073c / 2)
+#define SROM_ADDRH		(0x073e / 2)
+#define	SROM_INFO2		(0x0772 / 2)	/* Corerev >= 2 && <= 5 */
+#define	SROM_INFO		(0x07be / 2)	/* Corerev >= 6 */
+
+/*  Values for srom_cs: */
+#define SROM_IDLE		0
+#define SROM_WRITE		1
+#define SROM_READ		2
+#define SROM_WEN		4
+#define SROM_WDS		7
+#define SROM_DONE		8
+
+/* Fields in srom_info: */
+#define	SRI_SZ_MASK		0x03
+#define	SRI_BLANK		0x04
+#define	SRI_OTP			0x80
+
+
+#define SROM16K_BANK_SEL_MASK		(3 << 11)
+#define SROM16K_BANK_SHFT_MASK		11
+#define SROM16K_ADDR_SEL_MASK	((1 << SROM16K_BANK_SHFT_MASK) - 1)
+
+
+
+/* Standard tuples we know about */
+
+#define CISTPL_NULL		0x00
+#define	CISTPL_END		0xff		/* End of the CIS tuple chain */
+
+
+#define	CISTPL_BRCM_HNBU	0x80
+
+
+#define HNBU_BOARDREV		0x02	/* One byte board revision */
+
+
+#define HNBU_BOARDTYPE		0x1b	/* 2 bytes; boardtype */
+
+
+#define HNBU_HNBUCIS		0x1d	/* what follows is proprietary HNBU CIS format */
+
+
+/* sbtmstatelow */
+#define SBTML_INT_ACK		0x40000		/* ack the sb interrupt */
+#define SBTML_INT_EN		0x20000		/* enable sb interrupt */
+
+/* sbtmstatehigh */
+#define SBTMH_INT_STATUS	0x40000		/* sb interrupt status */
+#endif	/* _SBPCMCIA_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 0000000..68707c4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,189 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsdio.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef	_SBSDIO_H
+#define	_SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION		3	/* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS			0x10000		/* sprom command and status */
+#define SBSDIO_SPROM_INFO		0x10001		/* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW		0x10002		/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH		0x10003 	/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW		0x10004		/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH		0x10005		/* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA		0x10006		/* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN		0x10007		/* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK		0x10008		/* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL		0x10009		/* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW		0x1000A		/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID		0x1000B		/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH		0x1000C		/* SB Address Window High (b31:b24)    */
+#define SBSDIO_FUNC1_FRAMECTRL		0x1000D		/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR		0x1000E		/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 	0x1000F		/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO		0x10019		/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI		0x1001A		/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO		0x1001B		/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI		0x1001C		/* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL	0x1001D		/* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START	0x10000 	/* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT	0x1001C 	/* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL			0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK		0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT	0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK		0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT		1
+#define SBSDIO_FUNC1_SLEEPCSR			0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK		0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT		0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN		1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK	0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT	1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE		0
+#define SBSDIO_SPROM_WRITE		1
+#define SBSDIO_SPROM_READ		2
+#define SBSDIO_SPROM_WEN		4
+#define SBSDIO_SPROM_WDS		7
+#define SBSDIO_SPROM_DONE		8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK			0x03		/* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK			0x04		/* deprecated in corerev 6 */
+#define	SROM_OTP			0x80		/* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL		0x01		/* or'd with onchip xtal_pu,
+							 * 1: power on oscillator
+							 * (for 4318 only)
+							 */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK		0x7f		/* number of words - 1 for sd device
+							 * to wait before sending data to host
+							 */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When the RX FIFO has fewer entries than this and MBE is set,
+ * the busy signal is asserted between data blocks.
+ */
+#define SBSDIO_MESBUSYCTRL_MASK		0x7f
+#define SBSDIO_MESBUSYCTRL_ENAB		0x80		/* Enable busy capability for MES access */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY		0x01		/* 1: device will assert busy signal when
+							 * receiving CMD53
+							 */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02		/* 1: assertion of sdio interrupt is
+							 * synchronous to the sdio clock
+							 */
+#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04		/* 1: mask all interrupts to host
+							 * except the chipActive (rev 8)
+							 */
+#define SBSDIO_DEVCTL_PADS_ISO		0x08		/* 1: isolate internal sdio signals, put
+							 * external pads in tri-state; requires
+							 * sdio bus power cycle to clear (rev 9)
+							 */
+#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10  /* Enable function 2 tx for each block */
+#define SBSDIO_DEVCTL_F2WM_ENAB		0x10		/* Enable F2 Watermark */
+#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 	0x20		/* Isolate sdio clk and cmd (non-data) */
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP		0x01		/* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT			0x02		/* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP		0x04		/* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ		0x08		/* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ		0x10		/* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20		/* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL		0x40		/* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL			0x80		/* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL		0x40
+#define SBSDIO_Rev8_ALP_AVAIL		0x80
+#define SBSDIO_CSR_MASK			0x1F
+
+#define SBSDIO_AVBITS			(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)		((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)		(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)		(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly)	(SBSDIO_ALPAV(regval) && \
+					(alponly ? 1 : SBSDIO_HTAV(regval)))
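A hedged sketch of how a host driver might use these bits: request the HT clock through SBSDIO_FUNC1_CHIPCLKCSR and poll until SBSDIO_CLKAV() reports it ready. The sdio_read_f1()/sdio_write_f1() byte accessors are assumptions standing in for whatever bus routines the driver actually provides.

/* Sketch only: request the HT clock and wait for it using the CHIPCLKCSR
 * bits above. sdio_read_f1()/sdio_write_f1() are hypothetical function 1
 * byte accessors, not part of this header. */
static int wait_for_ht_clock(void)
{
	unsigned char csr;
	int retries = 1000;

	sdio_write_f1(SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ);
	do {
		csr = sdio_read_f1(SBSDIO_FUNC1_CHIPCLKCSR);
		if (SBSDIO_CLKAV(csr, 0))	/* 0: require HT, not just ALP */
			return 0;
	} while (--retries);
	return -1;				/* clock never became available */
}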
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0		0x01		/* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1		0x02		/* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2		0x04		/* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD		0x08		/* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL		0x0f		/* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF		/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT	0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000		/* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK		0x80		/* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK		0xff		/* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK		0xffU		/* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK		0xffff8000	/* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON		0x1000		/* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT		0x200		/* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT       0x078           /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF		/* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN	6		/* manfid tuple length, include tuple,
+							 * link bytes
+							 */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET		0x8		/* 8 control bytes first, CIS starts from
+							 * 8th byte
+							 */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX	64		/* sdio byte mode: maximum length of one
+							 * data command
+							 */
+
+#define SBSDIO_CORE_ADDR_MASK		0x1FFFF		/* sdio core function one address mask */
+
+#endif	/* _SBSDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 0000000..8607d63
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,300 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsdpcmdev.h 610395 2016-01-06 22:52:57Z $
+ */
+
+#ifndef	_sbsdpcmdev_h_
+#define	_sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	dma64regs_t	xmt;		/* dma tx */
+	uint32 PAD[2];
+	dma64regs_t	rcv;		/* dma rx */
+	uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+	dma64p_t dma64regs[2];
+	dma64diag_t dmafifo;		/* DMA Diagnostic Regs, 0x280-0x28c */
+	uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+	dma32regp_t dma32regs[2];	/* dma tx & rx, 0x200-0x23c */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x240-0x24c */
+	uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+	dma32regp_t dmaregs;		/* DMA Regs, 0x200-0x21c, rev8 */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x220-0x22c */
+	uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+	uint32 corecontrol;		/* CoreControl, 0x000, rev8 */
+	uint32 corestatus;		/* CoreStatus, 0x004, rev8  */
+	uint32 PAD[1];
+	uint32 biststatus;		/* BistStatus, 0x00c, rev8  */
+
+	/* PCMCIA access */
+	uint16 pcmciamesportaladdr;	/* PcmciaMesPortalAddr, 0x010, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciamesportalmask;	/* PcmciaMesPortalMask, 0x014, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciawrframebc;		/* PcmciaWrFrameBC, 0x018, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciaunderflowtimer;	/* PcmciaUnderflowTimer, 0x01c, rev8   */
+	uint16 PAD[1];
+
+	/* interrupt */
+	uint32 intstatus;		/* IntStatus, 0x020, rev8   */
+	uint32 hostintmask;		/* IntHostMask, 0x024, rev8   */
+	uint32 intmask;			/* IntSbMask, 0x028, rev8   */
+	uint32 sbintstatus;		/* SBIntStatus, 0x02c, rev8   */
+	uint32 sbintmask;		/* SBIntMask, 0x030, rev8   */
+	uint32 funcintmask;		/* SDIO Function Interrupt Mask, SDIO rev4 */
+	uint32 PAD[2];
+	uint32 tosbmailbox;		/* ToSBMailbox, 0x040, rev8   */
+	uint32 tohostmailbox;		/* ToHostMailbox, 0x044, rev8   */
+	uint32 tosbmailboxdata;		/* ToSbMailboxData, 0x048, rev8   */
+	uint32 tohostmailboxdata;	/* ToHostMailboxData, 0x04c, rev8   */
+
+	/* synchronized access to registers in SDIO clock domain */
+	uint32 sdioaccess;		/* SdioAccess, 0x050, rev8   */
+	uint32 PAD[3];
+
+	/* PCMCIA frame control */
+	uint8 pcmciaframectrl;		/* pcmciaFrameCtrl, 0x060, rev8   */
+	uint8 PAD[3];
+	uint8 pcmciawatermark;		/* pcmciaWaterMark, 0x064, rev8   */
+	uint8 PAD[155];
+
+	/* interrupt batching control */
+	uint32 intrcvlazy;		/* IntRcvLazy, 0x100, rev8 */
+	uint32 PAD[3];
+
+	/* counters */
+	uint32 cmd52rd;			/* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+	uint32 cmd52wr;			/* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+	uint32 cmd53rd;			/* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+	uint32 cmd53wr;			/* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+	uint32 abort;			/* AbortCount, 0x120, rev8, SDIO: aborts */
+	uint32 datacrcerror;		/* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+	uint32 rdoutofsync;		/* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+	uint32 writebusy;		/* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+	uint32 readwait;		/* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+	uint32 readterm;		/* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+	uint32 writeterm;		/* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+	uint32 PAD[40];
+	uint32 clockctlstatus;		/* ClockCtlStatus, 0x1e0, rev8 */
+	uint32 PAD[1];
+	uint32 powerctl;		/* 0x1e8 */
+	uint32 PAD[5];
+
+	/* DMA engines */
+	volatile union {
+		pcmdma32_t pcm32;
+		sdiodma32_t sdiod32;
+		sdiodma64_t sdiod64;
+	} dma;
+
+	/* SDIO/PCMCIA CIS region */
+	char cis[512];			/* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+	/* PCMCIA function control registers */
+	char pcmciafcr[256];		/* PCMCIA FCR, 0x600-6ff, rev6 */
+	uint16 PAD[55];
+
+	/* PCMCIA backplane access */
+	uint16 backplanecsr;		/* BackplaneCSR, 0x76E, rev6 */
+	uint16 backplaneaddr0;		/* BackplaneAddr0, 0x770, rev6 */
+	uint16 backplaneaddr1;		/* BackplaneAddr1, 0x772, rev6 */
+	uint16 backplaneaddr2;		/* BackplaneAddr2, 0x774, rev6 */
+	uint16 backplaneaddr3;		/* BackplaneAddr3, 0x776, rev6 */
+	uint16 backplanedata0;		/* BackplaneData0, 0x778, rev6 */
+	uint16 backplanedata1;		/* BackplaneData1, 0x77a, rev6 */
+	uint16 backplanedata2;		/* BackplaneData2, 0x77c, rev6 */
+	uint16 backplanedata3;		/* BackplaneData3, 0x77e, rev6 */
+	uint16 PAD[31];
+
+	/* sprom "size" & "blank" info */
+	uint16 spromstatus;		/* SPROMStatus, 0x7BE, rev2 */
+	uint32 PAD[464];
+
+	/* Sonics SiliconBackplane registers */
+	sbconfig_t sbconfig;		/* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY		(1 << 0)	/* CIS Ready */
+#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal causes backplane reset */
+#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
+#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE	(1 << 4)	/* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL	(1 << 5)	/* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE	(1 << 0)	/* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV	(1 << 1)	/* 1=smartDev enabled */
+#define CS_F2ENABLED	(1 << 2)	/* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK	0x7fff	/* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK	0x7fff	/* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK	0xffff	/* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK		0x07ff	/* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
+#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
+#define	I_PC		(1 << 10)	/* descriptor error */
+#define	I_PD		(1 << 11)	/* data error */
+#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
+#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
+#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
+#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
+#define	I_RI		(1 << 16)	/* Receive Interrupt */
+#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
+#define	I_XI		(1 << 24)	/* Transmit Interrupt */
+#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
+#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
+#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
+#define I_CHIPACTIVE	(1 << 29)	/* chip transitioned from doze to active state */
+#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
+#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
+#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)	/* DMA Errors */
+#define I_DMA		(I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR	(1 << 8)	/* Backplane SError (write) */
+#define I_SB_RESPERR	(1 << 9)	/* Backplane Response Error (read) */
+#define I_SB_SPROMERR	(1 << 10)	/* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK	0x000000ff	/* Read/Write Data Mask */
+#define SDA_ADDR_MASK	0x000fff00	/* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT	8		/* Read/Write Address Shift */
+#define SDA_WRITE	0x01000000	/* Write bit  */
+#define SDA_READ	0x00000000	/* Write bit cleared for Read */
+#define SDA_BUSY	0x80000000	/* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE		0x000	/* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE	0x100	/* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE	0x200	/* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE	0x300	/* sdioAccess F1 core-specific register space */
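A minimal sketch of driving the sdioaccess register with the masks above, assuming a pointer to the sdpcmd_regs_t block defined earlier in this file; it is illustrative only, not the driver's actual access routine.

/* Sketch only: perform a write through the sdioaccess register using the
 * field masks above and poll the busy bit until the access completes. */
static void sdioaccess_write(sdpcmd_regs_t *regs, unsigned int addr,
	unsigned char data)
{
	unsigned int cmd = SDA_WRITE |
		((addr << SDA_ADDR_SHIFT) & SDA_ADDR_MASK) |
		(data & SDA_DATA_MASK);

	regs->sdioaccess = cmd;			/* struct is declared volatile */
	while (regs->sdioaccess & SDA_BUSY)
		;				/* wait for the access to complete */
}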
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA	0x006	/* ChipControlData */
+#define SDA_CHIPCONTROLENAB	0x007	/* ChipControlEnable */
+#define SDA_F2WATERMARK		0x008	/* Function 2 Watermark */
+#define SDA_DEVICECONTROL	0x009	/* DeviceControl */
+#define SDA_SBADDRLOW		0x00a	/* SbAddrLow */
+#define SDA_SBADDRMID		0x00b	/* SbAddrMid */
+#define SDA_SBADDRHIGH		0x00c	/* SbAddrHigh */
+#define SDA_FRAMECTRL		0x00d	/* FrameCtrl */
+#define SDA_CHIPCLOCKCSR	0x00e	/* ChipClockCSR */
+#define SDA_SDIOPULLUP		0x00f	/* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW	0x019	/* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH	0x01a	/* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW	0x01b	/* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH	0x01c	/* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK	0x7f	/* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK	0x80	/* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK	0xff	/* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK	0xff	/* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+#define SFC_CRC4WOOS	(1 << 2)	/* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL	(1 << 3)	/* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define PFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+
+/* intrcvlazy */
+#define	IRL_TO_MASK	0x00ffffff	/* timeout */
+#define	IRL_FC_MASK	0xff000000	/* frame count */
+#define	IRL_FC_SHIFT	24		/* frame count */
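A small illustrative helper for packing an IntRcvLazy value from a timeout and a frame count using the masks above; the function name and signature are assumptions, not part of the header.

/* Illustrative helper: compose an intrcvlazy value from a timeout and a
 * frame count, using the IRL_* masks defined above. */
static unsigned int intrcvlazy_val(unsigned int timeout, unsigned int framecnt)
{
	return (timeout & IRL_TO_MASK) |
	       ((framecnt << IRL_FC_SHIFT) & IRL_FC_MASK);
}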
+
+/* rx header */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC		0x0001		/* CRC error detected */
+#define RXF_WOOS	0x0002		/* write frame out of sync */
+#define RXF_WF_TERM	0x0004		/* write frame terminated */
+#define RXF_ABORT	0x0008		/* write frame aborted */
+#define RXF_DISCARD	(RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT)	/* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN	4	/* HW frametag: 2 bytes len, 2 bytes check val */
+
+#define SDPCM_HWEXT_LEN	8
+
+#endif	/* _sbsdpcmdev_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 0000000..b2c0188
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,205 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsocram.h 604712 2015-12-08 08:05:42Z $
+ */
+
+#ifndef	_SBSOCRAM_H
+#define	_SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/* Memcsocram core registers */
+typedef volatile struct sbsocramregs {
+	uint32	coreinfo;
+	uint32	bwalloc;
+	uint32	extracoreinfo;
+	uint32	biststat;
+	uint32	bankidx;
+	uint32	standbyctrl;
+
+	uint32	errlogstatus;	/* rev 6 */
+	uint32	errlogaddr;	/* rev 6 */
+	/* used for patching rev 3 & 5 */
+	uint32	cambankidx;
+	uint32	cambankstandbyctrl;
+	uint32	cambankpatchctrl;
+	uint32	cambankpatchtblbaseaddr;
+	uint32	cambankcmdreg;
+	uint32	cambankdatareg;
+	uint32	cambankmaskreg;
+	uint32	PAD[1];
+	uint32	bankinfo;	/* corerev 8 */
+	uint32	bankpda;
+	uint32	PAD[14];
+	uint32	extmemconfig;
+	uint32	extmemparitycsr;
+	uint32	extmemparityerrdata;
+	uint32	extmemparityerrcnt;
+	uint32	extmemwrctrlandsize;
+	uint32	PAD[84];
+	uint32	workaround;
+	uint32	pwrctl;		/* corerev >= 2 */
+	uint32	PAD[133];
+	uint32  sr_control;     /* corerev >= 15 */
+	uint32  sr_status;      /* corerev >= 15 */
+	uint32  sr_address;     /* corerev >= 15 */
+	uint32  sr_data;        /* corerev >= 15 */
+} sbsocramregs_t;
+
+#endif	/* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define	SR_COREINFO		0x00
+#define	SR_BWALLOC		0x04
+#define	SR_BISTSTAT		0x0c
+#define	SR_BANKINDEX		0x10
+#define	SR_BANKSTBYCTL		0x14
+#define SR_PWRCTL		0x1e8
+
+/* Coreinfo register */
+#define	SRCI_PT_MASK		0x00070000	/* corerev >= 6; port type[18:16] */
+#define	SRCI_PT_SHIFT		16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP		0
+#define SRCI_PT_AXI_OCP		1
+#define SRCI_PT_ARM7AHB_OCP	2
+#define SRCI_PT_CM3AHB_OCP	3
+#define SRCI_PT_AXI_AXI		4
+#define SRCI_PT_AHB_AXI		5
+/* corerev >= 3 */
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define SRCI_LRS_MASK		0x0f000000
+#define SRCI_LRS_SHIFT		24
+
+/* In corerev 0, the memory size is 2 to the power of the
+ * base plus 16 plus the contents of the memsize field plus 1.
+ */
+#define	SRCI_MS0_MASK		0xf
+#define SR_MS0_BASE		16
+
+/*
+ * In corerev 1 the bank size is 2 ^ (the bank size field + 14);
+ * the memory size is the number of banks times the bank size.
+ * The same applies to ROM size.
+ */
+#define	SRCI_ROMNB_MASK		0xf000
+#define	SRCI_ROMNB_SHIFT	12
+#define	SRCI_ROMBSZ_MASK	0xf00
+#define	SRCI_ROMBSZ_SHIFT	8
+#define	SRCI_SRNB_MASK		0xf0
+#define	SRCI_SRNB_SHIFT		4
+#define	SRCI_SRBSZ_MASK		0xf
+#define	SRCI_SRBSZ_SHIFT	0
+
+#define	SRCI_SRNB_MASK_EXT	0x100
+
+#define SR_BSZ_BASE		14
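A minimal sketch of the corerev 1 size computation the comment above describes, using only the coreinfo fields defined in this file; it illustrates the formula (banks times 2^(14 + bank-size field)) rather than reproducing the driver's own code.

/* Sketch only: corerev 1 memory size from coreinfo, per the comment above. */
static unsigned int socram_corerev1_size(unsigned int coreinfo)
{
	unsigned int nb  = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
	unsigned int bsz = (coreinfo & SRCI_SRBSZ_MASK) >> SRCI_SRBSZ_SHIFT;

	return nb * (1u << (SR_BSZ_BASE + bsz));	/* banks * 2^(14 + field) */
}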
+
+/* Standby control register */
+#define	SRSC_SBYOVR_MASK	0x80000000
+#define	SRSC_SBYOVR_SHIFT	31
+#define	SRSC_SBYOVRVAL_MASK	0x60000000
+#define	SRSC_SBYOVRVAL_SHIFT	29
+#define	SRSC_SBYEN_MASK		0x01000000	/* rev >= 3 */
+#define	SRSC_SBYEN_SHIFT	24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK	0x00000010	/* rev >= 3 */
+#define SRPC_PMU_STBYDIS_SHIFT	4
+#define SRPC_STBYOVRVAL_MASK	0x00000008
+#define SRPC_STBYOVRVAL_SHIFT	3
+#define SRPC_STBYOVR_MASK	0x00000007
+#define SRPC_STBYOVR_SHIFT	0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK   0x000000F0
+#define SRECC_NUM_BANKS_SHIFT  4
+#define SRECC_BANKSIZE_MASK    0x0000000F
+#define SRECC_BANKSIZE_SHIFT   0
+
+#define SRECC_BANKSIZE(value)	 (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS   0x0001FFFC
+#define SRP_VALID     0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE  0x00020000
+#define SRCMD_READ   0x00010000
+#define SRCMD_DONE   0x80000000
+
+#define SRCMD_DONE_DLY	1000
+
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_SZMASK		0x7f
+#define SOCRAM_BANKIDX_ROM_MASK		0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM		0
+#define SOCRAM_MEMTYPE_R0M		1
+#define SOCRAM_MEMTYPE_DEVRAM		2
+
+#define	SOCRAM_BANKINFO_REG		0x40
+#define	SOCRAM_BANKIDX_REG		0x10
+#define	SOCRAM_BANKINFO_STDBY_MASK	0x400
+#define	SOCRAM_BANKINFO_STDBY_TIMER	0x800
+
+/* bankinfo rev >= 10 */
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT		13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK		0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT		14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK		0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT		15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK		0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT		16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK		0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT		17
+#define SOCRAM_BANKINFO_PDASZ_MASK		0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT	24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK	0x01000000
+
+/* extracoreinfo register */
+#define SOCRAM_DEVRAMBANK_MASK		0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT		12
+
+/* bank info to calculate bank size */
+#define   SOCRAM_BANKINFO_SZBASE          8192
+#define SOCRAM_BANKSIZE_SHIFT         13      /* SOCRAM_BANKINFO_SZBASE */
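Assuming the usual encoding where the bankinfo size field holds the number of 8 KB units minus one, the corerev >= 8 bank size can be derived as sketched below; the helper is illustrative and the encoding assumption should be checked against the driver.

/* Sketch only, assuming the size field of bankinfo holds
 * (number of 8 KB units - 1). */
static unsigned int socram_banksize(unsigned int bankinfo)
{
	return ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1) << SOCRAM_BANKSIZE_SHIFT;
}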
+
+
+#endif	/* _SBSOCRAM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsysmem.h b/drivers/net/wireless/bcmdhd/include/sbsysmem.h
new file mode 100644
index 0000000..df26399
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsysmem.h
@@ -0,0 +1,180 @@
+/*
+ * SiliconBackplane System Memory core
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsysmem.h 563229 2015-06-12 04:50:06Z $
+ */
+
+#ifndef	_SBSYSMEM_H
+#define	_SBSYSMEM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/* sysmem core registers */
+typedef volatile struct sysmemregs {
+	uint32	coreinfo;
+	uint32	bwalloc;
+	uint32	extracoreinfo;
+	uint32	biststat;
+	uint32	bankidx;
+	uint32	standbyctrl;
+
+	uint32	errlogstatus;
+	uint32	errlogaddr;
+
+	uint32	cambankidx;
+	uint32	cambankstandbyctrl;
+	uint32	cambankpatchctrl;
+	uint32	cambankpatchtblbaseaddr;
+	uint32	cambankcmdreg;
+	uint32	cambankdatareg;
+	uint32	cambankmaskreg;
+	uint32	PAD[1];
+	uint32	bankinfo;
+	uint32	PAD[15];
+	uint32	extmemconfig;
+	uint32	extmemparitycsr;
+	uint32	extmemparityerrdata;
+	uint32	extmemparityerrcnt;
+	uint32	extmemwrctrlandsize;
+	uint32	PAD[84];
+	uint32	workaround;
+	uint32	pwrctl;
+	uint32	PAD[133];
+	uint32  sr_control;
+	uint32  sr_status;
+	uint32  sr_address;
+	uint32  sr_data;
+} sysmemregs_t;
+
+#endif	/* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define	SR_COREINFO		0x00
+#define	SR_BWALLOC		0x04
+#define	SR_BISTSTAT		0x0c
+#define	SR_BANKINDEX		0x10
+#define	SR_BANKSTBYCTL		0x14
+#define SR_PWRCTL		0x1e8
+
+/* Coreinfo register */
+#define	SRCI_PT_MASK		0x00070000	/* port type[18:16] */
+#define	SRCI_PT_SHIFT		16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP		0
+#define SRCI_PT_AXI_OCP		1
+#define SRCI_PT_ARM7AHB_OCP	2
+#define SRCI_PT_CM3AHB_OCP	3
+#define SRCI_PT_AXI_AXI		4
+#define SRCI_PT_AHB_AXI		5
+
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define SRCI_LRS_MASK		0x0f000000
+#define SRCI_LRS_SHIFT		24
+
+/* In corerev 0, the memory size is 2 to the power of the
+ * base plus 16 plus the contents of the memsize field plus 1.
+ */
+#define	SRCI_MS0_MASK		0xf
+#define SR_MS0_BASE		16
+
+/*
+ * In corerev 1 the bank size is 2 ^ (the bank size field + 14);
+ * the memory size is the number of banks times the bank size.
+ * The same applies to ROM size.
+ */
+#define	SYSMEM_SRCI_ROMNB_MASK	0x3e0
+#define	SYSMEM_SRCI_ROMNB_SHIFT	5
+#define	SYSMEM_SRCI_SRNB_MASK	0x1f
+#define	SYSMEM_SRCI_SRNB_SHIFT	0
+
+/* Standby control register */
+#define	SRSC_SBYOVR_MASK	0x80000000
+#define	SRSC_SBYOVR_SHIFT	31
+#define	SRSC_SBYOVRVAL_MASK	0x60000000
+#define	SRSC_SBYOVRVAL_SHIFT	29
+#define	SRSC_SBYEN_MASK		0x01000000
+#define	SRSC_SBYEN_SHIFT	24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK	0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT	4
+#define SRPC_STBYOVRVAL_MASK	0x00000008
+#define SRPC_STBYOVRVAL_SHIFT	3
+#define SRPC_STBYOVR_MASK	0x00000007
+#define SRPC_STBYOVR_SHIFT	0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK   0x000000F0
+#define SRECC_NUM_BANKS_SHIFT  4
+#define SRECC_BANKSIZE_MASK    0x0000000F
+#define SRECC_BANKSIZE_SHIFT   0
+
+#define SRECC_BANKSIZE(value)	 (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS   0x0001FFFC
+#define SRP_VALID     0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE  0x00020000
+#define SRCMD_READ   0x00010000
+#define SRCMD_DONE   0x80000000
+
+#define SRCMD_DONE_DLY	1000
+
+/* bankidx and bankinfo reg defines */
+#define SYSMEM_BANKINFO_SZMASK		0x7f
+#define SYSMEM_BANKIDX_ROM_MASK		0x80
+
+#define	SYSMEM_BANKINFO_REG		0x40
+#define	SYSMEM_BANKIDX_REG		0x10
+#define	SYSMEM_BANKINFO_STDBY_MASK	0x200
+#define	SYSMEM_BANKINFO_STDBY_TIMER	0x400
+
+#define SYSMEM_BANKINFO_SLPSUPP_SHIFT		14
+#define SYSMEM_BANKINFO_SLPSUPP_MASK		0x4000
+#define SYSMEM_BANKINFO_PDASZ_SHIFT		16
+#define SYSMEM_BANKINFO_PDASZ_MASK		0x001F0000
+
+/* extracoreinfo register */
+#define SYSMEM_DEVRAMBANK_MASK		0xF000
+#define SYSMEM_DEVRAMBANK_SHIFT		12
+
+/* bank info to calculate bank size */
+#define	SYSMEM_BANKINFO_SZBASE          8192
+#define SYSMEM_BANKSIZE_SHIFT		13      /* SYSMEM_BANKINFO_SZBASE */
+
+#endif	/* _SBSYSMEM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 0000000..8f77e41
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,630 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdio.h 644725 2016-06-21 12:26:04Z $
+ */
+
+#ifndef	_SDIO_H
+#define	_SDIO_H
+
+#ifdef BCMSDIO
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+	uint8	cccr_sdio_rev;		/* RO, cccr and sdio revision */
+	uint8	sd_rev;			/* RO, sd spec revision */
+	uint8	io_en;			/* I/O enable */
+	uint8	io_rdy;			/* I/O ready reg */
+	uint8	intr_ctl;		/* Master and per function interrupt enable control */
+	uint8	intr_status;		/* RO, interrupt pending status */
+	uint8	io_abort;		/* read/write abort or reset all functions */
+	uint8	bus_inter;		/* bus interface control */
+	uint8	capability;		/* RO, card capability */
+
+	uint8	cis_base_low;		/* 0x9 RO, common CIS base address, LSB */
+	uint8	cis_base_mid;
+	uint8	cis_base_high;		/* 0xB RO, common CIS base address, MSB */
+
+	/* suspend/resume registers */
+	uint8	bus_suspend;		/* 0xC */
+	uint8	func_select;		/* 0xD */
+	uint8	exec_flag;		/* 0xE */
+	uint8	ready_flag;		/* 0xF */
+
+	uint8	fn0_blk_size[2];	/* 0x10(LSB), 0x11(MSB) */
+
+	uint8	power_control;		/* 0x12 (SDIO version 1.10) */
+
+	uint8	speed_control;		/* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV			0x00
+#define SDIOD_CCCR_SDREV		0x01
+#define SDIOD_CCCR_IOEN			0x02
+#define SDIOD_CCCR_IORDY		0x03
+#define SDIOD_CCCR_INTEN		0x04
+#define SDIOD_CCCR_INTPEND		0x05
+#define SDIOD_CCCR_IOABORT		0x06
+#define SDIOD_CCCR_BICTRL		0x07
+#define SDIOD_CCCR_CAPABLITIES		0x08
+#define SDIOD_CCCR_CISPTR_0		0x09
+#define SDIOD_CCCR_CISPTR_1		0x0A
+#define SDIOD_CCCR_CISPTR_2		0x0B
+#define SDIOD_CCCR_BUSSUSP		0x0C
+#define SDIOD_CCCR_FUNCSEL		0x0D
+#define SDIOD_CCCR_EXECFLAGS		0x0E
+#define SDIOD_CCCR_RDYFLAGS		0x0F
+#define SDIOD_CCCR_BLKSIZE_0		0x10
+#define SDIOD_CCCR_BLKSIZE_1		0x11
+#define SDIOD_CCCR_POWER_CONTROL	0x12
+#define SDIOD_CCCR_SPEED_CONTROL	0x13
+#define SDIOD_CCCR_UHSI_SUPPORT		0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH	0x15
+#define SDIOD_CCCR_INTR_EXTN		0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP		0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT	0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC	0x08
+#define SDIOD_CCCR_BRCM_CARDCTL			0xf1
+#define SDIOD_CCCR_BRCM_SEPINT			0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK	0xf0	/* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK	0x0f	/* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0	0x40	/* SDIO spec version 3.0 */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK		0x0f	/* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1	0x02	/* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2	0x04	/* function 2 I/O enable */
+#if defined(BT_OVER_SDIO)
+#define SDIO_FUNC_ENABLE_3	0x08	/* function 3 I/O enable */
+#define SDIO_FUNC_DISABLE_3	0xF0	/* function 3 I/O disable mask */
+#endif /* defined (BT_OVER_SDIO) */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1	0x02	/* function 1 I/O ready */
+#define SDIO_FUNC_READY_2	0x04	/* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN	0x1	/* interrupt enable master */
+#define INTR_CTL_FUNC1_EN	0x2	/* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN	0x4	/* interrupt enable for function 2 */
+#if defined(BT_OVER_SDIO)
+#define INTR_CTL_FUNC3_EN	0x8	/* interrupt enable for function 3 */
+#endif /* defined (BT_OVER_SDIO) */
+/* intr_status */
+#define INTR_STATUS_FUNC1	0x2	/* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2	0x4	/* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL	0x08	/* I/O card reset */
+#define IO_ABORT_FUNC_MASK	0x07	/* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS	0x80	/* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP	0x40	/* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN	0x20	/* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK	0x03	/* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT	0x02	/* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT	0x00	/* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS		0x80	/* 4-bit support for low speed card */
+#define SDIO_CAP_LSC		0x40	/* low speed card */
+#define SDIO_CAP_E4MI		0x20	/* enable interrupt between blocks of data in 4-bit mode */
+#define SDIO_CAP_S4MI		0x10	/* support interrupt between blocks of data in 4-bit mode */
+#define SDIO_CAP_SBS		0x08	/* support suspend/resume */
+#define SDIO_CAP_SRW		0x04	/* support read wait */
+#define SDIO_CAP_SMB		0x02	/* support multi-block transfer */
+#define SDIO_CAP_SDC		0x01	/* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC		0x01	/* supports master power control (RO) */
+#define SDIO_POWER_EMPC		0x02	/* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS		0x01	/* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS		0x02	/* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50	   0x08
+
+/* for setting bus speed in card: reg 0x13 */
+#define SDIO_BUS_SPEED_UHSISEL_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S	1
+
+/* for getting bus speed cap in card: reg 0x14 */
+#define SDIO_BUS_SPEED_UHSICAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S	0
+
+/* for getting driver type CAP in card: reg 0x15 */
+#define SDIO_BUS_DRVR_TYPE_CAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S	0
+
+/* for setting driver type selection in card: reg 0x15 */
+#define SDIO_BUS_DRVR_TYPE_SEL_M	BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S	4
+
+/* for getting async int support in card: reg 0x16 */
+#define SDIO_BUS_ASYNCINT_CAP_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S	0
+
+/* for setting async int selection in card: reg 0x16 */
+#define SDIO_BUS_ASYNCINT_SEL_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S	1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK	0x01	/* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE		0x02	/* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI	0x04	/* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+	uint8	devctr;			/* device interface, CSA control */
+	uint8	ext_dev;		/* extended standard I/O device type code */
+	uint8	pwr_sel;		/* power selection support */
+	uint8	PAD[6];			/* reserved */
+
+	uint8	cis_low;		/* CIS LSB */
+	uint8	cis_mid;
+	uint8	cis_high;		/* CIS MSB */
+	uint8	csa_low;		/* code storage area, LSB */
+	uint8	csa_mid;
+	uint8	csa_high;		/* code storage area, MSB */
+	uint8	csa_dat_win;		/* data access window to function */
+
+	uint8	fnx_blk_size[2];	/* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS			8
+#define SDIOD_MAX_IOFUNCS		7
+
+/* SDIO Device FBR Start Address  */
+#define SDIOD_FBR_STARTADDR		0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE			0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n)		((n) * 0x100)
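+/*
+ * Illustrative note (not part of the original header): with this macro,
+ * function 2's FBR block starts at SDIOD_FBR_BASE(2) == 0x200, so e.g. its
+ * block-size LSB lives at SDIOD_FBR_BASE(2) + SDIOD_FBR_BLKSIZE_0.
+ */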
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR		0x00	/* basic info for function */
+#define SDIOD_FBR_EXT_DEV		0x01	/* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL		0x02	/* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0		0x09
+#define SDIOD_FBR_CISPTR_1		0x0A
+#define SDIOD_FBR_CISPTR_2		0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0		0x0C
+#define SDIOD_FBR_CSA_ADDR_1		0x0D
+#define SDIOD_FBR_CSA_ADDR_2		0x0E
+#define SDIOD_FBR_CSA_DATA		0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0		0x10
+#define SDIOD_FBR_BLKSIZE_1		0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC	0x0f	/* device interface code */
+#define SDIOD_FBR_DECVTR_CSA	0x40	/* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN	0x80	/* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE		0	/* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART		1
+#define SDIOD_DIC_BLUETOOTH_A	2
+#define SDIOD_DIC_BLUETOOTH_B	3
+#define SDIOD_DIC_GPS		4
+#define SDIOD_DIC_CAMERA	5
+#define SDIOD_DIC_PHS		6
+#define SDIOD_DIC_WLAN		7
+#define SDIOD_DIC_EXT		0xf	/* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS	0x01	/* supports power selection */
+#define SDIOD_PWR_SEL_EPS	0x02	/* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0		0
+#define SDIO_FUNC_1		1
+#define SDIO_FUNC_2		2
+#define SDIO_FUNC_4		4
+#define SDIO_FUNC_5		5
+#define SDIO_FUNC_6		6
+#define SDIO_FUNC_7		7
+
+#define SD_CARD_TYPE_UNKNOWN	0	/* bad type or unrecognized */
+#define SD_CARD_TYPE_IO		1	/* IO only card */
+#define SD_CARD_TYPE_MEMORY	2	/* memory only card */
+#define SD_CARD_TYPE_COMBO	3	/* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE	2048	/* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE	1	/* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE		31
+#define CARDREG_STATUS_BIT_COMCRCERROR		23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND	22
+#define CARDREG_STATUS_BIT_ERROR		19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3	12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2	11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1	10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0	9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR	4
+
+
+
+#define SD_CMD_GO_IDLE_STATE		0	/* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND		1
+#define SD_CMD_MMC_SET_RCA		3
+#define SD_CMD_IO_SEND_OP_COND		5	/* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD	7
+#define SD_CMD_SEND_CSD			9
+#define SD_CMD_SEND_CID			10
+#define SD_CMD_STOP_TRANSMISSION	12
+#define SD_CMD_SEND_STATUS		13
+#define SD_CMD_GO_INACTIVE_STATE	15
+#define SD_CMD_SET_BLOCKLEN		16
+#define SD_CMD_READ_SINGLE_BLOCK	17
+#define SD_CMD_READ_MULTIPLE_BLOCK	18
+#define SD_CMD_WRITE_BLOCK		24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK	25
+#define SD_CMD_PROGRAM_CSD		27
+#define SD_CMD_SET_WRITE_PROT		28
+#define SD_CMD_CLR_WRITE_PROT		29
+#define SD_CMD_SEND_WRITE_PROT		30
+#define SD_CMD_ERASE_WR_BLK_START	32
+#define SD_CMD_ERASE_WR_BLK_END		33
+#define SD_CMD_ERASE			38
+#define SD_CMD_LOCK_UNLOCK		42
+#define SD_CMD_IO_RW_DIRECT		52	/* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED		53	/* mandatory for SDIO */
+#define SD_CMD_APP_CMD			55
+#define SD_CMD_GEN_CMD			56
+#define SD_CMD_READ_OCR			58
+#define SD_CMD_CRC_ON_OFF		59	/* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS		13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS	22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT	23
+#define SD_ACMD_SD_SEND_OP_COND		41
+#define SD_ACMD_SET_CLR_CARD_DETECT	42
+#define SD_ACMD_SEND_SCR		51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ		0   /* Read_Write: Read */
+#define SD_IO_OP_WRITE		1   /* Read_Write: Write */
+#define SD_IO_RW_NORMAL		0   /* no RAW */
+#define SD_IO_RW_RAW		1   /* RAW */
+#define SD_IO_BYTE_MODE		0   /* Byte Mode */
+#define SD_IO_BLOCK_MODE	1   /* BlockMode */
+#define SD_IO_FIXED_ADDRESS	0   /* fix Address */
+#define SD_IO_INCREMENT_ADDRESS	1   /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+	 (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+	 (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
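+/*
+ * Illustrative sketch (not part of the original header): a CMD52 argument
+ * that writes SDIO_FUNC_ENABLE_1 to the CCCR I/O-enable register of
+ * function 0 could be built with the macro above as
+ *
+ *	uint32 arg = SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, SD_IO_RW_NORMAL,
+ *			SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1);
+ */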
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE			0
+#define SD_RSP_NO_1			1
+#define SD_RSP_NO_2			2
+#define SD_RSP_NO_3			3
+#define SD_RSP_NO_4			4
+#define SD_RSP_NO_5			5
+#define SD_RSP_NO_6			6
+
+	/* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR	0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND	0x4000
+#define SD_RSP_MR6_ERROR		0x2000
+
+	/* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT			0x80
+#define SD_RSP_MR1_PARAMETER_ERROR	0x40
+#define SD_RSP_MR1_RFU5			0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR	0x10
+#define SD_RSP_MR1_COM_CRC_ERROR	0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND	0x04
+#define SD_RSP_MR1_RFU1			0x02
+#define SD_RSP_MR1_IDLE_STATE		0x01
+
+	/* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR		0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND	0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1	0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0	0x10
+#define SD_RSP_R5_ERROR			0x08
+#define SD_RSP_R5_RFU			0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR	0x02
+#define SD_RSP_R5_OUT_OF_RANGE		0x01
+
+#define SD_RSP_R5_ERRBITS		0xCB
+
+
+/* ------------------------------------------------
+ *  SDIO Commands and responses
+ *
+ *  I/O only commands are:
+ *      CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0		0
+#define SDIOH_CMD_3		3
+#define SDIOH_CMD_5		5
+#define SDIOH_CMD_7		7
+#define SDIOH_CMD_11		11
+#define SDIOH_CMD_14		14
+#define SDIOH_CMD_15		15
+#define SDIOH_CMD_19		19
+#define SDIOH_CMD_52		52
+#define SDIOH_CMD_53		53
+#define SDIOH_CMD_59		59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE		0
+#define SDIOH_RSP_R1		1
+#define SDIOH_RSP_R2		2
+#define SDIOH_RSP_R3		3
+#define SDIOH_RSP_R4		4
+#define SDIOH_RSP_R5		5
+#define SDIOH_RSP_R6		6
+
+/*
+ *  SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS	0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * 	CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M		BITFIELD_MASK(24)
+#define CMD5_OCR_S		0
+
+#define CMD5_S18R_M		BITFIELD_MASK(1)
+#define CMD5_S18R_S		24
+
+#define CMD7_RCA_M		BITFIELD_MASK(16)
+#define CMD7_RCA_S		16
+
+#define CMD14_RCA_M		BITFIELD_MASK(16)
+#define CMD14_RCA_S		16
+#define CMD14_SLEEP_M		BITFIELD_MASK(1)
+#define CMD14_SLEEP_S		15
+
+#define CMD_15_RCA_M		BITFIELD_MASK(16)
+#define CMD_15_RCA_S		16
+
+#define CMD52_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]    - Write Data/Stuff bits of CMD52
+						   */
+#define CMD52_DATA_S		0
+#define CMD52_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD52_REG_ADDR_S	9
+#define CMD52_RAW_M		BITFIELD_MASK(1)  /* Bit  27       - Read after Write flag */
+#define CMD52_RAW_S		27
+#define CMD52_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD52_FUNCTION_S	28
+#define CMD52_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD52_RW_FLAG_S		31
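+/*
+ * Illustrative sketch (not part of the original header): assuming
+ * BITFIELD_MASK(w) expands to an unshifted w-bit mask, individual CMD52
+ * argument fields can be recovered as
+ *	uint32 addr = (cmd52_arg >> CMD52_REG_ADDR_S) & CMD52_REG_ADDR_M;
+ *	uint32 func = (cmd52_arg >> CMD52_FUNCTION_S) & CMD52_FUNCTION_M;
+ */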
+
+
+#define CMD53_BYTE_BLK_CNT_M	BITFIELD_MASK(9) /* Bits [8:0]     - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S	0
+#define CMD53_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD53_REG_ADDR_S	9
+#define CMD53_OP_CODE_M		BITFIELD_MASK(1)  /* Bit  26       - R/W Operation Code */
+#define CMD53_OP_CODE_S		26
+#define CMD53_BLK_MODE_M	BITFIELD_MASK(1)  /* Bit  27       - Block Mode */
+#define CMD53_BLK_MODE_S	27
+#define CMD53_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD53_FUNCTION_S	28
+#define CMD53_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD53_RW_FLAG_S		31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ *  -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M		BITFIELD_MASK(24) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S		0
+
+#define RSP4_S18A_M			BITFIELD_MASK(1) /* Bit  24      - S18A (switching to 1.8V accepted) */
+#define RSP4_S18A_S			24
+
+#define RSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S		24
+#define RSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  27      - Memory present */
+#define RSP4_MEM_PRESENT_S	27
+#define RSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S	28
+#define RSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  31      - SDIO card ready */
+#define RSP4_CARD_READY_S	31
+
+#define RSP6_STATUS_M		BITFIELD_MASK(16) /* Bits [15:0]  - Card status bits [19,22,23,12:0]
+						   */
+#define RSP6_STATUS_S		0
+#define RSP6_IO_RCA_M		BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S		16
+
+#define RSP1_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3       - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S	3
+#define RSP1_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5       - Card expects ACMD */
+#define RSP1_APP_CMD_S		5
+#define RSP1_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8       - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S	8
+#define RSP1_CURR_STATE_M	BITFIELD_MASK(4)  /* Bits [12:9] - State of card
+						   * when Cmd was received
+						   */
+#define RSP1_CURR_STATE_S	9
+#define RSP1_EARSE_RESET_M	BITFIELD_MASK(1)  /* Bit 13   - Erase seq cleared */
+#define RSP1_EARSE_RESET_S	13
+#define RSP1_CARD_ECC_DISABLE_M	BITFIELD_MASK(1)  /* Bit 14   - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S	14
+#define RSP1_WP_ERASE_SKIP_M	BITFIELD_MASK(1)  /* Bit 15   - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S	15
+#define RSP1_CID_CSD_OVERW_M	BITFIELD_MASK(1)  /* Bit 16   - Illegal write to CID or R/O bits
+						   * of CSD
+						   */
+#define RSP1_CID_CSD_OVERW_S	16
+#define RSP1_ERROR_M		BITFIELD_MASK(1)  /* Bit 19   - General/Unknown error */
+#define RSP1_ERROR_S		19
+#define RSP1_CC_ERROR_M		BITFIELD_MASK(1)  /* Bit 20   - Internal Card Control error */
+#define RSP1_CC_ERROR_S		20
+#define RSP1_CARD_ECC_FAILED_M	BITFIELD_MASK(1)  /* Bit 21   - Card internal ECC failed
+						   * to correct data
+						   */
+#define RSP1_CARD_ECC_FAILED_S	21
+#define RSP1_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit 22   - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S	22
+#define RSP1_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 23   - CRC check of previous command failed
+						   */
+#define RSP1_COM_CRC_ERROR_S	23
+#define RSP1_LOCK_UNLOCK_FAIL_M	BITFIELD_MASK(1)  /* Bit 24   - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S	24
+#define RSP1_CARD_LOCKED_M	BITFIELD_MASK(1)  /* Bit 25   - Card locked by the host */
+#define RSP1_CARD_LOCKED_S	25
+#define RSP1_WP_VIOLATION_M	BITFIELD_MASK(1)  /* Bit 26   - Attempt to program
+						   * write-protected blocks
+						   */
+#define RSP1_WP_VIOLATION_S	26
+#define RSP1_ERASE_PARAM_M	BITFIELD_MASK(1)  /* Bit 27   - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S	27
+#define RSP1_ERASE_SEQ_ERR_M	BITFIELD_MASK(1)  /* Bit 28   - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S	28
+#define RSP1_BLK_LEN_ERR_M	BITFIELD_MASK(1)  /* Bit 29   - Block length error */
+#define RSP1_BLK_LEN_ERR_S	29
+#define RSP1_ADDR_ERR_M		BITFIELD_MASK(1)  /* Bit 30   - Misaligned address */
+#define RSP1_ADDR_ERR_S		30
+#define RSP1_OUT_OF_RANGE_M	BITFIELD_MASK(1)  /* Bit 31   - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S	31
+
+
+#define RSP5_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]   - data */
+#define RSP5_DATA_S		0
+#define RSP5_FLAGS_M		BITFIELD_MASK(8)  /* Bit  [15:8]  - Rsp flags */
+#define RSP5_FLAGS_S		8
+#define RSP5_STUFF_M		BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S		16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M	BITFIELD_MASK(16) /* Bits [15:0]    - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S	0
+#define SPIRSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [18:16]   - Stuff bits */
+#define SPIRSP4_STUFF_S		16
+#define SPIRSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  19        - Memory present */
+#define SPIRSP4_MEM_PRESENT_S	19
+#define SPIRSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [22:20]   - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S	20
+#define SPIRSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  23        - SDIO card ready */
+#define SPIRSP4_CARD_READY_S	23
+#define SPIRSP4_IDLE_STATE_M	BITFIELD_MASK(1)  /* Bit  24        - idle state */
+#define SPIRSP4_IDLE_STATE_S	24
+#define SPIRSP4_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S	26
+#define SPIRSP4_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S	27
+#define SPIRSP4_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP4_FUNC_NUM_ERROR_S	28
+#define SPIRSP4_PARAM_ERROR_M	BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S	30
+#define SPIRSP4_START_BIT_M	BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP4_START_BIT_S	31
+
+#define SPIRSP5_DATA_M			BITFIELD_MASK(8)  /* Bits [23:16]   - R/W Data */
+#define SPIRSP5_DATA_S			16
+#define SPIRSP5_IDLE_STATE_M		BITFIELD_MASK(1)  /* Bit  24        - Idle state */
+#define SPIRSP5_IDLE_STATE_S		24
+#define SPIRSP5_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S		26
+#define SPIRSP5_COM_CRC_ERROR_M		BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S		27
+#define SPIRSP5_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP5_FUNC_NUM_ERROR_S	28
+#define SPIRSP5_PARAM_ERROR_M		BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S		30
+#define SPIRSP5_START_BIT_M		BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP5_START_BIT_S		31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3	- Authentication seq error
+							   */
+#define RSP6STAT_AKE_SEQ_ERROR_S	3
+#define RSP6STAT_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5	- Card expects ACMD */
+#define RSP6STAT_APP_CMD_S		5
+#define RSP6STAT_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8	- Ready for data
+							   * (buff empty)
+							   */
+#define RSP6STAT_READY_FOR_DATA_S	8
+#define RSP6STAT_CURR_STATE_M		BITFIELD_MASK(4)  /* Bits [12:9] - Card state at
+							   * Cmd reception
+							   */
+#define RSP6STAT_CURR_STATE_S		9
+#define RSP6STAT_ERROR_M		BITFIELD_MASK(1)  /* Bit 13  - General/Unknown error Bit 19
+							   */
+#define RSP6STAT_ERROR_S		13
+#define RSP6STAT_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit 14  - Illegal cmd for
+							   * card state Bit 22
+							   */
+#define RSP6STAT_ILLEGAL_CMD_S		14
+#define RSP6STAT_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 15  - CRC previous command
+							   * failed Bit 23
+							   */
+#define RSP6STAT_COM_CRC_ERROR_S	15
+
+#define SDIOH_XFER_TYPE_READ    SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE   SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT	0
+#define CMD_OPTION_TUNING	1
+
+#endif /* def BCMSDIO */
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h
new file mode 100644
index 0000000..f37c5f6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -0,0 +1,448 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdioh.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef	_SDIOH_H
+#define	_SDIOH_H
+
+#define SD_SysAddr			0x000
+#define SD_BlockSize			0x004
+#define SD_BlockCount 			0x006
+#define SD_Arg0				0x008
+#define SD_Arg1 			0x00A
+#define SD_TransferMode			0x00C
+#define SD_Command 			0x00E
+#define SD_Response0			0x010
+#define SD_Response1 			0x012
+#define SD_Response2			0x014
+#define SD_Response3 			0x016
+#define SD_Response4			0x018
+#define SD_Response5 			0x01A
+#define SD_Response6			0x01C
+#define SD_Response7 			0x01E
+#define SD_BufferDataPort0		0x020
+#define SD_BufferDataPort1 		0x022
+#define SD_PresentState			0x024
+#define SD_HostCntrl			0x028
+#define SD_PwrCntrl			0x029
+#define SD_BlockGapCntrl 		0x02A
+#define SD_WakeupCntrl 			0x02B
+#define SD_ClockCntrl			0x02C
+#define SD_TimeoutCntrl 		0x02E
+#define SD_SoftwareReset		0x02F
+#define SD_IntrStatus			0x030
+#define SD_ErrorIntrStatus 		0x032
+#define SD_IntrStatusEnable		0x034
+#define SD_ErrorIntrStatusEnable 	0x036
+#define SD_IntrSignalEnable		0x038
+#define SD_ErrorIntrSignalEnable 	0x03A
+#define SD_CMD12ErrorStatus		0x03C
+#define SD_Capabilities			0x040
+#define SD_Capabilities3		0x044
+#define SD_MaxCurCap			0x048
+#define SD_MaxCurCap_Reserved		0x04C
+#define SD_ADMA_ErrStatus		0x054
+#define SD_ADMA_SysAddr			0x058
+#define SD_SlotInterruptStatus		0x0FC
+#define SD_HostControllerVersion 	0x0FE
+#define	SD_GPIO_Reg			0x100
+#define	SD_GPIO_OE			0x104
+#define	SD_GPIO_Enable			0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo	0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2			0x03E
+/* preset regs start and count */
+#define SD3_PresetValStart		0x060
+#define SD3_PresetValCount		8
+/* preset-indiv regs */
+#define SD3_PresetVal_init		0x060
+#define SD3_PresetVal_default	0x062
+#define SD3_PresetVal_HS		0x064
+#define SD3_PresetVal_SDR12		0x066
+#define SD3_PresetVal_SDR25		0x068
+#define SD3_PresetVal_SDR50		0x06a
+#define SD3_PresetVal_SDR104	0x06c
+#define SD3_PresetVal_DDR50		0x06e
+/* SDIO3.0 Revx specific Registers */
+#define SD3_Tuning_Info_Register 0x0EC
+#define SD3_WL_BT_reset_register 0x0F0
+
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX	0
+#define SD3_PRESETVAL_DESPEED_IX	1
+#define SD3_PRESETVAL_HISPEED_IX	2
+#define SD3_PRESETVAL_SDR12_IX		3
+#define SD3_PRESETVAL_SDR25_IX		4
+#define SD3_PRESETVAL_SDR50_IX		5
+#define SD3_PRESETVAL_SDR104_IX		6
+#define SD3_PRESETVAL_DDR50_IX		7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M 	BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 	0
+#define CAP_TO_CLKUNIT_M  	BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 	7
+/* Note: for the SDIO 2.0 case this mask should be 6 bits (the 2 MSBs are
+	reserved), but 8 bits are used here since that is required for 3.0.
+*/
+#define CAP_BASECLK_M 		BITFIELD_MASK(8)
+#define CAP_BASECLK_S 		8
+#define CAP_MAXBLOCK_M 		BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S		16
+#define CAP_ADMA2_M		BITFIELD_MASK(1)
+#define CAP_ADMA2_S		19
+#define CAP_ADMA1_M		BITFIELD_MASK(1)
+#define CAP_ADMA1_S		20
+#define CAP_HIGHSPEED_M		BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S		21
+#define CAP_DMA_M		BITFIELD_MASK(1)
+#define CAP_DMA_S		22
+#define CAP_SUSPEND_M		BITFIELD_MASK(1)
+#define CAP_SUSPEND_S		23
+#define CAP_VOLT_3_3_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S		24
+#define CAP_VOLT_3_0_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S		25
+#define CAP_VOLT_1_8_M		BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S		26
+#define CAP_64BIT_HOST_M	BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S	28
+
+#define SDIO_OCR_READ_FAIL	(2)
+
+
+#define CAP_ASYNCINT_SUP_M	BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S	29
+
+#define CAP_SLOTTYPE_M		BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S		30
+
+#define CAP3_MSBits_OFFSET	(32)
+/* Note: the following are bits [63:32] of the capabilities registers, so each
+	shift is given relative to bit 32; that is why CAP3_MSBits_OFFSET is
+	subtracted.
+*/
+#define CAP3_SDR50_SUP_M		BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M	BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S	(33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M	BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S	(34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M		BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S		(32 - CAP3_MSBits_OFFSET)
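+/*
+ * Illustrative sketch (not part of the original header; `caps3` is assumed to
+ * hold bits [63:32] of the capabilities register read as a 32-bit word):
+ *	uint32 clkcaps = (caps3 >> CAP3_30CLKCAP_S) & CAP3_30CLKCAP_M;
+ * returns the SDR50/SDR104/DDR50 support bits in a single read.
+ */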
+
+#define CAP3_DRIVTYPE_A_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S	(36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S	(37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S	(38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M	BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S	(40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M	BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S	(45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M	BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S	(46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M		BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S		(48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M	BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S	14
+
+#define PRESET_CLK_DIV_M	BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S	0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S		0
+#define CAP_CURR_3_0_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S		8
+#define CAP_CURR_1_8_M		BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S		16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M		BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S		0
+#define BLKSZ_BNDRY_M		BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S		12
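+/*
+ * Illustrative sketch (not part of the original header): a block-size register
+ * value for 512-byte blocks and an SDMA boundary code `bndry` could be built as
+ *	uint16 blksz_reg = ((bndry & BLKSZ_BNDRY_M) << BLKSZ_BNDRY_S) |
+ *			   (512 & BLKSZ_BLKSZ_M);
+ */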
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes  */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M   	BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S	0
+#define XFER_BLK_COUNT_EN_M 	BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S	1
+#define XFER_CMD_12_EN_M    	BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 	2
+#define XFER_DATA_DIRECTION_M	BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S	4
+#define XFER_MULTI_BLOCK_M	BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S	5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 		0
+#define RESP_TYPE_136  		1
+#define RESP_TYPE_48   		2
+#define RESP_TYPE_48_BUSY	3
+/* type field */
+#define CMD_TYPE_NORMAL		0
+#define CMD_TYPE_SUSPEND	1
+#define CMD_TYPE_RESUME		2
+#define CMD_TYPE_ABORT		3
+
+#define CMD_RESP_TYPE_M		BITFIELD_MASK(2)	/* Bits [0-1] 	- Response type */
+#define CMD_RESP_TYPE_S		0
+#define CMD_CRC_EN_M		BITFIELD_MASK(1)	/* Bit 3 	- CRC enable */
+#define CMD_CRC_EN_S		3
+#define CMD_INDEX_EN_M		BITFIELD_MASK(1)	/* Bit 4 	- Enable index checking */
+#define CMD_INDEX_EN_S		4
+#define CMD_DATA_EN_M		BITFIELD_MASK(1)	/* Bit 5 	- Using DAT line */
+#define CMD_DATA_EN_S		5
+#define CMD_TYPE_M		BITFIELD_MASK(2)	/* Bit [6-7] 	- Normal, abort, resume, etc
+							 */
+#define CMD_TYPE_S		6
+#define CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [8-13] 	- Command number */
+#define CMD_INDEX_S		8
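+/*
+ * Illustrative sketch (not part of the original header): the SD_Command value
+ * for a data-bearing CMD53 (index 53) with a 48-bit response could be composed
+ * as
+ *	uint16 cmd_reg = (53 << CMD_INDEX_S) |
+ *			 (CMD_TYPE_NORMAL << CMD_TYPE_S) |
+ *			 (1 << CMD_DATA_EN_S) | (1 << CMD_INDEX_EN_S) |
+ *			 (1 << CMD_CRC_EN_S) |
+ *			 (RESP_TYPE_48 << CMD_RESP_TYPE_S);
+ */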
+
+/* SD_BufferDataPort0	: Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 	: Offset 0x022, size = 2 bytes */
+/* SD_PresentState	: Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 0	CMD line in use (command inhibit) */
+#define PRES_CMD_INHIBIT_S	0
+#define PRES_DAT_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 1	DAT lines in use (command inhibit) */
+#define PRES_DAT_INHIBIT_S	1
+#define PRES_DAT_BUSY_M		BITFIELD_MASK(1)	/* Bit 2	DAT is busy */
+#define PRES_DAT_BUSY_S		2
+#define PRES_PRESENT_RSVD_M	BITFIELD_MASK(5)	/* Bit [3-7]	rsvd */
+#define PRES_PRESENT_RSVD_S	3
+#define PRES_WRITE_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 8	Write is active */
+#define PRES_WRITE_ACTIVE_S	8
+#define PRES_READ_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 9	Read is active */
+#define PRES_READ_ACTIVE_S	9
+#define PRES_WRITE_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 10	Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S	10
+#define PRES_READ_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 11	Read buf data avail */
+#define PRES_READ_DATA_RDY_S	11
+#define PRES_CARD_PRESENT_M	BITFIELD_MASK(1)	/* Bit 16	Card present - debounced */
+#define PRES_CARD_PRESENT_S	16
+#define PRES_CARD_STABLE_M	BITFIELD_MASK(1)	/* Bit 17	Debugging */
+#define PRES_CARD_STABLE_S	17
+#define PRES_CARD_PRESENT_RAW_M	BITFIELD_MASK(1)	/* Bit 18	Not debounced */
+#define PRES_CARD_PRESENT_RAW_S	18
+#define PRES_WRITE_ENABLED_M	BITFIELD_MASK(1)	/* Bit 19	Write protected? */
+#define PRES_WRITE_ENABLED_S	19
+#define PRES_DAT_SIGNAL_M	BITFIELD_MASK(4)	/* Bit [20-23]	Debugging */
+#define PRES_DAT_SIGNAL_S	20
+#define PRES_CMD_SIGNAL_M	BITFIELD_MASK(1)	/* Bit 24	Debugging */
+#define PRES_CMD_SIGNAL_S	24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M		BITFIELD_MASK(1)	/* Bit 0	LED On/Off */
+#define HOST_LED_S		0
+#define HOST_DATA_WIDTH_M	BITFIELD_MASK(1)	/* Bit 1	4 bit enable */
+#define HOST_DATA_WIDTH_S	1
+#define HOST_HI_SPEED_EN_M	BITFIELD_MASK(1)	/* Bit 2	High speed vs low speed */
+#define HOST_HI_SPEED_EN_S	2
+#define HOST_DMA_SEL_M		BITFIELD_MASK(2)	/* Bit 4:3	DMA Select */
+#define HOST_DMA_SEL_S		3
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S	15					/* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S	14					/* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S	7					/* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S	6					/* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M	BITFIELD_MASK(2)	/* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S	4					/* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S	3					/* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M	BITFIELD_MASK(3)	/* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S	0					/* bit# */
+
+#define HOST_CONTR_VER_2		(1)
+#define HOST_CONTR_VER_3		(2)
+
+/* misc defines */
+#define SD1_MODE 		0x1	/* SD Host Cntrlr Spec */
+#define SD4_MODE 		0x2	/* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M		BITFIELD_MASK(1)	/* Bit 0	Power the bus */
+#define PWR_BUS_EN_S		0
+#define PWR_VOLTS_M		BITFIELD_MASK(3)	/* Bit [1-3]	Voltage Select */
+#define PWR_VOLTS_S		1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M		BITFIELD_MASK(1)	/* Bit 0	Reset All */
+#define SW_RESET_ALL_S		0
+#define SW_RESET_CMD_M		BITFIELD_MASK(1)	/* Bit 1	CMD Line Reset */
+#define SW_RESET_CMD_S		1
+#define SW_RESET_DAT_M		BITFIELD_MASK(1)	/* Bit 2	DAT Line Reset */
+#define SW_RESET_DAT_S		2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M		BITFIELD_MASK(1)	/* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S		0
+#define INTSTAT_XFER_COMPLETE_M		BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S		1
+#define INTSTAT_BLOCK_GAP_EVENT_M	BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S	2
+#define INTSTAT_DMA_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S		3
+#define INTSTAT_BUF_WRITE_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S	4
+#define INTSTAT_BUF_READ_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S	5
+#define INTSTAT_CARD_INSERTION_M	BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S	6
+#define INTSTAT_CARD_REMOVAL_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S		7
+#define INTSTAT_CARD_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S		8
+#define INTSTAT_RETUNING_INT_M		BITFIELD_MASK(1)	/* Bit 12 */
+#define INTSTAT_RETUNING_INT_S		12
+#define INTSTAT_ERROR_INT_M		BITFIELD_MASK(1)	/* Bit 15 */
+#define INTSTAT_ERROR_INT_S		15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S		0
+#define ERRINT_CMD_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S		1
+#define ERRINT_CMD_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S		2
+#define ERRINT_CMD_INDEX_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S		3
+#define ERRINT_DATA_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S		4
+#define ERRINT_DATA_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S		5
+#define ERRINT_DATA_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S		6
+#define ERRINT_CURRENT_LIMIT_M		BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S		7
+#define ERRINT_AUTO_CMD12_M		BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S		8
+#define ERRINT_VENDOR_M			BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S			12
+#define ERRINT_ADMA_M			BITFIELD_MASK(1)
+#define ERRINT_ADMA_S			9
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT		0x0001
+#define ERRINT_CMD_CRC_BIT		0x0002
+#define ERRINT_CMD_ENDBIT_BIT		0x0004
+#define ERRINT_CMD_INDEX_BIT		0x0008
+#define ERRINT_DATA_TIMEOUT_BIT		0x0010
+#define ERRINT_DATA_CRC_BIT		0x0020
+#define ERRINT_DATA_ENDBIT_BIT		0x0040
+#define ERRINT_CURRENT_LIMIT_BIT	0x0080
+#define ERRINT_AUTO_CMD12_BIT		0x0100
+#define ERRINT_ADMA_BIT		0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS		(ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+				 ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS	(ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+				 ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS	(ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
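+/*
+ * Illustrative sketch (not part of the original header): the combined masks let
+ * an error-interrupt handler classify a failure in one test, e.g.
+ *	if (errint_status & ERRINT_CMD_ERRS)
+ *		... reset the CMD line and retry the command ...
+ */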
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl	: Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl 	: Offset 0x02E , size = bytes */
+/* SD_IntrStatus	: Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus 	: Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable	: Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable	: Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus	: Offset 0x03C , size = bytes */
+/* SD_Capabilities	: Offset 0x040 , size = bytes */
+/* SD_MaxCurCap		: Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE			0
+#define SDIOH_ADMA1_MODE		1
+#define SDIOH_ADMA2_MODE		2
+#define SDIOH_ADMA2_64_MODE		3
+
+#define ADMA2_ATTRIBUTE_VALID		(1 << 0)	/* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END			(1 << 1)	/* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT			(1 << 2)	/* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP		(0 << 4)	/* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV		(1 << 4)	/* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET		(1 << 4)	/* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN	(2 << 4)	/* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK	(3 << 4)	/* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+	uint32 len_attr;
+	uint32 phys_addr;
+} adma2_dscr_32b_t;
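+/*
+ * Illustrative sketch (assumption: per the standard host controller ADMA2
+ * format, the transfer length occupies the upper 16 bits of len_attr):
+ *	adma2_dscr_32b_t d;
+ *	d.phys_addr = buf_pa;
+ *	d.len_attr  = (len << 16) | ADMA2_ATTRIBUTE_ACT_TRAN |
+ *		      ADMA2_ATTRIBUTE_END | ADMA2_ATTRIBUTE_VALID;
+ */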
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+	uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 0000000..719eeb1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,107 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdiovar.h 610006 2016-01-06 01:38:47Z $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+	int func;
+	int offset;
+	int value;
+} sdreg_t;
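+/*
+ * Illustrative sketch (not part of the original header): an app would typically
+ * fill this in before handing it to the SDIO driver, e.g.
+ *	sdreg_t sdreg = { .func = 1, .offset = 0x0, .value = 0 };
+ * to describe an access to function-1 register offset 0x0.
+ */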
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL		0x0001	/* Error */
+#define SDH_TRACE_VAL		0x0002	/* Trace */
+#define SDH_INFO_VAL		0x0004	/* Info */
+#define SDH_DEBUG_VAL		0x0008	/* Debug */
+#define SDH_DATA_VAL		0x0010	/* Data */
+#define SDH_CTRL_VAL		0x0020	/* Control Regs */
+#define SDH_LOG_VAL		0x0040	/* Enable bcmlog */
+#define SDH_DMA_VAL		0x0080	/* DMA */
+#define SDH_COST_VAL		0x8000	/* Control Regs */
+
+#define NUM_PREV_TRANSACTIONS	16
+
+
+typedef struct sdio_bus_metrics {
+	uint32 active_dur;	/* msecs */
+
+	/* Generic */
+	uint32 data_intr_cnt;	/* data interrupt counter */
+	uint32 mb_intr_cnt;	/* mailbox interrupt counter */
+	uint32 error_intr_cnt;	/* error interrupt counter */
+	uint32 wakehost_cnt;	/* counter for OOB wakehost */
+
+	/* DS forcewake */
+	uint32 ds_wake_on_cnt;	/* counter for (clock) ON   */
+	uint32 ds_wake_on_dur;	/* duration for (clock) ON */
+	uint32 ds_wake_off_cnt;	/* counter for (clock) OFF  */
+	uint32 ds_wake_off_dur;	/* duration for (clock) OFF */
+
+	/* DS_D0 state */
+	uint32 ds_d0_cnt;	/* counter for DS_D0 state */
+	uint32 ds_d0_dur;	/* duration for DS_D0 state */
+
+	/* DS_D3 state */
+	uint32 ds_d3_cnt;	/* counter for DS_D3 state */
+	uint32 ds_d3_dur;	/* duration for DS_D3 state */
+
+	/* DS DEV_WAKE */
+	uint32 ds_dw_assrt_cnt;		/* counter for DW_ASSERT */
+	uint32 ds_dw_dassrt_cnt;	/* counter for DW_DASSERT */
+
+	/* DS mailbox signals */
+	uint32 ds_tx_dsreq_cnt;		/* counter for tx HMB_DATA_DSREQ */
+	uint32 ds_tx_dsexit_cnt;	/* counter for tx HMB_DATA_DSEXIT */
+	uint32 ds_tx_d3ack_cnt;		/* counter for tx HMB_DATA_D3ACK */
+	uint32 ds_tx_d3exit_cnt;	/* counter for tx HMB_DATA_D3EXIT */
+	uint32 ds_rx_dsack_cnt;		/* counter for rx SMB_DATA_DSACK */
+	uint32 ds_rx_dsnack_cnt;	/* counter for rx SMB_DATA_DSNACK */
+	uint32 ds_rx_d3inform_cnt;	/* counter for rx SMB_DATA_D3INFORM */
+} sdio_bus_metrics_t;
+
+/* Bus interface info for SDIO */
+typedef struct wl_pwr_sdio_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_SDIO */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	sdio_bus_metrics_t sdio;	/* stats from SDIO bus driver */
+} wl_pwr_sdio_stats_t;
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sdspi.h b/drivers/net/wireless/bcmdhd/include/sdspi.h
new file mode 100644
index 0000000..c5b8aff
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdspi.h
@@ -0,0 +1,78 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdspi.h 700076 2017-05-17 14:42:22Z $
+ */
+#ifndef	_SD_SPI_H
+#define	_SD_SPI_H
+
+#define SPI_START_M		BITFIELD_MASK(1)	/* Bit [31] 	- Start Bit */
+#define SPI_START_S		31
+#define SPI_DIR_M		BITFIELD_MASK(1)	/* Bit [30] 	- Direction */
+#define SPI_DIR_S		30
+#define SPI_CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S		24
+#define SPI_RW_M		BITFIELD_MASK(1)	/* Bit [23] 	- Read=0, Write=1 */
+#define SPI_RW_S		23
+#define SPI_FUNC_M		BITFIELD_MASK(3)	/* Bits [22:20]	- Function Number */
+#define SPI_FUNC_S		20
+#define SPI_RAW_M		BITFIELD_MASK(1)	/* Bit [19] 	- Read After Wr */
+#define SPI_RAW_S		19
+#define SPI_STUFF_M		BITFIELD_MASK(1)	/* Bit [18] 	- Stuff bit */
+#define SPI_STUFF_S		18
+#define SPI_BLKMODE_M		BITFIELD_MASK(1)	/* Bit [19] 	- Blockmode 1=blk */
+#define SPI_BLKMODE_S		19
+#define SPI_OPCODE_M		BITFIELD_MASK(1)	/* Bit [18] 	- OP Code */
+#define SPI_OPCODE_S		18
+#define SPI_ADDR_M		BITFIELD_MASK(17)	/* Bits [17:1] 	- Address */
+#define SPI_ADDR_S		1
+#define SPI_STUFF0_M		BITFIELD_MASK(1)	/* Bit [0] 	- Stuff bit */
+#define SPI_STUFF0_S		0
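+/*
+ * Illustrative sketch (not part of the original header; assumes the fields
+ * above describe the leading 32 bits of the 6-byte SD-SPI command): a
+ * CMD52-style word could be assembled as
+ *	uint32 w = (1 << SPI_START_S) | (1 << SPI_DIR_S) |
+ *		   (52 << SPI_CMD_INDEX_S) | (rw << SPI_RW_S) |
+ *		   (func << SPI_FUNC_S) | ((addr & SPI_ADDR_M) << SPI_ADDR_S);
+ */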
+
+#define SPI_RSP_START_M		BITFIELD_MASK(1)	/* Bit [7] 	- Start Bit (always 0) */
+#define SPI_RSP_START_S		7
+#define SPI_RSP_PARAM_ERR_M	BITFIELD_MASK(1)	/* Bit [6] 	- Parameter Error */
+#define SPI_RSP_PARAM_ERR_S	6
+#define SPI_RSP_RFU5_M		BITFIELD_MASK(1)	/* Bit [5] 	- RFU (Always 0) */
+#define SPI_RSP_RFU5_S		5
+#define SPI_RSP_FUNC_ERR_M	BITFIELD_MASK(1)	/* Bit [4] 	- Function number error */
+#define SPI_RSP_FUNC_ERR_S	4
+#define SPI_RSP_CRC_ERR_M	BITFIELD_MASK(1)	/* Bit [3] 	- COM CRC Error */
+#define SPI_RSP_CRC_ERR_S	3
+#define SPI_RSP_ILL_CMD_M	BITFIELD_MASK(1)	/* Bit [2] 	- Illegal Command error */
+#define SPI_RSP_ILL_CMD_S	2
+#define SPI_RSP_RFU1_M		BITFIELD_MASK(1)	/* Bit [1] 	- RFU (Always 0) */
+#define SPI_RSP_RFU1_S		1
+#define SPI_RSP_IDLE_M		BITFIELD_MASK(1)	/* Bit [0] 	- In idle state */
+#define SPI_RSP_IDLE_S		0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN	6	/* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK	0xFE	/* SD Start Block Token */
+#define SDSPI_IDLE_PAD		0xFF	/* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK	0x80
+
+#endif /* _SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 0000000..2afca25
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,729 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: siutils.h 668442 2016-11-03 08:42:43Z $
+ */
+
+#ifndef	_siutils_h_
+#define	_siutils_h_
+
+#ifdef SR_DEBUG
+#include "wlioctl.h"
+#endif /* SR_DEBUG */
+
+
+#define WARM_BOOT	0xA0B0C0D0
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+
+#define SI_MAX_ERRLOG_SIZE	4
+typedef struct si_axi_error
+{
+	uint32 error;
+	uint32 coreid;
+	uint32 errlog_lo;
+	uint32 errlog_hi;
+	uint32 errlog_id;
+	uint32 errlog_flags;
+	uint32 errlog_status;
+} si_axi_error_t;
+
+typedef struct si_axi_error_info
+{
+	uint32 count;
+	si_axi_error_t axi_error[SI_MAX_ERRLOG_SIZE];
+} si_axi_error_info_t;
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+/**
+ * Data structure to export all chip specific common variables
+ *   public (read-only) portion of siutils handle returned by si_attach()/si_kattach()
+ */
+struct si_pub {
+	uint	socitype;		/**< SOCI_SB, SOCI_AI */
+
+	uint	bustype;		/**< SI_BUS, PCI_BUS */
+	uint	buscoretype;		/**< PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
+	uint	buscorerev;		/**< buscore rev */
+	uint	buscoreidx;		/**< buscore index */
+	int	ccrev;			/**< chip common core rev */
+	uint32	cccaps;			/**< chip common capabilities */
+	uint32  cccaps_ext;			/**< chip common capabilities extension */
+	int	pmurev;			/**< pmu core rev */
+	uint32	pmucaps;		/**< pmu capabilities */
+	uint	boardtype;		/**< board type */
+	uint    boardrev;               /* board rev */
+	uint	boardvendor;		/**< board vendor */
+	uint	boardflags;		/**< board flags */
+	uint	boardflags2;		/**< board flags2 */
+	uint	chip;			/**< chip number */
+	uint	chiprev;		/**< chip revision */
+	uint	chippkg;		/**< chip package option */
+	uint32	chipst;			/**< chip status */
+	bool	issim;			/**< chip is in simulation or emulation */
+	uint    socirev;		/**< SOC interconnect rev */
+	bool	pci_pr32414;
+	int	gcirev;			/**< gci core rev */
+#ifdef BCM_BACKPLANE_TIMEOUT
+	si_axi_error_info_t * err_info;
+#endif /* BCM_BACKPLANE_TIMEOUT */
+};
+
+/* For the HIGH_ONLY driver, the si_t must be writable to allow state sync from
+ * BMAC to the HIGH driver; for the monolithic driver it is read-only to prevent
+ * accidental changes.
+ */
+typedef const struct si_pub si_t;
+
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach().  Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core.
+ */
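+/*
+ * Illustrative sketch of the pattern described above (not part of the original
+ * header; argument values are placeholders):
+ *	si_t *sih = si_attach(devid, osh, regs, bustype, sdh, &vars, &varsz);
+ *	...
+ *	si_setcore(sih, coreid, 0);	focus the handle on one core
+ *	...
+ *	si_detach(sih);
+ */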
+#define	SI_OSH		NULL	/**< Use for si_kattach when no osh is available */
+
+#define	BADIDX		(SI_MAXCORES + 1)
+
+/* clkctl xtal what flags */
+#define	XTAL			0x1	/**< primary crystal oscillator (2050) */
+#define	PLL			0x2	/**< main chip pll */
+
+/* clkctl clk mode */
+#define	CLK_FAST		0	/**< force fast (pll) clock */
+#define	CLK_DYNAMIC		2	/**< enable dynamic clock control */
+
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY	0	/**< Driver */
+#define GPIO_APP_PRIORITY	1	/**< Application */
+#define GPIO_HI_PRIORITY	2	/**< Highest priority. Ignore GPIO reservation */
+
+/* GPIO pull up/down */
+#define GPIO_PULLUP		0
+#define GPIO_PULLDN		1
+
+/* GPIO event regtype */
+#define GPIO_REGEVT		0	/**< GPIO register event */
+#define GPIO_REGEVT_INTMSK	1	/**< GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL	2	/**< GPIO register event int polarity */
+
+/* device path */
+#define SI_DEVPATH_BUFSZ	16	/**< min buffer size in bytes */
+
+/* SI routine enumeration: to be used by update function with multiple hooks */
+#define	SI_DOATTACH	1
+#define SI_PCIDOWN	2	/**< wireless interface is down */
+#define SI_PCIUP	3	/**< wireless interface is up */
+
+#ifdef SR_DEBUG
+#define PMU_RES		31
+#endif /* SR_DEBUG */
+
+/* "access" param defines for si_seci_access() below */
+#define SECI_ACCESS_STATUSMASK_SET	0
+#define SECI_ACCESS_INTRS			1
+#define SECI_ACCESS_UART_CTS		2
+#define SECI_ACCESS_UART_RTS		3
+#define SECI_ACCESS_UART_RXEMPTY	4
+#define SECI_ACCESS_UART_GETC		5
+#define SECI_ACCESS_UART_TXFULL		6
+#define SECI_ACCESS_UART_PUTC		7
+#define SECI_ACCESS_STATUSMASK_GET	8
+
+#define	ISSIM_ENAB(sih)	FALSE
+
+#define INVALID_ADDR (~0)
+
+/* PMU clock/power control */
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih)	(BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih)	((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+#if defined(BCMAOBENAB)
+#define AOB_ENAB(sih)  (BCMAOBENAB)
+#else
+#define AOB_ENAB(sih)	((sih)->ccrev >= 35 ? \
+			((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0)
+#endif /* BCMAOBENAB */
+
+/* chipcommon clock/power control (exclusive with PMU's) */
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih)		(0)
+#define CCPLL_ENAB(sih)		(0)
+#else
+#define CCCTL_ENAB(sih)		((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih)		((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+
+/* External BT Coex enable mask */
+#define CC_BTCOEX_EN_MASK  0x01
+/* External PA enable mask */
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+/* WL/BT control enable mask */
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+
+
+/* CR4 specific defines used by the host driver */
+#define SI_CR4_CAP			(0x04)
+#define SI_CR4_BANKIDX		(0x40)
+#define SI_CR4_BANKINFO		(0x44)
+#define SI_CR4_BANKPDA		(0x4C)
+
+#define	ARMCR4_TCBBNB_MASK	0xf0
+#define	ARMCR4_TCBBNB_SHIFT	4
+#define	ARMCR4_TCBANB_MASK	0xf
+#define	ARMCR4_TCBANB_SHIFT	0
+
+#define	SICF_CPUHALT		(0x0020)
+#define	ARMCR4_BSZ_MASK		0x3f
+#define	ARMCR4_BSZ_MULT		8192
+#define	SI_BPIND_1BYTE		0x1
+#define	SI_BPIND_2BYTE		0x3
+#define	SI_BPIND_4BYTE		0xF
+#include <osl_decl.h>
+/* === exported functions === */
+extern si_t *si_attach(uint pcidev, osl_t *osh, volatile void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+extern volatile void *si_d11_switch_addrbase(si_t *sih, uint coreunit);
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_flag_alt(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_backplane_access(si_t *sih, uint addr, uint size,
+	uint *val, bool read);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
+extern volatile uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern volatile void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val);
+extern void *si_wrapperregs(si_t *sih);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_commit(si_t *sih);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_numcoreunits(si_t *sih, uint coreid);
+extern uint si_numd11coreunits(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern volatile void *si_setcoreidx(si_t *sih, uint coreidx);
+extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern uint si_chip_hostif(si_t *sih);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */
+extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+	void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern void si_set_device_removed(si_t *sih, bool status);
+extern uint32 si_sysmem_size(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda);
+extern void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap);
+extern bool si_socdevram_pkg(si_t *sih);
+extern bool si_socdevram_remap_isenb(si_t *sih);
+extern uint32 si_socdevram_remap_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern uint32 si_watchdog_msticks(void);
+extern volatile void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode);
+extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value);
+extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
+extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
+
+extern void si_invalidate_second_bar0win(si_t *sih);
+
+/* GCI interrupt handlers */
+extern void si_gci_handler_process(si_t *sih);
+
+/* GCI GPIO event handlers */
+extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
+	gci_gpio_handler_t cb, void *arg);
+extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
+extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value);
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool si_pci_pmecap(si_t *sih);
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern void si_pci_pmestatclr(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+extern void si_deepsleep_count(si_t *sih, bool arm_wakeup);
+
+
+#ifdef BCMSDIO
+extern void si_sdio_init(si_t *sih);
+#endif
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+extern uint32 si_seci_access(si_t *sih, uint32 val, int access);
+extern volatile void* si_seci_init(si_t *sih, uint8 seci_mode);
+extern void si_seci_clk_force(si_t *sih, bool val);
+extern bool si_seci_clk_force_status(si_t *sih);
+
+#define si_eci(sih) 0
+static INLINE void * si_eci_init(si_t *sih) {return NULL;}
+#define si_eci_notify_bt(sih, type, val)  (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a)	do {} while (0)
+static INLINE void * si_gci_init(si_t *sih) {return NULL;}
+#define si_seci_down(sih) do {} while (0)
+#define si_gci(sih) 0
+
+/* OTP status */
+extern bool si_is_otp_disabled(si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask);
+
+/* SPROM availability */
+extern bool si_is_sprom_available(si_t *sih);
+
+/* OTP/SROM CIS stuff */
+extern int si_cis_source(si_t *sih);
+#define CIS_DEFAULT	0
+#define CIS_SROM	1
+#define CIS_OTP		2
+
+/* Fab-id information */
+#define	DEFAULT_FAB	0x0	/**< Original/first fab used for this chip */
+#define	CSM_FAB7	0x1	/**< CSM Fab7 chip */
+#define	TSMC_FAB12	0x2	/**< TSMC Fab12/Fab14 chip */
+#define	SMIC_FAB4	0x3	/**< SMIC Fab4 chip */
+
+extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
+extern uint16 si_fabid(si_t *sih);
+extern uint16 si_chipid(si_t *sih);
+
+/*
+ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
+ * The returned path is NULL terminated and has trailing '/'.
+ * Return 0 on success, nonzero otherwise.
+ */
+extern int si_devpath(si_t *sih, char *path, int size);
+extern int si_devpath_pcie(si_t *sih, char *path, int size);
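+/*
+ * Usage sketch (illustrative): build the device path into a caller-provided
+ * buffer and check the return code, per the contract documented above.
+ *
+ *	char devpath[SI_DEVPATH_BUFSZ];
+ *	if (si_devpath(sih, devpath, sizeof(devpath)) == 0) {
+ *		devpath now holds something like "pci/1/1/" (NUL terminated,
+ *		with a trailing '/')
+ *	}
+ */
+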
+/* Read variable with prepending the devpath to the name */
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val);
+extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val);
+extern void si_pcie_set_error_injection(si_t *sih, uint32 mode);
+extern void si_pcie_set_L1substate(si_t *sih, uint32 substate);
+extern uint32 si_pcie_get_L1substate(si_t *sih);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_power_save_enable(si_t *sih, bool enable);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern void si_chippkg_set(si_t *sih, uint);
+extern bool si_is_warmboot(void);
+
+extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on);
+extern void si_chipcontrl_restore(si_t *sih, uint32 val);
+extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_epa4331(si_t *sih, bool on);
+extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
+extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+extern void si_clk_srom4365(si_t *sih);
+/* Enable BT-COEX & Ex-PA for 4313 */
+extern void si_epa_4313war(si_t *sih);
+extern void si_btc_enable_chipcontrol(si_t *sih);
+/* BT/WL selection for 4313 bt combo >= P250 boards */
+extern void si_btcombo_p250_4313_war(si_t *sih);
+extern void si_btcombo_43228_war(si_t *sih);
+extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
+extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag);
+extern void si_pmu_synth_pwrsw_4313_war(si_t *sih);
+extern uint si_pll_reset(si_t *sih);
+/* === debug routines === */
+
+extern bool si_taclear(si_t *sih, bool details);
+
+#if defined(BCMDBG_PHYDUMP)
+struct bcmstrbuf;
+extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf *b);
+extern void si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b);
+extern int si_dump_pcieregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+#if defined(BCMDBG_PHYDUMP)
+extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low,
+	int32* data, bool read);
+#ifdef SR_DEBUG
+extern void si_dump_pmu(si_t *sih, void *pmu_var);
+extern void si_pmu_keep_on(si_t *sih, int32 int_val);
+extern uint32 si_pmu_keep_on_get(si_t *sih);
+extern uint32 si_power_island_set(si_t *sih, uint32 int_val);
+extern uint32 si_power_island_get(si_t *sih);
+#endif /* SR_DEBUG */
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+extern void si_pcie_set_request_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_request_size(si_t *sih);
+extern void si_pcie_set_maxpayload_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_maxpayload_size(si_t *sih);
+extern uint16 si_pcie_get_ssid(si_t *sih);
+extern uint32 si_pcie_get_bar0(si_t *sih);
+extern int si_pcie_configspace_cache(si_t *sih);
+extern int si_pcie_configspace_restore(si_t *sih);
+extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size);
+
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+extern const si_axi_error_info_t * si_get_axi_errlog_info(si_t *sih);
+extern void si_reset_axi_errlog_info(si_t * sih);
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+extern uint32 si_tcm_size(si_t *sih);
+extern bool si_has_flops(si_t *sih);
+
+extern int si_set_sromctl(si_t *sih, uint32 value);
+extern uint32 si_get_sromctl(si_t *sih);
+
+extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_input(si_t *sih, uint reg);
+extern uint32 si_gci_int_enable(si_t *sih, bool enable);
+extern void si_gci_reset(si_t *sih);
+#ifdef BCMLTECOEX
+extern void si_gci_seci_init(si_t *sih);
+extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
+	uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+#endif /* BCMLTECOEX */
+extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
+	uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+
+extern bool si_btcx_wci2_init(si_t *sih);
+
+extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel);
+extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin);
+extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel);
+extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos);
+extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
+extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
+extern void si_swdenable(si_t *sih, uint32 swdflag);
+extern uint8 si_enable_perst_wake(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status);
+
+extern uint32 si_get_pmu_reg_addr(si_t *sih, uint32 offset);
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+
+void si_update_masks(si_t *sih);
+void si_force_islanding(si_t *sih, bool enable);
+extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
+extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 spert_val);
+extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val);
+extern void si_pcie_ltr_war(si_t *sih);
+extern void si_pcie_hw_LTR_war(si_t *sih);
+extern void si_pcie_hw_L1SS_war(si_t *sih);
+extern void si_pciedev_crwlpciegen2(si_t *sih);
+extern void si_pcie_prep_D3(si_t *sih, bool enter_D3);
+extern void si_pciedev_reg_pm_clk_period(si_t *sih);
+extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih);
+extern void si_d11rsdb_core1_alt_reg_clk_en(si_t *sih);
+extern void si_pcie_disable_oobselltr(si_t *sih);
+extern uint32 si_raw_reg(si_t *sih, uint32 reg, uint32 val, uint32 write_req);
+
+#ifdef WLRSDB
+extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits);
+extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void set_secondary_d11_core(si_t *sih, void **secmap, void **secwrap);
+#endif
+
+
+/* Macro to enable clock gating changes in different cores */
+#define MEM_CLK_GATE_BIT 	5
+#define GCI_CLK_GATE_BIT 	18
+
+#define USBAPP_CLK_BIT		0
+#define PCIE_CLK_BIT		3
+#define ARMCR4_DBG_CLK_BIT	4
+#define SAMPLE_SYNC_CLK_BIT 	17
+#define PCIE_TL_CLK_BIT		18
+#define HQ_REQ_BIT		24
+#define PLL_DIV2_BIT_START	9
+#define PLL_DIV2_MASK		(0x37 << PLL_DIV2_BIT_START)
+#define PLL_DIV2_DIS_OP		(0x37 << PLL_DIV2_BIT_START)
+
+#define pmu_corereg(si, cc_idx, member, mask, val) \
+	(AOB_ENAB(si) ? \
+		si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+			       OFFSETOF(pmuregs_t, member), mask, val): \
+		si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val))
+
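+/*
+ * Usage sketch (illustrative): pmu_corereg() resolves the register offset
+ * against pmuregs_t when AOB is enabled and against chipcregs_t otherwise,
+ * so callers name the member once. Assuming min_res_mask is a member of both
+ * register maps, a read passes mask = 0 and a masked write passes mask/val:
+ *
+ *	pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0);
+ *	pmu_corereg(sih, SI_CC_IDX, min_res_mask, ~0, new_mask);
+ */
+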
+/* Used only for the regs present in the pmu core and not present in the old cc core */
+#define PMU_REG_NEW(si, member, mask, val) \
+		si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+			OFFSETOF(pmuregs_t, member), mask, val)
+
+#define PMU_REG(si, member, mask, val) \
+	(AOB_ENAB(si) ? \
+		si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+			OFFSETOF(pmuregs_t, member), mask, val): \
+		si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val))
+
+#define LHL_REG(si, member, mask, val) \
+		si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
+			OFFSETOF(gciregs_t, member), mask, val)
+
+#define CHIPC_REG(si, member, mask, val) \
+		si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val)
+
+/* GCI Macros */
+#define ALLONES_32				0xFFFFFFFF
+#define GCI_CCTL_SECIRST_OFFSET			0 /**< SeciReset */
+#define GCI_CCTL_RSTSL_OFFSET			1 /**< ResetSeciLogic */
+#define GCI_CCTL_SECIEN_OFFSET			2 /**< EnableSeci  */
+#define GCI_CCTL_FSL_OFFSET			3 /**< ForceSeciOutLow */
+#define GCI_CCTL_SMODE_OFFSET			4 /**< SeciOpMode, 6:4 */
+#define GCI_CCTL_US_OFFSET			7 /**< UpdateSeci */
+#define GCI_CCTL_BRKONSLP_OFFSET		8 /**< BreakOnSleep */
+#define GCI_CCTL_SILOWTOUT_OFFSET		9 /**< SeciInLowTimeout, 10:9 */
+#define GCI_CCTL_RSTOCC_OFFSET			11 /**< ResetOffChipCoex */
+#define GCI_CCTL_ARESEND_OFFSET			12 /**< AutoBTSigResend */
+#define GCI_CCTL_FGCR_OFFSET			16 /**< ForceGciClkReq */
+#define GCI_CCTL_FHCRO_OFFSET			17 /**< ForceHWClockReqOff */
+#define GCI_CCTL_FREGCLK_OFFSET			18 /**< ForceRegClk */
+#define GCI_CCTL_FSECICLK_OFFSET		19 /**< ForceSeciClk */
+#define GCI_CCTL_FGCA_OFFSET			20 /**< ForceGciClkAvail */
+#define GCI_CCTL_FGCAV_OFFSET			21 /**< ForceGciClkAvailValue */
+#define GCI_CCTL_SCS_OFFSET			24 /**< SeciClkStretch, 31:24 */
+
+#define GCI_MODE_UART				0x0
+#define GCI_MODE_SECI				0x1
+#define GCI_MODE_BTSIG				0x2
+#define GCI_MODE_GPIO				0x3
+#define GCI_MODE_MASK				0x7
+
+#define GCI_CCTL_LOWTOUT_DIS			0x0
+#define GCI_CCTL_LOWTOUT_10BIT			0x1
+#define GCI_CCTL_LOWTOUT_20BIT			0x2
+#define GCI_CCTL_LOWTOUT_30BIT			0x3
+#define GCI_CCTL_LOWTOUT_MASK			0x3
+
+#define GCI_CCTL_SCS_DEF			0x19
+#define GCI_CCTL_SCS_MASK			0xFF
+
+#define GCI_SECIIN_MODE_OFFSET			0
+#define GCI_SECIIN_GCIGPIO_OFFSET		4
+#define GCI_SECIIN_RXID2IP_OFFSET		8
+
+#define GCI_SECIOUT_MODE_OFFSET			0
+#define GCI_SECIOUT_GCIGPIO_OFFSET		4
+#define	GCI_SECIOUT_LOOPBACK_OFFSET		8
+#define GCI_SECIOUT_SECIINRELATED_OFFSET	16
+
+#define GCI_SECIAUX_RXENABLE_OFFSET		0
+#define GCI_SECIFIFO_RXENABLE_OFFSET		16
+
+#define GCI_SECITX_ENABLE_OFFSET		0
+
+#define GCI_GPIOCTL_INEN_OFFSET			0
+#define GCI_GPIOCTL_OUTEN_OFFSET		1
+#define GCI_GPIOCTL_PDN_OFFSET			4
+
+#define GCI_GPIOIDX_OFFSET			16
+
+#define GCI_LTECX_SECI_ID			0 /**< SECI port for LTECX */
+
+/* To access per GCI bit registers */
+#define GCI_REG_WIDTH				32
+
+/* GCI bit positions */
+/* GCI [127:000] = WLAN [127:0] */
+#define GCI_WLAN_IP_ID				0
+#define GCI_WLAN_BEGIN				0
+#define GCI_WLAN_PRIO_POS			(GCI_WLAN_BEGIN + 4)
+#define GCI_WLAN_PERST_POS			(GCI_WLAN_BEGIN + 15)
+
+/* GCI [639:512] = LTE [127:0] */
+#define GCI_LTE_IP_ID				4
+#define GCI_LTE_BEGIN				512
+#define GCI_LTE_FRAMESYNC_POS			(GCI_LTE_BEGIN + 0)
+#define GCI_LTE_RX_POS				(GCI_LTE_BEGIN + 1)
+#define GCI_LTE_TX_POS				(GCI_LTE_BEGIN + 2)
+#define GCI_LTE_WCI2TYPE_POS			(GCI_LTE_BEGIN + 48)
+#define GCI_LTE_WCI2TYPE_MASK			7
+#define GCI_LTE_AUXRXDVALID_POS			(GCI_LTE_BEGIN + 56)
+
+/* Reg Index corresponding to ECI bit no x of ECI space */
+#define GCI_REGIDX(x)				((x)/GCI_REG_WIDTH)
+/* Bit offset of ECI bit no x in 32-bit words */
+#define GCI_BITOFFSET(x)			((x)%GCI_REG_WIDTH)
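+/*
+ * Example: GCI_LTE_TX_POS is GCI bit 514 (GCI_LTE_BEGIN + 2), so it maps to
+ * 32-bit register index GCI_REGIDX(514) = 16 at bit GCI_BITOFFSET(514) = 2.
+ */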
+
+/* End - GCI Macros */
+
+#ifdef REROUTE_OOBINT
+#define CC_OOB          0x0
+#define M2MDMA_OOB      0x1
+#define PMU_OOB         0x2
+#define D11_OOB         0x3
+#define SDIOD_OOB       0x4
+#define WLAN_OOB	0x5
+#define PMU_OOB_BIT     0x12
+#endif /* REROUTE_OOBINT */
+
+#define GCI_REG(si, offset, mask, val) \
+		(AOB_ENAB(si) ? \
+			si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
+				offset, mask, val): \
+			si_corereg(si, SI_CC_IDX, offset, mask, val))
+
+extern void si_pll_sr_reinit(si_t *sih);
+extern void si_pll_closeloop(si_t *sih);
+void si_config_4364_d11_oob(si_t *sih, uint coreid);
+extern void si_update_macclk_mul_fact(si_t *sih, uint mul_fact);
+extern uint32 si_get_macclk_mul_fact(si_t *sih);
+extern void si_gci_set_femctrl(si_t *sih, osl_t *osh, bool set);
+extern void si_gci_set_femctrl_mask_ant01(si_t *sih, osl_t *osh, bool set);
+extern uint si_num_slaveports(si_t *sih, uint coreid);
+extern uint32 si_get_slaveport_addr(si_t *sih, uint asidx, uint core_id, uint coreunit);
+extern uint32 si_get_d11_slaveport_addr(si_t *sih, uint asidx, uint coreunit);
+uint si_introff(si_t *sih);
+void si_intrrestore(si_t *sih, uint intr_val);
+void si_nvram_res_masks(si_t *sih, uint32 *min_mask, uint32 *max_mask);
+uint32 si_xtalfreq(si_t *sih);
+extern uint32 si_wrapper_dump_buf_size(si_t *sih);
+extern uint32 si_wrapper_dump_binary(si_t *sih, uchar *p);
+
+/* SR Power Control */
+extern uint32 si_srpwr_request(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_srpwr_stat(si_t *sih);
+extern uint32 si_srpwr_domain(si_t *sih);
+
+/* SR Power Control */
+#ifdef BCMSRPWR
+	/* No capabilities bit so using chipid for now */
+	#define SRPWR_CAP(sih)  (\
+		(CHIPID(sih->chip) == BCM4347_CHIP_ID) || \
+		(0))
+
+	extern bool _bcmsrpwr;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define SRPWR_ENAB()    (_bcmsrpwr)
+	#elif defined(BCMSRPWR_DISABLED)
+		#define SRPWR_ENAB()    (0)
+	#else
+		#define SRPWR_ENAB()    (1)
+	#endif
+#else
+	#define SRPWR_CAP(sih)          (0)
+	#define SRPWR_ENAB()            (0)
+#endif /* BCMSRPWR */
+
+#endif	/* _siutils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/spid.h b/drivers/net/wireless/bcmdhd/include/spid.h
new file mode 100644
index 0000000..8fdefa4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/spid.h
@@ -0,0 +1,168 @@
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: spid.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef	_SPI_H
+#define	_SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+	uint8	config;			/* 0x00, len, endian, clock, speed, polarity, wakeup */
+	uint8	response_delay;		/* 0x01, read response delay in bytes (corerev < 3) */
+	uint8	status_enable;		/* 0x02, status-enable, intr with status, response_delay
+					 * function selection, command/data error check
+					 */
+	uint8	reset_bp;		/* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+	uint16	intr_reg;		/* 0x04, Intr status register */
+	uint16	intr_en_reg;		/* 0x06, Intr mask register */
+	uint32	status_reg;		/* 0x08, RO, Status bits of last spi transfer */
+	uint16	f1_info_reg;		/* 0x0c, RO, enabled, ready for data transfer, blocksize */
+	uint16	f2_info_reg;		/* 0x0e, RO, enabled, ready for data transfer, blocksize */
+	uint16	f3_info_reg;		/* 0x10, RO, enabled, ready for data transfer, blocksize */
+	uint32	test_read;		/* 0x14, RO 0xfeedbead signature */
+	uint32	test_rw;		/* 0x18, RW */
+	uint8	resp_delay_f0;		/* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+	uint8	resp_delay_f1;		/* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+	uint8	resp_delay_f2;		/* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+	uint8	resp_delay_f3;		/* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
+
+/* SPI device register offsets */
+#define SPID_CONFIG			0x00
+#define SPID_RESPONSE_DELAY		0x01
+#define SPID_STATUS_ENABLE		0x02
+#define SPID_RESET_BP			0x03	/* (corerev >= 1) */
+#define SPID_INTR_REG			0x04	/* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG		0x06	/* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG			0x08	/* 32 bits */
+#define SPID_F1_INFO_REG		0x0C	/* 16 bits */
+#define SPID_F2_INFO_REG		0x0E	/* 16 bits */
+#define SPID_F3_INFO_REG		0x10	/* 16 bits */
+#define SPID_TEST_READ			0x14	/* 32 bits */
+#define SPID_TEST_RW			0x18	/* 32 bits */
+#define SPID_RESP_DELAY_F0		0x1c	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1		0x1d	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2		0x1e	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3		0x1f	/* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32	0x1	/* 0/1 16/32 bit word length */
+#define ENDIAN_BIG	0x2	/* 0/1 Little/Big Endian */
+#define CLOCK_PHASE	0x4	/* 0/1 clock phase delay */
+#define CLOCK_POLARITY	0x8	/* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE	0x10	/* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY	0x20	/* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP		0x80	/* 0/1 Wake-up command from Host to WLAN */
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK	0xFF	/* Configurable rd response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE		0x1	/* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS	0x2	/* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL		0x4	/* Applicability of resp delay to F1 or all func's read */
+#define DWORD_PKT_LEN_EN	0x8	/* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN		0x20	/* Command error check enable */
+#define DATA_ERR_CHK_EN		0x40	/* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET	0x4	/* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET	0x8	/* enable reset for BT backplane */
+#define RESET_SPI		0x80	/* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE	0x0001	/* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW	0x0002
+#define F2_F3_FIFO_WR_OVERFLOW	0x0004
+#define COMMAND_ERROR		0x0008	/* Cleared by writing 1 */
+#define DATA_ERROR		0x0010	/* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE	0x0020
+#define F3_PACKET_AVAILABLE	0x0040
+#define F1_OVERFLOW		0x0080	/* Due to last write. Bkplane has pending write requests */
+#define MISC_INTR0		0x0100
+#define MISC_INTR1		0x0200
+#define MISC_INTR2		0x0400
+#define MISC_INTR3		0x0800
+#define MISC_INTR4		0x1000
+#define F1_INTR			0x2000
+#define F2_INTR			0x4000
+#define F3_INTR			0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE	0x00000001
+#define STATUS_UNDERFLOW		0x00000002
+#define STATUS_OVERFLOW			0x00000004
+#define STATUS_F2_INTR			0x00000008
+#define STATUS_F3_INTR			0x00000010
+#define STATUS_F2_RX_READY		0x00000020
+#define STATUS_F3_RX_READY		0x00000040
+#define STATUS_HOST_CMD_DATA_ERR	0x00000080
+#define STATUS_F2_PKT_AVAILABLE		0x00000100
+#define STATUS_F2_PKT_LEN_MASK		0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT		9
+#define STATUS_F3_PKT_AVAILABLE		0x00100000
+#define STATUS_F3_PKT_LEN_MASK		0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT		21
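+
+/*
+ * Usage sketch (illustrative): decoding the F2 packet length from a 32-bit
+ * status word ('status') read back from the device.
+ *
+ *	if (status & STATUS_F2_PKT_AVAILABLE) {
+ *		uint16 f2_len = (uint16)((status & STATUS_F2_PKT_LEN_MASK) >>
+ *		                         STATUS_F2_PKT_LEN_SHIFT);
+ *	}
+ */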
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 			0x0001
+#define F1_RDY_FOR_DATA_TRANSFER	0x0002
+#define F1_MAX_PKT_SIZE			0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 			0x0001
+#define F2_RDY_FOR_DATA_TRANSFER	0x0002
+#define F2_MAX_PKT_SIZE			0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 			0x0001
+#define F3_RDY_FOR_DATA_TRANSFER	0x0002
+#define F3_MAX_PKT_SIZE			0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE		0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS		4
+
+#define SPI_MAX_PKT_LEN		(2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0		0
+#define SPI_FUNC_1		1
+#define SPI_FUNC_2		2
+#define SPI_FUNC_3		3
+
+#define WAIT_F2RXFIFORDY	100
+#define WAIT_F2RXFIFORDY_DELAY	20
+
+#endif /* _SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h
new file mode 100644
index 0000000..50cd3c1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -0,0 +1,95 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: trxhdr.h 520026 2014-12-10 01:29:40Z $
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC	0x30524448	/* "HDR0" */
+#define TRX_MAX_LEN	0x3B0000	/* Max length */
+#define TRX_NO_HEADER	1		/* Do not write TRX header */
+#define TRX_GZ_FILES	0x2     /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE	0x8	/* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE	0x10	/* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE	0x20	/* Trx contains uncompressed rtecdc.bin image */
+#define TRX_BOOTLOADER		0x40	/* the image is a bootloader */
+
+#define TRX_V1		1
+#define TRX_V1_MAX_OFFSETS	3		/* V1: Max number of individual files */
+
+#ifndef BCMTRXV2
+#define TRX_VERSION	TRX_V1		/* Version 1 */
+#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
+#endif
+
+/* BMAC host drivers/applications like bcmdl need to support both Ver 1 and
+ * Ver 2 of the trx header. To keep it generic, the trx_header structure is defined
+ * as below, where the size of the "offsets" field varies with the TRX version.
+ * Currently, the BMAC host driver and bcmdl are modified to support TRXV2 as well.
+ * To make sure other applications like "dhdl", which are yet to be enhanced to support
+ * TRXV2, are not broken, the new macro and structure definition take effect only when
+ * BCMTRXV2 is defined.
+ */
+struct trx_header {
+	uint32 magic;		/* "HDR0" */
+	uint32 len;		/* Length of file including header */
+	uint32 crc32;		/* 32-bit CRC from flag_version to end of file */
+	uint32 flag_version;	/* 0:15 flags, 16:31 version */
+#ifndef BCMTRXV2
+	uint32 offsets[TRX_MAX_OFFSET];	/* Offsets of partitions from start of header */
+#else
+	uint32 offsets[1];	/* Offsets of partitions from start of header */
+#endif
+};
+
+#ifdef BCMTRXV2
+#define TRX_VERSION		TRX_V2		/* Version 2 */
+#define TRX_MAX_OFFSET  TRX_V2_MAX_OFFSETS
+
+#define TRX_V2		2
+/* V2: Max number of individual files
+ * To support SDR signature + Config data region
+ */
+#define TRX_V2_MAX_OFFSETS	5
+#define SIZEOF_TRXHDR_V1	(sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
+#define SIZEOF_TRXHDR_V2	(sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
+#define TRX_VER(trx)		((trx)->flag_version>>16)
+#define ISTRX_V1(trx)		(TRX_VER(trx) == TRX_V1)
+#define ISTRX_V2(trx)		(TRX_VER(trx) == TRX_V2)
+/* For V2, return the V2 header size; otherwise, return the V1 size */
+#define SIZEOF_TRX(trx)	    (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1)
+#else
+#define SIZEOF_TRX(trx)	    (sizeof(struct trx_header))
+#endif /* BCMTRXV2 */
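+
+/*
+ * Usage sketch (illustrative): a minimal sanity check of an image header using
+ * only the fields and macros defined above; byte-order conversion, if needed,
+ * is left to the caller.
+ *
+ *	struct trx_header *trx = (struct trx_header *)image;
+ *	if (trx->magic == TRX_MAGIC && trx->len <= TRX_MAX_LEN) {
+ *		uint32 flags = trx->flag_version & 0xffff;
+ *		uint32 version = trx->flag_version >> 16;
+ *		the payload begins at image + SIZEOF_TRX(trx)
+ *	}
+ */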
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
+
+#endif /* _TRX_HDR_H */
diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h
new file mode 100644
index 0000000..aed3ace9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: typedefs.h 639587 2016-05-24 06:44:44Z $
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#if (!defined(EDK_RELEASE_VERSION) || (EDK_RELEASE_VERSION < 0x00020000)) || \
+	!defined(BWL_NO_INTERNAL_STDLIB_SUPPORT)
+
+#ifdef SITE_TYPEDEFS
+
+/*
+ * Define SITE_TYPEDEFS in the compile to include a site-specific
+ * typedef file "site_typedefs.h".
+ *
+ * If SITE_TYPEDEFS is not defined, then the code section below makes
+ * inferences about the compile environment based on defined symbols and
+ * possibly compiler pragmas.
+ *
+ * Following these two sections is the Default Typedefs section.
+ * This section is only processed if USE_TYPEDEF_DEFAULTS is
+ * defined. This section has a default set of typedefs and a few
+ * preprocessor symbols (TRUE, FALSE, NULL, ...).
+ */
+
+#include "site_typedefs.h"
+
+#else
+
+/*
+ * Infer the compile environment based on preprocessor symbols and pragmas.
+ * Override type definitions as needed, and include configuration-dependent
+ * header files to define types.
+ */
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE	false
+#endif
+#ifndef TRUE
+#define TRUE	true
+#endif
+
+#else	/* ! __cplusplus */
+
+
+#endif	/* ! __cplusplus */
+
+#if defined(__LP64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+/* float_t types conflict with the same typedefs from the standard ANSI-C
+** math.h header file. Don't re-typedef them here.
+*/
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+/*
+ * If this is either a Linux hybrid build or the per-port code of a hybrid build
+ * then use the Linux header files to get some of the typedefs.  Otherwise, define
+ * them entirely in this file.  We can't always define the types because we get
+ * a duplicate typedef error; there is no way to "undefine" a typedef.
+ * We know when it's per-port code because each file defines LINUX_PORT at the top.
+ */
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif /* TARGETENV_android */
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif	/* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif	/* == 2.6.18 */
+#endif	/* __KERNEL__ */
+
+
+/* Do not support the (u)int64 types with strict ansi for GNU C */
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif /* __ICL */
+
+#if !defined(__DJGPP__)
+
+/* pick up ushort & uint from standard types.h */
+#if defined(__KERNEL__)
+
+/* See note above */
+#include <linux/types.h>	/* sys/types.h and linux/types.h are oil and water */
+
+#else
+
+#include <sys/types.h>
+
+#endif /* linux && __KERNEL__ */
+
+#endif 
+
+
+/* use the default typedefs in the next section of this file */
+#define USE_TYPEDEF_DEFAULTS
+
+#endif /* SITE_TYPEDEFS */
+
+
+/*
+ * Default Typedefs
+ */
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef	/* @abstract@ */ unsigned char	bool;
+#endif /* TYPEDEF_BOOL */
+
+/* define uchar, ushort, uint, ulong */
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char	uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short	ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int	uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long	ulong;
+#endif
+
+/* define [u]int8/16/32/64, uintptr */
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char	uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short	uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int	uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int	uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char	int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short	int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int	int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+/* define float32/64, float_t */
+
+#ifndef TYPEDEF_FLOAT32
+typedef float		float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double		float64;
+#endif
+
+/*
+ * abstracted floating point type allows for compile time selection of
+ * single or double precision arithmetic.  Compiling with -DFLOAT32
+ * selects single precision; the default is double precision.
+ */
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else /* default to double precision floating point */
+typedef float64 float_t;
+#endif
+
+#endif /* TYPEDEF_FLOAT_T */
+
+/* define macro values */
+
+#ifndef FALSE
+#define FALSE	0
+#endif
+
+#ifndef TRUE
+#define TRUE	1  /* TRUE */
+#endif
+
+#ifndef NULL
+#define	NULL	0
+#endif
+
+#ifndef OFF
+#define	OFF	0
+#endif
+
+#ifndef ON
+#define	ON	1  /* ON = 1 */
+#endif
+
+#define	AUTO	(-1) /* Auto = -1 */
+
+/* define PTRSZ, INLINE */
+
+#ifndef PTRSZ
+#define	PTRSZ	sizeof(char*)
+#endif
+
+
+/* Detect compiler type. */
+#if defined(__GNUC__) || defined(__lint)
+	#define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+	#define BWL_COMPILER_ARMCC
+#else
+	#error "Unknown compiler!"
+#endif 
+
+
+#ifndef INLINE
+	#if defined(BWL_COMPILER_MICROSOFT)
+		#define INLINE __inline
+	#elif defined(BWL_COMPILER_GNU)
+		#define INLINE __inline__
+	#elif defined(BWL_COMPILER_ARMCC)
+		#define INLINE	__inline
+	#else
+		#define INLINE
+	#endif 
+#endif /* INLINE */
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif /* USE_TYPEDEF_DEFAULTS */
+
+/* Suppress unused parameter warning */
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+#else
+
+#include <sys/types.h>
+#include <strings.h>
+#include <stdlib.h>
+
+#ifdef stderr
+#undef stderr
+#define stderr stdout
+#endif
+
+typedef UINT32  uint;
+typedef UINT64  ulong;
+typedef UINT16  ushort;
+typedef UINT8   uint8;
+typedef UINT16  uint16;
+typedef UINT32  uint32;
+typedef UINT64  uint64;
+typedef INT8    int8;
+typedef INT16   int16;
+typedef INT32   int32;
+typedef INT64   int64;
+
+typedef BOOLEAN       bool;
+typedef unsigned char uchar;
+typedef UINTN         uintptr;
+
+typedef UINT8   u_char;
+typedef UINT16  u_short;
+typedef UINTN   u_int;
+typedef ULONGN  u_long;
+
+#define UNUSED_PARAMETER(x) (void)(x)
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+#define INLINE
+#define	AUTO	(-1) /* Auto = -1 */
+#define	ON	1  /* ON = 1 */
+#define	OFF	0
+
+#endif /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */
+
+/*
+ * Including the bcmdefs.h here, to make sure everyone including typedefs.h
+ * gets this automatically
+*/
+#include <bcmdefs.h>
+#endif /* _TYPEDEFS_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/usbrdl.h b/drivers/net/wireless/bcmdhd/include/usbrdl.h
new file mode 100644
index 0000000..be5bd69
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/usbrdl.h
@@ -0,0 +1,134 @@
+/*
+ * Broadcom USB remote download definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: usbrdl.h 597933 2015-11-06 18:52:06Z $
+ */
+
+#ifndef _USB_RDL_H
+#define _USB_RDL_H
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE		0	/* returns the rdl_state_t struct */
+#define DL_CHECK_CRC		1	/* currently unused */
+#define DL_GO			2	/* execute downloaded image */
+#define DL_START		3	/* initialize dl state */
+#define DL_REBOOT		4	/* reboot the device in 2 seconds */
+#define DL_GETVER		5	/* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED		6	/* execute the downloaded code and set reset event
+					 * to occur in 2 seconds.  It is the responsibility
+					 * of the downloaded code to clear this event
+					 */
+#define DL_EXEC			7	/* jump to a supplied address */
+#define DL_RESETCFG		8	/* To support single enum on dongle
+					 * - Not used by bootloader
+					 */
+#define DL_DEFER_RESP_OK	9	/* Potentially defer the response to setup
+					 * if resp unavailable
+					 */
+#define DL_CHGSPD		0x0A
+
+#define	DL_HWCMD_MASK		0xfc	/* Mask for hardware read commands: */
+#define	DL_RDHW			0x10	/* Read a hardware address (Ctl-in) */
+#define	DL_RDHW32		0x10	/* Read a 32 bit word */
+#define	DL_RDHW16		0x11	/* Read 16 bits */
+#define	DL_RDHW8		0x12	/* Read an 8 bit byte */
+#define	DL_WRHW			0x14	/* Write a hardware address (Ctl-out) */
+#define DL_WRHW_BLK		0x13	/* Block write to hardware access */
+
+#define DL_CMD_WRHW		2
+
+
+/* states */
+#define DL_WAITING	0	/* waiting to rx first pkt that includes the hdr info */
+#define DL_READY	1	/* hdr was good, waiting for more of the compressed image */
+#define DL_BAD_HDR	2	/* hdr was corrupted */
+#define DL_BAD_CRC	3	/* compressed image was corrupted */
+#define DL_RUNNABLE	4	/* download was successful, waiting for go cmd */
+#define DL_START_FAIL	5	/* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG	6	/* host specified nvram data exceeds DL_NVRAM value */
+#define DL_IMAGE_TOOBIG	7	/* download image too big (exceeds DATA_START for rdl) */
+
+#define TIMEOUT		5000	/* Timeout for usb commands */
+
+struct bcm_device_id {
+	char	*name;
+	uint32	vend;
+	uint32	prod;
+};
+
+typedef struct {
+	uint32	state;
+	uint32	bytes;
+} rdl_state_t;
+
+typedef struct {
+	uint32	chip;		/* Chip id */
+	uint32	chiprev;	/* Chip rev */
+	uint32  ramsize;    /* Size of RAM */
+	uint32  remapbase;   /* Current remap base address */
+	uint32  boardtype;   /* Type of board */
+	uint32  boardrev;    /* Board revision */
+} bootrom_id_t;
+
+/* struct for backplane & jtag accesses */
+typedef struct {
+	uint32	cmd;		/* tag to identify the cmd */
+	uint32	addr;		/* backplane address for write */
+	uint32	len;		/* length of data: 1, 2, 4 bytes */
+	uint32	data;		/* data to write */
+} hwacc_t;
+
+
+/* struct for querying nvram params from bootloader */
+#define QUERY_STRING_MAX 32
+typedef struct {
+	uint32  cmd;                    /* tag to identify the cmd */
+	char    var[QUERY_STRING_MAX];  /* param name */
+} nvparam_t;
+
+typedef void (*exec_fn_t)(void *sih);
+
+#define USB_CTRL_IN (USB_TYPE_VENDOR | 0x80 | USB_RECIP_INTERFACE)
+#define USB_CTRL_OUT (USB_TYPE_VENDOR | 0 | USB_RECIP_INTERFACE)
+
+#define USB_CTRL_EP_TIMEOUT 500 /* Timeout used in USB control_msg transactions. */
+#define USB_BULK_EP_TIMEOUT 500 /* Timeout used in USB bulk transactions. */
+
+#define RDL_CHUNK_MAX	(64 * 1024)  /* max size of each dl transfer */
+#define RDL_CHUNK	1500  /* size of each dl transfer */
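+
+/*
+ * Usage sketch (illustrative): the download flow implied by the control
+ * messages and states above; exact sequencing and error handling live in the
+ * host driver, not in this header.
+ *
+ *	1. DL_GETVER   - read bootrom_id_t to identify chip/ramsize
+ *	2. DL_START    - initialize download state (device reports DL_WAITING)
+ *	3. bulk-out the image in chunks of at most RDL_CHUNK bytes
+ *	4. DL_GETSTATE - poll rdl_state_t until state reaches DL_RUNNABLE
+ *	5. DL_GO       - execute the downloaded image
+ */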
+
+/* bootloader makes special use of trx header "offsets" array */
+#define TRX_OFFSETS_DLFWLEN_IDX	0	/* Size of the fw; used in uncompressed case */
+#define TRX_OFFSETS_JUMPTO_IDX	1	/* RAM address for jumpto after download */
+#define TRX_OFFSETS_NVM_LEN_IDX	2	/* Length of appended NVRAM data */
+#ifdef BCMTRXV2
+#define TRX_OFFSETS_DSG_LEN_IDX	3	/* Length of digital signature for the first image */
+#define TRX_OFFSETS_CFG_LEN_IDX	4	/* Length of config region, which is not digitally signed */
+#endif /* BCMTRXV2 */
+
+#define TRX_OFFSETS_DLBASE_IDX  0       /* RAM start address for download */
+
+#endif  /* _USB_RDL_H */
diff --git a/drivers/net/wireless/bcmdhd/include/vlan.h b/drivers/net/wireless/bcmdhd/include/vlan.h
new file mode 100644
index 0000000..4879eae
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/vlan.h
@@ -0,0 +1,98 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: vlan.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#ifndef	 VLAN_VID_MASK
+#define VLAN_VID_MASK		0xfff	/* low 12 bits are vlan id */
+#endif
+
+#define	VLAN_CFI_SHIFT		12	/* canonical format indicator bit */
+#define VLAN_PRI_SHIFT		13	/* user priority */
+
+#define VLAN_PRI_MASK		7	/* 3 bits of priority */
+
+#define	VLAN_TPID_OFFSET	12	/* offset of tag protocol id field */
+#define	VLAN_TCI_OFFSET		14	/* offset of tag ctrl info field */
+
+#define	VLAN_TAG_LEN		4
+#define	VLAN_TAG_OFFSET		(2 * ETHER_ADDR_LEN)	/* offset in Ethernet II packet only */
+
+#define VLAN_TPID		0x8100	/* VLAN ethertype/Tag Protocol ID */
+
+struct vlan_header {
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+};
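+
+/*
+ * Usage sketch (illustrative): composing the 16-bit tag ("vlan_tag") from its
+ * priority, CFI and VID fields with the shifts/masks above; 'prio', 'cfi' and
+ * 'vid' are hypothetical caller-side values, and any network byte-order
+ * conversion is left to the caller.
+ *
+ *	uint16 tci = (uint16)(((prio) << VLAN_PRI_SHIFT) |
+ *	                      ((cfi) << VLAN_CFI_SHIFT) |
+ *	                      ((vid) & VLAN_VID_MASK));
+ */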
+
+struct ethervlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+	uint16	ether_type;
+};
+
+struct dot3_mac_llc_snapvlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[3];				/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	vlan_type;			/* 0x8100 */
+	uint16	vlan_tag;			/* priority, cfi and vid */
+	uint16	ether_type;			/* ethertype */
+};
+
+#define	ETHERVLAN_HDR_LEN	(ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+	struct ethervlan_header t; \
+	t = *(struct ethervlan_header *)(s); \
+	*(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif /* _vlan_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 0000000..121af90
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlfc_proto.h 675983 2016-12-19 23:18:49Z $
+ *
+ */
+
+/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */
+
+
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+	/* Use TLV to convey WLFC information.
+	 ---------------------------------------------------------------------------
+	| Type |  Len | value                    | Description
+	 ---------------------------------------------------------------------------
+	|  1   |   1  | (handle)                 | MAC OPEN
+	 ---------------------------------------------------------------------------
+	|  2   |   1  | (handle)                 | MAC CLOSE
+	 ---------------------------------------------------------------------------
+	|  3   |   2  | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+	 ---------------------------------------------------------------------------
+	|  4   |   4+ | see pkttag comments      | TXSTATUS
+	|      |   12 | TX status & timestamps   | Present only when pkt timestamp is enabled
+	 ---------------------------------------------------------------------------
+	|  5   |   4  | see pkttag comments      | PKTTAG [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  6   |   8  | (handle, ifid, MAC)      | MAC ADD
+	 ---------------------------------------------------------------------------
+	|  7   |   8  | (handle, ifid, MAC)      | MAC DEL
+	 ---------------------------------------------------------------------------
+	|  8   |   1  | (rssi)                   | RSSI - RSSI value for the packet.
+	 ---------------------------------------------------------------------------
+	|  9   |   1  | (interface ID)           | Interface OPEN
+	 ---------------------------------------------------------------------------
+	|  10  |   1  | (interface ID)           | Interface CLOSE
+	 ---------------------------------------------------------------------------
+	|  11  |   8  | fifo credit returns map  | FIFO credits back to the host
+	|      |      |                          |
+	|      |      |                          | --------------------------------------
+	|      |      |                          | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+	|      |      |                          | --------------------------------------
+	|      |      |                          |
+	 ---------------------------------------------------------------------------
+	|  12  |   2  | MAC handle,              | Host provides a bitmap of pending
+	|      |      | AC[0-3] traffic bitmap   | unicast traffic for MAC-handle dstn.
+	|      |      |                          | [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  13  |   3  | (count, handle, prec_bmp)| One time request for packet to a specific
+	|      |      |                          | MAC destination.
+	 ---------------------------------------------------------------------------
+	|  15  |  12  | (pkttag, timestamps)     | Send TX timestamp at reception from host
+	 ---------------------------------------------------------------------------
+	|  16  |  12  | (pkttag, timestamps)     | Send WLAN RX timestamp along with RX frame
+	 ---------------------------------------------------------------------------
+	| 255  |  N/A |  N/A                     | FILLER - This is a special type
+	|      |      |                          | that has no length or value.
+	|      |      |                          | Typically used for padding.
+	 ---------------------------------------------------------------------------
+	*/
+
+#define WLFC_CTL_TYPE_MAC_OPEN			1
+#define WLFC_CTL_TYPE_MAC_CLOSE			2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT	3
+#define WLFC_CTL_TYPE_TXSTATUS			4
+#define WLFC_CTL_TYPE_PKTTAG			5	/** host<->dongle */
+
+#define WLFC_CTL_TYPE_MACDESC_ADD		6
+#define WLFC_CTL_TYPE_MACDESC_DEL		7
+#define WLFC_CTL_TYPE_RSSI			8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN		9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE		10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK		11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP	12	/** host->dongle */
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET	13
+#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS	14
+
+#define WLFC_CTL_TYPE_TX_ENTRY_STAMP		15
+#define WLFC_CTL_TYPE_RX_STAMP			16
+#define WLFC_CTL_TYPE_TX_STATUS_STAMP		17	/** obsolete */
+
+#define WLFC_CTL_TYPE_TRANS_ID			18
+#define WLFC_CTL_TYPE_COMP_TXSTATUS		19
+
+#define WLFC_CTL_TYPE_TID_OPEN			20
+#define WLFC_CTL_TYPE_TID_CLOSE			21
+
+
+#define WLFC_CTL_TYPE_FILLER			255
+
+#define WLFC_CTL_VALUE_LEN_MACDESC		8	/** handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC			1	/** MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI			1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE		1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP	2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS		4
+#define WLFC_CTL_VALUE_LEN_PKTTAG		4
+#define WLFC_CTL_VALUE_LEN_TIMESTAMP		12	/** 4-byte rate info + 2 TSF */
+
+#define WLFC_CTL_VALUE_LEN_SEQ			2
+
+/* The high bits of ratespec report in timestamp are used for various status */
+#define WLFC_TSFLAGS_RX_RETRY		(1 << 31)
+#define WLFC_TSFLAGS_PM_ENABLED		(1 << 30)
+#define WLFC_TSFLAGS_MASK		(WLFC_TSFLAGS_RX_RETRY | WLFC_TSFLAGS_PM_ENABLED)
+
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK	6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT	3	/* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET	3	/* credit, MAC-handle, prec_bitmap */
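+
+/*
+ * Usage sketch (illustrative): walking a WLFC TLV stream laid out as in the
+ * table at the top of this file. Each item is [type][len][value...]; the
+ * FILLER type carries no length or value byte. 'buf'/'buflen' and 'handle'
+ * are hypothetical caller-side names.
+ *
+ *	while (buflen >= 1) {
+ *		uint8 type = buf[0];
+ *		if (type == WLFC_CTL_TYPE_FILLER) {
+ *			buf++; buflen--;
+ *			continue;
+ *		}
+ *		if (buflen < 2 || buflen < (uint)(2 + buf[1]))
+ *			break;                  truncated/malformed item
+ *		uint8 vlen = buf[1];
+ *		handle(type, buf + 2, vlen);    consume one TLV
+ *		buf += 2 + vlen; buflen -= 2 + vlen;
+ *	}
+ */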
+
+
+#define WLFC_PKTFLAG_PKTFROMHOST	0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED	0x02
+
+#define WL_TXSTATUS_STATUS_MASK			0xff /* allow 8 bits */
+#define WL_TXSTATUS_STATUS_SHIFT		24
+
+#define WL_TXSTATUS_SET_STATUS(x, status)	((x)  = \
+	((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \
+	(((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT))
+#define WL_TXSTATUS_GET_STATUS(x)	(((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
+	WL_TXSTATUS_STATUS_MASK)
+
+/**
+ * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. It is set by the host to the
+ * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The
+ * firmware accepts a packet when the generation matches; on reset (startup) both "current" and
+ * "expected" are set to 0.
+ */
+#define WL_TXSTATUS_GENERATION_MASK		1 /* allow 1 bit */
+#define WL_TXSTATUS_GENERATION_SHIFT		31
+
+#define WL_TXSTATUS_SET_GENERATION(x, gen)	((x) = \
+	((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+	(((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WL_TXSTATUS_GET_GENERATION(x)	(((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+	WL_TXSTATUS_GENERATION_MASK)
+
+#define WL_TXSTATUS_FLAGS_MASK			0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT			27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x)		(((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+	WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK			0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT			24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x)		(((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK			0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num)	((x) = \
+	((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x)		((x) & WL_TXSTATUS_PKTID_MASK)
+
+#define WL_TXSTATUS_HSLOT_MASK			0xffff /* allow 16 bits */
+#define WL_TXSTATUS_HSLOT_SHIFT			8
+
+#define WL_TXSTATUS_SET_HSLOT(x, hslot)	((x)  = \
+	((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \
+	(((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT))
+#define WL_TXSTATUS_GET_HSLOT(x)	(((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \
+	WL_TXSTATUS_HSLOT_MASK)
+
+#define WL_TXSTATUS_FREERUNCTR_MASK		0xff /* allow 8 bits */
+
+#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \
+	((ctr) & WL_TXSTATUS_FREERUNCTR_MASK))
+#define WL_TXSTATUS_GET_FREERUNCTR(x)		((x)& WL_TXSTATUS_FREERUNCTR_MASK)
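+
+/*
+ * Illustrative sketch (not part of the original header): composing a 32-bit
+ * host->dongle packet tag with the setter macros above. The particular field
+ * combination shown (generation, flags, FIFO, hslot, free-running counter)
+ * is only an example of how the accessors nest; the variable and function
+ * names are hypothetical.
+ */
+#if 0	/* example only, excluded from the build */
+static uint32 wl_pkttag_build_example(uint8 gen, uint8 fifo, uint16 hslot, uint8 ctr)
+{
+	uint32 htod_tag = 0;
+
+	WL_TXSTATUS_SET_GENERATION(htod_tag, gen);		/* bit 31 */
+	WL_TXSTATUS_SET_FLAGS(htod_tag, WLFC_PKTFLAG_PKTFROMHOST); /* bits 30..27 */
+	WL_TXSTATUS_SET_FIFO(htod_tag, fifo);			/* bits 26..24 */
+	WL_TXSTATUS_SET_HSLOT(htod_tag, hslot);			/* bits 23..8 */
+	WL_TXSTATUS_SET_FREERUNCTR(htod_tag, ctr);		/* bits 7..0 */
+
+	/* the matching getters recover each field, e.g. WL_TXSTATUS_GET_HSLOT(htod_tag) */
+	return htod_tag;
+}
+#endif	/* example */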
+
+/* AMSDU part of d11 seq number */
+#define WL_SEQ_AMSDU_MASK             0x1 /* allow 1 bit */
+#define WL_SEQ_AMSDU_SHIFT            14
+#define WL_SEQ_SET_AMSDU(x, val)      ((x) = \
+	((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \
+	(((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) /**< sets a single AMSDU bit */
+/** returns TRUE if ring item is AMSDU (seq = d11 seq nr) */
+#define WL_SEQ_IS_AMSDU(x)   (((x) >> WL_SEQ_AMSDU_SHIFT) & \
+	WL_SEQ_AMSDU_MASK)
+
+#define WL_SEQ_FROMFW_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMFW_SHIFT		13
+#define WL_SEQ_SET_FROMFW(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \
+	(((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT))
+/** Set when firmware assigns D11 sequence number to packet */
+#define SET_WL_HAS_ASSIGNED_SEQ(x)	WL_SEQ_SET_FROMFW((x), 1)
+
+/** returns TRUE if packet has been assigned a d11 seq number by the WL firmware layer */
+#define GET_WL_HAS_ASSIGNED_SEQ(x)	(((x) >> WL_SEQ_FROMFW_SHIFT) & WL_SEQ_FROMFW_MASK)
+
+/**
+ * Proptxstatus related.
+ *
+ * When a packet is suppressed by WL or the D11 core, the packet has to be retried. Assigning
+ * a new d11 sequence number for the packet when retrying would cause the peer to be unable to
+ * reorder the packets within an AMPDU. Therefore a suppressed packet handed back by the bus
+ * layer (DHD for SDIO, pciedev for PCIe) reuses its original d11 sequence number, and the
+ * firmware must not assign a new one.
+ */
+#define WL_SEQ_FROMDRV_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMDRV_SHIFT		12
+
+/**
+ * Proptxstatus, host or fw PCIe layer requests WL layer to reuse d11 seq no. Bit is reset by WL
+ * subsystem when it reuses the seq number.
+ */
+#define WL_SEQ_SET_REUSE(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \
+	(((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT))
+#define SET_WL_TO_REUSE_SEQ(x)   WL_SEQ_SET_REUSE((x), 1)
+#define RESET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 0)
+
+/** Proptxstatus, related to reuse of d11 seq numbers when retransmitting */
+#define IS_WL_TO_REUSE_SEQ(x)	(((x) >> WL_SEQ_FROMDRV_SHIFT) & \
+	WL_SEQ_FROMDRV_MASK)
+
+#define WL_SEQ_NUM_MASK			0xfff /* allow 12 bit */
+#define WL_SEQ_NUM_SHIFT		0
+/** Proptxstatus, sets d11seq no in pkt tag, related to reuse of d11seq no when retransmitting */
+#define WL_SEQ_SET_NUM(x, val)	((x) = \
+	((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \
+	(((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT))
+/** Proptxstatus, gets d11seq no from pkt tag, related to reuse of d11seq no when retransmitting */
+#define WL_SEQ_GET_NUM(x)	(((x) >> WL_SEQ_NUM_SHIFT) & \
+	WL_SEQ_NUM_MASK)
+
+#define WL_SEQ_AMSDU_SUPPR_MASK	((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \
+				(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \
+				(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT))
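+
+/*
+ * Illustrative sketch (not part of the original header): marking the 16-bit
+ * seq field of a suppressed packet so that the WL layer reuses the original
+ * d11 sequence number on retransmit, as described above. 'seqp' and the
+ * function name are hypothetical.
+ */
+#if 0	/* example only, excluded from the build */
+static void wl_seq_mark_for_reuse_example(uint16 *seqp, uint16 d11_seq)
+{
+	WL_SEQ_SET_NUM(*seqp, d11_seq);	/* bits 11..0: original d11 sequence number */
+	SET_WL_TO_REUSE_SEQ(*seqp);	/* bit 12: request reuse on retransmit */
+	/* the WL layer later checks IS_WL_TO_REUSE_SEQ() and reads WL_SEQ_GET_NUM(),
+	 * then clears the request with RESET_WL_TO_REUSE_SEQ().
+	 */
+}
+#endif	/* example */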
+
+/* 32 STAs should be enough; 6 bits; must be a power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE	32
+#define WLFC_MAX_IFNUM				16
+#define WLFC_MAC_DESC_ID_INVALID	0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x)	(x) |= \
+	(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x)	(x) &= \
+	~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+
+#define WLFC_MAX_PENDING_DATALEN	120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD	0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS	1
+/* WL firmware suppressed a packet because the MAC is
+ * already in PS mode (short time window)
+ */
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS	2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC	3
+/* Firmware tossed after retries */
+#define WLFC_CTL_PKTFLAG_DISCARD_NOACK	4
+/* Firmware previously reported this packet as suppressed in error; now correcting it to acked */
+#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED	5
+#define WLFC_CTL_PKTFLAG_MASK		(0x0f)	/* For 4-bit mask with one extra bit */
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+	{printf("WLFC: %s():%d:caller:%p\n", \
+	__FUNCTION__, __LINE__, CALL_SITE);}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+	banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS		256
+#define WLHOST_REORDERDATA_LEN		 10
+#define WLHOST_REORDERDATA_TOTLEN	(WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET		0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET		2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET			4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET		6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET		8
+
+#define WLHOST_REORDERDATA_DEL_FLOW		0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL		0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID		0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID		0x08
+#define WLHOST_REORDERDATA_NEW_HOLE		0x10
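+
+/*
+ * Illustrative sketch (not part of the original header): reading the AMPDU
+ * host-reorder value bytes using the offsets and flag bits above. It assumes
+ * each field is a single byte at its documented offset; the function and
+ * parameter names are hypothetical.
+ */
+#if 0	/* example only, excluded from the build */
+static void wl_reorder_info_example(const uint8 *val /* WLHOST_REORDERDATA_LEN bytes */)
+{
+	uint8 flags = val[WLHOST_REORDERDATA_FLAGS_OFFSET];
+	uint8 flow_id = val[WLHOST_REORDERDATA_FLOWID_OFFSET];
+
+	if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+		/* tear down the reorder flow identified by flow_id */
+	} else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+		/* current index is at val[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		 * expected index (if WLHOST_REORDERDATA_EXPIDX_VALID) is at
+		 * val[WLHOST_REORDERDATA_EXPIDX_OFFSET]
+		 */
+	}
+	(void)flow_id;
+}
+#endif	/* example */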
+
+/* transaction id data length: byte 0: reserved, byte 1: sequence number, bytes 2-5: timestamp */
+#define WLFC_CTL_TRANS_ID_LEN			6
+#define WLFC_TYPE_TRANS_ID_LEN			6
+
+#define WLFC_MODE_HANGER	1 /* use hanger */
+#define WLFC_MODE_AFQ		2 /* use afq (At Firmware Queue) */
+#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2))
+
+#define WLFC_MODE_AFQ_SHIFT		2	/* afq bit */
+#define WLFC_SET_AFQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_AFQ_SHIFT))
+/** returns TRUE if firmware supports 'at firmware queue' feature */
+#define WLFC_GET_AFQ(x)	(((x) >> WLFC_MODE_AFQ_SHIFT) & 1)
+
+#define WLFC_MODE_REUSESEQ_SHIFT	3	/* seq reuse bit */
+#define WLFC_SET_REUSESEQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
+/** returns TRUE if 'd11 sequence reuse' has been agreed upon between host and dongle */
+#define WLFC_GET_REUSESEQ(x)	(((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
+
+#define WLFC_MODE_REORDERSUPP_SHIFT	4	/* host reorder suppress pkt bit */
+#define WLFC_SET_REORDERSUPP(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT))
+/** returns TRUE if 'reorder suppress' has been agreed upon between host and dongle */
+#define WLFC_GET_REORDERSUPP(x)	(((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1)
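+
+/*
+ * Illustrative sketch (not part of the original header): inspecting a
+ * negotiated wlfc mode word with the helpers above. 'wlfc_mode' is a
+ * hypothetical variable holding the value agreed between host and dongle.
+ */
+#if 0	/* example only, excluded from the build */
+static void wlfc_mode_example(uint32 wlfc_mode)
+{
+	if (WLFC_GET_AFQ(wlfc_mode)) {
+		/* dongle keeps suppressed packets in its 'at firmware queue' */
+	}
+	if (WLFC_GET_REUSESEQ(wlfc_mode)) {
+		/* d11 sequence numbers are reused on suppress/retransmit */
+	}
+	if (WLFC_GET_REORDERSUPP(wlfc_mode)) {
+		/* host-side reordering of suppressed packets is in effect */
+	}
+}
+#endif	/* example */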
+
+#define FLOW_RING_CREATE	1
+#define FLOW_RING_DELETE	2
+#define FLOW_RING_FLUSH		3
+#define FLOW_RING_OPEN		4
+#define FLOW_RING_CLOSED	5
+#define FLOW_RING_FLUSHED	6
+#define FLOW_RING_TIM_SET	7
+#define FLOW_RING_TIM_RESET	8
+#define FLOW_RING_FLUSH_TXFIFO	9
+
+/* bit 7 indicates whether the tid field carries TID (1) or AC (0) mapped info */
+#define PCIEDEV_IS_AC_TID_MAP_MASK	0x80
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
new file mode 100644
index 0000000..58bcf06
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -0,0 +1,13809 @@
+/*
+ * Custom OID/ioctl definitions for
+ *
+ *
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlioctl.h 677952 2017-01-05 23:25:28Z $
+ */
+
+#ifndef _wlioctl_h_
+#define	_wlioctl_h_
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <bcmip.h>
+#include <bcmeth.h>
+#include <bcmip.h>
+#include <bcmipv6.h>
+#include <bcmevent.h>
+#include <802.11.h>
+#include <802.11s.h>
+#include <802.1d.h>
+#include <bcmwifi_channels.h>
+#include <bcmwifi_rates.h>
+#include <wlioctl_defs.h>
+#include <bcmipv6.h>
+
+#include <bcm_mpool_pub.h>
+#include <bcmcdc.h>
+
+
+typedef struct {
+	uint32 num;
+	chanspec_t list[1];
+} chanspec_list_t;
+
+#define RSN_KCK_LENGTH	16
+#define RSN_KEK_LENGTH	16
+#define TPK_FTM_LEN		16
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ	16
+#endif
+
+/**Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+	cdc_ioctl_t	msg;
+	uint32		data_len;
+	char           intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE	sizeof(rem_ioctl_t)
+
+#define BCM_IOV_XTLV_VERSION 0
+
+#define MAX_NUM_D11CORES 2
+
+/**DFS Forced param */
+typedef struct wl_dfs_forced_params {
+	chanspec_t chspec;
+	uint16 version;
+	chanspec_list_t chspec_list;
+} wl_dfs_forced_t;
+
+#define DFS_PREFCHANLIST_VER 0x01
+#define WL_CHSPEC_LIST_FIXED_SIZE	OFFSETOF(chanspec_list_t, list)
+/* size of dfs forced param size given n channels are in the list */
+#define WL_DFS_FORCED_PARAMS_SIZE(n) \
+	(sizeof(wl_dfs_forced_t) + (((n) < 1) ? (0) : (((n) - 1)* sizeof(chanspec_t))))
+#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \
+	(WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list))
+#define WL_DFS_FORCED_PARAMS_MAX_SIZE \
+	WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t))
+
+/**association decision information */
+typedef struct {
+	uint8		assoc_approved;		/**< (re)association approved */
+	uint8		pad;
+	uint16		reject_reason;		/**< reason code for rejecting association */
+	struct		ether_addr   da;
+	uint8		pad1[6];
+	int64		sys_time;		/**< current system time */
+} assoc_decision_t;
+
+#define DFS_SCAN_S_IDLE		-1
+#define DFS_SCAN_S_RADAR_FREE 0
+#define DFS_SCAN_S_RADAR_FOUND 1
+#define DFS_SCAN_S_INPROGESS 2
+#define DFS_SCAN_S_SCAN_ABORTED 3
+#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4
+#define DFS_SCAN_S_MAX 5
+
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+	struct ether_addr 	da;
+	uint16 			len;
+	uint32 			packetId;
+	uint8			data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+	uint8		ssid_len;	/**< the length of SSID */
+	uint8		ssid[32];	/**< SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+	uint32			channel;
+	int32			dwell_time;
+	struct ether_addr	BSSID;
+	uint8 PAD[2];
+	wl_action_frame_t	action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL	0
+#define MFP_TEST_FLAG_ANY_KEY	1
+typedef struct wl_sa_query {
+	uint32 flag;
+	uint8  action;
+	uint8  PAD;
+	uint16 id;
+	struct ether_addr da;
+	uint16  PAD;
+} wl_sa_query_t;
+
+/* EXT_STA */
+/**association information */
+typedef struct {
+	uint32		assoc_req;	/**< offset to association request frame */
+	uint32		assoc_req_len;	/**< association request frame length */
+	uint32		assoc_rsp;	/**< offset to association response frame */
+	uint32		assoc_rsp_len;	/**< association response frame length */
+	uint32		bcn;		/**< offset to AP beacon */
+	uint32		bcn_len;	/**< AP beacon length */
+	uint32		wsec;		/**< ucast security algo */
+	uint32		wpaie;		/**< offset to WPA ie */
+	uint8		auth_alg;	/**< 802.11 authentication mode */
+	uint8		WPA_auth;	/**< WPA: authenticated key management */
+	uint8		ewc_cap;	/**< EWC (MIMO) capable */
+	uint8		ofdm;		/**< OFDM */
+} assoc_info_t;
+/* defined(EXT_STA) */
+
+/* Flags for OBSS IOVAR Parameters */
+#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD        (0x01)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD      (0x02)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04)
+#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD    (0x08)
+#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD          (0x10)
+#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD          (0x20)
+#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD            (0x40)
+
+/* OBSS IOVAR Version information */
+#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 obss_bwsw_activity_cfm_count_cfg; /**< configurable count in
+		* seconds before we confirm that OBSS is present and
+		* dynamically activate dynamic bwswitch.
+		*/
+	uint8 obss_bwsw_no_activity_cfm_count_cfg; /**< configurable count in
+		* seconds before we confirm that OBSS is gone and
+		* dynamically start a pseudo upgrade. If OBSS is still seen
+		* during the pseudo sense time (i.e. the watchdog falsely
+		* detected that OBSS was gone), this count is incremented in
+		* steps of obss_bwsw_no_activity_cfm_count_incr_cfg before
+		* confirming OBSS detection again. At present a maximum of
+		* 30 seconds is allowed [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT].
+		*/
+	uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /**< see above */
+	uint16 obss_bwsw_pseudo_sense_count_cfg; /**< number of msecs/cnt to be in
+		* pseudo state. This is used to sense/measure the stats from lq.
+		*/
+	uint8 obss_bwsw_rx_crs_threshold_cfg; /**< RX CRS default threshold */
+	uint8 obss_bwsw_dur_thres; /**< OBSS dyn bwsw trigger/RX CRS Sec */
+	uint8 obss_bwsw_txop_threshold_cfg; /**< TXOP default threshold */
+} BWL_POST_PACKED_STRUCT wlc_obss_dynbwsw_config_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 version;	/**< version field */
+	uint32 config_mask;
+	uint32 reset_mask;
+	wlc_obss_dynbwsw_config_t config_params;
+} BWL_POST_PACKED_STRUCT obss_config_params_t;
+#include <packed_section_end.h>
+
+/**bsscfg type */
+typedef enum bsscfg_type {
+	BSSCFG_TYPE_GENERIC = 0,	/**< Generic AP/STA/IBSS BSS */
+	BSSCFG_TYPE_P2P = 1,		/**< P2P BSS */
+	/* index 2 earlier used for BTAMP */
+	BSSCFG_TYPE_PSTA = 3,
+	BSSCFG_TYPE_TDLS = 4,
+	BSSCFG_TYPE_SLOTTED_BSS = 5,
+	BSSCFG_TYPE_PROXD = 6,
+	BSSCFG_TYPE_NAN = 7,
+	BSSCFG_TYPE_MESH = 8,
+	BSSCFG_TYPE_AIBSS = 9
+} bsscfg_type_t;
+
+/* bsscfg subtype */
+typedef enum bsscfg_subtype {
+	BSSCFG_SUBTYPE_NONE = 0,
+	BSSCFG_GENERIC_STA = 1,		/* GENERIC */
+	BSSCFG_GENERIC_AP = 2,
+	BSSCFG_GENERIC_IBSS = 6,
+	BSSCFG_P2P_GC = 3,		/* P2P */
+	BSSCFG_P2P_GO = 4,
+	BSSCFG_P2P_DISC = 5,
+	/* Index 7 & 8 earlier used for BTAMP */
+	BSSCFG_SUBTYPE_AWDL = 9, /* SLOTTED_BSS_TYPE */
+	BSSCFG_SUBTYPE_NAN_MGMT = 10,
+	BSSCFG_SUBTYPE_NAN_DATA = 11,
+	BSSCFG_SUBTYPE_NAN_MGMT_DATA = 12
+} bsscfg_subtype_t;
+
+typedef struct wlc_bsscfg_info {
+	uint32 type;
+	uint32 subtype;
+} wlc_bsscfg_info_t;
+
+/* ULP SHM Offsets info */
+typedef struct ulp_shm_info {
+	uint32 m_ulp_ctrl_sdio;
+	uint32 m_ulp_wakeevt_ind;
+	uint32 m_ulp_wakeind;
+} ulp_shm_info_t;
+
+
+/* Legacy structure to help keep backward compatible wl tool and tray app */
+
+#define	LEGACY_WL_BSS_INFO_VERSION	107	/**< older version of wl_bss_info struct */
+
+typedef struct wl_bss_info_107 {
+	uint32		version;		/**< version field */
+	uint32		length;			/**< byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/**< units are Kusec */
+	uint16		capability;		/**< Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	uint8		PAD;
+	struct {
+		uint32	count;			/**< # rates in this set */
+		uint8	rates[16];		/**< rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/**< supported rates */
+	uint8		channel;		/**< Channel no. */
+	uint8		PAD;
+	uint16		atim_window;		/**< units are Kusec */
+	uint8		dtim_period;		/**< DTIM period */
+	uint8		PAD;
+	int16		RSSI;			/**< receive signal strength (in dBm) */
+	int8		phy_noise;		/**< noise (in dBm) */
+	uint8		PAD[3];
+	uint32		ie_length;		/**< byte length of Information Elements */
+	/* variable length Information Elements */
+} wl_bss_info_107_t;
+
+/*
+ * Per-BSS information structure.
+ */
+
+#define	LEGACY2_WL_BSS_INFO_VERSION	108		/**< old version of wl_bss_info struct */
+
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_108 {
+	uint32		version;		/**< version field */
+	uint32		length;			/**< byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/**< units are Kusec */
+	uint16		capability;		/**< Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	uint8		PAD[1];
+	struct {
+		uint32	count;			/**< # rates in this set */
+		uint8	rates[16];		/**< rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/**< supported rates */
+	chanspec_t	chanspec;		/**< chanspec for bss */
+	uint16		atim_window;		/**< units are Kusec */
+	uint8		dtim_period;		/**< DTIM period */
+	uint8		PAD;
+	int16		RSSI;			/**< receive signal strength (in dBm) */
+	int8		phy_noise;		/**< noise (in dBm) */
+
+	uint8		n_cap;			/**< BSS is 802.11N Capable */
+	uint8		PAD[2];
+	uint32		nbss_cap;		/**< 802.11N BSS Capabilities (based on HT_CAP_*) */
+	uint8		ctl_ch;			/**< 802.11N BSS control channel number */
+	uint8		PAD[3];
+	uint32		reserved32[1];		/**< Reserved for expansion of BSS properties */
+	uint8		flags;			/**< flags */
+	uint8		reserved[3];		/**< Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/**< 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/**< offset at which IEs start, from beginning */
+	uint8		PAD[2];
+	uint32		ie_length;		/**< byte length of Information Elements */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_108_t;
+
+
+#define	WL_BSS_INFO_VERSION	109		/**< current version of wl_bss_info struct */
+
+/**
+ * BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+	uint32		version;		/**< version field */
+	uint32		length;			/**< byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/**< units are Kusec */
+	uint16		capability;		/**< Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	uint8		bcnflags;		/* additional flags w.r.t. beacon */
+	struct {
+		uint32	count;			/**< # rates in this set */
+		uint8	rates[16];		/**< rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/**< supported rates */
+	chanspec_t	chanspec;		/**< chanspec for bss */
+	uint16		atim_window;		/**< units are Kusec */
+	uint8		dtim_period;		/**< DTIM period */
+	uint8		accessnet;		/* from beacon interwork IE (if bcnflags) */
+	int16		RSSI;			/**< receive signal strength (in dBm) */
+	int8		phy_noise;		/**< noise (in dBm) */
+	uint8		n_cap;			/**< BSS is 802.11N Capable */
+	uint16		freespace1;		/* make implicit padding explicit */
+	uint32		nbss_cap;		/**< 802.11N+AC BSS Capabilities */
+	uint8		ctl_ch;			/**< 802.11N BSS control channel number */
+	uint8		padding1[3];		/**< explicit struct alignment padding */
+	uint16		vht_rxmcsmap;	/**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint16		vht_txmcsmap;	/**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint8		flags;			/**< flags */
+	uint8		vht_cap;		/**< BSS is vht capable */
+	uint8		reserved[2];		/**< Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/**< 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/**< offset at which IEs start, from beginning */
+	uint16		freespace2;		/* making implicit padding explicit */
+	uint32		ie_length;		/**< byte length of Information Elements */
+	int16		SNR;			/**< average SNR during frame reception */
+	uint16		vht_mcsmap;		/**< STA's Associated vhtmcsmap */
+	uint16		vht_mcsmap_prop;	/**< STA's Associated prop vhtmcsmap */
+	uint16		vht_txmcsmap_prop;	/**< prop VHT tx mcs prop */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_t;
+
+#define	WL_GSCAN_FULL_RESULT_VERSION	2	/* current version of wl_gscan_result_t struct */
+#define WL_GSCAN_INFO_FIXED_FIELD_SIZE   (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t))
+
+typedef struct wl_gscan_bss_info {
+	uint32      timestamp[2];
+	wl_bss_info_t info;
+	/* Do not add any more members below, fixed  */
+	/* and variable length Information Elements to follow */
+} wl_gscan_bss_info_t;
+
+
+typedef struct wl_bsscfg {
+	uint32  bsscfg_idx;
+	uint32  wsec;
+	uint32  WPA_auth;
+	uint32  wsec_index;
+	uint32  associated;
+	uint32  BSS;
+	uint32  phytest_on;
+	struct ether_addr   prev_BSSID;
+	struct ether_addr   BSSID;
+	uint32  targetbss_wpa2_flags;
+	uint32 assoc_type;
+	uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_if_add {
+	uint32  bsscfg_flags;
+	uint32  if_flags;
+	uint32  ap;
+	struct ether_addr   mac_addr;
+	uint16  PAD;
+	uint32  wlc_index;
+} wl_if_add_t;
+
+typedef struct wl_bss_config {
+	uint32	atim_window;
+	uint32	beacon_period;
+	uint32	chanspec;
+} wl_bss_config_t;
+
+#define WL_BSS_USER_RADAR_CHAN_SELECT	0x1	/**< User application will randomly select
+						 * radar channel.
+						 */
+
+#define DLOAD_HANDLER_VER		1	/**< Downloader version */
+#define DLOAD_FLAG_VER_MASK		0xf000	/**< Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT		12	/**< Downloader version shift */
+
+#define DL_CRC_NOT_INUSE	0x0001
+#define DL_BEGIN		0x0002
+#define DL_END			0x0004
+
+/* Flags for Major/Minor/Date number shift and mask */
+#define EPI_VER_SHIFT     16
+#define EPI_VER_MASK      0xFFFF
+/** generic download types & flags */
+enum {
+	DL_TYPE_UCODE = 1,
+	DL_TYPE_CLM = 2
+};
+
+/** ucode type values */
+enum {
+	UCODE_FW,
+	INIT_VALS,
+	BS_INIT_VALS
+};
+
+struct wl_dload_data {
+	uint16 flag;
+	uint16 dload_type;
+	uint32 len;
+	uint32 crc;
+	uint8  data[1];
+};
+typedef struct wl_dload_data wl_dload_data_t;
+
+struct wl_ucode_info {
+	uint32 ucode_type;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_num;
+	uint8  data_chunk[1];
+};
+typedef struct wl_ucode_info wl_ucode_info_t;
+
+struct wl_clm_dload_info {
+	uint32 ds_id;
+	uint32 clm_total_len;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_offset;
+	uint8  data_chunk[1];
+};
+typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
+
+typedef struct wlc_ssid {
+	uint32		SSID_len;
+	uint8		SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_t;
+
+typedef struct wlc_ssid_ext {
+	uint8      hidden;
+	uint8      PAD;
+	uint16     flags;
+	uint8      SSID_len;
+	int8       rssi_thresh;
+	uint8      SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_ext_t;
+
+#define MAX_PREFERRED_AP_NUM     5
+typedef struct wlc_fastssidinfo {
+	uint32			SSID_channel[MAX_PREFERRED_AP_NUM];
+	wlc_ssid_t		SSID_info[MAX_PREFERRED_AP_NUM];
+} wlc_fastssidinfo_t;
+
+typedef struct wnm_url {
+	uint8   len;
+	uint8   data[1];
+} wnm_url_t;
+
+typedef struct chan_scandata {
+	uint8		txpower;
+	uint8		pad;
+	chanspec_t	channel;		/**< Channel num, bw, ctrl_sb and band */
+	uint32		channel_mintime;
+	uint32		channel_maxtime;
+} chan_scandata_t;
+
+typedef enum wl_scan_type {
+	EXTDSCAN_FOREGROUND_SCAN,
+	EXTDSCAN_BACKGROUND_SCAN,
+	EXTDSCAN_FORCEDBACKGROUND_SCAN
+} wl_scan_type_t;
+
+#define WLC_EXTDSCAN_MAX_SSID		5
+
+typedef struct wl_extdscan_params {
+	int8 		nprobes;		/**< 0, passive, otherwise active */
+	int8    	split_scan;		/**< split scan */
+	int8		band;			/**< band */
+	int8		pad;
+	wlc_ssid_t 	ssid[WLC_EXTDSCAN_MAX_SSID]; /**< ssid list */
+	uint32		tx_rate;		/**< in 500ksec units */
+	wl_scan_type_t	scan_type;		/**< enum */
+	int32 		channel_num;
+	chan_scandata_t channel_list[1];	/**< list of chandata structs */
+} wl_extdscan_params_t;
+
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE 	(sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+
+typedef struct wl_scan_params {
+	wlc_ssid_t ssid;		/**< default: {0, ""} */
+	struct ether_addr bssid;	/**< default: bcast */
+	int8 bss_type;			/**< default: any,
+					 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+					 */
+	uint8 scan_type;		/**< flags, 0 use default */
+	int32 nprobes;			/**< -1 use default, number of probes per channel */
+	int32 active_time;		/**< -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/**< -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/**< -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+	int32 channel_num;		/**< count of channels and ssids that follow
+					 *
+					 * low half is count of channels in channel_list, 0
+					 * means default (use all available channels)
+					 *
+					 * high half is entries in wlc_ssid_t array that
+					 * follows channel_list, aligned for int32 (4 bytes)
+					 * meaning an odd channel count implies a 2-byte pad
+					 * between end of channel_list and first ssid
+					 *
+					 * if ssid count is zero, single ssid in the fixed
+					 * parameter portion is assumed, otherwise ssid in
+					 * the fixed portion is ignored
+					 */
+	uint16 channel_list[1];		/**< list of chanspecs */
+} wl_scan_params_t;
+
+/** size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+#define WL_MAX_ROAMSCAN_DATSZ	(WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
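+
+/*
+ * Illustrative sketch (not part of the original header): filling a
+ * wl_scan_params_t with an explicit channel list and no appended SSIDs,
+ * following the channel_num packing rules documented in the struct (low 16
+ * bits = channel count, high 16 bits = SSID count). The buffer handling,
+ * function name and the availability of memset() are assumptions.
+ */
+#if 0	/* example only, excluded from the build */
+static int wl_scan_params_fill_example(void *buf, uint32 buflen,
+	const uint16 *chanspecs, uint16 nchan)
+{
+	wl_scan_params_t *params = (wl_scan_params_t *)buf;
+	uint32 need = WL_SCAN_PARAMS_FIXED_SIZE + nchan * sizeof(uint16);
+	uint16 i;
+
+	if (buflen < need)
+		return -1;
+
+	memset(params, 0, need);
+	memset(&params->bssid, 0xff, sizeof(params->bssid));	/* broadcast BSSID */
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;					/* default scan flags */
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = nchan;				/* zero SSIDs appended */
+
+	for (i = 0; i < nchan; i++)
+		params->channel_list[i] = chanspecs[i];
+	return 0;
+}
+#endif	/* example */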
+
+#define ISCAN_REQ_VERSION 1
+
+/** incremental scan struct */
+typedef struct wl_iscan_params {
+	uint32 version;
+	uint16 action;
+	uint16 scan_duration;
+	wl_scan_params_t params;
+} wl_iscan_params_t;
+
+/** 3 fields + size of wl_scan_params, not including variable length array */
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
+
+/** size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
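+
+/*
+ * Illustrative sketch (not part of the original header): walking the
+ * variable-sized wl_bss_info_t records inside a wl_scan_results_t, honouring
+ * each record's 'length' field as the BSS info comments above require. Bounds
+ * checking against the containing buffer is omitted; names are hypothetical.
+ */
+#if 0	/* example only, excluded from the build */
+static void wl_scan_results_walk_example(const wl_scan_results_t *results)
+{
+	const uint8 *ptr = (const uint8 *)results->bss_info;
+	uint32 i;
+
+	for (i = 0; i < results->count; i++) {
+		const wl_bss_info_t *bi = (const wl_bss_info_t *)ptr;
+
+		/* bi->BSSID, bi->SSID/bi->SSID_len, bi->RSSI etc. are valid here;
+		 * IEs start at (const uint8 *)bi + bi->ie_offset, bi->ie_length bytes.
+		 */
+		ptr += bi->length;	/* 'length' covers the record including its IEs */
+	}
+}
+#endif	/* example */
+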
+#define ESCAN_REQ_VERSION 1
+
+/** event scan reduces amount of SOC memory needed to store scan results */
+typedef struct wl_escan_params {
+	uint32 version;
+	uint16 action;
+	uint16 sync_id;
+	wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+/** event scan reduces amount of SOC memory needed to store scan results */
+typedef struct wl_escan_result {
+	uint32 buflen;
+	uint32 version;
+	uint16 sync_id;
+	uint16 bss_count;
+	wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+typedef struct wl_gscan_result {
+	uint32 buflen;
+	uint32 version;
+	uint32 scan_ch_bucket;
+	wl_gscan_bss_info_t bss_info[1];
+} wl_gscan_result_t;
+
+#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t))
+/** incremental scan results struct */
+typedef struct wl_iscan_results {
+	uint32 status;
+	wl_scan_results_t results;
+} wl_iscan_results_t;
+
+/** size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+	(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+typedef struct wl_probe_params {
+	wlc_ssid_t ssid;
+	struct ether_addr bssid;
+	struct ether_addr mac;
+} wl_probe_params_t;
+
+#define WL_MAXRATES_IN_SET		16	/**< max # of rates in a rateset */
+typedef struct wl_rateset {
+	uint32	count;				/**< # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/**< rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
+
+typedef struct wl_rateset_args {
+	uint32	count;				/**< # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/**< rates in 500kbps units w/hi bit set if basic */
+	uint8   mcs[MCSSET_LEN];        	/**< supported mcs index bit map */
+	uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported mcs index bit map per nss */
+} wl_rateset_args_t;
+
+#define TXBF_RATE_MCS_ALL		4
+#define TXBF_RATE_VHT_ALL		4
+#define TXBF_RATE_OFDM_ALL		8
+
+typedef struct wl_txbf_rateset {
+	uint8	txbf_rate_mcs[TXBF_RATE_MCS_ALL];	/**< one for each stream */
+	uint8	txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL];	/**< one for each stream */
+	uint16	txbf_rate_vht[TXBF_RATE_VHT_ALL];	/**< one for each stream */
+	uint16	txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL];	/**< one for each stream */
+	uint8	txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /**< bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_cnt;
+	uint8	txbf_rate_ofdm_cnt_bcm;
+} wl_txbf_rateset_t;
+
+#define NUM_BFGAIN_ARRAY_1RX	2
+#define NUM_BFGAIN_ARRAY_2RX	3
+#define NUM_BFGAIN_ARRAY_3RX	4
+#define NUM_BFGAIN_ARRAY_4RX	5
+
+typedef struct wl_txbf_expgainset {
+	/* bitmap for each element: B[4:0]=>c0, B[9:5]=>c1, B[14:10]=>c2, B[19:15]=>c[3-7]
+	 * B[24:20]=>c[8-9], B[29:25]=>c[10-11]
+	 */
+	uint32	bfgain_2x1[NUM_BFGAIN_ARRAY_1RX]; /* exp     1ss, imp 1ss */
+	uint32	bfgain_2x2[NUM_BFGAIN_ARRAY_2RX]; /* exp [1-2]ss, imp 1ss */
+	uint32	bfgain_3x1[NUM_BFGAIN_ARRAY_1RX];
+	uint32	bfgain_3x2[NUM_BFGAIN_ARRAY_2RX];
+	uint32	bfgain_3x3[NUM_BFGAIN_ARRAY_3RX]; /* exp [1-3]ss, imp 1ss */
+	uint32	bfgain_4x1[NUM_BFGAIN_ARRAY_1RX];
+	uint32	bfgain_4x2[NUM_BFGAIN_ARRAY_2RX];
+	uint32	bfgain_4x3[NUM_BFGAIN_ARRAY_3RX];
+	uint32	bfgain_4x4[NUM_BFGAIN_ARRAY_4RX]; /* exp [1-4]ss, imp 1ss */
+} wl_txbf_expgainset_t;
+
+#define OFDM_RATE_MASK			0x0000007f
+typedef uint8 ofdm_rates_t;
+
+typedef struct wl_rates_info {
+	wl_rateset_t rs_tgt;
+	uint32 phy_type;
+	int32 bandtype;
+	uint8 cck_only;
+	uint8 rate_mask;
+	uint8 mcsallow;
+	uint8 bw;
+	uint8 txstreams;
+	uint8 PAD[3];
+} wl_rates_info_t;
+
+/**uint32 list */
+typedef struct wl_uint32_list {
+	/** in - # of elements, out - # of entries */
+	uint32 count;
+	/** variable length uint32 list */
+	uint32 element[1];
+} wl_uint32_list_t;
+
+/* WLC_SET_ALLOW_MODE values */
+#define ALLOW_MODE_ANY_BSSID		0
+#define ALLOW_MODE_ONLY_DESIRED_BSSID	1
+#define ALLOW_MODE_NO_BSSID		2
+
+/** used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+	struct ether_addr bssid;	/**< 00:00:00:00:00:00: broadcast scan */
+	uint16 bssid_cnt;		/**< 0: use chanspec_num, and the single bssid,
+					* otherwise count of chanspecs in chanspec_list
+					* AND paired bssids following chanspec_list
+					* also, chanspec_num has to be set to zero
+					* for bssid list to be used
+					*/
+	int32 chanspec_num;		/**< 0: all available channels,
+					* otherwise count of chanspecs in chanspec_list
+					*/
+	chanspec_t chanspec_list[1];	/**< list of chanspecs */
+} wl_assoc_params_t;
+
+#define WL_ASSOC_PARAMS_FIXED_SIZE 	OFFSETOF(wl_assoc_params_t, chanspec_list)
+
+/** used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/** used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/** used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+	wlc_ssid_t ssid;
+	wl_assoc_params_t params;	/**< optional field; when present it must include at
+					 * least the fixed portion of the wl_assoc_params_t
+					 * struct.
+					 */
+} wl_join_params_t;
+
+#define WL_JOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_join_params_t, params) + \
+					 WL_ASSOC_PARAMS_FIXED_SIZE)
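+
+/*
+ * Illustrative sketch (not part of the original header): sizing a
+ * wl_join_params_t that carries an explicit chanspec list (and no BSSID
+ * list), per the chanspec_num/bssid_cnt rules documented in
+ * wl_assoc_params_t. 'nchan' and the function name are hypothetical.
+ */
+#if 0	/* example only, excluded from the build */
+static uint32 wl_join_params_size_example(uint32 nchan)
+{
+	/* the fixed size excludes the variable chanspec_list[] entirely */
+	return WL_JOIN_PARAMS_FIXED_SIZE + nchan * sizeof(chanspec_t);
+}
+#endif	/* example */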
+
+typedef struct wlc_roam_exp_params {
+	int8 a_band_boost_threshold;
+	int8 a_band_penalty_threshold;
+	int8 a_band_boost_factor;
+	int8 a_band_penalty_factor;
+	int8 cur_bssid_boost;
+	int8 alert_roam_trigger_threshold;
+	int16 a_band_max_boost;
+} wlc_roam_exp_params_t;
+
+#define ROAM_EXP_CFG_VERSION     1
+
+#define ROAM_EXP_ENABLE_FLAG             (1 << 0)
+
+#define ROAM_EXP_CFG_PRESENT             (1 << 1)
+
+typedef struct wl_roam_exp_cfg {
+	uint16 version;
+	uint16 flags;
+	wlc_roam_exp_params_t params;
+} wl_roam_exp_cfg_t;
+
+typedef struct wl_bssid_pref_list {
+	struct ether_addr bssid;
+	/* Add this to modify rssi */
+	int8 rssi_factor;
+	int8 flags;
+} wl_bssid_pref_list_t;
+
+#define BSSID_PREF_LIST_VERSION        1
+#define ROAM_EXP_CLEAR_BSSID_PREF        (1 << 0)
+
+typedef struct wl_bssid_pref_cfg {
+	uint16 version;
+	uint16 flags;
+	uint16 count;
+	uint16 reserved;
+	wl_bssid_pref_list_t bssids[];
+} wl_bssid_pref_cfg_t;
+
+#define SSID_WHITELIST_VERSION         1
+
+#define ROAM_EXP_CLEAR_SSID_WHITELIST    (1 << 0)
+
+/* Roam SSID whitelist: SSIDs in this list may be considered as
+ * targets to join when evaluating a roam.
+ */
+
+typedef struct wl_ssid_whitelist {
+
+	uint16 version;
+	uint16 flags;
+
+	uint8 ssid_count;
+	uint8 reserved[3];
+	wlc_ssid_t ssids[];
+} wl_ssid_whitelist_t;
+
+#define ROAM_EXP_EVENT_VERSION       1
+
+typedef struct wl_roam_exp_event {
+
+	uint16 version;
+	uint16 flags;
+	wlc_ssid_t cur_ssid;
+} wl_roam_exp_event_t;
+
+/** scan params for extended join */
+typedef struct wl_join_scan_params {
+	uint8 scan_type;		/**< 0 use default, active or passive scan */
+	uint8 PAD[3];
+	int32 nprobes;			/**< -1 use default, number of probes per channel */
+	int32 active_time;		/**< -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/**< -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/**< -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+} wl_join_scan_params_t;
+
+/** extended join params */
+typedef struct wl_extjoin_params {
+	wlc_ssid_t ssid;		/**< {0, ""}: wildcard scan */
+	wl_join_scan_params_t scan;
+	wl_join_assoc_params_t assoc;	/**< optional field; when present it must include at
+					 * least the fixed portion of the
+					 * wl_join_assoc_params_t struct.
+					 */
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_extjoin_params_t, assoc) + \
+					 WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+
+#define ANT_SELCFG_MAX		4	/**< max number of antenna configurations */
+#define MAX_STREAMS_SUPPORTED	4	/**< max number of streams supported */
+typedef struct {
+	uint8 ant_config[ANT_SELCFG_MAX];	/**< antenna configuration */
+	uint8 num_antcfg;			/**< number of available antenna configurations */
+} wlc_antselcfg_t;
+
+typedef struct {
+	uint32 duration;	/**< millisecs spent sampling this channel */
+	uint32 congest_ibss;	/**< millisecs in our bss (presumably this traffic will
+				 * move if cur bss moves channels)
+				 */
+	uint32 congest_obss;	/**< traffic not in our bss */
+	uint32 interference;	/**< millisecs detecting a non 802.11 interferer. */
+	uint32 timestamp;	/**< second timestamp */
+} cca_congest_t;
+
+typedef struct {
+	chanspec_t chanspec;	/**< Which channel? */
+	uint16 num_secs;	/**< How many secs worth of data */
+	cca_congest_t  secs[1];	/**< Data */
+} cca_congest_channel_req_t;
+typedef struct {
+	uint32 duration;	/**< millisecs spent sampling this channel */
+	uint32 congest;		/**< millisecs detecting busy CCA */
+	uint32 timestamp;	/**< second timestamp */
+} cca_congest_simple_t;
+
+typedef struct {
+	uint16 status;
+	uint16 id;
+	chanspec_t chanspec;			/**< Which channel? */
+	uint16 len;
+	union {
+		cca_congest_simple_t  cca_busy;	/**< CCA busy */
+		int32 noise;			/**< noise floor */
+	};
+} cca_chan_qual_event_t;
+
+typedef struct {
+	uint32 msrmnt_time;	/**< Time for Measurement (msec) */
+	uint32 msrmnt_done;	/**< flag set when measurement complete */
+	char buf[];
+} cca_stats_n_flags;
+
+typedef struct {
+	uint32 msrmnt_query;    /* host to driver query for measurement done */
+	uint32 time_req;        /* time required for measurement */
+	uint8 report_opt;       /* option to print different stats in report */
+	uint8 PAD[3];
+} cca_msrmnt_query;
+
+/* interference sources */
+enum interference_source {
+	ITFR_NONE = 0,			/**< interference */
+	ITFR_PHONE,			/**< wireless phone */
+	ITFR_VIDEO_CAMERA,		/**< wireless video camera */
+	ITFR_MICROWAVE_OVEN,		/**< microwave oven */
+	ITFR_BABY_MONITOR,		/**< wireless baby monitor */
+	ITFR_BLUETOOTH,			/**< bluetooth */
+	ITFR_VIDEO_CAMERA_OR_BABY_MONITOR,	/**< wireless camera or baby monitor */
+	ITFR_BLUETOOTH_OR_BABY_MONITOR,	/**< bluetooth or baby monitor */
+	ITFR_VIDEO_CAMERA_OR_PHONE,	/**< video camera or phone */
+	ITFR_UNIDENTIFIED		/**< interference from unidentified source */
+};
+
+/** structure for interference source report */
+typedef struct {
+	uint32 flags;		/**< flags.  bit definitions below */
+	uint32 source;		/**< last detected interference source */
+	uint32 timestamp;	/**< second timestamp on interferenced flag change */
+} interference_source_rep_t;
+
+#define WLC_CNTRY_BUF_SZ	4		/**< Country string is 3 bytes + NUL */
+
+typedef struct wl_country {
+	char country_abbrev[WLC_CNTRY_BUF_SZ];	/**< nul-terminated country code used in
+						 * the Country IE
+						 */
+	int32 rev;				/**< revision specifier for ccode
+						 * on set, -1 indicates unspecified.
+						 * on get, rev >= 0
+						 */
+	char ccode[WLC_CNTRY_BUF_SZ];		/**< nul-terminated built-in country code.
+						 * variable length, but fixed size in
+						 * struct allows simple allocation for
+						 * expected country strings <= 3 chars.
+						 */
+} wl_country_t;
+
+
+#define CCODE_INFO_VERSION 1
+
+typedef enum wl_ccode_role {
+	WLC_CCODE_ROLE_ACTIVE = 0,
+	WLC_CCODE_ROLE_HOST,
+	WLC_CCODE_ROLE_80211D_ASSOC,
+	WLC_CCODE_ROLE_80211D_SCAN,
+	WLC_CCODE_ROLE_DEFAULT,
+	WLC_CCODE_LAST
+} wl_ccode_role_t;
+#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST
+
+typedef struct wl_ccode_entry {
+	uint16 reserved;
+	uint8 band;
+	uint8 role;
+	char	ccode[WLC_CNTRY_BUF_SZ];
+} wl_ccode_entry_t;
+
+typedef struct wl_ccode_info {
+	uint16 version;
+	uint16 count;   /**< Number of ccodes entries in the set */
+	wl_ccode_entry_t ccodelist[1];
+} wl_ccode_info_t;
+#define WL_CCODE_INFO_FIXED_LEN	OFFSETOF(wl_ccode_info_t, ccodelist)
+typedef struct wl_channels_in_country {
+	uint32 buflen;
+	uint32 band;
+	char country_abbrev[WLC_CNTRY_BUF_SZ];
+	uint32 count;
+	uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+	uint32 buflen;
+	uint32 band_set;
+	uint32 band;
+	uint32 count;
+	char country_abbrev[1];
+} wl_country_list_t;
+
+typedef struct wl_rm_req_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/**< token for this measurement */
+	uint32	tsf_h;		/**< TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/**< TSF low 32-bits */
+	uint32	dur;		/**< TUs */
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+	uint32	token;		/**< overall measurement set token */
+	uint32	count;		/**< number of measurement requests */
+	void	*cb;		/**< completion callback function: may be NULL */
+	void	*cb_arg;	/**< arg to completion callback function */
+	wl_rm_req_elt_t	req[1];	/**< variable length block of requests */
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN	OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/**< token for this measurement */
+	uint32	tsf_h;		/**< TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/**< TSF low 32-bits */
+	uint32	dur;		/**< TUs */
+	uint32	len;		/**< byte length of data block */
+	uint8	data[1];	/**< variable length data block */
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN	24	/**< length excluding data block */
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+	uint8	rpi[WL_RPI_REP_BIN_NUM];
+	int8	rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+	uint32	token;		/**< overall measurement set token */
+	uint32	len;		/**< length of measurement report block */
+	wl_rm_rep_elt_t	rep[1];	/**< variable length block of reports */
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN	8
+typedef enum sup_auth_status {
+	/* Basic supplicant authentication states */
+	WLC_SUP_DISCONNECTED = 0,
+	WLC_SUP_CONNECTING,
+	WLC_SUP_IDREQUIRED,
+	WLC_SUP_AUTHENTICATING,
+	WLC_SUP_AUTHENTICATED,
+	WLC_SUP_KEYXCHANGE,
+	WLC_SUP_KEYED,
+	WLC_SUP_TIMEOUT,
+	WLC_SUP_LAST_BASIC_STATE,
+
+	/* Extended supplicant authentication states */
+	/** Waiting to receive handshake msg M1 */
+	WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+	/** Preparing to send handshake msg M2 */
+	WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+	/* Waiting to receive handshake msg M3 */
+	WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+	WLC_SUP_KEYXCHANGE_PREP_M4,	/**< Preparing to send handshake msg M4 */
+	WLC_SUP_KEYXCHANGE_WAIT_G1,	/**< Waiting to receive handshake msg G1 */
+	WLC_SUP_KEYXCHANGE_PREP_G2	/**< Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+
+typedef struct wl_wsec_key {
+	uint32		index;		/**< key index */
+	uint32		len;		/**< key length */
+	uint8		data[DOT11_MAX_KEY_SIZE];	/**< key data */
+	uint32		pad_1[18];
+	uint32		algo;		/**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	uint32		flags;		/**< misc flags */
+	uint32		pad_2[2];
+	int32		pad_3;
+	int32		iv_initialized;	/**< has IV been initialized already? */
+	int32		pad_4;
+	/* Rx IV */
+	struct {
+		uint32	hi;		/**< upper 32 bits of IV */
+		uint16	lo;		/**< lower 16 bits of IV */
+		uint16	PAD;
+	} rxiv;
+	uint32		pad_5[2];
+	struct ether_addr ea;		/**< per station */
+	uint16	PAD;
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN	8
+#define WSEC_MAX_PSK_LEN	64
+
+/** Flag for key material needing passhash'ing */
+#define WSEC_PASSPHRASE		(1<<0)
+
+/**receptacle for WLC_SET_WSEC_PMK parameter */
+typedef struct wsec_pmk {
+	ushort	key_len;		/**< octets in key material */
+	ushort	flags;			/**< key handling qualification */
+	uint8	key[WSEC_MAX_PSK_LEN];	/**< PMK material */
+} wsec_pmk_t;
+
+typedef struct _pmkid {
+	struct ether_addr	BSSID;
+	uint8			PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+	uint32	npmkid;
+	pmkid_t	pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+	struct ether_addr	BSSID;
+	uint8			preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+	uint32	npmkid_cand;
+	pmkid_cand_t	pmkid_cand[1];
+} pmkid_cand_list_t;
+
+#define WL_STA_ANT_MAX		4	/**< max possible rx antennas */
+
+typedef struct wl_assoc_info {
+	uint32		req_len;
+	uint32		resp_len;
+	uint32		flags;
+	struct dot11_assoc_req req;
+	struct ether_addr reassoc_bssid; /**< used in reassoc's */
+	struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+typedef struct wl_led_info {
+	uint32      index;      /**< led index */
+	uint32      behavior;
+	uint8       activehi;
+	uint8       PAD[3];
+} wl_led_info_t;
+
+
+/** srom read/write struct passed through ioctl */
+typedef struct {
+	uint32	byteoff;	/**< byte offset */
+	uint32	nbytes;		/**< number of bytes */
+	uint16	buf[];
+} srom_rw_t;
+
+#define CISH_FLAG_PCIECIS	(1 << 15)	/**< write CIS format bit for PCIe CIS */
+
+/** similar cis (srom or otp) struct [iovar: may not be aligned] */
+typedef struct {
+	uint16	source;		/**< cis source */
+	uint16	flags;		/**< flags */
+	uint32	byteoff;	/**< byte offset */
+	uint32	nbytes;		/**< number of bytes */
+	/* data follows here */
+} cis_rw_t;
+
+/** R_REG and W_REG struct passed through ioctl */
+typedef struct {
+	uint32	byteoff;	/**< byte offset of the field in d11regs_t */
+	uint32	val;		/**< read/write value of the field */
+	uint32	size;		/**< sizeof the field */
+	uint32	band;		/**< band (optional) */
+} rw_reg_t;
+
+/**
+ * Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band
+ * PCL - Power Control Loop
+ */
+typedef struct {
+	uint16	auto_ctrl;	/**< WL_ATTEN_XX */
+	uint16	bb;		/**< Baseband attenuation */
+	uint16	radio;		/**< Radio attenuation */
+	uint16	txctl1;		/**< Radio TX_CTL1 value */
+} atten_t;
+
+/** Per-AC retry parameters */
+struct wme_tx_params_s {
+	uint8  short_retry;
+	uint8  short_fallback;
+	uint8  long_retry;
+	uint8  long_fallback;
+	uint16 max_rate;  /**< In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
+
+/**Used to get specific link/ac parameters */
+typedef struct {
+	int32 ac;
+	uint8 val;
+	struct ether_addr ea;
+	uint8 PAD;
+} link_val_t;
+
+
+#define WL_PM_MUTE_TX_VER 1
+
+typedef struct wl_pm_mute_tx {
+	uint16 version;		/**< version */
+	uint16 len;		/**< length */
+	uint16 deadline;	/**< deadline timer (in milliseconds) */
+	uint8  enable;		/**< set to 1 to enable mode; set to 0 to disable it */
+	uint8 PAD;
+} wl_pm_mute_tx_t;
+
+
+/* sta_info_t version 4 */
+typedef struct {
+	uint16			ver;		/**< version of this struct */
+	uint16			len;		/**< length in bytes of this structure */
+	uint16			cap;		/**< sta's advertised capabilities */
+	uint16			PAD;
+	uint32			flags;		/**< flags defined below */
+	uint32			idle;		/**< time since data pkt rx'd from sta */
+	struct ether_addr	ea;		/**< Station address */
+	uint16			PAD;
+	wl_rateset_t	rateset;	/**< rateset in use */
+	uint32			in;		/**< seconds elapsed since associated */
+	uint32			listen_interval_inms; /**< Min Listen interval in ms for this STA */
+	uint32			tx_pkts;	/**< # of user packets transmitted (unicast) */
+	uint32			tx_failures;	/**< # of user packets failed */
+	uint32			rx_ucast_pkts;	/**< # of unicast packets received */
+	uint32			rx_mcast_pkts;	/**< # of multicast packets received */
+	uint32			tx_rate;	/**< Rate used by last tx frame */
+	uint32			rx_rate;	/**< Rate of last successful rx frame */
+	uint32			rx_decrypt_succeeds;	/**< # of packet decrypted successfully */
+	uint32			rx_decrypt_failures;	/**< # of packet decrypted unsuccessfully */
+	uint32			tx_tot_pkts;	/**< # of user tx pkts (ucast + mcast) */
+	uint32			rx_tot_pkts;	/**< # of data packets recvd (uni + mcast) */
+	uint32			tx_mcast_pkts;	/**< # of mcast pkts txed */
+	uint64			tx_tot_bytes;	/**< data bytes txed (ucast + mcast) */
+	uint64			rx_tot_bytes;	/**< data bytes recvd (ucast + mcast) */
+	uint64			tx_ucast_bytes;	/**< data bytes txed (ucast) */
+	uint64			tx_mcast_bytes;	/**< # data bytes txed (mcast) */
+	uint64			rx_ucast_bytes;	/**< data bytes recvd (ucast) */
+	uint64			rx_mcast_bytes;	/**< data bytes recvd (mcast) */
+	int8			rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna
+							* of data frames
+							*/
+	int8			nf[WL_STA_ANT_MAX];	/**< per antenna noise floor */
+	uint16			aid;			/**< association ID */
+	uint16			ht_capabilities;	/**< advertised ht caps */
+	uint16			vht_flags;		/**< converted vht flags */
+	uint16			PAD;
+	uint32			tx_pkts_retried;	/**< # of frames where a retry was
+							 * necessary
+							 */
+	uint32			tx_pkts_retry_exhausted; /**< # of user frames where a retry
+							  * was exhausted
+							  */
+	int8			rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last
+								  * received data frame.
+								  */
+	/* TX WLAN retry/failure statistics:
+	 * Separated for host requested frames and WLAN locally generated frames.
+	 * Include unicast frame only where the retries/failures can be counted.
+	 */
+	uint32			tx_pkts_total;		/**< # user frames sent successfully */
+	uint32			tx_pkts_retries;	/**< # user frames retries */
+	uint32			tx_pkts_fw_total;	/**< # FW generated sent successfully */
+	uint32			tx_pkts_fw_retries;	/**< # retries for FW generated frames */
+	uint32			tx_pkts_fw_retry_exhausted;	/**< # FW generated where a retry
+								 * was exhausted
+								 */
+	uint32			rx_pkts_retried;	/**< # rx with retry bit set */
+	uint32			tx_rate_fallback;	/**< lowest fallback TX rate */
+	/* Fields above this line are common to sta_info_t versions 4 and 5 */
+
+	uint32			rx_dur_total;	/* total user RX duration (estimated) */
+
+	chanspec_t		chanspec;       /** chanspec this sta is on */
+	uint16			PAD;
+	wl_rateset_args_t	rateset_adv;	/* rateset along with mcs index bitmap */
+	uint32			PAD;
+} sta_info_v4_t;
+
+/* Note: Version 4 is the latest version of sta_info_t.  Version 5 is abandoned.
+ * Please add new fields to version 4, not version 5.
+ */
+/* sta_info_t version 5 */
+typedef struct {
+	uint16			ver;		/**< version of this struct */
+	uint16			len;		/**< length in bytes of this structure */
+	uint16			cap;		/**< sta's advertised capabilities */
+	uint16			PAD;
+	uint32			flags;		/**< flags defined below */
+	uint32			idle;		/**< time since data pkt rx'd from sta */
+	struct ether_addr	ea;		/**< Station address */
+	uint16			PAD;
+	wl_rateset_t		rateset;	/**< rateset in use */
+	uint32			in;		/**< seconds elapsed since associated */
+	uint32			listen_interval_inms; /**< Min Listen interval in ms for this STA */
+	uint32			tx_pkts;	/**< # of user packets transmitted (unicast) */
+	uint32			tx_failures;	/**< # of user packets failed */
+	uint32			rx_ucast_pkts;	/**< # of unicast packets received */
+	uint32			rx_mcast_pkts;	/**< # of multicast packets received */
+	uint32			tx_rate;	/**< Rate used by last tx frame */
+	uint32			rx_rate;	/**< Rate of last successful rx frame */
+	uint32			rx_decrypt_succeeds;	/**< # of packet decrypted successfully */
+	uint32			rx_decrypt_failures;	/**< # of packet decrypted unsuccessfully */
+	uint32			tx_tot_pkts;	/**< # of user tx pkts (ucast + mcast) */
+	uint32			rx_tot_pkts;	/**< # of data packets recvd (uni + mcast) */
+	uint32			tx_mcast_pkts;	/**< # of mcast pkts txed */
+	uint64			tx_tot_bytes;	/**< data bytes txed (ucast + mcast) */
+	uint64			rx_tot_bytes;	/**< data bytes recvd (ucast + mcast) */
+	uint64			tx_ucast_bytes;	/**< data bytes txed (ucast) */
+	uint64			tx_mcast_bytes;	/**< # data bytes txed (mcast) */
+	uint64			rx_ucast_bytes;	/**< data bytes recvd (ucast) */
+	uint64			rx_mcast_bytes;	/**< data bytes recvd (mcast) */
+	int8			rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna
+							* of data frames
+							*/
+	int8			nf[WL_STA_ANT_MAX];	/**< per antenna noise floor */
+	uint16			aid;			/**< association ID */
+	uint16			ht_capabilities;	/**< advertised ht caps */
+	uint16			vht_flags;		/**< converted vht flags */
+	uint16			PAD;
+	uint32			tx_pkts_retried;	/**< # of frames where a retry was
+							 * necessary
+							 */
+	uint32			tx_pkts_retry_exhausted; /**< # of user frames where a retry
+							  * was exhausted
+							  */
+	int8			rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last
+								  * received data frame.
+								  */
+	/* TX WLAN retry/failure statistics:
+	 * Separated for host requested frames and WLAN locally generated frames.
+	 * Include unicast frame only where the retries/failures can be counted.
+	 */
+	uint32			tx_pkts_total;		/**< # user frames sent successfully */
+	uint32			tx_pkts_retries;	/**< # user frames retries */
+	uint32			tx_pkts_fw_total;	/**< # FW generated sent successfully */
+	uint32			tx_pkts_fw_retries;	/**< # retries for FW generated frames */
+	uint32			tx_pkts_fw_retry_exhausted;	/**< # FW generated where a retry
+								 * was exhausted
+								 */
+	uint32			rx_pkts_retried;	/**< # rx with retry bit set */
+	uint32			tx_rate_fallback;	/**< lowest fallback TX rate */
+	/* Fields above this line are common to sta_info_t versions 4 and 5 */
+
+	chanspec_t		chanspec;       /** chanspec this sta is on */
+	uint16			PAD;
+	wl_rateset_args_t	rateset_adv;	/* rateset along with mcs index bitmap */
+} sta_info_v5_t;
+
+#define WL_OLD_STAINFO_SIZE	OFFSETOF(sta_info_t, tx_tot_pkts)
+
+#define WL_STA_VER_4		4
+#define WL_STA_VER_5		5
+#define WL_STA_VER		WL_STA_VER_4
+
+#define SWDIV_STATS_VERSION_2 2
+#define SWDIV_STATS_CURRENT_VERSION SWDIV_STATS_VERSION_2
+
+struct wlc_swdiv_stats_v1 {
+	uint32 auto_en;
+	uint32 active_ant;
+	uint32 rxcount;
+	int32 avg_snr_per_ant0;
+	int32 avg_snr_per_ant1;
+	int32 avg_snr_per_ant2;
+	uint32 swap_ge_rxcount0;
+	uint32 swap_ge_rxcount1;
+	uint32 swap_ge_snrthresh0;
+	uint32 swap_ge_snrthresh1;
+	uint32 swap_txfail0;
+	uint32 swap_txfail1;
+	uint32 swap_timer0;
+	uint32 swap_timer1;
+	uint32 swap_alivecheck0;
+	uint32 swap_alivecheck1;
+	uint32 rxcount_per_ant0;
+	uint32 rxcount_per_ant1;
+	uint32 acc_rxcount;
+	uint32 acc_rxcount_per_ant0;
+	uint32 acc_rxcount_per_ant1;
+	uint32 tx_auto_en;
+	uint32 tx_active_ant;
+	uint32 rx_policy;
+	uint32 tx_policy;
+	uint32 cell_policy;
+	uint32 swap_snrdrop0;
+	uint32 swap_snrdrop1;
+	uint32 mws_antsel_ovr_tx;
+	uint32 mws_antsel_ovr_rx;
+	uint8 swap_trig_event_id;
+};
+
+struct wlc_swdiv_stats_v2 {
+	uint16	version;	/* version of the structure
+						* as defined by SWDIV_STATS_CURRENT_VERSION
+						*/
+	uint16	length;		/* length of the entire structure */
+	uint32 auto_en;
+	uint32 active_ant;
+	uint32 rxcount;
+	int32 avg_snr_per_ant0;
+	int32 avg_snr_per_ant1;
+	int32 avg_snr_per_ant2;
+	uint32 swap_ge_rxcount0;
+	uint32 swap_ge_rxcount1;
+	uint32 swap_ge_snrthresh0;
+	uint32 swap_ge_snrthresh1;
+	uint32 swap_txfail0;
+	uint32 swap_txfail1;
+	uint32 swap_timer0;
+	uint32 swap_timer1;
+	uint32 swap_alivecheck0;
+	uint32 swap_alivecheck1;
+	uint32 rxcount_per_ant0;
+	uint32 rxcount_per_ant1;
+	uint32 acc_rxcount;
+	uint32 acc_rxcount_per_ant0;
+	uint32 acc_rxcount_per_ant1;
+	uint32 tx_auto_en;
+	uint32 tx_active_ant;
+	uint32 rx_policy;
+	uint32 tx_policy;
+	uint32 cell_policy;
+	uint32 swap_snrdrop0;
+	uint32 swap_snrdrop1;
+	uint32 mws_antsel_ovr_tx;
+	uint32 mws_antsel_ovr_rx;
+	uint32 swap_trig_event_id;
+};
+
+#define	WLC_NUMRATES	16	/**< max # of rates in a rateset */
+
+/**Used to get specific STA parameters */
+typedef struct {
+	uint32	val;
+	struct ether_addr ea;
+	uint16	PAD;
+} scb_val_t;
+
+/**Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */
+typedef struct {
+	uint32 code;
+	scb_val_t ioctl_args;
+} authops_t;
+
+/** channel encoding */
+typedef struct channel_info {
+	int32 hw_channel;
+	int32 target_channel;
+	int32 scan_channel;
+} channel_info_t;
+
+/** For ioctls that take a list of MAC addresses */
+typedef struct maclist {
+	uint32 count;			/**< number of MAC addresses */
+	struct ether_addr ea[1];	/**< variable length array of MAC addresses */
+} maclist_t;
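+
+/*
+ * Usage sketch (editorial example, not part of this interface): maclist_t
+ * ends in a variable-length array, so a caller wanting room for 'n' MAC
+ * addresses typically allocates the fixed header plus 'n' array elements
+ * and fills 'count' before issuing the ioctl:
+ *
+ *	uint32 n = 8;
+ *	uint32 len = OFFSETOF(maclist_t, ea) + n * sizeof(struct ether_addr);
+ *	maclist_t *list = (maclist_t *)malloc(len);
+ *	if (list != NULL) {
+ *		memset(list, 0, len);
+ *		list->count = n;
+ *	}
+ */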
+
+/**get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+	uint32 rx_good_pkt;
+	uint32 rx_bad_pkt;
+	uint32 tx_good_pkt;
+	uint32 tx_bad_pkt;
+	uint32 rx_ocast_good_pkt; /**< unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN              0
+#define LQ_IDX_MAX              1
+#define LQ_IDX_AVG              2
+#define LQ_IDX_SUM              2
+#define LQ_IDX_LAST             3
+#define LQ_STOP_MONITOR         0
+#define LQ_START_MONITOR        1
+
+/** Get averages RSSI, Rx PHY rate and SNR values */
+/* Link Quality */
+typedef struct {
+	int32 rssi[LQ_IDX_LAST];  /**< Array to keep min, max, avg rssi */
+	int32 snr[LQ_IDX_LAST];   /**< Array to keep min, max, avg snr */
+	int32 isvalid;            /**< Flag indicating whether above data is valid */
+} wl_lq_t;
+
+typedef enum wl_wakeup_reason_type {
+	LCD_ON = 1,
+	LCD_OFF,
+	DRC1_WAKE,
+	DRC2_WAKE,
+	REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+	/** Unique filter id */
+	uint32	id;
+	/** stores the reason for the last wake up */
+	uint8	reason;
+	uint8	PAD[3];
+} wl_wr_t;
+
+/** Get MAC specific rate histogram command */
+typedef struct {
+	struct	ether_addr ea;	/**< MAC Address */
+	uint8	ac_cat;	/**< Access Category */
+	uint8	num_pkts;	/**< Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t;
+/** Get MAC rate histogram response */
+typedef struct {
+	uint32	rate[DOT11_RATE_MAX + 1];	/**< Rates */
+	uint32	mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX];	/**< MCS counts */
+	uint32	vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX];	/**< VHT counts */
+	uint32	tsf_timer[2][2];	/**< Start and End time for 8bytes value */
+	uint32	prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /** MCS counts */
+} wl_mac_ratehisto_res_t;
+
+/* sta_info ecounters */
+typedef struct {
+	struct ether_addr   ea;				/* Station MAC addr */
+	struct ether_addr   BSSID;			/* BSSID of the BSS */
+	uint32              tx_pkts_fw_total;		/* # FW generated sent successfully */
+	uint32              tx_pkts_fw_retries;		/* # retries for FW generated frames */
+	uint32              tx_pkts_fw_retry_exhausted;	/* # FW generated which
+							 * failed after retry
+							 */
+} sta_info_ecounters_t;
+
+#define STAMON_MODULE_VER		1
+
+/**Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+	uint32 cmd;	/**< common ioctl definition */
+	void *buf;	/**< pointer to user buffer */
+	uint32 len;	/**< length of user buffer */
+	uint8 set;		/**< 1=set IOCTL; 0=query IOCTL */
+	uint32 used;	/**< bytes read or written (optional) */
+	uint32 needed;	/**< bytes needed (optional) */
+} wl_ioctl_t;
+
+#ifdef CONFIG_COMPAT
+typedef struct compat_wl_ioctl {
+	uint32 cmd;	/**< common ioctl definition */
+	uint32 buf;	/**< pointer to user buffer */
+	uint32 len;	/**< length of user buffer */
+	uint8 set;		/**< 1=set IOCTL; 0=query IOCTL */
+	uint32 used;	/**< bytes read or written (optional) */
+	uint32 needed;	/**< bytes needed (optional) */
+} compat_wl_ioctl_t;
+#endif /* CONFIG_COMPAT */
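+
+/*
+ * Usage sketch (editorial example; WLC_GET_CHANNEL is assumed to be one of
+ * the ioctl codes defined by this interface, and the routine that actually
+ * submits the request is driver/bus specific): a query IOCTL points 'buf'
+ * and 'len' at a caller-owned buffer and leaves 'set' at 0, while a set
+ * IOCTL uses the same layout with 'set' = 1:
+ *
+ *	channel_info_t ci;
+ *	wl_ioctl_t ioc;
+ *
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.cmd = WLC_GET_CHANNEL;
+ *	ioc.buf = &ci;
+ *	ioc.len = sizeof(ci);
+ *	ioc.set = 0;
+ *	// hand 'ioc' to the driver-specific submit routine; on success the
+ *	// driver fills 'ci' and may update 'used'/'needed'
+ */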
+
+#define WL_NUM_RATES_CCK		4 /**< 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM		8 /**< 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM	8 /**< MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT		2 /**< Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT		10
+#define WL_NUM_RATES_MCS32		1
+
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+	uint32		vendorid;	/**< PCI vendor id */
+	uint32		deviceid;	/**< device id of chip */
+	uint32		radiorev;	/**< radio revision */
+	uint32		chiprev;	/**< chip revision */
+	uint32		corerev;	/**< core revision */
+	uint32		boardid;	/**< board identifier (usu. PCI sub-device id) */
+	uint32		boardvendor;	/**< board vendor (usu. PCI sub-vendor id) */
+	uint32		boardrev;	/**< board revision */
+	uint32		driverrev;	/**< driver version */
+	uint32		ucoderev;	/**< microcode version */
+	uint32		bus;		/**< bus type */
+	uint32		chipnum;	/**< chip number */
+	uint32		phytype;	/**< phy type */
+	uint32		phyrev;		/**< phy revision */
+	uint32		anarev;		/**< anacore rev */
+	uint32		chippkg;	/**< chip package info */
+	uint32		nvramrev;	/**< nvram revision number */
+	uint32		phyminorrev;	/**< phy minor rev */
+	uint32		coreminorrev;	/**< core minor rev */
+	uint32		drvrev_major;	/**< driver version: major */
+	uint32		drvrev_minor;	/**< driver version: minor */
+	uint32		drvrev_rc;	/**< driver version: rc */
+	uint32		drvrev_rc_inc;	/**< driver version: rc incremental */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH	48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+	uint32 instance;
+	int8 brand[WL_BRAND_MAX];
+	int8 PAD[4-(WL_BRAND_MAX%4)];
+} wl_instance_info_t;
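+
+/*
+ * With WL_BRAND_MAX = 10 the PAD expression above evaluates to
+ * 4 - (10 % 4) = 2 bytes, rounding the structure up to a 4-byte multiple:
+ * 4 (instance) + 10 (brand) + 2 (PAD) = 16 bytes.
+ */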
+
+/** structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+	uint16	magic;
+	uint16	fifo;
+	uint16	size;
+} wl_txfifo_sz_t;
+
+/* Transfer info about an IOVar from the driver */
+/**Max supported IOV name size in bytes, + 1 for nul termination */
+#define WLC_IOV_NAME_LEN	(32 + 1)
+
+typedef struct wlc_iov_trx_s {
+	uint8 module;
+	uint8 type;
+	char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/** bump this number if you change the ioctl interface */
+#define WLC_IOCTL_VERSION	2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES	1
+/* ifdef EXT_STA */
+typedef struct _wl_assoc_result {
+	ulong associated;
+	ulong NDIS_auth;
+	ulong NDIS_infra;
+} wl_assoc_result_t;
+/* EXT_STA */
+
+#define WL_PHY_PAVARS_LEN	32	/**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */
+
+
+#define WL_PHY_PAVAR_VER	1	/**< pavars version */
+#define WL_PHY_PAVARS2_NUM	3	/**< a1, b0, b1 */
+typedef struct wl_pavars2 {
+	uint16 ver;		/**< version of this struct */
+	uint16 len;		/**< len of this structure */
+	uint16 inuse;		/**< driver returns 1 for a1, b0, b1 in the current band range */
+	uint16 phy_type;	/**< phy type */
+	uint16 bandrange;
+	uint16 chain;
+	uint16 inpa[WL_PHY_PAVARS2_NUM];	/**< phy pavars for one band range */
+} wl_pavars2_t;
+
+typedef struct wl_po {
+	uint16	phy_type;	/**< Phy type */
+	uint16	band;
+	uint16	cckpo;
+	uint16	PAD;
+	uint32	ofdmpo;
+	uint16	mcspo[8];
+} wl_po_t;
+
+#define WL_NUM_RPCALVARS 5	/**< number of rpcal vars */
+
+typedef struct wl_rpcal {
+	uint16 value;
+	uint16 update;
+} wl_rpcal_t;
+
+#define WL_NUM_RPCALPHASEVARS 5	/* number of rpcal phase vars */
+
+typedef struct wl_rpcal_phase {
+	uint16 value;
+	uint16 update;
+} wl_rpcal_phase_t;
+
+typedef struct wl_aci_args {
+	int32 enter_aci_thresh; /* Trigger level to start detecting ACI */
+	int32 exit_aci_thresh; /* Trigger level to exit ACI mode */
+	int32 usec_spin; /* microsecs to delay between rssi samples */
+	int32 glitch_delay; /* interval between ACI scans when glitch count is consistently high */
+	uint16 nphy_adcpwr_enter_thresh;	/**< ADC power to enter ACI mitigation mode */
+	uint16 nphy_adcpwr_exit_thresh;	/**< ADC power to exit ACI mitigation mode */
+	uint16 nphy_repeat_ctr;		/**< Number of tries per channel to compute power */
+	uint16 nphy_num_samples;	/**< Number of samples to compute power on one channel */
+	uint16 nphy_undetect_window_sz;	/**< num of undetects to exit ACI Mitigation mode */
+	uint16 nphy_b_energy_lo_aci;	/**< low ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_md_aci;	/**< mid ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_hi_aci;	/**< high ACI power energy threshold for bphy */
+	uint16 nphy_noise_noassoc_glitch_th_up; /**< wl interference 4 */
+	uint16 nphy_noise_noassoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_glitch_th_up;
+	uint16 nphy_noise_assoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_aci_glitch_th_up;
+	uint16 nphy_noise_assoc_aci_glitch_th_dn;
+	uint16 nphy_noise_assoc_enter_th;
+	uint16 nphy_noise_noassoc_enter_th;
+	uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+	uint16 nphy_noise_noassoc_crsidx_incr;
+	uint16 nphy_noise_assoc_crsidx_incr;
+	uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH	16	/**< bytes of pre NPHY aci args */
+#define	WL_SAMPLECOLLECT_T_VERSION	2	/**< version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+	/* version 0 fields */
+	uint8 coll_us;
+	uint8 PAD[3];
+	int32 cores;
+	/* add'l version 1 fields */
+	uint16 version;     /**< see definition of WL_SAMPLECOLLECT_T_VERSION */
+	uint16 length;      /**< length of entire structure */
+	int8 trigger;
+	uint8 PAD;
+	uint16 timeout;
+	uint16 mode;
+	uint16 PAD;
+	uint32 pre_dur;
+	uint32 post_dur;
+	uint8 gpio_sel;
+	uint8 downsamp;
+	uint8 be_deaf;
+	uint8 agc;		/**< loop from init gain and going down */
+	uint8 filter;		/**< override high pass corners to lowest */
+	/* add'l version 2 fields */
+	uint8 trigger_state;
+	uint8 module_sel1;
+	uint8 module_sel2;
+	uint16 nsamps;
+	uint16 PAD;
+	int32 bitStart;
+	uint32 gpioCapMask;
+	uint8 gpio_collection;
+	uint8 PAD[3];
+} wl_samplecollect_args_t;
+
+#define	WL_SAMPLEDATA_T_VERSION		1	/**< version of wl_sampledata_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define	WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+	uint16 version;	/**< structure version */
+	uint16 size;	/**< size of structure */
+	uint16 tag;	/**< Header/Data */
+	uint16 length;	/**< data length */
+	uint32 flag;	/**< bit def */
+} wl_sampledata_t;
+
+
+/* WL_OTA START */
+/* OTA Test Status */
+enum {
+	WL_OTA_TEST_IDLE = 0,		/**< Default Idle state */
+	WL_OTA_TEST_ACTIVE = 1,		/**< Test Running */
+	WL_OTA_TEST_SUCCESS = 2,	/**< Successfully Finished Test */
+	WL_OTA_TEST_FAIL = 3		/**< Test Failed in the Middle */
+};
+
+/* OTA SYNC Status */
+enum {
+	WL_OTA_SYNC_IDLE = 0,	/**< Idle state */
+	WL_OTA_SYNC_ACTIVE = 1,	/**< Waiting for Sync */
+	WL_OTA_SYNC_FAIL = 2	/**< Sync pkt not received */
+};
+
+/* Various error states dut can get stuck during test */
+enum {
+	WL_OTA_SKIP_TEST_CAL_FAIL = 1,		/**< Phy calibration failed */
+	WL_OTA_SKIP_TEST_SYNCH_FAIL = 2,	/**< Sync Packet not received */
+	WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3,	/**< Cmd flow file download failed */
+	WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4,	/**< No test found in Flow file */
+	WL_OTA_SKIP_TEST_WL_NOT_UP = 5,		/**< WL UP failed */
+	WL_OTA_SKIP_TEST_UNKNOWN_CALL		/**< Unintentional scheduling of OTA test */
+};
+
+/* Differentiator for ota_tx and ota_rx */
+enum {
+	WL_OTA_TEST_TX = 0,		/**< ota_tx */
+	WL_OTA_TEST_RX = 1,		/**< ota_rx */
+};
+
+/* Bandwidth modes of operation: 20 MHz, 40 MHz, 20-in-40 MHz and 80 MHz */
+enum {
+	WL_OTA_TEST_BW_20_IN_40MHZ = 0,		/**< 20 in 40 operation */
+	WL_OTA_TEST_BW_20MHZ = 1,		/**< 20 MHz operation */
+	WL_OTA_TEST_BW_40MHZ = 2,		/**< full 40 MHz operation */
+	WL_OTA_TEST_BW_80MHZ = 3		/* full 80 MHz operation */
+};
+#define HT_MCS_INUSE	0x00000080	/* HT MCS in use, indicates b0-6 holds an mcs */
+#define VHT_MCS_INUSE	0x00000100	/* VHT MCS in use, indicates b0-6 holds an mcs */
+#define OTA_RATE_MASK 0x0000007f	/* rate/mcs value */
+#define OTA_STF_SISO	0
+#define OTA_STF_CDD		1
+#define OTA_STF_STBC	2
+#define OTA_STF_SDM		3
+
+typedef struct ota_rate_info {
+	uint8 rate_cnt;					/**< Total number of rates */
+	uint8 PAD;
+	uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE];	/**< array of rates from 1 Mbps to 130 Mbps */
+							/**< for legacy rates : rate in Mbps * 2 */
+							/**< for HT rates : mcs index */
+} ota_rate_info_t;
+
+typedef struct ota_power_info {
+	int8 pwr_ctrl_on;	/**< power control on/off */
+	int8 start_pwr;		/**< starting power/index */
+	int8 delta_pwr;		/**< delta power/index */
+	int8 end_pwr;		/**< end power/index */
+} ota_power_info_t;
+
+typedef struct ota_packetengine {
+	uint16 delay;           /**< Inter-packet delay */
+				/**< for ota_tx, delay is tx ifs in micro seconds */
+				/* for ota_rx, delay is wait time in milliseconds */
+	uint16 nframes;         /**< Number of frames */
+	uint16 length;          /**< Packet length */
+} ota_packetengine_t;
+
+/*
+ * OTA txant/rxant parameter
+ *    bit7-4: 4 bits swdiv_tx/rx_policy bitmask, specify antenna-policy for SW diversity
+ *    bit3-0: 4 bits TxCore bitmask, specify cores used for transmit frames
+ *            (maximum spatial expansion)
+ */
+#define WL_OTA_TEST_ANT_MASK	0xF0
+#define WL_OTA_TEST_CORE_MASK	0x0F
+
+/* OTA txant/rxant 'ant_mask' field; map to Tx/Rx antenna policy for SW diversity */
+enum {
+	WL_OTA_TEST_FORCE_ANT0 = 0x10,	/* force antenna to Ant 0 */
+	WL_OTA_TEST_FORCE_ANT1 = 0x20,	/* force antenna to Ant 1 */
+};
+
+/* antenna/core fields access */
+#define WL_OTA_TEST_GET_ANT(_txant) ((_txant) & WL_OTA_TEST_ANT_MASK)
+#define WL_OTA_TEST_GET_CORE(_txant) ((_txant) & WL_OTA_TEST_CORE_MASK)
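+
+/*
+ * Example (editorial): a txant value of 0x13 selects the ANT0 software
+ * diversity policy in the upper nibble and cores 0 and 1 in the lower nibble:
+ *
+ *	uint8 txant = WL_OTA_TEST_FORCE_ANT0 | 0x03;
+ *	uint8 ant_policy = WL_OTA_TEST_GET_ANT(txant);	// 0x10
+ *	uint8 core_mask = WL_OTA_TEST_GET_CORE(txant);	// 0x03
+ */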
+
+/** Test info vector */
+typedef struct wl_ota_test_args {
+	uint8 cur_test;			/**< test phase */
+	uint8 chan;			/**< channel */
+	uint8 bw;			/**< bandwidth */
+	uint8 control_band;		/**< control band */
+	uint8 stf_mode;			/**< stf mode */
+	uint8 PAD;
+	ota_rate_info_t rt_info;	/**< Rate info */
+	ota_packetengine_t pkteng;	/**< packeteng info */
+	uint8 txant;			/**< tx antenna */
+	uint8 rxant;			/**< rx antenna */
+	ota_power_info_t pwr_info;	/**< power sweep info */
+	uint8 wait_for_sync;		/**< wait for sync or not */
+	uint8 ldpc;
+	uint8 sgi;
+	uint8 PAD;
+	/* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_args_t;
+
+#define WL_OTA_TESTVEC_T_VERSION		1	/* version of wl_ota_test_vector_t struct */
+typedef struct wl_ota_test_vector {
+	uint16 version;
+	wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ];	/**< Test argument struct */
+	uint16 test_cnt;					/**< Total no of test */
+	uint8 file_dwnld_valid;					/**< File successfully downloaded */
+	uint8 sync_timeout;					/**< sync packet timeout */
+	int8 sync_fail_action;					/**< sync fail action */
+	struct ether_addr sync_mac;				/**< macaddress for sync pkt */
+	struct ether_addr tx_mac;				/**< macaddress for tx */
+	struct ether_addr rx_mac;				/**< macaddress for rx */
+	int8 loop_test;					/**< dbg feature to loop the test */
+	uint16 test_rxcnt;
+	/* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_vector_t;
+
+
+/** struct copied back form dongle to host to query the status */
+typedef struct wl_ota_test_status {
+	int16 cur_test_cnt;		/**< test phase */
+	int8 skip_test_reason;		/**< skip test reason */
+	uint8 PAD;
+	wl_ota_test_args_t test_arg;	/**< cur test arg details */
+	uint16 test_cnt;		/**< total no of test downloaded */
+	uint8 file_dwnld_valid;		/**< file successfully downloaded ? */
+	uint8 sync_timeout;		/**< sync timeout */
+	int8 sync_fail_action;		/**< sync fail action */
+	struct ether_addr sync_mac;	/**< macaddress for sync pkt */
+	struct ether_addr tx_mac;	/**< tx mac address */
+	struct ether_addr rx_mac;	/**< rx mac address */
+	uint8  test_stage;		/**< check the test status */
+	int8 loop_test;			/**< Debug feature to put the test engine in a loop */
+	uint8 sync_status;		/**< sync status */
+} wl_ota_test_status_t;
+
+/* FOR ioctl that take the sta monitor information */
+typedef struct stamon_data {
+	struct ether_addr  ea;
+	uint8 PAD[2];
+	int32 rssi;
+} stamon_data_t;
+
+typedef struct stamon_info {
+	int32 version;
+	uint32 count;
+	stamon_data_t sta_data[1];
+} stamon_info_t;
+
+typedef struct wl_ota_rx_rssi {
+	uint16	pktcnt;		/* Pkt count used for this rx test */
+	chanspec_t chanspec;	/* Channel info on which the packets are received */
+	int16	rssi;		/* Average RSSI of the first 50% packets received */
+} wl_ota_rx_rssi_t;
+
+#define	WL_OTARSSI_T_VERSION		1	/* version of wl_ota_test_rssi_t struct */
+#define WL_OTA_TEST_RSSI_FIXED_SIZE	OFFSETOF(wl_ota_test_rssi_t, rx_rssi)
+
+typedef struct wl_ota_test_rssi {
+	uint8 version;
+	uint8	testcnt;		/* total measured RSSI values, valid on output only */
+	wl_ota_rx_rssi_t rx_rssi[1]; /* Variable length array of wl_ota_rx_rssi_t */
+} wl_ota_test_rssi_t;
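+
+/*
+ * Sizing sketch (editorial example): the response is variable length, so a
+ * caller expecting up to 'n' per-channel results reserves the fixed header
+ * plus 'n' rx_rssi[] elements and then reads back 'testcnt' valid entries:
+ *
+ *	uint32 n = 4;
+ *	uint32 resp_len = WL_OTA_TEST_RSSI_FIXED_SIZE + n * sizeof(wl_ota_rx_rssi_t);
+ */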
+
+/* WL_OTA END */
+
+/**wl_radar_args_t */
+typedef struct {
+	int32 npulses;	/**< required number of pulses at n * t_int */
+	int32 ncontig;	/**< required number of pulses at t_int */
+	int32 min_pw;	/**< minimum pulse width (20 MHz clocks) */
+	int32 max_pw;	/**< maximum pulse width (20 MHz clocks) */
+	uint16 thresh0;	/**< Radar detection, thresh 0 */
+	uint16 thresh1;	/**< Radar detection, thresh 1 */
+	uint16 blank;	/**< Radar detection, blank control */
+	uint16 fmdemodcfg;	/**< Radar detection, fmdemod config */
+	int32 npulses_lp;  /**< Radar detection, minimum long pulses */
+	int32 min_pw_lp; /**< Minimum pulsewidth for long pulses */
+	int32 max_pw_lp; /**< Maximum pulsewidth for long pulses */
+	int32 min_fm_lp; /**< Minimum fm for long pulses */
+	int32 max_span_lp;  /**< Maximum deltat for long pulses */
+	int32 min_deltat; /**< Minimum spacing between pulses */
+	int32 max_deltat; /**< Maximum spacing between pulses */
+	uint16 autocorr;	/**< Radar detection, autocorr on or off */
+	uint16 st_level_time;	/**< Radar detection, start_timing level */
+	uint16 t2_min; /**< minimum clocks needed to remain in state 2 */
+	uint8 PAD[2];
+	uint32 version; /**< version */
+	uint32 fra_pulse_err;	/**< sample error margin for detecting French radar pulses */
+	int32 npulses_fra;  /**< Radar detection, minimum French pulses set */
+	int32 npulses_stg2;  /**< Radar detection, minimum staggered-2 pulses set */
+	int32 npulses_stg3;  /**< Radar detection, minimum staggered-3 pulses set */
+	uint16 percal_mask;	/**< defines which period cal is masked from radar detection */
+	uint8 PAD[2];
+	int32 quant;	/**< quantization resolution to pulse positions */
+	uint32 min_burst_intv_lp;	/**< minimum burst to burst interval for bin3 radar */
+	uint32 max_burst_intv_lp;	/**< maximum burst to burst interval for bin3 radar */
+	int32 nskip_rst_lp;	/**< number of skipped pulses before resetting lp buffer */
+	int32 max_pw_tol; /* maximum tolerance allowed in detected pulse width for radar detection */
+	uint16 feature_mask; /**< 16-bit mask to specify enabled features */
+	uint16 thresh0_sc;	/**< Radar detection, thresh 0 */
+	uint16 thresh1_sc;	/**< Radar detection, thresh 1 */
+	uint8 PAD[2];
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+	uint32 version; /**< version */
+	uint16 thresh0_20_lo;	/**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh1_20_lo;	/**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh0_40_lo;	/**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh1_40_lo;	/**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh0_80_lo;	/**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh1_80_lo;	/**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh0_20_hi;	/**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh1_20_hi;	/**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh0_40_hi;	/**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh1_40_hi;	/**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh0_80_hi;	/**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh1_80_hi;	/**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh0_160_lo;	/**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh1_160_lo;	/**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh0_160_hi;	/**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+	uint16 thresh1_160_hi;	/**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+} wl_radar_thr_t;
+
+typedef struct {
+	uint32 version; /* version */
+	uint16 thresh0_sc_20_lo;
+	uint16 thresh1_sc_20_lo;
+	uint16 thresh0_sc_40_lo;
+	uint16 thresh1_sc_40_lo;
+	uint16 thresh0_sc_80_lo;
+	uint16 thresh1_sc_80_lo;
+	uint16 thresh0_sc_20_hi;
+	uint16 thresh1_sc_20_hi;
+	uint16 thresh0_sc_40_hi;
+	uint16 thresh1_sc_40_hi;
+	uint16 thresh0_sc_80_hi;
+	uint16 thresh1_sc_80_hi;
+	uint16 fc_varth_sb;
+	uint16 fc_varth_bin5_sb;
+	uint16 notradar_enb;
+	uint16 max_notradar_lp;
+	uint16 max_notradar;
+	uint16 max_notradar_lp_sc;
+	uint16 max_notradar_sc;
+	uint16 highpow_war_enb;
+	uint16 highpow_sp_ratio;	/* unit is 0.5 */
+} wl_radar_thr2_t;
+
+#define WL_RADAR_THR_VERSION	2
+
+typedef struct {
+	uint32 ver;
+	uint32 len;
+	int32  rssi_th[3];
+	uint8  rssi_gain_80[4];
+	uint8  rssi_gain_160[4];
+} wl_dyn_switch_th_t;
+
+#define WL_PHY_DYN_SWITCH_TH_VERSION	1
+
+/** RSSI per antenna */
+typedef struct {
+	uint32	version;		/**< version field */
+	uint32	count;			/**< number of valid antenna rssi */
+	int8 rssi_ant[WL_RSSI_ANT_MAX];	/**< rssi per antenna */
+} wl_rssi_ant_t;
+
+/* SNR per antenna */
+typedef struct {
+	uint32  version;				/* version field */
+	uint32  count;					/* number of valid antenna snr */
+	int8 snr_ant[WL_RSSI_ANT_MAX];	/* snr per antenna */
+} wl_snr_ant_t;
+
+
+/** data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+	uint32 state;		/**< noted by WL_DFS_CACSTATE_XX. */
+	uint32 duration;		/**< time spent in ms in state. */
+	/**
+	 * as dfs enters ISM state, it removes the operational channel from quiet channel
+	 * list and notes the channel in channel_cleared. set to 0 if no channel is cleared
+	 */
+	chanspec_t chanspec_cleared;
+	/** chanspec_cleared used to be a uint32; add a uint16 pad to maintain the structure size */
+	uint16 pad;
+} wl_dfs_status_t;
+
+typedef struct {
+	uint32 state;		/* noted by WL_DFS_CACSTATE_XX */
+	uint32 duration;		/* time spent in ms in state */
+	chanspec_t chanspec;	/* chanspec of this core */
+	chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */
+	uint16 sub_type;	/* currently just the index of the core or the respective PLL */
+	uint16 pad;
+} wl_dfs_sub_status_t;
+
+#define WL_DFS_STATUS_ALL_VERSION	(1)
+typedef struct {
+	uint16 version;		/* version field; current max version 1 */
+	uint16 num_sub_status;
+	wl_dfs_sub_status_t  dfs_sub_status[1]; /* struct array of length num_sub_status */
+} wl_dfs_status_all_t;
+
+#define WL_DFS_AP_MOVE_VERSION	(1)
+
+struct wl_dfs_ap_move_status_v1 {
+	int16 dfs_status;	/* DFS scan status */
+	chanspec_t chanspec;	/* New AP Chanspec */
+	wl_dfs_status_t cac_status;	/* CAC status */
+};
+
+typedef struct wl_dfs_ap_move_status_v2 {
+	int8 version;            /* version field; current max version 1 */
+	int8 move_status;        /* DFS move status */
+	chanspec_t chanspec;     /* New AP Chanspec */
+	wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */
+} wl_dfs_ap_move_status_v2_t;
+
+#define WL_DFS_AP_MOVE_ABORT -1		/* Abort any dfs_ap_move in progress immediately */
+#define WL_DFS_AP_MOVE_STUNT -2		/* Stunt move but continue background CSA if in progress */
+
+
+/** data structure used in 'radar_status' wl interface, which is use to query radar det status */
+typedef struct {
+	uint8 detected;
+	uint8 PAD[3];
+	int32 count;
+	uint8 pretended;
+	uint8 PAD[3];
+	uint32 radartype;
+	uint32 timenow;
+	uint32 timefromL;
+	int32  lp_csect_single;
+	int32  detected_pulse_index;
+	int32  nconsecq_pulses;
+	chanspec_t ch;
+	uint8 PAD[2];
+	int32  pw[10];
+	int32  intv[10];
+	int32  fm[10];
+} wl_radar_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+	uint8 txpwr_band_max[NUM_PWRCTRL_RATES];	/**< User set target */
+	uint8 txpwr_limit[NUM_PWRCTRL_RATES];		/**< reg and local power limit */
+	uint8 txpwr_local_max;				/**< local max according to the AP */
+	uint8 txpwr_local_constraint;			/**< local constraint according to the AP */
+	uint8 txpwr_chan_reg_max;			/**< Regulatory max for this channel */
+	uint8 txpwr_target[2][NUM_PWRCTRL_RATES];	/**< Latest target for 2.4 and 5 GHz */
+	uint8 txpwr_est_Pout[2];			/**< Latest estimate for 2.4 and 5 GHz */
+	uint8 txpwr_opo[NUM_PWRCTRL_RATES];		/**< On G phy, OFDM power offset */
+	uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES];	/**< Max CCK power for this band (SROM) */
+	uint8 txpwr_bphy_ofdm_max;			/**< Max OFDM power for this band (SROM) */
+	uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES];	/**< Max power for A band (SROM) */
+	int8  txpwr_antgain[2];				/**< Ant gain for each band - from SROM */
+	uint8 txpwr_est_Pout_gofdm;			/**< Pwr estimate for 2.4 OFDM */
+} tx_power_legacy_t;
+
+#define WL_TX_POWER_RATES_LEGACY    45
+#define WL_TX_POWER_MCS20_FIRST         12
+#define WL_TX_POWER_MCS20_NUM           16
+#define WL_TX_POWER_MCS40_FIRST         28
+#define WL_TX_POWER_MCS40_NUM           17
+
+typedef struct {
+	uint32 flags;
+	chanspec_t chanspec;                 /**< txpwr report for this channel */
+	chanspec_t local_chanspec;           /**< channel on which we are associated */
+	uint8 local_max;                 /**< local max according to the AP */
+	uint8 local_constraint;              /**< local constraint according to the AP */
+	int8  antgain[2];                /**< Ant gain for each band - from SROM */
+	uint8 rf_cores;                  /**< count of RF Cores being reported */
+	uint8 est_Pout[4];                           /**< Latest tx power out estimate per RF
+							  * chain without adjustment
+							  */
+	uint8 est_Pout_cck;                          /**< Latest CCK tx power out estimate */
+	uint8 user_limit[WL_TX_POWER_RATES_LEGACY];  /**< User limit */
+	uint8 reg_limit[WL_TX_POWER_RATES_LEGACY];   /**< Regulatory power limit */
+	uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /**< Max power board can support (SROM) */
+	uint8 target[WL_TX_POWER_RATES_LEGACY];      /**< Latest target power */
+	uint8 PAD[2];
+} tx_power_legacy2_t;
+
+#define WL_NUM_2x2_ELEMENTS		4
+#define WL_NUM_3x3_ELEMENTS		6
+#define WL_NUM_4x4_ELEMENTS		10
+
+typedef struct {
+	uint16 ver;				/**< version of this struct */
+	uint16 len;				/**< length in bytes of this structure */
+	uint32 flags;
+	chanspec_t chanspec;			/**< txpwr report for this channel */
+	chanspec_t local_chanspec;		/**< channel on which we are associated */
+	uint32     buflen;			/**< ppr buffer length */
+	uint8      pprbuf[1];			/**< Latest target power buffer */
+} wl_txppr_t;
+
+#define WL_TXPPR_VERSION	1
+#define WL_TXPPR_LENGTH	(sizeof(wl_txppr_t))
+#define TX_POWER_T_VERSION	45
+/** number of ppr serialization buffers, it should be reg, board and target */
+#define WL_TXPPR_SER_BUF_NUM	(3)
+
+typedef struct chanspec_txpwr_max {
+	chanspec_t chanspec;   /**< chanspec */
+	uint8 txpwr_max;       /**< max txpwr in all the rates */
+	uint8 padding;
+} chanspec_txpwr_max_t;
+
+typedef struct  wl_chanspec_txpwr_max {
+	uint16 ver;			/**< version of this struct */
+	uint16 len;			/**< length in bytes of this structure */
+	uint32 count;		/**< number of elements of (chanspec, txpwr_max) pair */
+	chanspec_txpwr_max_t txpwr[1];	/**< array of (chanspec, max_txpwr) pair */
+} wl_chanspec_txpwr_max_t;
+
+#define WL_CHANSPEC_TXPWR_MAX_VER	1
+#define WL_CHANSPEC_TXPWR_MAX_LEN	(sizeof(wl_chanspec_txpwr_max_t))
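+
+/*
+ * Sizing sketch (editorial example): one chanspec_txpwr_max_t element is
+ * already part of the structure definition, so a buffer for 'count' pairs
+ * needs the base length plus (count - 1) additional elements:
+ *
+ *	uint32 count = 16;
+ *	uint32 len = WL_CHANSPEC_TXPWR_MAX_LEN +
+ *	             (count - 1) * sizeof(chanspec_txpwr_max_t);
+ */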
+
+typedef struct tx_inst_power {
+	uint8 txpwr_est_Pout[2];			/**< Latest estimate for 2.4 and 5 GHz */
+	uint8 txpwr_est_Pout_gofdm;			/**< Pwr estimate for 2.4 OFDM */
+} tx_inst_power_t;
+
+#define WL_NUM_TXCHAIN_MAX	4
+typedef struct wl_txchain_pwr_offsets {
+	int8 offset[WL_NUM_TXCHAIN_MAX];	/**< quarter dBm signed offset for each chain */
+} wl_txchain_pwr_offsets_t;
+
+/** maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+#define WL_NUMCHANNELS_MANY_CHAN 10
+#define WL_ITER_LIMIT_MANY_CHAN 5
+
+#define WL_MIMO_PS_CFG_VERSION_1 1
+
+typedef struct wl_mimops_cfg {
+	uint8 version;
+	/* active_chains: 0 for all, 1 for 1 chain. */
+	uint8 active_chains;
+	/* static (0), dynamic (1), or disabled (3). Mode applies only when active_chains = 0. */
+	uint8 mode;
+	/* bandwidth = Full (0), 20M (1), 40M (2), 80M (3). */
+	uint8 bandwidth;
+	uint8 applychangesafterlearning;
+	uint8 pad[3];
+} wl_mimops_cfg_t;
+
+/* This event is for tracing MIMO PS metrics snapshot calls.
+ * It is helpful to debug out-of-sync issue between
+ * ucode SHM values and FW snapshot calculation.
+ * It is part of the EVENT_LOG_TAG_MIMO_PS_TRACE.
+ */
+#define WL_MIMO_PS_METRICS_SNAPSHOT_TRACE_TYPE	0
+typedef struct wl_mimo_ps_metrics_snapshot_trace {
+	/* type field for this TLV: */
+	uint16  type;
+	/* length field for this TLV */
+	uint16  len;
+	uint32  idle_slotcnt_mimo;	/* MIMO idle slotcnt raw SHM value */
+	uint32  last_idle_slotcnt_mimo;	/* stored value snapshot */
+	uint32  idle_slotcnt_siso;	/* SISO idle slotcnt raw SHM value */
+	uint32  last_idle_slotcnt_siso;	/* stored value snapshot */
+	uint32	rx_time_mimo;		/* Rx MIMO raw SHM value */
+	uint32	last_rx_time_mimo;	/* stored value snapshot */
+	uint32	rx_time_siso;		/* RX SISO raw SHM value */
+	uint32	last_rx_time_siso;	/* stored value snapshot */
+	uint32  tx_time_1chain;		/* Tx 1-chain raw SHM value */
+	uint32  last_tx_time_1chain;	/* stored value snapshot */
+	uint32	tx_time_2chain;		/* Tx 2-chain raw SHM value */
+	uint32	last_tx_time_2chain;	/* stored value snapshot */
+	uint32  tx_time_3chain;		/* Tx 3-chain raw SHM value */
+	uint32  last_tx_time_3chain;	/* stored value snapshot */
+	uint16	reason;			/* reason for snapshot call, see below */
+	/* Does the call reset last values after delta calculation */
+	uint16	reset_last;
+} wl_mimo_ps_metrics_snapshot_trace_t;
+/* reason codes for mimo ps metrics snapshot function calls */
+#define WL_MIMOPS_METRICS_SNAPSHOT_REPORT		1
+#define WL_MIMOPS_METRICS_SNAPSHOT_RXCHAIN_SET		2
+#define WL_MIMOPS_METRICS_SNAPSHOT_ARBI			3
+#define WL_MIMOPS_METRICS_SNAPSHOT_SLOTUPD		4
+#define WL_MIMOPS_METRICS_SNAPSHOT_PMBCNRX		5
+#define WL_MIMOPS_METRICS_SNAPSHOT_BMACINIT		6
+#define WL_MIMOPS_METRICS_SNAPSHOT_HT_COMPLETE		7
+#define WL_MIMOPS_METRICS_SNAPSHOT_OCL                  8
+
+#define WL_MIMO_PS_STATUS_VERSION_2	2
+typedef struct wl_mimo_ps_status {
+	uint8 version;
+	uint8 ap_cap;			/* The associated AP's capability (BW, MIMO/SISO). */
+	uint8 association_status;	/* How we are associated to the AP (MIMO/SISO). */
+	uint8 mimo_ps_state;		/* mimo_ps_cfg states: [0-5]. See below for values */
+	uint8 mrc_state;		/* MRC state: NONE (0), ACTIVE(1) */
+	uint8 bss_rxchain;		/* bss rxchain bitmask */
+	uint8 bss_txchain;		/* bss txchain bitmask */
+	uint8 bss_bw;			/* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+	uint16 hw_state;		/* bitmask of hw state. See below for values */
+	uint8 hw_rxchain;		/* actual HW rxchain bitmask */
+	uint8 hw_txchain;		/* actual HW txchain bitmask */
+	uint8 hw_bw;			/* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+	uint8 pm_bcnrx_state;		/* actual state of ucode flag */
+	uint8 basic_rates_present;	/* internal flag to trigger siso bcmc rx */
+	uint8 siso_bcmc_rx_state;	/* actual state of ucode flag */
+} wl_mimo_ps_status_t;
+
+#define WL_MIMO_PS_STATUS_VERSION_1	1
+typedef struct wl_mimo_ps_status_v1 {
+	uint8 version;
+	uint8 ap_cap;			/* The associated AP's capability (BW, MIMO/SISO). */
+	uint8 association_status;	/* How we are associated to the AP (MIMO/SISO). */
+	uint8 mimo_ps_state;		/* mimo_ps_cfg states: [0-5]. See below for values */
+	uint8 mrc_state;		/* MRC state: NONE (0), ACTIVE(1) */
+	uint8 bss_rxchain;		/* bss rxchain bitmask */
+	uint8 bss_txchain;		/* bss txchain bitmask */
+	uint8 bss_bw;			/* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+	uint16 hw_state;		/* bitmask of hw state. See below for values */
+	uint8 hw_rxchain;		/* actual HW rxchain bitmask */
+	uint8 hw_txchain;		/* actual HW txchain bitmask */
+	uint8 hw_bw;			/* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */
+	uint8 pad[3];
+} wl_mimo_ps_status_v1_t;
+
+#define WL_MIMO_PS_STATUS_AP_CAP(ap_cap)	(ap_cap & 0x0F)
+#define WL_MIMO_PS_STATUS_AP_CAP_BW(ap_cap)	(ap_cap >> 4)
+#define WL_MIMO_PS_STATUS_ASSOC_BW_SHIFT 4
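+
+/*
+ * Example (editorial): ap_cap packs the association type in the low nibble
+ * and a bandwidth code in the high nibble, so a value of 0x32 decodes to
+ * association type 2 (MIMO) with bandwidth code 3:
+ *
+ *	uint8 ap_cap = 0x32;
+ *	uint8 assoc_type = WL_MIMO_PS_STATUS_AP_CAP(ap_cap);	// -> 2
+ *	uint8 bw_code = WL_MIMO_PS_STATUS_AP_CAP_BW(ap_cap);	// -> 3
+ */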
+
+/* version 3: assoc status: low nibble is status enum, high other flags */
+#define WL_MIMO_PS_STATUS_VERSION_3			3
+#define WL_MIMO_PS_STATUS_ASSOC_STATUS_MASK		0x0F
+#define WL_MIMO_PS_STATUS_ASSOC_STATUS_VHT_WITHOUT_OMN	0x80
+
+/* mimo_ps_status: ap_cap/association status */
+enum {
+	WL_MIMO_PS_STATUS_ASSOC_NONE	= 0,
+	WL_MIMO_PS_STATUS_ASSOC_SISO	= 1,
+	WL_MIMO_PS_STATUS_ASSOC_MIMO	= 2,
+	WL_MIMO_PS_STATUS_ASSOC_LEGACY	= 3
+};
+
+/* mimo_ps_status: mimo_ps_cfg states */
+enum {
+	WL_MIMO_PS_CFG_STATE_NONE			= 0,
+	WL_MIMO_PS_CFG_STATE_INFORM_AP_INPROGRESS	= 1,
+	WL_MIMO_PS_CFG_STATE_INFORM_AP_DONE		= 2,
+	WL_MIMO_PS_CFG_STATE_LEARNING			= 3,
+	WL_MIMO_PS_CFG_STATE_HW_CONFIGURE		= 4,
+	WL_MIMO_PS_CFG_STATE_INFORM_AP_PENDING		= 5
+};
+
+/* mimo_ps_status: hw_state values */
+#define WL_MIMO_PS_STATUS_HW_STATE_NONE			0
+#define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX		(0x1 << 0)
+#define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS		(0x1 << 1)
+#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS		(0x1 << 2)
+#define WL_MIMO_PS_STATUS_HW_STATE_SCAN			(0x1 << 3)
+#define WL_MIMO_PS_STATUS_HW_STATE_TXPPR		(0x1 << 4)
+#define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE		(0x1 << 5)
+#define WL_MIMO_PS_STATUS_HW_STATE_TMPSENSE		(0x1 << 6)
+#define WL_MIMO_PS_STATUS_HW_STATE_IOVAR		(0x1 << 7)
+#define WL_MIMO_PS_STATUS_HW_STATE_AP_BSS		(0x1 << 8)
+
+/* mimo_ps_status: mrc states */
+#define WL_MIMO_PS_STATUS_MRC_NONE	0
+#define WL_MIMO_PS_STATUS_MRC_ACTIVE	1
+
+/* mimo_ps_status: core flag states for single-core beacon and siso-bcmc rx */
+#define WL_MIMO_PS_STATUS_MHF_FLAG_NONE		0
+#define WL_MIMO_PS_STATUS_MHF_FLAG_ACTIVE	1
+#define WL_MIMO_PS_STATUS_MHF_FLAG_COREDOWN	2
+#define WL_MIMO_PS_STATUS_MHF_FLAG_INVALID	3
+
+/* Type values for the REASON */
+#define WL_MIMO_PS_PS_LEARNING_ABORTED          (1 << 0)
+#define WL_MIMO_PS_PS_LEARNING_COMPLETED        (1 << 1)
+#define WL_MIMO_PS_PS_LEARNING_ONGOING          (1 << 2)
+
+typedef struct wl_mimo_ps_learning_event_data {
+	uint32 startTimeStamp;
+	uint32 endTimeStamp;
+	uint16 reason;
+	struct ether_addr BSSID;
+	uint32 totalSISO_below_rssi_threshold;
+	uint32 totalMIMO_below_rssi_threshold;
+	uint32 totalSISO_above_rssi_threshold;
+	uint32 totalMIMO_above_rssi_threshold;
+} wl_mimo_ps_learning_event_data_t;
+
+#define WL_MIMO_PS_PS_LEARNING_CFG_ABORT	(1 << 0)
+#define WL_MIMO_PS_PS_LEARNING_CFG_STATUS	(1 << 1)
+#define WL_MIMO_PS_PS_LEARNING_CFG_CONFIG	(1 << 2)
+
+#define WL_MIMO_PS_PS_LEARNING_CFG_V1 1
+
+typedef struct wl_mimops_learning_cfg {
+	/* flag:  bit 0 for abort */
+	/* flag:  bit 1 for status */
+	/* flag:  bit 2 for configuring no of packets and rssi */
+	uint8                  flag;
+	/* mimo ps learning version, compatible version is 0 */
+	uint8                  version;
+	/* if version is 0 or rssi is 0, ignored */
+	int8                   learning_rssi_threshold;
+	uint8                  reserved;
+	uint32                 no_of_packets_for_learning;
+	wl_mimo_ps_learning_event_data_t mimops_learning_data;
+} wl_mimops_learning_cfg_t;
+
+
+#define WL_OCL_STATUS_VERSION 1
+typedef struct ocl_status_info {
+	uint8  version;
+	uint8  len;
+	uint16 fw_status;     /* Bits representing FW disable reasons */
+	uint8  hw_status;     /* Bits for actual HW config and SISO/MIMO coremask */
+	uint8  coremask;      /* The ocl core mask (indicating listening core) */
+} ocl_status_info_t;
+
+/* MWS OCL map */
+#define WL_MWS_OCL_OVERRIDE_VERSION 1
+typedef struct wl_mws_ocl_override {
+	uint16  version;    /* Structure version */
+	uint16	bitmap_2g; /* bitmap for 2.4G channels bits 1-13 */
+	uint16	bitmap_5g_lo;  /* bitmap for 5G low channels by 2:
+				* 34-48, 52-56, 60-64, 100-102
+				*/
+	uint16	bitmap_5g_mid; /* bitmap for 5G mid channels by 2:
+				* 104, 108-112, 116-120, 124-128,
+				* 132-136, 140, 149-151
+				*/
+	uint16	bitmap_5g_high; /* bitmap for 5G high channels by 2
+				* 153, 157-161, 165
+				*/
+} wl_mws_ocl_override_t;
+
+/* Bits for fw_status */
+#define OCL_DISABLED_HOST		0x01   /* Host has disabled through ocl_enable */
+#define OCL_DISABLED_RSSI		0x02   /* Disabled because of ocl_rssi_threshold */
+#define OCL_DISABLED_LTEC		0x04   /* Disabled due to LTE Coex activity */
+#define OCL_DISABLED_SISO		0x08   /* Disabled while in SISO mode */
+#define OCL_DISABLED_CAL		0x10   /* Disabled during active calibration */
+#define OCL_DISABLED_CHANSWITCH		0x20   /* Disabled during active channel switch */
+#define OCL_DISABLED_ASPEND     0x40   /* Disabled due to assoc pending */
+
+/* Bits for hw_status */
+#define OCL_HWCFG			0x01   /* State of OCL config bit in phy HW */
+#define OCL_HWMIMO			0x02   /* Set if current coremask is > 1 bit */
+#define OCL_COREDOWN			0x80   /* Set if core is currently down */
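+
+/*
+ * Example (editorial): fw_status and hw_status are plain bitmasks, so a host
+ * can test individual reasons directly from a returned ocl_status_info_t:
+ *
+ *	ocl_status_info_t status;
+ *	// ... 'status' filled in by the corresponding status query ...
+ *	if (status.fw_status & OCL_DISABLED_RSSI) {
+ *		// OCL is off because the RSSI threshold was crossed
+ *	}
+ *	if (status.hw_status & OCL_COREDOWN) {
+ *		// the listening core is currently down
+ *	}
+ */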
+
+
+/*
+ * Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
+ * a one-byte length, and a variable length value.  RSSI type tuple must be present
+ * in the array.
+ *
+ * Types are defined in "join preference types" section.
+ *
+ * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple
+ * and must be set to zero.
+ *
+ * Values are defined below.
+ *
+ * 1. RSSI - 2 octets
+ * offset 0: reserved
+ * offset 1: reserved
+ *
+ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below)
+ * offset 0: reserved
+ * offset 1: # of tuples
+ * offset 2: tuple 1
+ * offset 14: tuple 2
+ * ...
+ * offset 2 + 12 * (n - 1) octets: tuple n
+ *
+ * struct wpa_cfg_tuple {
+ *   uint8 akm[DOT11_OUI_LEN+1];     akm suite
+ *   uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite
+ *   uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite
+ * };
+ *
+ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY.
+ *
+ * 3. BAND - 2 octets
+ * offset 0: reserved
+ * offset 1: see "band preference" and "band types"
+ *
+ * 4. BAND RSSI - 2 octets
+ * offset 0: band types
+ * offset 1: +ve RSSI boost value in dB
+ */
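+
+/*
+ * Layout sketch (editorial example): each tuple is {type, length, value...}.
+ * For the BAND RSSI preference the value is 2 octets (band type followed by
+ * the RSSI boost in dB), so the whole tuple occupies 4 bytes. The type and
+ * band codes below are placeholders for the constants defined in the
+ * "join preference types" and "band types" sections:
+ *
+ *	uint8 tuple[4];
+ *	tuple[0] = JOIN_PREF_TYPE_BAND_RSSI;	// placeholder tuple type
+ *	tuple[1] = 2;				// value length in octets
+ *	tuple[2] = BAND_5G_CODE;		// placeholder band type
+ *	tuple[3] = 10;				// boost 5 GHz RSSI by 10 dB
+ */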
+
+struct tsinfo_arg {
+	uint8 octets[3];
+};
+
+#define RATE_CCK_1MBPS 0
+#define RATE_CCK_2MBPS 1
+#define RATE_CCK_5_5MBPS 2
+#define RATE_CCK_11MBPS 3
+
+#define RATE_LEGACY_OFDM_6MBPS 0
+#define RATE_LEGACY_OFDM_9MBPS 1
+#define RATE_LEGACY_OFDM_12MBPS 2
+#define RATE_LEGACY_OFDM_18MBPS 3
+#define RATE_LEGACY_OFDM_24MBPS 4
+#define RATE_LEGACY_OFDM_36MBPS 5
+#define RATE_LEGACY_OFDM_48MBPS 6
+#define RATE_LEGACY_OFDM_54MBPS 7
+
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
+
+typedef struct wl_bsstrans_rssi {
+	int8 rssi_2g;	/**< RSSI in dBm for 2.4 GHz */
+	int8 rssi_5g;	/**< RSSI in dBm for 5 GHz, unused for cck */
+} wl_bsstrans_rssi_t;
+
+#define RSSI_RATE_MAP_MAX_STREAMS 4	/**< max streams supported */
+
+/** RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map {
+	uint16 ver;
+	uint16 len; /**< length of entire structure */
+	wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
+	wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
+	wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+	wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */
+} wl_bsstrans_rssi_rate_map_t;
+
+#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1
+
+/** Configure number of scans allowed per throttle period */
+typedef struct wl_bsstrans_roamthrottle {
+	uint16 ver;
+	uint16 period;
+	uint16 scans_allowed;
+} wl_bsstrans_roamthrottle_t;
+
+#define	NFIFO			6	/**< # tx/rx fifopairs */
+
+#if defined(BCM_DMA_CT) && !defined(BCM_DMA_CT_DISABLED)
+#define NFIFO_EXT		32		/* 6 traditional FIFOs + 2 rsvd + 24 MU FIFOs */
+#elif defined(WL11AX) && defined(WL11AX_TRIGGERQ_ENABLED)
+#define NFIFO_EXT		10
+#else
+#define NFIFO_EXT		NFIFO
+#endif
+
+/* Reinit reason codes */
+enum {
+	WL_REINIT_RC_NONE             = 0,
+	WL_REINIT_RC_PS_SYNC          = 1,
+	WL_REINIT_RC_PSM_WD           = 2,
+	WL_REINIT_RC_MAC_WAKE         = 3,
+	WL_REINIT_RC_MAC_SUSPEND      = 4,
+	WL_REINIT_RC_MAC_SPIN_WAIT    = 5,
+	WL_REINIT_RC_AXI_BUS_ERROR    = 6,
+	WL_REINIT_RC_DEVICE_REMOVED   = 7,
+	WL_REINIT_RC_PCIE_FATAL_ERROR = 8,
+	WL_REINIT_RC_OL_FW_TRAP       = 9,
+	WL_REINIT_RC_FIFO_ERR         = 10,
+	WL_REINIT_RC_INV_TX_STATUS    = 11,
+	WL_REINIT_RC_MQ_ERROR         = 12,
+	WL_REINIT_RC_PHYTXERR_THRESH  = 13,
+	WL_REINIT_RC_USER_FORCED      = 14,
+	WL_REINIT_RC_FULL_RESET       = 15,
+	WL_REINIT_RC_AP_BEACON        = 16,
+	WL_REINIT_RC_PM_EXCESSED      = 17,
+	WL_REINIT_RC_NO_CLK           = 18,
+	WL_REINIT_RC_SW_ASSERT        = 19,
+	WL_REINIT_RC_PSM_JMP0         = 20,
+	WL_REINIT_RC_PSM_RUN          = 21,
+	WL_REINIT_RC_ENABLE_MAC       = 22,
+	WL_REINIT_RC_SCAN_TIMEOUT     = 23,
+	WL_REINIT_RC_JOIN_TIMEOUT     = 24,
+	/* Below error codes are generated during D3 exit validation */
+	WL_REINIT_RC_LINK_NOT_ACTIVE  = 25,
+	WL_REINIT_RC_PCI_CFG_RD_FAIL  = 26,
+	WL_REINIT_RC_INV_VEN_ID       = 27,
+	WL_REINIT_RC_INV_DEV_ID       = 28,
+	WL_REINIT_RC_INV_BAR0         = 29,
+	WL_REINIT_RC_INV_BAR2         = 30,
+	WL_REINIT_RC_AER_UC_FATAL     = 31,
+	WL_REINIT_RC_AER_UC_NON_FATAL = 32,
+	WL_REINIT_RC_AER_CORR         = 33,
+	WL_REINIT_RC_AER_DEV_STS      = 34,
+	WL_REINIT_RC_PCIe_STS         = 35,
+	WL_REINIT_RC_MMIO_RD_FAIL     = 36,
+	WL_REINIT_RC_MMIO_RD_INVAL    = 37,
+	WL_REINIT_RC_MMIO_ARM_MEM_RD_FAIL = 38,
+	WL_REINIT_RC_MMIO_ARM_MEM_INVAL   = 39,
+	WL_REINIT_RC_SROM_LOAD_FAILED     = 40,
+	WL_REINIT_RC_PHY_CRASH            = 41,
+	WL_REINIT_TX_STALL                = 42,
+	WL_REINIT_RC_TX_FLOW_CONTROL_BLOCKED	= 43,
+	WL_REINIT_RC_RX_HC_FAIL           = 44,
+	WL_REINIT_RC_RX_DMA_STALL         = 45,
+	WL_REINIT_UTRACE_BUF_OVERLAP_SR	  = 46,
+	WL_REINIT_UTRACE_TPL_OUT_BOUNDS   = 47,
+	WL_REINIT_UTRACE_TPL_OSET_STRT0   = 48,
+	WL_REINIT_RC_PHYTXERR             = 49,
+	WL_REINIT_RC_PSM_FATAL_SUSP       = 50,
+	WL_REINIT_RC_TX_FIFO_SUSP         = 51,
+	WL_REINIT_RC_MAC_ENABLE           = 52,
+	WL_REINIT_RC_SCAN_STALLED         = 53,
+	WL_REINIT_RC_LAST	/* This must be the last entry */
+};
+
+#define NREINITREASONCOUNT	8
+
+#define REINITRSNIDX(_x)	(((_x) < WL_REINIT_RC_LAST) ? (_x) : 0)
+
+#define	WL_CNT_T_VERSION	30	/**< current version of wl_cnt_t struct */
+#define WL_CNT_VERSION_6	6
+#define WL_CNT_VERSION_11	11
+#define WL_CNT_VERSION_XTLV	30
+
+#define WL_COUNTERS_IOV_VERSION_1	1
+#define WL_SUBCNTR_IOV_VER		WL_COUNTERS_IOV_VERSION_1
+/* First two uint16 are version and length fields. So offset of the first counter will be 4 */
+#define FIRST_COUNTER_OFFSET		0x04
+
+#define WLC_WITH_XTLV_CNT
+
+/**
+ * tlv IDs uniquely identifies counter component
+ * packed into wl_cmd_t container
+ */
+enum wl_cnt_xtlv_id {
+	WL_CNT_XTLV_SLICE_IDX = 0x1,		/**< Slice index */
+	WL_CNT_XTLV_WLC = 0x100,		/**< WLC layer counters */
+	WL_CNT_XTLV_WLC_RINIT_RSN = 0x101,	/**< WLC layer reinitreason extension */
+	WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200,	/**< wl counter ver < 11 UCODE MACSTAT */
+	WL_CNT_XTLV_LT40_UCODE_V1 = 0x300,	/**< corerev < 40 UCODE MACSTAT */
+	WL_CNT_XTLV_GE40_UCODE_V1 = 0x400,	/**< corerev >= 40 UCODE MACSTAT */
+	WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800	/* corerev >= 64 UCODEX MACSTAT */
+};
+
+/**
+ * The number of variables in wl macstat cnt struct.
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t)
+ */
+#define WL_CNT_MCST_VAR_NUM 64
+/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */
+#define WL_CNT_MCST_STRUCT_SZ ((uint32)sizeof(uint32) * WL_CNT_MCST_VAR_NUM)
+
+#define WL_CNT_MCXST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge64mcxst_v1_t))
+#define INVALID_CNT_VAL (uint32)(-1)
+
+#define WL_XTLV_CNTBUF_MAX_SIZE ((uint32)(OFFSETOF(wl_cnt_info_t, data)) +	\
+		(uint32)BCM_XTLV_HDR_SIZE + (uint32)sizeof(wl_cnt_wlc_t) +		\
+		(uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ +              \
+		(uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ)
+
+#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t))
+
+
+/** Top structure of counters IOVar buffer */
+typedef struct {
+	uint16	version;	/**< see definition of WL_CNT_T_VERSION */
+	uint16	datalen;	/**< length of data including all paddings. */
+	uint8   data [];	/**< variable length payload:
+				 * 1 or more bcm_xtlv_t type of tuples.
+				 * each tuple is padded to multiple of 4 bytes.
+				 * 'datalen' field of this structure includes all paddings.
+				 */
+} wl_cnt_info_t;
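+
+/*
+ * Parsing sketch (editorial example, assuming each tuple carries a 2-byte id
+ * and a 2-byte length followed by the payload, with tuples padded to 4-byte
+ * multiples as described above): walk 'data' until 'datalen' bytes have been
+ * consumed and dispatch on the tuple id (see enum wl_cnt_xtlv_id):
+ *
+ *	const uint8 *p = cnt_info->data;
+ *	uint32 remaining = cnt_info->datalen;
+ *	while (remaining >= 4) {
+ *		uint16 id, len;
+ *		memcpy(&id, p, sizeof(id));
+ *		memcpy(&len, p + 2, sizeof(len));
+ *		// ... handle the 'len'-byte payload at p + 4, e.g. WL_CNT_XTLV_WLC ...
+ *		uint32 adv = 4 + (((uint32)len + 3) & ~3u);
+ *		if (adv > remaining)
+ *			break;
+ *		p += adv;
+ *		remaining -= adv;
+ *	}
+ */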
+
+/* Top structure of subcounters IOVar buffer
+ * Whenever we make any change in this structure,
+ * WL_SUBCNTR_IOV_VER should be updated accordingly.
+ * The structure definition should remain consistent between
+ * FW and the wl/WLM app.
+ */
+typedef struct {
+	uint16	version;	  /* Version of IOVAR structure. Used for backward
+				   * compatibility in future. Whenever we make any
+				   * changes to this structure then value of WL_SUBCNTR_IOV_VER
+				   * needs to be updated properly.
+				   */
+	uint16	length;		  /* length in bytes of this structure */
+	uint16	counters_version; /* see definition of WL_CNT_T_VERSION
+				   * The wl app will send the version of counters
+				   * which is used to calculate the offset of counters.
+				   * It must match the version of counters FW is using;
+				   * otherwise FW will return an error with its version of counters
+				   * set in this field.
+				   */
+	uint16	num_subcounters;  /* Number of counter offset passed by wl app to FW. */
+	uint32	data[1];	  /* variable length payload:
+				   * Offsets to the counters will be passed to FW
+				   * through this data field. FW will return the value of counters
+				   * at the offsets passed by the wl app in this field itself.
+				   */
+} wl_subcnt_info_t;
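+
+/*
+ * Request sketch (editorial example): the host fills the header, sets
+ * counters_version to the WL_CNT_T_VERSION it was built against, and places
+ * the requested byte offsets (relative to the counter block, whose first
+ * counter starts at FIRST_COUNTER_OFFSET) in data[]; FW writes the counter
+ * values back over those same slots:
+ *
+ *	uint16 num = 2;
+ *	uint32 len = OFFSETOF(wl_subcnt_info_t, data) + num * sizeof(uint32);
+ *	wl_subcnt_info_t *req = (wl_subcnt_info_t *)malloc(len);
+ *	if (req != NULL) {
+ *		memset(req, 0, len);
+ *		req->version = WL_SUBCNTR_IOV_VER;
+ *		req->length = (uint16)len;
+ *		req->counters_version = WL_CNT_T_VERSION;
+ *		req->num_subcounters = num;
+ *		req->data[0] = FIRST_COUNTER_OFFSET;
+ *		req->data[1] = FIRST_COUNTER_OFFSET + sizeof(uint32);
+ *	}
+ */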
+
+/** wlc layer counters */
+typedef struct {
+	/* transmit stat counters */
+	uint32	txframe;	/**< tx data frames */
+	uint32	txbyte;		/**< tx data bytes */
+	uint32	txretrans;	/**< tx mac retransmits */
+	uint32	txerror;	/**< tx data errors (derived: sum of others) */
+	uint32	txctl;		/**< tx management frames */
+	uint32	txprshort;	/**< tx short preamble frames */
+	uint32	txserr;		/**< tx status errors */
+	uint32	txnobuf;	/**< tx out of buffers errors */
+	uint32	txnoassoc;	/**< tx discard because we're not associated */
+	uint32	txrunt;		/**< tx runt frames */
+	uint32	txchit;		/**< tx header cache hit (fastpath) */
+	uint32	txcmiss;	/**< tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32	txuflo;		/**< tx fifo underflows */
+	uint32	txphyerr;	/**< tx phy errors (indicated in tx status) */
+	uint32	txphycrs;
+
+	/* receive stat counters */
+	uint32	rxframe;	/**< rx data frames */
+	uint32	rxbyte;		/**< rx data bytes */
+	uint32	rxerror;	/**< rx data errors (derived: sum of others) */
+	uint32	rxctl;		/**< rx management frames */
+	uint32	rxnobuf;	/**< rx out of buffers errors */
+	uint32	rxnondata;	/**< rx non data frames in the data channel errors */
+	uint32	rxbadds;	/**< rx bad DS errors */
+	uint32	rxbadcm;	/**< rx bad control or management frames */
+	uint32	rxfragerr;	/**< rx fragmentation errors */
+	uint32	rxrunt;		/**< rx runt frames */
+	uint32	rxgiant;	/**< rx giant frames */
+	uint32	rxnoscb;	/**< rx no scb error */
+	uint32	rxbadproto;	/**< rx invalid frames */
+	uint32	rxbadsrcmac;	/**< rx frames with Invalid Src Mac */
+	uint32	rxbadda;	/**< rx frames tossed for invalid da */
+	uint32	rxfilter;	/**< rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32	rxoflo;		/**< rx fifo overflow errors */
+	uint32	rxuflo[NFIFO];	/**< rx dma descriptor underflow errors */
+
+	uint32	d11cnt_txrts_off;	/**< d11cnt txrts value when reset d11cnt */
+	uint32	d11cnt_rxcrc_off;	/**< d11cnt rxcrc value when reset d11cnt */
+	uint32	d11cnt_txnocts_off;	/**< d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32	dmade;		/**< tx/rx dma descriptor errors */
+	uint32	dmada;		/**< tx/rx dma data errors */
+	uint32	dmape;		/**< tx/rx dma descriptor protocol errors */
+	uint32	reset;		/**< reset count */
+	uint32	tbtt;		/**< cnts the TBTT int's */
+	uint32	txdmawar;
+	uint32	pkt_callback_reg_fail;	/**< callbacks register failure */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32	txfrag;		/**< dot11TransmittedFragmentCount */
+	uint32	txmulti;	/**< dot11MulticastTransmittedFrameCount */
+	uint32	txfail;		/**< dot11FailedCount */
+	uint32	txretry;	/**< dot11RetryCount */
+	uint32	txretrie;	/**< dot11MultipleRetryCount */
+	uint32	rxdup;		/**< dot11FrameduplicateCount */
+	uint32	txrts;		/**< dot11RTSSuccessCount */
+	uint32	txnocts;	/**< dot11RTSFailureCount */
+	uint32	txnoack;	/**< dot11ACKFailureCount */
+	uint32	rxfrag;		/**< dot11ReceivedFragmentCount */
+	uint32	rxmulti;	/**< dot11MulticastReceivedFrameCount */
+	uint32	rxcrc;		/**< dot11FCSErrorCount */
+	uint32	txfrmsnt;	/**< dot11TransmittedFrameCount (bogus MIB?) */
+	uint32	rxundec;	/**< dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill;	/**< TKIPLocalMICFailures */
+	uint32	tkipcntrmsr;	/**< TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay;	/**< TKIPReplays */
+	uint32	ccmpfmterr;	/**< CCMPFormatErrors */
+	uint32	ccmpreplay;	/**< CCMPReplays */
+	uint32	ccmpundec;	/**< CCMPDecryptErrors */
+	uint32	fourwayfail;	/**< FourWayHandshakeFailures */
+	uint32	wepundec;	/**< dot11WEPUndecryptableCount */
+	uint32	wepicverr;	/**< dot11WEPICVErrorCount */
+	uint32	decsuccess;	/**< DecryptSuccessCount */
+	uint32	tkipicverr;	/**< TKIPICVErrorCount */
+	uint32	wepexcluded;	/**< dot11WEPExcludedCount */
+
+	uint32	txchanrej;	/**< Tx frames suppressed due to channel rejection */
+	uint32	psmwds;		/**< Count PSM watchdogs */
+	uint32	phywatchdog;	/**< Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32	prq_entries_handled;	/**< PRQ entries read in */
+	uint32	prq_undirected_entries;	/**<    which were bcast bss & ssid */
+	uint32	prq_bad_entries;	/**<    which could not be translated to info */
+	uint32	atim_suppress_count;	/**< TX suppressions on ATIM fifo */
+	uint32	bcn_template_not_ready;	/**< Template marked in use on send bcn ... */
+	uint32	bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */
+	uint32	late_tbtt_dpc;	/**< TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/**< packets rx at 1Mbps */
+	uint32  rx2mbps;	/**< packets rx at 2Mbps */
+	uint32  rx5mbps5;	/**< packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/**< packets rx at 6Mbps */
+	uint32  rx9mbps;	/**< packets rx at 9Mbps */
+	uint32  rx11mbps;	/**< packets rx at 11Mbps */
+	uint32  rx12mbps;	/**< packets rx at 12Mbps */
+	uint32  rx18mbps;	/**< packets rx at 18Mbps */
+	uint32  rx24mbps;	/**< packets rx at 24Mbps */
+	uint32  rx36mbps;	/**< packets rx at 36Mbps */
+	uint32  rx48mbps;	/**< packets rx at 48Mbps */
+	uint32  rx54mbps;	/**< packets rx at 54Mbps */
+	uint32  rx108mbps;	/**< packets rx at 108mbps */
+	uint32  rx162mbps;	/**< packets rx at 162mbps */
+	uint32  rx216mbps;	/**< packets rx at 216 mbps */
+	uint32  rx270mbps;	/**< packets rx at 270 mbps */
+	uint32  rx324mbps;	/**< packets rx at 324 mbps */
+	uint32  rx378mbps;	/**< packets rx at 378 mbps */
+	uint32  rx432mbps;	/**< packets rx at 432 mbps */
+	uint32  rx486mbps;	/**< packets rx at 486 mbps */
+	uint32  rx540mbps;	/**< packets rx at 540 mbps */
+
+	uint32	rfdisable;	/**< count of radio disables */
+
+	uint32	txexptime;	/**< Tx frames suppressed due to timer expiration */
+
+	uint32	txmpdu_sgi;	/**< count for sgi transmit */
+	uint32	rxmpdu_sgi;	/**< count for sgi received */
+	uint32	txmpdu_stbc;	/**< count for stbc transmit */
+	uint32	rxmpdu_stbc;	/**< count for stbc received */
+
+	uint32	rxundec_mcst;	/**< dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill_mcst;	/**< TKIPLocalMICFailures */
+	uint32	tkipcntrmsr_mcst;	/**< TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay_mcst;	/**< TKIPReplays */
+	uint32	ccmpfmterr_mcst;	/**< CCMPFormatErrors */
+	uint32	ccmpreplay_mcst;	/**< CCMPReplays */
+	uint32	ccmpundec_mcst;	/**< CCMPDecryptErrors */
+	uint32	fourwayfail_mcst;	/**< FourWayHandshakeFailures */
+	uint32	wepundec_mcst;	/**< dot11WEPUndecryptableCount */
+	uint32	wepicverr_mcst;	/**< dot11WEPICVErrorCount */
+	uint32	decsuccess_mcst;	/**< DecryptSuccessCount */
+	uint32	tkipicverr_mcst;	/**< TKIPICVErrorCount */
+	uint32	wepexcluded_mcst;	/**< dot11WEPExcludedCount */
+
+	uint32	dma_hang;	/**< count for dma hang */
+	uint32	reinit;		/**< count for reinit */
+
+	uint32  pstatxucast;	/**< count of ucast frames xmitted on all psta assoc */
+	uint32  pstatxnoassoc;	/**< count of txnoassoc frames xmitted on all psta assoc */
+	uint32  pstarxucast;	/**< count of ucast frames received on all psta assoc */
+	uint32  pstarxbcmc;	/**< count of bcmc frames received on all psta */
+	uint32  pstatxbcmc;	/**< count of bcmc frames transmitted on all psta */
+
+	uint32  cso_passthrough; /**< hw cso required but passthrough */
+	uint32	cso_normal;	/**< hw cso hdr for normal process */
+	uint32	chained;	/**< number of frames chained */
+	uint32	chainedsz1;	/**< number of chain size 1 frames */
+	uint32	unchained;	/**< number of frames not chained */
+	uint32	maxchainsz;	/**< max chain size so far */
+	uint32	currchainsz;	/**< current chain size */
+	uint32	pciereset;	/**< Secondary Bus Reset issued by driver */
+	uint32	cfgrestore;	/**< configspace restore by driver */
+	uint32	reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */
+	uint32	rxrtry;
+	uint32  rxmpdu_mu;      /**< Number of MU MPDUs received */
+
+	/* detailed control/management frames */
+	uint32	txbar;		/**< Number of TX BAR */
+	uint32	rxbar;		/**< Number of RX BAR */
+	uint32	txpspoll;	/**< Number of TX PS-poll */
+	uint32	rxpspoll;	/**< Number of RX PS-poll */
+	uint32	txnull;		/**< Number of TX NULL_DATA */
+	uint32	rxnull;		/**< Number of RX NULL_DATA */
+	uint32	txqosnull;	/**< Number of TX NULL_QoSDATA */
+	uint32	rxqosnull;	/**< Number of RX NULL_QoSDATA */
+	uint32	txassocreq;	/**< Number of TX ASSOC request */
+	uint32	rxassocreq;	/**< Number of RX ASSOC request */
+	uint32	txreassocreq;	/**< Number of TX REASSOC request */
+	uint32	rxreassocreq;	/**< Number of RX REASSOC request */
+	uint32	txdisassoc;	/**< Number of TX DISASSOC */
+	uint32	rxdisassoc;	/**< Number of RX DISASSOC */
+	uint32	txassocrsp;	/**< Number of TX ASSOC response */
+	uint32	rxassocrsp;	/**< Number of RX ASSOC response */
+	uint32	txreassocrsp;	/**< Number of TX REASSOC response */
+	uint32	rxreassocrsp;	/**< Number of RX REASSOC response */
+	uint32	txauth;		/**< Number of TX AUTH */
+	uint32	rxauth;		/**< Number of RX AUTH */
+	uint32	txdeauth;	/**< Number of TX DEAUTH */
+	uint32	rxdeauth;	/**< Number of RX DEAUTH */
+	uint32	txprobereq;	/**< Number of TX probe request */
+	uint32	rxprobereq;	/**< Number of RX probe request */
+	uint32	txprobersp;	/**< Number of TX probe response */
+	uint32	rxprobersp;	/**< Number of RX probe response */
+	uint32	txaction;	/**< Number of TX action frame */
+	uint32	rxaction;	/**< Number of RX action frame */
+	uint32  ampdu_wds;	/**< Number of AMPDU watchdogs */
+	uint32  txlost;		/**< Number of lost packets reported in txs */
+	uint32	txdatamcast;	/**< Number of TX multicast data packets */
+	uint32	txdatabcast;	/**< Number of TX broadcast data packets */
+	uint32	psmxwds;	/**< Number of PSMx watchdogs */
+	uint32  rxback;
+	uint32  txback;
+	uint32  p2p_tbtt;	/**< Number of P2P TBTT Events */
+	uint32  p2p_tbtt_miss;	/**< Number of P2P TBTT Events Miss */
+	uint32	txqueue_start;
+	uint32	txqueue_end;
+	uint32  txbcast;        /* Broadcast TransmittedFrameCount */
+	uint32  txdropped;      /* tx dropped pkts */
+	uint32  rxbcast;        /* BroadcastReceivedFrameCount */
+	uint32  rxdropped;      /* rx dropped pkts (derived: sum of others) */
+} wl_cnt_wlc_t;
+
+/* Reinit reasons - do not put anything else other than reinit reasons here */
+typedef struct {
+	uint32 rsn[WL_REINIT_RC_LAST];
+} reinit_rsns_t;
+
+/* MACXSTAT counters for ucodex (corerev >= 64) */
+typedef struct {
+	uint32 macxsusp;
+	uint32 m2vmsg;
+	uint32 v2mmsg;
+	uint32 mboxout;
+	uint32 musnd;
+	uint32 sfb2v;
+} wl_cnt_ge64mcxst_v1_t;
+
+/** MACSTAT counters for ucode (corerev >= 40) */
+typedef struct {
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32	txallfrm;	/**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control Management (includes retransmissions)
+				 */
+	uint32	txrtsfrm;	/**< number of RTS sent out by the MAC */
+	uint32	txctsfrm;	/**< number of CTS sent out by the MAC */
+	uint32	txackfrm;	/**< number of ACK frames sent out */
+	uint32	txdnlfrm;	/**< number of Null-Data transmissions generated from template */
+	uint32	txbcnfrm;	/**< beacons transmitted */
+	uint32	txfunfl[6];	/**< per-fifo tx underflows */
+	uint32	txampdu;	/**< number of AMPDUs transmitted */
+	uint32	txmpdu;		/**< number of MPDUs transmitted */
+	uint32	txtplunfl;	/**< Template underflows (mac was too slow to transmit ACK/CTS
+				 * or BCN)
+				 */
+	uint32	txphyerror;	/**< Transmit phy error, type of error is reported in tx-status for
+				 * driver enqueued frames
+				 */
+	uint32  pktengrxducast; /**< unicast frames rxed by the pkteng code */
+	uint32  pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+	uint32	rxfrmtoolong;	/**< Received frame longer than legal limit (2346 bytes) */
+	uint32	rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+	uint32	rxanyerr;	/**< Any RX error that is not counted by other counters. */
+	uint32	rxbadfcs;	/**< number of frames for which the CRC check failed in the MAC */
+	uint32	rxbadplcp;	/**< parity check of the PLCP header failed */
+	uint32	rxcrsglitch;	/**< PHY was able to correlate the preamble but not the header */
+	uint32	rxstrt;		/**< Number of received frames with a good PLCP
+				 * (i.e. passing parity check)
+				 */
+	uint32	rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+	uint32	rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+	uint32	rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+	uint32	rxrtsucast;	/**< number of unicast RTS addressed to the MAC (good FCS) */
+	uint32	rxctsucast;	/**< number of unicast CTS addressed to the MAC (good FCS) */
+	uint32	rxackucast;	/**< number of ucast ACKS received (good FCS) */
+	uint32	rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
+	uint32	rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+	uint32	rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+	uint32	rxrtsocast;	/**< number of received RTS not addressed to the MAC */
+	uint32	rxctsocast;	/**< number of received CTS not addressed to the MAC */
+	uint32	rxdtmcast;	/**< number of RX Data multicast frames received by the MAC */
+	uint32	rxmgmcast;	/**< number of RX Management multicast frames received by the MAC */
+	uint32	rxctlmcast;	/**< number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32	rxbeaconmbss;	/**< beacons received from member of BSS */
+	uint32	rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32	rxbeaconobss;	/**< beacons received from other BSS */
+	uint32	rxrsptmout;	/**< number of response timeouts for transmitted frames
+				 * expecting a response
+				 */
+	uint32	bcntxcancl;	/**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxnodelim;	/**< number of times no valid delimiter was detected by ampdu parser */
+	uint32	rxf0ovfl;	/**< number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/**< number of receive fifo 1 overflows */
+	uint32	rxhlovfl;	/**< number of length / header fifo overflows */
+	uint32	missbcn_dbg;	/**< number of beacons missed */
+	uint32	pmqovfl;	/**< number of PMQ overflows */
+	uint32	rxcgprqfrm;	/**< number of received Probe requests that made it into
+				 * the PRQ fifo
+				 */
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+	uint32	txcgprsfail;	/**< Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32	txcgprssuc;	/**< Tx Probe Response Success (ACK was received) */
+	uint32	prs_timeout;	/**< number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32	txrtsfail;	/**< number of rts transmission failure that reach retry limit */
+	uint32	txucast;	/**< number of unicast tx expecting response other than cts/cwcts */
+	uint32  txinrtstxop;	/**< number of data frame transmissions during rts txop */
+	uint32	rxback;		/**< blockack rxcnt */
+	uint32	txback;		/**< blockack txcnt */
+	uint32	bphy_rxcrsglitch;	/**< PHY count of bphy glitches */
+	uint32	rxdrop20s;	/**< drop secondary cnt */
+	uint32	rxtoolate;	/**< receive too late */
+	uint32  bphy_badplcp;	/**< number of bad PLCP reception on BPHY rate */
+} wl_cnt_ge40mcst_v1_t;
+
+/** MACSTAT counters for ucode (corerev < 40) */
+typedef struct {
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32	txallfrm;	/**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control Management (includes retransmissions)
+				 */
+	uint32	txrtsfrm;	/**< number of RTS sent out by the MAC */
+	uint32	txctsfrm;	/**< number of CTS sent out by the MAC */
+	uint32	txackfrm;	/**< number of ACK frames sent out */
+	uint32	txdnlfrm;	/**< number of Null-Data transmissions generated from template */
+	uint32	txbcnfrm;	/**< beacons transmitted */
+	uint32	txfunfl[6];	/**< per-fifo tx underflows */
+	uint32	txampdu;	/**< number of AMPDUs transmitted */
+	uint32	txmpdu;		/**< number of MPDUs transmitted */
+	uint32	txtplunfl;	/**< Template underflows (mac was too slow to transmit ACK/CTS
+				 * or BCN)
+				 */
+	uint32	txphyerror;	/**< Transmit phy error, type of error is reported in tx-status for
+				 * driver enqueued frames
+				 */
+	uint32  pktengrxducast; /**< unicast frames rxed by the pkteng code */
+	uint32  pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+	uint32	rxfrmtoolong;	/**< Received frame longer than legal limit (2346 bytes) */
+	uint32	rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+	uint32	rxanyerr;	/**< Any RX error that is not counted by other counters. */
+	uint32	rxbadfcs;	/**< number of frames for which the CRC check failed in the MAC */
+	uint32	rxbadplcp;	/**< parity check of the PLCP header failed */
+	uint32	rxcrsglitch;	/**< PHY was able to correlate the preamble but not the header */
+	uint32	rxstrt;		/**< Number of received frames with a good PLCP
+				 * (i.e. passing parity check)
+				 */
+	uint32	rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+	uint32	rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+	uint32	rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+	uint32	rxrtsucast;	/**< number of unicast RTS addressed to the MAC (good FCS) */
+	uint32	rxctsucast;	/**< number of unicast CTS addressed to the MAC (good FCS) */
+	uint32	rxackucast;	/**< number of ucast ACKS received (good FCS) */
+	uint32	rxdtocast;  /**< number of received DATA frames (good FCS and not matching RA) */
+	uint32	rxmgocast;  /**< number of received MGMT frames (good FCS and not matching RA) */
+	uint32	rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+	uint32	rxrtsocast;	/**< number of received RTS not addressed to the MAC */
+	uint32	rxctsocast;	/**< number of received CTS not addressed to the MAC */
+	uint32	rxdtmcast;	/**< number of RX Data multicast frames received by the MAC */
+	uint32	rxmgmcast;	/**< number of RX Management multicast frames received by the MAC */
+	uint32	rxctlmcast;	/**< number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32	rxbeaconmbss;	/**< beacons received from member of BSS */
+	uint32	rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32	rxbeaconobss;	/**< beacons received from other BSS */
+	uint32	rxrsptmout;	/**< number of response timeouts for transmitted frames
+				 * expecting a response
+				 */
+	uint32	bcntxcancl;	/**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxnodelim;	/**< number of times no valid delimiter was detected by ampdu parser */
+	uint32	rxf0ovfl;	/**< number of receive fifo 0 overflows */
+	uint32	dbgoff46;
+	uint32	dbgoff47;
+	uint32	dbgoff48;	/**< Used for counting txstatus queue overflow (corerev <= 4)  */
+	uint32	pmqovfl;	/**< number of PMQ overflows */
+	uint32	rxcgprqfrm;	/**< number of received Probe requests that made it into
+				 * the PRQ fifo
+				 */
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+	uint32	txcgprsfail;	/**< Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32	txcgprssuc;	/**< Tx Probe Response Success (ACK was received) */
+	uint32	prs_timeout;	/**< number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32	txrtsfail;	/**< number of rts transmission failure that reach retry limit */
+	uint32	txucast;	/**< number of unicast tx expecting response other than cts/cwcts */
+	uint32  txinrtstxop;	/**< number of data frame transmissions during rts txop */
+	uint32	rxback;		/**< blockack rxcnt */
+	uint32	txback;		/**< blockack txcnt */
+	uint32	bphy_rxcrsglitch;	/**< PHY count of bphy glitches */
+	uint32	phywatch;
+	uint32	rxtoolate;	/**< receive too late */
+	uint32  bphy_badplcp;	/**< number of bad PLCP reception on BPHY rate */
+} wl_cnt_lt40mcst_v1_t;
+
+/** MACSTAT counters for "wl counter" version <= 10 */
+typedef struct {
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32	txallfrm;	/**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control Management (includes retransmissions)
+				 */
+	uint32	txrtsfrm;	/**< number of RTS sent out by the MAC */
+	uint32	txctsfrm;	/**< number of CTS sent out by the MAC */
+	uint32	txackfrm;	/**< number of ACK frames sent out */
+	uint32	txdnlfrm;	/**< number of Null-Data transmissions generated from template */
+	uint32	txbcnfrm;	/**< beacons transmitted */
+	uint32	txfunfl[6];	/**< per-fifo tx underflows */
+	uint32	txfbw;		/**< transmit at fallback bw (dynamic bw) */
+	uint32	PAD0;		/**< padding; previously the number of MPDUs transmitted */
+	uint32	txtplunfl;	/**< Template underflows (mac was too slow to transmit ACK/CTS
+				 * or BCN)
+				 */
+	uint32	txphyerror;	/**< Transmit phy error, type of error is reported in tx-status for
+				 * driver enqueued frames
+				 */
+	uint32  pktengrxducast; /**< unicast frames rxed by the pkteng code */
+	uint32  pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+	uint32	rxfrmtoolong;	/**< Received frame longer than legal limit (2346 bytes) */
+	uint32	rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+	uint32	rxinvmachdr;	/**< Either the protocol version != 0 or frame type not
+				 * data/control/management
+				 */
+	uint32	rxbadfcs;	/**< number of frames for which the CRC check failed in the MAC */
+	uint32	rxbadplcp;	/**< parity check of the PLCP header failed */
+	uint32	rxcrsglitch;	/**< PHY was able to correlate the preamble but not the header */
+	uint32	rxstrt;		/**< Number of received frames with a good PLCP
+				 * (i.e. passing parity check)
+				 */
+	uint32	rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */
+	uint32	rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32	rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
+	uint32	rxrtsucast;  /**< number of unicast RTS addressed to the MAC (good FCS) */
+	uint32	rxctsucast;  /**< number of unicast CTS addressed to the MAC (good FCS) */
+	uint32	rxackucast;	/**< number of ucast ACKS received (good FCS) */
+	uint32	rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
+	uint32	rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+	uint32	rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+	uint32	rxrtsocast;	/**< number of received RTS not addressed to the MAC */
+	uint32	rxctsocast;	/**< number of received CTS not addressed to the MAC */
+	uint32	rxdfrmmcast;	/**< number of RX Data multicast frames received by the MAC */
+	uint32	rxmfrmmcast;	/**< number of RX Management multicast frames received by the MAC */
+	uint32	rxcfrmmcast;	/**< number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32	rxbeaconmbss;	/**< beacons received from member of BSS */
+	uint32	rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32	rxbeaconobss;	/**< beacons received from other BSS */
+	uint32	rxrsptmout;	/**< number of response timeouts for transmitted frames
+				 * expecting a response
+				 */
+	uint32	bcntxcancl;	/**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	PAD1;
+	uint32	rxf0ovfl;	/**< number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/**< Number of receive fifo 1 overflows (obsolete) */
+	uint32	rxf2ovfl;	/**< Number of receive fifo 2 overflows (obsolete) */
+	uint32	txsfovfl;	/**< Number of transmit status fifo overflows (obsolete) */
+	uint32	pmqovfl;	/**< number of PMQ overflows */
+	uint32	rxcgprqfrm;	/**< number of received Probe requests that made it into
+				 * the PRQ fifo
+				 */
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+	uint32	txcgprsfail;	/**< Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32	txcgprssuc;	/**< Tx Probe Response Success (ACK was received) */
+	uint32	prs_timeout;	/**< number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32	rxnack;		/**< obsolete */
+	uint32	frmscons;	/**< obsolete */
+	uint32  txnack;		/**< obsolete */
+	uint32	rxback;		/**< blockack rxcnt */
+	uint32	txback;		/**< blockack txcnt */
+	uint32	bphy_rxcrsglitch;	/**< PHY count of bphy glitches */
+	uint32	rxdrop20s;	/**< drop secondary cnt */
+	uint32	rxtoolate;	/**< receive too late */
+	uint32  bphy_badplcp;	/**< number of bad PLCP reception on BPHY rate */
+} wl_cnt_v_le10_mcst_t;
+
+#define MAX_RX_FIFO 3
+#define WL_RXFIFO_CNT_VERSION  1   /* current version of wl_rxfifo_cnt_t */
+typedef struct {
+	/* Counters for frames received from rx fifos */
+	uint16	version;
+	uint16	length;		/* length of entire structure */
+	uint32	rxf_data[MAX_RX_FIFO];		/* data frames from rx fifo */
+	uint32	rxf_mgmtctl[MAX_RX_FIFO];	/* mgmt/ctl frames from rx fifo */
+} wl_rxfifo_cnt_t;
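+
+/* Illustrative host-side check (a sketch, not part of the original header):
+ * consumers of versioned counter buffers such as wl_rxfifo_cnt_t typically
+ * validate the advertised version and length before reading any fields.
+ * BCME_VERSION is assumed here to be an available error code.
+ *
+ *	if (cnt->version != WL_RXFIFO_CNT_VERSION ||
+ *	    cnt->length < sizeof(wl_rxfifo_cnt_t))
+ *		return BCME_VERSION;
+ */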
+
+typedef struct {
+	uint16	version;	/**< see definition of WL_CNT_T_VERSION */
+	uint16	length;		/**< length of entire structure */
+
+	/* transmit stat counters */
+	uint32	txframe;	/**< tx data frames */
+	uint32	txbyte;		/**< tx data bytes */
+	uint32	txretrans;	/**< tx mac retransmits */
+	uint32	txerror;	/**< tx data errors (derived: sum of others) */
+	uint32	txctl;		/**< tx management frames */
+	uint32	txprshort;	/**< tx short preamble frames */
+	uint32	txserr;		/**< tx status errors */
+	uint32	txnobuf;	/**< tx out of buffers errors */
+	uint32	txnoassoc;	/**< tx discard because we're not associated */
+	uint32	txrunt;		/**< tx runt frames */
+	uint32	txchit;		/**< tx header cache hit (fastpath) */
+	uint32	txcmiss;	/**< tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32	txuflo;		/**< tx fifo underflows */
+	uint32	txphyerr;	/**< tx phy errors (indicated in tx status) */
+	uint32	txphycrs;
+
+	/* receive stat counters */
+	uint32	rxframe;	/**< rx data frames */
+	uint32	rxbyte;		/**< rx data bytes */
+	uint32	rxerror;	/**< rx data errors (derived: sum of others) */
+	uint32	rxctl;		/**< rx management frames */
+	uint32	rxnobuf;	/**< rx out of buffers errors */
+	uint32	rxnondata;	/**< rx non data frames in the data channel errors */
+	uint32	rxbadds;	/**< rx bad DS errors */
+	uint32	rxbadcm;	/**< rx bad control or management frames */
+	uint32	rxfragerr;	/**< rx fragmentation errors */
+	uint32	rxrunt;		/**< rx runt frames */
+	uint32	rxgiant;	/**< rx giant frames */
+	uint32	rxnoscb;	/**< rx no scb error */
+	uint32	rxbadproto;	/**< rx invalid frames */
+	uint32	rxbadsrcmac;	/**< rx frames with Invalid Src Mac */
+	uint32	rxbadda;	/**< rx frames tossed for invalid da */
+	uint32	rxfilter;	/**< rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32	rxoflo;		/**< rx fifo overflow errors */
+	uint32	rxuflo[NFIFO];	/**< rx dma descriptor underflow errors */
+
+	uint32	d11cnt_txrts_off;	/**< d11cnt txrts value when reset d11cnt */
+	uint32	d11cnt_rxcrc_off;	/**< d11cnt rxcrc value when reset d11cnt */
+	uint32	d11cnt_txnocts_off;	/**< d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32	dmade;		/**< tx/rx dma descriptor errors */
+	uint32	dmada;		/**< tx/rx dma data errors */
+	uint32	dmape;		/**< tx/rx dma descriptor protocol errors */
+	uint32	reset;		/**< reset count */
+	uint32	tbtt;		/**< counts the TBTT interrupts */
+	uint32	txdmawar;
+	uint32	pkt_callback_reg_fail;	/**< callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32	txallfrm;	/**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control Management (includes retransmissions)
+				 */
+	uint32	txrtsfrm;	/**< number of RTS sent out by the MAC */
+	uint32	txctsfrm;	/**< number of CTS sent out by the MAC */
+	uint32	txackfrm;	/**< number of ACK frames sent out */
+	uint32	txdnlfrm;	/**< Not used */
+	uint32	txbcnfrm;	/**< beacons transmitted */
+	uint32	txfunfl[6];	/**< per-fifo tx underflows */
+	uint32	rxtoolate;	/**< receive too late */
+	uint32  txfbw;		/**< transmit at fallback bw (dynamic bw) */
+	uint32	txtplunfl;	/**< Template underflows (mac was too slow to transmit ACK/CTS
+				 * or BCN)
+				 */
+	uint32	txphyerror;	/**< Transmit phy error, type of error is reported in tx-status for
+				 * driver enqueued frames
+				 */
+	uint32	rxfrmtoolong;	/**< Received frame longer than legal limit (2346 bytes) */
+	uint32	rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+	uint32	rxinvmachdr;	/**< Either the protocol version != 0 or frame type not
+				 * data/control/management
+				 */
+	uint32	rxbadfcs;	/**< number of frames for which the CRC check failed in the MAC */
+	uint32	rxbadplcp;	/**< parity check of the PLCP header failed */
+	uint32	rxcrsglitch;	/**< PHY was able to correlate the preamble but not the header */
+	uint32	rxstrt;		/**< Number of received frames with a good PLCP
+				 * (i.e. passing parity check)
+				 */
+	uint32	rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+	uint32	rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32	rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
+	uint32	rxrtsucast;	/**< number of unicast RTS addressed to the MAC (good FCS) */
+	uint32	rxctsucast;	/**< number of unicast CTS addressed to the MAC (good FCS) */
+	uint32	rxackucast;	/**< number of ucast ACKS received (good FCS) */
+	uint32	rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
+	uint32	rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+	uint32	rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+	uint32	rxrtsocast;	/**< number of received RTS not addressed to the MAC */
+	uint32	rxctsocast;	/**< number of received CTS not addressed to the MAC */
+	uint32	rxdfrmmcast;	/**< number of RX Data multicast frames received by the MAC */
+	uint32	rxmfrmmcast;	/**< number of RX Management multicast frames received by the MAC */
+	uint32	rxcfrmmcast;	/**< number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32	rxbeaconmbss;	/**< beacons received from member of BSS */
+	uint32	rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32	rxbeaconobss;	/**< beacons received from other BSS */
+	uint32	rxrsptmout;	/**< Number of response timeouts for transmitted frames
+				 * expecting a response
+				 */
+	uint32	bcntxcancl;	/**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxf0ovfl;	/**< Number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/**< Number of receive fifo 1 overflows (obsolete) */
+	uint32	rxf2ovfl;	/**< Number of receive fifo 2 overflows (obsolete) */
+	uint32	txsfovfl;	/**< Number of transmit status fifo overflows (obsolete) */
+	uint32	pmqovfl;	/**< Number of PMQ overflows */
+	uint32	rxcgprqfrm;	/**< Number of received Probe requests that made it into
+				 * the PRQ fifo
+				 */
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+	uint32	txcgprsfail;	/**< Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32	txcgprssuc;	/**< Tx Probe Response Success (ACK was received) */
+	uint32	prs_timeout;	/**< Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32	rxnack;		/**< obsolete */
+	uint32	frmscons;	/**< obsolete */
+	uint32  txnack;		/**< obsolete */
+	uint32	rxback;		/**< blockack rxcnt */
+	uint32	txback;		/**< blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32	txfrag;		/**< dot11TransmittedFragmentCount */
+	uint32	txmulti;	/**< dot11MulticastTransmittedFrameCount */
+	uint32	txfail;		/**< dot11FailedCount */
+	uint32	txretry;	/**< dot11RetryCount */
+	uint32	txretrie;	/**< dot11MultipleRetryCount */
+	uint32	rxdup;		/**< dot11FrameduplicateCount */
+	uint32	txrts;		/**< dot11RTSSuccessCount */
+	uint32	txnocts;	/**< dot11RTSFailureCount */
+	uint32	txnoack;	/**< dot11ACKFailureCount */
+	uint32	rxfrag;		/**< dot11ReceivedFragmentCount */
+	uint32	rxmulti;	/**< dot11MulticastReceivedFrameCount */
+	uint32	rxcrc;		/**< dot11FCSErrorCount */
+	uint32	txfrmsnt;	/**< dot11TransmittedFrameCount (bogus MIB?) */
+	uint32	rxundec;	/**< dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill;	/**< TKIPLocalMICFailures */
+	uint32	tkipcntrmsr;	/**< TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay;	/**< TKIPReplays */
+	uint32	ccmpfmterr;	/**< CCMPFormatErrors */
+	uint32	ccmpreplay;	/**< CCMPReplays */
+	uint32	ccmpundec;	/**< CCMPDecryptErrors */
+	uint32	fourwayfail;	/**< FourWayHandshakeFailures */
+	uint32	wepundec;	/**< dot11WEPUndecryptableCount */
+	uint32	wepicverr;	/**< dot11WEPICVErrorCount */
+	uint32	decsuccess;	/**< DecryptSuccessCount */
+	uint32	tkipicverr;	/**< TKIPICVErrorCount */
+	uint32	wepexcluded;	/**< dot11WEPExcludedCount */
+
+	uint32	txchanrej;	/**< Tx frames suppressed due to channel rejection */
+	uint32	psmwds;		/**< Count PSM watchdogs */
+	uint32	phywatchdog;	/**< Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32	prq_entries_handled;	/**< PRQ entries read in */
+	uint32	prq_undirected_entries;	/**<    which were bcast bss & ssid */
+	uint32	prq_bad_entries;	/**<    which could not be translated to info */
+	uint32	atim_suppress_count;	/**< TX suppressions on ATIM fifo */
+	uint32	bcn_template_not_ready;	/**< Template marked in use on send bcn ... */
+	uint32	bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */
+	uint32	late_tbtt_dpc;	/**< TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/**< packets rx at 1Mbps */
+	uint32  rx2mbps;	/**< packets rx at 2Mbps */
+	uint32  rx5mbps5;	/**< packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/**< packets rx at 6Mbps */
+	uint32  rx9mbps;	/**< packets rx at 9Mbps */
+	uint32  rx11mbps;	/**< packets rx at 11Mbps */
+	uint32  rx12mbps;	/**< packets rx at 12Mbps */
+	uint32  rx18mbps;	/**< packets rx at 18Mbps */
+	uint32  rx24mbps;	/**< packets rx at 24Mbps */
+	uint32  rx36mbps;	/**< packets rx at 36Mbps */
+	uint32  rx48mbps;	/**< packets rx at 48Mbps */
+	uint32  rx54mbps;	/**< packets rx at 54Mbps */
+	uint32  rx108mbps;	/**< packets rx at 108mbps */
+	uint32  rx162mbps;	/**< packets rx at 162mbps */
+	uint32  rx216mbps;	/**< packets rx at 216 mbps */
+	uint32  rx270mbps;	/**< packets rx at 270 mbps */
+	uint32  rx324mbps;	/**< packets rx at 324 mbps */
+	uint32  rx378mbps;	/**< packets rx at 378 mbps */
+	uint32  rx432mbps;	/**< packets rx at 432 mbps */
+	uint32  rx486mbps;	/**< packets rx at 486 mbps */
+	uint32  rx540mbps;	/**< packets rx at 540 mbps */
+
+	/* pkteng rx frame stats */
+	uint32	pktengrxducast; /**< unicast frames rxed by the pkteng code */
+	uint32	pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+
+	uint32	rfdisable;	/**< count of radio disables */
+	uint32	bphy_rxcrsglitch;	/**< PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32	txexptime;	/**< Tx frames suppressed due to timer expiration */
+
+	uint32	txmpdu_sgi;	/**< count for sgi transmit */
+	uint32	rxmpdu_sgi;	/**< count for sgi received */
+	uint32	txmpdu_stbc;	/**< count for stbc transmit */
+	uint32	rxmpdu_stbc;	/**< count for stbc received */
+
+	uint32	rxundec_mcst;	/**< dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill_mcst;	/**< TKIPLocalMICFailures */
+	uint32	tkipcntrmsr_mcst;	/**< TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay_mcst;	/**< TKIPReplays */
+	uint32	ccmpfmterr_mcst;	/**< CCMPFormatErrors */
+	uint32	ccmpreplay_mcst;	/**< CCMPReplays */
+	uint32	ccmpundec_mcst;	/**< CCMPDecryptErrors */
+	uint32	fourwayfail_mcst;	/**< FourWayHandshakeFailures */
+	uint32	wepundec_mcst;	/**< dot11WEPUndecryptableCount */
+	uint32	wepicverr_mcst;	/**< dot11WEPICVErrorCount */
+	uint32	decsuccess_mcst;	/**< DecryptSuccessCount */
+	uint32	tkipicverr_mcst;	/**< TKIPICVErrorCount */
+	uint32	wepexcluded_mcst;	/**< dot11WEPExcludedCount */
+
+	uint32	dma_hang;	/**< count for dma hang */
+	uint32	reinit;		/**< count for reinit */
+
+	uint32  pstatxucast;	/**< count of ucast frames xmitted on all psta assoc */
+	uint32  pstatxnoassoc;	/**< count of txnoassoc frames xmitted on all psta assoc */
+	uint32  pstarxucast;	/**< count of ucast frames received on all psta assoc */
+	uint32  pstarxbcmc;	/**< count of bcmc frames received on all psta */
+	uint32  pstatxbcmc;	/**< count of bcmc frames transmitted on all psta */
+
+	uint32  cso_passthrough; /**< hw cso required but passthrough */
+	uint32	cso_normal;	/**< hw cso hdr for normal process */
+	uint32	chained;	/**< number of frames chained */
+	uint32	chainedsz1;	/**< number of chain size 1 frames */
+	uint32	unchained;	/**< number of frames not chained */
+	uint32	maxchainsz;	/**< max chain size so far */
+	uint32	currchainsz;	/**< current chain size */
+	uint32	rxdrop20s;	/**< drop secondary cnt */
+	uint32	pciereset;	/**< Secondary Bus Reset issued by driver */
+	uint32	cfgrestore;	/**< configspace restore by driver */
+	uint32	reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */
+	uint32  rxrtry;		/**< num of received packets with retry bit on */
+	uint32	txmpdu;		/**< macstat cnt only valid in ver 11. number of MPDUs txed.  */
+	uint32	rxnodelim;	/**< macstat cnt only valid in ver 11.
+				 * number of occasions that no valid delimiter is detected
+				 * by ampdu parser.
+				 */
+	uint32  rxmpdu_mu;      /**< Number of MU MPDUs received */
+
+	/* detailed control/management frames */
+	uint32	txbar;		/**< Number of TX BAR */
+	uint32	rxbar;		/**< Number of RX BAR */
+	uint32	txpspoll;	/**< Number of TX PS-poll */
+	uint32	rxpspoll;	/**< Number of RX PS-poll */
+	uint32	txnull;		/**< Number of TX NULL_DATA */
+	uint32	rxnull;		/**< Number of RX NULL_DATA */
+	uint32	txqosnull;	/**< Number of TX NULL_QoSDATA */
+	uint32	rxqosnull;	/**< Number of RX NULL_QoSDATA */
+	uint32	txassocreq;	/**< Number of TX ASSOC request */
+	uint32	rxassocreq;	/**< Number of RX ASSOC request */
+	uint32	txreassocreq;	/**< Number of TX REASSOC request */
+	uint32	rxreassocreq;	/**< Number of RX REASSOC request */
+	uint32	txdisassoc;	/**< Number of TX DISASSOC */
+	uint32	rxdisassoc;	/**< Number of RX DISASSOC */
+	uint32	txassocrsp;	/**< Number of TX ASSOC response */
+	uint32	rxassocrsp;	/**< Number of RX ASSOC response */
+	uint32	txreassocrsp;	/**< Number of TX REASSOC response */
+	uint32	rxreassocrsp;	/**< Number of RX REASSOC response */
+	uint32	txauth;		/**< Number of TX AUTH */
+	uint32	rxauth;		/**< Number of RX AUTH */
+	uint32	txdeauth;	/**< Number of TX DEAUTH */
+	uint32	rxdeauth;	/**< Number of RX DEAUTH */
+	uint32	txprobereq;	/**< Number of TX probe request */
+	uint32	rxprobereq;	/**< Number of RX probe request */
+	uint32	txprobersp;	/**< Number of TX probe response */
+	uint32	rxprobersp;	/**< Number of RX probe response */
+	uint32	txaction;	/**< Number of TX action frame */
+	uint32	rxaction;	/**< Number of RX action frame */
+	uint32  ampdu_wds;      /**< Number of AMPDU watchdogs */
+	uint32  txlost;         /**< Number of lost packets reported in txs */
+	uint32  txdatamcast;	/**< Number of TX multicast data packets */
+	uint32  txdatabcast;	/**< Number of TX broadcast data packets */
+	uint32  txbcast;        /* Broadcast TransmittedFrameCount */
+	uint32  txdropped;      /* tx dropped pkts */
+	uint32  rxbcast;        /* BroadcastReceivedFrameCount */
+	uint32  rxdropped;      /* rx dropped pkts (derived: sum of others) */
+
+} wl_cnt_ver_11_t;
+
+typedef struct {
+	uint16  version;    /**< see definition of WL_CNT_T_VERSION */
+	uint16  length;     /**< length of entire structure */
+
+	/* transmit stat counters */
+	uint32  txframe;    /**< tx data frames */
+	uint32  txbyte;     /**< tx data bytes */
+	uint32  txretrans;  /**< tx mac retransmits */
+	uint32  txerror;    /**< tx data errors (derived: sum of others) */
+	uint32  txctl;      /**< tx management frames */
+	uint32  txprshort;  /**< tx short preamble frames */
+	uint32  txserr;     /**< tx status errors */
+	uint32  txnobuf;    /**< tx out of buffers errors */
+	uint32  txnoassoc;  /**< tx discard because we're not associated */
+	uint32  txrunt;     /**< tx runt frames */
+	uint32  txchit;     /**< tx header cache hit (fastpath) */
+	uint32  txcmiss;    /**< tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32  txuflo;     /**< tx fifo underflows */
+	uint32  txphyerr;   /**< tx phy errors (indicated in tx status) */
+	uint32  txphycrs;
+
+	/* receive stat counters */
+	uint32  rxframe;    /**< rx data frames */
+	uint32  rxbyte;     /**< rx data bytes */
+	uint32  rxerror;    /**< rx data errors (derived: sum of others) */
+	uint32  rxctl;      /**< rx management frames */
+	uint32  rxnobuf;    /**< rx out of buffers errors */
+	uint32  rxnondata;  /**< rx non data frames in the data channel errors */
+	uint32  rxbadds;    /**< rx bad DS errors */
+	uint32  rxbadcm;    /**< rx bad control or management frames */
+	uint32  rxfragerr;  /**< rx fragmentation errors */
+	uint32  rxrunt;     /**< rx runt frames */
+	uint32  rxgiant;    /**< rx giant frames */
+	uint32  rxnoscb;    /**< rx no scb error */
+	uint32  rxbadproto; /**< rx invalid frames */
+	uint32  rxbadsrcmac;    /**< rx frames with Invalid Src Mac */
+	uint32  rxbadda;    /**< rx frames tossed for invalid da */
+	uint32  rxfilter;   /**< rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32  rxoflo;     /**< rx fifo overflow errors */
+	uint32  rxuflo[NFIFO];  /**< rx dma descriptor underflow errors */
+
+	uint32  d11cnt_txrts_off;   /**< d11cnt txrts value when reset d11cnt */
+	uint32  d11cnt_rxcrc_off;   /**< d11cnt rxcrc value when reset d11cnt */
+	uint32  d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32  dmade;      /**< tx/rx dma descriptor errors */
+	uint32  dmada;      /**< tx/rx dma data errors */
+	uint32  dmape;      /**< tx/rx dma descriptor protocol errors */
+	uint32  reset;      /**< reset count */
+	uint32  tbtt;       /**< counts the TBTT interrupts */
+	uint32  txdmawar;
+	uint32  pkt_callback_reg_fail;  /**< callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32  txallfrm;   /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+			     * Control Management (includes retransmissions)
+			     */
+	uint32  txrtsfrm;   /**< number of RTS sent out by the MAC */
+	uint32  txctsfrm;   /**< number of CTS sent out by the MAC */
+	uint32  txackfrm;   /**< number of ACK frames sent out */
+	uint32  txdnlfrm;   /**< Not used */
+	uint32  txbcnfrm;   /**< beacons transmitted */
+	uint32  txfunfl[6]; /**< per-fifo tx underflows */
+	uint32	rxtoolate;	/**< receive too late */
+	uint32  txfbw;	    /**< transmit at fallback bw (dynamic bw) */
+	uint32  txtplunfl;  /**< Template underflows (mac was too slow to transmit ACK/CTS
+			     * or BCN)
+			     */
+	uint32  txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+			     * driver enqueued frames
+			     */
+	uint32  rxfrmtoolong;   /**< Received frame longer than legal limit (2346 bytes) */
+	uint32  rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+	uint32  rxinvmachdr;    /**< Either the protocol version != 0 or frame type not
+				 * data/control/management
+			   */
+	uint32  rxbadfcs;   /**< number of frames for which the CRC check failed in the MAC */
+	uint32  rxbadplcp;  /**< parity check of the PLCP header failed */
+	uint32  rxcrsglitch;    /**< PHY was able to correlate the preamble but not the header */
+	uint32  rxstrt;     /**< Number of received frames with a good PLCP
+			     * (i.e. passing parity check)
+			     */
+	uint32  rxdfrmucastmbss; /**< # of received DATA frames with good FCS and matching RA */
+	uint32  rxmfrmucastmbss; /**< # of received mgmt frames with good FCS and matching RA */
+	uint32  rxcfrmucast;     /**< # of received CNTRL frames with good FCS and matching RA */
+	uint32  rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+	uint32  rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+	uint32  rxackucast; /**< number of ucast ACKS received (good FCS) */
+	uint32  rxdfrmocast;    /**< # of received DATA frames (good FCS and not matching RA) */
+	uint32  rxmfrmocast;    /**< # of received MGMT frames (good FCS and not matching RA) */
+	uint32  rxcfrmocast;    /**< # of received CNTRL frame (good FCS and not matching RA) */
+	uint32  rxrtsocast; /**< number of received RTS not addressed to the MAC */
+	uint32  rxctsocast; /**< number of received CTS not addressed to the MAC */
+	uint32  rxdfrmmcast;    /**< number of RX Data multicast frames received by the MAC */
+	uint32  rxmfrmmcast;    /**< number of RX Management multicast frames received by the MAC */
+	uint32  rxcfrmmcast;    /**< number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32  rxbeaconmbss;   /**< beacons received from member of BSS */
+	uint32  rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32  rxbeaconobss;   /**< beacons received from other BSS */
+	uint32  rxrsptmout; /**< Number of response timeouts for transmitted frames
+			     * expecting a response
+			     */
+	uint32  bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32  rxf0ovfl;   /**< Number of receive fifo 0 overflows */
+	uint32  rxf1ovfl;   /**< Number of receive fifo 1 overflows (obsolete) */
+	uint32  rxf2ovfl;   /**< Number of receive fifo 2 overflows (obsolete) */
+	uint32  txsfovfl;   /**< Number of transmit status fifo overflows (obsolete) */
+	uint32  pmqovfl;    /**< Number of PMQ overflows */
+	uint32  rxcgprqfrm; /**< Number of received Probe requests that made it into
+			     * the PRQ fifo
+			     */
+	uint32  rxcgprsqovfl;   /**< Rx Probe Request Queue overflow in the AP */
+	uint32  txcgprsfail;    /**< Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32  txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+	uint32  prs_timeout;    /**< Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32  rxnack;
+	uint32  frmscons;
+	uint32  txnack;		/**< obsolete */
+	uint32	rxback;		/**< blockack rxcnt */
+	uint32	txback;		/**< blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32  txfrag;     /**< dot11TransmittedFragmentCount */
+	uint32  txmulti;    /**< dot11MulticastTransmittedFrameCount */
+	uint32  txfail;     /**< dot11FailedCount */
+	uint32  txretry;    /**< dot11RetryCount */
+	uint32  txretrie;   /**< dot11MultipleRetryCount */
+	uint32  rxdup;      /**< dot11FrameduplicateCount */
+	uint32  txrts;      /**< dot11RTSSuccessCount */
+	uint32  txnocts;    /**< dot11RTSFailureCount */
+	uint32  txnoack;    /**< dot11ACKFailureCount */
+	uint32  rxfrag;     /**< dot11ReceivedFragmentCount */
+	uint32  rxmulti;    /**< dot11MulticastReceivedFrameCount */
+	uint32  rxcrc;      /**< dot11FCSErrorCount */
+	uint32  txfrmsnt;   /**< dot11TransmittedFrameCount (bogus MIB?) */
+	uint32  rxundec;    /**< dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill;   /**< TKIPLocalMICFailures */
+	uint32  tkipcntrmsr;    /**< TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay; /**< TKIPReplays */
+	uint32  ccmpfmterr; /**< CCMPFormatErrors */
+	uint32  ccmpreplay; /**< CCMPReplays */
+	uint32  ccmpundec;  /**< CCMPDecryptErrors */
+	uint32  fourwayfail;    /**< FourWayHandshakeFailures */
+	uint32  wepundec;   /**< dot11WEPUndecryptableCount */
+	uint32  wepicverr;  /**< dot11WEPICVErrorCount */
+	uint32  decsuccess; /**< DecryptSuccessCount */
+	uint32  tkipicverr; /**< TKIPICVErrorCount */
+	uint32  wepexcluded;    /**< dot11WEPExcludedCount */
+
+	uint32  rxundec_mcst;   /**< dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill_mcst;  /**< TKIPLocalMICFailures */
+	uint32  tkipcntrmsr_mcst;   /**< TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay_mcst;    /**< TKIPReplays */
+	uint32  ccmpfmterr_mcst;    /**< CCMPFormatErrors */
+	uint32  ccmpreplay_mcst;    /**< CCMPReplays */
+	uint32  ccmpundec_mcst; /**< CCMPDecryptErrors */
+	uint32  fourwayfail_mcst;   /**< FourWayHandshakeFailures */
+	uint32  wepundec_mcst;  /**< dot11WEPUndecryptableCount */
+	uint32  wepicverr_mcst; /**< dot11WEPICVErrorCount */
+	uint32  decsuccess_mcst;    /**< DecryptSuccessCount */
+	uint32  tkipicverr_mcst;    /**< TKIPICVErrorCount */
+	uint32  wepexcluded_mcst;   /**< dot11WEPExcludedCount */
+
+	uint32  txchanrej;  /**< Tx frames suppressed due to channel rejection */
+	uint32  txexptime;  /**< Tx frames suppressed due to timer expiration */
+	uint32  psmwds;     /**< Count PSM watchdogs */
+	uint32  phywatchdog;    /**< Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32  prq_entries_handled;    /**< PRQ entries read in */
+	uint32  prq_undirected_entries; /**<    which were bcast bss & ssid */
+	uint32  prq_bad_entries;    /**<    which could not be translated to info */
+	uint32  atim_suppress_count;    /**< TX suppressions on ATIM fifo */
+	uint32  bcn_template_not_ready; /**< Template marked in use on send bcn ... */
+	uint32  bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */
+	uint32  late_tbtt_dpc;  /**< TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;    /**< packets rx at 1Mbps */
+	uint32  rx2mbps;    /**< packets rx at 2Mbps */
+	uint32  rx5mbps5;   /**< packets rx at 5.5Mbps */
+	uint32  rx6mbps;    /**< packets rx at 6Mbps */
+	uint32  rx9mbps;    /**< packets rx at 9Mbps */
+	uint32  rx11mbps;   /**< packets rx at 11Mbps */
+	uint32  rx12mbps;   /**< packets rx at 12Mbps */
+	uint32  rx18mbps;   /**< packets rx at 18Mbps */
+	uint32  rx24mbps;   /**< packets rx at 24Mbps */
+	uint32  rx36mbps;   /**< packets rx at 36Mbps */
+	uint32  rx48mbps;   /**< packets rx at 48Mbps */
+	uint32  rx54mbps;   /**< packets rx at 54Mbps */
+	uint32  rx108mbps;  /**< packets rx at 108mbps */
+	uint32  rx162mbps;  /**< packets rx at 162mbps */
+	uint32  rx216mbps;  /**< packets rx at 216 mbps */
+	uint32  rx270mbps;  /**< packets rx at 270 mbps */
+	uint32  rx324mbps;  /**< packets rx at 324 mbps */
+	uint32  rx378mbps;  /**< packets rx at 378 mbps */
+	uint32  rx432mbps;  /**< packets rx at 432 mbps */
+	uint32  rx486mbps;  /**< packets rx at 486 mbps */
+	uint32  rx540mbps;  /**< packets rx at 540 mbps */
+
+	/* pkteng rx frame stats */
+	uint32  pktengrxducast; /**< unicast frames rxed by the pkteng code */
+	uint32  pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+
+	uint32  rfdisable;  /**< count of radio disables */
+	uint32  bphy_rxcrsglitch;   /**< PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32  txmpdu_sgi; /**< count for sgi transmit */
+	uint32  rxmpdu_sgi; /**< count for sgi received */
+	uint32  txmpdu_stbc;    /**< count for stbc transmit */
+	uint32  rxmpdu_stbc;    /**< count for stbc received */
+
+	uint32	rxdrop20s;	/**< drop secondary cnt */
+} wl_cnt_ver_6_t;
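+
+/* Illustrative dispatch sketch (an assumption about host-side parsing, not
+ * part of the original header): both counter layouts above begin with a
+ * version/length pair, so a consumer can peek at the version before choosing
+ * a structure. parse_v11() and parse_v6() are hypothetical helpers, and the
+ * version values are inferred from the type names.
+ *
+ *	uint16 ver = ((const uint16 *)buf)[0];
+ *	if (ver == 11)
+ *		parse_v11((const wl_cnt_ver_11_t *)buf);
+ *	else if (ver == 6)
+ *		parse_v6((const wl_cnt_ver_6_t *)buf);
+ */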
+
+#define	WL_DELTA_STATS_T_VERSION	2	/**< current version of wl_delta_stats_t struct */
+
+typedef struct {
+	uint16 version;     /**< see definition of WL_DELTA_STATS_T_VERSION */
+	uint16 length;      /**< length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txframe;     /**< tx data frames */
+	uint32 txbyte;      /**< tx data bytes */
+	uint32 txretrans;   /**< tx mac retransmits */
+	uint32 txfail;      /**< tx failures */
+
+	/* receive stat counters */
+	uint32 rxframe;     /**< rx data frames */
+	uint32 rxbyte;      /**< rx data bytes */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/**< packets rx at 1Mbps */
+	uint32  rx2mbps;	/**< packets rx at 2Mbps */
+	uint32  rx5mbps5;	/**< packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/**< packets rx at 6Mbps */
+	uint32  rx9mbps;	/**< packets rx at 9Mbps */
+	uint32  rx11mbps;	/**< packets rx at 11Mbps */
+	uint32  rx12mbps;	/**< packets rx at 12Mbps */
+	uint32  rx18mbps;	/**< packets rx at 18Mbps */
+	uint32  rx24mbps;	/**< packets rx at 24Mbps */
+	uint32  rx36mbps;	/**< packets rx at 36Mbps */
+	uint32  rx48mbps;	/**< packets rx at 48Mbps */
+	uint32  rx54mbps;	/**< packets rx at 54Mbps */
+	uint32  rx108mbps;	/**< packets rx at 108mbps */
+	uint32  rx162mbps;	/**< packets rx at 162mbps */
+	uint32  rx216mbps;	/**< packets rx at 216 mbps */
+	uint32  rx270mbps;	/**< packets rx at 270 mbps */
+	uint32  rx324mbps;	/**< packets rx at 324 mbps */
+	uint32  rx378mbps;	/**< packets rx at 378 mbps */
+	uint32  rx432mbps;	/**< packets rx at 432 mbps */
+	uint32  rx486mbps;	/**< packets rx at 486 mbps */
+	uint32  rx540mbps;	/**< packets rx at 540 mbps */
+
+	/* phy stats */
+	uint32 rxbadplcp;
+	uint32 rxcrsglitch;
+	uint32 bphy_rxcrsglitch;
+	uint32 bphy_badplcp;
+
+} wl_delta_stats_t;
+
+/* Partial statistics counter report */
+#define WL_CNT_CTL_MGT_FRAMES	0
+
+typedef struct {
+	uint16	type;
+	uint16	len;
+
+	/* detailed control/management frames */
+	uint32	txnull;
+	uint32	rxnull;
+	uint32	txqosnull;
+	uint32	rxqosnull;
+	uint32	txassocreq;
+	uint32	rxassocreq;
+	uint32	txreassocreq;
+	uint32	rxreassocreq;
+	uint32	txdisassoc;
+	uint32	rxdisassoc;
+	uint32	txassocrsp;
+	uint32	rxassocrsp;
+	uint32	txreassocrsp;
+	uint32	rxreassocrsp;
+	uint32	txauth;
+	uint32	rxauth;
+	uint32	txdeauth;
+	uint32	rxdeauth;
+	uint32	txprobereq;
+	uint32	rxprobereq;
+	uint32	txprobersp;
+	uint32	rxprobersp;
+	uint32	txaction;
+	uint32	rxaction;
+	uint32	txrts;
+	uint32	rxrts;
+	uint32	txcts;
+	uint32	rxcts;
+	uint32	txack;
+	uint32	rxack;
+	uint32	txbar;
+	uint32	rxbar;
+	uint32	txback;
+	uint32	rxback;
+	uint32	txpspoll;
+	uint32	rxpspoll;
+} wl_ctl_mgt_cnt_t;
+
+typedef struct {
+	uint32 packets;
+	uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+	uint16	version;	/**< see definition of WL_WME_CNT_VERSION */
+	uint16	length;		/**< length of entire structure */
+
+	wl_traffic_stats_t tx[AC_COUNT];	/**< Packets transmitted */
+	wl_traffic_stats_t tx_failed[AC_COUNT];	/**< Packets dropped or failed to transmit */
+	wl_traffic_stats_t rx[AC_COUNT];	/**< Packets received */
+	wl_traffic_stats_t rx_failed[AC_COUNT];	/**< Packets failed to receive */
+
+	wl_traffic_stats_t forward[AC_COUNT];	/**< Packets forwarded by AP */
+
+	wl_traffic_stats_t tx_expired[AC_COUNT]; /**< packets dropped due to lifetime expiry */
+
+} wl_wme_cnt_t;
+
+struct wl_msglevel2 {
+	uint32 low;
+	uint32 high;
+};
+
+#define WL_ICMP_IPV6_CFG_VERSION         1
+#define WL_ICMP_IPV6_CLEAR_ALL           (1 << 0)
+
+typedef struct wl_icmp_ipv6_cfg {
+	uint16 version;
+	uint16 length;
+	uint16 fixed_length;
+	uint16 flags;
+	uint32 num_ipv6;
+	/* num_ipv6 to follow */
+	struct ipv6_addr host_ipv6[];
+} wl_icmp_ipv6_cfg_t;
+
+#define WL_ICMP_CFG_IPV6_FIXED_LEN      OFFSETOF(wl_icmp_ipv6_cfg_t, host_ipv6)
+#define WL_ICMP_CFG_IPV6_LEN(count)    (WL_ICMP_CFG_IPV6_FIXED_LEN + \
+					((count) * sizeof(struct ipv6_addr)))
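+
+/* Illustrative sizing sketch (an assumption about typical usage, not part of
+ * the original header): for two offloaded IPv6 addresses the variable-length
+ * config could be laid out in a caller-provided buffer as below, with two
+ * struct ipv6_addr entries copied into host_ipv6[] afterwards.
+ *
+ *	uint16 len = WL_ICMP_CFG_IPV6_LEN(2);
+ *	wl_icmp_ipv6_cfg_t *cfg = (wl_icmp_ipv6_cfg_t *)buf;
+ *	cfg->version = WL_ICMP_IPV6_CFG_VERSION;
+ *	cfg->length = len;
+ *	cfg->fixed_length = WL_ICMP_CFG_IPV6_FIXED_LEN;
+ *	cfg->num_ipv6 = 2;
+ */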
+
+typedef struct wl_mkeep_alive_pkt {
+	uint16	version; /* Version for mkeep_alive */
+	uint16	length; /* length of fixed parameters in the structure */
+	uint32	period_msec; /* high bit on means immediate send */
+	uint16	len_bytes;
+	uint8	keep_alive_id; /* 0 - 3 for N = 4 */
+	uint8	data[1];
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION		1
+#define WL_MKEEP_ALIVE_FIXED_LEN	OFFSETOF(wl_mkeep_alive_pkt_t, data)
+#define WL_MKEEP_ALIVE_PRECISION	500
+#define WL_MKEEP_ALIVE_PERIOD_MASK  0x7FFFFFFF
+#define WL_MKEEP_ALIVE_IMMEDIATE    0x80000000
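+
+/* Illustrative encoding sketch (not part of the original header): period_msec
+ * carries the interval in its low 31 bits and the top bit requests an
+ * immediate first transmission, so a 30 s keep-alive sent immediately could
+ * be encoded as:
+ *
+ *	pkt->version = WL_MKEEP_ALIVE_VERSION;
+ *	pkt->length = WL_MKEEP_ALIVE_FIXED_LEN;
+ *	pkt->period_msec = (30000 & WL_MKEEP_ALIVE_PERIOD_MASK) |
+ *	        WL_MKEEP_ALIVE_IMMEDIATE;
+ */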
+
+/** TCP Keep-Alive conn struct */
+typedef struct wl_mtcpkeep_alive_conn_pkt {
+	struct ether_addr saddr;		/**< src mac address */
+	struct ether_addr daddr;		/**< dst mac address */
+	struct ipv4_addr sipaddr;		/**< source IP addr */
+	struct ipv4_addr dipaddr;		/**< dest IP addr */
+	uint16 sport;				/**< src port */
+	uint16 dport;				/**< dest port */
+	uint32 seq;				/**< seq number */
+	uint32 ack;				/**< ACK number */
+	uint16 tcpwin;				/**< TCP window */
+	uint16 PAD;
+} wl_mtcpkeep_alive_conn_pkt_t;
+
+/** TCP Keep-Alive interval struct */
+typedef struct wl_mtcpkeep_alive_timers_pkt {
+	uint16 interval;		/**< interval timer */
+	uint16 retry_interval;		/**< retry_interval timer */
+	uint16 retry_count;		/**< retry_count */
+} wl_mtcpkeep_alive_timers_pkt_t;
+
+typedef struct wake_info {
+	uint32 wake_reason;
+	uint32 wake_info_len;		/**< size of packet */
+	uint8  packet[];
+} wake_info_t;
+
+typedef struct wake_pkt {
+	uint32 wake_pkt_len;		/**< size of packet */
+	uint8  packet[];
+} wake_pkt_t;
+
+
+#define WL_MTCPKEEP_ALIVE_VERSION		1
+
+/* #ifdef WLBA */
+
+#define WLC_BA_CNT_VERSION  1   /**< current version of wlc_ba_cnt_t */
+
+/** block ack related stats */
+typedef struct wlc_ba_cnt {
+	uint16  version;    /**< WLC_BA_CNT_VERSION */
+	uint16  length;     /**< length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txpdu;       /**< pdus sent */
+	uint32 txsdu;       /**< sdus sent */
+	uint32 txfc;        /**< tx side flow controlled packets */
+	uint32 txfci;       /**< tx side flow control initiated */
+	uint32 txretrans;   /**< retransmitted pdus */
+	uint32 txbatimer;   /**< ba resend due to timer */
+	uint32 txdrop;      /**< dropped packets */
+	uint32 txaddbareq;  /**< addba req sent */
+	uint32 txaddbaresp; /**< addba resp sent */
+	uint32 txdelba;     /**< delba sent */
+	uint32 txba;        /**< ba sent */
+	uint32 txbar;       /**< bar sent */
+	uint32 txpad[4];    /**< future */
+
+	/* receive side counters */
+	uint32 rxpdu;       /**< pdus recd */
+	uint32 rxqed;       /**< pdus buffered before sending up */
+	uint32 rxdup;       /**< duplicate pdus */
+	uint32 rxnobuf;     /**< pdus discarded due to no buf */
+	uint32 rxaddbareq;  /**< addba req recd */
+	uint32 rxaddbaresp; /**< addba resp recd */
+	uint32 rxdelba;     /**< delba recd */
+	uint32 rxba;        /**< ba recd */
+	uint32 rxbar;       /**< bar recd */
+	uint32 rxinvba;     /**< invalid ba recd */
+	uint32 rxbaholes;   /**< ba recd with holes */
+	uint32 rxunexp;     /**< unexpected packets */
+	uint32 rxpad[4];    /**< future */
+} wlc_ba_cnt_t;
+/* #endif  WLBA */
+
+/** structure for per-tid ampdu control */
+struct ampdu_tid_control {
+	uint8 tid;			/* tid */
+	uint8 enable;			/* enable/disable */
+};
+
+/** struct for ampdu tx/rx aggregation control */
+struct ampdu_aggr {
+	int8 aggr_override;	/**< aggr overridden by dongle. Not to be set by host. */
+	uint16 conf_TID_bmap;	/**< bitmap of TIDs to configure */
+	uint16 enab_TID_bmap;	/**< enable/disable per TID */
+};
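+
+/* Illustrative sketch (an assumption about typical host usage, not part of
+ * the original header): to enable aggregation on TIDs 0 and 5 only, a host
+ * would select those TIDs in conf_TID_bmap and set the matching bits in
+ * enab_TID_bmap, leaving all other TIDs unconfigured:
+ *
+ *	struct ampdu_aggr aggr = {0};
+ *	aggr.conf_TID_bmap = (1 << 0) | (1 << 5);
+ *	aggr.enab_TID_bmap = (1 << 0) | (1 << 5);
+ */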
+
+/** structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+	struct ether_addr ea;		/**< Station address */
+	uint8 tid;			/**< tid */
+	uint8 initiator;	/**< 0 is recipient, 1 is originator */
+};
+
+/** structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+	uint8 tid;	/**< tid */
+	uint8 retry;	/**< retry value */
+};
+
+#define BDD_FNAME_LEN       32  /**< Max length of friendly name */
+typedef struct bdd_fname {
+	uint8 len;          /**< length of friendly name */
+	uchar name[BDD_FNAME_LEN];  /**< friendly name */
+} bdd_fname_t;
+
+/* structure for addts arguments */
+/** For ioctls that take a list of TSPEC */
+struct tslist {
+	int32 count;			/**< number of tspecs */
+	struct tsinfo_arg tsinfo[];	/**< variable length array of tsinfo */
+};
+
+/* WLTDLS */
+/**structure for tdls iovars */
+typedef struct tdls_iovar {
+	struct ether_addr ea;		/**< Station address */
+	uint8 mode;			/**< mode: depends on iovar */
+	uint8 PAD;
+	chanspec_t chanspec;
+	uint16 PAD;
+	uint32 pad;			/**< future */
+} tdls_iovar_t;
+
+#define TDLS_WFD_IE_SIZE		512
+/**structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+	struct ether_addr ea;		/**< Station address */
+	uint8 mode;
+	uint8 PAD;
+	uint16 length;
+	uint8 data[TDLS_WFD_IE_SIZE];
+} tdls_wfd_ie_iovar_t;
+/* #endif WLTDLS */
+
+/** structure for addts/delts arguments */
+typedef struct tspec_arg {
+	uint16 version;			/**< see definition of TSPEC_ARG_VERSION */
+	uint16 length;			/**< length of entire structure */
+	uint32 flag;			/**< bit field */
+	/* TSPEC Arguments */
+	struct tsinfo_arg tsinfo;	/**< TS Info bit field */
+	uint8  PAD;
+	uint16 nom_msdu_size;		/**< (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/**< Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;		/**< Minimum Service Interval (us) */
+	uint32 max_srv_interval;		/**< Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/**< Inactivity Interval (us) */
+	uint32 suspension_interval;	/**< Suspension Interval (us) */
+	uint32 srv_start_time;		/**< Service Start Time (us) */
+	uint32 min_data_rate;		/**< Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/**< Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/**< Peak Data Rate (bps) */
+	uint32 max_burst_size;		/**< Maximum Burst Size (bytes) */
+	uint32 delay_bound;		/**< Delay Bound (us) */
+	uint32 min_phy_rate;		/**< Minimum PHY Rate (bps) */
+	uint16 surplus_bw;		/**< Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+	uint16 medium_time;		/**< Medium Time (32 us/s periods) */
+	uint8 dialog_token;		/**< dialog token */
+	uint8  PAD[3];
+} tspec_arg_t;
+
+/** tspec arg for desired station */
+typedef	struct tspec_per_sta_arg {
+	struct ether_addr ea;
+	uint8  PAD[2];
+	struct tspec_arg ts;
+} tspec_per_sta_arg_t;
+
+/** structure for max bandwidth for each access category */
+typedef	struct wme_max_bandwidth {
+	uint32	ac[AC_COUNT];	/**< max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+/* current version of wl_tspec_arg_t struct */
+#define	TSPEC_ARG_VERSION		2	/**< current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH		55	/**< argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN	42	/**< default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR	0x3000	/**< default surplus bw */
+
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE  80
+#define WLC_WOWL_MAX_KEEPALIVE	2
+
+/** Packet lifetime configuration per ac */
+typedef struct wl_lifetime {
+	uint32 ac;	        /**< access class */
+	uint32 lifetime;    /**< Packet lifetime value in ms */
+} wl_lifetime_t;
+
+/** Management time configuration */
+typedef struct wl_lifetime_mg {
+	uint32 mgmt_bitmap;	/**< Mgmt subtype */
+	uint32 lifetime;    /**< Packet lifetime value in us */
+} wl_lifetime_mg_t;
+
+/* MAC Sample Capture related */
+#define	WL_MACCAPTR_DEFSTART_PTR	0xA00
+#define	WL_MACCAPTR_DEFSTOP_PTR		0xA3F
+#define	WL_MACCAPTR_DEFSZ		0x3F
+
+#define WL_MACCAPTR_DEF_MASK		0xFFFFFFFF
+
+typedef enum {
+	WL_MACCAPT_TRIG		= 0,
+	WL_MACCAPT_STORE	= 1,
+	WL_MACCAPT_TRANS	= 2,
+	WL_MACCAPT_MATCH	= 3
+} maccaptr_optn;
+
+typedef enum {
+	WL_MACCAPT_STRT	= 1,
+	WL_MACCAPT_STOP	= 2,
+	WL_MACCAPT_RST	= 3
+} maccaptr_cmd_t;
+
+/* MAC Sample Capture Set-up Parameters */
+typedef struct wl_maccapture_params {
+	uint8	gpio_sel;
+	uint8	la_mode;	/* TRUE: GPIO Out Enabled */
+	uint8 	PAD[2];
+	uint32	start_ptr;	/* Start address to store */
+	uint32	stop_ptr;	/* Stop address to store */
+	uint8	optn_bmp;	/* Options */
+	uint8 	PAD[3];
+	uint32	tr_mask;	/* Trigger Mask */
+	uint32	tr_val;		/* Trigger Value */
+	uint32	s_mask;		/* Store Mode Mask */
+	uint32	x_mask;		/* Trans. Mode Mask */
+	uint32	m_mask;		/* Match Mode Mask */
+	uint32	m_val;		/* Match Value */
+	maccaptr_cmd_t cmd;	/* Start / Stop */
+} wl_maccapture_params_t;
+
+/** Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+	uint8 mode;		/**< value 0 or 1 */
+	uint8 count;		/**< count # of beacons before switching */
+	chanspec_t chspec;	/**< chanspec */
+	uint8 reg;		/**< regulatory class */
+	uint8 frame_type;		/**< csa frame type, unicast or broadcast */
+} wl_chan_switch_t;
+
+enum {
+	PFN_LIST_ORDER,
+	PFN_RSSI
+};
+
+enum {
+	DISABLE,
+	ENABLE
+};
+
+enum {
+	OFF_ADAPT,
+	SMART_ADAPT,
+	STRICT_ADAPT,
+	SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+/** report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT		11
+
+#define SORT_CRITERIA_MASK	0x0001
+#define AUTO_NET_SWITCH_MASK	0x0002
+#define ENABLE_BKGRD_SCAN_MASK	0x0004
+#define IMMEDIATE_SCAN_MASK	0x0008
+#define AUTO_CONNECT_MASK	0x0010
+
+#define ENABLE_BD_SCAN_MASK	0x0020
+#define ENABLE_ADAPTSCAN_MASK	0x00c0
+#define IMMEDIATE_EVENT_MASK	0x0100
+#define SUPPRESS_SSID_MASK	0x0200
+#define ENABLE_NET_OFFLOAD_MASK	0x0400
+/** report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK	0x0800
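+
+/* Illustrative flag composition (an assumption about typical usage, not part
+ * of the original header): a host requesting an immediate scan, smart
+ * adaptive scanning and separate SSID/BSSID reporting could build the pfn
+ * flag word from the bits and masks above, e.g.:
+ *
+ *	uint16 flags = IMMEDIATE_SCAN_MASK | REPORT_SEPERATELY_MASK |
+ *	        (SMART_ADAPT << ENABLE_ADAPTSCAN_BIT);
+ */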
+
+#define PFN_VERSION			2
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP			2
+
+#define PFN_PARTIAL_SCAN_BIT		0
+#define PFN_PARTIAL_SCAN_MASK		1
+
+#define PFN_SWC_RSSI_WINDOW_MAX   8
+#define PFN_SWC_MAX_NUM_APS       16
+#define PFN_HOTLIST_MAX_NUM_APS   64
+
+#define MAX_EPNO_HIDDEN_SSID    8
+#define MAX_WHITELIST_SSID      2
+
+/* Version 1 and 2 for various scan results structures defined below */
+#define PFN_SCANRESULTS_VERSION_V1	1
+#define PFN_SCANRESULTS_VERSION_V2	2
+
+/** PFN network info structure */
+typedef struct wl_pfn_subnet_info_v1 {
+	struct ether_addr BSSID;
+	uint8	channel; /**< channel number only */
+	uint8	SSID_len;
+	uint8	SSID[32];
+} wl_pfn_subnet_info_v1_t;
+
+typedef struct wl_pfn_subnet_info_v2 {
+	struct ether_addr BSSID;
+	uint8   channel; /**< channel number only */
+	uint8   SSID_len;
+	union {
+		uint8   SSID[32];
+		uint16 index;
+	} u;
+} wl_pfn_subnet_info_v2_t;
+
+typedef struct wl_pfn_net_info_v1 {
+	wl_pfn_subnet_info_v1_t pfnsubnet;
+	int16	RSSI; /**< receive signal strength (in dBm) */
+	uint16	timestamp; /**< age in seconds */
+} wl_pfn_net_info_v1_t;
+
+typedef struct wl_pfn_net_info_v2 {
+	wl_pfn_subnet_info_v2_t pfnsubnet;
+	int16   RSSI; /**< receive signal strength (in dBm) */
+	uint16  timestamp; /**< age in seconds */
+} wl_pfn_net_info_v2_t;
+
+/* Version 1 and 2 for various lbest scan results structures below */
+#define PFN_LBEST_SCAN_RESULT_VERSION_V1 1
+#define PFN_LBEST_SCAN_RESULT_VERSION_V2 2
+
+#define MAX_CHBKT_PER_RESULT		4
+
+typedef struct wl_pfn_lnet_info_v1 {
+	wl_pfn_subnet_info_v1_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */
+	uint16	flags; /**< partial scan, etc */
+	int16	RSSI; /**< receive signal strength (in dBm) */
+	uint32	timestamp; /**< age in milliseconds */
+	uint16	rtt0; /**< estimated distance to this AP in centimeters */
+	uint16	rtt1; /**< standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_v1_t;
+
+typedef struct wl_pfn_lnet_info_v2 {
+	wl_pfn_subnet_info_v2_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */
+	uint16  flags; /**< partial scan, etc */
+	int16   RSSI; /**< receive signal strength (in dBm) */
+	uint32  timestamp; /**< age in milliseconds */
+	uint16  rtt0; /**< estimated distance to this AP in centimeters */
+	uint16  rtt1; /**< standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_v2_t;
+
+typedef struct wl_pfn_lscanresults_v1 {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_lnet_info_v1_t netinfo[1];
+} wl_pfn_lscanresults_v1_t;
+
+typedef struct wl_pfn_lscanresults_v2 {
+	uint32 version;
+	uint16 status;
+	uint16 count;
+	uint32 scan_ch_buckets[MAX_CHBKT_PER_RESULT];
+	wl_pfn_lnet_info_v2_t netinfo[1];
+} wl_pfn_lscanresults_v2_t;
+
+/** Used to report one or more PFN scan results */
+typedef struct wl_pfn_scanresults_v1 {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_v1_t netinfo[1];
+} wl_pfn_scanresults_v1_t;
+
+typedef struct wl_pfn_scanresults_v2 {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	uint32 scan_ch_bucket;
+	wl_pfn_net_info_v2_t netinfo[1];
+} wl_pfn_scanresults_v2_t;
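+/*
+ * Illustrative host-side sketch (not part of the original header): walking the
+ * variable-length netinfo[] array of a v2 PFN scan result.  'count' gives the
+ * number of entries; only the first element is declared in the struct, so the
+ * event buffer must be large enough to hold all 'count' records.  The buffer
+ * name and the version check below are assumptions for the example.
+ *
+ *	wl_pfn_scanresults_v2_t *res = (wl_pfn_scanresults_v2_t *)buf;
+ *	uint32 i;
+ *	if (res->version != PFN_SCANRESULTS_VERSION_V2)
+ *		return;					// unexpected layout
+ *	for (i = 0; i < res->count; i++) {
+ *		wl_pfn_net_info_v2_t *ni = &res->netinfo[i];
+ *		// ni->pfnsubnet.BSSID, ni->RSSI (dBm), ni->timestamp (seconds)
+ *	}
+ */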
+
+typedef struct wl_pfn_significant_net {
+	uint16 flags;
+	uint16 channel;
+	struct ether_addr BSSID;
+	int8 rssi[PFN_SWC_RSSI_WINDOW_MAX];
+} wl_pfn_significant_net_t;
+
+#define PFN_SWC_SCANRESULT_VERSION     1
+
+typedef struct wl_pfn_swc_results {
+	uint32 version;
+	uint32 pkt_count;   /**< No. of results in current frame */
+	uint32 total_count; /**< Total expected results */
+	wl_pfn_significant_net_t list[];
+} wl_pfn_swc_results_t;
+typedef struct wl_pfn_net_info_bssid {
+	struct ether_addr BSSID;
+	uint8 channel;	/**< channel number only */
+	int8  RSSI;	/**< receive signal strength (in dBm) */
+	uint16 flags;	/**< (e.g. partial scan, off channel) */
+	uint16 timestamp; /**< age in seconds */
+} wl_pfn_net_info_bssid_t;
+
+typedef struct wl_pfn_scanhist_bssid {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_bssid_t netinfo[1];
+} wl_pfn_scanhist_bssid_t;
+
+/* Version 1 and 2 for various single scan result */
+#define PFN_SCANRESULT_VERSION_V1	1
+#define PFN_SCANRESULT_VERSION_V2	2
+
+/* used to report exactly one scan result */
+/* plus reports detailed scan info in bss_info */
+typedef struct wl_pfn_scanresult_v1 {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_v1_t netinfo;
+	wl_bss_info_t bss_info;
+} wl_pfn_scanresult_v1_t;
+
+typedef struct wl_pfn_scanresult_v2 {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_v2_t netinfo;
+	wl_bss_info_t bss_info;
+} wl_pfn_scanresult_v2_t;
+
+/** PFN data structure */
+typedef struct wl_pfn_param {
+	int32 version;			/**< PNO parameters version */
+	int32 scan_freq;		/**< Scan frequency */
+	int32 lost_network_timeout;	/**< Timeout in sec. to declare
+								* discovered network as lost
+								*/
+	int16 flags;			/**< Bit field to control features
+							* of PFN such as sort criteria auto
+							* enable switch and background scan
+							*/
+	int16 rssi_margin;		/**< Margin to avoid jitter for choosing a
+							* PFN based on RSSI sort criteria
+							*/
+	uint8 bestn; /**< number of best networks in each scan */
+	uint8 mscan; /**< number of scans recorded */
+	uint8 repeat; /**< Minimum number of scan intervals
+				     * before scan frequency changes in adaptive scan
+				     */
+	uint8 exp; /**< Exponent of 2 for maximum scan interval */
+	int32 slow_freq; /**< slow scan period */
+} wl_pfn_param_t;
+
+typedef struct wl_pfn_bssid {
+	struct ether_addr  macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16             flags;
+} wl_pfn_bssid_t;
+typedef struct wl_pfn_significant_bssid {
+	struct ether_addr	macaddr;
+	int8    rssi_low_threshold;
+	int8    rssi_high_threshold;
+} wl_pfn_significant_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_SSID_IMPRECISE_MATCH	0x80
+#define WL_PFN_SSID_SAME_NETWORK	0x10000
+#define WL_PFN_SUPPRESS_AGING_MASK	0x20000
+#define WL_PFN_FLUSH_ALL_SSIDS		0x40000
+
+#define WL_PFN_IOVAR_FLAG_MASK		0xFFFF00FF
+#define WL_PFN_RSSI_MASK		0xff00
+#define WL_PFN_RSSI_SHIFT		8
+
+typedef struct wl_pfn_cfg {
+	uint32	reporttype;
+	int32	channel_num;
+	uint16	channel_list[WL_NUMCHANNELS];
+	uint32	flags;
+} wl_pfn_cfg_t;
+
+#define WL_PFN_SSID_CFG_VERSION		1
+#define WL_PFN_SSID_CFG_CLEAR		0x1
+
+typedef struct wl_pfn_ssid_params {
+	int8 min5G_rssi;           /* minimum 5GHz RSSI for a BSSID to be considered      */
+	int8 min2G_rssi;           /* minimum 2.4GHz RSSI for a BSSID to be considered   */
+	int16 init_score_max;     /* The maximum score that a network can have before bonuses  */
+
+	int16 cur_bssid_bonus;    /* Add to current bssid                                      */
+	int16 same_ssid_bonus;    /* score bonus for all networks with the same network flag   */
+	int16 secure_bonus;       /* score bonus for networks that are not open          */
+	int16 band_5g_bonus;
+} wl_pfn_ssid_params_t;
+
+typedef struct wl_ssid_ext_params {
+	int8 min5G_rssi;	/* minimum 5GHz RSSI for a BSSID to be considered      */
+	int8 min2G_rssi;	/* minimum 2.4GHz RSSI for a BSSID to be considered   */
+	int16 init_score_max;	/* The maximum score that a network can have before bonuses  */
+	int16 cur_bssid_bonus;	/* Add to current bssid                                      */
+	int16 same_ssid_bonus;	/* score bonus for all networks with the same network flag   */
+	int16 secure_bonus;	/* score bonus for networks that are not open                */
+	int16 band_5g_bonus;
+} wl_ssid_ext_params_t;
+
+typedef struct wl_pfn_ssid_cfg {
+	uint16 version;
+	uint16 flags;
+	wl_ssid_ext_params_t params;
+} wl_pfn_ssid_cfg_t;
+
+#define CH_BUCKET_REPORT_NONE                   0
+#define CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY     1
+#define CH_BUCKET_REPORT_FULL_RESULT            2
+#define CH_BUCKET_REPORT_SCAN_COMPLETE    (CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY | \
+								CH_BUCKET_REPORT_FULL_RESULT)
+#define CH_BUCKET_REPORT_REGULAR            0
+#define CH_BUCKET_GSCAN                     4
+
+typedef struct wl_pfn_gscan_ch_bucket_cfg {
+	uint8 bucket_end_index;
+	uint8 bucket_freq_multiple;
+	uint8 flag;
+	uint8 reserved;
+	uint16 repeat;
+	uint16 max_freq_multiple;
+} wl_pfn_gscan_ch_bucket_cfg_t;
+
+typedef struct wl_pfn_capabilities {
+	uint16 max_mscan;
+	uint16 max_bestn;
+	uint16 max_swc_bssid;
+	uint16 max_hotlist_bssid;
+} wl_pfn_capabilities_t;
+
+#define GSCAN_SEND_ALL_RESULTS_MASK          (1 << 0)
+#define GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK (1 << 3)
+#define GSCAN_CFG_FLAGS_ONLY_MASK            (1 << 7)
+#define WL_GSCAN_CFG_VERSION                     1
+typedef struct wl_pfn_gscan_cfg {
+	uint16 version;
+	/**
+	 * BIT0 1 = send probes/beacons to HOST
+	 * BIT1 Reserved
+	 * BIT2 Reserved
+	 * Add any future flags here
+	 * BIT7 1 = no other useful cfg sent
+	 */
+	uint8  flags;
+	/** Buffer filled threshold in % to generate an event */
+	uint8   buffer_threshold;
+	/**
+	 * No. of BSSIDs with "change" to generate an evt
+	 * change - crosses rssi threshold/lost
+	 */
+	uint8   swc_nbssid_threshold;
+	/* Max=8 (for now) Size of rssi cache buffer */
+	uint8  swc_rssi_window_size;
+	uint8  count_of_channel_buckets;
+	uint8  retry_threshold;
+	uint16  lost_ap_window;
+	wl_pfn_gscan_ch_bucket_cfg_t channel_bucket[1];
+} wl_pfn_gscan_cfg_t;
+
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED	0xfffffffe	/**< Remaining reserved for future use */
+
+typedef struct wl_pfn {
+	wlc_ssid_t		ssid;			/**< ssid name and its length */
+	int32			flags;			/**< bit2: hidden */
+	int32			infra;			/**< BSS Vs IBSS */
+	int32			auth;			/**< Open Vs Closed */
+	int32			wpa_auth;		/**< WPA type */
+	int32			wsec;			/**< wsec value */
+} wl_pfn_t;
+
+typedef struct wl_pfn_list {
+	uint32		version;
+	uint32		enabled;
+	uint32		count;
+	wl_pfn_t	pfn[1];
+} wl_pfn_list_t;
+
+#define PFN_SSID_EXT_VERSION   1
+
+typedef struct wl_pfn_ext {
+	uint8 flags;
+	int8 rssi_thresh; /* RSSI threshold, track only if RSSI > threshold */
+	uint16 wpa_auth; /* Match the wpa auth type defined in wlioctl_defs.h */
+	uint8 ssid[DOT11_MAX_SSID_LEN];
+	uint8 ssid_len;
+	uint8 pad;
+} wl_pfn_ext_t;
+typedef struct wl_pfn_ext_list {
+	uint16 version;
+	uint16 count;
+	wl_pfn_ext_t pfn_ext[1];
+} wl_pfn_ext_list_t;
+
+#define WL_PFN_SSID_EXT_FOUND   0x1
+#define WL_PFN_SSID_EXT_LOST    0x2
+typedef struct wl_pfn_result_ssid {
+	uint8 flags;
+	int8 rssi;
+	/* channel number */
+	uint16 channel;
+	/* Assume idx in order of cfg */
+	uint32 index;
+} wl_pfn_result_ssid_crc32_t;
+
+typedef struct wl_pfn_ssid_ext_result {
+	uint16 version;
+	uint16 count;
+	wl_pfn_result_ssid_crc32_t net[1];
+} wl_pfn_ssid_ext_result_t;
+
+#define PFN_EXT_AUTH_CODE_OPEN   1 /* open */
+#define PFN_EXT_AUTH_CODE_PSK   2 /* WPA_PSK or WPA2PSK */
+#define PFN_EXT_AUTH_CODE_EAPOL 4 /* any EAPOL  */
+
+#define WL_PFN_HIDDEN_BIT		2
+#define WL_PFN_HIDDEN_MASK		0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX			10
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX			90
+#endif
+
+/* Dynamic scan configuration for motion profiles */
+
+#define WL_PFN_MPF_VERSION 1
+
+/* Valid group IDs, may be expanded in the future */
+#define WL_PFN_MPF_GROUP_SSID 0
+#define WL_PFN_MPF_GROUP_BSSID 1
+#define WL_PFN_MPF_MAX_GROUPS 2
+
+/* Max number of MPF states supported at this time */
+#define WL_PFN_MPF_STATES_MAX 4
+
+/* Flags for the mpf-specific stuff */
+#define WL_PFN_MPF_ADAPT_ON_BIT		0
+#define WL_PFN_MPF_ADAPTSCAN_BIT	1
+
+#define WL_PFN_MPF_ADAPT_ON_MASK	0x0001
+#define WL_PFN_MPF_ADAPTSCAN_MASK 	0x0006
+
+/* Per-state timing values */
+typedef struct wl_pfn_mpf_state_params {
+	int32  scan_freq;	/* Scan frequency (secs) */
+	int32  lost_network_timeout; /* Timeout to declare net lost (secs) */
+	int16  flags;		/* Space for flags: ADAPT etc */
+	uint8  exp;		/* Exponent of 2 for max interval for SMART/STRICT_ADAPT */
+	uint8  repeat;		/* Number of scans before changing adaptation level */
+	int32  slow_freq;	/* Slow scan period for SLOW_ADAPT */
+} wl_pfn_mpf_state_params_t;
+
+typedef struct wl_pfn_mpf_param {
+	uint16 version;		/* Structure version */
+	uint16 groupid;		/* Group ID: 0 (SSID), 1 (BSSID), other: reserved */
+	wl_pfn_mpf_state_params_t state[WL_PFN_MPF_STATES_MAX];
+} wl_pfn_mpf_param_t;
+
+/* Structure for setting pfn_override iovar */
+typedef struct wl_pfn_override_param {
+	uint16 version;         /* Structure version */
+	uint16 start_offset;    /* Seconds from now to apply new params */
+	uint16 duration;        /* Seconds to keep new params applied */
+	uint16 reserved;
+	wl_pfn_mpf_state_params_t override;
+} wl_pfn_override_param_t;
+#define WL_PFN_OVERRIDE_VERSION	1
+
+/*
+ * Definitions for base MPF configuration
+ */
+
+#define WL_MPF_VERSION 1
+#define WL_MPF_MAX_BITS 3
+#define WL_MPF_MAX_STATES (1 << WL_MPF_MAX_BITS)
+
+#define WL_MPF_STATE_NAME_MAX 12
+
+typedef struct wl_mpf_val {
+	uint16 val;		/* Value of GPIO bits */
+	uint16 state;		/* State identifier */
+	char name[WL_MPF_STATE_NAME_MAX]; /* Optional name */
+} wl_mpf_val_t;
+
+typedef struct wl_mpf_map {
+	uint16 version;
+	uint16 type;
+	uint16 mask;		/* Which GPIO bits to use */
+	uint8  count;		/* Count of state/value mappings */
+	uint8  PAD;
+	wl_mpf_val_t vals[WL_MPF_MAX_STATES];
+} wl_mpf_map_t;
+
+#define WL_MPF_STATE_AUTO (0xFFFF) /* ((uint16)-1) */
+
+typedef struct wl_mpf_state {
+	uint16 version;
+	uint16 type;
+	uint16 state;		/* Get/Set */
+	uint8 force;		/* 0 - auto (HW) state, 1 - forced state */
+	char name[WL_MPF_STATE_NAME_MAX]; /* Get/Set: Optional/actual name */
+	uint8  PAD;
+} wl_mpf_state_t;
+/*
+ * WLFCTS definition
+ */
+typedef struct wl_txstatus_additional_info {
+	uint32 rspec;
+	uint32 enq_ts;
+	uint32 last_ts;
+	uint32 entry_ts;
+	uint16 seq;
+	uint8  rts_cnt;
+	uint8  tx_cnt;
+} wl_txstatus_additional_info_t;
+
+/** Service discovery */
+typedef struct {
+	uint8	transaction_id;	/**< Transaction id */
+	uint8	protocol;	/**< Service protocol type */
+	uint16	query_len;	/**< Length of query */
+	uint16	response_len;	/**< Length of response */
+	uint8	qrbuf[];
+} wl_p2po_qr_t;
+
+typedef struct {
+	uint16			period;			/**< extended listen period */
+	uint16			interval;		/**< extended listen interval */
+	uint16                  count;                  /* count to repeat */
+	uint16                  pad;                    /* pad for 32bit align */
+} wl_p2po_listen_t;
+
+/** GAS state machine tunable parameters.  Structure field values of 0 means use the default. */
+typedef struct wl_gas_config {
+	uint16 max_retransmit;		/**< Max # of firmware/driver retransmits on no Ack
+					 * from peer (on top of the ucode retries).
+					 */
+	uint16 response_timeout;	/**< Max time to wait for a GAS-level response
+					 * after sending a packet.
+					 */
+	uint16 max_comeback_delay;	/**< Max GAS response comeback delay.
+					 * Exceeding this fails the GAS exchange.
+					 */
+	uint16 max_retries;		/**< Max # of GAS state machine retries on failure
+					 * of a GAS frame exchange.
+					 */
+} wl_gas_config_t;
+
+/** P2P Find Offload parameters */
+typedef struct wl_p2po_find_config {
+	uint16 version;			/**< Version of this struct */
+	uint16 length;			/**< sizeof(wl_p2po_find_config_t) */
+	int32 search_home_time;		/**< P2P search state home time when concurrent
+					 * connection exists.  -1 for default.
+					 */
+	uint8 num_social_channels;
+			/**< Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX.
+			 * 0 means use default social channels.
+			 */
+	uint8 flags;
+	uint16 social_channels[1];	/**< Variable length array of social channels */
+} wl_p2po_find_config_t;
+#define WL_P2PO_FIND_CONFIG_VERSION 2	/**< value for version field */
+
+/** wl_p2po_find_config_t flags */
+#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01	/**< Whether to scan for all APs in the p2po_find
+						 * periodic scans of all channels.
+						 * 0 means scan for only P2P devices.
+						 * 1 means scan for P2P devices plus non-P2P APs.
+						 */
+
+
+/** For adding a WFDS service to seek */
+typedef struct {
+	uint32 seek_hdl;		/**< unique id chosen by host */
+	uint8 addr[6];			/**< Seek service from a specific device with this
+					 * MAC address, all 1's for any device.
+					 */
+	uint8 service_hash[P2P_WFDS_HASH_LEN];
+	uint8 service_name_len;
+	uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN];
+					/**< Service name to seek, not null terminated */
+	uint8 service_info_req_len;
+	uint8 service_info_req[1];	/**< Service info request, not null terminated.
+					 * Variable length specified by service_info_req_len.
+					 * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN.
+					 */
+} wl_p2po_wfds_seek_add_t;
+
+/** For deleting a WFDS service to seek */
+typedef struct {
+	uint32 seek_hdl;		/**< delete service specified by id */
+} wl_p2po_wfds_seek_del_t;
+
+
+/** For adding a WFDS service to advertise */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 advertise_hdl;		/**< unique id chosen by host */
+	uint8 service_hash[P2P_WFDS_HASH_LEN];
+	uint32 advertisement_id;
+	uint16 service_config_method;
+	uint8 service_name_len;
+	uint8 service_name[MAX_WFDS_SVC_NAME_LEN];
+					/**< Service name, not null terminated */
+	uint8 service_status;
+	uint16 service_info_len;
+	uint8 service_info[1];		/**< Service info, not null terminated.
+					 * Variable length specified by service_info_len.
+					 * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN.
+					 */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t;
+#include <packed_section_end.h>
+
+/** For deleting a WFDS service to advertise */
+typedef struct {
+	uint32 advertise_hdl;	/**< delete service specified by hdl */
+} wl_p2po_wfds_advertise_del_t;
+
+/** P2P Offload discovery mode for the p2po_state iovar */
+typedef enum {
+	WL_P2PO_DISC_STOP,
+	WL_P2PO_DISC_LISTEN,
+	WL_P2PO_DISC_DISCOVERY
+} disc_mode_t;
+
+/* ANQP offload */
+
+#define ANQPO_MAX_QUERY_SIZE		256
+typedef struct {
+	uint16 max_retransmit;		/**< ~0 use default, max retransmit on no ACK from peer */
+	uint16 response_timeout; /**< ~0 use default, msec to wait for resp after tx packet */
+	uint16 max_comeback_delay;	/**< ~0 use default, max comeback delay in resp else fail */
+	uint16 max_retries;		/**< ~0 use default, max retries on failure */
+	uint16 query_len;		/**< length of ANQP query */
+	uint8 query_data[1];		/**< ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */
+} wl_anqpo_set_t;
+
+typedef struct {
+	uint16 channel;			/**< channel of the peer */
+	struct ether_addr addr;		/**< addr of the peer */
+} wl_anqpo_peer_t;
+
+#define ANQPO_MAX_PEER_LIST			64
+typedef struct {
+	uint16 count;				/**< number of peers in list */
+	wl_anqpo_peer_t peer[1];	/**< max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_t;
+
+#define ANQPO_MAX_IGNORE_SSID		64
+typedef struct {
+	uint8 is_clear;				/**< set to clear list (not used on GET) */
+	uint8 PAD;
+	uint16 count;				/**< number of SSID in list */
+	wlc_ssid_t ssid[1];			/**< max ANQPO_MAX_IGNORE_SSID */
+} wl_anqpo_ignore_ssid_list_t;
+
+#define ANQPO_MAX_IGNORE_BSSID		64
+typedef struct {
+	uint8 is_clear;				/**< set to clear list (not used on GET) */
+	uint8 PAD;
+	uint16 count;				/**< number of addr in list */
+	struct ether_addr bssid[];	/**< max ANQPO_MAX_IGNORE_BSSID */
+} wl_anqpo_ignore_bssid_list_t;
+
+
+struct toe_ol_stats_t {
+	/** Num of tx packets that don't need to be checksummed */
+	uint32 tx_summed;
+
+	/* Num of tx packets where checksum is filled by offload engine */
+	uint32 tx_iph_fill;
+	uint32 tx_tcp_fill;
+	uint32 tx_udp_fill;
+	uint32 tx_icmp_fill;
+
+	/*  Num of rx packets where toe finds out if checksum is good or bad */
+	uint32 rx_iph_good;
+	uint32 rx_iph_bad;
+	uint32 rx_tcp_good;
+	uint32 rx_tcp_bad;
+	uint32 rx_udp_good;
+	uint32 rx_udp_bad;
+	uint32 rx_icmp_good;
+	uint32 rx_icmp_bad;
+
+	/* Num of tx packets in which csum error is injected */
+	uint32 tx_tcp_errinj;
+	uint32 tx_udp_errinj;
+	uint32 tx_icmp_errinj;
+
+	/* Num of rx packets in which csum error is injected */
+	uint32 rx_tcp_errinj;
+	uint32 rx_udp_errinj;
+	uint32 rx_icmp_errinj;
+};
+
+/** Arp offload statistic counts */
+struct arp_ol_stats_t {
+	uint32  host_ip_entries; /**< Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;	/**< Host IP table additions skipped due to overflow */
+
+	uint32  arp_table_entries;	/**< ARP table entries */
+	uint32  arp_table_overflow;	/**< ARP table additions skipped due to overflow */
+
+	uint32  host_request;		/**< ARP requests from host */
+	uint32  host_reply;		/**< ARP replies from host */
+	uint32  host_service;		/**< ARP requests from host serviced by ARP Agent */
+
+	uint32  peer_request;		/**< ARP requests received from network */
+	uint32  peer_request_drop;	/**< ARP requests from network that were dropped */
+	uint32  peer_reply;		/**< ARP replies received from network */
+	uint32  peer_reply_drop;	/**< ARP replies from network that were dropped */
+	uint32  peer_service;		/**< ARP request from host serviced by ARP Agent */
+};
+
+/** NS offload statistic counts */
+struct nd_ol_stats_t {
+	uint32  host_ip_entries;    /**< Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;   /**< Host IP table additions skipped due to overflow */
+	uint32  peer_request;       /**< NS requests received from network */
+	uint32  peer_request_drop;  /**< NS requests from network that were dropped */
+	uint32  peer_reply_drop;    /**< NA replies from network that were dropped */
+	uint32  peer_service;       /**< NS request from host serviced by firmware */
+};
+
+/*
+ * Neighbor Discovery Offloading
+ */
+enum {
+	WL_ND_IPV6_ADDR_TYPE_UNICAST = 0,
+	WL_ND_IPV6_ADDR_TYPE_ANYCAST
+};
+
+typedef struct wl_nd_host_ip_addr {
+	struct ipv6_addr ip_addr;	/* host ip address */
+	uint8 type;			/* type of address */
+	uint8 pad[3];
+} wl_nd_host_ip_addr_t;
+
+typedef struct wl_nd_host_ip_list {
+	uint32 count;
+	wl_nd_host_ip_addr_t host_ip[1];
+} wl_nd_host_ip_list_t;
+
+#define WL_ND_HOSTIP_IOV_VER    1
+
+enum {
+	WL_ND_HOSTIP_OP_VER = 0,	/* get version */
+	WL_ND_HOSTIP_OP_ADD,		/* add address */
+	WL_ND_HOSTIP_OP_DEL,		/* delete specified address */
+	WL_ND_HOSTIP_OP_DEL_UC,		/* delete all unicast address */
+	WL_ND_HOSTIP_OP_DEL_AC,		/* delete all anycast address */
+	WL_ND_HOSTIP_OP_DEL_ALL,	/* delete all addresses */
+	WL_ND_HOSTIP_OP_LIST,		/* get list of host ip address */
+	WL_ND_HOSTIP_OP_MAX
+};
+
+typedef struct wl_nd_hostip {
+	uint16 version;				/* version of iovar buf */
+	uint16 op_type;				/* operation type */
+	uint32 length;				/* length of entire structure */
+	union {
+		wl_nd_host_ip_addr_t host_ip;	/* set param for add */
+		uint16 version;			/* get return for ver */
+	} u;
+} wl_nd_hostip_t;
+
+#define WL_ND_HOSTIP_FIXED_LEN		OFFSETOF(wl_nd_hostip_t, u)
+#define WL_ND_HOSTIP_WITH_ADDR_LEN	(WL_ND_HOSTIP_FIXED_LEN + sizeof(wl_nd_host_ip_addr_t))
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/**
+ * NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+	uint32	period_msec;	/**< Retransmission period (0 to disable packet re-transmits) */
+	uint16	len_bytes;	/**< Size of packet to transmit (0 to disable packet re-transmits) */
+	uint8	data[1];	/**< Variable length packet to transmit.  Contents should include
+				 * entire ethernet packet (enet header, IP header, UDP header,
+				 * and UDP payload) in network byte order.
+				 */
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN		OFFSETOF(wl_keep_alive_pkt_t, data)
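+/*
+ * Illustrative host-side sketch (not part of the original header): sizing and
+ * filling a keep-alive request for the structure above.  The helper that builds
+ * the raw ethernet frame and the exact iovar used to hand the buffer to the
+ * driver are assumptions for the example.
+ *
+ *	uint8 frame[1514];			// complete enet/IP/UDP packet,
+ *						// network byte order
+ *	uint16 frame_len = build_nat_ka_frame(frame);	// hypothetical helper
+ *	uint32 buf_len = WL_KEEP_ALIVE_FIXED_LEN + frame_len;
+ *	wl_keep_alive_pkt_t *ka = (wl_keep_alive_pkt_t *)malloc(buf_len);
+ *	ka->period_msec = 30000;		// retransmit every 30 seconds
+ *	ka->len_bytes = frame_len;
+ *	memcpy(ka->data, frame, frame_len);
+ *	// pass 'buf_len' bytes to the driver (e.g. via a keep-alive iovar)
+ */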
+
+#define MAX_RSSI_COUNT			8
+typedef struct rssi_struct {
+	int8	val[MAX_RSSI_COUNT];	/**< rssi values in AFs */
+	int16	sum;			/**< total rssi sum */
+	uint8	cnt;			/**< number rssi samples */
+	uint8	idx;			/**< next rssi location */
+} rssi_struct_t;
+
+
+/*
+ * ptk_start: iovar to start 4-way handshake for secured ranging
+*/
+
+/* ptk negotiation security type - determines negotiation parameters */
+typedef enum {
+	WL_PTK_START_SEC_TYPE_PMK = 1
+} wl_ptk_start_sec_type_t;
+
+/* ptk negotiation role */
+typedef enum {
+	ROLE_NONE	= 0x0,
+	ROLE_AUTH	= 0x1,
+	ROLE_SUP	= 0x2,
+	ROLE_STATIC	= 0x3,
+	ROLE_INVALID	= 0xff,
+	WL_PTK_START_ROLE_NONE = ROLE_NONE,
+	WL_PTK_START_ROLE_AUTH = ROLE_AUTH,
+	WL_PTK_START_ROLE_SUP = ROLE_SUP,
+	WL_PTK_START_ROLE_STATIC = ROLE_STATIC,
+	WL_PTK_START_ROLE_INVALID = ROLE_INVALID
+} wl_ptk_start_role_t;
+
+typedef struct wl_ptk_start_tlv {
+	uint16 id;
+	uint16 len;
+	uint8 data[1];
+} wl_ptk_start_tlv_t;
+
+typedef enum {
+	WL_PTK_START_TLV_PMK	= 1	/* uint8[] */
+} wl_ptk_start_tlv_type;
+
+typedef enum {
+	WL_PTK_START_FLAG_NO_DATA_PROT	= 1,	/* data frame protection disabled */
+	WL_PTK_START_FLAG_GEN_FTM_TPK	= 2	/* Generate FTM Toast/Seq Protection Key */
+} wl_ptk_start_flags_t;
+
+typedef struct wl_ptk_start_iov {
+	uint16 version;
+	uint16 len;				/* length of entire iov from version */
+	wl_ptk_start_flags_t flags;
+	wl_ptk_start_sec_type_t sec_type;
+	wl_ptk_start_role_t role;
+	struct ether_addr peer_addr;
+	uint16 pad;				/* reserved/32 bit alignment */
+	wl_ptk_start_tlv_t tlvs[1];
+} wl_ptk_start_iov_t;
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+#define MAX_WAKE_PACKET_CACHE_BYTES 128 /**< Maximum cached wake packet */
+
+#define MAX_WAKE_PACKET_BYTES	    (DOT11_A3_HDR_LEN +			    \
+				     DOT11_QOS_LEN +			    \
+				     sizeof(struct dot11_llc_snap_header) + \
+				     ETHER_MAX_DATA)
+
+typedef struct pm_wake_packet {
+	uint32	status;		/**< Is the wake reason a packet (if so, the other fields are valid) */
+	uint32	pattern_id;	/**< Pattern ID that matched */
+	uint32	original_packet_size;
+	uint32	saved_packet_size;
+	uint8	packet[MAX_WAKE_PACKET_CACHE_BYTES];
+} pm_wake_packet_t;
+
+/* Packet filter types. */
+typedef enum wl_pkt_filter_type {
+	WL_PKT_FILTER_TYPE_PATTERN_MATCH=0,       /**< Pattern matching filter */
+	WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /**< Magic packet match */
+	WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2,  /**< A pattern list (match all to match filter) */
+	WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /**< SECURE WOWL magic / net pattern match */
+	WL_PKT_FILTER_TYPE_APF_MATCH=4, /* Android packet filter match */
+	WL_PKT_FILTER_TYPE_PATTERN_MATCH_TIMEOUT=5, /* Pattern matching filter with timeout event */
+	WL_PKT_FILTER_TYPE_IMMEDIATE_PATTERN_MATCH=6, /* Immediate pattern matching filter */
+	WL_PKT_FILTYER_TYPE_MAX = 7,	/* Pkt filter type MAX */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+/* String mapping for types that may be used by applications or debug */
+#define WL_PKT_FILTER_TYPE_NAMES \
+	{ "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH },	\
+	{ "MAGIC",   WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH },	\
+	{ "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH },	\
+	{ "SECURE WOWL", WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH },	\
+	{ "APF", WL_PKT_FILTER_TYPE_APF_MATCH }, \
+	{ "PATTERN TIMEOUT", WL_PKT_FILTER_TYPE_PATTERN_MATCH_TIMEOUT }, \
+	{ "IMMEDIATE", WL_PKT_FILTER_TYPE_IMMEDIATE_PATTERN_MATCH }
+
+/** Secured WOWL packet was encrypted, need decrypted before check filter match */
+typedef struct wl_pkt_decrypter {
+	uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
+	void*  dec_ctx;
+} wl_pkt_decrypter_t;
+
+/**
+ * Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+	uint32	offset;		/**< Offset within received packet to start pattern matching.
+				 * Offset '0' is the first byte of the ethernet header.
+				 */
+	uint32	size_bytes;	/**< Size of the pattern.  Bitmask must be the same size. */
+	uint8   mask_and_pattern[]; /**< Variable length mask and pattern data.  Mask starts
+				      * at offset 0.  Pattern immediately follows mask.  For
+				      * a secured pattern, place the decrypter pointer at the
+				      * beginning; mask and pattern are shifted accordingly.
+				      */
+} wl_pkt_filter_pattern_t;
+
+/** A pattern list is a numerically specified list of modified pattern structures. */
+typedef struct wl_pkt_filter_pattern_listel {
+	uint16 rel_offs;	/**< Offset to begin match (relative to 'base' below) */
+	uint16 base_offs;	/**< Base for offset (defined below) */
+	uint16 size_bytes;	/**< Size of mask/pattern */
+	uint16 match_flags;	/**< Addition flags controlling the match */
+	uint8  mask_and_data[1]; /**< Variable length mask followed by data, each size_bytes */
+} wl_pkt_filter_pattern_listel_t;
+
+typedef struct wl_pkt_filter_pattern_list {
+	uint8 list_cnt;		/**< Number of elements in the list */
+	uint8 PAD1[1];		/**< Reserved (possible version: reserved) */
+	uint16 totsize;		/**< Total size of this pattern list (includes this struct) */
+	wl_pkt_filter_pattern_listel_t patterns[]; /**< Variable number of list elements */
+} wl_pkt_filter_pattern_list_t;
+
+typedef struct wl_apf_program {
+	uint16 version;
+	uint16 instr_len;	/* number of instruction blocks */
+	uint32 inst_ts;		/* program installation timestamp */
+	uint8 instrs[];	/* variable length instructions */
+} wl_apf_program_t;
+
+typedef struct wl_pkt_filter_pattern_timeout {
+	uint32	offset;	/* Offset within received packet to start pattern matching.
+					 * Offset '0' is the first byte of the ethernet header.
+					 */
+	uint32	size_bytes;	/* Size of the pattern. Bitmask must be the same size. */
+	uint32	timeout;	/* Timeout(seconds) */
+	uint8	mask_and_pattern[1]; /* Variable length mask and pattern data.
+								 * mask starts at offset 0. Pattern
+								 * immediately follows mask.
+								*/
+} wl_pkt_filter_pattern_timeout_t;
+
+/** IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+	uint32	id;		/**< Unique filter id, specified by app. */
+	uint32	type;		/**< Filter type (WL_PKT_FILTER_TYPE_xxx). */
+	uint32	negate_match;	/**< Negate the result of filter matches */
+	union {			/* Filter definitions */
+		wl_pkt_filter_pattern_t pattern;	/**< Pattern matching filter */
+		wl_pkt_filter_pattern_list_t patlist; /**< List of patterns to match */
+		wl_apf_program_t apf_program; /* apf program */
+		wl_pkt_filter_pattern_timeout_t pattern_timeout; /* Pattern timeout event filter */
+	} u;
+} wl_pkt_filter_t;
+
+/** IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */
+typedef struct wl_tcp_keep_set {
+	uint32	val1;
+	uint32	val2;
+} wl_tcp_keep_set_t;
+
+#define WL_PKT_FILTER_FIXED_LEN		  OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN	  OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns)
+#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN	\
+			OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data)
+#define WL_PKT_FILTER_PATTERN_TIMEOUT_FIXED_LEN	\
+			OFFSETOF(wl_pkt_filter_pattern_timeout_t, mask_and_pattern)
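+/*
+ * Illustrative sketch (not part of the original header): building a
+ * "pkt_filter_add" pattern filter.  Mask bytes occupy the first size_bytes of
+ * mask_and_pattern[] and the pattern immediately follows, so the total buffer
+ * length is the two fixed-length macros above plus 2 * size_bytes.  The id,
+ * offset and dst_mac values are made up for the example.
+ *
+ *	uint32 psize = 6;			// match 6 bytes
+ *	uint32 buf_len = WL_PKT_FILTER_FIXED_LEN +
+ *		WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * psize;
+ *	wl_pkt_filter_t *f = (wl_pkt_filter_t *)calloc(1, buf_len);
+ *	f->id = 100;
+ *	f->type = WL_PKT_FILTER_TYPE_PATTERN_MATCH;
+ *	f->u.pattern.offset = 0;		// first byte of ethernet header
+ *	f->u.pattern.size_bytes = psize;
+ *	memset(f->u.pattern.mask_and_pattern, 0xff, psize);		// mask
+ *	memcpy(f->u.pattern.mask_and_pattern + psize, dst_mac, psize);	// pattern
+ */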
+
+#define WL_APF_INTERNAL_VERSION	1
+#define WL_APF_PROGRAM_MAX_SIZE (2 * 1024)
+#define WL_APF_PROGRAM_FIXED_LEN	OFFSETOF(wl_apf_program_t, instrs)
+#define WL_APF_PROGRAM_LEN(apf_program)	\
+	((apf_program)->instr_len * sizeof((apf_program)->instrs[0]))
+#define WL_APF_PROGRAM_TOTAL_LEN(apf_program)	\
+	(WL_APF_PROGRAM_FIXED_LEN + WL_APF_PROGRAM_LEN(apf_program))
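+/*
+ * Illustrative sketch (not part of the original header): sizing an APF program
+ * install buffer with the macros above.  Because instrs[] entries are bytes,
+ * WL_APF_PROGRAM_LEN() reduces to instr_len bytes.  'apf_bytecode'/'apf_len'
+ * are assumed inputs (e.g. a program produced by a host-side APF generator).
+ *
+ *	uint32 buf_len = WL_APF_PROGRAM_FIXED_LEN + apf_len;
+ *	wl_apf_program_t *prog = (wl_apf_program_t *)calloc(1, buf_len);
+ *	prog->version = WL_APF_INTERNAL_VERSION;
+ *	prog->instr_len = apf_len;
+ *	memcpy(prog->instrs, apf_bytecode, apf_len);
+ *	// buf_len == WL_APF_PROGRAM_TOTAL_LEN(prog); the program itself is
+ *	// expected to stay within WL_APF_PROGRAM_MAX_SIZE
+ */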
+
+/** IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+	uint32	id;		/**< Unique filter id */
+	uint32	enable;		/**< Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/** IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+	uint32	num;		/**< Number of installed packet filters */
+	wl_pkt_filter_t	filter[1];	/**< Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN	  OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/** IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+	uint32	num_pkts_matched;	/**< # filter matches for specified filter id */
+	uint32	num_pkts_forwarded;	/**< # packets fwded from dongle to host for all filters */
+	uint32	num_pkts_discarded;	/**< # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/** IOVAR "pkt_filter_ports" parameter.  Configure TCP/UDP port filters. */
+typedef struct wl_pkt_filter_ports {
+	uint8 version;		/**< Be proper */
+	uint8 reserved;		/**< Be really proper */
+	uint16 count;		/**< Number of ports following */
+	/* End of fixed data */
+	uint16 ports[1];	/**< Placeholder for ports[<count>] */
+} wl_pkt_filter_ports_t;
+
+#define WL_PKT_FILTER_PORTS_FIXED_LEN	OFFSETOF(wl_pkt_filter_ports_t, ports)
+
+#define WL_PKT_FILTER_PORTS_VERSION	0
+#define WL_PKT_FILTER_PORTS_MAX		128
+
+#define RSN_REPLAY_LEN 8
+typedef struct _gtkrefresh {
+	uint8	KCK[RSN_KCK_LENGTH];
+	uint8	KEK[RSN_KEK_LENGTH];
+	uint8	ReplayCounter[RSN_REPLAY_LEN];
+} gtk_keyinfo_t, *pgtk_keyinfo_t;
+
+/** Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+	uint32 cmd;		/**< common ioctl definition */
+	uint32 len;		/**< length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES	4
+
+/**
+ * These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+	(((cmd) == WLC_GET_MAGIC)		|| \
+	 ((cmd) == WLC_GET_VERSION)		|| \
+	 ((cmd) == WLC_GET_AP)			|| \
+	 ((cmd) == WLC_GET_INSTANCE))
+
+typedef struct wl_pkteng {
+	uint32 flags;
+	uint32 delay;			/**< Inter-packet delay */
+	uint32 nframes;			/**< Number of frames */
+	uint32 length;			/**< Packet length */
+	uint8  seqno;			/**< Enable/disable sequence no. */
+	struct ether_addr dest;		/**< Destination address */
+	struct ether_addr src;		/**< Source address */
+	uint8  PAD[3];
+} wl_pkteng_t;
+
+#define WL_PKTENG_RU_FILL_VER_1		1
+/* Structure for the RU packet engine */
+typedef struct wl_pkteng_ru {
+	uint16 version;		/* ver is 1 */
+	uint16 length;		/* size of complete structure */
+	uint8 bw;			/* bandwidth info */
+	uint8 ru_alloc_val;		/* ru allocation index number */
+	uint8 mcs_val;			/* mcs allocated value */
+	uint8 nss_val;			/* num of spatial streams */
+	uint32 num_bytes;		/* approx num of bytes to calculate other required params */
+	uint8 cp_ltf_val;		/* GI and LTF symbol size */
+	uint8 he_ltf_symb;		/* num of HE-LTF symbols */
+	uint8 stbc;			/* STBC support */
+	uint8 coding_val;		/* BCC/LDPC coding support */
+	uint8 pe_category;	/* PE duration 0/8/16usecs  */
+	uint8 dcm;			/* dual carrier modulation */
+	uint8 mumimo_ltfmode; /* ltf mode */
+	uint8 PAD[1];		/* pad bytes to make structure occupy 4 byte aligned */
+} wl_pkteng_ru_fill_t;
+
+typedef struct wl_pkteng_stats {
+	uint32 lostfrmcnt;		/**< RX PER test: no of frames lost (skip seqno) */
+	int32 rssi;			/**< RSSI */
+	int32 snr;			/**< signal to noise ratio */
+	uint16 rxpktcnt[NUM_80211_RATES+1];
+	uint8 rssi_qdb;			/**< qdB portion of the computed rssi */
+	uint8  PAD;
+} wl_pkteng_stats_t;
+
+typedef struct wl_txcal_params {
+	wl_pkteng_t pkteng;
+	uint8 gidx_start;
+	int8 gidx_step;
+	uint8 gidx_stop;
+	uint8  PAD;
+} wl_txcal_params_t;
+
+
+typedef struct wl_sslpnphy_papd_debug_data {
+	uint8 psat_pwr;
+	uint8 psat_indx;
+	uint8 final_idx;
+	uint8 start_idx;
+	int32 min_phase;
+	int32 voltage;
+	int8 temperature;
+	uint8  PAD[3];
+} wl_sslpnphy_papd_debug_data_t;
+typedef struct wl_sslpnphy_debug_data {
+	int16 papdcompRe [64];
+	int16 papdcompIm [64];
+} wl_sslpnphy_debug_data_t;
+typedef struct wl_sslpnphy_spbdump_data {
+	uint16 tbl_length;
+	int16 spbreal[256];
+	int16 spbimg[256];
+} wl_sslpnphy_spbdump_data_t;
+typedef struct wl_sslpnphy_percal_debug_data {
+	uint32 cur_idx;
+	uint32 tx_drift;
+	uint8 prev_cal_idx;
+	uint8  PAD[3];
+	uint32 percal_ctr;
+	int32 nxt_cal_idx;
+	uint32 force_1idxcal;
+	uint32 onedxacl_req;
+	int32 last_cal_volt;
+	int8 last_cal_temp;
+	uint8  PAD[3];
+	uint32 vbat_ripple;
+	uint32 exit_route;
+	int32 volt_winner;
+} wl_sslpnphy_percal_debug_data_t;
+
+typedef enum {
+	wowl_pattern_type_bitmap = 0,
+	wowl_pattern_type_arp,
+	wowl_pattern_type_na
+} wowl_pattern_type_t;
+
+typedef struct wl_wowl_pattern {
+	uint32		    masksize;		/**< Size of the mask in #of bytes */
+	uint32		    offset;		/**< Pattern byte offset in packet */
+	uint32		    patternoffset;	/**< Offset of start of pattern in the structure */
+	uint32		    patternsize;	/**< Size of the pattern itself in #of bytes */
+	uint32		    id;			/**< id */
+	uint32		    reasonsize;		/**< Size of the wakeup reason code */
+	wowl_pattern_type_t type;		/**< Type of pattern */
+	/* Mask follows the structure above */
+	/* Pattern follows the mask is at 'patternoffset' from the start */
+} wl_wowl_pattern_t;
+
+typedef struct wl_wowl_pattern_list {
+	uint32			count;
+	wl_wowl_pattern_t	pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct wl_wowl_wakeind {
+	uint8	pci_wakeind;	/**< Whether PCI PMECSR PMEStatus bit was set */
+	uint32	ucode_wakeind;	/**< What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+/** per AC rate control related data structure */
+typedef struct wl_txrate_class {
+	uint8		init_rate;
+	uint8		min_rate;
+	uint8		max_rate;
+} wl_txrate_class_t;
+
+/** structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+	int16	passive_dwell;
+	int16	active_dwell;
+	int16	bss_widthscan_interval;
+	int16	passive_total;
+	int16	active_total;
+	int16	chanwidth_transition_delay;
+	int16	activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN	sizeof(wl_obss_scan_arg_t)
+
+/** RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+	uint32 rate_limit_msec;		/**< # of events posted to application will be limited to
+					 * one per specified period (0 to disable rate limit).
+					 */
+	uint8 num_rssi_levels;		/**< Number of entries in rssi_levels[] below */
+	int8 rssi_levels[MAX_RSSI_LEVELS];	/**< Variable number of RSSI levels. An event
+						 * will be posted each time the RSSI of received
+						 * beacons/packets crosses a level.
+						 */
+	int8 pad[3];
+} wl_rssi_event_t;
+
+#define RSSI_MONITOR_VERSION    1
+#define RSSI_MONITOR_STOP       (1 << 0)
+typedef struct wl_rssi_monitor_cfg {
+	uint8 version;
+	uint8 flags;
+	int8 max_rssi;
+	int8 min_rssi;
+} wl_rssi_monitor_cfg_t;
+
+typedef struct wl_rssi_monitor_evt {
+	uint8 version;
+	int8 cur_rssi;
+	uint16 pad;
+} wl_rssi_monitor_evt_t;
+
+/* CCA based channel quality event configuration */
+#define WL_CHAN_QUAL_CCA	0
+#define WL_CHAN_QUAL_NF		1
+#define WL_CHAN_QUAL_NF_LTE	2
+#define WL_CHAN_QUAL_TOTAL	3
+
+#define MAX_CHAN_QUAL_LEVELS	8
+
+typedef struct wl_chan_qual_metric {
+	uint8 id;				/**< metric ID */
+	uint8 num_levels;               	/**< Number of entries in htol[]/ltoh[] below */
+	uint16 flags;
+	int16 htol[MAX_CHAN_QUAL_LEVELS];	/**< threshold level array: hi-to-lo */
+	int16 ltoh[MAX_CHAN_QUAL_LEVELS];	/**< threshold level array: lo-to-hi */
+} wl_chan_qual_metric_t;
+
+typedef struct wl_chan_qual_event {
+	uint32 rate_limit_msec;		/**< # of events posted to application will be limited to
+					 * one per specified period (0 to disable rate limit).
+					 */
+	uint16 flags;
+	uint16 num_metrics;
+	wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL];	/**< metric array */
+} wl_chan_qual_event_t;
+typedef struct wl_action_obss_coex_req {
+	uint8 info;
+	uint8 num;
+	uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+/** IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN  4
+
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 num_addrs;
+	uint8   addr_type[WL_IOV_MAC_PARAM_LEN];
+	struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t;
+#include <packed_section_end.h>
+
+/** This is extra info that follows wl_iov_mac_params_t */
+typedef struct {
+	uint32 addr_info[WL_IOV_MAC_PARAM_LEN];
+} wl_iov_mac_extra_params_t;
+
+/** Combined structure */
+typedef struct {
+	wl_iov_mac_params_t params;
+	wl_iov_mac_extra_params_t extra_params;
+} wl_iov_mac_full_params_t;
+
+/** Parameter block for PKTQ_LOG statistics */
+#define PKTQ_LOG_COUNTERS_V4 \
+	/* packets requested to be stored */ \
+	uint32 requested; \
+	/* packets stored */ \
+	uint32 stored; \
+	/* packets saved, because a lowest priority queue has given away one packet */ \
+	uint32 saved; \
+	/* packets saved, because an older packet from the same queue has been dropped */ \
+	uint32 selfsaved; \
+	/* packets dropped, because pktq is full with higher precedence packets */ \
+	uint32 full_dropped; \
+	 /* packets dropped because pktq per that precedence is full */ \
+	uint32 dropped; \
+	/* packets dropped, in order to save one from a queue of a highest priority */ \
+	uint32 sacrificed; \
+	/* packets dropped because of hardware/transmission error */ \
+	uint32 busy; \
+	/* packets re-sent because they were not received */ \
+	uint32 retry; \
+	/* packets retried again (ps pretend) prior to moving power save mode */ \
+	uint32 ps_retry; \
+	 /* suppressed packet count */ \
+	uint32 suppress; \
+	/* packets finally dropped after retry limit */ \
+	uint32 retry_drop; \
+	/* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \
+	uint32 max_avail; \
+	/* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \
+	uint32 max_used; \
+	 /* the maximum capacity of the queue */ \
+	uint32 queue_capacity; \
+	/* count of rts attempts that failed to receive cts */ \
+	uint32 rtsfail; \
+	/* count of packets sent (acked) successfully */ \
+	uint32 acked; \
+	/* running total of phy rate of packets sent successfully */ \
+	uint32 txrate_succ; \
+	/* running total of phy 'main' rate */ \
+	uint32 txrate_main; \
+	/* actual data transferred successfully */ \
+	uint32 throughput; \
+	/* time difference since last pktq_stats */ \
+	uint32 time_delta;
+
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+} pktq_log_counters_v04_t;
+
+/** v5 is the same as V4 with extra parameter */
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+	/** cumulative time to transmit */
+	uint32 airtime;
+} pktq_log_counters_v05_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v04_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[];
+} pktq_log_format_v04_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v05_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[];
+} pktq_log_format_v05_t;
+
+
+typedef struct {
+	uint32               version;
+	wl_iov_mac_params_t  params;
+	union {
+		pktq_log_format_v04_t v04;
+		pktq_log_format_v05_t v05;
+	} pktq_log;
+} wl_iov_pktq_log_t;
+
+/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */
+#define PKTQ_LOG_AUTO     (1 << 31)
+#define PKTQ_LOG_DEF_PREC (1 << 30)
+
+typedef struct wl_pfn_macaddr_cfg_0 {
+	uint8 version;
+	uint8 reserved;
+	struct ether_addr macaddr;
+} wl_pfn_macaddr_cfg_0_t;
+#define LEGACY1_WL_PFN_MACADDR_CFG_VER 0
+#define WL_PFN_MAC_OUI_ONLY_MASK      1
+#define WL_PFN_SET_MAC_UNASSOC_MASK   2
+#define WL_PFN_RESTRICT_LA_MAC_MASK   4
+#define WL_PFN_MACADDR_FLAG_MASK     0x7
+/** To configure pfn_macaddr */
+typedef struct wl_pfn_macaddr_cfg {
+	uint8 version;
+	uint8 flags;
+	struct ether_addr macaddr;
+} wl_pfn_macaddr_cfg_t;
+#define WL_PFN_MACADDR_CFG_VER 1
+
+/*
+ * SCB_BS_DATA iovar definitions start.
+ */
+#define SCB_BS_DATA_STRUCT_VERSION	1
+
+/** The actual counters maintained for each station */
+typedef struct {
+	/* The following counters are a subset of what pktq_stats provides per precedence. */
+	uint32 retry;          /**< packets re-sent because they were not received */
+	uint32 retry_drop;     /**< packets finally dropped after retry limit */
+	uint32 rtsfail;        /**< count of rts attempts that failed to receive cts */
+	uint32 acked;          /**< count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /**< running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /**< running total of phy 'main' rate */
+	uint32 throughput;     /**< actual data transferred successfully */
+	uint32 time_delta;     /**< time difference since last pktq_stats */
+	uint32 airtime;        /**< cumulative total medium access delay in useconds */
+} iov_bs_data_counters_t;
+
+/** The structure for individual station information. */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_addr	station_address;	/**< The station MAC address */
+	uint16			station_flags;		/**< Bit mask of flags, for future use. */
+	iov_bs_data_counters_t	station_counters;	/**< The actual counter values */
+} BWL_POST_PACKED_STRUCT iov_bs_data_record_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	structure_version;	/**< Structure version number (for wl/wlu matching) */
+	uint16	structure_count;	/**< Number of iov_bs_data_record_t records following */
+	iov_bs_data_record_t	structure_record[1];	/**< 0 - structure_count records */
+} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t;
+#include <packed_section_end.h>
+
+/* Bitmask of options that can be passed in to the iovar. */
+enum {
+	SCB_BS_DATA_FLAG_NO_RESET = (1<<0)	/**< Do not clear the counters after reading */
+};
+/*
+ * SCB_BS_DATA iovar definitions end.
+ */
+
+typedef struct wlc_extlog_cfg {
+	int32 max_number;
+	uint16 module;	/**< bitmap */
+	uint8 level;
+	uint8 flag;
+	uint16 version;
+	uint16 PAD;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+	uint32 time;
+	uint16 module;
+	uint16 id;
+	uint8 level;
+	uint8 sub_unit;
+	uint8 seq_num;
+	uint8 pad;
+	int32 arg;
+	char  str[MAX_ARGSTR_LEN];
+	char  PAD[4-MAX_ARGSTR_LEN%4];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+	uint32 from_last;
+	uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+	uint16 version;
+	uint16 record_len;
+	uint32 num;
+	log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+	uint16	id;
+	uint16	flag;
+	uint8	arg_type;
+	const char	*fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER		1
+
+/* flat ID definitions
+ * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will
+ * affect backward compatibility with pre-existing apps
+ */
+typedef enum {
+	FMTSTR_DRIVER_UP_ID = 0,
+	FMTSTR_DRIVER_DOWN_ID = 1,
+	FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+	FMTSTR_NO_PROGRESS_ID = 3,
+	FMTSTR_RFDISABLE_ID = 4,
+	FMTSTR_REG_PRINT_ID = 5,
+	FMTSTR_EXPTIME_ID = 6,
+	FMTSTR_JOIN_START_ID = 7,
+	FMTSTR_JOIN_COMPLETE_ID = 8,
+	FMTSTR_NO_NETWORKS_ID = 9,
+	FMTSTR_SECURITY_MISMATCH_ID = 10,
+	FMTSTR_RATE_MISMATCH_ID = 11,
+	FMTSTR_AP_PRUNED_ID = 12,
+	FMTSTR_KEY_INSERTED_ID = 13,
+	FMTSTR_DEAUTH_ID = 14,
+	FMTSTR_DISASSOC_ID = 15,
+	FMTSTR_LINK_UP_ID = 16,
+	FMTSTR_LINK_DOWN_ID = 17,
+	FMTSTR_RADIO_HW_OFF_ID = 18,
+	FMTSTR_RADIO_HW_ON_ID = 19,
+	FMTSTR_EVENT_DESC_ID = 20,
+	FMTSTR_PNP_SET_POWER_ID = 21,
+	FMTSTR_RADIO_SW_OFF_ID = 22,
+	FMTSTR_RADIO_SW_ON_ID = 23,
+	FMTSTR_PWD_MISMATCH_ID = 24,
+	FMTSTR_FATAL_ERROR_ID = 25,
+	FMTSTR_AUTH_FAIL_ID = 26,
+	FMTSTR_ASSOC_FAIL_ID = 27,
+	FMTSTR_IBSS_FAIL_ID = 28,
+	FMTSTR_EXTAP_FAIL_ID = 29,
+	FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+/** 11k Neighbor Report element (unversioned, deprecated) */
+typedef struct nbr_element {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;
+	uint8 channel;
+	uint8 phytype;
+	uint8 pad;
+} nbr_element_t;
+
+#define WL_RRM_NBR_RPT_VER		1
+/** 11k Neighbor Report element */
+typedef struct nbr_rpt_elem {
+	uint8 version;
+	uint8 id;
+	uint8 len;
+	uint8 pad;
+	struct ether_addr bssid;
+	uint8 pad_1[2];
+	uint32 bssid_info;
+	uint8 reg;
+	uint8 channel;
+	uint8 phytype;
+	uint8 pad_2;
+	wlc_ssid_t ssid;
+	chanspec_t chanspec;
+	uint8 bss_trans_preference;
+	uint8 flags;
+} nbr_rpt_elem_t;
+
+typedef enum event_msgs_ext_command {
+	EVENTMSGS_NONE		=	0,
+	EVENTMSGS_SET_BIT	=	1,
+	EVENTMSGS_RESET_BIT	=	2,
+	EVENTMSGS_SET_MASK	=	3
+} event_msgs_ext_command_t;
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE	OFFSETOF(eventmsgs_ext_t, mask[0])
+
+/* len - for SET, the mask size passed from the application to the firmware;
+ *       for GET, the actual firmware mask size.
+ * maxgetsize - used only for GET; indicates the maximum mask size that the
+ *              application can read from the firmware.
+ */
+typedef struct eventmsgs_ext
+{
+	uint8	ver;
+	uint8	command;
+	uint8	len;
+	uint8	maxgetsize;
+	uint8	mask[1];
+} eventmsgs_ext_t;
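+/*
+ * Illustrative sketch (not part of the original header): enabling a single
+ * event with the extended event-mask mechanism above.  The 16-byte mask width
+ * and the 'event' number are assumptions for the example.
+ *
+ *	uint8 buf[EVENTMSGS_EXT_STRUCT_SIZE + 16];
+ *	eventmsgs_ext_t *em = (eventmsgs_ext_t *)buf;
+ *	memset(buf, 0, sizeof(buf));
+ *	em->ver = EVENTMSGS_VER;
+ *	em->command = EVENTMSGS_SET_BIT;
+ *	em->len = 16;				// mask bytes that follow
+ *	em->mask[event / 8] |= 1 << (event % 8);
+ *	// pass EVENTMSGS_EXT_STRUCT_SIZE + em->len bytes to the firmware
+ */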
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params {
+	/** no of host dma descriptors programmed by the firmware before a commit */
+	uint16		max_dma_descriptors;
+
+	uint16		host_buf_len; /**< length of host buffer */
+	dmaaddr_t	host_buf_addr; /**< physical address for bus_throughput_buf */
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t;
+#include <packed_section_end.h>
+
+typedef struct pcie_bus_tput_stats {
+	uint16		time_taken; /**< number of seconds the test was run */
+	uint16		nbytes_per_descriptor; /**< number of bytes of data DMA'ed per descriptor */
+
+	/** number of descriptors for which DMA completed successfully within the test time */
+	uint32		count;
+} pcie_bus_tput_stats_t;
+
+typedef struct keepalives_max_idle {
+	uint16  keepalive_count;        /**< number of keepalives per bss_max_idle period */
+	uint8   mkeepalive_index;       /**< mkeepalive_index for keepalive frame to be used */
+	uint8   PAD;			/**< to align next field */
+	uint16  max_interval;           /**< seconds */
+} keepalives_max_idle_t;
+
+#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0)
+#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1)
+
+/* ##### Power Stats section ##### */
+
+#define WL_PWRSTATS_VERSION	2
+
+/** Input structure for pwrstats IOVAR */
+typedef struct wl_pwrstats_query {
+	uint16 length;		/**< Number of entries in type array. */
+	uint16 type[1];		/**< Types (tags) to retrieve.
+				 * Length 0 (no types) means get all.
+				 */
+} wl_pwrstats_query_t;
+
+/** This structure is for version 2; version 1 will be deprecated by FW */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats {
+	uint16 version;		      /**< Version = 2 is TLV format */
+	uint16 length;		      /**< Length of entire structure */
+	uint8 data[1];		      /**< TLV data, a series of structures,
+				       * each starting with type and length.
+				       *
+				       * Padded as necessary so each section
+				       * starts on a 4-byte boundary.
+				       *
+				       * Both type and len are uint16, but the
+				       * upper nibble of length is reserved so
+				       * valid len values are 0-4095.
+				       */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_t;
+#include <packed_section_end.h>
+#define WL_PWR_STATS_HDRLEN	OFFSETOF(wl_pwrstats_t, data)
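+/*
+ * Illustrative sketch (not part of the original header): walking the TLV
+ * records returned in wl_pwrstats_t.data[].  Each record starts with a uint16
+ * type and uint16 len and is padded so the next record begins on a 4-byte
+ * boundary, per the structure comment above.  Whether 'len' covers the
+ * type/len header itself is not spelled out here, so that detail is an
+ * assumption in the sketch.
+ *
+ *	uint8 *p = stats->data;
+ *	uint8 *end = (uint8 *)stats + stats->length;
+ *	while (p + 2 * sizeof(uint16) <= end) {
+ *		uint16 type = ((uint16 *)p)[0];
+ *		uint16 len = ((uint16 *)p)[1] & 0x0fff;	// upper nibble reserved
+ *		// ... dispatch on 'type' ...
+ *		p += (len + 3) & ~3u;			// assuming len = whole record
+ *	}
+ */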
+
+/* Bits for wake reasons */
+#define WLC_PMD_WAKE_SET		0x1
+#define WLC_PMD_PM_AWAKE_BCN		0x2
+/* BIT:3 is no longer being used */
+#define WLC_PMD_SCAN_IN_PROGRESS	0x8
+#define WLC_PMD_RM_IN_PROGRESS		0x10
+#define WLC_PMD_AS_IN_PROGRESS		0x20
+#define WLC_PMD_PM_PEND			0x40
+#define WLC_PMD_PS_POLL			0x80
+#define WLC_PMD_CHK_UNALIGN_TBTT	0x100
+#define WLC_PMD_APSD_STA_UP		0x200
+#define WLC_PMD_TX_PEND_WAR		0x400   /* obsolete, can be reused */
+#define WLC_PMD_GPTIMER_STAY_AWAKE	0x800
+#define WLC_PMD_PM2_RADIO_SOFF_PEND	0x2000
+#define WLC_PMD_NON_PRIM_STA_UP		0x4000
+#define WLC_PMD_AP_UP			0x8000
+
+typedef struct wlc_pm_debug {
+	uint32 timestamp;	     /**< timestamp in millisecond */
+	uint32 reason;		     /**< reason(s) for staying awake */
+} wlc_pm_debug_t;
+
+/** WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */
+#define WLC_STA_AWAKE_STATES_MAX_V1	30
+#define WLC_PMD_EVENT_MAX_V1		32
+/** Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 {
+	uint32 curr_time;	/**< ms */
+	uint32 hw_macc;		/**< HW maccontrol */
+	uint32 sw_macc;		/**< SW maccontrol */
+	uint32 pm_dur;		/**< Total sleep time in PM, msecs */
+	uint32 mpc_dur;		/**< Total sleep time in MPC, msecs */
+
+	/* int32 drifts = remote - local; +ve drift => local-clk slow */
+	int32 last_drift;	/**< Most recent TSF drift from beacon */
+	int32 min_drift;	/**< Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/**< Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/**< Avg TSF drift from beacon */
+
+	/* Wake history tracking */
+	uint8  pmwake_idx;				   /**< for stepping through pm_state */
+	wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /**< timestamped wake bits */
+	uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1];   /**< cumulative usecs per wake reason */
+	uint32 drift_cnt;	/**< Count of drift readings over which avg_drift was computed */
+} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 {
+	uint16 type;	     /**< WL_PWRSTATS_TYPE_PM_AWAKE */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+
+	pm_awake_data_v1_t awake_data;
+	uint32 frts_time;	/**< Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/**< No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t;
+#include <packed_section_end.h>
+
+/** WL_PWRSTATS_TYPE_PM_AWAKE2 structures. Data sent as part of pwrstats IOVAR */
+typedef struct pm_awake_data_v2 {
+	uint32 curr_time;	/**< ms */
+	uint32 hw_macc;		/**< HW maccontrol */
+	uint32 sw_macc;		/**< SW maccontrol */
+	uint32 pm_dur;		/**< Total sleep time in PM, msecs */
+	uint32 mpc_dur;		/**< Total sleep time in MPC, msecs */
+
+	/* int32 drifts = remote - local; +ve drift => local-clk slow */
+	int32 last_drift;	/**< Most recent TSF drift from beacon */
+	int32 min_drift;	/**< Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/**< Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/**< Avg TSF drift from beacon */
+
+	/* Wake history tracking */
+
+	/* pmstate array (type wlc_pm_debug_t) start offset */
+	uint16 pm_state_offset;
+	/** pmstate number of array entries */
+	uint16 pm_state_len;
+
+	/** array (type uint32) start offset */
+	uint16 pmd_event_wake_dur_offset;
+	/** pmd_event_wake_dur number of array entries */
+	uint16 pmd_event_wake_dur_len;
+
+	uint32 drift_cnt;	/**< Count of drift readings over which avg_drift was computed */
+	uint8  pmwake_idx;	/**< for stepping through pm_state */
+	uint8  flags;		/**< bit0: 1-sleep, 0-wake. bit1: 0-bit0 invalid, 1-bit0 valid */
+	uint8  pad[2];
+	uint32 frts_time;	/**< Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/**< No of times frts ended since driver load */
+} pm_awake_data_v2_t;
+
+typedef struct wl_pwr_pm_awake_stats_v2 {
+	uint16 type;	     /**< WL_PWRSTATS_TYPE_PM_AWAKE */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+
+	pm_awake_data_v2_t awake_data;
+} wl_pwr_pm_awake_stats_v2_t;
+
+/* bit0: 1-sleep, 0-wake. bit1: 0-bit0 invalid, 1-bit0 valid */
+#define WL_PWR_PM_AWAKE_STATS_WAKE      0x02
+#define WL_PWR_PM_AWAKE_STATS_ASLEEP    0x03
+#define WL_PWR_PM_AWAKE_STATS_WAKE_MASK 0x03
+
+/* WL_PWRSTATS_TYPE_PM_AWAKE Version 2 structures taken from 4324/43342 */
+/* These structures are only to be used with 4324/43342 devices */
+
+#define WL_STA_AWAKE_STATES_MAX_V2	30
+#define WL_PMD_EVENT_MAX_V2		32
+#define MAX_P2P_BSS_DTIM_PRD		4
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct ucode_dbg_v2 {
+	uint32 macctrl;
+	uint16 m_p2p_hps;
+	uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+	uint32 psmdebug[20];
+	uint32 phydebug[20];
+	uint32 psm_brc;
+	uint32 ifsstat;
+} BWL_POST_PACKED_STRUCT ucode_dbg_v2_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pmalert_awake_data_v2 {
+	uint32 curr_time;	/* ms */
+	uint32 hw_macc;		/* HW maccontrol */
+	uint32 sw_macc;		/* SW maccontrol */
+	uint32 pm_dur;		/* Total sleep time in PM, msecs */
+	uint32 mpc_dur;		/* Total sleep time in MPC, msecs */
+
+	/* int32 drifts = remote - local; +ve drift => local-clk slow */
+	int32 last_drift;	/* Most recent TSF drift from beacon */
+	int32 min_drift;	/* Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/* Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/* Avg TSF drift from beacon */
+
+	/* Wake history tracking */
+	uint8  pmwake_idx;				   /* for stepping through pm_state */
+	wlc_pm_debug_t pm_state[WL_STA_AWAKE_STATES_MAX_V2]; /* timestamped wake bits */
+	uint32 pmd_event_wake_dur[WL_PMD_EVENT_MAX_V2];      /* cumulative usecs per wake reason */
+	uint32 drift_cnt;	/* Count of drift readings over which avg_drift was computed */
+	uint32	start_event_dur[WL_PMD_EVENT_MAX_V2]; /* start event-duration */
+	ucode_dbg_v2_t ud;
+	uint32 frts_time;	/* Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT pmalert_awake_data_v2_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pm_alert_data_v2 {
+	uint32 version;
+	uint32 length; /* Length of entire structure */
+	uint32 reasons; /* reason(s) for pm_alert */
+	/* Following fields are present only for reasons
+	 * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED
+	 */
+	uint32 prev_stats_time;	/* msecs */
+	uint32 prev_pm_dur;	/* msecs */
+	uint32 prev_mpc_dur;	/* msecs */
+	pmalert_awake_data_v2_t awake_data;
+} BWL_POST_PACKED_STRUCT pm_alert_data_v2_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_status_v2 {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_PM_AWAKE */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	pmalert_awake_data_v2_t awake_data;
+	uint32 frts_time;	/* Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_status_v2_t;
+#include <packed_section_end.h>
+
+/* Below are latest definitions from PHO25178RC100_BRANCH_6_50 */
+/* wl_pwr_pm_awake_stats_v1_t is used for WL_PWRSTATS_TYPE_PM_AWAKE */
+/* (at least) the chip independent registers */
+typedef struct ucode_dbg_ext {
+	uint32 x120;
+	uint32 x124;
+	uint32 x154;
+	uint32 x158;
+	uint32 x15c;
+	uint32 x180;
+	uint32 x184;
+	uint32 x188;
+	uint32 x18c;
+	uint32 x1a0;
+	uint32 x1a8;
+	uint32 x1e0;
+	uint32 scr_x14;
+	uint32 scr_x2b;
+	uint32 scr_x2c;
+	uint32 scr_x2d;
+	uint32 scr_x2e;
+
+	uint16 x40a;
+	uint16 x480;
+	uint16 x490;
+	uint16 x492;
+	uint16 x4d8;
+	uint16 x4b8;
+	uint16 x4ba;
+	uint16 x4bc;
+	uint16 x4be;
+	uint16 x500;
+	uint16 x50e;
+	uint16 x522;
+	uint16 x546;
+	uint16 x578;
+	uint16 x602;
+	uint16 x646;
+	uint16 x648;
+	uint16 x666;
+	uint16 x670;
+	uint16 x690;
+	uint16 x692;
+	uint16 x6a0;
+	uint16 x6a2;
+	uint16 x6a4;
+	uint16 x6b2;
+	uint16 x7c0;
+
+	uint16 shm_x20;
+	uint16 shm_x4a;
+	uint16 shm_x5e;
+	uint16 shm_x5f;
+	uint16 shm_xaab;
+	uint16 shm_x74a;
+	uint16 shm_x74b;
+	uint16 shm_x74c;
+	uint16 shm_x74e;
+	uint16 shm_x756;
+	uint16 shm_x75b;
+	uint16 shm_x7b9;
+	uint16 shm_x7d4;
+
+	uint16 shm_P2P_HPS;
+	uint16 shm_P2P_intr[16];
+	uint16 shm_P2P_perbss[48];
+} ucode_dbg_ext_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct pm_alert_data_v1 {
+	uint32 version;
+	uint32 length; /**< Length of entire structure */
+	uint32 reasons; /**< reason(s) for pm_alert */
+	/* Following fields are present only for reasons
+	 * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED
+	 */
+	uint32 prev_stats_time;	/**< msecs */
+	uint32 prev_pm_dur;	/**< msecs */
+	uint32 prev_mpc_dur;	/**< msecs */
+	pm_awake_data_v1_t awake_data;
+	uint32	start_event_dur[WLC_PMD_EVENT_MAX_V1]; /**< start event-duration */
+	ucode_dbg_v2_t ud;
+	uint32 frts_time;	/**< Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/**< No of times frts ended since driver load */
+	ucode_dbg_ext_t ud_ext;
+	uint32 prev_frts_dur; /**< ms */
+} BWL_POST_PACKED_STRUCT pm_alert_data_v1_t;
+#include <packed_section_end.h>
+
+/* End of 43342/4324 v2 structure definitions */
+
+/* Original bus structure is for HSIC */
+
+typedef struct bus_metrics {
+	uint32 suspend_ct;	/**< suspend count */
+	uint32 resume_ct;	/**< resume count */
+	uint32 disconnect_ct;	/**< disconnect count */
+	uint32 reconnect_ct;	/**< reconnect count */
+	uint32 active_dur;	/**< msecs in bus, usecs for user */
+	uint32 suspend_dur;	/**< msecs in bus, usecs for user */
+	uint32 disconnect_dur;	/**< msecs in bus, usecs for user */
+} bus_metrics_t;
+
+/** Bus interface info for USB/HSIC */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats {
+	uint16 type;	     /**< WL_PWRSTATS_TYPE_USB_HSIC */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+
+	bus_metrics_t hsic;	/**< stats from hsic bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t;
+#include <packed_section_end.h>
+
+typedef struct pcie_bus_metrics {
+	uint32 d3_suspend_ct;	/**< suspend count */
+	uint32 d0_resume_ct;	/**< resume count */
+	uint32 perst_assrt_ct;	/**< PERST# assert count */
+	uint32 perst_deassrt_ct;	/**< PERST# de-assert count */
+	uint32 active_dur;	/**< msecs */
+	uint32 d3_suspend_dur;	/**< msecs */
+	uint32 perst_dur;	/**< msecs */
+	uint32 l0_cnt;		/**< L0 entry count */
+	uint32 l0_usecs;	/**< L0 duration in usecs */
+	uint32 l1_cnt;		/**< L1 entry count */
+	uint32 l1_usecs;	/**< L1 duration in usecs */
+	uint32 l1_1_cnt;	/**< L1_1ss entry count */
+	uint32 l1_1_usecs;	/**< L1_1ss duration in usecs */
+	uint32 l1_2_cnt;	/**< L1_2ss entry count */
+	uint32 l1_2_usecs;	/**< L1_2ss duration in usecs */
+	uint32 l2_cnt;		/**< L2 entry count */
+	uint32 l2_usecs;	/**< L2 duration in usecs */
+	uint32 timestamp;	/**< Timestamp on when stats are collected */
+	uint32 num_h2d_doorbell;	/**< # of doorbell interrupts - h2d */
+	uint32 num_d2h_doorbell;	/**< # of doorbell interrupts - d2h */
+	uint32 num_submissions; /**< # of submissions */
+	uint32 num_completions; /**< # of completions */
+	uint32 num_rxcmplt;	/**< # of rx completions */
+	uint32 num_rxcmplt_drbl;	/**< of drbl interrupts for rx complt. */
+	uint32 num_txstatus;	/**< # of tx completions */
+	uint32 num_txstatus_drbl;	/**< of drbl interrupts for tx complt. */
+	uint32 deepsleep_count; /**< # of times chip went to deepsleep */
+	uint32 deepsleep_dur;   /**< # of msecs chip was in deepsleep */
+	uint32 ltr_active_ct;	/**< # of times chip went to LTR ACTIVE */
+	uint32 ltr_active_dur;	/**< # of msecs chip was in LTR ACTIVE */
+	uint32 ltr_sleep_ct;	/**< # of times chip went to LTR SLEEP */
+	uint32 ltr_sleep_dur;	/**< # of msecs chip was in LTR SLEEP */
+} pcie_bus_metrics_t;
+
+/** Bus interface info for PCIE */
+typedef struct wl_pwr_pcie_stats {
+	uint16 type;	     /**< WL_PWRSTATS_TYPE_PCIE */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+	pcie_bus_metrics_t pcie;	/**< stats from pcie bus driver */
+} wl_pwr_pcie_stats_t;
+
+/** Scan information history per category */
+typedef struct scan_data {
+	uint32 count;		/**< Number of scans performed */
+	uint32 dur;		/**< Total time (in us) used */
+} scan_data_t;
+
+typedef struct wl_pwr_scan_stats {
+	uint16 type;	     /**< WL_PWRSTATS_TYPE_SCAN */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+
+	/* Scan history */
+	scan_data_t user_scans;	  /**< User-requested scans: (i/e/p)scan */
+	scan_data_t assoc_scans;  /**< Scans initiated by association requests */
+	scan_data_t roam_scans;	  /**< Scans initiated by the roam engine */
+	scan_data_t pno_scans[8]; /**< For future PNO bucketing (BSSID, SSID, etc) */
+	scan_data_t other_scans;  /**< Scan engine usage not assigned to the above */
+} wl_pwr_scan_stats_t;
+
+typedef struct wl_pwr_connect_stats {
+	uint16 type;	     /**< WL_PWRSTATS_TYPE_SCAN */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+
+	/* Connection (Association + Key exchange) data */
+	uint32 count;	/**< Number of connections performed */
+	uint32 dur;		/**< Total time (in ms) used */
+} wl_pwr_connect_stats_t;
+
+typedef struct wl_pwr_phy_stats {
+	uint16 type;	    /**< WL_PWRSTATS_TYPE_PHY */
+	uint16 len;	    /**< Up to 4K-1, top 4 bits are reserved */
+	uint32 tx_dur;	    /**< TX Active duration in us */
+	uint32 rx_dur;	    /**< RX Active duration in us */
+} wl_pwr_phy_stats_t;
+
+
+typedef struct wl_mimo_meas_metrics_v1 {
+	uint16 type;
+	uint16 len;
+	/* Total time(us) idle in MIMO RX chain configuration */
+	uint32 total_idle_time_mimo;
+	/* Total time(us) idle in SISO  RX chain configuration */
+	uint32 total_idle_time_siso;
+	/* Total receive time (us) in SISO RX chain configuration */
+	uint32 total_rx_time_siso;
+	/* Total receive time (us) in MIMO RX chain configuration */
+	uint32 total_rx_time_mimo;
+	/* Total 1-chain transmit time(us) */
+	uint32 total_tx_time_1chain;
+	/* Total 2-chain transmit time(us) */
+	uint32 total_tx_time_2chain;
+	/* Total 3-chain transmit time(us) */
+	uint32 total_tx_time_3chain;
+} wl_mimo_meas_metrics_v1_t;
+
+typedef struct wl_mimo_meas_metrics {
+	uint16 type;
+	uint16 len;
+	/* Total time(us) idle in MIMO RX chain configuration */
+	uint32 total_idle_time_mimo;
+	/* Total time(us) idle in SISO  RX chain configuration */
+	uint32 total_idle_time_siso;
+	/* Total receive time (us) in SISO RX chain configuration */
+	uint32 total_rx_time_siso;
+	/* Total receive time (us) in MIMO RX chain configuration */
+	uint32 total_rx_time_mimo;
+	/* Total 1-chain transmit time(us) */
+	uint32 total_tx_time_1chain;
+	/* Total 2-chain transmit time(us) */
+	uint32 total_tx_time_2chain;
+	/* Total 3-chain transmit time(us) */
+	uint32 total_tx_time_3chain;
+	/* End of original, OCL fields start here */
+	/* Total time(us) idle in ocl mode */
+	uint32 total_idle_time_ocl;
+	/* Total receive time (us) in ocl mode */
+	uint32 total_rx_time_ocl;
+	/* End of OCL fields, internal adjustment fields here */
+	/* Total SIFS idle time in MIMO mode */
+	uint32 total_sifs_time_mimo;
+	/* Total SIFS idle time in SISO mode */
+	uint32 total_sifs_time_siso;
+} wl_mimo_meas_metrics_t;
+/* ##### End of Power Stats section ##### */
+
+/** IPV4 Arp offloads for ndis context */
+#include <packed_section_start.h>
+BWL_PRE_PACKED_STRUCT struct hostip_id {
+	struct ipv4_addr ipa;
+	uint8 id;
+} BWL_POST_PACKED_STRUCT;
+#include <packed_section_end.h>
+
+/* Return values */
+#define ND_REPLY_PEER		0x1	/**< Reply was sent to service NS request from peer */
+#define ND_REQ_SINK		0x2	/**< Input packet should be discarded */
+#define ND_FORCE_FORWARD	0x3	/**< For the dongle to forward req to HOST */
+
+/** Neighbor Solicitation Response Offload IOVAR param */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct nd_param {
+	struct ipv6_addr	host_ip[2];
+	struct ipv6_addr	solicit_ip;
+	struct ipv6_addr	remote_ip;
+	uint8	host_mac[ETHER_ADDR_LEN];
+	uint32	offload_id;
+} BWL_POST_PACKED_STRUCT nd_param_t;
+#include <packed_section_end.h>
+
+typedef struct wl_pfn_roam_thresh {
+	uint32 pfn_alert_thresh; /**< time in ms */
+	uint32 roam_alert_thresh; /**< time in ms */
+} wl_pfn_roam_thresh_t;
+
+
+/* Reasons for wl_pmalert_t */
+#define PM_DUR_EXCEEDED			(1<<0)
+#define MPC_DUR_EXCEEDED		(1<<1)
+#define ROAM_ALERT_THRESH_EXCEEDED	(1<<2)
+#define PFN_ALERT_THRESH_EXCEEDED	(1<<3)
+#define CONST_AWAKE_DUR_ALERT		(1<<4)
+#define CONST_AWAKE_DUR_RECOVERY	(1<<5)
+
+#define MIN_PM_ALERT_LEN 9
+
+/** Data sent in EXCESS_PM_WAKE event */
+#define WL_PM_ALERT_VERSION 3
+
+/** This structure is for version 3; version 2 will be deprecated by FW */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert {
+	uint16 version;		/**< Version = 3 is TLV format */
+	uint16 length;		/**< Length of entire structure */
+	uint32 reasons;		/**< reason(s) for pm_alert */
+	uint8 data[1];		/**< TLV data, a series of structures,
+				 * each starting with type and length.
+				 *
+				 * Padded as necessary so each section
+				 * starts on a 4-byte boundary.
+				 *
+				 * Both type and len are uint16, but the
+				 * upper nibble of length is reserved so
+				 * valid len values are 0-4095.
+				*/
+} BWL_POST_PACKED_STRUCT wl_pmalert_t;
+#include <packed_section_end.h>
+
+/* Type values for the data section */
+#define WL_PMALERT_FIXED	0	/**< struct wl_pmalert_fixed_t, fixed fields */
+#define WL_PMALERT_PMSTATE	1	/**< struct wl_pmalert_pmstate_t, variable */
+#define WL_PMALERT_EVENT_DUR	2	/**< struct wl_pmalert_event_dur_t, variable */
+#define WL_PMALERT_UCODE_DBG	3	/**< struct wl_pmalert_ucode_dbg_v1, variable */
+#define WL_PMALERT_PS_ALLOWED_HIST	4 /**< struct wl_pmalert_ps_allowed_history, variable */
+#define WL_PMALERT_EXT_UCODE_DBG	5 /**< struct wl_pmalert_ext_ucode_dbg_t, variable */
+#define WL_PMALERT_EPM_START_EVENT_DUR	6 /**< struct wl_pmalert_event_dur_t, variable */
+#define WL_PMALERT_UCODE_DBG_V2		7 /**< struct wl_pmalert_ucode_dbg_v2, variable */
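+
+/* Usage sketch (illustrative): walking the TLV sections carried in wl_pmalert_t.data.
+ * Assumption: each section's len field covers the whole section, including its own
+ * type/len header.
+ */
+#if 0
+static void walk_pmalert_tlvs(const wl_pmalert_t *alert)
+{
+	const uint8 *p = alert->data;
+	const uint8 *end = (const uint8 *)alert + alert->length;
+
+	while (p + 2 * sizeof(uint16) <= end) {
+		uint16 type = ((const uint16 *)p)[0];
+		uint16 len = ((const uint16 *)p)[1] & 0x0fff;	/* upper nibble is reserved */
+
+		if (len < 2 * sizeof(uint16))
+			break;			/* malformed section; stop parsing */
+		(void)type;			/* dispatch on type: WL_PMALERT_FIXED, ... */
+		p += (len + 3) & ~3u;		/* sections start on 4-byte boundaries */
+	}
+}
+#endif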
+
+typedef struct wl_pmalert_fixed {
+	uint16 type;		/**< WL_PMALERT_FIXED */
+	uint16 len;		/**< Up to 4K-1, top 4 bits are reserved */
+	uint32 prev_stats_time;	/**< msecs */
+	uint32 curr_time;	/**< ms */
+	uint32 prev_pm_dur;	/**< msecs */
+	uint32 pm_dur;		/**< Total sleep time in PM, msecs */
+	uint32 prev_mpc_dur;	/**< msecs */
+	uint32 mpc_dur;		/**< Total sleep time in MPC, msecs */
+	uint32 hw_macc;		/**< HW maccontrol */
+	uint32 sw_macc;		/**< SW maccontrol */
+
+	/* int32 drifts = remote - local; +ve drift -> local-clk slow */
+	int32 last_drift;	/**< Most recent TSF drift from beacon */
+	int32 min_drift;	/**< Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/**< Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/**< Avg TSF drift from beacon */
+	uint32 drift_cnt;	/**< Count of drift readings over which avg_drift was computed */
+	uint32 frts_time;	/**< Cumulative ms spent in data frts since driver load */
+	uint32 frts_end_cnt;	/**< No of times frts ended since driver load */
+	uint32 prev_frts_dur;	/**< Data frts duration at start of pm-period */
+	uint32 cal_dur;		/**< Cumulative ms spent in calibration */
+	uint32 prev_cal_dur;	/**< cal duration at start of pm-period */
+} wl_pmalert_fixed_t;
+
+typedef struct wl_pmalert_pmstate {
+	uint16 type;	     /**< WL_PMALERT_PMSTATE */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+
+	uint8 pmwake_idx;   /**< for stepping through pm_state */
+	uint8 pad[3];
+	/* Array of pmstate; len of array is based on tlv len */
+	wlc_pm_debug_t pmstate[1];
+} wl_pmalert_pmstate_t;
+
+typedef struct wl_pmalert_event_dur {
+	uint16 type;	     /**< WL_PMALERT_EVENT_DUR */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+
+	/* Array of event_dur, len of array is based on tlv len */
+	uint32 event_dur[1];
+} wl_pmalert_event_dur_t;
+
+#include <packed_section_start.h>
+BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v1 {
+	uint16 type;         /* WL_PMALERT_UCODE_DBG */
+	uint16 len;      /* Up to 4K-1, top 4 bits are reserved */
+	uint32 macctrl;
+	uint16 m_p2p_hps;
+	uint32 psm_brc;
+	uint32 ifsstat;
+	uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+	uint32 psmdebug[20];
+	uint32 phydebug[20];
+	uint16 M_P2P_BSS[3][12];
+	uint16 M_P2P_PRE_TBTT[3];
+
+	/* Following is valid only for corerevs<40 */
+	uint16 xmtfifordy;
+
+	/* Following 3 are valid only for 11ac corerevs (>=40) */
+	uint16 psm_maccommand;
+	uint16 txe_status1;
+	uint16 AQMFifoReady;
+} BWL_POST_PACKED_STRUCT;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v2 {
+	uint16 type;	     /**< WL_PMALERT_UCODE_DBG_V2 */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+	uint32 macctrl;
+	uint16 m_p2p_hps;
+	uint32 psm_brc;
+	uint32 ifsstat;
+	uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+	uint32 psmdebug[20];
+	uint32 phydebug[20];
+	uint16 M_P2P_BSS[3][12];
+	uint16 M_P2P_PRE_TBTT[3];
+
+	/* Following is valid only for corerevs<40 */
+	uint16 xmtfifordy;
+
+	/* Following 3 are valid only for 11ac corerevs (>=40) */
+	uint16 psm_maccommand;
+	uint16 txe_status1;
+	uint32 AQMFifoReady;
+} BWL_POST_PACKED_STRUCT;
+#include <packed_section_end.h>
+
+typedef struct wlc_ps_debug {
+	uint32 timestamp;	     /**< timestamp in millisecond */
+	uint32 ps_mask;		     /**< reason(s) for disallowing ps */
+} wlc_ps_debug_t;
+
+typedef struct wl_pmalert_ps_allowed_hist {
+	uint16 type;	     /**< WL_PMALERT_PS_ALLOWED_HIST */
+	uint16 len;	     /**< Up to 4K-1, top 4 bits are reserved */
+	uint32 ps_allowed_start_idx;
+	/* Array of ps_debug, len of array is based on tlv len */
+	wlc_ps_debug_t ps_debug[1];
+} wl_pmalert_ps_allowed_hist_t;
+
+/* Structures and constants used for "vndr_ie" IOVar interface */
+#define VNDR_IE_CMD_LEN		4	/**< length of the set command string:
+					 * "add", "del" (+ NUL)
+					 */
+
+#define VNDR_IE_INFO_HDR_LEN	(sizeof(uint32))
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/**< bitmask indicating which packet(s) contain this IE */
+	vndr_ie_t vndr_ie_data;		/**< vendor IE data */
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int32 iecount;			/**< number of entries in the vndr_ie_list[] array */
+	vndr_ie_info_t vndr_ie_list[1];	/**< variable size list of vndr_ie_info_t structs */
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/**< vndr_ie IOVar set command : "add", "del" + NUL */
+	vndr_ie_buf_t vndr_ie_buffer;	/**< buffer containing Vendor IE list information */
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
+#include <packed_section_end.h>
+
+/** tag_ID/length/value_buffer tuple */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT tlv_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/**< bitmask indicating which packet(s) contain this IE */
+	tlv_t ie_data;		/**< IE data */
+} BWL_POST_PACKED_STRUCT ie_info_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int32 iecount;			/**< number of entries in the ie_list[] array */
+	ie_info_t ie_list[1];	/**< variable size list of ie_info_t structs */
+} BWL_POST_PACKED_STRUCT ie_buf_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/**< ie IOVar set command : "add" + NUL */
+	ie_buf_t ie_buffer;	/**< buffer containing IE list information */
+} BWL_POST_PACKED_STRUCT ie_setbuf_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;		/**< bitmask indicating which packet(s) contain this IE */
+	uint8 id;		/**< IE type */
+} BWL_POST_PACKED_STRUCT ie_getbuf_t;
+#include <packed_section_end.h>
+
+/* structures used to define format of wps ie data from probe requests */
+/* passed up to applications via iovar "prbreq_wpsie" */
+typedef struct sta_prbreq_wps_ie_hdr {
+	struct ether_addr staAddr;
+	uint16 ieLen;
+} sta_prbreq_wps_ie_hdr_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+	sta_prbreq_wps_ie_hdr_t hdr;
+	uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+	uint32 totLen;
+	uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 flags;
+	chanspec_t chanspec;			/**< txpwr report for this channel */
+	chanspec_t local_chanspec;		/**< channel on which we are associated */
+	uint8 local_max;			/**< local max according to the AP */
+	uint8 local_constraint;			/**< local constraint according to the AP */
+	int8  antgain[2];			/**< Ant gain for each band - from SROM */
+	uint8 rf_cores;				/**< count of RF Cores being reported */
+	uint8 est_Pout[4];			/**< Latest tx power out estimate per RF chain */
+	uint8 est_Pout_act[4]; /**< Latest tx power out estimate per RF chain w/o adjustment */
+	uint8 est_Pout_cck;			/**< Latest CCK tx power out estimate */
+	uint8 tx_power_max[4];			/**< Maximum target power among all rates */
+	uint32 tx_power_max_rate_ind[4];  /**< Index of the rate with the max target power */
+	int8 sar;				/**< SAR limit for display by wl executable */
+	int8 channel_bandwidth;		/**< 20, 40 or 80 MHz bandwidth? */
+	uint8 version;				/**< Version of the data format wlu <--> driver */
+	uint8 display_core;			/**< Displayed curpower core */
+	int8 target_offsets[4];		/**< Target power offsets for current rate per core */
+	uint32 last_tx_ratespec;	/**< Ratespec for last transmission */
+	uint32 user_target;		/**< user limit */
+	uint32 ppr_len;		/**< length of each ppr serialization buffer */
+	int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+	uint8  pprdata[1];		/**< ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ipv4_addr	ipv4_addr;
+	struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 num_entry;
+	ibss_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
+#include <packed_section_end.h>
+
+#define MAX_IBSS_ROUTE_TBL_ENTRY	64
+
+#define TXPWR_TARGET_VERSION  0
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int32 version;		/**< version number */
+	chanspec_t chanspec;	/**< txpwr report for this channel */
+	int8 txpwr[WL_STA_ANT_MAX]; /**< Max tx target power, in qdb */
+	uint8 rf_cores;		/**< count of RF Cores being reported */
+} BWL_POST_PACKED_STRUCT txpwr_target_max_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_INFO_PARAM_CUR_VER	0
+/** Input structure for IOV_BSS_PEER_INFO */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT	struct {
+	uint16			version;
+	struct	ether_addr ea;	/**< peer MAC address */
+} BWL_POST_PACKED_STRUCT bss_peer_info_param_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_INFO_CUR_VER		0
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	struct ether_addr	ea;
+	int32			rssi;
+	uint32			tx_rate;	/**< current tx rate */
+	uint32			rx_rate;	/**< current rx rate */
+	wl_rateset_t		rateset;	/**< rateset in use */
+	uint32			age;		/**< age in seconds */
+} BWL_POST_PACKED_STRUCT bss_peer_info_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_LIST_INFO_CUR_VER	0
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	uint16			bss_peer_info_len;	/**< length of bss_peer_info_t */
+	uint32			count;			/**< number of peer info */
+	bss_peer_info_t		peer_info[1];		/**< peer info */
+} BWL_POST_PACKED_STRUCT bss_peer_list_info_t;
+#include <packed_section_end.h>
+
+#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info)
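+
+/* Usage sketch (illustrative): iterating the returned peer list. Assumption: peer
+ * entries are packed back to back, each bss_peer_info_len bytes long.
+ */
+#if 0
+static void walk_bss_peer_list(const bss_peer_list_info_t *info)
+{
+	const uint8 *p = (const uint8 *)&info->peer_info[0];
+	uint32 i;
+
+	for (i = 0; i < info->count; i++) {
+		const bss_peer_info_t *peer = (const bss_peer_info_t *)p;
+
+		(void)peer->rssi;		/* per-peer RSSI, rates, age, ... */
+		p += info->bss_peer_info_len;	/* stride comes from the response itself */
+	}
+}
+#endif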
+
+#define AIBSS_BCN_FORCE_CONFIG_VER_0	0
+
+/** structure used to configure AIBSS beacon force xmit */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16	len;
+	uint32 initial_min_bcn_dur;	/**< dur in ms to check a bcn in bcn_flood period */
+	uint32 min_bcn_dur;	/**< dur in ms to check a bcn after bcn_flood period */
+	uint32 bcn_flood_dur; /**< Initial bcn xmit period in ms */
+} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t;
+#include <packed_section_end.h>
+
+#define AIBSS_TXFAIL_CONFIG_VER_0    0
+#define AIBSS_TXFAIL_CONFIG_VER_1    1
+#define AIBSS_TXFAIL_CONFIG_CUR_VER		AIBSS_TXFAIL_CONFIG_VER_1
+
+/** structure used to configure aibss tx fail event */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16  len;
+	uint32 bcn_timeout;     /**< dur in seconds to receive 1 bcn */
+	uint32 max_tx_retry;     /**< no of consecutive no acks to send txfail event */
+	uint32 max_atim_failure; /**< no of consecutive atim failure */
+} BWL_POST_PACKED_STRUCT aibss_txfail_config_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if {
+	uint16 version;
+	uint16 len;
+	uint32 flags;
+	struct ether_addr addr;
+	chanspec_t chspec;
+} BWL_POST_PACKED_STRUCT wl_aibss_if_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry {
+	struct ipv4_addr ip_addr;
+	struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl {
+	uint32 num_entry;
+	wlc_ipfo_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t;
+#include <packed_section_end.h>
+
+/* Version of wlc_btc_stats_t structure.
+ * Increment whenever a change is made to wlc_btc_stats_t
+ */
+#define BTCX_STATS_VER   2
+
+typedef struct wlc_btc_stats {
+	uint16 version; /* version number of struct */
+	uint16 valid; /* Size of this struct */
+	uint32 stats_update_timestamp;	/* tStamp when data is updated. */
+	uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */
+	uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */
+	uint32 bt_req_cnt; /* #BT antenna requests since last stats sample */
+	uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */
+	uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */
+	uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */
+	uint16 bt_rxf1ovfl_cnt; /* #Times PSNULL retry count exceeded since WL up */
+	uint16 bt_latency_cnt; /* #Times ucode high latency detected since WL up */
+	uint16 rsvd; /* pad to align struct to 32-bit boundary */
+} wlc_btc_stats_t;
+
+#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
+#define WL_MAX_IPFO_ROUTE_TBL_ENTRY	64
+
+	/* Global ASSERT Logging */
+#define ASSERTLOG_CUR_VER	0x0100
+#define MAX_ASSRTSTR_LEN	64
+
+	typedef struct assert_record {
+		uint32 time;
+		uint8 seq_num;
+		int8 str[MAX_ASSRTSTR_LEN];
+	} assert_record_t;
+
+	typedef struct assertlog_results {
+		uint16 version;
+		uint16 record_len;
+		uint32 num;
+		assert_record_t logs[1];
+	} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN	8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
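+
+/* Example (illustrative): for a 2048-byte IO buffer,
+ *   IOBUF_ALLOWED_NUM_OF_LOGREC(assert_record_t, 2048)
+ *     == (2048 - LOGRRC_FIX_LEN) / sizeof(assert_record_t)
+ * i.e. the fixed assertlog_results_t header (LOGRRC_FIX_LEN bytes) is subtracted
+ * before dividing by the per-record size.
+ */
+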
+#ifdef BCMWAPI_WAI
+/* BCMWAPI_WAI */
+#define IV_LEN 16
+	struct wapi_sta_msg_t
+	{
+		uint16	msg_type;
+		uint16	datalen;
+		uint8	vap_mac[6];
+		uint8	reserve_data1[2];
+		uint8	sta_mac[6];
+		uint8	reserve_data2[2];
+		uint8	gsn[IV_LEN];
+		uint8	wie[256];
+	};
+#endif /* BCMWAPI_WAI */
+	/* chanim acs record */
+	typedef struct {
+		uint8 valid;
+		uint8 trigger;
+		chanspec_t selected_chspc;
+		int8 bgnoise;
+		uint32 glitch_cnt;
+		uint8 ccastats;
+		uint8 chan_idle;
+		uint32 timestamp;
+	} chanim_acs_record_t;
+
+	typedef struct {
+		chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+		uint8 count;
+		uint32 timestamp;
+	} wl_acs_record_t;
+
+#define WL_CHANIM_STATS_V2 2
+#define CCASTATS_V2_MAX 9
+typedef struct chanim_stats_v2 {
+	uint32 glitchcnt;               /**< normalized as per second count */
+	uint32 badplcp;                 /**< normalized as per second count */
+	uint8 ccastats[CCASTATS_V2_MAX];   /**< normalized as 0-255 */
+	int8 bgnoise;                   /**< background noise level (in dBm) */
+	chanspec_t chanspec;            /**< ctrl chanspec of the interface */
+	uint32 timestamp;               /**< time stamp at which the stats are collected */
+	uint32 bphy_glitchcnt;          /**< normalized as per second count */
+	uint32 bphy_badplcp;            /**< normalized as per second count */
+	uint8 chan_idle;                /**< normalized as 0~255 */
+	uint8 PAD[3];
+} chanim_stats_v2_t;
+
+typedef struct chanim_stats {
+	uint32 glitchcnt;               /**< normalized as per second count */
+	uint32 badplcp;                 /**< normalized as per second count */
+	uint8 ccastats[CCASTATS_MAX];   /**< normalized as 0-255 */
+	int8 bgnoise;                   /**< background noise level (in dBm) */
+	uint8 pad_1[11 - CCASTATS_MAX];
+	chanspec_t chanspec;            /**< ctrl chanspec of the interface */
+	uint8 pad_2[2];
+	uint32 timestamp;               /**< time stamp at which the stats are collected */
+	uint32 bphy_glitchcnt;          /**< normalized as per second count */
+	uint32 bphy_badplcp;            /**< normalized as per second count */
+	uint8 chan_idle;                /**< normalized as 0~255 */
+	uint8 PAD[3];
+} chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 3
+typedef struct {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	chanim_stats_t stats[1];
+} wl_chanim_stats_t;
+
+#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats)
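+
+/* Usage sketch (illustrative): sizing a result buffer for 'n' entries. Assumption:
+ * the caller fills buflen and version before issuing the request.
+ */
+#if 0
+static uint32 chanim_stats_buflen(uint32 n)
+{
+	/* fixed header plus n variable chanim_stats_t entries */
+	return WL_CHANIM_STATS_FIXED_LEN + n * sizeof(chanim_stats_t);
+}
+#endif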
+
+/** Noise measurement metrics. */
+#define NOISE_MEASURE_KNOISE	0x1
+
+/** scb probe parameter */
+typedef struct {
+	uint32 scb_timeout;
+	uint32 scb_activity_time;
+	uint32 scb_max_probe;
+} wl_scb_probe_t;
+
+/* structure/defines for selective mgmt frame (smf) stats support */
+
+#define SMFS_VERSION 1
+/** selected mgmt frame (smf) stats element */
+typedef struct wl_smfs_elem {
+	uint32 count;
+	uint16 code;  /**< SC or RC code */
+	uint8 PAD[2];
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+	uint32 version;
+	uint16 length;	/**< reserved for future usage */
+	uint8 type;
+	uint8 codetype;
+	uint32 ignored_cnt;
+	uint32 malformed_cnt;
+	uint32 count_total; /**< count included the interested group */
+	wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
+
+enum {
+	SMFS_CODETYPE_SC,
+	SMFS_CODETYPE_RC
+};
+
+typedef enum smfs_type {
+	SMFS_TYPE_AUTH,
+	SMFS_TYPE_ASSOC,
+	SMFS_TYPE_REASSOC,
+	SMFS_TYPE_DISASSOC_TX,
+	SMFS_TYPE_DISASSOC_RX,
+	SMFS_TYPE_DEAUTH_TX,
+	SMFS_TYPE_DEAUTH_RX,
+	SMFS_TYPE_MAX
+} smfs_type_t;
+
+/* #ifdef PHYMON */
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+	/* Tx IQ/LO calibration coeffs */
+	int16 tx_iqlocal_a;
+	int16 tx_iqlocal_b;
+	int8 tx_iqlocal_ci;
+	int8 tx_iqlocal_cq;
+	int8 tx_iqlocal_di;
+	int8 tx_iqlocal_dq;
+	int8 tx_iqlocal_ei;
+	int8 tx_iqlocal_eq;
+	int8 tx_iqlocal_fi;
+	int8 tx_iqlocal_fq;
+
+	/** Rx IQ calibration coeffs */
+	int16 rx_iqcal_a;
+	int16 rx_iqcal_b;
+
+	uint8 tx_iqlocal_pwridx; /**< Tx Power Index for Tx IQ/LO calibration */
+	uint8 PAD[3];
+	uint32 papd_epsilon_table[64]; /**< PAPD epsilon table */
+	int16 papd_epsilon_offset; /**< PAPD epsilon offset */
+	uint8 curr_tx_pwrindex; /**< Tx power index */
+	int8 idle_tssi; /**< Idle TSSI */
+	int8 est_tx_pwr; /**< Estimated Tx Power (dB) */
+	int8 est_rx_pwr; /**< Estimated Rx Power (dB) from RSSI */
+	uint16 rx_gaininfo; /**< Rx gain applied on last Rx pkt */
+	uint16 init_gaincode; /**< initgain required for ACI */
+	int8 estirr_tx;
+	int8 estirr_rx;
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+	int32 version;
+	int8 num_phy_cores; /**< number of cores */
+	int8 curr_temperature; /**< on-chip temperature sensor reading */
+	chanspec_t chspec; /**< chanspec for this state */
+	uint8 aci_state; /**< ACI state: ON/OFF */
+	uint8 PAD;
+	uint16 crsminpower; /**< crsminpower required for ACI */
+	uint16 crsminpowerl; /**< crsminpowerl required for ACI */
+	uint16 crsminpoweru; /**< crsminpoweru required for ACI */
+	wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+/* endif PHYMON */
+
+/** discovery state */
+typedef struct wl_p2p_disc_st {
+	uint8 state;	/**< see state */
+	uint8 PAD;
+	chanspec_t chspec;	/**< valid in listen state */
+	uint16 dwell;	/**< valid in listen state, in ms */
+} wl_p2p_disc_st_t;
+
+/** scan request */
+typedef struct wl_p2p_scan {
+	uint8 type;		/**< 'S' for WLC_SCAN, 'E' for "escan" */
+	uint8 reserved[3];
+	/* scan or escan parms... */
+} wl_p2p_scan_t;
+
+/** i/f request */
+typedef struct wl_p2p_if {
+	struct ether_addr addr;
+	uint8 type;	/**< see i/f type */
+	uint8 PAD;
+	chanspec_t chspec;	/**< for p2p_ifadd GO */
+} wl_p2p_if_t;
+
+/** i/f query */
+typedef struct wl_p2p_ifq {
+	uint32 bsscfgidx;
+	char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+/** OppPS & CTWindow */
+typedef struct wl_p2p_ops {
+	uint8 ops;	/**< 0: disable 1: enable */
+	uint8 ctw;	/**< >= 10 */
+} wl_p2p_ops_t;
+
+/** absence and presence request */
+typedef struct wl_p2p_sched_desc {
+	uint32 start;
+	uint32 interval;
+	uint32 duration;
+	uint32 count;	/**< see count */
+} wl_p2p_sched_desc_t;
+
+typedef struct wl_p2p_sched {
+	uint8 type;	/**< see schedule type */
+	uint8 action;	/**< see schedule action */
+	uint8 option;	/**< see schedule option */
+	uint8 PAD;
+	wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+
+typedef struct wl_p2p_wfds_hash {
+	uint32	advt_id;
+	uint16	nw_cfg_method;
+	uint8	wfds_hash[6];
+	uint8	name_len;
+	uint8	service_name[MAX_WFDS_SVC_NAME_LEN];
+	uint8	PAD[3];
+} wl_p2p_wfds_hash_t;
+
+typedef struct wl_bcmdcs_data {
+	uint32 reason;
+	chanspec_t chspec;
+	uint8	PAD[2];
+} wl_bcmdcs_data_t;
+/* ifdef EXT_STA */
+/**
+ * Format of IHV data passed to OID_DOT11_NIC_SPECIFIC_EXTENSION.
+ */
+typedef struct _IHV_NIC_SPECIFIC_EXTENSION {
+	uint8			oui[4];     /**< vendor specific OUI value */
+	uint32			event;      /**< event code */
+	uint8			ihvData[1];    /**< ihv data */
+} IHV_NIC_SPECIFIC_EXTENSION, *PIHV_NIC_SPECIFIC_EXTENSION;
+#define IHV_NIC_SPECIFIC_EXTENTION_HEADER	OFFSETOF(IHV_NIC_SPECIFIC_EXTENSION, ihvData[0])
+/* EXT_STA */
+/** NAT configuration */
+typedef struct {
+	uint32 ipaddr;		/**< interface ip address */
+	uint32 ipaddr_mask;	/**< interface ip address mask */
+	uint32 ipaddr_gateway;	/**< gateway ip address */
+	uint8 mac_gateway[6];	/**< gateway mac address */
+	uint8	PAD[2];
+	uint32 ipaddr_dns;	/**< DNS server ip address, valid only for public if */
+	uint8 mac_dns[6];	/**< DNS server mac address,  valid only for public if */
+	uint8 GUID[38];		/**< interface GUID */
+} nat_if_info_t;
+
+typedef struct {
+	uint32 op;		/**< operation code */
+	uint8 pub_if;		/**< set for public if, clear for private if */
+	uint8	PAD[3];
+	nat_if_info_t if_info;	/**< interface info */
+} nat_cfg_t;
+
+typedef struct {
+	int32 state;	/**< NAT state returned */
+} nat_state_t;
+
+typedef struct flush_txfifo {
+	uint32 txfifobmp;
+	uint32 hwtxfifoflush;
+	struct ether_addr ea;
+	uint8	PAD[2];
+} flush_txfifo_t;
+
+enum {
+	SPATIAL_MODE_2G_IDX = 0,
+	SPATIAL_MODE_5G_LOW_IDX,
+	SPATIAL_MODE_5G_MID_IDX,
+	SPATIAL_MODE_5G_HIGH_IDX,
+	SPATIAL_MODE_5G_UPPER_IDX,
+	SPATIAL_MODE_MAX_IDX
+};
+
+#define WLC_TXCORE_MAX		4	/**< max number of txcore supports */
+#define WLC_TXCORE_MAX_OLD	2	/**< backward compatibility for TXCAL */
+#define WLC_SUBBAND_MAX		4	/**< max number of sub-band supports */
+typedef struct {
+	uint8	band2g[WLC_TXCORE_MAX];
+	uint8	band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
+} sar_limit_t;
+
+#define MAX_NUM_TXCAL_MEAS 128
+#define MAX_NUM_PWR_STEP 40
+#define TXCAL_IOVAR_VERSION	0x1
+typedef struct wl_txcal_meas_percore {
+	uint16 tssi[MAX_NUM_TXCAL_MEAS];
+	int16 pwr[MAX_NUM_TXCAL_MEAS];
+} wl_txcal_meas_percore_t;
+
+typedef struct wl_txcal_meas_ncore {
+	uint16 version;
+	uint8 valid_cnt;
+	uint8 num_core;
+	wl_txcal_meas_percore_t txcal_percore[1];
+} wl_txcal_meas_ncore_t;
+
+typedef struct wl_txcal_power_tssi_percore {
+	int16 tempsense;
+	int16 pwr_start;
+	uint8 pwr_start_idx;
+	uint8 num_entries;
+	uint16 pad;
+	uint8 tssi[MAX_NUM_PWR_STEP];
+} wl_txcal_power_tssi_percore_t;
+
+typedef struct wl_txcal_power_tssi_ncore {
+	uint16 version;
+	uint8 set_core;
+	uint8 channel;
+	uint8 num_core;
+	uint8 gen_tbl;
+	uint16 pad;
+	wl_txcal_power_tssi_percore_t tssi_percore[1];
+} wl_txcal_power_tssi_ncore_t;
+
+typedef struct wl_txcal_meas {
+	uint16 tssi[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS];
+	int16 pwr[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS];
+	uint8 valid_cnt;
+	uint8 PAD;
+} wl_txcal_meas_t;
+
+typedef struct wl_txcal_meas_old {
+	uint16 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS];
+	int16 pwr[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS];
+	uint8 valid_cnt;
+	uint8 PAD;
+} wl_txcal_meas_old_t;
+
+typedef struct wl_txcal_power_tssi {
+	uint8 set_core;
+	uint8 channel;
+	int16 tempsense[WLC_TXCORE_MAX];
+	int16 pwr_start[WLC_TXCORE_MAX];
+	uint8 pwr_start_idx[WLC_TXCORE_MAX];
+	uint8 num_entries[WLC_TXCORE_MAX];
+	uint8 tssi[WLC_TXCORE_MAX][MAX_NUM_PWR_STEP];
+	uint8 gen_tbl;
+	uint8 PAD;
+} wl_txcal_power_tssi_t;
+
+typedef struct wl_txcal_power_tssi_old {
+	uint8 set_core;
+	uint8 channel;
+	int16 tempsense[WLC_TXCORE_MAX_OLD];
+	int16 pwr_start[WLC_TXCORE_MAX_OLD];
+	uint8 pwr_start_idx[WLC_TXCORE_MAX_OLD];
+	uint8 num_entries[WLC_TXCORE_MAX_OLD];
+	uint8 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_PWR_STEP];
+	uint8 gen_tbl;
+	uint8 PAD;
+} wl_txcal_power_tssi_old_t;
+
+typedef struct wl_olpc_pwr {
+	uint16 version;
+	uint8 core;
+	uint8 channel;
+	int16 tempsense;
+	uint8 olpc_idx;
+	uint8 pad;
+} wl_olpc_pwr_t;
+
+/** IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
+typedef struct wl_mempool_stats {
+	int32 num;		/**< Number of memory pools */
+	bcm_mp_stats_t s[1];	/**< Variable array of memory pool stats. */
+} wl_mempool_stats_t;
+
+typedef struct {
+	uint32 ipaddr;
+	uint32 ipaddr_netmask;
+	uint32 ipaddr_gateway;
+} nwoe_ifconfig_t;
+
+/** Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+	trf_mgmt_priority_low           = 0,        /**< Maps to 802.1p BK */
+	trf_mgmt_priority_medium        = 1,        /**< Maps to 802.1p BE */
+	trf_mgmt_priority_high          = 2,        /**< Maps to 802.1p VI */
+	trf_mgmt_priority_nochange	= 3,	    /**< do not update the priority */
+	trf_mgmt_priority_invalid       = (trf_mgmt_priority_nochange + 1)
+} trf_mgmt_priority_class_t;
+
+/** Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+	uint32  trf_mgmt_enabled;                           /**< 0 - disabled, 1 - enabled */
+	uint32  flags;                                      /**< See TRF_MGMT_FLAG_xxx defines */
+	uint32  host_ip_addr;                              /**< My IP address to determine subnet */
+	uint32  host_subnet_mask;                           /**< My subnet mask */
+	uint32  downlink_bandwidth;                         /**< In units of kbps */
+	uint32  uplink_bandwidth;                           /**< In units of kbps */
+	uint32  min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /**< Minimum guaranteed tx bandwidth */
+	uint32  min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /**< Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
+
+/** Traffic management filter */
+typedef struct trf_mgmt_filter {
+	struct ether_addr           dst_ether_addr;         /**< His L2 address */
+	uint8						PAD[2];
+	uint32                      dst_ip_addr;            /**< His IP address */
+	uint16                      dst_port;               /**< His L4 port */
+	uint16                      src_port;               /**< My L4 port */
+	uint16                      prot;                   /**< L4 protocol (only TCP or UDP) */
+	uint16                      flags;                  /**< TBD. For now, this must be zero. */
+	trf_mgmt_priority_class_t   priority;               /**< Priority for filtered packets */
+	uint32                      dscp;                   /**< DSCP */
+} trf_mgmt_filter_t;
+
+/** Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list     {
+	uint32              num_filters;
+	trf_mgmt_filter_t   filter[1];
+} trf_mgmt_filter_list_t;
+
+/** Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+	uint32  maximum_bytes_per_second;
+	uint32  maximum_bytes_per_sampling_period;
+	uint32  total_bytes_consumed_per_second;
+	uint32  total_bytes_consumed_per_sampling_period;
+	uint32  total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/** Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+	uint32  gauranteed_bandwidth_percentage;
+	uint32  guaranteed_bytes_per_second;
+	uint32  guaranteed_bytes_per_sampling_period;
+	uint32  num_bytes_produced_per_second;
+	uint32  num_bytes_consumed_per_second;
+	uint32  num_queued_packets;                         /**< Number of packets in queue */
+	uint32  num_queued_bytes;                           /**< Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/** Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+	trf_mgmt_global_info_t   tx_global_shaping_info;
+	trf_mgmt_shaping_info_t  tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_global_info_t   rx_global_shaping_info;
+	trf_mgmt_shaping_info_t  rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+
+/** Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+	uint32  num_processed_packets;      /**< Number of packets processed */
+	uint32  num_processed_bytes;        /**< Number of bytes processed */
+	uint32  num_discarded_packets;      /**< Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/** Traffic management statistics array */
+typedef struct trf_mgmt_stats_array {
+	trf_mgmt_stats_t  tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_stats_t  rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
+/* Both powersel_params and lpc_params are used by IOVAR lpc_params.
+ * The powersel_params is replaced by lpc_params in later WLC versions.
+ */
+typedef struct powersel_params {
+	/* LPC Params exposed via IOVAR */
+	int32		tp_ratio_thresh;  /**< Throughput ratio threshold */
+	uint8		rate_stab_thresh; /**< Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /**< Number of successes before power step down */
+	uint8		pwr_sel_exp_time; /**< Time lapse for expiry of database */
+	uint8		PAD;
+} powersel_params_t;
+
+#define WL_LPC_PARAMS_VER_2	2
+#define WL_LPC_PARAMS_CURRENT_VERSION WL_LPC_PARAMS_VER_2
+
+typedef struct lpc_params {
+	uint16		version;
+	uint16		length;
+	/* LPC Params exposed via IOVAR */
+	uint8		rate_stab_thresh; /**< Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /**< Number of successes before power step down */
+	uint8		lpc_exp_time; /**< Time lapse for expiry of database */
+	uint8		pwrup_slow_step; /**< Step size for slow step up */
+	uint8		pwrup_fast_step; /**< Step size for fast step up */
+	uint8		pwrdn_slow_step; /**< Step size for slow step down */
+} lpc_params_t;
+
+/* tx pkt delay statistics */
+#define	SCB_RETRY_SHORT_DEF	7	/**< Default Short retry Limit */
+#define WLPKTDLY_HIST_NBINS	16	/**< number of bins used in the Delay histogram */
+
+/** structure to store per-AC delay statistics */
+typedef struct scb_delay_stats {
+	uint32 txmpdu_lost;	/**< number of MPDUs lost */
+	uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /**< retry times histogram */
+	uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /**< cumulative packet latency */
+	uint32 delay_min;	/**< minimum packet latency observed */
+	uint32 delay_max;	/**< maximum packet latency observed */
+	uint32 delay_avg;	/**< packet latency average */
+	uint32 delay_hist[WLPKTDLY_HIST_NBINS];	/**< delay histogram */
+	uint32 delay_count;	/**< minimum number of time period units before
+				consequent packet delay events can be generated
+				*/
+	uint32 prev_txmpdu_cnt;	/**< Previous value of txmpdu_cnt[] during last iteration */
+	uint32 prev_delay_sum;	/**< Previous value of delay_sum[] during last iteration */
+} scb_delay_stats_t;
+
+/** structure for txdelay event */
+typedef struct txdelay_event {
+	uint8				status;
+	uint8				PAD[3];
+	int32				rssi;
+	chanim_stats_t		chanim_stats;
+	scb_delay_stats_t	delay_stats[AC_COUNT];
+} txdelay_event_t;
+
+/** structure for txdelay parameters */
+typedef struct txdelay_params {
+	uint16	ratio;	/**< Avg Txdelay Delta */
+	uint8	cnt;	/**< Sample cnt */
+	uint8	period;	/**< Sample period */
+	uint8	tune;	/**< Debug */
+	uint8	PAD;
+} txdelay_params_t;
+#define MAX_TXDELAY_STATS_SCBS 6
+#define TXDELAY_STATS_VERSION 1
+
+enum {
+	TXDELAY_STATS_PARTIAL_RESULT = 0,
+	TXDELAY_STATS_FULL_RESULT = 1
+};
+
+typedef struct scb_total_delay_stats {
+	struct  ether_addr ea;
+	uint8   pad[2];
+	scb_delay_stats_t dlystats[AC_COUNT];
+} scb_total_delay_stats_t;
+
+typedef struct txdelay_stats {
+	uint32  version;
+	uint32  full_result;    /* 0:Partial, 1:full */
+	uint32  scb_cnt;        /* in:requested, out:returned */
+	scb_total_delay_stats_t scb_delay_stats[1];
+} txdelay_stats_t;
+
+#define WL_TXDELAY_STATS_FIXED_SIZE \
+	(sizeof(txdelay_stats_t)+(MAX_TXDELAY_STATS_SCBS-1)*sizeof(scb_total_delay_stats_t))
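+
+/* Usage sketch (illustrative): preparing a request that asks for up to
+ * MAX_TXDELAY_STATS_SCBS entries; scb_cnt is in/out (requested in, returned out).
+ */
+#if 0
+static int txdelay_stats_prepare(txdelay_stats_t *req, uint32 want)
+{
+	if (want == 0 || want > MAX_TXDELAY_STATS_SCBS)
+		return -1;
+	memset(req, 0, WL_TXDELAY_STATS_FIXED_SIZE);	/* room for the maximum result */
+	req->version = TXDELAY_STATS_VERSION;
+	req->scb_cnt = want;
+	return 0;
+}
+#endif
+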
+enum {
+	WNM_SERVICE_DMS = 1,
+	WNM_SERVICE_FMS = 2,
+	WNM_SERVICE_TFS = 3
+};
+
+/** Definitions for WNM/NPS TCLAS */
+typedef struct wl_tclas {
+	uint8 user_priority;
+	uint8 fc_len;
+	dot11_tclas_fc_t fc;
+} wl_tclas_t;
+
+#define WL_TCLAS_FIXED_SIZE	OFFSETOF(wl_tclas_t, fc)
+
+typedef struct wl_tclas_list {
+	uint32 num;
+	wl_tclas_t tclas[];
+} wl_tclas_list_t;
+
+/** Definitions for WNM/NPS Traffic Filter Service */
+typedef struct wl_tfs_req {
+	uint8 tfs_id;
+	uint8 tfs_actcode;
+	uint8 tfs_subelem_id;
+	uint8 send;
+} wl_tfs_req_t;
+
+typedef struct wl_tfs_filter {
+	uint8 status;			/**< Status returned by the AP */
+	uint8 tclas_proc;		/**< TCLAS processing value (0:and, 1:or)  */
+	uint8 tclas_cnt;		/**< count of all wl_tclas_t in tclas array */
+	uint8 tclas[1];			/**< VLA of wl_tclas_t */
+} wl_tfs_filter_t;
+#define WL_TFS_FILTER_FIXED_SIZE	OFFSETOF(wl_tfs_filter_t, tclas)
+
+typedef struct wl_tfs_fset {
+	struct ether_addr ea;		/**< Address of AP/STA involved with this filter set */
+	uint8 tfs_id;			/**< TFS ID field chosen by STA host */
+	uint8 status;			/**< Internal status TFS_STATUS_xxx */
+	uint8 actcode;			/**< Action code DOT11_TFS_ACTCODE_xxx */
+	uint8 token;			/**< Token used in last request frame */
+	uint8 notify;			/**< Notify frame sent/received because of this set */
+	uint8 filter_cnt;		/**< count of all wl_tfs_filter_t in filter array */
+	uint8 filter[1];		/**< VLA of wl_tfs_filter_t */
+} wl_tfs_fset_t;
+#define WL_TFS_FSET_FIXED_SIZE		OFFSETOF(wl_tfs_fset_t, filter)
+
+enum {
+	TFS_STATUS_DISABLED = 0,	/**< TFS filter set disabled by user */
+	TFS_STATUS_DISABLING = 1,	/**< Empty request just sent to AP */
+	TFS_STATUS_VALIDATED = 2,	/**< Filter set validated by AP (but maybe not enabled!) */
+	TFS_STATUS_VALIDATING = 3,	/**< Filter set just sent to AP */
+	TFS_STATUS_NOT_ASSOC = 4,	/**< STA not associated */
+	TFS_STATUS_NOT_SUPPORT = 5,	/**< TFS not supported by AP */
+	TFS_STATUS_DENIED = 6,		/**< Filter set refused by AP (=> all sets are disabled!) */
+};
+
+typedef struct wl_tfs_status {
+	uint8 fset_cnt;			/**< count of all wl_tfs_fset_t in fset array */
+	wl_tfs_fset_t fset[1];		/**< VLA of wl_tfs_fset_t */
+} wl_tfs_status_t;
+
+typedef struct wl_tfs_set {
+	uint8 send;		/**< Immediately register registered sets on AP side */
+	uint8 tfs_id;		/**< ID of a specific set (existing or new), or nul for all */
+	uint8 actcode;		/**< Action code for this filter set */
+	uint8 tclas_proc;	/**< TCLAS processing operator for this filter set */
+} wl_tfs_set_t;
+
+typedef struct wl_tfs_term {
+	uint8 del;			/**< Delete internal set once confirmation received */
+	uint8 tfs_id;			/**< ID of a specific set (existing), or nul for all */
+} wl_tfs_term_t;
+
+
+#define DMS_DEP_PROXY_ARP (1 << 0)
+
+/* Definitions for WNM/NPS Directed Multicast Service */
+enum {
+	DMS_STATUS_DISABLED = 0,	/**< DMS desc disabled by user */
+	DMS_STATUS_ACCEPTED = 1,	/**< Request accepted by AP */
+	DMS_STATUS_NOT_ASSOC = 2,	/**< STA not associated */
+	DMS_STATUS_NOT_SUPPORT = 3,	/**< DMS not supported by AP */
+	DMS_STATUS_DENIED = 4,		/**< Request denied by AP */
+	DMS_STATUS_TERM = 5,		/**< Request terminated by AP */
+	DMS_STATUS_REMOVING = 6,	/**< Remove request just sent */
+	DMS_STATUS_ADDING = 7,		/**< Add request just sent */
+	DMS_STATUS_ERROR = 8,		/**< Non-compliant AP behavior */
+	DMS_STATUS_IN_PROGRESS = 9,	/**< Request just sent */
+	DMS_STATUS_REQ_MISMATCH = 10	/**< Conditions for sending DMS req not met */
+};
+
+typedef struct wl_dms_desc {
+	uint8 user_id;
+	uint8 status;
+	uint8 token;
+	uint8 dms_id;
+	uint8 tclas_proc;
+	uint8 mac_len;		/**< length of all ether_addr in data array, 0 if STA */
+	uint8 tclas_len;	/**< length of all wl_tclas_t in data array */
+	uint8 data[1];		/**< VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */
+} wl_dms_desc_t;
+
+#define WL_DMS_DESC_FIXED_SIZE	OFFSETOF(wl_dms_desc_t, data)
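+
+/* Layout sketch (illustrative) for wl_dms_desc_t.data: mac_len bytes of ether_addr
+ * entries first, followed by tclas_len bytes of wl_tclas_t entries, as described above.
+ */
+#if 0
+static void split_dms_desc(const wl_dms_desc_t *desc,
+	const struct ether_addr **macs, const uint8 **tclas)
+{
+	*macs = (const struct ether_addr *)desc->data;
+	*tclas = desc->data + desc->mac_len;	/* wl_tclas_t records follow the MAC list */
+}
+#endif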
+
+typedef struct wl_dms_status {
+	uint32 cnt;
+	wl_dms_desc_t desc[1];
+} wl_dms_status_t;
+
+typedef struct wl_dms_set {
+	uint8 send;
+	uint8 user_id;
+	uint8 tclas_proc;
+} wl_dms_set_t;
+
+typedef struct wl_dms_term {
+	uint8 del;
+	uint8 user_id;
+} wl_dms_term_t;
+
+typedef struct wl_service_term {
+	uint8 service;
+	union {
+		wl_dms_term_t dms;
+	} u;
+} wl_service_term_t;
+
+/** Definitions for WNM/NPS BSS Transition */
+typedef struct wl_bsstrans_req {
+	uint16 tbtt;			/**< time of BSS to end of life, in unit of TBTT */
+	uint16 dur;			/**< time of BSS to keep off, in unit of minute */
+	uint8 reqmode;			/**< request mode of BSS transition request */
+	uint8 unicast;			/**< request by unicast or by broadcast */
+} wl_bsstrans_req_t;
+
+enum {
+	BSSTRANS_RESP_AUTO = 0,		/**< Currently equivalent to ENABLE */
+	BSSTRANS_RESP_DISABLE = 1,	/**< Never answer BSS Trans Req frames */
+	BSSTRANS_RESP_ENABLE = 2,	/**< Always answer Req frames with preset data */
+	BSSTRANS_RESP_WAIT = 3,		/**< Send ind, wait and/or send preset data (NOT IMPL) */
+	BSSTRANS_RESP_IMMEDIATE = 4	/**< After an ind, set data and send resp (NOT IMPL) */
+};
+
+typedef struct wl_bsstrans_resp {
+	uint8 policy;
+	uint8 status;
+	uint8 delay;
+	struct ether_addr target;
+} wl_bsstrans_resp_t;
+
+/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception.
+ * BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF,
+ * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan
+ * mandates different behavior on receiving BSS-transition request. To accommodate
+ * such divergent behaviors, these policies have been created.
+ */
+typedef enum {
+	WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0,	/**< Roam (or disassociate) in all cases */
+	WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1,	/**< Roam only if requested by Request Mode field */
+	WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2,	/**< Roam only if Preferred BSS provided */
+	WL_BSSTRANS_POLICY_WAIT = 3,		/**< Wait for deauth and send Accepted status */
+	WL_BSSTRANS_POLICY_PRODUCT = 4,	/**< Policy for real product use cases (Olympic) */
+	WL_BSSTRANS_POLICY_PRODUCT_WBTEXT = 5,	/**< Policy for real product use cases (SS) */
+	WL_BSSTRANS_POLICY_MAX = 6
+} wnm_bsstrans_policy_type_t;
+
+/** Definitions for WNM/NPS TIM Broadcast */
+typedef struct wl_timbc_offset {
+	int16 offset;		/**< offset in us */
+	uint16 fix_intv;	/**< override interval sent from STA */
+	uint16 rate_override;	/**< use rate override to send high rate TIM broadcast frame */
+	uint8 tsf_present;	/**< show timestamp in TIM broadcast frame */
+	uint8 PAD;
+} wl_timbc_offset_t;
+
+typedef struct wl_timbc_set {
+	uint8 interval;		/**< Interval in DTIM wished or required. */
+	uint8 flags;		/**< Bitfield described below */
+	uint16 rate_min;	/**< Minimum rate required for High/Low TIM frames. Optional */
+	uint16 rate_max;	/**< Maximum rate required for High/Low TIM frames. Optional */
+} wl_timbc_set_t;
+
+enum {
+	WL_TIMBC_SET_TSF_REQUIRED = 1,	/**< Enable TIMBC only if TSF in TIM frames */
+	WL_TIMBC_SET_NO_OVERRIDE = 2,	/**< ... if AP does not override interval */
+	WL_TIMBC_SET_PROXY_ARP = 4,	/**< ... if AP support Proxy ARP */
+	WL_TIMBC_SET_DMS_ACCEPTED = 8	/**< ... if all DMS desc have been accepted */
+};
+
+typedef struct wl_timbc_status {
+	uint8 status_sta;		/**< Status from internal state machine (check below) */
+	uint8 status_ap;		/**< From AP response frame (check 8.4.2.86 from 802.11) */
+	uint8 interval;
+	uint8 pad;
+	int32 offset;
+	uint16 rate_high;
+	uint16 rate_low;
+} wl_timbc_status_t;
+
+enum {
+	WL_TIMBC_STATUS_DISABLE = 0,		/**< TIMBC disabled by user */
+	WL_TIMBC_STATUS_REQ_MISMATCH = 1,	/**< AP settings do not match user requirements */
+	WL_TIMBC_STATUS_NOT_ASSOC = 2,		/**< STA not associated */
+	WL_TIMBC_STATUS_NOT_SUPPORT = 3,	/**< TIMBC not supported by AP */
+	WL_TIMBC_STATUS_DENIED = 4,		/**< Req to disable TIMBC sent to AP */
+	WL_TIMBC_STATUS_ENABLE = 5		/**< TIMBC enabled */
+};
+
+/** Definitions for PM2 Dynamic Fast Return To Sleep */
+typedef struct wl_pm2_sleep_ret_ext {
+	uint8  logic;			/**< DFRTS logic: see WL_DFRTS_LOGIC_* below */
+	uint8  PAD;
+	uint16 low_ms;			/**< Low FRTS timeout */
+	uint16 high_ms;			/**< High FRTS timeout */
+	uint16 rx_pkts_threshold;	/**< switching threshold: # rx pkts */
+	uint16 tx_pkts_threshold;	/**< switching threshold: # tx pkts */
+	uint16 txrx_pkts_threshold;	/**< switching threshold: # (tx+rx) pkts */
+	uint32 rx_bytes_threshold;	/**< switching threshold: # rx bytes */
+	uint32 tx_bytes_threshold;	/**< switching threshold: # tx bytes */
+	uint32 txrx_bytes_threshold;	/**< switching threshold: # (tx+rx) bytes */
+} wl_pm2_sleep_ret_ext_t;
+
+#define WL_DFRTS_LOGIC_OFF	0	/**< Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/**< OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/**< AND all non-zero threshold conditions */
+
+/* Values for the passive_on_restricted_mode iovar.  When set to non-zero, this iovar
+ * disables automatic conversions of a channel from passively scanned to
+ * actively scanned.  These values only have an effect for country codes such
+ * as XZ where some 5 GHz channels are defined to be passively scanned.
+ */
+#define WL_PASSACTCONV_DISABLE_NONE	0	/**< Enable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_ALL	1	/**< Disable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_PERM	2	/**< Disable only permanent conversions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RMC_CNT_VERSION	   1
+#define WL_RMC_TR_VERSION	   1
+#define WL_RMC_MAX_CLIENT	   32
+#define WL_RMC_FLAG_INBLACKLIST	   1
+#define WL_RMC_FLAG_ACTIVEACKER	   2
+#define WL_RMC_FLAG_RELMCAST	   4
+#define WL_RMC_MAX_TABLE_ENTRY     4
+
+#define WL_RMC_VER		   1
+#define WL_RMC_INDEX_ACK_ALL       255
+#define WL_RMC_NUM_OF_MC_STREAMS   4
+#define WL_RMC_MAX_TRS_PER_GROUP   1
+#define WL_RMC_MAX_TRS_IN_ACKALL   1
+#define WL_RMC_ACK_MCAST0          0x02
+#define WL_RMC_ACK_MCAST_ALL       0x01
+#define WL_RMC_ACTF_TIME_MIN       300		/**< time in ms */
+#define WL_RMC_ACTF_TIME_MAX       20000	/**< time in ms */
+#define WL_RMC_MAX_NUM_TRS	   32		/**< maximum transmitters allowed */
+#define WL_RMC_ARTMO_MIN           350		/**< time in ms */
+#define WL_RMC_ARTMO_MAX           40000	/**< time in ms */
+
+/* RMC events in action frames */
+enum rmc_opcodes {
+	RELMCAST_ENTRY_OP_DISABLE = 0,   /**< Disable multi-cast group */
+	RELMCAST_ENTRY_OP_DELETE  = 1,   /**< Delete multi-cast group */
+	RELMCAST_ENTRY_OP_ENABLE  = 2,   /**< Enable multi-cast group */
+	RELMCAST_ENTRY_OP_ACK_ALL = 3    /**< Enable ACK ALL bit in AMT */
+};
+
+/* RMC operational modes */
+enum rmc_modes {
+	WL_RMC_MODE_RECEIVER    = 0,	 /**< Receiver mode by default */
+	WL_RMC_MODE_TRANSMITTER = 1,	 /**< Transmitter mode using wl ackreq */
+	WL_RMC_MODE_INITIATOR   = 2	 /**< Initiator mode using wl ackreq */
+};
+
+/** Each RMC mcast client info */
+typedef struct wl_relmcast_client {
+	uint8 flag;			/**< status of client such as AR, R, or blacklisted */
+	uint8 PAD;
+	int16 rssi;			/**< rssi value of RMC client */
+	struct ether_addr addr;		/**< mac address of RMC client */
+} wl_relmcast_client_t;
+
+/** RMC Counters */
+typedef struct wl_rmc_cnts {
+	uint16  version;		/**< see definition of WL_CNT_T_VERSION */
+	uint16  length;			/**< length of entire structure */
+	uint16	dupcnt;			/**< counter for duplicate rmc MPDU */
+	uint16	ackreq_err;		/**< counter for wl ackreq error    */
+	uint16	af_tx_err;		/**< error count for action frame transmit   */
+	uint16	null_tx_err;		/**< error count for rmc null frame transmit */
+	uint16	af_unicast_tx_err;	/**< error count for rmc unicast frame transmit */
+	uint16	mc_no_amt_slot;		/**< No mcast AMT entry available */
+	/* Unused. Keep for rom compatibility */
+	uint16	mc_no_glb_slot;		/**< No mcast entry available in global table */
+	uint16	mc_not_mirrored;	/**< mcast group is not mirrored */
+	uint16	mc_existing_tr;		/**< mcast group is already taken by transmitter */
+	uint16	mc_exist_in_amt;	/**< mcast group is already programmed in amt */
+	/* Unused. Keep for rom compatibility */
+	uint16	mc_not_exist_in_gbl;	/**< mcast group is not in global table */
+	uint16	mc_not_exist_in_amt;	/**< mcast group is not in AMT table */
+	uint16	mc_utilized;		/**< mcast address is already taken */
+	uint16	mc_taken_other_tr;	/**< multi-cast address is already taken by another transmitter */
+	uint32	rmc_rx_frames_mac;      /**< no of mc frames received from mac */
+	uint32	rmc_tx_frames_mac;      /**< no of mc frames transmitted to mac */
+	uint32	mc_null_ar_cnt;         /**< no. of times NULL AR is received */
+	uint32	mc_ar_role_selected;	/**< no. of times took AR role */
+	uint32	mc_ar_role_deleted;	/**< no. of times AR role cancelled */
+	uint32	mc_noacktimer_expired;  /**< no. of times noack timer expired */
+	uint16  mc_no_wl_clk;           /**< no wl clk detected when trying to access amt */
+	uint16  mc_tr_cnt_exceeded;     /**< No of transmitters in the network exceeded */
+} wl_rmc_cnts_t;
+
+/** RMC Status */
+typedef struct wl_relmcast_st {
+	uint8         ver;		/**< version of RMC */
+	uint8         num;		/**< number of clients detected by transmitter */
+	wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT];
+	uint16        err;		/**< error status (used in infra) */
+	uint16        actf_time;	/**< action frame time period */
+} wl_relmcast_status_t;
+
+/** Entry for each STA/node */
+typedef struct wl_rmc_entry {
+	/* operation on multi-cast entry such as add,
+	 * delete, ack-all
+	 */
+	int8    flag;
+	struct ether_addr addr;		/**< multi-cast group mac address */
+} wl_rmc_entry_t;
+
+/** RMC table */
+typedef struct wl_rmc_entry_table {
+	uint8   index;			/**< index to a particular mac entry in table */
+	uint8   opcode;			/**< opcodes or operation on entry */
+	wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY];
+} wl_rmc_entry_table_t;
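+/*
+ * Illustrative usage sketch; group_mac is a placeholder, and whether the
+ * per-entry flag must mirror the table opcode is firmware-specific and not
+ * confirmed here. Enabling reliable multicast for one group address:
+ *
+ *	wl_rmc_entry_table_t tbl;
+ *	memset(&tbl, 0, sizeof(tbl));
+ *	tbl.index = 0;				// first slot in the table
+ *	tbl.opcode = RELMCAST_ENTRY_OP_ENABLE;	// enable this multicast group
+ *	memcpy(&tbl.entry[0].addr, group_mac, sizeof(struct ether_addr));
+ */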
+
+typedef struct wl_rmc_trans_elem {
+	struct ether_addr tr_mac;	/**< transmitter mac */
+	struct ether_addr ar_mac;	/**< ar mac */
+	uint16 artmo;			/**< AR timeout */
+	uint8 amt_idx;			/**< amt table entry */
+	uint8 PAD;
+	uint16 flag;			/**< entry will be acked, not acked, programmed, full etc */
+} wl_rmc_trans_elem_t;
+
+/** RMC transmitters */
+typedef struct wl_rmc_trans_in_network {
+	uint8         ver;		/**< version of RMC */
+	uint8         num_tr;		/**< number of transmitters in the network */
+	wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS];
+} wl_rmc_trans_in_network_t;
+
+/** To update vendor specific ie for RMC */
+typedef struct wl_rmc_vsie {
+	uint8	oui[DOT11_OUI_LEN];
+	uint8	PAD;
+	uint16	payload;	/**< IE Data Payload */
+} wl_rmc_vsie_t;
+
+
+/* structures & defines for proximity detection */
+enum proxd_method {
+	PROXD_UNDEFINED_METHOD = 0,
+	PROXD_RSSI_METHOD = 1,
+	PROXD_TOF_METHOD = 2
+};
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE	0
+#define WL_PROXD_MODE_NEUTRAL	1
+#define WL_PROXD_MODE_INITIATOR	2
+#define WL_PROXD_MODE_TARGET	3
+
+#define WL_PROXD_ACTION_STOP		0
+#define WL_PROXD_ACTION_START		1
+
+#define WL_PROXD_FLAG_TARGET_REPORT	0x1
+#define WL_PROXD_FLAG_REPORT_FAILURE	0x2
+#define WL_PROXD_FLAG_INITIATOR_REPORT	0x4
+#define WL_PROXD_FLAG_NOCHANSWT		0x8
+#define WL_PROXD_FLAG_NETRUAL		0x10
+#define WL_PROXD_FLAG_INITIATOR_RPTRTT	0x20
+#define WL_PROXD_FLAG_ONEWAY		0x40
+#define WL_PROXD_FLAG_SEQ_EN		0x80
+
+#define WL_PROXD_SETFLAG_K		0x1
+#define WL_PROXD_SETFLAG_N		0x2
+#define WL_PROXD_SETFLAG_S		0x4
+
+#define WL_PROXD_RANDOM_WAKEUP	0x8000
+#define WL_PROXD_MAXREPORT	8
+
+typedef struct wl_proxd_iovar {
+	uint16	method;		/**< Proximity Detection method */
+	uint16	mode;		/**< Mode (neutral, initiator, target) */
+} wl_proxd_iovar_t;
+
+/*
+ * structures for proximity detection parameters
+ * consists of two parts, common and method specific params
+ * common params should be placed at the beginning
+ */
+
+typedef struct wl_proxd_params_common	{
+	chanspec_t	chanspec;	/**< channel spec */
+	int16		tx_power;	/**< tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16		tx_rate;	/**< tx rate of PD frames (in 500 kbps units) */
+	uint16		timeout;	/**< timeout value */
+	uint16		interval;	/**< interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/**< duration of neighbor finding attempts (in ms) */
+} wl_proxd_params_common_t;
+
+typedef struct wl_proxd_params_rssi_method {
+	chanspec_t	chanspec;	/**< chanspec for home channel */
+	int16		tx_power;	/**< tx power of Proximity Detection frames (in dBm) */
+	uint16		tx_rate;	/**< tx rate of PD frames, 500kbps units */
+	uint16		timeout;	/**< state machine wait timeout of the frames (in ms) */
+	uint16		interval;	/**< interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/**< duration of neighbor finding attempts (in ms) */
+					/* method specific ones go after this line */
+	int16		rssi_thresh;	/**< RSSI threshold (in dBm) */
+	uint16		maxconvergtmo;	/**< max wait converge timeout (in ms) */
+} wl_proxd_params_rssi_method_t;
+
+#define Q1_NS			25	/**< Q1 time units */
+
+#define TOF_BW_NUM		3	/**< number of bandwidth that the TOF can support */
+#define TOF_BW_SEQ_NUM		(TOF_BW_NUM+2)	/* number of total index */
+enum tof_bw_index {
+	TOF_BW_20MHZ_INDEX = 0,
+	TOF_BW_40MHZ_INDEX = 1,
+	TOF_BW_80MHZ_INDEX = 2,
+	TOF_BW_SEQTX_INDEX = 3,
+	TOF_BW_SEQRX_INDEX = 4
+};
+
+#define BANDWIDTH_BASE	20	/**< base value of bandwidth */
+#define TOF_BW_20MHZ    (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_BW_40MHZ    (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_BW_80MHZ    (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX)
+#define TOF_BW_10MHZ    10
+
+#define NFFT_BASE		64	/**< base size of fft */
+#define TOF_NFFT_20MHZ  (NFFT_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_NFFT_40MHZ  (NFFT_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_NFFT_80MHZ  (NFFT_BASE << TOF_BW_80MHZ_INDEX)
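+/*
+ * Worked example: the bandwidth and FFT sizes are derived by shifting the
+ * base values with the per-bandwidth index, e.g.
+ *	TOF_BW_40MHZ = 20 << 1 = 40	TOF_NFFT_40MHZ = 64 << 1 = 128
+ *	TOF_BW_80MHZ = 20 << 2 = 80	TOF_NFFT_80MHZ = 64 << 2 = 256
+ */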
+
+typedef struct wl_proxd_params_tof_method {
+	chanspec_t	chanspec;	/**< chanspec for home channel */
+	int16		tx_power;	/**< tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16		tx_rate;	/**< tx rate of PD frames (in 500 kbps units) */
+	uint16		timeout;	/**< state machine wait timeout of the frames (in ms) */
+	uint16		interval;	/**< interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/**< duration of neighbor finding attempts (in ms) */
+	/* specific for the method go after this line */
+	struct ether_addr tgt_mac;	/**< target mac addr for TOF method */
+	uint16		ftm_cnt;	/**< number of the frames txed by initiator */
+	uint16		retry_cnt;	/**< number of retransmit attempts for ftm frames */
+	int16		vht_rate;	/**< ht or vht rate */
+	/* more params required for other methods can be added here */
+} wl_proxd_params_tof_method_t;
+
+typedef struct wl_proxd_seq_config
+{
+	int16 N_tx_log2;
+	int16 N_rx_log2;
+	int16 N_tx_scale;
+	int16 N_rx_scale;
+	int16 w_len;
+	int16 w_offset;
+} wl_proxd_seq_config_t;
+
+#define WL_PROXD_TUNE_VERSION_1		1
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune {
+	uint32		version;
+	uint32		Ki;			/**< h/w delay K factor for initiator */
+	uint32		Kt;			/**< h/w delay K factor for target */
+	int16		vhtack;			/**< enable/disable VHT ACK */
+	int16		N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+	int16		w_offset[TOF_BW_NUM];	/**< offset of threshold crossing window(per BW) */
+	int16		w_len[TOF_BW_NUM];	/**< length of threshold crossing window(per BW) */
+	int32		maxDT;			/**< max time difference of T4/T1 or T3/T2 */
+	int32		minDT;			/**< min time difference of T4/T1 or T3/T2 */
+	uint8		totalfrmcnt;	/**< total count of transferred measurement frames */
+	uint16		rsv_media;		/**< reserve media value for TOF */
+	uint32		flags;			/**< flags */
+	uint8		core;			/**< core to use for tx */
+	uint8		setflags;		/* set flags of K, N, S values */
+	int16		N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
+	uint8		sw_adj;			/**< enable sw assisted timestamp adjustment */
+	uint8		hw_adj;			/**< enable hw assisted timestamp adjustment */
+	uint8		seq_en;			/**< enable ranging sequence */
+	uint8		ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */
+	int16		N_log2_2g;		/**< simple threshold crossing for 2g channel */
+	int16		N_scale_2g;		/**< simple threshold crossing for 2g channel */
+	wl_proxd_seq_config_t seq_5g20;
+	wl_proxd_seq_config_t seq_2g20;		/* Thresh crossing params for 2G Sequence */
+	uint16          bitflip_thresh;		/* bitflip threshold */
+	uint16          snr_thresh;		/* SNR threshold */
+	int8            recv_2g_thresh;		/* 2g receive sensitivity threshold */
+	uint32          acs_gdv_thresh;
+	int8            acs_rssi_thresh;
+	uint8           smooth_win_en;
+	int32		acs_gdmm_thresh;
+	int8		acs_delta_rssi_thresh;
+	int32		emu_delay;
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t;
+#include <packed_section_end.h>
+
+typedef struct wl_proxd_params_iovar {
+	uint16	method;			/**< Proximity Detection method */
+	union {
+		/* common params for pdsvc */
+		wl_proxd_params_common_t	cmn_params;	/**< common parameters */
+		/*  method specific */
+		wl_proxd_params_rssi_method_t	rssi_params;	/**< RSSI method parameters */
+		wl_proxd_params_tof_method_t	tof_params;	/**< TOF method parameters */
+		/* tune parameters */
+		wl_proxd_params_tof_tune_t	tof_tune;	/**< TOF tune parameters */
+		uint8	PAD[sizeof(wl_proxd_params_tof_tune_t)+1];
+	} u;				/**< Method specific optional parameters */
+} wl_proxd_params_iovar_t;
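+/*
+ * Illustrative usage sketch; values are arbitrary placeholders and chspec is
+ * assumed to be obtained elsewhere. The method field selects which union
+ * member the firmware interprets, here the TOF parameters:
+ *
+ *	wl_proxd_params_iovar_t p;
+ *	memset(&p, 0, sizeof(p));
+ *	p.method = PROXD_TOF_METHOD;
+ *	p.u.tof_params.chanspec = chspec;	// home channel
+ *	p.u.tof_params.ftm_cnt = 5;		// frames sent by the initiator
+ *	p.u.tof_params.retry_cnt = 3;		// retransmit attempts per frame
+ */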
+
+#define PROXD_COLLECT_GET_STATUS	0
+#define PROXD_COLLECT_SET_STATUS	1
+#define PROXD_COLLECT_QUERY_HEADER	2
+#define PROXD_COLLECT_QUERY_DATA	3
+#define PROXD_COLLECT_QUERY_DEBUG	4
+#define PROXD_COLLECT_REMOTE_REQUEST	5
+#define PROXD_COLLECT_DONE		6
+
+typedef enum {
+	WL_PROXD_COLLECT_METHOD_TYPE_DISABLE		= 0x0,
+	WL_PROXD_COLLECT_METHOD_TYPE_IOVAR		= 0x1,
+	WL_PROXD_COLLECT_METHOD_TYPE_EVENT		= 0x2,
+	WL_PROXD_COLLECT_METHOD_TYPE_EVENT_LOG		= 0x4
+} wl_proxd_collect_method_type_t;
+
+typedef uint16 wl_proxd_collect_method_t;	/* query status: method to send proxd collect */
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query {
+	uint32		method;		/**< method */
+	uint8		request;	/**< Query request. */
+	uint8		status;		/**< bitmask 0 -- disable, 0x1 -- enable collection, */
+					/* 0x2 -- Use generic event, 0x4 -- use event log */
+	uint16		index;		/**< The current frame index [0 to total_frames - 1]. */
+	uint16		mode;		/**< Initiator or Target */
+	uint8		busy;		/**< tof sm is busy */
+	uint8		remote;		/**< Remote collect data */
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t;
+#include <packed_section_end.h>
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header {
+	uint16		total_frames;			/**< The total frames for this collect. */
+	uint16		nfft;				/**< nfft value */
+	uint16		bandwidth;			/**< bandwidth */
+	uint16		channel;			/**< channel number */
+	uint32		chanspec;			/**< channel spec */
+	uint32		fpfactor;			/**< avb timer value factor */
+	uint16		fpfactor_shift;			/**< avb timer value shift bits */
+	int32		distance;			/**< distance calculated by fw */
+	uint32		meanrtt;			/**< mean of RTTs */
+	uint32		modertt;			/**< mode of RTTs */
+	uint32		medianrtt;			/**< median of RTTs */
+	uint32		sdrtt;				/**< standard deviation of RTTs */
+	uint32		clkdivisor;			/**< clock divisor */
+	uint16		chipnum;			/**< chip type */
+	uint8		chiprev;			/**< chip revision */
+	uint8		phyver;				/**< phy version */
+	struct ether_addr	localMacAddr;		/**< local mac address */
+	struct ether_addr	remoteMacAddr;		/**< remote mac address */
+	wl_proxd_params_tof_tune_t params;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
+#include <packed_section_end.h>
+
+
+/* ifdef WL_NAN */
+/*  ********************** NAN wl interface struct types and defs ******************** */
+/*
+ * Uses new common IOVAR batch processing mechanism
+ */
+
+/*
+ * NAN config control
+ * Bits 0 - 23 can be set by host
+ * Bits 24 - 31 - Internal use for firmware, host cannot set it
+ */
+
+/*
+ * Bit 0 : If set to 1, means event uses nan bsscfg,
+ * otherwise uses infra bsscfg. Default is using infra bsscfg
+ */
+#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG	0x1
+/* If set, discovery beacons are transmitted on 2G band */
+#define WL_NAN_CTRL_DISC_BEACON_TX_2G		0x2
+/* If set, sync beacons are transmitted on 2G band */
+#define WL_NAN_CTRL_SYNC_BEACON_TX_2G		0x4
+/* If set, discovery beacons are transmitted on 5G band */
+#define WL_NAN_CTRL_DISC_BEACON_TX_5G		0x8
+/* If set, sync beacons are transmitted on 5G band */
+#define WL_NAN_CTRL_SYNC_BEACON_TX_5G		0x10
+/* If set, auto datapath responses will be sent by FW */
+#define WL_NAN_CTRL_AUTO_DPRESP			0x20
+/* If set, auto datapath confirms will be sent by FW */
+#define WL_NAN_CTRL_AUTO_DPCONF			0x40
+
+/* Value when all host-configurable bits set */
+#define WL_NAN_CTRL_MAX_MASK			0xFFFFFF
+#define WL_NAN_CFG_CTRL_FW_BITS			8
+
+/* Bit 31:
+ * If set - indicates that NAN initialization is successful
+ * NOTE: This is a read-only bit. All sets to this are masked off
+ */
+#define WL_NAN_PROTO_INIT_DONE		0x80000000
+#define WL_NAN_GET_PROTO_INIT_STATUS(x) \
+		(((x) >> 31) & 1)
+#define WL_NAN_CLEAR_PROTO_INIT_STATUS(x) \
+		((x) &= ~WL_NAN_PROTO_INIT_DONE)
+#define WL_NAN_SET_PROTO_INIT_STATUS(x) \
+		((x) |= (1 << 31))
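+/*
+ * Illustrative usage sketch: bit 31 of the config control word is owned by
+ * firmware, so a host-side user would read it with the accessor above and
+ * restrict its own writes to the 24 configurable bits (read_ctrl_from_fw and
+ * host_bits are placeholders):
+ *
+ *	uint32 cfg_ctrl = read_ctrl_from_fw();
+ *	if (WL_NAN_GET_PROTO_INIT_STATUS(cfg_ctrl)) {
+ *		// NAN initialization completed in firmware
+ *	}
+ *	cfg_ctrl = host_bits & WL_NAN_CTRL_MAX_MASK;	// sets to bit 31 are masked off
+ */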
+
+#define WL_NAN_IOCTL_VERSION			0x2
+/* some sufficient ioc buff size for our module */
+#define WL_NAN_IOC_BUFSZ			256
+/* some sufficient ioc buff size for dump commands */
+#define WL_NAN_IOC_BUFSZ_EXT			1024
+#define WL_NAN_MAX_SIDS_IN_BEACONS		127 /* Max allowed SIDs */
+#define WL_NAN_MASTER_RANK_LEN			8
+#define WL_NAN_RANGE_LIMITED			0x0040 /* Publish/Subscribe flags */
+
+/** The service hash (service id) is exactly this many bytes. */
+#define WL_NAN_SVC_HASH_LEN			6
+#define WL_NAN_HASHES_PER_BLOOM			4 /** Number of hash functions per bloom filter */
+
+/* no. of max last disc results */
+#define WL_NAN_MAX_DISC_RESULTS			3
+
+/* Max len of Rx and Tx filters */
+#define WL_NAN_MAX_SVC_MATCH_FILTER_LEN	255
+
+/* Max service name len */
+#define WL_NAN_MAX_SVC_NAME_LEN	32
+
+/* Type of Data path connection */
+#define WL_NAN_DP_TYPE_UNICAST			0
+#define WL_NAN_DP_TYPE_MULTICAST		1
+
+/* MAX security params length PMK field */
+#define WL_NAN_NCS_SK_PMK_LEN 32
+
+/* Post disc attr ID type */
+typedef uint8 wl_nan_post_disc_attr_id_t;
+
+/*
+ * Component IDs
+ */
+typedef enum {
+	WL_NAN_COMPID_CONFIG = 1,
+	WL_NAN_COMPID_ELECTION = 2,
+	WL_NAN_COMPID_SD = 3,
+	WL_NAN_COMPID_TIMESYNC = 4,
+	WL_NAN_COMPID_DATA_PATH = 5,
+	WL_NAN_COMPID_DEBUG = 15 /* Keep this at the end */
+} wl_nan_comp_id_t;
+
+#define WL_NAN_COMP_SHIFT	8
+#define WL_NAN_COMP_MASK(_c)	(0x0F & ((uint8)(_c)))
+#define WL_NAN_COMP_ID(_c)	(WL_NAN_COMP_MASK(_c) << WL_NAN_COMP_SHIFT)
+
+/* NAN Events */
+
+/** Instance ID type (unique identifier) */
+typedef uint8 wl_nan_instance_id_t;
+
+/* Publish sent for a subscribe */
+/* WL_NAN_EVENT_REPLIED */
+
+typedef struct wl_nan_ev_replied {
+	struct ether_addr	sub_mac; /* Subscriber MAC */
+	wl_nan_instance_id_t	pub_id; /* Publisher Instance ID */
+	uint8			sub_id; /* Subscriber ID */
+	int8			sub_rssi; /* Subscriber RSSI */
+	uint8			pad[3];
+} wl_nan_ev_replied_t;
+
+typedef struct wl_nan_event_replied {
+	struct ether_addr	sub_mac; /* Subscriber MAC */
+	wl_nan_instance_id_t	pub_id; /* Publisher Instance ID */
+	uint8			sub_id; /* Subscriber ID */
+	int8			sub_rssi; /* Subscriber RSSI */
+	uint8		attr_num;
+	uint16		attr_list_len;  /* sizeof attributes attached to payload */
+	uint8		attr_list[0];   /* attributes payload */
+} wl_nan_event_replied_t;
+
+/* Subscribe or Publish instance Terminated */
+
+/* WL_NAN_EVENT_TERMINATED */
+
+#define	NAN_SD_TERM_REASON_TIMEOUT	1
+#define	NAN_SD_TERM_REASON_HOSTREQ	2
+#define	NAN_SD_TERM_REASON_FWTERM	3
+#define	NAN_SD_TERM_REASON_FAIL		4
+
+typedef struct wl_nan_ev_terminated {
+	uint8 instance_id;	/* publish / subscribe instance id */
+	uint8 reason;		/* 1=timeout, 2=Host/IOVAR, 3=FW Terminated, 4=Failure */
+	uint8 svctype;		/* 0 - Publish, 0x1 - Subscribe */
+	uint8 pad;		/* Align */
+} wl_nan_ev_terminated_t;
+
+/* Follow up received against a pub / subscr */
+/* WL_NAN_EVENT_RECEIVE */
+
+typedef struct wl_nan_ev_receive {
+	struct ether_addr remote_addr;	/* Peer NAN device MAC */
+	uint8	local_id;		/* Local subscribe or publish ID */
+	uint8	remote_id;		/* Remote subscribe or publish ID */
+	int8	fup_rssi;
+	uint8	attr_num;
+	uint16	attr_list_len;  /* sizeof attributes attached to payload */
+	uint8	attr_list[0];   /* attributes payload */
+} wl_nan_ev_receive_t;
+
+/*
+ * TLVs - Below XTLV definitions will be deprecated
+ * in due course (soon as all other branches update
+ * to the comp ID based XTLVs listed below).
+ */
+enum wl_nan_cmd_xtlv_id {
+	WL_NAN_XTLV_MAC_ADDR = 0x120,
+	WL_NAN_XTLV_MATCH_RX = 0x121,
+	WL_NAN_XTLV_MATCH_TX = 0x122,
+	WL_NAN_XTLV_SVC_INFO = 0x123,
+	WL_NAN_XTLV_SVC_NAME = 0x124,
+	WL_NAN_XTLV_SR_FILTER = 0x125,
+	WL_NAN_XTLV_FOLLOWUP = 0x126,
+	WL_NAN_XTLV_SVC_LIFE_COUNT = 0x127,
+	WL_NAN_XTLV_AVAIL = 0x128,
+	WL_NAN_XTLV_SDF_RX = 0x129,
+	WL_NAN_XTLV_SDE_CONTROL = 0x12a,
+	WL_NAN_XTLV_SDE_RANGE_LIMIT = 0x12b,
+	WL_NAN_XTLV_NAN_AF = 0x12c,
+	WL_NAN_XTLV_SD_TERMINATE = 0x12d,
+	WL_NAN_XTLV_CLUSTER_ID = 0x12e,
+	WL_NAN_XTLV_PEER_RSSI = 0x12f,
+	WL_NAN_XTLV_BCN_RX = 0x130,
+	WL_NAN_XTLV_REPLIED = 0x131, /* Publish sent for a subscribe */
+	WL_NAN_XTLV_RECEIVED = 0x132, /* FUP Received */
+	WL_NAN_XTLV_DISC_RESULTS = 0x133 /* Discovery results */
+};
+
+#define WL_NAN_CMD_GLOBAL		0x00
+#define WL_NAN_CMD_CFG_COMP_ID		0x01
+#define WL_NAN_CMD_ELECTION_COMP_ID	0x02
+#define WL_NAN_CMD_SD_COMP_ID		0x03
+#define WL_NAN_CMD_SYNC_COMP_ID		0x04
+#define WL_NAN_CMD_DATA_COMP_ID		0x05
+#define WL_NAN_CMD_DAM_COMP_ID		0x06
+#define WL_NAN_CMD_RANGE_COMP_ID	0x07
+#define WL_NAN_CMD_DBG_COMP_ID		0x0f
+
+#define WL_NAN_CMD_COMP_SHIFT		8
+#define NAN_CMD(x, y)  (((x) << WL_NAN_CMD_COMP_SHIFT) | (y))
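+/*
+ * Worked example: a sub-command ID packs the component ID in the upper byte
+ * and the command index in the lower byte, e.g.
+ *	NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02) = (0x03 << 8) | 0x02 = 0x0302
+ * which is the value WL_NAN_CMD_SD_PUBLISH takes in the enum below.
+ */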
+
+/*
+ * Module based NAN TLV IDs
+ */
+typedef enum wl_nan_tlv {
+
+	WL_NAN_XTLV_CFG_MATCH_RX	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01),
+	WL_NAN_XTLV_CFG_MATCH_TX	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02),
+	WL_NAN_XTLV_CFG_SR_FILTER	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03),
+	WL_NAN_XTLV_CFG_SVC_NAME	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04),
+	WL_NAN_XTLV_CFG_NAN_STATUS	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05),
+	WL_NAN_XTLV_CFG_SVC_LIFE_COUNT	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06),
+	WL_NAN_XTLV_CFG_SVC_HASH	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07),
+	WL_NAN_XTLV_CFG_SEC_CSID	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08), /* Security CSID */
+	WL_NAN_XTLV_CFG_SEC_PMK		= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09), /* Security PMK */
+	WL_NAN_XTLV_CFG_SEC_PMKID	= NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A),
+
+	WL_NAN_XTLV_SD_SVC_INFO		= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01),
+	WL_NAN_XTLV_SD_FOLLOWUP		= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02),
+	WL_NAN_XTLV_SD_SDF_RX		= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03),
+	WL_NAN_XTLV_SD_SDE_CONTROL	= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04),
+	WL_NAN_XTLV_SD_SDE_RANGE_LIMIT	= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05),
+	WL_NAN_XTLV_SD_NAN_AF		= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06),
+	WL_NAN_XTLV_SD_TERM		= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07),
+	WL_NAN_XTLV_SD_REPLIED		= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08), /* Pub sent */
+	WL_NAN_XTLV_SD_FUP_RECEIVED	= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09), /* FUP Received */
+	WL_NAN_XTLV_SD_DISC_RESULTS	= NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A), /* Pub RX */
+
+	WL_NAN_XTLV_SYNC_BCN_RX		= NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
+
+	WL_NAN_XTLV_DATA_DP_END		= NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01),
+	WL_NAN_XTLV_DATA_DP_INFO	= NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02),
+	WL_NAN_XTLV_DATA_DP_SEC_INST	= NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03),
+
+	WL_NAN_XTLV_RANGE_INFO		= NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01)
+} wl_nan_tlv_t;
+
+enum wl_nan_sub_cmd_xtlv_id {
+
+	/* Special command - Tag zero */
+	WL_NAN_CMD_GLB_NAN_VER = NAN_CMD(WL_NAN_CMD_GLOBAL, 0x00),
+
+	 /* nan cfg sub-commands */
+
+	WL_NAN_CMD_CFG_NAN_INIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01),
+	WL_NAN_CMD_CFG_ROLE = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02),
+	WL_NAN_CMD_CFG_HOP_CNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03),
+	WL_NAN_CMD_CFG_HOP_LIMIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04),
+	WL_NAN_CMD_CFG_WARMUP_TIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05),
+	WL_NAN_CMD_CFG_STATUS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06),
+	WL_NAN_CMD_CFG_OUI = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07),
+	WL_NAN_CMD_CFG_COUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08),
+	WL_NAN_CMD_CFG_CLEARCOUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09),
+	WL_NAN_CMD_CFG_CHANNEL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A),
+	WL_NAN_CMD_CFG_BAND = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0B),
+	WL_NAN_CMD_CFG_CID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0C),
+	WL_NAN_CMD_CFG_IF_ADDR = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0D),
+	WL_NAN_CMD_CFG_BCN_INTERVAL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0E),
+	WL_NAN_CMD_CFG_SDF_TXTIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0F),
+	WL_NAN_CMD_CFG_SID_BEACON = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x10),
+	WL_NAN_CMD_CFG_DW_LEN = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x11),
+	WL_NAN_CMD_CFG_AVAIL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x12),
+	WL_NAN_CMD_CFG_WFA_TM = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x13),
+	WL_NAN_CMD_CFG_EVENT_MASK =  NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x14),
+	WL_NAN_CMD_CFG_NAN_CONFIG = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x15),
+	WL_NAN_CMD_CFG_NAN_ENAB = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x16),
+	WL_NAN_CMD_CFG_ULW = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x17),
+	WL_NAN_CMD_CFG_MAX = WL_NAN_CMD_CFG_NAN_ENAB,
+	/* Add new commands before and update */
+
+	/* nan election sub-commands */
+	WL_NAN_CMD_ELECTION_JOIN = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x04), /* Deprecated */
+	WL_NAN_CMD_ELECTION_STOP = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x07), /* Deprecated */
+
+	WL_NAN_CMD_ELECTION_HOST_ENABLE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x01),
+	WL_NAN_CMD_ELECTION_METRICS_CONFIG = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x02),
+	WL_NAN_CMD_ELECTION_METRICS_STATE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03),
+	WL_NAN_CMD_ELECTION_LEAVE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03),
+	WL_NAN_CMD_ELECTION_MERGE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x04),
+	WL_NAN_CMD_ELECTION_ADVERTISERS = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x05),
+	WL_NAN_CMD_ELECTION_RSSI_THRESHOLD = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x06),
+	WL_NAN_CMD_ELECTION_MAX = WL_NAN_CMD_ELECTION_RSSI_THRESHOLD,
+	/* New commands go before and update */
+
+	/* nan SD sub-commands */
+	WL_NAN_CMD_SD_PARAMS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01),
+	WL_NAN_CMD_SD_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02),
+	WL_NAN_CMD_SD_PUBLISH_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03),
+	WL_NAN_CMD_SD_CANCEL_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04),
+	WL_NAN_CMD_SD_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05),
+	WL_NAN_CMD_SD_SUBSCRIBE_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06),
+	WL_NAN_CMD_SD_CANCEL_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07),
+	WL_NAN_CMD_SD_VND_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08),
+	WL_NAN_CMD_SD_STATS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09),
+	WL_NAN_CMD_SD_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A),
+	WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B),
+	WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C),
+	WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D),
+	WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_SHOW,
+
+	/* nan time sync sub-commands */
+
+	WL_NAN_CMD_SYNC_SOCIAL_CHAN = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
+	WL_NAN_CMD_SYNC_AWAKE_DWS = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02),
+	WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x03),
+	WL_NAN_CMD_SYNC_MAX = WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD,
+
+	/* nan2 commands */
+	WL_NAN_CMD_DATA_CONFIG = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01),
+	WL_NAN_CMD_DATA_RSVD02 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02),
+	WL_NAN_CMD_DATA_RSVD03 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03),
+	WL_NAN_CMD_DATA_DATAREQ = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x04),
+	WL_NAN_CMD_DATA_DATARESP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x05),
+	WL_NAN_CMD_DATA_DATAEND = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x06),
+	WL_NAN_CMD_DATA_SCHEDUPD = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x07),
+	WL_NAN_CMD_DATA_RSVD08 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x08),
+	WL_NAN_CMD_DATA_CAP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x9),
+	WL_NAN_CMD_DATA_STATUS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0A),
+	WL_NAN_CMD_DATA_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0B),
+	WL_NAN_CMD_DATA_RSVD0C = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0C),
+	WL_NAN_CMD_DATA_NDP_SHOW = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0D),
+	WL_NAN_CMD_DATA_DATACONF = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0E),
+	WL_NAN_CMD_DATA_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0F),
+	WL_NAN_CMD_DATA_MAX_PEERS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x10),
+	WL_NAN_CMD_DATA_PATH_MAX = WL_NAN_CMD_DATA_MAX_PEERS, /* New ones before and update */
+
+	/* nan dam sub-commands */
+	WL_NAN_CMD_DAM_CFG = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01),
+	WL_NAN_CMD_DAM_MAX = WL_NAN_CMD_DAM_CFG,  /* New ones before and update */
+
+	/* nan2.0 ranging commands */
+	WL_NAN_CMD_RANGE_REQUEST = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01),
+	WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02),
+	WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03),
+	WL_NAN_CMD_RANGE_CANCEL = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04),
+
+	/*  nan debug sub-commands  */
+	WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01),
+	WL_NAN_CMD_DBG_SCAN = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x02),
+	WL_NAN_CMD_DBG_SCAN_RESULTS =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x03),
+	/* This is now moved under CFG */
+	WL_NAN_CMD_DBG_EVENT_MASK =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x04),
+	WL_NAN_CMD_DBG_EVENT_CHECK =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x05),
+	WL_NAN_CMD_DBG_DUMP =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x06),
+	WL_NAN_CMD_DBG_CLEAR =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x07),
+	WL_NAN_CMD_DBG_RSSI =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x08),
+	WL_NAN_CMD_DBG_DEBUG =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x09),
+	WL_NAN_CMD_DBG_TEST1 =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0A),
+	WL_NAN_CMD_DBG_TEST2 =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0B),
+	WL_NAN_CMD_DBG_TEST3 =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0C),
+	WL_NAN_CMD_DBG_DISC_RESULTS =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0D),
+	WL_NAN_CMD_DBG_STATS =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0E),
+	WL_NAN_CMD_DBG_LEVEL =  NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0F),
+	WL_NAN_CMD_DBG_MAX = WL_NAN_CMD_DBG_LEVEL /* New ones before and update */
+};
+
+/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */
+enum {
+	/* add new status here... */
+	WL_NAN_E_SEC_SA_NOTFOUND	= -2076,
+	WL_NAN_E_BSSCFG_NOTFOUND	= -2075,
+	WL_NAN_E_SCB_NOTFOUND		= -2074,
+	WL_NAN_E_NCS_SK_KDESC_TYPE      = -2073,
+	WL_NAN_E_NCS_SK_KEY_DESC_VER    = -2072,	/* key descr ver */
+	WL_NAN_E_NCS_SK_KEY_TYPE        = -2071,	/* key descr type */
+	WL_NAN_E_NCS_SK_KEYINFO_FAIL    = -2070,	/* key info (generic) */
+	WL_NAN_E_NCS_SK_KEY_LEN         = -2069,	/* key len */
+	WL_NAN_E_NCS_SK_KDESC_NOT_FOUND = -2068,	/* key desc not found */
+	WL_NAN_E_NCS_SK_INVALID_PARAMS  = -2067,	/* invalid args */
+	WL_NAN_E_NCS_SK_KDESC_INVALID   = -2066,	/* key descr is not valid */
+	WL_NAN_E_NCS_SK_NONCE_MISMATCH  = -2065,
+	WL_NAN_E_NCS_SK_KDATA_SAVE_FAIL = -2064,	/* not able to save key data */
+	WL_NAN_E_NCS_SK_AUTH_TOKEN_CALC_FAIL = -2063,
+	WL_NAN_E_NCS_SK_PTK_CALC_FAIL   = -2062,
+	WL_NAN_E_INVALID_STARTOFFSET	= -2061,
+	WL_NAN_E_BAD_NA_ENTRY_TYPE	= -2060,
+	WL_NAN_E_INVALID_CHANBMP	= -2059,
+	WL_NAN_E_INVALID_OP_CLASS	= -2058,
+	WL_NAN_E_NO_IES			= -2057,
+	WL_NAN_E_NO_PEER_ENTRY_AVAIL	= -2056,
+	WL_NAN_E_INVALID_PEER		= -2055,
+	WL_NAN_E_PEER_EXISTS		= -2054,
+	WL_NAN_E_PEER_NOTFOUND		= -2053,
+	WL_NAN_E_NO_MEM			= -2052,
+	WL_NAN_E_INVALID_OPTION		= -2051,
+	WL_NAN_E_INVALID_BAND		= -2050,
+	WL_NAN_E_INVALID_MAC		= -2049,
+	WL_NAN_E_BAD_INSTANCE		= -2048,
+	WL_NAN_E_NDC_EXISTS		= -2047,
+	WL_NAN_E_NO_NDC_ENTRY_AVAIL	= -2046,
+	WL_NAN_E_INVALID_NDC_ENTRY      = -2045,
+	WL_NAN_E_ERROR			= -1,
+	WL_NAN_E_OK			= 0
+};
+
+typedef int32 wl_nan_status_t;
+
+/** nan cmd list entry  */
+enum wl_nan_sub_cmd_input_flags {
+	WL_NAN_SUB_CMD_FLAG_NONE = 0,
+	WL_NAN_SUB_CMD_FLAG_SKIP = 1, /* Skip to next sub-command on error */
+	WL_NAN_SUB_CMD_FLAG_TERMINATE = 2, /* Terminate processing and return */
+	WL_NAN_SUB_CMD_FLAG_LAST /* Keep this at the end */
+};
+
+/** container for nan events */
+typedef struct wl_nan_ioc {
+	uint16	version;	/**< interface command or event version */
+	uint16	id;			/**< nan ioctl cmd  ID  */
+	uint16	len;		/**< total length of all tlv records in data[]  */
+	uint16	pad;		/**< pad for 32 bit alignment */
+	uint8	data [];	/**< var len payload of bcm_xtlv_t type */
+} wl_nan_ioc_t;
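+/*
+ * Illustrative usage sketch; packed_xtlvs and packed_xtlv_len stand in for
+ * XTLV records already packed into a scratch buffer (the packing itself is
+ * normally done with Broadcom's bcm_xtlv helpers, not shown here):
+ *
+ *	wl_nan_ioc_t *ioc = malloc(sizeof(*ioc) + packed_xtlv_len);
+ *	ioc->version = WL_NAN_IOCTL_VERSION;
+ *	ioc->id = WL_NAN_CMD_CFG_STATUS;	// a sub-command ID defined above
+ *	ioc->len = packed_xtlv_len;		// covers all TLV records in data[]
+ *	memcpy(ioc->data, packed_xtlvs, packed_xtlv_len);
+ */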
+
+/*
+ * NAN sub-command data structures
+ */
+
+/*
+ * Config component WL_NAN_CMD_CFG_XXXX sub-commands
+ * WL_NAN_CMD_CFG_ENABLE
+ */
+enum wl_nan_config_state {
+	WL_NAN_CONFIG_STATE_DISABLE = 0,
+	WL_NAN_CONFIG_STATE_ENABLE = 1
+};
+
+typedef int8 wl_nan_config_state_t;
+
+/* WL_NAN_CMD_CFG_NAN_INIT */
+
+typedef uint8 wl_nan_init_t;
+
+/* WL_NAN_CMD_CFG_NAN_VERSION */
+typedef uint16 wl_nan_ver_t;
+
+/* WL_NAN_CMD_CFG_NAN_CONFIG  */
+typedef uint32 wl_nan_cfg_ctrl_t;
+
+/*
+ * WL_NAN_CMD_CFG_BAND, WL_NAN_CMD_CFG_RSSI_THRESHOLD(Get only)
+ */
+typedef uint8 wl_nan_band_t;
+
+/*
+ * WL_NAN_CMD_CFG_ROLE
+ */
+enum wl_nan_role {
+	WL_NAN_ROLE_AUTO = 0,
+	WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1,
+	WL_NAN_ROLE_NON_MASTER_SYNC = 2,
+	WL_NAN_ROLE_MASTER = 3,
+	WL_NAN_ROLE_ANCHOR_MASTER = 4
+};
+
+typedef uint8 wl_nan_role_t;
+
+typedef struct wl_nan_device_state
+{
+	wl_nan_role_t role;		/* Sync Master, Non-Sync Master */
+	uint8 state;	/* TBD  */
+	uint8 hopcount;	/* Hops to the Anchor Master */
+	struct ether_addr immediate_master; /* Master MAC */
+	struct ether_addr anchor_master;	/* Anchor Master MAC */
+	struct ether_addr cluster_id; /* Cluster ID to which this device belongs */
+	uint8 PAD[3];
+	uint32 tsf_high;  /* NAN Cluster TSFs */
+	uint32 tsf_low;
+} wl_nan_device_state_t;
+
+/*
+ * WL_NAN_CMD_CFG_HOP_CNT, WL_NAN_CMD_CFG_HOP_LIMIT
+ */
+typedef uint8 wl_nan_hop_count_t;
+
+/*
+ * WL_NAN_CMD_CFG_WARMUP_TIME
+ */
+typedef uint32 wl_nan_warmup_time_ticks_t;
+
+/*
+ * WL_NAN_CMD_CFG_RSSI_THRESHOLD
+ * rssi_close and rssi_mid are used to transition master to non-master
+ * role by NAN state machine. rssi thresholds corresponding to the band
+ * will be updated.
+ */
+/* To be deprecated */
+typedef struct wl_nan_rssi_threshold {
+	wl_nan_band_t band;
+	int8 rssi_close;
+	int8 rssi_mid;
+	uint8 pad;
+} wl_nan_rssi_threshold_t;
+
+/* WL_NAN_CMD_ELECTION_RSSI_THRESHOLD */
+
+typedef struct wl_nan_rssi_thld {
+	int8 rssi_close_2g;
+	int8 rssi_mid_2g;
+	int8 rssi_close_5g;
+	int8 rssi_mid_5g;
+} wl_nan_rssi_thld_t;
+
+/* WL_NAN_CMD_DATA_MAX_PEERS */
+
+typedef uint8 wl_nan_max_peers_t;
+
+#define NAN_MAX_BANDS 2
+/*
+ * WL_NAN_CMD_CFG_STATUS
+ */
+/* Deprecated - Begin */
+typedef struct wl_nan_cfg_status {
+	uint8 enabled;
+	uint8 inited;
+	uint8 joined;
+	uint8 merged;
+	uint8 role;
+	uint8 PAD[3];
+	uint32 chspec[2];
+	uint8 mr[8];                    /**< Master Rank */
+	uint8 amr[8];			/**< Anchor Master Rank */
+	uint32 cnt_pend_txfrm;		/**< pending TX frames */
+	uint32 cnt_bcn_tx;		/**< TX disc/sync beacon count */
+	uint32 cnt_bcn_rx;		/**< RX disc/sync beacon count */
+	uint32 cnt_svc_disc_tx;		/**< TX svc disc frame count */
+	uint32 cnt_svc_disc_rx;		/**< RX svc disc frame count */
+	uint32 ambtt;                   /**< Anchor master beacon target time */
+	struct ether_addr cid;          /**< Cluster id */
+	uint8 hop_count;                /**< Hop count */
+	uint8 PAD;
+} wl_nan_cfg_status_t;
+
+typedef struct wl_nan_config_status {
+	struct ether_addr	def_cid;	/* Default Cluster id */
+	uint8			inited;		/* NAN Initialized successfully */
+	uint8			enabled;	/* NAN Enabled */
+	struct ether_addr	cur_cid;	/* Current Cluster id */
+	uint8			joined;		/* Joined or started own cluster */
+	uint8			role;		/* Master, Non Master, NM Sync & Non-Sync */
+	chanspec_t		chspec[NAN_MAX_BANDS]; /* Channel Spec 2.4G followed by 5G */
+	uint8			mr[WL_NAN_MASTER_RANK_LEN]; /* Master Rank */
+	uint8			amr[WL_NAN_MASTER_RANK_LEN]; /* Anchor Master Rank */
+	uint32			cnt_pend_txfrm;	/* Pending Tx Frames */
+	uint32			cnt_bcn_tx;	/* TX disc/sync beacon count */
+	uint32			cnt_bcn_rx;	/* RX disc/sync beacon count */
+	uint32			cnt_svc_disc_tx; /* TX svc disc frame count */
+	uint32			cnt_svc_disc_rx; /* RX svc disc frame count */
+	uint32			ambtt;	/* Anchor master beacon target time */
+	uint8			hop_count; /* Hop count */
+	uint8			pad[3];	/* Align */
+} wl_nan_config_status_t;
+/* Deprecated - End */
+
+typedef enum wl_nan_election_mode {
+	WL_NAN_ELECTION_RUN_BY_HOST = 1,
+	WL_NAN_ELECTION_RUN_BY_FW = 2
+} wl_nan_election_mode_t;
+
+typedef struct wl_nan_conf_status {
+	struct ether_addr	nmi;		/*  NAN mgmt interface address */
+	uint8			enabled;	/* NAN is enabled */
+	uint8			role;		/* Current nan sync role */
+	struct ether_addr	cid;		/*  Current Cluster id */
+	uint8			social_chans[2];	/* Social channels */
+	uint8			mr[8];		/* Master Rank */
+	uint8			amr[8];		/* Anchor Master Rank */
+	uint32			ambtt;		/* Anchor master beacon target time */
+	uint32			cluster_tsf_h;	/* Current Cluster TSF High */
+	uint32			cluster_tsf_l;	/* Current Cluster TSF Low */
+	uint8			election_mode; /* Election mode, host or firmware */
+	uint8			hop_count;	/* Current Hop count */
+	uint8			pad[2];
+} wl_nan_conf_status_t;
+
+/*
+ * WL_NAN_CMD_CFG_OUI
+ */
+typedef struct wl_nan_oui_type {
+	uint8 nan_oui[DOT11_OUI_LEN];
+	uint8 type;
+} wl_nan_oui_type_t;
+
+/*
+ * WL_NAN_CMD_CFG_COUNT
+ */
+typedef struct wl_nan_count {
+	uint32 cnt_bcn_tx;		/**< TX disc/sync beacon count */
+	uint32 cnt_bcn_rx;		/**< RX disc/sync beacon count */
+	uint32 cnt_svc_disc_tx;		/**< TX svc disc frame count */
+	uint32 cnt_svc_disc_rx;		/**< RX svc disc frame count */
+} wl_nan_count_t;
+/*
+ * Election component WL_NAN_CMD_ELECTION_XXXX sub-commands
+ * WL_NAN_CMD_ELECTION_HOST_ENABLE
+ */
+enum wl_nan_enable_flags {
+	WL_NAN_DISABLE_FLAG_HOST_ELECTION = 0,
+	WL_NAN_ENABLE_FLAG_HOST_ELECTION = 1
+};
+
+/*
+ * 0 - disable host based election
+ * 1 - enable host based election
+ */
+typedef uint8 wl_nan_host_enable_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_METRICS_CONFIG
+ */
+/* Set only */
+typedef struct wl_nan_election_metric_config {
+	uint8 random_factor; /* Configured random factor */
+	uint8 master_pref;	/* configured master preference */
+	uint8 pad[2];
+} wl_nan_election_metric_config_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_METRICS_STATE
+ */
+/* Get only */
+typedef struct wl_nan_election_metric_state {
+	uint8 random_factor; /* random factor used in MIs */
+	uint8 master_pref;	 /* Master advertised in MIs */
+	uint8 pad[2];
+} wl_nan_election_metric_state_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_LEAVE
+ * WL_NAN_CMD_ELECTION_STOP
+ */
+typedef struct ether_addr wl_nan_cluster_id_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_JOIN
+ */
+typedef struct wl_nan_join {
+	uint8 start_cluster;	/* Start a cluster */
+	uint8 pad[3];
+	wl_nan_cluster_id_t cluster_id;	/* Cluster ID to join */
+} wl_nan_join_t;
+
+/*
+ * WL_NAN_CMD_ELECTION_MERGE
+ * 0 - disable cluster merge
+ * 1 - enable cluster merge
+ */
+typedef uint8 wl_nan_merge_enable_t;
+
+/*
+ * WL_NAN_CMD_CFG_ROLE
+ * role = 0 means configuration by firmware; otherwise by host
+ * when host configures role, also need target master address to sync to
+ */
+#define NAN_SYNC_MASTER_SELF	0
+#define NAN_SYNC_MASTER_AM		1
+#define NAN_SYNC_MASTER_INTERMEDIATE	2
+/*
+ * ltsf_h, ltsf_l: the local TSF timestamp, filled in by FW in the WL_NAN_EVENT_BCN_RX event
+ * rtsf_h, rtsf_l: the timestamp in the Rx beacon frame, filled in by host
+ * ambtt: the ambtt in the cluster ID attribute in the Rx beacon frame
+ */
+typedef struct nan_sync_master {
+	uint8 flag;	/* 0: self, 1: anchor-master, 2: intermediate master */
+	uint8  hop_count;
+	struct ether_addr addr;
+	struct ether_addr cluster_id;
+	chanspec_t channel; /* bcn reception channel */
+	uint32 ltsf_h;
+	uint32 ltsf_l;
+	uint32 rtsf_h;
+	uint32 rtsf_l;
+	uint8 amr[WL_NAN_MASTER_RANK_LEN];
+	uint32 ambtt;
+} nan_sync_master_t;
+
+/* NAN advertiser structure */
+/* TODO RSDB: add chspec to indicate which core this entry corresponds to */
+typedef struct nan_adv_entry {
+	uint8 age;	/* used to remove stale entries */
+	uint8 hop_count;
+	struct ether_addr addr;
+	struct ether_addr cluster_id;
+	chanspec_t channel; /* bcn reception channel */
+	uint32 ltsf_h;
+	uint32 ltsf_l;
+	uint32 rtsf_h;
+	uint32 rtsf_l;
+	uint8 amr[WL_NAN_MASTER_RANK_LEN];
+	uint32 ambtt;
+	int8	rssi[NAN_MAX_BANDS];		/* rssi last af was received at */
+	int8	last_rssi[NAN_MAX_BANDS];	/* rssi in the last AF */
+} nan_adv_entry_t;
+
+typedef struct nan_adv_table {
+	uint8  num_adv;
+	uint8	adv_size;
+	uint8	pad[2];
+	nan_adv_entry_t adv_nodes[0];
+} nan_adv_table_t;
+
+typedef struct wl_nan_role_cfg {
+	wl_nan_role_t cfg_role;
+	wl_nan_role_t cur_role;
+	uint8 pad[2];
+	nan_sync_master_t target_master;
+} wl_nan_role_cfg_t;
+
+typedef struct wl_nan_role_config {
+	wl_nan_role_t role;
+	struct ether_addr target_master;
+	uint8 pad;
+} wl_nan_role_config_t;
+
+typedef int8 wl_nan_sd_optional_field_types_t;
+
+/* Flag bits for Publish and Subscribe (wl_nan_sd_params_t flags) */
+
+#define WL_NAN_RANGE_LIMITED		0x0040
+
+/* Event generation indicator (default is continuous) */
+
+#define WL_NAN_MATCH_ONCE		0x100000
+#define WL_NAN_MATCH_NEVER		0x200000
+
+/* Bits specific to Publish */
+
+#define WL_NAN_PUB_UNSOLICIT	0x1000	/* Unsolicited Tx */
+#define WL_NAN_PUB_SOLICIT		0x2000	/* Solicited Tx */
+#define WL_NAN_PUB_BOTH			0x3000	/* Both the above */
+
+#define WL_NAN_PUB_BCAST		0x4000	/* bcast solicited Tx only */
+#define WL_NAN_PUB_EVENT		0x8000	/* Event on each solicited Tx */
+#define WL_NAN_PUB_SOLICIT_PENDING	0x10000 /* Used for one-time solicited Publish */
+
+#define WL_NAN_FOLLOWUP			0x20000 /* Follow-up frames */
+
+/* Bits specific to Subscribe */
+
+#define WL_NAN_SUB_ACTIVE		0x1000 /* Active subscribe mode */
+#define WL_NAN_SUB_MATCH_IF_SVC_INFO	0x2000 /* Service info in publish */
+
+#define WL_NAN_TTL_UNTIL_CANCEL	0xFFFFFFFF /* Special values for time to live (ttl) parameter */
+
+/*
+ * Publish - runs until first transmission
+ * Subscribe - runs until first DiscoveryResult event
+ */
+#define WL_NAN_TTL_FIRST	0
+
+/*
+ * WL_NAN_CMD_SD_PARAMS
+ */
+typedef struct wl_nan_sd_params
+{
+	uint16	length; /* length including options */
+	uint8	period; /* period of the unsolicited SDF xmission in DWs */
+	uint8   pad;
+	uint8	svc_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for the service name */
+	uint8	instance_id; /* Instance of the current service */
+	int8	proximity_rssi; /* RSSI limit to Rx subscribe or pub SDF; 0 = no effect */
+	uint32	flags; /* bitmap representing aforesaid optional flags */
+	int32	ttl; /* TTL for this instance id, -1 will run till cancelled */
+	tlv_t	optional[1]; /* optional fields in the SDF  as appropriate */
+} wl_nan_sd_params_t;
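+/*
+ * Illustrative usage sketch; values are arbitrary, the optional TLVs are
+ * omitted, and svc_hash is a placeholder for the hashed service name. An
+ * unsolicited publish that runs until cancelled could be set up as:
+ *
+ *	wl_nan_sd_params_t *p = calloc(1, sizeof(*p));
+ *	p->length = sizeof(*p);			// no optional fields appended
+ *	p->period = 1;				// send an SDF every DW
+ *	p->instance_id = 1;
+ *	p->flags = WL_NAN_PUB_UNSOLICIT;	// unsolicited tx only
+ *	p->ttl = WL_NAN_TTL_UNTIL_CANCEL;	// run until explicitly cancelled
+ *	memcpy(p->svc_hash, svc_hash, WL_NAN_SVC_HASH_LEN);	// 6-byte service id
+ */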
+
+/*
+ * WL_NAN_CMD_SD_PUBLISH_LIST
+ * WL_NAN_CMD_SD_SUBSCRIBE_LIST
+ */
+typedef struct wl_nan_service_info
+{
+	uint8 instance_id;	/* Publish instance ID */
+	uint8 service_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for service name */
+} wl_nan_service_info_t;
+
+typedef struct wl_nan_service_list
+{
+	uint16 id_count; /* Number of registered publish/subscribe services */
+	wl_nan_service_info_t list[1]; /* service info defined by nan_service instance */
+} wl_nan_service_list_t;
+
+/*
+ * WL_NAN_CMD_CFG_BCN_INTERVAL
+ */
+typedef uint16 wl_nan_disc_bcn_interval_t;
+
+/*
+ * WL_NAN_CMD_CFG_SDF_TXTIME
+ */
+typedef uint16 wl_nan_svc_disc_txtime_t;
+
+/*
+ * WL_NAN_CMD_CFG_STOP_BCN_TX
+ */
+typedef uint16 wl_nan_stop_bcn_tx_t;
+
+/*
+ * WL_NAN_CMD_CFG_SID_BEACON
+ */
+typedef struct wl_nan_sid_beacon_control {
+	uint8 sid_enable;	/* Flag to indicate the inclusion of Service IDs in Beacons */
+	uint8 sid_count;	/* Limit for number of SIDs to be included in Beacons */
+	uint8 pad[2];
+} wl_nan_sid_beacon_control_t;
+
+/*
+ * WL_NAN_CMD_CFG_DW_LEN
+ */
+typedef uint16 wl_nan_dw_len_t;
+
+/*
+ * WL_NAN_CMD_CFG_AWAKE_DW   Will be deprecated.
+ */
+typedef struct wl_nan_awake_dw {
+	wl_nan_band_t band;	/* 0 - b mode, 1 - a mode */
+	uint8 interval;		/* 1 or 2 or 4 or 8 or 16 */
+	uint16 pad;
+} wl_nan_awake_dw_t;
+
+/*
+ * WL_NAN_CMD_CFG_AWAKE_DWS
+ */
+typedef struct wl_nan_awake_dws {
+	uint8 dw_interval_2g;	/* 2G DW interval */
+	uint8 dw_interval_5g;	/* 5G DW interval */
+	uint16 pad;
+} wl_nan_awake_dws_t;
+
+/* WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD */
+
+typedef struct wl_nan_rssi_notif_thld {
+	int8 bcn_rssi_2g;
+	int8 bcn_rssi_5g;
+	int16 pad;
+} wl_nan_rssi_notif_thld_t;
+
+/*
+ * WL_NAN_CMD_CFG_SOCIAL_CHAN
+ */
+typedef struct wl_nan_social_channels {
+	uint8 soc_chan_2g;	/* 2G social channel */
+	uint8 soc_chan_5g;	/* 5G social channel */
+	uint16 pad;
+} wl_nan_social_channels_t;
+
+/*
+ * WL_NAN_CMD_SD_CANCEL_PUBLISH
+ * WL_NAN_CMD_SD_CANCEL_SUBSCRIBE
+ */
+typedef uint8 wl_nan_instance_id; /* Instance ID of an active publish instance */
+
+/*
+ * WL_NAN_CMD_SD_VND_INFO
+ */
+typedef struct wl_nan_sd_vendor_info
+{
+	uint16 length; /* Size in bytes of the payload following this field */
+	uint8 data[];	/* Vendor Information */
+} wl_nan_sd_vendor_info_t;
+
+/*
+ * WL_NAN_CMD_SD_STATS
+ */
+typedef struct wl_nan_sd_stats {
+	uint32  sdftx;
+	uint32  sdfrx;
+	uint32  sdsrffail;
+	uint32  sdrejrssi;
+	uint32  sdfollowuprx;
+	uint32  sdsubmatch;
+	uint32  sdpubreplied;
+	uint32  sdmftfail1;
+	uint32  sdmftfail2;
+	uint32  sdmftfail3;
+	uint32  sdmftfail4;
+}  wl_nan_sd_stats_t;
+
+/*
+ * WL_NAN_CMD_SD_TRANSMIT
+ * WL_NAN_CMD_SD_FUP_TRANSMIT
+ */
+typedef struct wl_nan_sd_transmit {
+	uint8 local_service_id; /* Sender Service ID */
+	uint8 requestor_service_id; /* Destination Service ID */
+	struct ether_addr destination_addr; /* Destination MAC */
+	uint16 token; /* follow_up_token when a follow-up msg is queued successfully */
+	uint8 priority; /* requested relative prio */
+	uint8 service_info_len; /* size in bytes of the service info payload */
+	uint8 service_info[]; /* Service Info payload */
+} wl_nan_sd_transmit_t;
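+/*
+ * Sizing note (service_info[] is a flexible array member): the buffer passed
+ * with the transmit sub-commands must be allocated as
+ *	sizeof(wl_nan_sd_transmit_t) + service_info_len
+ * with service_info_len set to the number of payload bytes copied into
+ * service_info[].
+ */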
+
+/*
+ * WL_NAN_CMD_SYNC_TSRESERVE
+ */
+/** time slot */
+#define NAN_MAX_TIMESLOT	32
+typedef struct wl_nan_timeslot {
+	uint32	abitmap; /**< available bitmap */
+	uint32 chanlist[NAN_MAX_TIMESLOT];
+} wl_nan_timeslot_t;
+
+/*
+ * Deprecated
+ *
+ * WL_NAN_CMD_SYNC_TSRELEASE
+ */
+typedef uint32 wl_nan_ts_bitmap_t;
+
+/* nan passive scan params */
+#define NAN_SCAN_MAX_CHCNT 8
+typedef struct wl_nan_scan_params {
+	uint16 scan_time;
+	uint16 home_time;
+	uint16 ms_intvl; /**< interval between merge scan */
+	uint16 ms_dur;  /**< duration of merge scan */
+	uint16 chspec_num;
+	uint8 pad[2];
+	chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /**< 3 actually used, 5 reserved for future use */
+} wl_nan_scan_params_t;
+
+/*
+ * WL_NAN_CMD_DBG_SCAN
+ */
+typedef struct wl_nan_dbg_scan {
+	struct ether_addr cid;
+	uint8 pad[2];
+} wl_nan_dbg_scan_t;
+
+/* NAN_DBG_LEVEL */
+typedef struct wl_nan_dbg_level {
+	uint32 nan_err_level; /* for Error levels */
+	uint32 nan_dbg_level; /* for debug logs and trace */
+	uint32 nan_info_level; /* for dumps like prhex */
+} wl_nan_dbg_level_t;
+
+/*
+ * WL_NAN_CMD_DBG_EVENT_MASK
+ */
+typedef uint32 wl_nan_event_mask_t;
+
+/*
+ * WL_NAN_CMD_DBG_EVENT_CHECK
+ */
+typedef uint8 wl_nan_dbg_ifname[BCM_MSG_IFNAME_MAX];
+
+/*
+ * WL_NAN_CMD_DBG_DUMP
+ * WL_NAN_CMD_DBG_CLEAR
+ */
+enum wl_nan_dbg_dump_type {
+	WL_NAN_DBG_DT_RSSI_DATA = 1,
+	WL_NAN_DBG_DT_STATS_DATA = 2,
+	/*
+	 * Additional enums before this line
+	 */
+	WL_NAN_DBG_DT_INVALID
+};
+typedef int8 wl_nan_dbg_dump_type_t;
+
+/** various params and ctl switches for the nan_debug instance */
+/*
+ * WL_NAN_CMD_DBG_DEBUG
+ */
+typedef struct wl_nan_debug_params {
+	uint16	cmd;	/**< debug cmd to perform a debug action */
+	uint16	status;
+	uint32	msglevel; /**< msg level if enabled */
+	uint8	enabled; /**< runtime debugging enabled */
+	uint8 collect;
+	uint8 PAD[2];
+} wl_nan_debug_params_t;
+
+
+typedef struct wl_nan_sched_svc_timeslot_s {
+	uint32 abitmap; /* availability bitmap */
+	uint32 chanlist[NAN_MAX_TIMESLOT];
+	uint8  res; /* resolution: 0 = 16ms, 1 = 32ms, 2 = 64ms, 3 = reserved. Refer to NAN spec */
+	uint8  mapid; /* mapid from NAN spec. Used to differentiate 2G vs 5G band */
+	uint8 PAD[2];
+} wl_nan_sched_svc_timeslot_t;
+
+
+/* nan passive scan params */
+#define NAN_SCAN_MAX_CHCNT 8
+typedef struct nan_scan_params {
+	uint16 scan_time;
+	uint16 home_time;
+	uint16 ms_intvl; /**< interval between merge scan */
+	uint16 ms_dur;  /**< duration of merge scan */
+	uint16 chspec_num;
+	uint8 pad[2];
+	chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /**< 3 actually used, 5 reserved for future use */
+} nan_scan_params_t;
+
+/* nan cmd IDs */
+enum wl_nan_cmds {
+	 /* nan cfg /disc & dbg ioctls */
+	WL_NAN_CMD_ENABLE = 1,
+	WL_NAN_CMD_ATTR = 2,
+	WL_NAN_CMD_NAN_JOIN = 3,
+	WL_NAN_CMD_LEAVE = 4,
+	WL_NAN_CMD_MERGE = 5,
+	WL_NAN_CMD_STATUS = 6,
+	WL_NAN_CMD_TSRESERVE = 7,
+	WL_NAN_CMD_TSSCHEDULE = 8,
+	WL_NAN_CMD_TSRELEASE = 9,
+	WL_NAN_CMD_OUI = 10,
+	WL_NAN_CMD_OOB_AF = 11,
+
+	WL_NAN_CMD_COUNT = 15,
+	WL_NAN_CMD_CLEARCOUNT = 16,
+
+	/*  discovery engine commands */
+	WL_NAN_CMD_PUBLISH = 20,
+	WL_NAN_CMD_SUBSCRIBE = 21,
+	WL_NAN_CMD_CANCEL_PUBLISH = 22,
+	WL_NAN_CMD_CANCEL_SUBSCRIBE = 23,
+	WL_NAN_CMD_TRANSMIT = 24,
+	WL_NAN_CMD_CONNECTION = 25,
+	WL_NAN_CMD_SHOW = 26,
+	WL_NAN_CMD_STOP = 27,	/* stop nan for a given cluster ID  */
+	/*  nan debug iovars & cmds  */
+	WL_NAN_CMD_SCAN_PARAMS = 46,
+	WL_NAN_CMD_SCAN = 47,
+	WL_NAN_CMD_SCAN_RESULTS = 48,
+	WL_NAN_CMD_EVENT_MASK = 49,
+	WL_NAN_CMD_EVENT_CHECK = 50,
+	WL_NAN_CMD_DUMP = 51,
+	WL_NAN_CMD_CLEAR = 52,
+	WL_NAN_CMD_RSSI = 53,
+
+	WL_NAN_CMD_DEBUG = 60,
+	WL_NAN_CMD_TEST1 = 61,
+	WL_NAN_CMD_TEST2 = 62,
+	WL_NAN_CMD_TEST3 = 63,
+	WL_NAN_CMD_DISC_RESULTS = 64,
+	/* nan 2.0 data path commands */
+	WL_NAN_CMD_DATAPATH = 65
+};
+
+/*   NAN DP interface commands  */
+enum wl_nan_dp_cmds {
+	/* nan 2.0 ioctls */
+	WL_NAN_CMD_DP_CAP = 1000,
+	WL_NAN_CMD_DP_CONFIG = 1001,
+	WL_NAN_CMD_DP_CREATE = 1002,
+	WL_NAN_CMD_DP_AUTO_CONNECT = 1003,
+	WL_NAN_CMD_DP_DATA_REQ = 1004,
+	WL_NAN_CMD_DP_DATA_RESP = 1005,
+	WL_NAN_CMD_DP_SCHED_UPD = 1006,
+	WL_NAN_CMD_DP_END = 1007,
+	WL_NAN_CMD_DP_CONNECT = 1008,
+	WL_NAN_CMD_DP_STATUS = 1009
+};
+
+/* TODO Should remove this fixed length */
+#define WL_NAN_DATA_SVC_SPEC_INFO_LEN 32 /* arbitrary */
+#define WL_NAN_DP_MAX_SVC_INFO	      0xFF
+#define WL_NAN_DATA_NDP_INST_SUPPORT 16
+
+/* Nan flags */
+#define WL_NAN_DP_FLAG_SVC_INFO	      (1 << 0)
+#define WL_NAN_DP_FLAG_CONFIRM	      (1 << 1)
+#define WL_NAN_DP_FLAG_EXPLICIT_CFM   (1 << 2)
+#define WL_NAN_DP_FLAG_SECURITY	      (1 << 3)
+
+/* NAN Datapath host status */
+#define WL_NAN_DP_STATUS_ACCEPTED     1
+#define WL_NAN_DP_STATUS_REJECTED     0
+
+/* to be done */
+typedef struct wl_nan_dp_cap {
+	uint8 tbd;
+} wl_nan_dp_cap_t;
+
+
+/** The service hash (service id) is exactly this many bytes. */
+#define WL_NAN_SVC_HASH_LEN	6
+/** Number of hash functions per bloom filter */
+#define WL_NAN_HASHES_PER_BLOOM 4
+/* no. of max last disc results */
+#define WL_NAN_MAX_DISC_RESULTS	3
+
+/* NAN security related defines */
+/* NCS-SK related */
+#define WL_NAN_NCS_SK_PMK_LEN	32
+#define WL_NAN_NCS_SK_PMKID_LEN	16
+
+/* recent discovery results */
+typedef struct wl_nan_disc_result_s
+{
+	wl_nan_instance_id_t instance_id;	/* instance id of pub/sub req */
+	wl_nan_instance_id_t peer_instance_id;	/* peer instance id of pub/sub req/resp */
+	uint8 svc_hash[WL_NAN_SVC_HASH_LEN];	/* service descp string */
+	struct ether_addr peer_mac;	/* peer mac address */
+} wl_nan_disc_result_t;
+
+/* list of recent discovery results */
+typedef struct wl_nan_disc_results_s
+{
+	wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS];
+} wl_nan_disc_results_list_t;
+
+/* nan 1.0 events */
+/* To be deprecated - will be replaced by event_disc_result */
+typedef struct wl_nan_ev_disc_result {
+	wl_nan_instance_id_t pub_id;
+	wl_nan_instance_id_t sub_id;
+	struct ether_addr pub_mac;
+	uint8 opt_tlvs[0];
+} wl_nan_ev_disc_result_t;
+
+typedef struct wl_nan_event_disc_result {
+	wl_nan_instance_id_t pub_id;
+	wl_nan_instance_id_t sub_id;
+	struct ether_addr pub_mac;
+	int8		publish_rssi;		/* publisher RSSI */
+	uint8		attr_num;
+	uint16		attr_list_len;	/* length of the all the attributes in the SDF */
+	uint8		attr_list[0];	/* list of NAN attributes */
+} wl_nan_event_disc_result_t;
+
+typedef struct wl_nan_ev_p2p_avail {
+	struct ether_addr sender;
+	struct ether_addr p2p_dev_addr;
+	uint8 dev_role;
+	uint8 resolution;
+	uint8 repeat;
+	uint8 pad[3];
+	chanspec_t chanspec;
+	uint32 avail_bmap;
+} wl_nan_ev_p2p_avail_t;
+
+/*
+* discovery interface event structures *
+*/
+
+/* mandatory parameters for OOB action frame */
+/* single-shot when bitmap and offset are set to 0; periodic otherwise */
+typedef struct wl_nan_oob_af_params_s
+{
+	/* bitmap for the 32 timeslots in 512TU dw interval */
+	uint32 ts_map;
+	/* offset from start of dw, in us */
+	uint32 tx_offset;
+	struct ether_addr bssid;
+	struct ether_addr dest;
+	uint32 pkt_lifetime;
+	uint16 payload_len;
+	uint8 payload[1];
+} wl_nan_oob_af_params_t;
+
+/* NAN Ranging */
+
+/* Bit defines for global flags */
+#define WL_NAN_RANGING_ENABLE		1 /**< enable RTT */
+#define WL_NAN_RANGING_RANGED		2 /**< Report to host if ranged as target */
+typedef struct nan_ranging_config {
+	uint32 chanspec;		/**< Ranging chanspec */
+	uint16 timeslot;		/**< NAN RTT start time slot  1-511 */
+	uint16 duration;		/**< NAN RTT duration in ms */
+	struct ether_addr allow_mac;	/**< peer initiated ranging: the allowed peer mac
+					 * address, a unicast (for one peer) or
+					 * a broadcast for all. Setting it to all zeros
+					 * means responding to none, same as not setting
+					 * the flag bit NAN_RANGING_RESPOND
+					 */
+	uint16 flags;
+} wl_nan_ranging_config_t;
+
+/** list of peers for self initiated ranging */
+/** Bit defines for per peer flags */
+#define WL_NAN_RANGING_REPORT (1<<0)	/**< Enable reporting range to target */
+typedef struct nan_ranging_peer {
+	uint32 chanspec;		/**< desired chanspec for this peer */
+	uint32 abitmap;			/**< available bitmap */
+	struct ether_addr ea;		/**< peer MAC address */
+	uint8 frmcnt;			/**< frame count */
+	uint8 retrycnt;			/**< retry count */
+	uint16 flags;			/**< per peer flags, report or not */
+	uint16 PAD;
+} wl_nan_ranging_peer_t;
+typedef struct nan_ranging_list {
+	uint8 count;			/**< number of MAC addresses */
+	uint8 num_peers_done;		/**< host set to 0, when read, shows number of peers
+					 * completed, success or fail
+					 */
+	uint8 num_dws;			/**< time period to do the ranging, specified in dws */
+	uint8 reserve;			/**< reserved field */
+	wl_nan_ranging_peer_t rp[1];	/**< variable length array of peers */
+} wl_nan_ranging_list_t;
+
+/* ranging results, a list for self initiated ranging and one for peer initiated ranging */
+/* There will be one structure for each peer */
+#define WL_NAN_RANGING_STATUS_SUCCESS		1
+#define WL_NAN_RANGING_STATUS_FAIL		2
+#define WL_NAN_RANGING_STATUS_TIMEOUT		3
+#define WL_NAN_RANGING_STATUS_ABORT		4 /**< with partial results if sounding count > 0 */
+typedef struct nan_ranging_result {
+	uint8 status;			/**< 1: Success, 2: Fail 3: Timeout 4: Aborted */
+	uint8 sounding_count;		/**< number of measurements completed (0 = failure) */
+	struct ether_addr ea;		/**< initiator MAC address */
+	uint32 chanspec;		/**< Chanspec where the ranging was done */
+	uint32 timestamp;		/**< 32bits of the TSF timestamp ranging was completed at */
+	uint32 distance;		/**< mean distance in meters expressed as Q4 number.
+					 * Only valid when sounding_count > 0. Examples:
+					 * 0x08 = 0.5m
+					 * 0x10 = 1m
+					 * 0x18 = 1.5m
+					 * set to 0xffffffff to indicate invalid number
+					 */
+	int32 rtt_var;			/**< standard deviation in 10th of ns of RTTs measured.
+					 * Only valid when sounding_count > 0
+					 */
+	struct ether_addr tgtea;	/**< target MAC address */
+	uint8 PAD[2];
+} wl_nan_ranging_result_t;
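+/*
+ * Worked example: distance is a Q4 fixed-point value (metres scaled by 16),
+ * so a reader converts it back with
+ *	metres = (double)rr->distance / 16.0;	// e.g. 0x18 -> 1.5 m
+ * after checking sounding_count > 0 and distance != 0xffffffff.
+ */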
+typedef struct nan_ranging_event_data {
+	uint8 mode;			/**< 1: Result of host initiated ranging */
+					/* 2: Result of peer initiated ranging */
+	uint8 reserved;
+	uint8 success_count;		/**< number of peers completed successfully */
+	uint8 count;			/**< number of peers in the list */
+	wl_nan_ranging_result_t rr[1];	/**< variable array of ranging peers */
+} wl_nan_ranging_event_data_t;
+
+enum {
+	WL_NAN_STATS_RSSI = 1,
+	WL_NAN_STATS_DATA = 2,
+	WL_NAN_STATS_DP = 3,
+/*
+ * ***** ADD before this line ****
+ */
+	WL_NAN_STATS_INVALID
+};
+typedef struct wl_nan_dp_stats {
+	uint32 tbd; /* TBD */
+} wl_nan_dp_stats_t;
+
+typedef struct wl_nan_stats {
+	/* general */
+	uint32 cnt_dw; /* DW slots */
+	uint32 cnt_disc_bcn_sch; /* disc beacon slots */
+	uint32 cnt_amr_exp; /* count of ambtt expiries resetting roles */
+	uint32 cnt_bcn_upd; /* count of beacon template updates */
+	uint32 cnt_bcn_tx; /* count of sync & disc bcn tx */
+	uint32 cnt_bcn_rx; /* count of sync & disc bcn rx */
+	uint32 cnt_sync_bcn_tx; /* count of sync bcn tx within DW */
+	uint32 cnt_disc_bcn_tx; /* count of disc bcn tx */
+	uint32 cnt_sdftx_bcmc; /* count of bcast/mcast sdf tx */
+	uint32 cnt_sdftx_uc; /* count of unicast sdf tx */
+	uint32 cnt_sdftx_fail; /* count of unicast sdf tx fails */
+	uint32 cnt_sdf_rx; /* count of  sdf rx */
+	/* NAN roles */
+	uint32 cnt_am; /* anchor master */
+	uint32 cnt_master; /* master */
+	uint32 cnt_nms; /* non master sync */
+	uint32 cnt_nmns; /* non master non sync */
+	/* TX */
+	uint32 cnt_err_txtime; /* txtime in sync bcn frame not a multiple of dw intv */
+	uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */
+	uint32 cnt_err_bcn_tx; /*  beacon tx error */
+	uint32 cnt_sync_bcn_tx_miss; /* no. of times time delta between 2 consecutive
+						* sync beacons is more than expected
+						*/
+	/* MSCH */
+	uint32 cnt_err_msch_reg; /* error in DW/disc registration with msch */
+	uint32 cnt_err_wrong_ch_cb; /* count of msch callbacks in wrong channel */
+	uint32 cnt_dw_skip;	/* count of DW rejected */
+	uint32 cnt_disc_skip; /* count of disc bcn rejected */
+	uint32 cnt_dw_start_early; /* msch cb not at registered time */
+	uint32 cnt_dw_start_late; /* no. of delays in slot start */
+	/* SCANS */
+	uint32 cnt_mrg_scan; /* count of merge scans completed */
+	uint32 cnt_err_ms_rej; /* number of merge scan failed */
+	uint32 cnt_scan_results; /* no. of nan beacons scanned */
+	uint32 cnt_join_scan_rej; /* no. of join scans rejected */
+	uint32 cnt_nan_scan_abort; /* no. of nan scans aborted */
+	/* enable/disable */
+	uint32 cnt_nan_enab; /* no. of times nan feature got enabled */
+	uint32 cnt_nan_disab; /* no. of times nan feature got disabled */
+	uint32 cnt_sync_bcn_rx; /* count of sync bcn rx within DW */
+} wl_nan_stats_t;
+
+#define WL_NAN_MAC_MAX_NAN_PEERS 6
+#define WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER  10
+
+typedef struct wl_nan_nbr_rssi {
+	uint8 rx_chan; /* channel number on which bcn rcvd */
+	uint8 PAD[3];
+	int32 rssi_raw;  /* received rssi value */
+	int32 rssi_avg;  /* normalized rssi value */
+} wl_nan_peer_rssi_t;
+
+typedef struct wl_nan_peer_rssi_entry {
+	struct ether_addr mac;  /* peer mac address */
+	uint8 flags;   /* TODO: rssi data order: latest first, oldest first etc */
+	uint8 rssi_cnt;   /* rssi data sample present */
+	wl_nan_peer_rssi_t rssi[WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER]; /* RSSI data from peer */
+} wl_nan_peer_rssi_entry_t;
+
+#define WL_NAN_PEER_RSSI      0x1
+#define WL_NAN_PEER_RSSI_LIST 0x2
+
+typedef struct wl_nan_nbr_rssi_data {
+	uint8 flags;   /* this is a list or single rssi data */
+	uint8 peer_cnt; /* number of peers */
+	uint16 pad; /* padding */
+	wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */
+} wl_nan_peer_rssi_data_t;
+
+/* WL_NAN_CMD_DBG_DUMP, GET Resp */
+typedef struct wl_nan_dbg_dump_rsp {
+	wl_nan_dbg_dump_type_t dump_type; /* dump data type */
+	uint8 pad[3];
+	union {
+		wl_nan_peer_rssi_data_t peer_rssi;
+		wl_nan_stats_t		nan_stats;
+	} u;
+} wl_nan_dbg_dump_rsp_t;
+
+enum nan_termination_status {
+	NAN_TERM_REASON_INVALID = 1,
+	NAN_TERM_REASON_TIMEOUT = 2,
+	NAN_TERM_REASON_USER_REQ = 3,
+	NAN_TERM_REASON_FAILURE = 4,
+	NAN_TERM_REASON_COUNT_REACHED = 5,
+	NAN_TERM_REASON_DE_SHUTDOWN = 6,
+	NAN_TERM_REASON_DISABLE_IN_PROGRESS = 7
+};
+
+/* nan2 data iovar */
+/* nan2 qos */
+typedef struct wl_nan_dp_qos
+{
+	uint8 tid;
+	uint8 pad;
+	uint16 pkt_size;
+	uint16 mean_rate;
+	uint16 svc_interval;
+} wl_nan_dp_qos_t;
+/* ndp config */
+typedef struct wl_nan_ndp_config
+{
+	uint8 ndp_id;
+	uint8 pub_id;
+	struct ether_addr pub_addr;
+	struct ether_addr data_addr;	/* configure local data addr */
+	struct ether_addr init_data_addr;	/* initiator data addr */
+	uint8 svc_spec_info[WL_NAN_DATA_SVC_SPEC_INFO_LEN];
+	wl_nan_dp_qos_t qos;
+	uint16 avail_len;
+	uint8 pad[3];
+	uint8 data[1];
+} wl_nan_ndp_config_t;
+
+/* nan2 device capabilities */
+typedef struct wl_nan_ndp_oper_cfg {
+	uint8 awake_dw_2g;
+	uint8 awake_dw_5g;
+	uint8 bands_supported;
+	uint8 op_mode;
+} wl_nan_ndp_oper_cfg_t;
+
+typedef uint8 wl_nan_ndp_ndpid_t;
+typedef uint8 wl_nan_ndp_conn_t;
+
+typedef struct wl_nan_dp_req {
+	uint8 type;		    /* 0- unicast 1 - multicast */
+	uint8 pub_id;		    /* Publisher ID */
+	uint16 flags;
+	struct ether_addr peer_mac; /* Peer's NMI addr */
+	struct ether_addr mcast_mac; /* Multicast addr */
+	wl_nan_dp_qos_t qos;
+	uint8 tlv_params[];	/* xtlv parameters for command */
+} wl_nan_dp_req_t;
+
+/* TODO  Need to replace ndp_id with lndp_id */
+/* Return structure to data req IOVAR */
+typedef struct wl_nan_dp_req_ret {
+	struct ether_addr indi;	    /* Initiators data mac addr */
+	uint8 ndp_id;		    /* Initiators ndpid */
+	uint8 pad;
+} wl_nan_dp_req_ret_t;
+
+typedef struct wl_nan_dp_resp {
+	uint8 type;		    /* 0- unicast 1 - multicast */
+	uint8 status;		    /* Accepted or Rejected */
+	uint8 reason_code;
+	/* Local NDP ID for unicast, mc_id for multicast, 0 for implicit NMSG */
+	uint8 ndp_id;
+	wl_nan_dp_qos_t qos;
+	/* Initiator data address for unicast or multicast address for multicast */
+	struct ether_addr mac_addr;
+	uint16 flags;
+	uint8 tlv_params[];	/* xtlv parameters for command */
+} wl_nan_dp_resp_t;
+
+/* Return structure to data resp IOVAR */
+typedef struct wl_nan_dp_resp_ret {
+	uint8 nmsgid;		    /* NMSG ID for multicast, else 0 */
+	uint8 pad[3];
+} wl_nan_dp_resp_ret_t;
+
+typedef struct wl_nan_dp_conf {
+	uint8 lndp_id;
+	uint8 status;		    /* Accepted or Rejected */
+	uint8 pad[2];
+} wl_nan_dp_conf_t;
+
+typedef struct wl_nan_dp_end
+{
+	uint8 lndp_id;
+	uint8 status;
+	uint8 pad[2];
+} wl_nan_dp_end_t;
+
+typedef struct wl_nan_dp_schedupd {
+	uint8 type;		/* 0: unicast, 1: multicast */
+	uint8 flags;
+	struct ether_addr addr;	/* peer NMI or multicast addr */
+	wl_nan_dp_qos_t qos;
+	uint8 map_id;
+	uint8 pad[3];
+} wl_nan_dp_schedupd_t;
+
+/* set: update with notification, unset: NDL setup handshake */
+#define WL_NAN_DP_SCHEDUPD_NOTIF (1 << 0)
+
+/* list ndp ids */
+typedef struct wl_nan_ndp_id_list {
+	uint16 ndp_count;
+	uint8 lndp_id[];
+} wl_nan_ndp_id_list_t;
+
+/* nan2 status */
+typedef struct ndp_session {
+	uint8 lndp_id;
+	uint8 state;
+	uint8 pub_id;
+	uint8 pad;
+} ndp_session_t;
+
+typedef struct wl_nan_ndp_status {
+	struct ether_addr peer_nmi;
+	struct ether_addr peer_ndi;
+	ndp_session_t session;
+	uint8 pad;
+} wl_nan_ndp_status_t;
+
+/* events */
+#define NAN_DP_SESSION_UNICAST         0
+#define NAN_DP_SESSION_MULTICAST       1
+#define NAN_DP_SECURITY_NONE           0
+#define NAN_DP_SECURITY_CSID           1
+#define NAN_DP_SECURITY_MK             2
+#define WL_NAN_DATA_NMSGID_LEN    8 /* 8 bytes as per nan spec */
+
+/* Common event structure for Nan Datapath
+ * Used for sending NDP Indication, Response, Confirmation, Security Install and Establish events
+ */
+typedef struct wl_nan_ev_datapath_cmn {
+	uint8 type;
+	/* ndp_id is valid only if type is unicast */
+	uint8 ndp_id;
+	uint8 pub_id;
+	uint8 security;
+	/* Following two fields are valid only if type is unicast */
+	struct ether_addr initiator_ndi;
+	struct ether_addr responder_ndi;
+	struct ether_addr peer_nmi;
+	uint8 status;
+	uint8 role;
+	/* Following two fields are valid only if type is multicast */
+	uint8 nmsg_id[WL_NAN_DATA_NMSGID_LEN];
+	uint8 mc_id;
+	uint8 pad[1];
+	uint16 opt_tlv_len;
+	uint8 opt_tlvs[];
+} wl_nan_ev_datapath_cmn_t;
+
+typedef struct wl_nan_ev_datapath_end {
+	uint8 ndp_id;
+	uint8 status;
+	uint8 pad[2];
+	struct ether_addr peer_nmi;
+	struct ether_addr peer_ndi;
+} wl_nan_ev_datapath_end_t;
+
+/* NAN2.0 Ranging definitions */
+
+/* result indication bit map */
+#define NAN_RANGE_INDICATION_CONT		(1<<0)
+#define NAN_RANGE_INDICATION_INGRESS		(1<<1)
+#define NAN_RANGE_INIDICATION_EGRESS		(1<<2)
+
+/* responder flags */
+#define NAN_RANGE_FLAG_AUTO_ACCEPT	(1 << 0)
+#define NAN_RANGE_FLAG_RESULT_REQUIRED	(1 << 1)
+
+typedef struct wl_nan_range_req {
+	struct ether_addr peer;
+	uint8 publisher_id;
+	uint8 indication; /* bit map for result event */
+	uint32 resolution; /* default millimeters */
+	uint32 ingress; /* ingress limit in mm */
+	uint32 egress; /* egress limit in mm */
+	uint32 interval; /* max interval(in TU) b/w two ranging measurements */
+} wl_nan_range_req_t;
+
+#define NAN_RNG_REQ_IOV_LEN	24
+
+typedef uint8 wl_nan_range_id;
+
+typedef struct wl_nan_range_resp {
+	wl_nan_range_id range_id;
+	uint8 flags; /* auto response, range result required */
+	uint8 status; /* accept, reject */
+	uint8 indication; /* bit map for result event */
+	uint32 resolution; /* default millimeters */
+	uint32 ingress; /* ingress limit in mm */
+	uint32 egress; /* egress limit in mm */
+	uint32 interval; /* max interval(in TU) b/w two ranging measurements */
+} wl_nan_range_resp_t;
+
+#define NAN_RNG_RESP_IOV_LEN	20
+
+#define NAN_RNG_MAX_IOV_LEN	255
+
+typedef struct wl_nan_ev_rng_req_ind {
+	struct ether_addr peer_m_addr;
+	uint8 rng_id;
+	/* ftm parameters */
+	uint8 max_burst_dur;
+	uint8 min_ftm_delta;
+	uint8 max_num_ftm;
+	uint8 ftm_format_bw;
+	/* location info availability bit map */
+	uint8 lc_info_avail;
+	/* Last movement indication */
+	uint16 last_movement;
+	uint8 pad[2];
+} wl_nan_ev_rng_req_ind_t;
+
+#define NAN_RNG_REQ_IND_SIZE 14
+
+typedef struct wl_nan_ev_rng_rpt_ind {
+	uint32 dist_mm; /* in millimeter */
+	struct ether_addr peer_m_addr;
+	uint8 indication; /* indication definitions mentioned above */
+	uint8 pad;
+} wl_nan_ev_rng_rpt_ind_t;
+
+#define NAN_RNG_RPT_IND_SIZE 11
+
+typedef struct wl_nan_ev_rng_term_ind {
+	struct ether_addr peer_m_addr;
+	uint8 reason_code;
+	uint8 pad;
+} wl_nan_ev_rng_term_ind_t;
+
+#define NAN_RNG_TERM_IND_SIZE 7
+
+
+/* ********************* end of NAN section ******************************** */
+/* endif WL_NAN */
+
+#define P2P_NAN_IOC_BUFSZ  512 /* some sufficient ioc buff size */
+#define WL_P2P_NAN_IOCTL_VERSION    0x1
+
+/* container for p2p nan iovtls & events */
+typedef struct wl_p2p_nan_ioc {
+	uint16  version;    /* interface command or event version */
+	uint16  id;     /* p2p nan ioctl cmd  ID  */
+	uint16  len;        /* total length of data[]  */
+	uint16  pad;        /* padding */
+	uint8   data [];   /* var len payload of bcm_xtlv_t type */
+} wl_p2p_nan_ioc_t;
+
+/* p2p nan cmd IDs */
+enum wl_p2p_nan_cmds {
+	/* p2p nan cfg ioctls */
+	WL_P2P_NAN_CMD_ENABLE = 1,
+	WL_P2P_NAN_CMD_CONFIG = 2,
+	WL_P2P_NAN_CMD_DEL_CONFIG = 3,
+	WL_P2P_NAN_CMD_GET_INSTS = 4
+};
+
+#define WL_P2P_NAN_CONFIG_VERSION       1
+
+#define WL_P2P_NAN_DEVICE_P2P  0x0
+#define WL_P2P_NAN_DEVICE_GO   0x1
+#define WL_P2P_NAN_DEVICE_GC   0x2
+#define WL_P2P_NAN_DEVICE_INVAL   0xFF
+
+/* NAN P2P operation */
+typedef struct p2p_nan_config {
+	uint16 version;            /* wl_p2p_nan_config_t structure version */
+	uint16 len;                /* total length including version and variable IE */
+	uint32 flags;              /* 0x1 to NEW, 0x2 to ADD, 0x4 to DEL */
+	uint8  inst_id;            /* publisher/subscriber id */
+	uint8  inst_type;          /* publisher/subscriber */
+	uint8  dev_role;           /* P2P device role: 'P2P','GO' or 'GC' */
+	uint8  pad1;               /* padding */
+	uint8  resolution;         /* Availability bitmap resolution */
+	uint8  repeat;             /* Whether Availability repeats across DW */
+	uint16 ie_len;             /* variable ie len */
+	struct ether_addr dev_mac; /* P2P device address */
+	uint16 pad2;               /* Padding */
+	uint32 avail_bmap;         /* availability interval bitmap */
+	uint32 chanspec;           /* Chanspec */
+	uint8  ie[];              /* hex ie data */
+} wl_p2p_nan_config_t;
+
+#define WL_P2P_NAN_SERVICE_LIST_VERSION 1
+typedef enum wl_nan_service_type {
+	WL_NAN_SVC_INST_PUBLISHER = 1,
+	WL_NAN_SVC_INST_SUBSCRIBER = 2
+} wl_nan_service_type_t;
+
+#define WL_P2P_NAN_CONFIG_NEW   0x1
+#define WL_P2P_NAN_CONFIG_ADD   0x2
+#define WL_P2P_NAN_CONFIG_DEL   0x4
+
+typedef struct wl_nan_svc_inst {
+	uint8  inst_id;      /* publisher/subscriber id */
+	uint8  inst_type;    /* publisher/subscriber */
+} wl_nan_svc_inst_t;
+
+typedef struct wl_nan_svc_inst_list {
+	uint16 version;           /* this structure version */
+	uint16 len;               /* total length including version and variable svc list */
+	uint16 count;             /* service instance count */
+	uint16 pad;               /* padding */
+	wl_nan_svc_inst_t svc[1]; /* service instance list */
+} wl_nan_svc_inst_list_t;
+
+#define NAN_POST_DISC_P2P_DATA_VER  1
+/* This structure will be used send peer p2p data with
+ * NAN discovery result
+ */
+typedef struct nan_post_disc_p2p_data {
+	uint8 ver;                 /* this structure version */
+	uint8 dev_role;            /* P2P Device role */
+	uint8 resolution;          /* Availability bitmap resolution */
+	uint8 repeat;              /* Whether Availability repeats across DW */
+	struct ether_addr dev_mac; /* P2P device address */
+	uint16 pad1;               /* Padding */
+	uint32 chanspec;           /* Chanspec */
+	uint32 avl_bmp;				/* availability interval bitmap */
+} nan_post_disc_p2p_data_t;
+
+/* timeslot etc for NAN */
+enum {
+	WL_TMU_TU            = 0,
+	WL_TMU_SEC           = 1,
+	WL_TMU_MILLI_SEC     = 2,
+	WL_TMU_MICRO_SEC     = 3,
+	WL_TMU_NANO_SEC      = 4,
+	WL_TMU_PICO_SEC      = 5
+};
+typedef int16 wl_tmu_t;
+
+typedef struct {
+	uint32   intvl;               /* time interval */
+	wl_tmu_t tmu;                 /* time unit */
+	uint8    pad[2];              /* padding */
+} wl_time_interval_t;
+
+/* availability slot flags */
+enum {
+	WL_AVAIL_SLOT_NONE		= 0x0000,
+	WL_AVAIL_SLOT_COM		= 0x0001,		/* committed */
+	WL_AVAIL_SLOT_POT		= 0x0002,		/* potential */
+	WL_AVAIL_SLOT_PROP		= 0x0004,	/* proposed -  note: not configurable */
+	WL_AVAIL_SLOT_PAGED		= 0x0008	/* P-NDL */
+	/* 0x0030 - reserved for NDC index */
+	/* 0x00c0 - reserved for usage preference */
+};
+typedef int16 wl_avail_slot_flags_t;
+
+#define WL_AVAIL_SLOT_NDC_MASK 0x0030 /* up to 4 NDCs */
+#define WL_AVAIL_SLOT_NDC_SHIFT 4
+#define WL_AVAIL_SLOT_NDC(_flags)  (((_flags) & WL_AVAIL_SLOT_NDC_MASK) \
+	 >> WL_AVAIL_SLOT_NDC_SHIFT)
+#define WL_AVAIL_SLOT_SET_NDC(_flags, _ndc_idx) (((_flags) & ~WL_AVAIL_SLOT_NDC_MASK) |\
+	((_ndc_idx) << WL_AVAIL_SLOT_NDC_SHIFT))
+
+#define WL_AVAIL_SLOT_UPREF_MASK 0x00c0 /* up to 4 usage preferences */
+#define WL_AVAIL_SLOT_UPREF_SHIFT 6
+#define WL_AVAIL_SLOT_UPREF(_flags)  (((_flags) & WL_AVAIL_SLOT_UPREF_MASK) \
+	 >> WL_AVAIL_SLOT_UPREF_SHIFT)
+#define WL_AVAIL_SLOT_SET_UPREF(_flags, _pref) (((_flags) & ~WL_AVAIL_SLOT_UPREF_MASK) |\
+	((_pref) << WL_AVAIL_SLOT_UPREF_SHIFT))
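+/* Usage sketch (illustrative only, not part of the original interface; the
+ * variable below is hypothetical): packing an NDC index and usage preference
+ * into the slot flags with the helpers above, then reading them back:
+ *
+ *	uint16 flags = WL_AVAIL_SLOT_COM;
+ *	flags = WL_AVAIL_SLOT_SET_NDC(flags, 2);	flags == 0x0021
+ *	flags = WL_AVAIL_SLOT_SET_UPREF(flags, 1);	flags == 0x0061
+ *	WL_AVAIL_SLOT_NDC(flags) yields 2, WL_AVAIL_SLOT_UPREF(flags) yields 1
+ */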
+
+typedef struct wl_avail_slot {
+	wl_avail_slot_flags_t flags;
+	uint16                PAD;
+	wl_time_interval_t    start;        /* from time ref */
+	wl_time_interval_t    duration;     /* from start */
+	uint32                chanspec;     /* channel spec */
+} wl_avail_slot_t;
+
+/* time reference */
+enum {
+	WL_TIME_REF_NONE        = 0,
+	WL_TIME_REF_DEV_TSF     = 1,
+	WL_TIME_REF_NAN_DW      = 2,
+	WL_TIME_REF_TBTT        = 3,
+	WL_TIME_REF_NAN_DW0     = 4
+};
+typedef int16 wl_time_ref_t;
+
+enum {
+	WL_AVAIL_NONE		= 0x0000,
+	WL_AVAIL_LOCAL		= 0x0001,
+	WL_AVAIL_PEER		= 0x0002,
+	WL_AVAIL_NDC		= 0x0003,
+	WL_AVAIL_IMMUTABLE	= 0x0004,
+	WL_AVAIL_RESPONSE	= 0x0005,
+	WL_AVAIL_COUNTER	= 0x0006,
+	WL_AVAIL_RANGING	= 0x0007,
+	WL_AVAIL_TYPE_MAX	= WL_AVAIL_RANGING	/* add new types above and update */
+};
+#define WL_AVAIL_TYPE_MASK	0x000F
+#define WL_AVAIL_FLAG_RAW_MODE	0x8000
+typedef int16 wl_avail_flags_t;
+
+/* availability entry flags */
+enum {
+	WL_AVAIL_ENTRY_NONE		= 0x0000,
+	WL_AVAIL_ENTRY_COM		= 0x0001,		/* committed */
+	WL_AVAIL_ENTRY_POT		= 0x0002,		/* potential */
+	WL_AVAIL_ENTRY_COND		= 0x0004,	/* conditional */
+	WL_AVAIL_ENTRY_PAGED		= 0x0008,	/* P-NDL */
+	WL_AVAIL_ENTRY_USAGE		= 0x0030,	/* usage preference */
+	WL_AVAIL_ENTRY_BIT_DUR		= 0x00c0,	/* bit duration */
+	WL_AVAIL_ENTRY_BAND_PRESENT	= 0x0100,	/* band present */
+	WL_AVAIL_ENTRY_CHAN_PRESENT	= 0x0200,	/* channel information present */
+	WL_AVAIL_ENTRY_CHAN_ENTRY_PRESENT	= 0x0400,	/* channel entry (opclass+bitmap) */
+};
+
+/* bit duration */
+enum {
+	WL_AVAIL_BIT_DUR_16	= 0,	/* 16TU */
+	WL_AVAIL_BIT_DUR_32	= 1,	/* 32TU */
+	WL_AVAIL_BIT_DUR_64	= 2,	/* 64TU */
+	WL_AVAIL_BIT_DUR_128	= 3,	/* 128TU */
+};
+
+/* period */
+enum {
+	WL_AVAIL_PERIOD_0	= 0,	/* 0TU */
+	WL_AVAIL_PERIOD_128	= 1,	/* 128TU */
+	WL_AVAIL_PERIOD_256	= 2,	/* 256TU */
+	WL_AVAIL_PERIOD_512	= 3,	/* 512TU */
+	WL_AVAIL_PERIOD_1024	= 4,	/* 1024TU */
+	WL_AVAIL_PERIOD_2048	= 5,	/* 2048TU */
+	WL_AVAIL_PERIOD_4096	= 6,	/* 4096TU */
+	WL_AVAIL_PERIOD_8192	= 7,	/* 8192TU */
+};
+
+/* band */
+enum {
+	WL_AVAIL_BAND_NONE	= 0,	/* reserved */
+	WL_AVAIL_BAND_SUB1G	= 1,	/* sub-1 GHz */
+	WL_AVAIL_BAND_2G	= 2,	/* 2.4 GHz */
+	WL_AVAIL_BAND_3G	= 3,	/* reserved (for 3.6 GHz) */
+	WL_AVAIL_BAND_5G	= 4,	/* 4.9 and 5 GHz */
+	WL_AVAIL_BAND_60G	= 5,	/* reserved (for 60 GHz) */
+};
+
+#define WL_AVAIL_ENTRY_TYPE_MASK 0x0F
+#define WL_AVAIL_ENTRY_USAGE_MASK 0x0030 /* up to 4 usage preferences */
+#define WL_AVAIL_ENTRY_USAGE_SHIFT 4
+#define WL_AVAIL_ENTRY_USAGE_VAL(_flags)  (((_flags) & WL_AVAIL_ENTRY_USAGE_MASK) \
+	>> WL_AVAIL_ENTRY_USAGE_SHIFT)
+
+#define WL_AVAIL_ENTRY_BIT_DUR_MASK 0x00c0 /* 0:16TU, 1:32TU, 2:64TU, 3:128TU */
+#define WL_AVAIL_ENTRY_BIT_DUR_SHIFT 6
+#define WL_AVAIL_ENTRY_BIT_DUR_VAL(_flags)  (((_flags) & WL_AVAIL_ENTRY_BIT_DUR_MASK) \
+	>> WL_AVAIL_ENTRY_BIT_DUR_SHIFT)
+
+#define WL_AVAIL_ENTRY_BAND_MASK 0x0100 /* 0=band not present, 1=present */
+#define WL_AVAIL_ENTRY_BAND_SHIFT 8
+
+#define WL_AVAIL_ENTRY_CHAN_MASK 0x0200 /* 0=channel info not present, 1=present */
+#define WL_AVAIL_ENTRY_CHAN_SHIFT 9
+
+#define WL_AVAIL_ENTRY_CHAN_ENTRY_MASK 0x0400 /* 0=chanspec, 1=hex channel entry */
+#define WL_AVAIL_ENTRY_CHAN_ENTRY_SHIFT 10
+
+#define WL_AVAIL_ENTRY_OPCLASS_MASK 0xFF
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_MASK 0xFF00
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT 8
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_VAL(_info) (((_info) & WL_AVAIL_ENTRY_CHAN_BITMAP_MASK) \
+	>> WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT)
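+/* Decode sketch (illustrative only): for a hypothetical channel_info value of
+ * 0x0273 in the hex channel entry form (opclass + bitmap per the NAN spec),
+ *
+ *	channel_info & WL_AVAIL_ENTRY_OPCLASS_MASK	yields 0x73 (opclass)
+ *	WL_AVAIL_ENTRY_CHAN_BITMAP_VAL(channel_info)	yields 0x02 (bitmap)
+ */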
+
+/* Used for raw channel entry field input */
+#define	MAX_CHAN_ENTRY_LEN 6
+
+typedef struct wl_avail_entry {
+	uint16		length;		/* total length */
+	uint16		start_offset;	/* in TUs, multiply by 16 for total offset */
+	union {
+		uint32 channel_info;	/* either chanspec or hex channel entry (opclass +
+					 * bitmap per NAN spec), as indicated by setting
+					 * WL_AVAIL_ENTRY_CHAN_ENTRY_PRESENT flag
+					 */
+		uint32 band;		/* defined by WL_BAND enum, 2=2.4GHz, 4=5GHz */
+		uint8	channel_entry[MAX_CHAN_ENTRY_LEN];
+	} u;				/* band or channel value, 0=all band/channels */
+	uint8		pad[2];
+	uint8		period;		/* in TUs, defined by WL_AVAIL_PERIOD enum
+					 * 1:128, 2:256, 3:512, 4:1024, 5:2048, 6:4096,
+					 * 7:8192
+					 */
+	uint8		bitmap_len;
+	uint16		flags;		/* defined by avail entry flags enum:
+					 * type, usage pref, bit duration, band, channel
+					 */
+	uint8		bitmap[];	/* time bitmap */
+} wl_avail_entry_t;
+
+typedef struct wl_avail {
+	uint16		length;		/* total length */
+	uint16		flags;		/* defined by WL_AVAIL enum
+					 * 1=local, 2=peer, 3=ndc, 4=immutable,
+					 * 5=response, 6=counter
+					 */
+	uint8		id;		/* id used for multiple maps/avail */
+	uint8		pad[3];
+	struct ether_addr addr;	/* peer mac address or ndc id */
+	uint8		num_entries;
+	uint8		entry_offset;
+	/* add additional fields above this line */
+	uint8		entry[];
+} wl_avail_t;
+
+#define WL_AVAIL_MIN_LEN(n) ((n) ? OFFSETOF(wl_avail_t, entry) + \
+		((n) * OFFSETOF(wl_avail_entry_t, bitmap)) : 0)
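+/* Validation sketch (illustrative only; 'avail' is a hypothetical pointer to a
+ * returned wl_avail_t): a host might sanity-check the reported length against
+ * the minimum implied by num_entries before walking entry[], e.g.
+ *
+ *	if (avail->length < WL_AVAIL_MIN_LEN(avail->num_entries))
+ *		bail out (buffer too short)
+ */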
+
+/* unaligned schedule (window) */
+typedef struct wl_avail_ulw {
+	uint8	id;		/* schedule ID */
+	uint8	overwrite;	/* bit 0: overwrite all
+				 * 1-4: map ID if overwrite all is 0
+				 */
+	uint16	flags;
+	uint32	start;		/* start time of first ULW, in us */
+	uint32	dur;		/* duration of ULW, in us */
+	uint32	period;		/* time between consecutive ULWs, in us */
+	union {
+		uint32 chanspec;
+		uint32 band;
+		uint8 chan_entry[MAX_CHAN_ENTRY_LEN];
+		uint8 pad[8];
+	} u;
+	uint8	cntdwn;		/* remaining ULWs before schedule ends */
+	uint8	pad[3];
+} wl_avail_ulw_t;
+
+/* unset: NAN is not available during ULW, set: NAN is avail depending on ctrl flags */
+#define WL_NAN_ULW_CTRL_PRESENT		(1 << 0)
+/* unset: band, set: channel */
+#define WL_NAN_ULW_CTRL_TYPE		(1 << 1)
+/* set: NAN is available on specified band/channel */
+#define WL_NAN_ULW_CTRL_AVAIL		(1 << 2)
+/* channel is provided in raw attribute format */
+#define WL_NAN_ULW_CTRL_RAW_CHAN	(1 << 3)
+
+/* nan wfa testmode operations */
+enum {
+	WL_NAN_WFA_TM_IGNORE_TERMINATE_NAF		= 0x00000001,
+	WL_NAN_WFA_TM_IGNORE_RX_DATA_OUTSIDE_CRB	= 0x00000002,
+	WL_NAN_WFA_TM_ALLOW_TX_DATA_OUTSIDE_CRB		= 0x00000004,
+	WL_NAN_WFA_TM_ENFORCE_NDL_COUNTER		= 0x00000008,
+	WL_NAN_WFA_TM_BYPASS_NDL_PROPOSAL_VALIDATION	= 0x00000010,
+	/* allow data(pings) tx while ndp sec negotiation */
+	WL_NAN_WFA_TM_SEC_SEND_PINGS_BYPASS_NDP_SM	= 0x00000020,
+	/* generate and insert incorrect mic */
+	WL_NAN_WFA_TM_SEC_INCORRECT_MIC		        = 0x00000040,
+	/* send m4 reject deliberately */
+	WL_NAN_WFA_TM_SEC_REJECT_STATUS4M4		= 0x00000080,
+	/* send mgmt frame (for eg. ndp terminate) in clear txt (bypass security) */
+	WL_NAN_WFA_TM_SEC_SEND_MGMT_CLEAR	        = 0x00000100,
+	WL_NAN_WFA_TM_FLAG_MASK				= 0x000001ff /* add above & update mask */
+};
+typedef uint32 wl_nan_wfa_testmode_t;
+
+#define RSSI_THRESHOLD_SIZE 16
+#define MAX_IMP_RESP_SIZE 256
+
+typedef struct wl_proxd_rssi_bias {
+	int32		version;			/**< version */
+	int32		threshold[RSSI_THRESHOLD_SIZE];	/**< threshold */
+	int32		peak_offset;			/**< peak offset */
+	int32		bias;				/**< rssi bias */
+	int32		gd_delta;			/**< GD - GD_ADJ */
+	int32		imp_resp[MAX_IMP_RESP_SIZE];	/**< (Hi*Hi)+(Hr*Hr) */
+} wl_proxd_rssi_bias_t;
+
+typedef struct wl_proxd_rssi_bias_avg {
+	int32		avg_threshold[RSSI_THRESHOLD_SIZE];	/**< avg threshold */
+	int32		avg_peak_offset;			/**< avg peak offset */
+	int32		avg_rssi;				/**< avg rssi */
+	int32		avg_bias;				/**< avg bias */
+} wl_proxd_rssi_bias_avg_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info {
+	uint16		type;  /**< type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */
+	uint16		index;		/**< The current frame index, from 1 to total_frames. */
+	uint16		tof_cmd;	/**< M_TOF_CMD      */
+	uint16		tof_rsp;	/**< M_TOF_RSP      */
+	uint16		tof_avb_rxl;	/**< M_TOF_AVB_RX_L */
+	uint16		tof_avb_rxh;	/**< M_TOF_AVB_RX_H */
+	uint16		tof_avb_txl;	/**< M_TOF_AVB_TX_L */
+	uint16		tof_avb_txh;	/**< M_TOF_AVB_TX_H */
+	uint16		tof_id;		/**< M_TOF_ID */
+	uint8		tof_frame_type;
+	uint8		tof_frame_bw;
+	int8		tof_rssi;
+	int32		tof_cfo;
+	int32		gd_adj_ns;	/**< group delay */
+	int32		gd_h_adj_ns;	/**< group delay + threshold crossing */
+	int16		nfft;		/**< number of samples stored in H */
+	uint8		num_max_cores;
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t;
+#include <packed_section_end.h>
+
+#define K_TOF_COLLECT_H_PAD 1
+#define K_TOF_COLLECT_SC_20MHZ (64)
+/* Maximum possible size of sample capture */
+#define K_TOF_COLLECT_SC_80MHZ (2*K_TOF_COLLECT_SC_20MHZ)
+/* Maximum possible size of channel dump */
+#define K_TOF_COLLECT_CHAN_SIZE (2*K_TOF_COLLECT_SC_80MHZ)
+
+/*
+A few extra samples are required to estimate frequency offset
+Right now 16 samples are being used. Can be changed in future.
+*/
+#define K_TOF_COLLECT_SAMP_SIZE_20MHZ (2*(K_TOF_COLLECT_SC_20MHZ)+16+K_TOF_COLLECT_H_PAD)
+#define K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ (2*K_TOF_COLLECT_SAMP_SIZE_20MHZ)
+#define K_TOF_COLLECT_H_SIZE_20MHZ (K_TOF_COLLECT_SAMP_SIZE_20MHZ)
+#define K_TOF_COLLECT_HRAW_SIZE_20MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ)
+
+#define K_TOF_COLLECT_SAMP_SIZE_80MHZ (2*(K_TOF_COLLECT_SC_80MHZ)+16+K_TOF_COLLECT_H_PAD)
+#define K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ (2*K_TOF_COLLECT_SAMP_SIZE_80MHZ)
+#define K_TOF_COLLECT_H_SIZE_80MHZ (K_TOF_COLLECT_SAMP_SIZE_80MHZ)
+#define K_TOF_COLLECT_HRAW_SIZE_80MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ)
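+/* Worked sizes (follow directly from the definitions above, shown for
+ * reference): with K_TOF_COLLECT_SC_20MHZ = 64, K_TOF_COLLECT_SC_80MHZ = 128
+ * and K_TOF_COLLECT_H_PAD = 1,
+ *	K_TOF_COLLECT_SAMP_SIZE_20MHZ = 2*64 + 16 + 1  = 145 (HRAW_SIZE_20MHZ = 290)
+ *	K_TOF_COLLECT_SAMP_SIZE_80MHZ = 2*128 + 16 + 1 = 273 (HRAW_SIZE_80MHZ = 546)
+ */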
+
+#define WL_PROXD_COLLECT_DATA_VERSION_1		1
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data_v1 {
+	wl_proxd_collect_info_t  info;
+	uint8	ri_rr[FTM_TPK_RI_RR_LEN];
+	/**< raw data read from phy used to adjust timestamps */
+	uint32	H[K_TOF_COLLECT_H_SIZE_20MHZ];
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t_v1;
+#include <packed_section_end.h>
+
+#define WL_PROXD_COLLECT_DATA_VERSION_2		2
+typedef struct wl_proxd_collect_data_v2 {
+	uint16			version;
+	uint16			len;
+	wl_proxd_collect_info_t	info;
+	uint8			ri_rr[FTM_TPK_RI_RR_LEN];
+	uint8			pad[3]; /* should be based on FTM_TPK_RI_RR_LEN */
+	/**< raw data read from phy used to adjust timestamps */
+	uint32			H[K_TOF_COLLECT_H_SIZE_20MHZ];
+	uint32			chan[4 * K_TOF_COLLECT_CHAN_SIZE];
+} wl_proxd_collect_data_t_v2;
+#define WL_PROXD_COLLECT_DATA_VERSION_MAX	WL_PROXD_COLLECT_DATA_VERSION_2
+
+typedef struct wl_proxd_debug_data {
+	uint8		count;		/**< number of packets */
+	uint8		stage;		/**< state machine stage */
+	uint8		received;	/**< received or txed */
+	uint8		paket_type;	/**< packet type */
+	uint8		category;	/**< category field */
+	uint8		action;		/**< action field */
+	uint8		token;		/**< token number */
+	uint8		follow_token;	/**< following token number */
+	uint16		index;		/**< index of the packet */
+	uint16		tof_cmd;	/**< M_TOF_CMD */
+	uint16		tof_rsp;	/**< M_TOF_RSP */
+	uint16		tof_avb_rxl;	/**< M_TOF_AVB_RX_L */
+	uint16		tof_avb_rxh;	/**< M_TOF_AVB_RX_H */
+	uint16		tof_avb_txl;	/**< M_TOF_AVB_TX_L */
+	uint16		tof_avb_txh;	/**< M_TOF_AVB_TX_H */
+	uint16		tof_id;		/**< M_TOF_ID */
+	uint16		tof_status0;	/**< M_TOF_STATUS_0 */
+	uint16		tof_status2;	/**< M_TOF_STATUS_2 */
+	uint16		tof_chsm0;	/**< M_TOF_CHNSM_0 */
+	uint16		tof_phyctl0;	/**< M_TOF_PHYCTL0 */
+	uint16		tof_phyctl1;	/**< M_TOF_PHYCTL1 */
+	uint16		tof_phyctl2;	/**< M_TOF_PHYCTL2 */
+	uint16		tof_lsig;	/**< M_TOF_LSIG */
+	uint16		tof_vhta0;	/**< M_TOF_VHTA0 */
+	uint16		tof_vhta1;	/**< M_TOF_VHTA1 */
+	uint16		tof_vhta2;	/**< M_TOF_VHTA2 */
+	uint16		tof_vhtb0;	/**< M_TOF_VHTB0 */
+	uint16		tof_vhtb1;	/**< M_TOF_VHTB1 */
+	uint16		tof_apmductl;	/**< M_TOF_AMPDU_CTL */
+	uint16		tof_apmdudlim;	/**< M_TOF_AMPDU_DLIM */
+	uint16		tof_apmdulen;	/**< M_TOF_AMPDU_LEN */
+} wl_proxd_debug_data_t;
+
+/** version of the wl_wsec_info structure */
+#define WL_WSEC_INFO_VERSION 0x01
+
+/** start enum value for BSS properties */
+#define WL_WSEC_INFO_BSS_BASE 0x0100
+
+/** size of len and type fields of wl_wsec_info_tlv_t struct */
+#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data)
+
+/** Allowed wl_wsec_info properties; not all of them may be supported. */
+typedef enum {
+	WL_WSEC_INFO_NONE = 0,
+	WL_WSEC_INFO_MAX_KEYS = 1,
+	WL_WSEC_INFO_NUM_KEYS = 2,
+	WL_WSEC_INFO_NUM_HW_KEYS = 3,
+	WL_WSEC_INFO_MAX_KEY_IDX = 4,
+	WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5,
+	WL_WSEC_INFO_SUPPORTED_ALGOS = 6,
+	WL_WSEC_INFO_MAX_KEY_LEN = 7,
+	WL_WSEC_INFO_FLAGS = 8,
+	/* add global/per-wlc properties above */
+	WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1),
+	WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2),
+	WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3),
+	WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4),
+	WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5),
+	WL_WSEC_INFO_BSS_ALGOS = (WL_WSEC_INFO_BSS_BASE + 6),
+	/* add per-BSS properties above */
+	WL_WSEC_INFO_MAX = 0xffff
+} wl_wsec_info_type_t;
+
+typedef struct {
+	uint32 algos; /* set algos to be enabled/disabled */
+	uint32 mask; /* algos outside mask unaltered */
+} wl_wsec_info_algos_t;
+
+/** tlv used to return wl_wsec_info properties */
+typedef struct {
+	uint16 type;
+	uint16 len;		/**< data length */
+	uint8 data[1];	/**< data follows */
+} wl_wsec_info_tlv_t;
+
+/** input/output data type for wsec_info iovar */
+typedef struct wl_wsec_info {
+	uint8 version; /**< structure version */
+	uint8 pad[2];
+	uint8 num_tlvs;
+	wl_wsec_info_tlv_t tlvs[1]; /**< tlv data follows */
+} wl_wsec_info_t;
+
+/*
+ * randmac definitions
+ */
+#define WL_RANDMAC_MODULE			"randmac"
+#define WL_RANDMAC_API_VERSION		0x0100 /**< version 1.0 */
+#define WL_RANDMAC_API_MIN_VERSION	0x0100 /**< version 1.0 */
+
+/** subcommands that can apply to randmac */
+enum {
+	WL_RANDMAC_SUBCMD_NONE				= 0,
+	WL_RANDMAC_SUBCMD_GET_VERSION			= 1,
+	WL_RANDMAC_SUBCMD_ENABLE			= 2,
+	WL_RANDMAC_SUBCMD_DISABLE			= 3,
+	WL_RANDMAC_SUBCMD_CONFIG			= 4,
+	WL_RANDMAC_SUBCMD_STATS				= 5,
+	WL_RANDMAC_SUBCMD_CLEAR_STATS			= 6,
+
+	WL_RANDMAC_SUBCMD_MAX
+};
+typedef int16 wl_randmac_subcmd_t;
+
+/* Common IOVAR struct */
+typedef struct wl_randmac {
+	uint16 version;
+	uint16 len;			/* total length */
+	wl_randmac_subcmd_t subcmd_id;	/* subcommand id */
+	uint8 data[0];			/* subcommand data */
+} wl_randmac_t;
+
+#define WL_RANDMAC_IOV_HDR_SIZE OFFSETOF(wl_randmac_t, data)
+
+/* randmac version subcommand */
+typedef struct wl_randmac_version {
+	uint16 version;  /* Randmac method version info */
+	uint8 pad[2];    /* Align on 4 byte boundary */
+} wl_randmac_version_t;
+
+/*
+ * Bitmask for methods supporting MAC randomization feature
+ */
+#define WL_RANDMAC_USER_NONE		0x0000
+#define WL_RANDMAC_USER_FTM		0x0001
+#define WL_RANDMAC_USER_NAN		0x0002
+#define WL_RANDMAC_USER_SCAN		0x0004
+#define WL_RANDMAC_USER_ALL		0xFFFF
+typedef uint16 wl_randmac_method_t;
+
+enum {
+	WL_RANDMAC_FLAGS_NONE	= 0x00,
+	WL_RANDMAC_FLAGS_ADDR	= 0x01,
+	WL_RANDMAC_FLAGS_MASK	= 0x02,
+	WL_RANDMAC_FLAGS_METHOD	= 0x04,
+	WL_RANDMAC_FLAGS_ALL	= 0xFF
+};
+typedef uint8 wl_randmac_flags_t;
+
+/* randmac statistics subcommand */
+typedef struct wl_randmac_stats {
+	uint32 set_ok;		/* Set random addr success count */
+	uint32 set_fail;	/* Set random addr failed count */
+	uint32 set_reqs;	/* Set random addr count */
+	uint32 reset_reqs;	/* Restore random addr count */
+	uint32 restore_ok;	/* Restore random addr success count */
+	uint32 restore_fail;	/* Restore random addr failed count */
+	uint32 events_sent;	/* randmac module events count */
+	uint32 events_rcvd;	/* randmac events received count */
+} wl_randmac_stats_t;
+
+/* randmac config subcommand */
+typedef struct wl_randmac_config {
+	struct ether_addr addr;			/* Randomized MAC address */
+	struct ether_addr addr_mask;		/* bitmask for randomization */
+	wl_randmac_method_t method;		/* Enabled methods */
+	wl_randmac_flags_t flags;		/* What config info changed */
+	uint8	PAD;
+} wl_randmac_config_t;
+
+enum {
+	WL_RANDMAC_EVENT_NONE			= 0,	/**< not an event, reserved */
+	WL_RANDMAC_EVENT_BSSCFG_ADDR_SET	= 1,	/* bsscfg addr randomized */
+	WL_RANDMAC_EVENT_BSSCFG_ADDR_RESTORE	= 2,	/* bsscfg addr restored */
+	WL_RANDMAC_EVENT_ENABLED		= 3,	/* randmac module enabled */
+	WL_RANDMAC_EVENT_DISABLE		= 4,	/* randmac module disabled */
+	WL_RANDMAC_EVENT_BSSCFG_STATUS	= 5,	/* bsscfg enable/disable */
+
+	WL_RANDMAC_EVENT_MAX
+};
+typedef int16 wl_randmac_event_type_t;
+typedef int32 wl_randmac_status_t;
+typedef uint32 wl_randmac_event_mask_t;
+
+#define WL_RANDMAC_EVENT_MASK_ALL 0xfffffffe
+#define WL_RANDMAC_EVENT_MASK_EVENT(_event_type) (1 << (_event_type))
+#define WL_RANDMAC_EVENT_ENABLED(_mask, _event_type) (\
+	((_mask) & WL_RANDMAC_EVENT_MASK_EVENT(_event_type)) != 0)
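+/* Usage sketch (illustrative only; 'evmask' is a hypothetical variable):
+ * building an event mask and testing it with the helpers above:
+ *
+ *	wl_randmac_event_mask_t evmask =
+ *		WL_RANDMAC_EVENT_MASK_EVENT(WL_RANDMAC_EVENT_BSSCFG_ADDR_SET) |
+ *		WL_RANDMAC_EVENT_MASK_EVENT(WL_RANDMAC_EVENT_DISABLE);
+ *	WL_RANDMAC_EVENT_ENABLED(evmask, WL_RANDMAC_EVENT_BSSCFG_ADDR_SET) is nonzero
+ */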
+
+/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */
+enum {
+	WL_RANDMAC_TLV_NONE		= 0,
+	WL_RANDMAC_TLV_METHOD		= 1,
+	WL_RANDMAC_TLV_ADDR		= 2,
+	WL_RANDMAC_TLV_MASK		= 3
+};
+typedef uint16 wl_randmac_tlv_id_t;
+
+typedef struct wl_randmac_tlv {
+	wl_randmac_tlv_id_t id;
+	uint16 len;		/* Length of variable */
+	uint8  data[1];
+} wl_randmac_tlv_t;
+
+/** randmac event */
+typedef struct wl_randmac_event {
+	uint16					version;
+	uint16					len;	/* Length of all variables */
+	wl_randmac_event_type_t			type;
+	wl_randmac_method_t			method;
+	uint8					pad[2];
+	wl_randmac_tlv_t			tlvs[1];	/**< variable */
+} wl_randmac_event_t;
+
+/*
+ * scan MAC definitions
+ */
+
+/** common iovar struct */
+typedef struct wl_scanmac {
+	uint16 subcmd_id;	/**< subcommand id */
+	uint16 len;		/**< total length of data[] */
+	uint8 data[];		/**< subcommand data */
+} wl_scanmac_t;
+
+/* subcommand ids */
+#define WL_SCANMAC_SUBCMD_ENABLE   0
+#define WL_SCANMAC_SUBCMD_BSSCFG   1   /**< only GET supported */
+#define WL_SCANMAC_SUBCMD_CONFIG   2
+
+/** scanmac enable data struct */
+typedef struct wl_scanmac_enable {
+	uint8 enable;	/**< 1 - enable, 0 - disable */
+	uint8 pad[3];	/**< 4-byte struct alignment */
+} wl_scanmac_enable_t;
+
+/** scanmac bsscfg data struct */
+typedef struct wl_scanmac_bsscfg {
+	uint32 bsscfg;	/**< bsscfg index */
+} wl_scanmac_bsscfg_t;
+
+/** scanmac config data struct */
+typedef struct wl_scanmac_config {
+	struct ether_addr mac;	/**< 6 bytes of MAC address or MAC prefix (i.e. OUI) */
+	struct ether_addr random_mask;	/**< randomized bits on each scan */
+	uint16 scan_bitmap;	/**< scans to use this MAC address */
+	uint8 pad[2];	/**< 4-byte struct alignment */
+} wl_scanmac_config_t;
+
+/* scan bitmap */
+#define WL_SCANMAC_SCAN_UNASSOC		(0x01 << 0)	/**< unassociated scans */
+#define WL_SCANMAC_SCAN_ASSOC_ROAM	(0x01 << 1)	/**< associated roam scans */
+#define WL_SCANMAC_SCAN_ASSOC_PNO	(0x01 << 2)	/**< associated PNO scans */
+#define WL_SCANMAC_SCAN_ASSOC_HOST	(0x01 << 3)	/**< associated host scans */
+/*
+ * bonjour dongle offload definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_bdo {
+	uint16 subcmd_id;	/* subcommand id */
+	uint16 len;		/* total length of data[] */
+	uint8 data[];		/* subcommand data */
+} wl_bdo_t;
+
+/* subcommand ids */
+#define WL_BDO_SUBCMD_DOWNLOAD		0	/* Download flattened database  */
+#define WL_BDO_SUBCMD_ENABLE		1	/* Start bonjour after download */
+#define WL_BDO_SUBCMD_MAX_DOWNLOAD	2	/* Get the max download size    */
+
+/* maximum fragment size */
+#define BDO_MAX_FRAGMENT_SIZE	1024
+
+/* download flattened database
+ *
+ * BDO must be disabled before database download else fail.
+ *
+ * If database size is within BDO_MAX_FRAGMENT_SIZE then only a single fragment
+ * is required (i.e. frag_num = 0, total_size = frag_size).
+ * If database size exceeds BDO_MAX_FRAGMENT_SIZE then multiple fragments are required.
+ */
+typedef struct wl_bdo_download {
+	uint16 total_size;	/* total database size */
+	uint16 frag_num;	/* fragment number, 0 for first fragment, N-1 for last fragment */
+	uint16 frag_size;	/* size of fragment (max BDO_MAX_FRAGMENT_SIZE) */
+	uint8 pad[2];		/* 4-byte struct alignment */
+	uint8 fragment[BDO_MAX_FRAGMENT_SIZE];  /* fragment data */
+} wl_bdo_download_t;
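+/* Fragmentation sketch (illustrative only; 'db' and 'db_len' are hypothetical
+ * host-side variables holding the flattened database): every fragment carries
+ * the full database size in total_size, its own index in frag_num and its own
+ * length in frag_size, per the comment above.
+ *
+ *	uint16 off, chunk, frag = 0;
+ *	wl_bdo_download_t dl;
+ *	for (off = 0; off < db_len; off += chunk, frag++) {
+ *		chunk = (db_len - off) > BDO_MAX_FRAGMENT_SIZE ?
+ *			BDO_MAX_FRAGMENT_SIZE : (db_len - off);
+ *		dl.total_size = db_len;
+ *		dl.frag_num = frag;
+ *		dl.frag_size = chunk;
+ *		memcpy(dl.fragment, db + off, chunk);
+ *		issue WL_BDO_SUBCMD_DOWNLOAD with 'dl' here
+ *	}
+ */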
+
+/* enable
+ *
+ * Enable requires a downloaded database else fail.
+ */
+typedef struct wl_bdo_enable {
+	uint8 enable;	/* 1 - enable, 0 - disable */
+	uint8 pad[3];	/* 4-byte struct alignment */
+} wl_bdo_enable_t;
+
+/*
+ * Get the max download size for Bonjour Offload.
+ */
+typedef struct wl_bdo_max_download {
+	uint16 size;	/* Max download size in bytes */
+	uint8 pad[2];	/* 4-byte struct alignment    */
+} wl_bdo_max_download_t;
+
+/*
+ * TCP keepalive offload definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_tko {
+	uint16 subcmd_id;	/* subcommand id */
+	uint16 len;		/* total length of data[] */
+	uint8 data[];		/* subcommand data */
+} wl_tko_t;
+
+/* subcommand ids */
+#define WL_TKO_SUBCMD_MAX_TCP		0	/* max TCP connections supported */
+#define WL_TKO_SUBCMD_PARAM		1	/* configure offload common parameters  */
+#define WL_TKO_SUBCMD_CONNECT		2	/* TCP connection info */
+#define WL_TKO_SUBCMD_ENABLE		3	/* enable/disable */
+#define WL_TKO_SUBCMD_STATUS		4	/* TCP connection status */
+
+/* WL_TKO_SUBCMD_MAX_TCP subcommand data */
+typedef struct wl_tko_max_tcp {
+	uint8 max;	/* max TCP connections supported */
+	uint8 pad[3];	/* 4-byte struct alignment */
+} wl_tko_max_tcp_t;
+
+/* WL_TKO_SUBCMD_PARAM subcommand data */
+typedef struct wl_tko_param {
+	uint16 interval;	/* keepalive tx interval (secs) */
+	uint16 retry_interval;	/* keepalive retry interval (secs) */
+	uint16 retry_count;	/* retry_count */
+	uint8 pad[2];		/* 4-byte struct alignment */
+} wl_tko_param_t;
+
+/* WL_TKO_SUBCMD_CONNECT subcommand data
+ * invoke with unique 'index' for each TCP connection
+ */
+typedef struct wl_tko_connect {
+	uint8 index;		/* TCP connection index, 0 to max-1 */
+	uint8 ip_addr_type;	/* 0 - IPv4, 1 - IPv6 */
+	uint16 local_port;	/* local port */
+	uint16 remote_port;	/* remote port */
+	uint16 PAD;
+	uint32 local_seq;	/* local sequence number */
+	uint32 remote_seq;	/* remote sequence number */
+	uint16 request_len;	/* TCP keepalive request packet length */
+	uint16 response_len;	/* TCP keepalive response packet length */
+	uint8 data[];		/* variable length field containing local/remote IPv4/IPv6,
+				 * TCP keepalive request packet, TCP keepalive response packet
+				 *    For IPv4, length is 4 * 2 + request_length + response_length
+				 *       offset 0 - local IPv4
+				 *       offset 4 - remote IPv4
+				 *       offset 8 - TCP keepalive request packet
+				 *       offset 8+request_length - TCP keepalive response packet
+				 *    For IPv6, length is 16 * 2 + request_length + response_length
+				 *       offset 0 - local IPv6
+				 *       offset 16 - remote IPv6
+				 *       offset 32 - TCP keepalive request packet
+				 *       offset 32+request_length - TCP keepalive response packet
+				 */
+} wl_tko_connect_t;
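+/* Layout sketch for the IPv4 case (illustrative only; 'c', 'local_ip',
+ * 'remote_ip', 'ka_req' and 'ka_resp' are hypothetical host-side variables):
+ * data[] is 4*2 + request_len + response_len bytes, laid out per the offsets
+ * documented above.
+ *
+ *	uint8 *p = c->data;
+ *	memcpy(p, local_ip, 4);					offset 0: local IPv4
+ *	memcpy(p + 4, remote_ip, 4);				offset 4: remote IPv4
+ *	memcpy(p + 8, ka_req, c->request_len);			keepalive request
+ *	memcpy(p + 8 + c->request_len, ka_resp, c->response_len);
+ */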
+
+/* WL_TKO_SUBCMD_CONNECT subcommand data to GET configured info for specific index */
+typedef struct wl_tko_get_connect {
+	uint8 index;		/* TCP connection index, 0 to max-1 */
+	uint8 pad[3];		/* 4-byte struct alignment */
+} wl_tko_get_connect_t;
+
+typedef struct wl_tko_enable {
+	uint8 enable;	/* 1 - enable, 0 - disable */
+	uint8 pad[3];	/* 4-byte struct alignment */
+} wl_tko_enable_t;
+
+/* WL_TKO_SUBCMD_STATUS subcommand data */
+/* must be invoked before tko is disabled else status is unavailable */
+typedef struct wl_tko_status {
+	uint8 count;		/* number of status entries (i.e. equals
+				 * max TCP connections supported)
+	                         */
+	uint8 status[1];	/* variable length field contain status for
+				 * each TCP connection index
+				 */
+} wl_tko_status_t;
+
+typedef enum {
+	TKO_STATUS_NORMAL			= 0,	/* TCP connection normal, no error */
+	TKO_STATUS_NO_RESPONSE			= 1,	/* no response to TCP keepalive */
+	TKO_STATUS_NO_TCP_ACK_FLAG		= 2,	/* TCP ACK flag not set */
+	TKO_STATUS_UNEXPECT_TCP_FLAG		= 3,	/* unexpected TCP flags set other than ACK */
+	TKO_STATUS_SEQ_NUM_INVALID		= 4,	/* ACK != sequence number */
+	TKO_STATUS_REMOTE_SEQ_NUM_INVALID	= 5,	/* SEQ > remote sequence number */
+	TKO_STATUS_TCP_DATA			= 6,	/* TCP data available */
+	TKO_STATUS_UNAVAILABLE			= 255,	/* not used/configured */
+} tko_status_t;
+
+enum rssi_reason {
+	RSSI_REASON_UNKNOW = 0,
+	RSSI_REASON_LOWRSSI = 1,
+	RSSI_REASON_NSYC = 2,
+	RSSI_REASON_TIMEOUT = 3
+};
+
+enum tof_reason {
+	TOF_REASON_OK = 0,
+	TOF_REASON_REQEND = 1,
+	TOF_REASON_TIMEOUT = 2,
+	TOF_REASON_NOACK = 3,
+	TOF_REASON_INVALIDAVB = 4,
+	TOF_REASON_INITIAL = 5,
+	TOF_REASON_ABORT = 6
+};
+
+enum rssi_state {
+	RSSI_STATE_POLL = 0,
+	RSSI_STATE_TPAIRING = 1,
+	RSSI_STATE_IPAIRING = 2,
+	RSSI_STATE_THANDSHAKE = 3,
+	RSSI_STATE_IHANDSHAKE = 4,
+	RSSI_STATE_CONFIRMED = 5,
+	RSSI_STATE_PIPELINE = 6,
+	RSSI_STATE_NEGMODE = 7,
+	RSSI_STATE_MONITOR = 8,
+	RSSI_STATE_LAST = 9
+};
+
+enum tof_state {
+	TOF_STATE_IDLE	 = 0,
+	TOF_STATE_IWAITM = 1,
+	TOF_STATE_TWAITM = 2,
+	TOF_STATE_ILEGACY = 3,
+	TOF_STATE_IWAITCL = 4,
+	TOF_STATE_TWAITCL = 5,
+	TOF_STATE_ICONFIRM = 6,
+	TOF_STATE_IREPORT = 7
+};
+
+enum tof_mode_type {
+	TOF_LEGACY_UNKNOWN	= 0,
+	TOF_LEGACY_AP		= 1,
+	TOF_NONLEGACY_AP	= 2
+};
+
+enum tof_way_type {
+	TOF_TYPE_ONE_WAY = 0,
+	TOF_TYPE_TWO_WAY = 1,
+	TOF_TYPE_REPORT = 2
+};
+
+enum tof_rate_type {
+	TOF_FRAME_RATE_VHT = 0,
+	TOF_FRAME_RATE_LEGACY = 1
+};
+
+#define TOF_ADJ_TYPE_NUM	4	/**< number of assisted timestamp adjustment */
+enum tof_adj_mode {
+	TOF_ADJ_SOFTWARE = 0,
+	TOF_ADJ_HARDWARE = 1,
+	TOF_ADJ_SEQ = 2,
+	TOF_ADJ_NONE = 3
+};
+
+#define FRAME_TYPE_NUM		4	/**< number of frame type */
+enum frame_type {
+	FRAME_TYPE_CCK	= 0,
+	FRAME_TYPE_OFDM	= 1,
+	FRAME_TYPE_11N	= 2,
+	FRAME_TYPE_11AC	= 3
+};
+
+typedef struct wl_proxd_status_iovar {
+	uint16			method;				/**< method */
+	uint8			mode;				/**< mode */
+	uint8			peermode;			/**< peer mode */
+	uint8			state;				/**< state */
+	uint8			reason;				/**< reason code */
+	uint8			PAD[2];
+	uint32			distance;			/**< distance */
+	uint32			txcnt;				/**< tx pkt counter */
+	uint32			rxcnt;				/**< rx pkt counter */
+	struct ether_addr	peer;				/**< peer mac address */
+	int8			avg_rssi;			/**< average rssi */
+	int8			hi_rssi;			/**< highest rssi */
+	int8			low_rssi;			/**< lowest rssi */
+	uint8			PAD[3];
+	uint32			dbgstatus;			/**< debug status */
+	uint16			frame_type_cnt[FRAME_TYPE_NUM];	/**< frame types */
+	uint8			adj_type_cnt[TOF_ADJ_TYPE_NUM];	/**< adj types HW/SW */
+} wl_proxd_status_iovar_t;
+
+/* ifdef NET_DETECT */
+typedef struct net_detect_adapter_features {
+	uint8	wowl_enabled;
+	uint8	net_detect_enabled;
+	uint8	nlo_enabled;
+} net_detect_adapter_features_t;
+
+typedef enum net_detect_bss_type {
+	nd_bss_any = 0,
+	nd_ibss,
+	nd_ess
+} net_detect_bss_type_t;
+
+typedef struct net_detect_profile {
+	wlc_ssid_t		ssid;
+	net_detect_bss_type_t   bss_type;	/**< Ignore for now since Phase 1 is only for ESS */
+	uint32			cipher_type;	/**< DOT11_CIPHER_ALGORITHM enumeration values */
+	uint32			auth_type;	/**< DOT11_AUTH_ALGORITHM enumeration values */
+} net_detect_profile_t;
+
+typedef struct net_detect_profile_list {
+	uint32			num_nd_profiles;
+	net_detect_profile_t	nd_profile[];
+} net_detect_profile_list_t;
+
+typedef struct net_detect_config {
+	uint8			    nd_enabled;
+	uint8				PAD[3];
+	uint32			    scan_interval;
+	uint32			    wait_period;
+	uint8			    wake_if_connected;
+	uint8			    wake_if_disconnected;
+	uint8				PAD[2];
+	net_detect_profile_list_t   nd_profile_list;
+} net_detect_config_t;
+
+typedef enum net_detect_wake_reason {
+	nd_reason_unknown,
+	nd_net_detected,
+	nd_wowl_event,
+	nd_ucode_error
+} net_detect_wake_reason_t;
+
+typedef struct net_detect_wake_data {
+	net_detect_wake_reason_t    nd_wake_reason;
+	uint32			    nd_wake_date_length;
+	uint8			    nd_wake_data[0];	    /**< Wake data (currently unused) */
+} net_detect_wake_data_t;
+
+/* endif NET_DETECT */
+
+/* (unversioned, deprecated) */
+typedef struct bcnreq {
+	uint8 bcn_mode;
+	uint8 PAD[3];
+	int32 dur;
+	int32 channel;
+	struct ether_addr da;
+	uint16 random_int;
+	wlc_ssid_t ssid;
+	uint16 reps;
+	uint8 PAD[2];
+} bcnreq_t;
+
+#define WL_RRM_BCN_REQ_VER		1
+typedef struct bcn_req {
+	uint8 version;
+	uint8 bcn_mode;
+	uint8 pad_1[2];
+	int32 dur;
+	int32 channel;
+	struct ether_addr da;
+	uint16 random_int;
+	wlc_ssid_t ssid;
+	uint16 reps;
+	uint8 req_elements;
+	uint8 pad_2;
+	chanspec_list_t chspec_list;
+} bcn_req_t;
+
+typedef struct rrmreq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	uint16 reps;
+} rrmreq_t;
+
+typedef struct framereq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	struct ether_addr ta;
+	uint16 reps;
+} framereq_t;
+
+typedef struct statreq {
+	struct ether_addr da;
+	struct ether_addr peer;
+	uint16 random_int;
+	uint16 dur;
+	uint8 group_id;
+	uint8 PAD;
+	uint16 reps;
+} statreq_t;
+
+typedef struct wl_rrm_config_ioc {
+	uint16 version; /* command version */
+	uint16 id;      /* subiovar cmd ID */
+	uint16 len;     /* total length of all bytes in data[] */
+	uint16 pad;     /* 4-byte boundary padding */
+	uint8 data[1];  /* payload */
+} wl_rrm_config_ioc_t;
+
+enum {
+	WL_RRM_CONFIG_NONE	= 0,	/* reserved */
+	WL_RRM_CONFIG_GET_LCI	= 1,	/* get LCI */
+	WL_RRM_CONFIG_SET_LCI	= 2,	/* set LCI */
+	WL_RRM_CONFIG_GET_CIVIC	= 3,	/* get civic location */
+	WL_RRM_CONFIG_SET_CIVIC	= 4,	/* set civic location */
+	WL_RRM_CONFIG_MAX		= 5
+};
+
+#define WL_RRM_CONFIG_NAME "rrm_config"
+#define WL_RRM_CONFIG_MIN_LENGTH OFFSETOF(wl_rrm_config_ioc_t, data)
+
+enum {
+	WL_RRM_EVENT_NONE		= 0,	/* not an event, reserved */
+	WL_RRM_EVENT_FRNG_REQ	= 1,	/* Receipt of FRNG request frame */
+	WL_RRM_EVENT_FRNG_REP	= 2,	/* Receipt of FRNG report frame */
+
+	WL_RRM_EVENT_MAX
+};
+typedef int16 wl_rrm_event_type_t;
+
+typedef struct frngreq_target {
+	uint32 bssid_info;
+	uint8 channel;
+	uint8 phytype;
+	uint8 reg;
+	uint8 pad;
+	struct ether_addr bssid;
+	chanspec_t chanspec;
+	uint32 sid;
+} frngreq_target_t;
+
+typedef struct frngreq {
+	wl_rrm_event_type_t event;			/* RRM event type */
+	struct ether_addr da;
+	uint16 max_init_delay;	/* Upper bound of random delay, in TUs */
+	uint8 min_ap_count;		/* Min FTM ranges requested (1-15) */
+	uint8 num_aps;			/* Number of APs to range, at least min_ap_count */
+	uint16 max_age;			/* Max elapsed time before FTM request, 0xFFFF = any */
+	uint16 reps;			/* Number of repetitions of this measurement type */
+	frngreq_target_t targets[1];	/* Target BSSIDs to range */
+} frngreq_t;
+
+typedef struct frngrep_range {
+	uint32 start_tsf;		/* 4 lsb of tsf */
+	struct ether_addr bssid;
+	uint8 pad[2];
+	uint32 range;
+	uint32 max_err;
+	uint8  rsvd;
+	uint8 pad2[3];
+} frngrep_range_t;
+
+typedef struct frngrep_error {
+	uint32 start_tsf;		/* 4 lsb of tsf */
+	struct ether_addr bssid;
+	uint8  code;
+	uint8 pad[1];
+} frngrep_error_t;
+
+typedef struct frngrep {
+	wl_rrm_event_type_t event;			/* RRM event type */
+	struct ether_addr da;
+	uint8 range_entry_count;
+	uint8 error_entry_count;
+	uint16 dialog_token;				/* dialog token */
+	frngrep_range_t range_entries[DOT11_FTM_RANGE_ENTRY_MAX_COUNT];
+	frngrep_error_t error_entries[DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT];
+} frngrep_t;
+
+typedef struct wl_rrm_frng_ioc {
+	uint16 version; /* command version */
+	uint16 id;      /* subiovar cmd ID */
+	uint16 len;     /* total length of all bytes in data[] */
+	uint16 pad;     /* 4-byte boundary padding */
+	uint8 data[];  /* payload */
+} wl_rrm_frng_ioc_t;
+
+enum {
+	WL_RRM_FRNG_NONE	= 0,	/* reserved */
+	WL_RRM_FRNG_SET_REQ	= 1,	/* send ftm ranging request */
+	WL_RRM_FRNG_MAX		= 2
+};
+
+#define WL_RRM_FRNG_NAME "rrm_frng"
+#define WL_RRM_FRNG_MIN_LENGTH OFFSETOF(wl_rrm_frng_ioc_t, data)
+
+#define WL_RRM_RPT_VER		0
+#define WL_RRM_RPT_MAX_PAYLOAD	256
+#define WL_RRM_RPT_MIN_PAYLOAD	7
+#define WL_RRM_RPT_FALG_ERR	0
+#define WL_RRM_RPT_FALG_GRP_ID_PROPR	(1 << 0)
+#define WL_RRM_RPT_FALG_GRP_ID_0	(1 << 1)
+typedef struct {
+	uint16 ver;		/**< version */
+	struct ether_addr addr;	/**< STA MAC addr */
+	uint32 timestamp;	/**< timestamp of the report */
+	uint16 flag;		/**< flag */
+	uint16 len;		/**< length of payload data */
+	uint8 data[WL_RRM_RPT_MAX_PAYLOAD];
+} statrpt_t;
+
+typedef struct wlc_dwds_config {
+	uint32		enable;
+	uint32		mode; /**< STA/AP interface */
+	struct ether_addr ea;
+	uint8  PAD[2];
+} wlc_dwds_config_t;
+
+typedef struct wl_el_set_params_s {
+	uint8 set;	/**< Set number */
+	uint8  PAD[3];
+	uint32 size;	/**< Size to make/expand */
+} wl_el_set_params_t;
+
+typedef struct wl_el_tag_params_s {
+	uint16 tag;
+	uint8 set;
+	uint8 flags;
+} wl_el_tag_params_t;
+
+/** Video Traffic Interference Monitor config */
+#define INTFER_VERSION		1
+typedef struct wl_intfer_params {
+	uint16 version;			/**< version */
+	uint8 period;			/**< sample period */
+	uint8 cnt;			/**< sample cnt */
+	uint8 txfail_thresh;	/**< non-TCP txfail threshold */
+	uint8 tcptxfail_thresh;	/**< tcptxfail threshold */
+} wl_intfer_params_t;
+
+typedef struct wl_staprio_cfg {
+	struct ether_addr ea;	/**< mac addr */
+	uint8 prio;		/**< scb priority */
+} wl_staprio_cfg_t;
+
+typedef enum wl_stamon_cfg_cmd_type {
+	STAMON_CFG_CMD_DEL = 0,
+	STAMON_CFG_CMD_ADD = 1,
+	STAMON_CFG_CMD_ENB = 2,
+	STAMON_CFG_CMD_DSB = 3,
+	STAMON_CFG_CMD_CNT = 4,
+	STAMON_CFG_CMD_RSTCNT = 5,
+	STAMON_CFG_CMD_GET_STATS = 6
+} wl_stamon_cfg_cmd_type_t;
+
+typedef struct wlc_stamon_sta_config {
+	wl_stamon_cfg_cmd_type_t cmd; /**< 0 - delete, 1 - add */
+	struct ether_addr ea;
+	uint8  PAD[2];
+} wlc_stamon_sta_config_t;
+
+/* ifdef SR_DEBUG */
+typedef struct /* pmu_reg */{
+	uint32  pmu_control;
+	uint32  pmu_capabilities;
+	uint32  pmu_status;
+	uint32  res_state;
+	uint32  res_pending;
+	uint32  pmu_timer1;
+	uint32  min_res_mask;
+	uint32  max_res_mask;
+	uint32  pmu_chipcontrol1[4];
+	uint32  pmu_regcontrol[5];
+	uint32  pmu_pllcontrol[5];
+	uint32  pmu_rsrc_up_down_timer[31];
+	uint32  rsrc_dep_mask[31];
+} pmu_reg_t;
+/* endif SR_DEBUG */
+
+typedef struct wl_taf_define {
+	struct ether_addr ea;	/**< STA MAC or 0xFF... */
+	uint16 version;         /**< version */
+	uint32 sch;             /**< method index */
+	uint32 prio;            /**< priority */
+	uint32 misc;            /**< used for return value */
+	uint8  text[];         /**< used to pass and return ascii text */
+} wl_taf_define_t;
+
+/** Received Beacons lengths information */
+#define WL_LAST_BCNS_INFO_FIXED_LEN		OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring)
+typedef struct wlc_bcn_len_hist {
+	uint16	ver;				/**< version field */
+	uint16	cur_index;			/**< current pointed index in ring buffer */
+	uint32	max_bcnlen;		/**< Max beacon length received */
+	uint32	min_bcnlen;		/**< Min beacon length received */
+	uint32	ringbuff_len;		/**< Length of the ring buffer 'bcnlen_ring' */
+	uint32	bcnlen_ring[1];	/**< ring buffer storing received beacon lengths */
+} wlc_bcn_len_hist_t;
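+/* Sizing note (an assumption, not stated explicitly in this header): a buffer
+ * holding 'n' ring entries would need WL_LAST_BCNS_INFO_FIXED_LEN +
+ * n * sizeof(uint32) bytes, since the fixed portion ends at bcnlen_ring[].
+ */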
+
+/* WDS net interface types */
+#define WL_WDSIFTYPE_NONE  0x0 /**< The interface type is neither WDS nor DWDS. */
+#define WL_WDSIFTYPE_WDS   0x1 /**< The interface is WDS type. */
+#define WL_WDSIFTYPE_DWDS  0x2 /**< The interface is DWDS type. */
+
+typedef struct wl_bssload_static {
+	uint8 is_static;
+	uint8  PAD;
+	uint16 sta_count;
+	uint8 chan_util;
+	uint8  PAD;
+	uint16 aac;
+} wl_bssload_static_t;
+
+/* Buffer of size WLC_SAMPLECOLLECT_MAXLEN (=10240 for 4345a0 ACPHY)
+ * gets copied to this, multiple times
+ */
+typedef enum wl_gpaio_option {
+	GPAIO_PMU_AFELDO,
+	GPAIO_PMU_TXLDO,
+	GPAIO_PMU_VCOLDO,
+	GPAIO_PMU_LNALDO,
+	GPAIO_PMU_ADCLDO,
+	GPAIO_ICTAT_CAL,
+	GPAIO_PMU_CLEAR,
+	GPAIO_OFF,
+	GPAIO_PMU_LOGENLDO,
+	GPAIO_PMU_RXLDO2G,
+	GPAIO_PMU_RXLDO5G
+} wl_gpaio_option_t;
+
+/** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */
+typedef enum wlc_ap_iov_bss_operation {
+	WLC_AP_IOV_OP_DELETE                   = -1,
+	WLC_AP_IOV_OP_DISABLE                  = 0,
+	WLC_AP_IOV_OP_ENABLE                   = 1,
+	WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE  = 2,
+	WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3,
+	WLC_AP_IOV_OP_MOVE                     = 4
+} wlc_ap_iov_bss_oper_t;
+
+/* LTE coex info */
+/* Analogue of HCI Set MWS Signaling cmd */
+typedef struct {
+	int16	mws_rx_assert_offset;
+	int16	mws_rx_assert_jitter;
+	int16	mws_rx_deassert_offset;
+	int16	mws_rx_deassert_jitter;
+	int16	mws_tx_assert_offset;
+	int16	mws_tx_assert_jitter;
+	int16	mws_tx_deassert_offset;
+	int16	mws_tx_deassert_jitter;
+	int16	mws_pattern_assert_offset;
+	int16	mws_pattern_assert_jitter;
+	int16	mws_inact_dur_assert_offset;
+	int16	mws_inact_dur_assert_jitter;
+	int16	mws_scan_freq_assert_offset;
+	int16	mws_scan_freq_assert_jitter;
+	int16	mws_prio_assert_offset_req;
+} wci2_config_t;
+
+/** Analogue of HCI MWS Channel Params */
+typedef struct {
+	uint16	mws_rx_center_freq; /**< MHz */
+	uint16	mws_tx_center_freq;
+	uint16	mws_rx_channel_bw;  /**< KHz */
+	uint16	mws_tx_channel_bw;
+	uint8	mws_channel_en;
+	uint8	mws_channel_type;   /**< Don't care for WLAN? */
+} mws_params_t;
+
+#define LTECX_MAX_NUM_PERIOD_TYPES	7
+
+/* LTE Frame params */
+typedef struct {
+	uint16	mws_frame_dur;
+	int16	mws_framesync_assert_offset;
+	uint16	mws_framesync_assert_jitter;
+	uint16  mws_period_dur[LTECX_MAX_NUM_PERIOD_TYPES];
+	uint8	mws_period_type[LTECX_MAX_NUM_PERIOD_TYPES];
+	uint8	mws_num_periods;
+} mws_frame_config_t;
+
+/** MWS wci2 message */
+typedef struct {
+	uint8	mws_wci2_data; /**< BT-SIG msg */
+	uint8	PAD;
+	uint16	mws_wci2_interval; /**< Interval in us */
+	uint16	mws_wci2_repeat; /**< No of msgs to send */
+} mws_wci2_msg_t;
+/* MWS ANT map */
+typedef struct {
+	uint16	combo1; /* mws ant selection 1 */
+	uint16	combo2; /* mws ant selection 2 */
+	uint16	combo3; /* mws ant selection 3 */
+	uint16	combo4; /* mws ant selection 4 */
+} mws_ant_map_t;
+
+/* MWS SCAN_REQ Bitmap */
+typedef struct mws_scanreq_params {
+	uint16 idx;
+	uint16 bm_2g;
+	uint16 bm_5g_lo;
+	uint16 bm_5g_mid;
+	uint16 bm_5g_hi;
+} mws_scanreq_params_t;
+
+typedef struct {
+	uint32 config;	/**< MODE: AUTO (-1), Disable (0), Enable (1) */
+	uint32 status;	/**< Current state: Disabled (0), Enabled (1) */
+} wl_config_t;
+
+#define WLC_RSDB_MODE_AUTO_MASK 0x80
+#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
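+/* Decode sketch (illustrative only): a mode value of 0x81 has the AUTO bit set
+ * and decodes to mode 1:
+ *
+ *	(0x81 & WLC_RSDB_MODE_AUTO_MASK) != 0	indicates AUTO
+ *	WLC_RSDB_EXTRACT_MODE(0x81)		yields 1
+ */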
+
+typedef struct {
+	uint16  request; /* type of sensor hub request */
+	uint16  enable; /* enable/disable response for specified request */
+	uint16  interval; /* interval between responses to the request */
+} shub_req_t;
+
+#define	WL_IF_STATS_T_VERSION 1	/**< current version of wl_if_stats structure */
+
+/** per interface counters */
+typedef struct wl_if_stats {
+	uint16	version;		/**< version of the structure */
+	uint16	length;			/**< length of the entire structure */
+	uint32	PAD;			/**< padding */
+
+	/* transmit stat counters */
+	uint64	txframe;		/**< tx data frames */
+	uint64	txbyte;			/**< tx data bytes */
+	uint64	txerror;		/**< tx data errors (derived: sum of others) */
+	uint64  txnobuf;		/**< tx out of buffer errors */
+	uint64  txrunt;			/**< tx runt frames */
+	uint64  txfail;			/**< tx failed frames */
+	uint64	txretry;		/**< tx retry frames */
+	uint64	txretrie;		/**< tx multiple retry frames */
+	uint64	txfrmsnt;		/**< tx sent frames */
+	uint64	txmulti;		/**< tx multicast sent frames */
+	uint64	txfrag;			/**< tx fragments sent */
+
+	/* receive stat counters */
+	uint64	rxframe;		/**< rx data frames */
+	uint64	rxbyte;			/**< rx data bytes */
+	uint64	rxerror;		/**< rx data errors (derived: sum of others) */
+	uint64	rxnobuf;		/**< rx out of buffer errors */
+	uint64  rxrunt;			/**< rx runt frames */
+	uint64  rxfragerr;		/**< rx fragment errors */
+	uint64	rxmulti;		/**< rx multicast frames */
+
+	uint64	txexptime;		/* DATA Tx frames suppressed due to timer expiration */
+	uint64	txrts;			/* RTS/CTS succeeded count */
+	uint64	txnocts;		/* RTS/CTS failed count */
+
+	uint64	txretrans;		/* Number of frame retransmissions */
+}
+wl_if_stats_t;
+
+typedef struct wl_band {
+	uint16		bandtype;		/**< WL_BAND_2G, WL_BAND_5G */
+	uint16		bandunit;		/**< bandstate[] index */
+	uint16		phytype;		/**< phytype */
+	uint16		phyrev;
+}
+wl_band_t;
+
+#define	WL_WLC_VERSION_T_VERSION 1 /**< current version of wlc_version structure */
+
+/** wlc interface version */
+typedef struct wl_wlc_version {
+	uint16	version;		/**< version of the structure */
+	uint16	length;			/**< length of the entire structure */
+
+	/* epi version numbers */
+	uint16	epi_ver_major;		/**< epi major version number */
+	uint16	epi_ver_minor;		/**< epi minor version number */
+	uint16	epi_rc_num;		/**< epi RC number */
+	uint16	epi_incr_num;		/**< epi increment number */
+
+	/* wlc interface version numbers */
+	uint16	wlc_ver_major;		/**< wlc interface major version number */
+	uint16	wlc_ver_minor;		/**< wlc interface minor version number */
+}
+wl_wlc_version_t;
+
+/* Highest version of WLC_API_VERSION supported */
+#define WLC_API_VERSION_MAJOR_MAX	8
+#define WLC_API_VERSION_MINOR_MAX	0
+
+/* begin proxd definitions */
+#include <packed_section_start.h>
+
+#define WL_PROXD_API_VERSION 0x0300	/**< version 3.0 */
+
+/** Minimum supported API version */
+#define WL_PROXD_API_MIN_VERSION 0x0300
+
+/** proximity detection methods */
+enum {
+	WL_PROXD_METHOD_NONE	= 0,
+	WL_PROXD_METHOD_RSVD1	= 1, /**< backward compatibility - RSSI, not supported */
+	WL_PROXD_METHOD_TOF	= 2,
+	WL_PROXD_METHOD_RSVD2	= 3, /**< 11v only - if needed */
+	WL_PROXD_METHOD_FTM	= 4, /**< IEEE rev mc/2014 */
+	WL_PROXD_METHOD_MAX
+};
+typedef int16 wl_proxd_method_t;
+
+/** global and method configuration flags */
+enum {
+	WL_PROXD_FLAG_NONE			= 0x00000000,
+	WL_PROXD_FLAG_RX_ENABLED		= 0x00000001, /**< respond to requests, per bss */
+	WL_PROXD_FLAG_RX_RANGE_REQ		= 0x00000002, /**< 11mc range requests enabled */
+	WL_PROXD_FLAG_TX_LCI			= 0x00000004, /**< tx lci, if known */
+	WL_PROXD_FLAG_TX_CIVIC			= 0x00000008, /**< tx civic, if known */
+	WL_PROXD_FLAG_RX_AUTO_BURST		= 0x00000010, /**< auto respond w/o host action */
+	WL_PROXD_FLAG_TX_AUTO_BURST		= 0x00000020, /**< continue tx w/o host action */
+	WL_PROXD_FLAG_AVAIL_PUBLISH		= 0x00000040,     /**< publish availability */
+	WL_PROXD_FLAG_AVAIL_SCHEDULE		= 0x00000080,    /**< schedule using availability */
+	WL_PROXD_FLAG_ASAP_CAPABLE		= 0x00000100, /* ASAP capable */
+	WL_PROXD_FLAG_MBURST_FOLLOWUP		= 0x00000200, /* new multi-burst algorithm */
+	WL_PROXD_FLAG_SECURE			= 0x00000400, /* per bsscfg option */
+	WL_PROXD_FLAG_NO_TSF_SYNC		= 0x00000800, /* disable tsf sync */
+	WL_PROXD_FLAG_ALL			= 0xffffffff
+};
+typedef uint32 wl_proxd_flags_t;
+
+#define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \
+	WL_PROXD_FLAG_AVAIL_SCHEDULE)
+
+/** session flags */
+enum {
+	WL_PROXD_SESSION_FLAG_NONE 		= 0x00000000,  /**< no flags */
+	WL_PROXD_SESSION_FLAG_INITIATOR 	= 0x00000001,  /**< local device is initiator */
+	WL_PROXD_SESSION_FLAG_TARGET 		= 0x00000002,  /**< local device is target */
+	WL_PROXD_SESSION_FLAG_ONE_WAY		= 0x00000004,  /**< (initiated) 1-way rtt */
+	WL_PROXD_SESSION_FLAG_AUTO_BURST	= 0x00000008,  /**< created w/ rx_auto_burst */
+	WL_PROXD_SESSION_FLAG_PERSIST		= 0x00000010,  /**< good until cancelled */
+	WL_PROXD_SESSION_FLAG_RTT_DETAIL	= 0x00000020,  /**< rtt detail in results */
+	WL_PROXD_SESSION_FLAG_SECURE		= 0x00000040,  /**< session is secure */
+	WL_PROXD_SESSION_FLAG_AOA		= 0x00000080,  /**< AOA along w/ RTT */
+	WL_PROXD_SESSION_FLAG_RX_AUTO_BURST	= 0x00000100,  /**< Same as proxd flags above */
+	WL_PROXD_SESSION_FLAG_TX_AUTO_BURST	= 0x00000200,  /**< Same as proxd flags above */
+	WL_PROXD_SESSION_FLAG_NAN_BSS		= 0x00000400,  /**< Use NAN BSS, if applicable */
+	WL_PROXD_SESSION_FLAG_TS1		= 0x00000800,  /**< e.g. FTM1 - ASAP-capable */
+	WL_PROXD_SESSION_FLAG_REPORT_FAILURE	= 0x00002000, /**< report failure to target */
+	WL_PROXD_SESSION_FLAG_INITIATOR_RPT	= 0x00004000, /**< report distance to target */
+	WL_PROXD_SESSION_FLAG_NOCHANSWT		= 0x00008000,
+	WL_PROXD_SESSION_FLAG_NETRUAL		= 0x00010000, /**< neutral mode */
+	WL_PROXD_SESSION_FLAG_SEQ_EN		= 0x00020000, /**< Toast */
+	WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD	= 0x00040000, /**< no param override from target */
+	WL_PROXD_SESSION_FLAG_ASAP		= 0x00080000, /**< ASAP session */
+	WL_PROXD_SESSION_FLAG_REQ_LCI		= 0x00100000, /**< transmit LCI req */
+	WL_PROXD_SESSION_FLAG_REQ_CIV		= 0x00200000, /**< transmit civic loc req */
+	WL_PROXD_SESSION_FLAG_PRE_SCAN		= 0x00400000, /* enable pre-scan for asap=1 */
+	WL_PROXD_SESSION_FLAG_AUTO_VHTACK	= 0x00800000, /* use vhtack based on brcm ie */
+	WL_PROXD_SESSION_FLAG_VHTACK		= 0x01000000, /* vht ack is in use - output only */
+	WL_PROXD_SESSION_FLAG_BDUR_NOPREF	= 0x02000000, /* burst-duration: no preference */
+	WL_PROXD_SESSION_FLAG_NUM_FTM_NOPREF	= 0x04000000, /* num of FTM frames: no preference */
+	WL_PROXD_SESSION_FLAG_FTM_SEP_NOPREF	= 0x08000000, /* time btw FTM frames: no pref */
+	WL_PROXD_SESSION_FLAG_NUM_BURST_NOPREF	= 0x10000000, /* num of bursts: no pref */
+	WL_PROXD_SESSION_FLAG_BURST_PERIOD_NOPREF = 0x20000000, /* burst period: no pref */
+	WL_PROXD_SESSION_FLAG_MBURST_FOLLOWUP	= 0x40000000, /* new mburst algo  - reserved */
+	WL_PROXD_SESSION_FLAG_MBURST_NODELAY	= 0x80000000, /**< good until cancelled */
+	WL_PROXD_SESSION_FLAG_ALL		= 0xffffffff
+
+};
+typedef uint32 wl_proxd_session_flags_t;
+
+/** time units - mc supports up to 0.1ns resolution */
+enum {
+	WL_PROXD_TMU_TU			= 0,		/**< 1024us */
+	WL_PROXD_TMU_SEC		= 1,
+	WL_PROXD_TMU_MILLI_SEC	= 2,
+	WL_PROXD_TMU_MICRO_SEC	= 3,
+	WL_PROXD_TMU_NANO_SEC	= 4,
+	WL_PROXD_TMU_PICO_SEC	= 5
+};
+typedef int16 wl_proxd_tmu_t;
+
+/** time interval e.g. 10ns */
+typedef struct wl_proxd_intvl {
+	uint32 intvl;
+	wl_proxd_tmu_t tmu;
+	uint8	pad[2];
+} wl_proxd_intvl_t;
+
+/** commands that can apply to proxd, method or a session */
+enum {
+	WL_PROXD_CMD_NONE				= 0,
+	WL_PROXD_CMD_GET_VERSION		= 1,
+	WL_PROXD_CMD_ENABLE 			= 2,
+	WL_PROXD_CMD_DISABLE 			= 3,
+	WL_PROXD_CMD_CONFIG 			= 4,
+	WL_PROXD_CMD_START_SESSION 		= 5,
+	WL_PROXD_CMD_BURST_REQUEST 		= 6,
+	WL_PROXD_CMD_STOP_SESSION 		= 7,
+	WL_PROXD_CMD_DELETE_SESSION 	= 8,
+	WL_PROXD_CMD_GET_RESULT 		= 9,
+	WL_PROXD_CMD_GET_INFO 			= 10,
+	WL_PROXD_CMD_GET_STATUS 		= 11,
+	WL_PROXD_CMD_GET_SESSIONS 		= 12,
+	WL_PROXD_CMD_GET_COUNTERS 		= 13,
+	WL_PROXD_CMD_CLEAR_COUNTERS 	= 14,
+	WL_PROXD_CMD_COLLECT 			= 15,	/* not supported, see 'wl proxd_collect' */
+	WL_PROXD_CMD_TUNE 			= 16,	/* not supported, see 'wl proxd_tune' */
+	WL_PROXD_CMD_DUMP 				= 17,
+	WL_PROXD_CMD_START_RANGING		= 18,
+	WL_PROXD_CMD_STOP_RANGING		= 19,
+	WL_PROXD_CMD_GET_RANGING_INFO	= 20,
+	WL_PROXD_CMD_IS_TLV_SUPPORTED	= 21,
+
+	WL_PROXD_CMD_MAX
+};
+typedef int16 wl_proxd_cmd_t;
+
+/* session ids:
+ * id 0 is reserved
+ * ids 1..0x7fff - allocated by host/app
+ * 0x8000-0xffff - allocated by firmware, used for auto/rx
+ */
+enum {
+	 WL_PROXD_SESSION_ID_GLOBAL = 0
+};
+
+/* Externally allocated sids */
+#define WL_PROXD_SID_EXT_MAX 0x7fff
+#define WL_PROXD_SID_EXT_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_EXT_MAX)
+
+/* block size for reserved sid blocks */
+#define WL_PROXD_SID_EXT_BLKSZ 256
+#define WL_PROXD_SID_EXT_BLK_START(_i) (WL_PROXD_SID_EXT_MAX - (_i) * WL_PROXD_SID_EXT_BLKSZ + 1)
+#define WL_PROXD_SID_EXT_BLK_END(_start) ((_start) + WL_PROXD_SID_EXT_BLKSZ - 1)
+
+/* rrm block */
+#define WL_PROXD_SID_RRM_START WL_PROXD_SID_EXT_BLK_START(1)
+#define WL_PROXD_SID_RRM_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_RRM_START)
+
+/* nan block */
+#define WL_PROXD_SID_NAN_START WL_PROXD_SID_EXT_BLK_START(2)
+#define WL_PROXD_SID_NAN_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_NAN_START)
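+
+/* Illustrative note (not part of the interface): the block macros above carve
+ * reserved session-id ranges from the top of the host-allocated space. For
+ * example, block 1 (rrm) works out to
+ *	WL_PROXD_SID_RRM_START = 0x7fff - 1*256 + 1 = 0x7f00
+ *	WL_PROXD_SID_RRM_END   = 0x7f00 + 256 - 1  = 0x7fff
+ * and block 2 (nan) occupies the 256 ids directly below it (0x7e00..0x7eff).
+ */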
+
+/** maximum number of sessions that can be allocated; may be less if tunable */
+#define WL_PROXD_MAX_SESSIONS 16
+
+typedef uint16 wl_proxd_session_id_t;
+
+/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */
+enum {
+	WL_PROXD_E_NOAVAIL		= -1056,
+	WL_PROXD_E_EXT_SCHED		= -1055,
+	WL_PROXD_E_NOT_BCM			= -1054,
+	WL_PROXD_E_FRAME_TYPE		= -1053,
+	WL_PROXD_E_VERNOSUPPORT		= -1052,
+	WL_PROXD_E_SEC_NOKEY		= -1051,
+	WL_PROXD_E_SEC_POLICY		= -1050,
+	WL_PROXD_E_SCAN_INPROCESS	= -1049,
+	WL_PROXD_E_BAD_PARTIAL_TSF	= -1048,
+	WL_PROXD_E_SCANFAIL			= -1047,
+	WL_PROXD_E_NOTSF			= -1046,
+	WL_PROXD_E_POLICY			= -1045,
+	WL_PROXD_E_INCOMPLETE		= -1044,
+	WL_PROXD_E_OVERRIDDEN		= -1043,
+	WL_PROXD_E_ASAP_FAILED		= -1042,
+	WL_PROXD_E_NOTSTARTED		= -1041,
+	WL_PROXD_E_INVALIDMEAS		= -1040,
+	WL_PROXD_E_INCAPABLE		= -1039,
+	WL_PROXD_E_MISMATCH			= -1038,
+	WL_PROXD_E_DUP_SESSION		= -1037,
+	WL_PROXD_E_REMOTE_FAIL		= -1036,
+	WL_PROXD_E_REMOTE_INCAPABLE = -1035,
+	WL_PROXD_E_SCHED_FAIL		= -1034,
+	WL_PROXD_E_PROTO			= -1033,
+	WL_PROXD_E_EXPIRED			= -1032,
+	WL_PROXD_E_TIMEOUT			= -1031,
+	WL_PROXD_E_NOACK			= -1030,
+	WL_PROXD_E_DEFERRED			= -1029,
+	WL_PROXD_E_INVALID_SID		= -1028,
+	WL_PROXD_E_REMOTE_CANCEL 	= -1027,
+	WL_PROXD_E_CANCELED			= -1026,	/**< local */
+	WL_PROXD_E_INVALID_SESSION	= -1025,
+	WL_PROXD_E_BAD_STATE		= -1024,
+	WL_PROXD_E_ERROR			= -1,
+	WL_PROXD_E_OK				= 0
+};
+typedef int32 wl_proxd_status_t;
+
+/* proxd errors from phy */
+#define PROXD_TOF_INIT_ERR_BITS 16
+
+enum {
+	WL_PROXD_PHY_ERR_LB_CORR_THRESH    = (1 << 0), /* Loopback Correlation threshold */
+	WL_PROXD_PHY_ERR_RX_CORR_THRESH    = (1 << 1), /* Received Correlation threshold */
+	WL_PROXD_PHY_ERR_LB_PEAK_POWER     = (1 << 2), /* Loopback Peak power   */
+	WL_PROXD_PHY_ERR_RX_PEAK_POWER     = (1 << 3), /* Received Peak power   */
+	WL_PROXD_PHY_ERR_BITFLIP           = (1 << 4), /* Bitflips */
+	WL_PROXD_PHY_ERR_SNR               = (1 << 5), /* SNR */
+	WL_PROXD_PHY_RX_STRT_WIN_OFF       = (1 << 6), /* Receive start window is off */
+	WL_PROXD_PHY_RX_END_WIN_OFF        = (1 << 7), /* Receive End window is off */
+	WL_PROXD_PHY_ERR_LOW_CONFIDENCE    = (1 << 15), /* Low confidence on meas distance */
+};
+typedef uint32 wl_proxd_phy_error_t;
+
+/** session states */
+enum {
+	WL_PROXD_SESSION_STATE_NONE				= 0,
+	WL_PROXD_SESSION_STATE_CREATED			= 1,
+	WL_PROXD_SESSION_STATE_CONFIGURED		= 2,
+	WL_PROXD_SESSION_STATE_STARTED			= 3,
+	WL_PROXD_SESSION_STATE_DELAY			= 4,
+	WL_PROXD_SESSION_STATE_USER_WAIT		= 5,
+	WL_PROXD_SESSION_STATE_SCHED_WAIT		= 6,
+	WL_PROXD_SESSION_STATE_BURST			= 7,
+	WL_PROXD_SESSION_STATE_STOPPING			= 8,
+	WL_PROXD_SESSION_STATE_ENDED			= 9,
+	WL_PROXD_SESSION_STATE_START_WAIT		= 10,
+	WL_PROXD_SESSION_STATE_DESTROYING		= -1
+};
+typedef int16 wl_proxd_session_state_t;
+
+/** RTT sample flags */
+enum {
+	WL_PROXD_RTT_SAMPLE_NONE = 0x00,
+	WL_PROXD_RTT_SAMPLE_DISCARD	= 0x01
+};
+typedef uint8 wl_proxd_rtt_sample_flags_t;
+typedef int16 wl_proxd_rssi_t;
+typedef uint16 wl_proxd_snr_t;
+typedef uint16 wl_proxd_bitflips_t;
+
+typedef struct wl_proxd_rtt_sample {
+	uint8				id;			/**< id for the sample - non-zero */
+	wl_proxd_rtt_sample_flags_t	flags;
+	wl_proxd_rssi_t			rssi;
+	wl_proxd_intvl_t			rtt;		/**< round trip time */
+	uint32				ratespec;
+	wl_proxd_snr_t                  snr;
+	wl_proxd_bitflips_t             bitflips;
+	wl_proxd_status_t               status;
+	int32                           distance;
+	wl_proxd_phy_error_t		tof_phy_error;
+	wl_proxd_phy_error_t		tof_tgt_phy_error; /* target phy error bit map */
+	wl_proxd_snr_t                  tof_tgt_snr;
+	wl_proxd_bitflips_t             tof_tgt_bitflips;
+	uint8                           coreid;
+	uint8                           pad[3];
+} wl_proxd_rtt_sample_t;
+
+/** result flags */
+enum {
+	WL_PRXOD_RESULT_FLAG_NONE	= 0x0000,
+	WL_PROXD_RESULT_FLAG_NLOS	= 0x0001,	/**< NLOS - if available */
+	WL_PROXD_RESULT_FLAG_LOS	= 0x0002,	/**< LOS - if available */
+	WL_PROXD_RESULT_FLAG_FATAL	= 0x0004,	/**< Fatal error during burst */
+	WL_PROXD_RESULT_FLAG_VHTACK	= 0x0008,	/* VHTACK or Legacy ACK used */
+	WL_PROXD_REQUEST_SENT		= 0x0010,	/* FTM request was sent */
+	WL_PROXD_REQUEST_ACKED		= 0x0020,	/* FTM request was acked */
+	WL_PROXD_LTFSEQ_STARTED		= 0x0040,	/* LTF sequence started */
+	WL_PROXD_RESULT_FLAG_ALL 	= 0xffff
+};
+typedef int16 wl_proxd_result_flags_t;
+
+/** rtt measurement result */
+typedef struct wl_proxd_rtt_result {
+	wl_proxd_session_id_t		sid;
+	wl_proxd_result_flags_t		flags;
+	wl_proxd_status_t		status;
+	struct ether_addr		peer;
+	wl_proxd_session_state_t	state;		/**< current state */
+	union {
+		wl_proxd_intvl_t		retry_after;	/* hint for errors */
+		wl_proxd_intvl_t		burst_duration; /* burst duration */
+	} u;
+	wl_proxd_rtt_sample_t		avg_rtt;
+	uint32				avg_dist;	/* 1/256m units */
+	uint16				sd_rtt;		/* RTT standard deviation */
+	uint8				num_valid_rtt;	/* valid rtt cnt */
+	uint8				num_ftm;	/* actual num of ftm cnt (Configured) */
+	uint16				burst_num;	/* in a session */
+	uint16				num_rtt;	/* 0 if no detail */
+	uint16				num_meas;	/* number of ftm frames seen OTA */
+	uint8                           pad[2];
+	wl_proxd_rtt_sample_t		rtt[1];		/* variable */
+} wl_proxd_rtt_result_t;
+
+/** aoa measurement result */
+typedef struct wl_proxd_aoa_result {
+	wl_proxd_session_id_t			sid;
+	wl_proxd_result_flags_t			flags;
+	wl_proxd_status_t				status;
+	struct ether_addr				peer;
+	wl_proxd_session_state_t		state;
+	uint16							burst_num;
+	uint8							pad[2];
+	/* wl_proxd_aoa_sample_t sample_avg; TBD */
+} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t;
+#include <packed_section_end.h>
+
+/** global stats */
+typedef struct wl_proxd_counters {
+	uint32 tx;					/**< tx frame count */
+	uint32 rx;					/**< rx frame count */
+	uint32 burst;				/**< total number of burst */
+	uint32 sessions;			/**< total number of sessions */
+	uint32 max_sessions;		/**< max concurrency */
+	uint32 sched_fail;			/**< scheduling failures */
+	uint32 timeouts;			/**< timeouts */
+	uint32 protoerr;			/**< protocol errors */
+	uint32 noack;				/**< tx w/o ack */
+	uint32 txfail;				/**< any tx failure */
+	uint32 lci_req_tx;			/**< tx LCI requests */
+	uint32 lci_req_rx;			/**< rx LCI requests */
+	uint32 lci_rep_tx;			/**< tx LCI reports */
+	uint32 lci_rep_rx;			/**< rx LCI reports */
+	uint32 civic_req_tx;		/**< tx civic requests */
+	uint32 civic_req_rx;		/**< rx civic requests */
+	uint32 civic_rep_tx;		/**< tx civic reports */
+	uint32 civic_rep_rx;		/**< rx civic reports */
+	uint32 rctx;				/**< ranging contexts created */
+	uint32 rctx_done;			/**< count of ranging done */
+	uint32 publish_err;     /**< availability publishing errors */
+	uint32 on_chan;         /**< count of scheduler onchan */
+	uint32 off_chan;        /**< count of scheduler offchan */
+	uint32 tsf_lo;          /* local tsf or session tsf */
+	uint32 tsf_hi;
+	uint32 num_meas;
+} wl_proxd_counters_t;
+
+typedef struct wl_proxd_counters wl_proxd_session_counters_t;
+
+enum {
+	WL_PROXD_CAP_NONE 		= 0x0000,
+	WL_PROXD_CAP_ALL 		= 0xffff
+};
+typedef int16 wl_proxd_caps_t;
+
+/** method capabilities */
+enum {
+	WL_PROXD_FTM_CAP_NONE = 0x0000,
+	WL_PROXD_FTM_CAP_FTM1 = 0x0001
+};
+typedef uint16 wl_proxd_ftm_caps_t;
+
+typedef struct wl_proxd_tlv_id_list {
+	uint16			num_ids;
+	uint16			ids[1];
+} wl_proxd_tlv_id_list_t;
+
+typedef struct wl_proxd_session_id_list {
+	uint16 num_ids;
+	wl_proxd_session_id_t ids[1];
+} wl_proxd_session_id_list_t;
+
+typedef struct wl_proxd_tpk {
+	struct ether_addr	peer;
+	uint8 tpk[TPK_FTM_LEN];
+} wl_proxd_tpk_t;
+
+/* tlvs returned for get_info on ftm method
+ *	configuration:
+ *	proxd flags
+ *	event mask
+ *	debug mask
+ *	session defaults (session tlvs)
+ * status tlv - not supported for ftm method
+ * info tlv
+ */
+typedef struct wl_proxd_ftm_info {
+	wl_proxd_ftm_caps_t caps;
+	uint16 max_sessions;
+	uint16 num_sessions;
+	uint16 rx_max_burst;
+} wl_proxd_ftm_info_t;
+
+enum {
+	WL_PROXD_WAIT_NONE  = 0x0000,
+	WL_PROXD_WAIT_KEY	= 0x0001,
+	WL_PROXD_WAIT_SCHED	= 0x0002,
+	WL_PROXD_WAIT_TSF	= 0x0004
+};
+typedef int16 wl_proxd_wait_reason_t;
+
+/* tlvs returned for get_info on session
+ * session config (tlvs)
+ * session info tlv
+ */
+typedef struct wl_proxd_ftm_session_info {
+	uint16 sid;
+	uint8 bss_index;
+	uint8 pad;
+	struct ether_addr bssid;
+	wl_proxd_session_state_t state;
+	wl_proxd_status_t status;
+	uint16	burst_num;
+	wl_proxd_wait_reason_t wait_reason;
+	uint32	meas_start_lo; /* sn tsf of 1st meas for cur/prev burst */
+	uint32	meas_start_hi;
+} wl_proxd_ftm_session_info_t;
+
+typedef struct wl_proxd_ftm_session_status {
+	uint16 sid;
+	wl_proxd_session_state_t state;
+	wl_proxd_status_t status;
+	uint16	burst_num;
+	uint16	pad;
+} wl_proxd_ftm_session_status_t;
+
+/** rrm range request */
+typedef struct wl_proxd_range_req {
+	uint16 			num_repeat;
+	uint16			init_delay_range;	/**< in TUs */
+	uint8			pad;
+	uint8			num_nbr;		/**< number of (possible) neighbors */
+	nbr_element_t		nbr[1];
+} wl_proxd_range_req_t;
+
+#define WL_PROXD_LCI_LAT_OFF 	0
+#define WL_PROXD_LCI_LONG_OFF 	5
+#define WL_PROXD_LCI_ALT_OFF 	10
+
+#define WL_PROXD_LCI_GET_LAT(_lci, _lat, _lat_err) { \
+	unsigned _off = WL_PROXD_LCI_LAT_OFF; \
+	_lat_err = (_lci)->data[(_off)] & 0x3f; \
+	_lat = (_lci)->data[(_off)+1]; \
+	_lat |= (_lci)->data[(_off)+2] << 8; \
+	_lat |= (_lci)->data[(_off)+3] << 16; \
+	_lat |= (_lci)->data[(_off)+4] << 24; \
+	_lat <<= 2; \
+	_lat |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_LONG(_lci, _lcilong, _long_err) { \
+	unsigned _off = WL_PROXD_LCI_LONG_OFF; \
+	_long_err = (_lci)->data[(_off)] & 0x3f; \
+	_lcilong = (_lci)->data[(_off)+1]; \
+	_lcilong |= (_lci)->data[(_off)+2] << 8; \
+	_lcilong |= (_lci)->data[(_off)+3] << 16; \
+	_lcilong |= (_lci)->data[(_off)+4] << 24; \
+	_lcilong <<= 2; \
+	_lcilong |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_ALT(_lci, _alt_type, _alt, _alt_err) { \
+	unsigned _off = WL_PROXD_LCI_ALT_OFF; \
+	_alt_type = (_lci)->data[_off] & 0x0f; \
+	_alt_err = (_lci)->data[(_off)] >> 4; \
+	_alt_err |= ((_lci)->data[(_off)+1] & 0x03) << 4; \
+	_alt = (_lci)->data[(_off)+2]; \
+	_alt |= (_lci)->data[(_off)+3] << 8; \
+	_alt |= (_lci)->data[(_off)+4] << 16; \
+	_alt <<= 6; \
+	_alt |= (_lci)->data[(_off) + 1] >> 2; \
+}
+
+#define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6)
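+
+/* Usage sketch (illustrative only): the GET macros above reassemble the 34-bit
+ * latitude/longitude fixed-point fields that straddle byte boundaries in the
+ * LCI subelement, e.g.
+ *
+ *	int64 lat; uint8 lat_err;
+ *	WL_PROXD_LCI_GET_LAT(lci, lat, lat_err);
+ *
+ * where 'lci' is assumed to point at a structure exposing the raw LCI octets
+ * through a 'data[]' member and 'lat' is wide enough to hold 34 bits.
+ */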
+
+/* availability. advertising mechanism bss specific */
+/** availability flags */
+enum {
+	WL_PROXD_AVAIL_NONE = 0,
+	WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001,
+	WL_PROXD_AVAIL_SCHEDULED = 0x0002        /**< scheduled by proxd */
+};
+typedef int16 wl_proxd_avail_flags_t;
+
+/** time reference */
+enum {
+	WL_PROXD_TREF_NONE = 0,
+	WL_PROXD_TREF_DEV_TSF = 1,
+	WL_PROXD_TREF_NAN_DW = 2,
+	WL_PROXD_TREF_TBTT = 3,
+	WL_PROXD_TREF_MAX		/* last entry */
+};
+typedef int16 wl_proxd_time_ref_t;
+
+/** proxd channel-time slot */
+typedef struct {
+	wl_proxd_intvl_t start;         /**< from ref */
+	wl_proxd_intvl_t duration;      /**< from start */
+	uint32  chanspec;
+} wl_proxd_time_slot_t;
+
+typedef struct wl_proxd_avail24 {
+	wl_proxd_avail_flags_t flags; /**< for query only */
+	wl_proxd_time_ref_t time_ref;
+	uint16	max_slots; /**< for query only */
+	uint16  num_slots;
+	wl_proxd_time_slot_t slots[1];	/**< ROM compat - not used */
+	wl_proxd_intvl_t 	repeat;
+	wl_proxd_time_slot_t ts0[1];
+} wl_proxd_avail24_t;
+#define WL_PROXD_AVAIL24_TIMESLOT(_avail24, _i) (&(_avail24)->ts0[(_i)])
+#define WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) OFFSETOF(wl_proxd_avail24_t, ts0)
+#define WL_PROXD_AVAIL24_TIMESLOTS(_avail24) WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0)
+#define WL_PROXD_AVAIL24_SIZE(_avail24, _num_slots) (\
+	WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) + \
+	(_num_slots) * sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0)))
+
+typedef struct wl_proxd_avail {
+	wl_proxd_avail_flags_t flags; /**< for query only */
+	wl_proxd_time_ref_t time_ref;
+	uint16	max_slots; /**< for query only */
+	uint16  num_slots;
+	wl_proxd_intvl_t 	repeat;
+	wl_proxd_time_slot_t slots[1];
+} wl_proxd_avail_t;
+#define WL_PROXD_AVAIL_TIMESLOT(_avail, _i) (&(_avail)->slots[(_i)])
+#define WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) OFFSETOF(wl_proxd_avail_t, slots)
+
+#define WL_PROXD_AVAIL_TIMESLOTS(_avail) WL_PROXD_AVAIL_TIMESLOT(_avail, 0)
+#define WL_PROXD_AVAIL_SIZE(_avail, _num_slots) (\
+	WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) + \
+	(_num_slots) * sizeof(*WL_PROXD_AVAIL_TIMESLOT(_avail, 0)))
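+
+/* Sizing sketch (illustrative only): WL_PROXD_AVAIL_SIZE() computes the byte
+ * length of a variable-length availability structure before allocation, e.g.
+ *
+ *	uint16 n = 4;
+ *	uint32 sz = WL_PROXD_AVAIL_SIZE((wl_proxd_avail_t *)NULL, n);
+ *
+ * which expands to OFFSETOF(wl_proxd_avail_t, slots) plus n slot entries.
+ * The NULL cast is only used here to pick up the slot element size; sizeof
+ * does not evaluate its operand.
+ */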
+
+/* collect support TBD */
+
+/** debugging */
+enum {
+	WL_PROXD_DEBUG_NONE		= 0x00000000,
+	WL_PROXD_DEBUG_LOG		= 0x00000001,
+	WL_PROXD_DEBUG_IOV		= 0x00000002,
+	WL_PROXD_DEBUG_EVENT		= 0x00000004,
+	WL_PROXD_DEBUG_SESSION		= 0x00000008,
+	WL_PROXD_DEBUG_PROTO		= 0x00000010,
+	WL_PROXD_DEBUG_SCHED		= 0x00000020,
+	WL_PROXD_DEBUG_RANGING		= 0x00000040,
+	WL_PROXD_DEBUG_NAN		= 0x00000080,
+	WL_PROXD_DEBUG_PKT		= 0x00000100,
+	WL_PROXD_DEBUG_SEC		= 0x00000200,
+	WL_PROXD_DEBUG_EVENTLOG		= 0x80000000,	/* map/enable EVENT_LOG_TAG_PROXD_INFO */
+	WL_PROXD_DEBUG_ALL		= 0xffffffff
+};
+typedef uint32 wl_proxd_debug_mask_t;
+
+/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */
+enum {
+	WL_PROXD_TLV_ID_NONE			= 0,
+	WL_PROXD_TLV_ID_METHOD			= 1,
+	WL_PROXD_TLV_ID_FLAGS			= 2,
+	WL_PROXD_TLV_ID_CHANSPEC		= 3,	/**< note: uint32 */
+	WL_PROXD_TLV_ID_TX_POWER		= 4,
+	WL_PROXD_TLV_ID_RATESPEC		= 5,
+	WL_PROXD_TLV_ID_BURST_DURATION		= 6,	/**< intvl - length of burst */
+	WL_PROXD_TLV_ID_BURST_PERIOD		= 7,	/**< intvl - between bursts */
+	WL_PROXD_TLV_ID_BURST_FTM_SEP		= 8,	/**< intvl - between FTMs */
+	WL_PROXD_TLV_ID_BURST_NUM_FTM		= 9,	/**< uint16 - per burst */
+	WL_PROXD_TLV_ID_NUM_BURST		= 10,	/**< uint16 */
+	WL_PROXD_TLV_ID_FTM_RETRIES		= 11,	/**< uint16 at FTM level */
+	WL_PROXD_TLV_ID_BSS_INDEX		= 12,	/**< uint8 */
+	WL_PROXD_TLV_ID_BSSID			= 13,
+	WL_PROXD_TLV_ID_INIT_DELAY		= 14,	/**< intvl - optional,non-standalone only */
+	WL_PROXD_TLV_ID_BURST_TIMEOUT		= 15,	/**< expect response within - intvl */
+	WL_PROXD_TLV_ID_EVENT_MASK		= 16,	/**< interested events - in/out */
+	WL_PROXD_TLV_ID_FLAGS_MASK		= 17,	/**< interested flags - in only */
+	WL_PROXD_TLV_ID_PEER_MAC		= 18,	/**< mac address of peer */
+	WL_PROXD_TLV_ID_FTM_REQ			= 19,	/**< dot11_ftm_req */
+	WL_PROXD_TLV_ID_LCI_REQ			= 20,
+	WL_PROXD_TLV_ID_LCI			= 21,
+	WL_PROXD_TLV_ID_CIVIC_REQ		= 22,
+	WL_PROXD_TLV_ID_CIVIC			= 23,
+	WL_PROXD_TLV_ID_AVAIL24			= 24,	/**< ROM compatibility */
+	WL_PROXD_TLV_ID_SESSION_FLAGS		= 25,
+	WL_PROXD_TLV_ID_SESSION_FLAGS_MASK	= 26,	/**< in only */
+	WL_PROXD_TLV_ID_RX_MAX_BURST		= 27,	/**< uint16 - limit bursts per session */
+	WL_PROXD_TLV_ID_RANGING_INFO		= 28,	/**< ranging info */
+	WL_PROXD_TLV_ID_RANGING_FLAGS		= 29,	/**< uint16 */
+	WL_PROXD_TLV_ID_RANGING_FLAGS_MASK	= 30,	/**< uint16, in only */
+	WL_PROXD_TLV_ID_NAN_MAP_ID		= 31,
+	WL_PROXD_TLV_ID_DEV_ADDR		= 32,
+	WL_PROXD_TLV_ID_AVAIL			= 33,	/**< wl_proxd_avail_t  */
+	WL_PROXD_TLV_ID_TLV_ID			= 34,	/* uint16 tlv-id */
+	WL_PROXD_TLV_ID_FTM_REQ_RETRIES		= 35,	/* uint16 FTM request retries */
+	WL_PROXD_TLV_ID_TPK			= 36,	/* 32byte TPK  */
+	WL_PROXD_TLV_ID_RI_RR			= 36,	/* RI_RR */
+	WL_PROXD_TLV_ID_TUNE			= 37,	/* wl_proxd_params_tof_tune_t */
+
+	/* output - 512 + x */
+	WL_PROXD_TLV_ID_STATUS			= 512,
+	WL_PROXD_TLV_ID_COUNTERS		= 513,
+	WL_PROXD_TLV_ID_INFO			= 514,
+	WL_PROXD_TLV_ID_RTT_RESULT		= 515,
+	WL_PROXD_TLV_ID_AOA_RESULT		= 516,
+	WL_PROXD_TLV_ID_SESSION_INFO		= 517,
+	WL_PROXD_TLV_ID_SESSION_STATUS		= 518,
+	WL_PROXD_TLV_ID_SESSION_ID_LIST		= 519,
+
+	/* debug tlvs can be added starting 1024 */
+	WL_PROXD_TLV_ID_DEBUG_MASK		= 1024,
+	WL_PROXD_TLV_ID_COLLECT			= 1025,	/**< output only */
+	WL_PROXD_TLV_ID_STRBUF			= 1026,
+
+	WL_PROXD_TLV_ID_COLLECT_HEADER		= 1025,	/* wl_proxd_collect_header_t */
+	WL_PROXD_TLV_ID_COLLECT_INFO		= 1028,	/* wl_proxd_collect_info_t */
+	WL_PROXD_TLV_ID_COLLECT_DATA		= 1029,	/* wl_proxd_collect_data_t */
+	WL_PROXD_TLV_ID_COLLECT_CHAN_DATA	= 1030,	/* wl_proxd_collect_data_t */
+
+	WL_PROXD_TLV_ID_MAX
+};
+
+typedef struct wl_proxd_tlv {
+	uint16 id;
+	uint16 len;
+	uint8  data[1];
+} wl_proxd_tlv_t;
+
+/** proxd iovar - applies to proxd, method or session */
+typedef struct wl_proxd_iov {
+	uint16                  version;
+	uint16                  len;
+	wl_proxd_cmd_t          cmd;
+	wl_proxd_method_t       method;
+	wl_proxd_session_id_t   sid;
+	uint8                   PAD[2];
+	wl_proxd_tlv_t          tlvs[1];	/**< variable */
+} wl_proxd_iov_t;
+
+#define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs)
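+
+/* Walking sketch (illustrative only, exact padding is firmware defined): a
+ * wl_proxd_iov_t carries a variable number of TLVs after the fixed header,
+ * each 32-bit aligned as noted above, so a consumer could step through them
+ * roughly as
+ *
+ *	wl_proxd_tlv_t *tlv = iov->tlvs;
+ *	uint16 left = iov->len - WL_PROXD_IOV_HDR_SIZE;
+ *	while (left >= OFFSETOF(wl_proxd_tlv_t, data)) {
+ *		uint16 adv = OFFSETOF(wl_proxd_tlv_t, data) + ALIGN_SIZE(tlv->len, 4);
+ *		if (adv > left)
+ *			break;
+ *		... consume tlv->id / tlv->len / tlv->data ...
+ *		tlv = (wl_proxd_tlv_t *)((uint8 *)tlv + adv);
+ *		left -= adv;
+ *	}
+ *
+ * ALIGN_SIZE() is assumed here and is not defined in this header.
+ */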
+
+/* The following event definitions may move to bcmevent.h, but sharing proxd types
+ * across headers needs more invasive changes unrelated to proxd
+ */
+enum {
+	WL_PROXD_EVENT_NONE			= 0,	/**< not an event, reserved */
+	WL_PROXD_EVENT_SESSION_CREATE		= 1,
+	WL_PROXD_EVENT_SESSION_START		= 2,
+	WL_PROXD_EVENT_FTM_REQ			= 3,
+	WL_PROXD_EVENT_BURST_START		= 4,
+	WL_PROXD_EVENT_BURST_END		= 5,
+	WL_PROXD_EVENT_SESSION_END		= 6,
+	WL_PROXD_EVENT_SESSION_RESTART		= 7,
+	WL_PROXD_EVENT_BURST_RESCHED		= 8,	/**< burst rescheduled-e.g. partial TSF */
+	WL_PROXD_EVENT_SESSION_DESTROY		= 9,
+	WL_PROXD_EVENT_RANGE_REQ		= 10,
+	WL_PROXD_EVENT_FTM_FRAME		= 11,
+	WL_PROXD_EVENT_DELAY			= 12,
+	WL_PROXD_EVENT_VS_INITIATOR_RPT		= 13,	/**< (target) rx initiator-report */
+	WL_PROXD_EVENT_RANGING			= 14,
+	WL_PROXD_EVENT_LCI_MEAS_REP		= 15,	/* LCI measurement report */
+	WL_PROXD_EVENT_CIVIC_MEAS_REP		= 16,	/* civic measurement report */
+	WL_PROXD_EVENT_COLLECT			= 17,
+	WL_PROXD_EVENT_START_WAIT		= 18,	/* waiting to start */
+
+	WL_PROXD_EVENT_MAX
+};
+typedef int16 wl_proxd_event_type_t;
+
+/** proxd event mask - up to 32 events for now */
+typedef uint32 wl_proxd_event_mask_t;
+
+#define WL_PROXD_EVENT_MASK_ALL 0xfffffffe
+#define WL_PROXD_EVENT_MASK_EVENT(_event_type) (1 << (_event_type))
+#define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\
+	((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0)
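+
+/* Example (illustrative only): selecting burst-end and session-end events in
+ * a mask and testing it with the helpers above:
+ *
+ *	wl_proxd_event_mask_t mask =
+ *		WL_PROXD_EVENT_MASK_EVENT(WL_PROXD_EVENT_BURST_END) |
+ *		WL_PROXD_EVENT_MASK_EVENT(WL_PROXD_EVENT_SESSION_END);
+ *	if (WL_PROXD_EVENT_ENABLED(mask, WL_PROXD_EVENT_BURST_END))
+ *		... deliver the event ...
+ */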
+
+/** proxd event - applies to proxd, method or session */
+typedef struct wl_proxd_event {
+	uint16					version;
+	uint16					len;
+	wl_proxd_event_type_t 	type;
+	wl_proxd_method_t 		method;
+	wl_proxd_session_id_t 	sid;
+	uint8					pad[2];
+	wl_proxd_tlv_t 			tlvs[1];	/**< variable */
+} wl_proxd_event_t;
+
+enum {
+	WL_PROXD_RANGING_STATE_NONE = 0,
+	WL_PROXD_RANGING_STATE_NOTSTARTED = 1,
+	WL_PROXD_RANGING_STATE_INPROGRESS = 2,
+	WL_PROXD_RANGING_STATE_DONE = 3
+};
+typedef int16 wl_proxd_ranging_state_t;
+
+/** proxd ranging flags */
+enum {
+	WL_PROXD_RANGING_FLAG_NONE = 0x0000,  /**< no flags */
+	WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001,
+	WL_PROXD_RANGING_FLAG_ALL = 0xffff
+};
+typedef uint16 wl_proxd_ranging_flags_t;
+
+struct wl_proxd_ranging_info {
+	wl_proxd_status_t   status;
+	wl_proxd_ranging_state_t state;
+	wl_proxd_ranging_flags_t flags;
+	uint16	num_sids;
+	uint16	num_done;
+};
+typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t;
+
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data {
+	uint32                  H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
+	uint32                  H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
+	uint8                   ri_rr[FTM_TPK_LEN];
+	wl_proxd_phy_error_t    phy_err_mask;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_t;
+#include <packed_section_end.h>
+
+/** Data returned by the bssload_report iovar. This is also the WLC_E_BSS_LOAD event data */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct wl_bssload {
+	uint16 sta_count;		/**< station count */
+	uint16 aac;			/**< available admission capacity */
+	uint8 chan_util;		/**< channel utilization */
+} BWL_POST_PACKED_STRUCT wl_bssload_t;
+#include <packed_section_end.h>
+
+/**
+ * Maximum number of configurable BSS Load levels.  The number of BSS Load
+ * ranges is always 1 more than the number of configured levels.  E.g. if
+ * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges:
+ * 0-10, 11-20, 21-30, 31-255.  A WLC_E_BSS_LOAD event is generated each time
+ * the utilization level crosses into another range, subject to the rate limit.
+ */
+#define MAX_BSSLOAD_LEVELS 8
+#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1)
+
+/** BSS Load event notification configuration. */
+typedef struct wl_bssload_cfg {
+	uint32 rate_limit_msec;	/**< # of events posted to application will be limited to
+				 * one per specified period (0 to disable rate limit).
+				 */
+	uint8 num_util_levels;	/**< Number of entries in util_levels[] below */
+	uint8 util_levels[MAX_BSSLOAD_LEVELS];
+				/**< Variable number of BSS Load utilization levels in
+				 * low to high order.  An event will be posted each time
+				 * a received beacon's BSS Load IE channel utilization
+				 * value crosses a level.
+				 */
+	uint8 PAD[3];
+} wl_bssload_cfg_t;
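+
+/* Configuration sketch (illustrative only) matching the example above: three
+ * utilization levels of 10, 20 and 30, rate limited to one event per second:
+ *
+ *	wl_bssload_cfg_t cfg = {0};
+ *	cfg.rate_limit_msec = 1000;
+ *	cfg.num_util_levels = 3;
+ *	cfg.util_levels[0] = 10;
+ *	cfg.util_levels[1] = 20;
+ *	cfg.util_levels[2] = 30;
+ */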
+
+/** Multiple roaming profile support */
+#define WL_MAX_ROAM_PROF_BRACKETS	4
+
+#define WL_ROAM_PROF_VER_0	0
+#define WL_ROAM_PROF_VER_1	1
+#define WL_MAX_ROAM_PROF_VER	WL_ROAM_PROF_VER_1
+
+#define WL_ROAM_PROF_NONE	(0 << 0)
+#define WL_ROAM_PROF_LAZY	(1 << 0)
+#define WL_ROAM_PROF_NO_CI	(1 << 1)
+#define WL_ROAM_PROF_SUSPEND	(1 << 2)
+#define WL_ROAM_PROF_SYNC_DTIM	(1 << 6)
+#define WL_ROAM_PROF_DEFAULT	(1 << 7)	/**< backward compatible single default profile */
+
+#define WL_FACTOR_TABLE_MAX_LIMIT 5
+
+#define WL_CU_2G_ROAM_TRIGGER (-60)
+#define WL_CU_5G_ROAM_TRIGGER (-70)
+
+#define WL_CU_SCORE_DELTA_DEFAULT 20
+
+#define WL_MAX_CHANNEL_USAGE 0x0FF
+#define WL_CU_PERCENTAGE_DISABLE 0
+#define WL_CU_PERCENTAGE_DEFAULT 70
+#define WL_CU_PERCENTAGE_MAX 100
+#define WL_CU_CALC_DURATION_DEFAULT 10 /* seconds */
+#define WL_CU_CALC_DURATION_MAX 60 /* seconds */
+
+typedef struct wl_roam_prof_v2 {
+	int8	roam_flags;		/**< bit flags */
+	int8	roam_trigger;		/**< RSSI trigger level per profile/RSSI bracket */
+	int8	rssi_lower;
+	int8	roam_delta;
+
+	/* if channel_usage is zero, roam_delta is rssi delta required for new AP */
+	/* if channel_usage is non-zero, roam_delta is score delta(%) required for new AP */
+	int8	rssi_boost_thresh;	/**< Min RSSI to qualify for RSSI boost */
+	int8	rssi_boost_delta;	/**< RSSI boost for AP in the other band */
+	uint16	nfscan;			/**< number of full scan to start with */
+	uint16	fullscan_period;
+	uint16	init_scan_period;
+	uint16	backoff_multiplier;
+	uint16	max_scan_period;
+	uint8	channel_usage;
+	uint8	cu_avg_calc_dur;
+	uint8	pad[2];
+} wl_roam_prof_v2_t;
+
+typedef struct wl_roam_prof_v1 {
+	int8	roam_flags;		/**< bit flags */
+	int8	roam_trigger;		/**< RSSI trigger level per profile/RSSI bracket */
+	int8	rssi_lower;
+	int8	roam_delta;
+
+	/* if channel_usage is zero, roam_delta is rssi delta required for new AP */
+	/* if channel_usage is non-zero, roam_delta is score delta(%) required for new AP */
+	int8	rssi_boost_thresh;	/**< Min RSSI to qualify for RSSI boost */
+	int8	rssi_boost_delta;	/**< RSSI boost for AP in the other band */
+	uint16	nfscan;			/**< number of full scan to start with */
+	uint16	fullscan_period;
+	uint16	init_scan_period;
+	uint16	backoff_multiplier;
+	uint16	max_scan_period;
+} wl_roam_prof_v1_t;
+
+typedef struct wl_roam_prof_band_v2 {
+	uint32	band;			/**< Must be just one band */
+	uint16	ver;			/**< version of this struct */
+	uint16	len;			/**< length in bytes of this structure */
+	wl_roam_prof_v2_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_v2_t;
+
+typedef struct wl_roam_prof_band_v1 {
+	uint32	band;			/**< Must be just one band */
+	uint16	ver;			/**< version of this struct */
+	uint16	len;			/**< length in bytes of this structure */
+	wl_roam_prof_v1_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_v1_t;
+
+#define BSS_MAXTABLE_SIZE 10
+#define WNM_BSS_SELECT_FACTOR_VERSION   1
+typedef struct wnm_bss_select_factor_params {
+	uint8 low;
+	uint8 high;
+	uint8 factor;
+	uint8 pad;
+} wnm_bss_select_factor_params_t;
+
+#define WNM_BSS_SELECT_FIXED_SIZE OFFSETOF(wnm_bss_select_factor_cfg_t, params)
+typedef struct wnm_bss_select_factor_cfg {
+	uint8 version;
+	uint8 band;
+	uint16 type;
+	uint16 pad;
+	uint16 count;
+	wnm_bss_select_factor_params_t params[1];
+} wnm_bss_select_factor_cfg_t;
+
+#define WNM_BSS_SELECT_WEIGHT_VERSION   1
+typedef struct wnm_bss_select_weight_cfg {
+	uint8 version;
+	uint8 band;
+	uint16 type;
+	uint16 weight; /* weightage for each type between 0 to 100 */
+} wnm_bss_select_weight_cfg_t;
+
+#define WNM_BSS_SELECT_TYPE_RSSI   0
+#define WNM_BSS_SELECT_TYPE_CU   1
+
+#define WNM_BSSLOAD_MONITOR_VERSION   1
+typedef struct wnm_bssload_monitor_cfg {
+	uint8 version;
+	uint8 band;
+	uint8 duration; /* duration between 1 to 20sec */
+} wnm_bssload_monitor_cfg_t;
+
+#define WNM_ROAM_TRIGGER_VERSION   1
+typedef struct wnm_roam_trigger_cfg {
+	uint8 version;
+	uint8 band;
+	uint16 type;
+	int16 trigger; /* trigger for each type in new roam algorithm */
+} wnm_roam_trigger_cfg_t;
+
+/* Data structures for Interface Create/Remove  */
+
+#define WL_INTERFACE_CREATE_VER	(0)
+#define WL_INTERFACE_CREATE_VER_1	1
+#define WL_INTERFACE_CREATE_VER_2	2
+#define WL_INTERFACE_CREATE_VER_3	3
+
+/*
+ * The flags field of the wl_interface_create is designed to be
+ * a Bit Mask. As of now only Bit 0 and Bit 1 are used as mentioned below.
+ * The rest of the bits can be used, in case we have to provide
+ * more information to the dongle.
+ */
+
+/*
+ * Bit 0 of flags field is used to inform whether the interface requested to
+ * be created is STA or AP.
+ * 0 - Create a STA interface
+ * 1 - Create an AP interface
+ * NOTE: This Bit 0 is applicable for the WL_INTERFACE_CREATE_VER < 2
+ */
+#define WL_INTERFACE_CREATE_STA	(0 << 0)
+#define WL_INTERFACE_CREATE_AP	(1 << 0)
+
+/*
+ * From revision >= 2, Bit 0 of the flags field will no longer be used for STA or AP interface creation.
+ * "iftype" field shall be used for identifying the interface type.
+ */
+typedef enum wl_interface_type {
+	WL_INTERFACE_TYPE_STA = 0,
+	WL_INTERFACE_TYPE_AP = 1,
+	WL_INTERFACE_TYPE_AWDL = 2,
+	WL_INTERFACE_TYPE_NAN = 3,
+	WL_INTERFACE_TYPE_MAX
+} wl_interface_type_t;
+
+/*
+ * Bit 1 of flags field is used to inform whether MAC is present in the
+ * data structure or not.
+ * 0 - Ignore mac_addr field
+ * 1 - Use the mac_addr field
+ */
+#define WL_INTERFACE_MAC_DONT_USE	(0 << 1)
+#define WL_INTERFACE_MAC_USE		(1 << 1)
+
+/*
+ * Bit 2 of flags field is used to inform whether core or wlc index
+ * is present in the data structure or not.
+ * 0 - Ignore wlc_index field
+ * 1 - Use the wlc_index field
+ */
+#define WL_INTERFACE_WLC_INDEX_DONT_USE	(0 << 2)
+#define WL_INTERFACE_WLC_INDEX_USE	(1 << 2)
+
+/*
+ * Bit 3 of flags field is used to create interface on the host requested interface index
+ * 0 - Ignore if_index field
+ * 1 - Use the if_index field
+ */
+#define WL_INTERFACE_IF_INDEX_USE       (1 << 3)
+
+/*
+ * Bit 4 of flags field is used to assign BSSID
+ * 0 - Ignore bssid field
+ * 1 - Use the bssid field
+ */
+#define WL_INTERFACE_BSSID_INDEX_USE	(1 << 4)
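+
+/* Flags composition sketch (illustrative only): for create versions below 2,
+ * an AP interface with a caller-supplied MAC address would set
+ *
+ *	uint32 flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE;
+ *
+ * From version 2 onward the STA/AP choice moves to the 'iftype' field
+ * (wl_interface_type_t) and only the MAC / wlc_index / if_index / BSSID
+ * "use" bits above remain meaningful in 'flags'.
+ */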
+
+#ifdef WLMESH
+typedef struct wl_interface_info {
+    uint16  ver;            /* version of this struct */
+    struct ether_addr    mac_addr;  /* MAC address of the interface */
+    char    ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */
+    uint8   bsscfgidx;      /* source bsscfg index */
+} wl_interface_info_t;
+#endif
+
+typedef struct wl_interface_create {
+	uint16	ver;			/* version of this struct */
+	uint32  flags;			/* flags that defines the operation */
+	struct	ether_addr   mac_addr;	/* Optional Mac address */
+} wl_interface_create_t;
+
+typedef struct wl_interface_create_v1 {
+	uint16  ver;                    /**< version of this struct */
+	uint8   pad1[2];                /**< Padding bytes */
+	uint32  flags;                  /**< flags that defines the operation */
+	struct  ether_addr   mac_addr;  /**< Optional Mac address */
+	uint8   pad2[2];                /**< Padding bytes */
+	uint32  wlc_index;              /**< Optional wlc index */
+} wl_interface_create_v1_t;
+
+typedef struct wl_interface_create_v2 {
+	uint16  ver;                    /**< version of this struct */
+	uint8   pad1[2];                /**< Padding bytes */
+	uint32  flags;                  /**< flags that defines the operation */
+	struct  ether_addr   mac_addr;  /**< Optional Mac address */
+	uint8   iftype;                 /**< Type of interface created */
+	uint8   pad2;                   /**< Padding bytes */
+	uint32  wlc_index;              /**< Optional wlc index */
+} wl_interface_create_v2_t;
+
+typedef struct wl_interface_create_v3 {
+	uint16	ver;			/**< version of this struct */
+	uint16	len;			/**< length of whole structure including variable length */
+	uint16	fixed_len;		/**< Fixed length of this structure excluding data[] */
+	uint8	iftype;			/**< Type of interface created */
+	uint8	wlc_index;		/**< Optional wlc index */
+	uint32  flags;			/**< flags that defines the operation */
+	struct	ether_addr   mac_addr;	/**< Optional Mac address */
+	struct	ether_addr   bssid;	/**< Optional BSSID */
+	uint8	if_index;		/**< interface index requested by Host */
+	uint8	pad[3];			/**< Padding bytes to ensure data[] is at 32 bit aligned */
+	uint8	data[];			/**< Optional application/Module specific data */
+} wl_interface_create_v3_t;
+
+#define WL_INTERFACE_INFO_VER_1		1
+#define WL_INTERFACE_INFO_VER_2		2
+
+typedef struct wl_interface_info_v1 {
+	uint16  ver;                    /**< version of this struct */
+	struct ether_addr    mac_addr;  /**< MAC address of the interface */
+	char    ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */
+	uint8   bsscfgidx;              /**< source bsscfg index */
+	uint8	PAD;
+} wl_interface_info_v1_t;
+
+typedef struct wl_interface_info_v2 {
+	uint16                  ver;                    /**< version of this struct */
+	uint16                  length;                 /**< length of the whole structure */
+	struct  ether_addr      mac_addr;               /**< MAC address of the interface */
+	uint8                   bsscfgidx;              /**< source bsscfg index */
+	uint8                   if_index;               /**< Interface index allocated by FW */
+	char                    ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */
+} wl_interface_info_v2_t;
+
+#define PHY_RXIQEST_AVERAGING_DELAY 10
+
+typedef struct wl_iqest_params {
+	uint32 rxiq;
+	uint8 niter;
+	uint8 delay;
+	uint8 PAD[2];
+} wl_iqest_params_t;
+
+typedef struct wl_iqest_sweep_params {
+	wl_iqest_params_t params;
+	uint8 nchannels;
+	uint8 channel[3];	/** variable */
+} wl_iqest_sweep_params_t;
+
+typedef struct wl_iqest_value {
+	uint8 channel;
+	uint8 PAD[3];
+	uint32 rxiq;
+} wl_iqest_value_t;
+
+typedef struct wl_iqest_result {
+	uint8 nvalues;
+	uint8 PAD[3];
+	wl_iqest_value_t value[1];
+} wl_iqest_result_t;
+
+/* BTCX AIBSS (Oxygen) Status */
+typedef struct wlc_btc_aibss_info {
+	uint32	prev_tsf_l;		// Lower 32 bits of last read of TSF
+	uint32	prev_tsf_h;		// Higher 32 bits of last read of TSF
+	uint32	last_btinfo;		// Last read of BT info
+	uint32	local_btinfo;		// Local BT INFO BitMap
+	uint8	bt_out_of_sync_cnt;	// BT not in sync with strobe
+	uint8	esco_off_cnt;		// Count incremented when ESCO is off
+	uint8	strobe_enabled;		// Set only in AIBSS mode
+	uint8	strobe_on;		// strobe to BT is on for Oxygen
+	uint8	local_bt_in_sync;	// Sync status of local BT when strobe is on
+	uint8	other_bt_in_sync;	// Sync state of BT in other devices in AIBSS
+	uint8	local_bt_is_master;	// Local BT is master
+	uint8	sco_prot_on;		// eSCO Protection on in local device
+	uint8	other_esco_present;	// eSCO status in other devices in AIBSS
+	uint8	rx_agg_change;		// Indicates Rx Agg size needs to change
+	uint8	rx_agg_modified;	// Rx Agg size modified
+	uint8	acl_grant_set;		// ACL grants on for speeding up sync
+	uint8	write_ie_err_cnt;	// BTCX Ie write error cnt
+	uint8	parse_ie_err_cnt;	// BTCX IE parse error cnt
+	uint8	wci2_fail_cnt;		// WCI2 init failure cnt
+	uint8	strobe_enable_err_cnt;	// Strobe enable err cnt
+	uint8	strobe_init_err_cnt;	// Strobe init err cnt
+	uint8	tsf_jump_cnt;		// TSF jump cnt
+	uint8	acl_grant_cnt;		// ACL grant cnt
+	uint8	pad1;
+	uint16	ibss_tsf_shm;		// SHM address of strobe TSF
+	uint16	pad2;
+} wlc_btc_aibss_info_t;
+
+#define WLC_BTC_AIBSS_STATUS_VER	1
+#define WLC_BTC_AIBSS_STATUS_LEN	(sizeof(wlc_btc_aibss_status_t) - 2 * (sizeof(uint16)))
+
+typedef struct wlc_btc_aibss_status {
+	uint16	version;		// Version #
+	uint16	len;			// Length of the structure(excluding len & version)
+	int32	mode;			// Current value of btc_mode
+	uint16	bth_period;             // bt coex period. read from shm.
+	uint16	agg_off_bm;		// AGG OFF BM read from SHM
+	uint8	bth_active;             // bt active session
+	uint8	pad[3];
+	wlc_btc_aibss_info_t aibss_info;	// Structure definition above
+} wlc_btc_aibss_status_t;
+
+typedef enum {
+	STATE_NONE = 0,
+
+	/* WLAN -> BT */
+	W2B_DATA_SET = 21,
+	B2W_ACK_SET = 22,
+	W2B_DATA_CLEAR = 23,
+	B2W_ACK_CLEAR = 24,
+
+	/* BT -> WLAN */
+	B2W_DATA_SET = 31,
+	W2B_ACK_SET = 32,
+	B2W_DATA_CLEAR = 33,
+	W2B_ACK_CLEAR = 34
+} bwte_gci_intstate_t;
+
+#define WL_BWTE_STATS_VERSION 1 /* version of bwte_stats_t */
+typedef struct {
+	uint32 version;
+
+	bwte_gci_intstate_t inttobt;
+	bwte_gci_intstate_t intfrombt;
+
+	uint32 bt2wl_intrcnt; /* bt->wlan interrupt count */
+	uint32 wl2bt_intrcnt; /* wlan->bt interrupt count  */
+
+	uint32 wl2bt_dset_cnt;
+	uint32 wl2bt_dclear_cnt;
+	uint32 wl2bt_aset_cnt;
+	uint32 wl2bt_aclear_cnt;
+
+	uint32 bt2wl_dset_cnt;
+	uint32 bt2wl_dclear_cnt;
+	uint32 bt2wl_aset_cnt;
+	uint32 bt2wl_aclear_cnt;
+
+	uint32 state_error_1;
+	uint32 state_error_2;
+	uint32 state_error_3;
+	uint32 state_error_4;
+} bwte_stats_t;
+
+#define TBOW_MAX_SSID_LEN        32
+#define TBOW_MAX_PASSPHRASE_LEN  63
+
+#define WL_TBOW_SETUPINFO_T_VERSION 1 /* version of tbow_setup_netinfo_t */
+typedef struct tbow_setup_netinfo {
+	uint32 version;
+	uint8 opmode;
+	uint8 pad;
+	uint8 macaddr[ETHER_ADDR_LEN];
+	uint32 ssid_len;
+	uint8 ssid[TBOW_MAX_SSID_LEN];
+	uint8 passphrase_len;
+	uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN];
+	chanspec_t chanspec;
+	uint8 PAD[2];
+	uint32 channel;
+} tbow_setup_netinfo_t;
+
+typedef enum tbow_ho_opmode {
+	TBOW_HO_MODE_START_GO = 0,
+	TBOW_HO_MODE_START_STA,
+	TBOW_HO_MODE_START_GC,
+	TBOW_HO_MODE_TEST_GO,
+	TBOW_HO_MODE_STOP_GO = 0x10,
+	TBOW_HO_MODE_STOP_STA,
+	TBOW_HO_MODE_STOP_GC,
+	TBOW_HO_MODE_TEARDOWN
+} tbow_ho_opmode_t;
+
+/* Beacon trim feature statistics */
+/* configuration */
+#define BCNTRIMST_PER			0	/* Number of beacons to trim (0: disable) */
+#define BCNTRIMST_TIMEND		1	/* Number of bytes till TIM IE */
+#define BCNTRIMST_TSFLMT		2	/* TSF tolerance value (usecs) */
+/* internal use */
+#define BCNTRIMST_CUR			3	/* PSM's local beacon trim counter */
+#define BCNTRIMST_PREVLEN		4	/* Beacon length excluding the TIM IE */
+#define BCNTRIMST_TIMLEN		5	/* TIM IE Length */
+#define BCNTRIMST_RSSI			6	/* Partial beacon RSSI */
+#define BCNTRIMST_CHAN			7	/* Partial beacon channel */
+/* debug stat (off by default) */
+#define BCNTRIMST_DUR			8	/* RX duration until beacon trimmed */
+#define BCNTRIMST_RXMBSS		9	/* MYBSSID beacon received */
+#define BCNTRIMST_CANTRIM		10	/* # beacons which were trimmed */
+#define BCNTRIMST_LENCHG		11	/* # beacons not trimmed due to length change */
+#define BCNTRIMST_TSFDRF		12	/* # beacons not trimmed due to large TSF delta */
+#define BCNTRIMST_NOTIM			13	/* # beacons not trimmed due to TIM missing */
+
+#define BCNTRIMST_NUM			14
+
+#define WL_BCNTRIM_STATUS_VERSION_1 1
+typedef struct wl_bcntrim_status_query_v1 {
+	uint16  version;
+	uint16  len;     /* Total length includes fixed fields */
+	uint8   reset;   /* reset after reading the stats */
+	uint8   pad[3];  /* 4-byte alignment */
+} wl_bcntrim_status_query_v1_t;
+
+typedef struct wl_bcntrim_status_v1 {
+	uint16  version;
+	uint16  len;            /* Total length includes fixed fields and variable data[] */
+	uint8   curr_slice_id;  /* slice index of the interface */
+	uint8   applied_cfg;    /* applied bcntrim N threshold */
+	uint8   pad[2];         /* 4-byte alignment */
+	uint32  fw_status;      /* Bits representing bcntrim disable reason in FW */
+	uint32  total_disable_dur;    /* total duration (msec) bcntrim remains
+	                                 disabled due to FW disable reasons
+	                               */
+	uint32  data[];         /* variable length data containing stats */
+} wl_bcntrim_status_v1_t;
+
+#define BCNTRIM_STATS_MAX            10      /* Total stats part of the status data[] */
+
+/* Bits for FW status */
+#define WL_BCNTRIM_DISABLE_HOST      0x1  /* Host disabled bcntrim through bcntrim IOVar */
+#define WL_BCNTRIM_DISABLE_PHY_RATE  0x2  /* bcntrim disabled because beacon rx rate is
+	                                     higher than phy_rate_thresh
+	                                   */
+#define WL_BCNTRIM_DISABLE_QUIET_IE  0x4  /* bcntrim disable when Quiet IE present */
+
+#define WL_BCNTRIM_CFG_VERSION_1     1
+/* Common IOVAR struct */
+typedef struct wl_bcntrim_cfg_v1 {
+	uint16  version;
+	uint16 len;          /* Total length includes fixed fields and variable data[] */
+	uint16 subcmd_id;    /* subcommand id */
+	uint16 pad;          /* pad/reserved */
+	uint8 data[];        /* subcommand data; could be empty */
+} wl_bcntrim_cfg_v1_t;
+
+/* subcommands ids */
+enum {
+	WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_THRESH =       0,   /* PHY rate threshold above
+	                                                      which bcntrim is not applied
+	                                                    */
+	WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1,   /* Override bcntrim disable reasons */
+	WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT =       2    /* TSF drift limit to consider bcntrim */
+};
+
+#define BCNTRIM_MAX_PHY_RATE	48     /* in 500Kbps */
+#define BCNTRIM_MAX_TSF_DRIFT   65535  /* in usec */
+#define WL_BCNTRIM_OVERRIDE_DISABLE_MASK  (WL_BCNTRIM_DISABLE_QUIET_IE)
+
+/* WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_THRESH */
+typedef struct wl_bcntrim_cfg_phy_rate_thresh {
+	uint32 rate;      /* beacon rate (in 500kbps units)  */
+} wl_bcntrim_cfg_phy_rate_thresh_t;
+
+/* WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */
+typedef struct wl_bcntrim_cfg_override_disable_mask {
+	uint32  mask;     /* bits representing individual disable reason to override */
+} wl_bcntrim_cfg_override_disable_mask_t;
+
+/* WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT */
+typedef struct wl_bcntrim_cfg_tsf_drift_limit {
+	uint16   drift;   /* tsf drift limit specified in usec */
+	uint8    pad[2];  /* 4-byte alignment */
+} wl_bcntrim_cfg_tsf_drift_limit_t;
+
+
+/* --------------  TX Power Cap --------------- */
+#define TXPWRCAP_MAX_NUM_CORES 8
+#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2)
+
+#define TXPWRCAP_NUM_SUBBANDS 5
+
+/* IOVAR txcapconfig enums */
+#define TXPWRCAPCONFIG_WCI2 0
+#define TXPWRCAPCONFIG_HOST 1
+#define TXPWRCAPCONFIG_WCI2_AND_HOST 2
+
+/* IOVAR txcapstate enums */
+#define TXPWRCAPSTATE_LOW_CAP  0
+#define TXPWRCAPSTATE_HIGH_CAP 1
+#define TXPWRCAPSTATE_HOST_LOW_WCI2_LOW_CAP	0
+#define TXPWRCAPSTATE_HOST_LOW_WCI2_HIGH_CAP	1
+#define TXPWRCAPSTATE_HOST_HIGH_WCI2_LOW_CAP	2
+#define TXPWRCAPSTATE_HOST_HIGH_WCI2_HIGH_CAP	3
+
+/* IOVAR txcapconfig and txcapstate structure is shared: SET and GET */
+#define TXPWRCAPCTL_VERSION 2
+typedef struct wl_txpwrcap_ctl {
+	uint8   version;
+	uint8   ctl[TXPWRCAP_NUM_SUBBANDS];
+} wl_txpwrcap_ctl_t;
+
+/* IOVAR txcapdump structure: GET only */
+#define TXPWRCAP_DUMP_VERSION 2
+typedef struct wl_txpwrcap_dump {
+	uint8   version;
+	uint8	pad0;
+	uint8   current_country[2];
+	uint32	current_channel;
+	uint8   config[TXPWRCAP_NUM_SUBBANDS];
+	uint8   state[TXPWRCAP_NUM_SUBBANDS];
+	uint8	high_cap_state_enabled;
+	uint8	wci2_cell_status_last;
+	uint8   download_present;
+	uint8	num_subbands;
+	uint8	num_antennas;
+	uint8   num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+	uint8	num_cc_groups;
+	uint8   current_country_cc_group_info_index;
+	int8    low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+	int8    high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+	uint8	PAD[3];
+} wl_txpwrcap_dump_t;
+
+typedef struct wl_txpwrcap_dump_v3 {
+	uint8   version;
+	uint8	pad0;
+	uint8   current_country[2];
+	uint32	current_channel;
+	uint8   config[TXPWRCAP_NUM_SUBBANDS];
+	uint8   state[TXPWRCAP_NUM_SUBBANDS];
+	uint8	high_cap_state_enabled;
+	uint8	wci2_cell_status_last;
+	uint8   download_present;
+	uint8	num_subbands;
+	uint8	num_antennas;
+	uint8   num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+	uint8	num_cc_groups;
+	uint8   current_country_cc_group_info_index;
+	uint8	cap_states_per_cc_group;
+	int8    host_low_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+	int8    host_low_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+	int8    host_high_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+	int8    host_high_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS];
+	uint8	PAD[2];
+} wl_txpwrcap_dump_v3_t;
+
+typedef struct wl_txpwrcap_tbl {
+	uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+	/* Stores values for valid antennas */
+	int8 pwrcap_cell_on[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
+	int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
+} wl_txpwrcap_tbl_t;
+
+/* ##### Ecounters section ##### */
+#define ECOUNTERS_VERSION_1	1
+
+/* Input structure for ecounters IOVAR */
+typedef struct ecounters_config_request {
+	uint16 version;		/* config version */
+	uint16 set;		/* Set where data will go. */
+	uint16 size;		/* Size of the set. */
+	uint16 timeout;		/* timeout in seconds. */
+	uint16 num_events;	/* Number of events to report. */
+	uint16 ntypes;		/* Number of entries in type array. */
+	uint16 type[1];		/* Statistics Types (tags) to retrieve. */
+} ecounters_config_request_t;
+
+#define ECOUNTERS_EVENTMSGS_VERSION_1		1
+#define ECOUNTERS_TRIGGER_CONFIG_VERSION_1	1
+
+#define ECOUNTERS_EVENTMSGS_EXT_MASK_OFFSET	\
+		OFFSETOF(ecounters_eventmsgs_ext_t, mask[0])
+
+#define ECOUNTERS_TRIG_CONFIG_TYPE_OFFSET	\
+		OFFSETOF(ecounters_trigger_config_t, type[0])
+
+typedef struct ecounters_eventmsgs_ext {
+	uint8 version;
+	uint8 len;
+	uint8 mask[1];
+} ecounters_eventmsgs_ext_t;
+
+typedef struct ecounters_trigger_config {
+	uint16 version;		/* version */
+	uint16 set;		/* set where data should go */
+	uint16 rsvd;		/* reserved */
+	uint16 pad;		/* pad/reserved */
+	uint16 ntypes;		/* number of types/tags */
+	uint16 type[1];		/* list of types */
+} ecounters_trigger_config_t;
+
+#define ECOUNTERS_TRIGGER_REASON_VERSION_1	1
+/* Triggered due to timer based ecounters */
+#define ECOUNTERS_TRIGGER_REASON_TIMER		0
+/* Triggered due to event based configuration */
+#define ECOUNTERS_TRIGGER_REASON_EVENTS		1
+#define ECOUNTERS_TRIGGER_REASON_MAX		1
+
+typedef struct ecounters_trigger_reason {
+	uint16 version;			/* version */
+	uint16 trigger_reason;		/* trigger reason */
+	uint32 sub_reason_code;		/* sub reason code */
+	uint32 trigger_time_now;	/* time in ms  at trigger */
+	uint32 host_ref_time;		/* host ref time */
+} ecounters_trigger_reason_t;
+
+#define WL_LQM_VERSION_1 1
+
+/* For wl_lqm_t flags field */
+#define WL_LQM_CURRENT_BSS_VALID 0x1
+#define WL_LQM_TARGET_BSS_VALID 0x2
+
+typedef struct {
+	struct ether_addr BSSID;
+	chanspec_t chanspec;
+	int32 rssi;
+	int32 snr;
+} wl_rx_signal_metric_t;
+
+typedef struct {
+	uint8 version;
+	uint8 flags;
+	uint16 pad;
+	int32 noise_level; /* current noise level */
+	wl_rx_signal_metric_t current_bss;
+	wl_rx_signal_metric_t target_bss;
+} wl_lqm_t;
+
+/* ##### Ecounters v2 section ##### */
+
+#define ECOUNTERS_VERSION_2	2
+
+/* Enumeration of various ecounters request types. This namespace is different from
+ * global reportable stats namespace.
+*/
+enum {
+	WL_ECOUNTERS_XTLV_REPORT_REQ = 1
+};
+
+/* Input structure for ecounters IOVAR */
+typedef struct ecounters_config_request_v2 {
+	uint16 version;		/* config version */
+	uint16 len;		/* Length of this struct including variable len */
+	uint16 logset;		/* Set where data will go. */
+	uint16 reporting_period;	/* reporting_period */
+	uint16 num_reports;	/* Number of timer expirations to report on */
+	uint8 pad[2];		/* Reserved for future use */
+	uint8 ecounters_xtlvs[];	/* Statistics Types (tags) to retrieve. */
+} ecounters_config_request_v2_t;
+
+#define ECOUNTERS_STATS_TYPES_FLAG_SLICE	0x1
+#define ECOUNTERS_STATS_TYPES_FLAG_IFACE	0x2
+#define ECOUNTERS_STATS_TYPES_FLAG_GLOBAL	0x4
+
+/* Slice mask bits */
+#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0	0x1
+#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE1	0x2
+
+typedef struct ecounters_stats_types_report_req {
+	/* flags: bit0 = slice, bit1 = iface, bit2 = global,
+	 * rest reserved
+	 */
+	uint16 flags;
+	uint16 if_index;	/* host interface index */
+	uint16 slice_mask;	/* bit0 = slice0, bit1=slice1, rest reserved */
+	uint8 pad[2];	/* padding */
+	uint8 stats_types_req[]; /* XTLVs of requested types */
+} ecounters_stats_types_report_req_t;
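+
+/* Request sketch (illustrative only): asking for slice-scoped stats from
+ * slice 0 only, using the flag and mask bits defined above:
+ *
+ *	ecounters_stats_types_report_req_t req = {0};
+ *	req.flags = ECOUNTERS_STATS_TYPES_FLAG_SLICE;
+ *	req.slice_mask = ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0;
+ *	... requested type XTLVs would then follow in stats_types_req[] ...
+ */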
+
+/* -------------- dynamic BTCOEX --------------- */
+#define DCTL_TROWS	2			/**< currently practical number of rows  */
+#define DCTL_TROWS_MAX	4			/**<  2 extra rows RFU */
+/* DYNCTL profile flags */
+#define DCTL_FLAGS_DISABLED	0		/**< default value: all features disabled */
+#define DCTL_FLAGS_DYNCTL	(1 << 0)	/**<  1 - enabled, 0 - legacy only */
+#define DCTL_FLAGS_DESENSE	(1 << 1)	/**< auto desense is enabled */
+#define DCTL_FLAGS_MSWITCH	(1 << 2)	/**< mode switching is enabled */
+#define DCTL_FLAGS_PWRCTRL	(1 << 3)	/**< Tx power control is enabled */
+/* for now AGG on/off is handled separately  */
+#define DCTL_FLAGS_TX_AGG_OFF	(1 << 4)	/**< TBD: allow TX agg Off */
+#define DCTL_FLAGS_RX_AGG_OFF	(1 << 5)	/**< TBD: allow RX agg Off */
+/* used for dry run testing only */
+#define DCTL_FLAGS_DRYRUN	(1 << 7)	/**< Enables dynctl dry run mode  */
+#define IS_DYNCTL_ON(prof)	((prof->flags & DCTL_FLAGS_DYNCTL) != 0)
+#define IS_DESENSE_ON(prof)	((prof->flags & DCTL_FLAGS_DESENSE) != 0)
+#define IS_MSWITCH_ON(prof)	((prof->flags & DCTL_FLAGS_MSWITCH) != 0)
+#define IS_PWRCTRL_ON(prof)	((prof->flags & DCTL_FLAGS_PWRCTRL) != 0)
+/* desense level currently in use */
+#define DESENSE_OFF	0
+#define DFLT_DESENSE_MID	12
+#define DFLT_DESENSE_HIGH	2
+
+/**
+ * dynctl data points(a set of btpwr & wlrssi thresholds)
+ * for mode & desense switching
+ */
+typedef struct btc_thr_data {
+	int8	mode;	/**< used by desense sw */
+	int8	bt_pwr;	/**< BT tx power threshold */
+	int8	bt_rssi;	/**< BT rssi threshold */
+	/* wl rssi range when mode or desense change may be needed */
+	int8	wl_rssi_high;
+	int8	wl_rssi_low;
+} btc_thr_data_t;
+
+/* dynctl. profile data structure  */
+#define DCTL_PROFILE_VER 0x01
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct  dctl_prof {
+	uint8 version;  /**< dynctl profile version */
+	/* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */
+	uint8 flags;  /**< bit[6:3] reserved, bit7 - Dryrun (sim) - On */
+	/**  wl desense levels to apply */
+	uint8	dflt_dsns_level;
+	uint8	low_dsns_level;
+	uint8	mid_dsns_level;
+	uint8	high_dsns_level;
+	/** mode switching hysteresis in dBm */
+	int8	msw_btrssi_hyster;
+	/** default btcoex mode */
+	uint8	default_btc_mode;
+	/** num of active rows in mode switching table */
+	uint8	msw_rows;
+	/** num of rows in desense table */
+	uint8	dsns_rows;
+	/** dynctl mode switching data table  */
+	btc_thr_data_t msw_data[DCTL_TROWS_MAX];
+	/** dynctl desense switching data table */
+	btc_thr_data_t dsns_data[DCTL_TROWS_MAX];
+} BWL_POST_PACKED_STRUCT dctl_prof_t;
+#include <packed_section_end.h>
+
+/**  dynctl status info */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct  dynctl_status {
+	uint8 sim_on;	/**< true if simulation is On */
+	uint16	bt_pwr_shm; /**< BT per/task power as read from ucode  */
+	int8	bt_pwr;		/**< BT pwr extracted & converted to dBm */
+	int8	bt_rssi;	/**< BT rssi in dBm */
+	int8	wl_rssi;	/**< last wl rssi reading used by btcoex */
+	uint8	dsns_level; /**< current desense level */
+	uint8	btc_mode;   /**< current btcoex mode */
+	/* add more status items if needed, pad to a 4-byte boundary if needed */
+} BWL_POST_PACKED_STRUCT dynctl_status_t;
+#include <packed_section_end.h>
+
+/**  dynctl simulation (dryrun data) */
+#include <packed_section_start.h>
+typedef BWL_PRE_PACKED_STRUCT struct  dynctl_sim {
+	uint8 sim_on;	/**< simulation mode on/off */
+	int8 btpwr;		/**< simulated BT power in dBm */
+	int8 btrssi;	/**< simulated BT rssi in dBm */
+	int8 wlrssi;	/**< simulated WL rssi in dBm */
+} BWL_POST_PACKED_STRUCT dynctl_sim_t;
+/* no default structure packing */
+#include <packed_section_end.h>
+
+/** PTK key maintained per SCB */
+#define RSN_TEMP_ENCR_KEY_LEN 16
+typedef struct wpa_ptk {
+	uint8 kck[RSN_KCK_LENGTH]; /**< EAPOL-Key Key Confirmation Key (KCK) */
+	uint8 kek[RSN_KEK_LENGTH]; /**< EAPOL-Key Key Encryption Key (KEK) */
+	uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 1 (TK1) */
+	uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 2 (TK2) */
+} wpa_ptk_t;
+
+/** GTK key maintained per SCB */
+typedef struct wpa_gtk {
+	uint32 idx;
+	uint32 key_len;
+	uint8  key[DOT11_MAX_KEY_SIZE];
+} wpa_gtk_t;
+
+/** FBT Auth Response Data structure */
+typedef struct wlc_fbt_auth_resp {
+	uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */
+	uint8 pad[2];
+	uint8 pmk_r1_name[WPA2_PMKID_LEN];
+	wpa_ptk_t ptk; /**< pairwise key */
+	wpa_gtk_t gtk; /**< group key */
+	uint32 ie_len;
+	uint8 status;  /**< Status of parsing FBT authentication
+					Request in application
+					*/
+	uint8 ies[1]; /**< IEs contains MDIE, RSNIE,
+					FBTIE (ANonce, SNonce,R0KH-ID, R1KH-ID)
+					*/
+} wlc_fbt_auth_resp_t;
+
+/** FBT Action Response frame */
+typedef struct wlc_fbt_action_resp {
+	uint16 version; /**< structure version */
+	uint16 length; /**< length of structure */
+	uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */
+	uint8 data_len;  /**< len of ie from Category */
+	uint8 data[1]; /**< data contains category, action, sta address, target ap,
+						status code,fbt response frame body
+						*/
+} wlc_fbt_action_resp_t;
+
+#define MACDBG_PMAC_ADDR_INPUT_MAXNUM 16
+#define MACDBG_PMAC_OBJ_TYPE_LEN 8
+
+typedef struct _wl_macdbg_pmac_param_t {
+	char type[MACDBG_PMAC_OBJ_TYPE_LEN];
+	uint8 step;
+	uint8 w_en;
+	uint16 num;
+	uint32 bitmap;
+	uint8 addr_raw;
+	uint8 addr_num;
+	uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM];
+	uint8 pad0[2];
+	uint32 w_val;
+} wl_macdbg_pmac_param_t;
+
+/** IOVAR 'svmp_sampcol' parameter. Used to set and read SVMP_SAMPLE_COLLECT's setting */
+typedef struct wl_svmp_sampcol_param {
+	uint32 version;           /* version */
+	uint8  enable;
+	uint8  trigger_mode;      /* SVMP_SAMPCOL_TRIGGER */
+	uint8  trigger_mode_s[2]; /* SVMP_SAMPCOL_PKTPROC */
+	uint8  data_samplerate;   /* SVMP_SAMPCOL_SAMPLERATE */
+	uint8  data_sel_phy1;     /* SVMP_SAMPCOL_PHY1MUX */
+	uint8  data_sel_rx1;      /* SVMP_SAMPCOL_RX1MUX without iqCompOut */
+	uint8  data_sel_dualcap;  /* SVMP_SAMPCOL_RX1MUX */
+	uint8  pack_mode;         /* SVMP_SAMPCOL_PACK */
+	uint8  pack_order;
+	uint8  pack_cfix_fmt;
+	uint8  pack_1core_sel;
+	uint16 waitcnt;
+	uint16 caplen;
+	uint32 buff_addr_start;   /* in word-size (2-bytes) */
+	uint32 buff_addr_end;     /* note: Tcl in byte-size, HW in vector-size (8-bytes) */
+	uint8  int2vasip;
+	uint8  PAD;
+	uint16 status;
+} wl_svmp_sampcol_t;
+
+#define WL_SVMP_SAMPCOL_PARAMS_VERSION	1
+
+enum {
+	SVMP_SAMPCOL_TRIGGER_PKTPROC_TRANSITION = 0,
+	SVMP_SAMPCOL_TRIGGER_FORCE_IMMEDIATE,
+	SVMP_SAMPCOL_TRIGGER_RADAR_DET
+};
+
+enum {
+	SVMP_SAMPCOL_PHY1MUX_GPIOOUT = 0,
+	SVMP_SAMPCOL_PHY1MUX_FFT,
+	SVMP_SAMPCOL_PHY1MUX_DBGHX,
+	SVMP_SAMPCOL_PHY1MUX_RX1MUX
+};
+
+enum {
+	SVMP_SAMPCOL_RX1MUX_FARROWOUT = 4,
+	SVMP_SAMPCOL_RX1MUX_IQCOMPOUT,
+	SVMP_SAMPCOL_RX1MUX_DCFILTEROUT,
+	SVMP_SAMPCOL_RX1MUX_RXFILTEROUT,
+	SVMP_SAMPCOL_RX1MUX_ACIFILTEROUT
+};
+
+enum {
+	SVMP_SAMPCOL_SAMPLERATE_1XBW = 0,
+	SVMP_SAMPCOL_SAMPLERATE_2XBW
+};
+
+enum {
+	SVMP_SAMPCOL_PACK_DUALCAP = 0,
+	SVMP_SAMPCOL_PACK_4CORE,
+	SVMP_SAMPCOL_PACK_2CORE,
+	SVMP_SAMPCOL_PACK_1CORE
+};
+
+enum {
+	SVMP_SAMPCOL_PKTPROC_RESET = 0,
+	SVMP_SAMPCOL_PKTPROC_CARRIER_SEARCH,
+	SVMP_SAMPCOL_PKTPROC_WAIT_FOR_NB_PWR,
+	SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W1_PWR,
+	SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W2_PWR,
+	SVMP_SAMPCOL_PKTPROC_OFDM_PHY,
+	SVMP_SAMPCOL_PKTPROC_TIMING_SEARCH,
+	SVMP_SAMPCOL_PKTPROC_CHAN_EST_1,
+	SVMP_SAMPCOL_PKTPROC_LEG_SIG_DEC,
+	SVMP_SAMPCOL_PKTPROC_SIG_DECODE_1,
+	SVMP_SAMPCOL_PKTPROC_SIG_DECODE_2,
+	SVMP_SAMPCOL_PKTPROC_HT_AGC,
+	SVMP_SAMPCOL_PKTPROC_CHAN_EST_2,
+	SVMP_SAMPCOL_PKTPROC_PAY_DECODE,
+	SVMP_SAMPCOL_PKTPROC_DSSS_CCK_PHY,
+	SVMP_SAMPCOL_PKTPROC_WAIT_ENERGY_DROP,
+	SVMP_SAMPCOL_PKTPROC_WAIT_NCLKS,
+	SVMP_SAMPCOL_PKTPROC_PAY_DEC_EXT,
+	SVMP_SAMPCOL_PKTPROC_SIG_FAIL_DELAY,
+	SVMP_SAMPCOL_PKTPROC_RIFS_SEARCH,
+	SVMP_SAMPCOL_PKTPROC_BOARD_SWITCH_DIV_SEARCH,
+	SVMP_SAMPCOL_PKTPROC_DSSS_CCK_BOARD_SWITCH_DIV_SEARCH,
+	SVMP_SAMPCOL_PKTPROC_CHAN_EST_3,
+	SVMP_SAMPCOL_PKTPROC_CHAN_EST_4,
+	SVMP_SAMPCOL_PKTPROC_FINE_TIMING_SEARCH,
+	SVMP_SAMPCOL_PKTPROC_SET_CLIP_GAIN,
+	SVMP_SAMPCOL_PKTPROC_NAP,
+	SVMP_SAMPCOL_PKTPROC_VHT_SIGA_DEC,
+	SVMP_SAMPCOL_PKTPROC_VHT_SIGB_DEC,
+	SVMP_SAMPCOL_PKTPROC_PKT_ABORT,
+	SVMP_SAMPCOL_PKTPROC_DCCAL
+};
+
+/** IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */
+typedef struct svmp_mem {
+	uint32 addr;	/**< offset to read svmp memory from vasip base address */
+	uint16 len;	/**< length in count of uint16's */
+	uint16 val;	/**< set the range of addr/len with a value */
+} svmp_mem_t;
+
+/** IOVAR 'mu_rate' parameter. read/set mu rate for up to four users */
+#define MU_RATE_CFG_VERSION	1
+typedef struct mu_rate {
+	uint16	version;	/**< version of the structure as defined by MU_RATE_CFG_VERSION */
+	uint16	length;		/**< length of entire structure */
+	uint8	auto_rate;	/**< enable/disable auto rate */
+	uint8	PAD;
+	uint16	rate_user[4];	/**< rate per each of four users, set to -1 for no change */
+} mu_rate_t;
+
+/** IOVAR 'mu_group' parameter. Used to set and read MU group recommendation setting */
+#define WL_MU_GROUP_AUTO_COMMAND      -1
+#define WL_MU_GROUP_PARAMS_VERSION     3
+#define WL_MU_GROUP_METHOD_NAMELEN    64
+#define WL_MU_GROUP_NGROUP_MAX        15
+#define WL_MU_GROUP_NUSER_MAX          4
+#define WL_MU_GROUP_METHOD_MIN         0
+#define WL_MU_GROUP_NUMBER_AUTO_MIN    1
+#define WL_MU_GROUP_NUMBER_AUTO_MAX   15
+#define WL_MU_GROUP_NUMBER_FORCED_MAX  8
+#define WL_MU_GROUP_METHOD_OLD         0
+#define WL_MU_GROUP_MODE_AUTO          0
+#define WL_MU_GROUP_MODE_FORCED        1
+#define WL_MU_GROUP_FORCED_1GROUP      1
+#define WL_MU_GROUP_ENTRY_EMPTY       -1
+typedef struct mu_group {
+	uint32 version;          /* version */
+	int16  forced;           /* forced group recommendation */
+	int16  forced_group_mcs; /* forced group with mcs */
+	int16  forced_group_num; /* forced group number */
+	int16  group_option[WL_MU_GROUP_NGROUP_MAX][WL_MU_GROUP_NUSER_MAX];
+	                         /* set mode for forced grouping and read mode for auto grouping */
+	int16  group_GID[WL_MU_GROUP_NGROUP_MAX];
+	int16  group_method;     /* method for VASIP group recommendation */
+	int16  group_number;     /* requested number for VASIP group recommendation */
+	int16  auto_group_num;   /* exact number from VASIP group recommendation */
+	int8   group_method_name[WL_MU_GROUP_METHOD_NAMELEN];
+	uint8  PAD[2];
+} mu_group_t;
+
+typedef struct mupkteng_sta {
+	struct ether_addr ea;
+	uint8  PAD[2];
+	int32 nrxchain;
+	int32 idx;
+} mupkteng_sta_t;
+
+typedef struct mupkteng_client {
+	int32 rspec;
+	int32 idx;
+	int32 flen;
+	int32 nframes;
+} mupkteng_client_t;
+
+typedef struct mupkteng_tx {
+	mupkteng_client_t client[8];
+	int32 nclients;
+	int32 ntx;
+} mupkteng_tx_t;
+
+/*
+ * MU Packet engine interface.
+ * The following two definitions will go into
+ * wlioctl_defs.h
+ * when wl utility changes are merged to EAGLE TOB & Trunk
+ */
+
+#define WL_MUPKTENG_PER_TX_START		0x10
+#define WL_MUPKTENG_PER_TX_STOP		        0x20
+
+/** IOVAR 'mu_policy' parameter. Used to configure MU admission control policies */
+#define WL_MU_POLICY_PARAMS_VERSION     1
+#define WL_MU_POLICY_SCHED_DEFAULT	60
+#define WL_MU_POLICY_DISABLED		0
+#define WL_MU_POLICY_ENABLED		1
+#define WL_MU_POLICY_NRX_MIN		1
+#define WL_MU_POLICY_NRX_MAX		2
+typedef struct mu_policy {
+	uint16 version;
+	uint16 length;
+	uint32 sched_timer;
+	uint32 pfmon;
+	uint32 pfmon_gpos;
+	uint32 samebw;
+	uint32 nrx;
+	uint32 max_muclients;
+} mu_policy_t;
+
+#define WL_NAN_BAND_STR_SIZE 5       /* sizeof ("auto") */
+
+/** Definitions of different NAN Bands */
+/* do not change the order */
+enum {
+		NAN_BAND_B = 0,
+		NAN_BAND_A,
+		NAN_BAND_AUTO,
+		NAN_BAND_INVALID = 0xFF
+};
+
+/* ifdef WL11ULB */
+/* ULB Mode configured via "ulb_mode" IOVAR */
+enum {
+	ULB_MODE_DISABLED = 0,
+	ULB_MODE_STD_ALONE_MODE = 1,	/* Standalone ULB Mode */
+	ULB_MODE_DYN_MODE = 2,		/* Dynamic ULB Mode */
+	/* Add all other enums before this */
+	MAX_SUPP_ULB_MODES
+};
+
+/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only.
+ * Values of this enumeration are also used to specify 'Current Operational Bandwidth'
+ * and 'Primary Operational Bandwidth' sub-fields in 'ULB Operations' field (used in
+ * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute)
+ */
+typedef enum {
+	ULB_BW_DISABLED = 0,
+	ULB_BW_10MHZ    = 1,	/* Standalone ULB BW in 10 MHz BW */
+	ULB_BW_5MHZ     = 2,	/* Standalone ULB BW in 5 MHz BW */
+	ULB_BW_2P5MHZ   = 3,	/* Standalone ULB BW in 2.5 MHz BW */
+	/* Add all other enums before this */
+	MAX_SUPP_ULB_BW
+} ulb_bw_type_t;
+/* endif WL11ULB */
+
+
+#define WL_MESH_IOCTL_VERSION     1
+#define MESH_IOC_BUFSZ            512 /* sufficient ioc buff size for mesh */
+/* container for mesh iovtls & events */
+typedef struct wl_mesh_ioc {
+	uint16  version;        /* interface command or event version */
+	uint16  id;             /* mesh ioctl cmd  ID  */
+	uint16  len;            /* total length of all tlv records in data[]  */
+	uint16  pad;            /* pad to 32 bit alignment */
+	uint8   data[];       /* var len payload of bcm_xtlv_t type */
+} wl_mesh_ioc_t;
+
+enum wl_mesh_cmds {
+	WL_MESH_CMD_ENABLE = 1,
+	WL_MESH_CMD_JOIN = 2,
+	WL_MESH_CMD_PEER_STATUS = 3,
+	WL_MESH_CMD_ADD_ROUTE = 4,
+	WL_MESH_CMD_DEL_ROUTE = 5,
+	WL_MESH_CMD_ADD_FILTER = 6,
+	WL_MESH_CMD_ENAB_AL_METRIC = 7
+};
+
+enum wl_mesh_cmd_xtlv_id {
+	WL_MESH_XTLV_ENABLE = 1,
+	WL_MESH_XTLV_JOIN = 2,
+	WL_MESH_XTLV_STATUS = 3,
+	WL_MESH_XTLV_ADD_ROUTE = 4,
+	WL_MESH_XTLV_DEL_ROUTE = 5,
+	WL_MESH_XTLV_ADD_FILTER = 6,
+	WL_MESH_XTLV_ENAB_AIRLINK = 7
+};
+/* endif WLMESH */
+
+#ifdef WLMESH
+#ifndef SAE_MAX_PASSWD_LEN
+#define SAE_MAX_PASSWD_LEN	32
+#endif
+#endif
+
+/* Fast BSS Transition parameter configuration */
+#define FBT_PARAM_CURRENT_VERSION 0
+
+typedef struct _wl_fbt_params {
+	uint16	version;		/* version of the structure
+					* as defined by FBT_PARAM_CURRENT_VERSION
+					*/
+	uint16	length;			/* length of the entire structure */
+
+	uint16 param_type;		/* type of parameter defined below */
+	uint16 param_len;		/* length of the param_value */
+	uint8 param_value[1];		/* variable length */
+} wl_fbt_params_t;
+
+#define WL_FBT_PARAM_TYPE_RSNIE			0
+#define WL_FBT_PARAM_TYPE_FTIE			0x1
+#define WL_FBT_PARAM_TYPE_SNONCE		0x2
+#define WL_FBT_PARAM_TYPE_MDE			0x3
+#define WL_FBT_PARAM_TYPE_PMK_R0_NAME		0x4
+#define WL_FBT_PARAM_TYPE_R0_KHID		0x5
+#define WL_FBT_PARAM_TYPE_R1_KHID		0x6
+#define WL_FBT_PARAM_TYPE_FIRST_INVALID		0x7
+
+/* Assoc Mgr commands for fine control of assoc */
+#define WL_ASSOC_MGR_CURRENT_VERSION  0x0
+
+typedef struct {
+	uint16	version;		/* version of the structure as
+					 * defined by WL_ASSOC_MGR_CURRENT_VERSION
+					 */
+	uint16	length;			/* length of the entire structure */
+
+	uint16 cmd;
+	uint16 params;
+} wl_assoc_mgr_cmd_t;
+
+#define WL_ASSOC_MGR_CMD_PAUSE_ON_EVT		0 /* have assoc pause on certain events */
+#define WL_ASSOC_MGR_CMD_ABORT_ASSOC		1
+
+#define WL_ASSOC_MGR_PARAMS_EVENT_NONE			0 /* use this to resume as well as clear */
+#define WL_ASSOC_MGR_PARAMS_PAUSE_EVENT_AUTH_RESP	1
+
+#define WL_WINVER_STRUCT_VER_1 (1)
+
+typedef struct wl_winver {
+
+	/* Version and length of this structure. Length includes all fields in wl_winver_t */
+	uint16 struct_version;
+	uint16 struct_length;
+
+	/* Windows operating system version info (Microsoft provided) */
+	struct {
+		uint32 major_ver;
+		uint32 minor_ver;
+		uint32 build;
+	} os_runtime;
+
+	/* NDIS runtime version (Microsoft provided) */
+	struct {
+		uint16 major_ver;
+		uint16 minor_ver;
+	} ndis_runtime;
+
+	/* NDIS Driver version (Broadcom provided) */
+	struct {
+		uint16 major_ver;
+		uint16 minor_ver;
+	} ndis_driver;
+
+	/* WDI Upper Edge (UE) Driver version (Microsoft provided) */
+	struct {
+		uint8 major_ver;
+		uint8 minor_ver;
+		uint8 suffix;
+	} wdi_ue;
+
+	/* WDI Lower Edge (LE) Driver version (Broadcom provided) */
+	struct {
+		uint8 major_ver;
+		uint8 minor_ver;
+		uint8 suffix;
+	} wdi_le;
+	uint8 PAD[2];
+} wl_winver_t;
+
+/* defined(WLRCC) || defined(ROAM_CHANNEL_CACHE) */
+#define MAX_ROAM_CHANNEL      20
+typedef struct {
+	int32 n;
+	chanspec_t channels[MAX_ROAM_CHANNEL];
+} wl_roam_channel_list_t;
+/* endif RCC || ROAM_CHANNEL_CACHE */
+
+/* values for IOV_MFP arg */
+enum {
+    WL_MFP_NONE = 0,
+    WL_MFP_CAPABLE,
+    WL_MFP_REQUIRED
+};
+
+typedef enum {
+	CHANSW_UNKNOWN = 0,	/* channel switch due to unknown reason */
+	CHANSW_SCAN = 1,	/* channel switch due to scan */
+	CHANSW_PHYCAL = 2,	/* channel switch due to phy calibration */
+	CHANSW_INIT = 3,	/* channel set at WLC up time */
+	CHANSW_ASSOC = 4,	/* channel switch due to association */
+	CHANSW_ROAM = 5,	/* channel switch due to roam */
+	CHANSW_MCHAN = 6,	/* channel switch triggered by mchan module */
+	CHANSW_IOVAR = 7,	/* channel switch due to IOVAR */
+	CHANSW_CSA_DFS = 8,	/* channel switch due to chan switch  announcement from AP */
+	CHANSW_APCS = 9,	/* Channel switch from AP channel select module */
+	CHANSW_AWDL = 10,	/* channel switch due to AWDL */
+	CHANSW_FBT = 11,	/* Channel switch from FBT module for action frame response */
+	CHANSW_UPDBW = 12,	/* channel switch at update bandwidth */
+	CHANSW_ULB = 13,	/* channel switch at ULB */
+	CHANSW_LAST = 14	/* last channel switch reason */
+} chansw_reason_t;
+
+/*
+ * WOWL unassociated mode power save pattern.
+ */
+typedef struct wowl_radio_duty_cycle {
+	uint16 wake_interval;
+	uint16  sleep_interval;
+} wowl_radio_duty_cycle_t;
+
+typedef struct nd_ra_ol_limits {
+	uint16 version;         /* version of the iovar buffer */
+	uint16 type;            /* type of data provided */
+	uint16 length;          /* length of the entire structure */
+	uint16 pad1;            /* pad union to 4 byte boundary */
+	union {
+		struct {
+			uint16 min_time;         /* seconds, min time for RA offload hold */
+			uint16 lifetime_percent;
+			/* percent, lifetime percentage for offload hold time */
+		} lifetime_relative;
+		struct {
+			uint16 hold_time;        /* seconds, RA offload hold time */
+			uint16 pad2;             /* unused */
+		} fixed;
+	} limits;
+} nd_ra_ol_limits_t;
+
+#define ND_RA_OL_LIMITS_VER 1
+
+/* nd_ra_ol_limits sub-types */
+#define ND_RA_OL_LIMITS_REL_TYPE   0     /* relative, percent of RA lifetime */
+#define ND_RA_OL_LIMITS_FIXED_TYPE 1     /* fixed time */
+
+/* buffer lengths for the different nd_ra_ol_limits types */
+#define ND_RA_OL_LIMITS_REL_TYPE_LEN   12
+#define ND_RA_OL_LIMITS_FIXED_TYPE_LEN  10
+
+/*
+ * Temperature Throttling control mode
+ */
+typedef struct wl_temp_control {
+	uint8 enable;
+	uint8 PAD;
+	uint16 control_bit;
+} wl_temp_control_t;
+
+/* SensorHub Interworking mode */
+
+#define SHUB_CONTROL_VERSION    1
+#define SHUB_CONTROL_LEN    12
+
+typedef struct {
+	uint16  verison;
+	uint16  length;
+	uint16  cmd;
+	uint16  op_mode;
+	uint16  interval;
+	uint16  enable;
+} shub_control_t;
+
+/* WLC_MAJOR_VER <= 5 */
+/* Data structures for non-TLV format */
+
+/* Data structures for rsdb caps */
+/*
+ * The flags field of the rsdb_caps_response is designed to be
+ * a Bit Mask. As of now only Bit 0 is used as mentioned below.
+ */
+
+/* Bit-0 in flags is used to indicate if the cores can operate synchronously,
+* i.e. either as 2x2 MIMO or 2x(1x1 SISO). This is true only for 4349 variants.
+* 0 - device can operate only in rsdb mode (e.g. 4364)
+* 1 - device can operate in both rsdb and mimo (e.g. 4359 variants)
+*/
+
+#define WL_RSDB_CAPS_VER 2
+#define SYNCHRONOUS_OPERATION_TRUE	(1 << 0)
+#define WL_RSDB_CAPS_FIXED_LEN  OFFSETOF(rsdb_caps_response_t, num_chains)
+
+typedef struct rsdb_caps_response {
+	uint8 ver;		/* Version */
+	uint8 len;		/* length of this structure excluding ver and len */
+	uint8 rsdb;		/* TRUE for rsdb chip */
+	uint8 num_of_cores;	/* no of d11 cores */
+	uint16 flags;		/* Flags to indicate various capabilities */
+	uint8 num_chains[1];	/* Tx/Rx chains for each core */
+} rsdb_caps_response_t;
+
+/* Data structures for rsdb bands */
+
+#define WL_RSDB_BANDS_VER       2
+#define WL_RSDB_BANDS_FIXED_LEN  OFFSETOF(rsdb_bands_t, band)
+
+typedef struct rsdb_bands
+{
+	uint8 ver;
+	uint8 len;
+	uint16 num_cores;	/* num of D11 cores */
+	int16 band[1];		/* The band operating on each of the d11 cores */
+} rsdb_bands_t;
+
+/* rsdb config */
+
+#define WL_RSDB_CONFIG_VER 3
+#define ALLOW_SIB_PARALLEL_SCAN	(1 << 0)
+#define MAX_BANDS 2
+
+#define WL_RSDB_CONFIG_LEN sizeof(rsdb_config_t)
+
+
+typedef uint8 rsdb_opmode_t;
+typedef uint32 rsdb_flags_t;
+
+typedef enum rsdb_modes {
+	WLC_SDB_MODE_NOSDB_MAIN = 1, /* 2X2 or MIMO mode (applicable only for 4355) */
+	WLC_SDB_MODE_NOSDB_AUX = 2,
+	WLC_SDB_MODE_SDB_MAIN = 3, /* This is RSDB mode(default) applicable only for 4364 */
+	WLC_SDB_MODE_SDB_AUX = 4,
+	WLC_SDB_MODE_SDB_AUTO = 5, /* Same as WLC_RSDB_MODE_RSDB(1+1) mode above */
+} rsdb_modes_t;
+
+typedef struct rsdb_config {
+	uint8 ver;
+	uint8 len;
+	uint16 reserved;
+	rsdb_opmode_t non_infra_mode;
+	rsdb_opmode_t infra_mode[MAX_BANDS];
+	rsdb_flags_t flags[MAX_BANDS];
+	rsdb_opmode_t current_mode;   /* Valid only in GET, returns the current mode */
+	uint8  pad[3];
+} rsdb_config_t;
+
+/* WLC_MAJOR_VER >= 5 */
+/* TLV definitions and data structures for rsdb subcmds */
+
+enum wl_rsdb_cmd_ids {
+	/* RSDB ioctls */
+	WL_RSDB_CMD_VER = 0,
+	WL_RSDB_CMD_CAPS = 1,
+	WL_RSDB_CMD_BANDS = 2,
+	WL_RSDB_CMD_CONFIG = 3,
+	/* Add before this !! */
+	WL_RSDB_CMD_LAST
+};
+#define WL_RSDB_IOV_VERSION	0x1
+
+typedef struct rsdb_caps_response_v1 {
+	uint8 rsdb;		/* TRUE for rsdb chip */
+	uint8 num_of_cores;	/* no of d11 cores */
+	uint16 flags;		/* Flags to indicate various capabilities */
+	uint8 num_chains[MAX_NUM_D11CORES];	/* Tx/Rx chains for each core */
+	uint8 band_cap[MAX_NUM_D11CORES]; /* band cap bitmask per slice */
+} rsdb_caps_response_v1_t;
+
+typedef struct rsdb_bands_v1
+{
+	uint8 num_cores;		/* num of D11 cores */
+	uint8 pad;			/* padding bytes for 4 byte alignment */
+	int8 band[MAX_NUM_D11CORES];	/* The band operating on each of the d11 cores */
+} rsdb_bands_v1_t;
+
+typedef struct rsdb_config_xtlv {
+	rsdb_opmode_t reserved1;	/* Non_infra mode is no longer applicable */
+	rsdb_opmode_t infra_mode[MAX_BANDS]; /* Target mode for Infra association */
+	uint8 pad;	/* pad bytes for 4 byte alignment */
+	rsdb_flags_t  flags[MAX_BANDS];
+	rsdb_opmode_t current_mode; /* GET only; has current mode of operation */
+	uint8 pad1[3];
+} rsdb_config_xtlv_t;
+
+/* Definitions for slot_bss chanseq iovar */
+#define WL_SLOT_BSS_VERSION 1
+
+enum wl_slotted_bss_cmd_id {
+	WL_SLOTTED_BSS_CMD_VER = 0,
+	WL_SLOTTED_BSS_CMD_CHANSEQ = 1
+};
+typedef uint16 chan_seq_type_t;
+enum chan_seq_type {
+	CHAN_SEQ_TYPE_AWDL = 1,
+	CHAN_SEQ_TYPE_SLICE = 2,
+	CHAN_SEQ_TYPE_NAN = 3
+};
+typedef uint8 sched_flag_t;
+enum sched_flag {
+	NO_SDB_SCHED = 0x1,
+	SDB_TDM_SCHED = 0x2,
+	SDB_SPLIT_BAND_SCHED = 0x4, /* default mode for 4357 */
+	MAIN_ONLY = 0x8,
+	AUX_ONLY = 0x10,
+	SDB_DUAL_TIME = (MAIN_ONLY | AUX_ONLY),
+	NO_SDB_MAIN_ONLY = (NO_SDB_SCHED | MAIN_ONLY), /* default mode for 4364 */
+	SDB_TDM_SCHED_MAIN = (SDB_TDM_SCHED | MAIN_ONLY),
+	SDB_TDM_SCHED_AUX = (SDB_TDM_SCHED | AUX_ONLY),
+	SDB_TDM_SCHED_DUAL_TIME = (SDB_TDM_SCHED | SDB_DUAL_TIME),
+	SDB_SPLIT_BAND_SCHED_DUAL_TIME = (SDB_SPLIT_BAND_SCHED | SDB_DUAL_TIME)
+};
+
+typedef struct chan_seq_tlv_data {
+	uint32 flags;
+	uint8 data[1];
+} chan_seq_tlv_data_t;
+
+typedef struct chan_seq_tlv {
+	chan_seq_type_t type;
+	uint16 len;
+	chan_seq_tlv_data_t chanseq_data[1];
+} chan_seq_tlv_t;
+
+typedef struct sb_channel_sequence {
+	sched_flag_t sched_flags; /* (sdb-tdm or sdb-sb or Dual-Time) */
+	uint8 num_seq; /* number of chan_seq_tlv following */
+	uint16 pad;
+	chan_seq_tlv_t seq[1];
+} sb_channel_sequence_t;
+
+typedef struct slice_chan_seq {
+	uint8 slice_index;  /* 0(Main) or 1 (Aux) */
+	uint8 num_chanspecs;
+	uint16 pad;
+	chanspec_t chanspecs[1];
+} slice_chan_seq_t;
+
+#define WL_SLICE_CHAN_SEQ_FIXED_LEN   OFFSETOF(slice_chan_seq_t, chanspecs)
+
+typedef struct sim_pm_params {
+	uint32 enabled;
+	uint16 cycle;
+	uint16 up;
+} sim_pm_params_t;
+
+/* Bits for fw_status */
+#define NAP_DISABLED_HOST		0x01   /* Host has disabled NAP through nap_enable */
+#define NAP_DISABLED_RSSI		0x02   /* Disabled because of nap_rssi_threshold */
+
+/* Bits for hw_status */
+#define NAP_HWCFG			0x01   /* State of NAP config bit in phy HW */
+
+/* ifdef WL_NATOE */
+#define WL_NATOE_IOCTL_VERSION		1
+#define WL_NATOE_IOC_BUFSZ		512	/* sufficient ioc buff size for natoe */
+#define WL_NATOE_DBG_STATS_BUFSZ	2048
+
+/* config natoe STA and AP IP's structure */
+typedef struct {
+	uint32 sta_ip;
+	uint32 sta_netmask;
+	uint32 sta_router_ip;
+	uint32 sta_dnsip;
+	uint32 ap_ip;
+	uint32 ap_netmask;
+} wl_natoe_config_ips_t;
+
+/* natoe ports config structure */
+typedef struct {
+	uint16 start_port_num;
+	uint16 no_of_ports;
+} wl_natoe_ports_config_t;
+
+/* natoe ports exception info */
+typedef struct {
+	uint16 sta_port_num;
+	uint16 dst_port_num;    /* for SIP type protocol, dst_port_num info can be ignored by FW */
+	uint32 ip;              /* for SIP ip is APcli_ip and for port clash it is dst_ip */
+	uint8  entry_type;      /* Create/Destroy */
+	uint8  pad[3];
+} wl_natoe_exception_port_t;
+
+/* container for natoe ioctls & events */
+typedef struct wl_natoe_ioc {
+	uint16  version;        /* interface command or event version */
+	uint16  id;             /* natoe ioctl cmd  ID  */
+	uint16  len;            /* total length of all tlv records in data[]  */
+	uint16  pad;            /* pad to 32 bit alignment */
+	uint8   data[];       /* var len payload of bcm_xtlv_t type */
+} wl_natoe_ioc_t;
+
+enum wl_natoe_cmds {
+	WL_NATOE_CMD_ENABLE = 1,
+	WL_NATOE_CMD_CONFIG_IPS = 2,
+	WL_NATOE_CMD_CONFIG_PORTS = 3,
+	WL_NATOE_CMD_DBG_STATS = 4,
+	WL_NATOE_CMD_EXCEPTION_PORT = 5,
+	WL_NATOE_CMD_SKIP_PORT = 6,
+	WL_NATOE_CMD_TBL_CNT = 7
+};
+
+enum wl_natoe_cmd_xtlv_id {
+	WL_NATOE_XTLV_ENABLE = 1,
+	WL_NATOE_XTLV_CONFIG_IPS = 2,
+	WL_NATOE_XTLV_CONFIG_PORTS = 3,
+	WL_NATOE_XTLV_DBG_STATS = 4,
+	WL_NATOE_XTLV_EXCEPTION_PORT = 5,
+	WL_NATOE_XTLV_SKIP_PORT = 6,
+	WL_NATOE_XTLV_TBL_CNT = 7
+};
+
+/* endif WL_NATOE */
+
+enum wl_idauth_cmd_ids {
+	WL_IDAUTH_CMD_CONFIG				= 1,
+	WL_IDAUTH_CMD_PEER_INFO				= 2,
+	WL_IDAUTH_CMD_COUNTERS				= 3,
+	WL_IDAUTH_CMD_LAST
+};
+enum wl_idauth_xtlv_id {
+	WL_IDAUTH_XTLV_AUTH_ENAB			= 0x1,
+	WL_IDAUTH_XTLV_GTK_ROTATION			= 0x2,
+	WL_IDAUTH_XTLV_EAPOL_COUNT			= 0x3,
+	WL_IDAUTH_XTLV_EAPOL_INTRVL			= 0x4,
+	WL_IDAUTH_XTLV_BLKLIST_COUNT			= 0x5,
+	WL_IDAUTH_XTLV_BLKLIST_AGE			= 0x6,
+	WL_IDAUTH_XTLV_PEERS_INFO			= 0x7,
+	WL_IDAUTH_XTLV_COUNTERS				= 0x8
+};
+enum wl_idauth_stats {
+	WL_AUTH_PEER_STATE_AUTHORISED			= 0x01,
+	WL_AUTH_PEER_STATE_BLACKLISTED			= 0x02,
+	WL_AUTH_PEER_STATE_4WAY_HS_ONGOING		= 0x03,
+	WL_AUTH_PEER_STATE_LAST
+};
+typedef struct {
+	uint16 state;				/* Peer State: Authorised or Blacklisted */
+	struct ether_addr peer_addr;		/* peer Address */
+	uint32 blklist_end_time;		/* Time of blacklist end */
+} auth_peer_t;
+typedef struct wl_idauth_counters {
+	uint32 auth_reqs;			/* No of auth req recvd */
+	uint32 mic_fail;			/* No of mic fails */
+	uint32 four_way_hs_fail;		/* No of 4-way handshake fails */
+} wl_idauth_counters_t;
+
+#define WLC_UTRACE_LEN  512
+#define WLC_UTRACE_READ_END 0
+#define WLC_UTRACE_MORE_DATA 1
+typedef struct wl_utrace_capture_args_v1 {
+	uint32 length;
+	uint32 flag;
+} wl_utrace_capture_args_v1_t;
+
+#define UTRACE_CAPTURE_VER_2	2
+typedef struct wl_utrace_capture_args_v2 {
+	/* structure control */
+	uint16 version;		/**< structure version */
+	uint16 length;		/**< length of the response */
+	uint32 flag;		/* Indicates if there is more data or not */
+} wl_utrace_capture_args_v2_t;
+
+/* XTLV IDs for the Health Check "hc" iovar top level container */
+enum {
+	WL_HC_XTLV_ID_CAT_HC = 1,		/* category for HC as a whole */
+	WL_HC_XTLV_ID_CAT_DATAPATH_TX = 2,	/* Datapath Tx */
+	WL_HC_XTLV_ID_CAT_DATAPATH_RX = 3,	/* Datapath Rx */
+	WL_HC_XTLV_ID_CAT_SCAN	= 4,		/* Scan */
+};
+
+/* Health Check: Common XTLV IDs for sub-elements in the top level container
+ * Number starts at 0x8000 to be out of the way for category specific IDs.
+ */
+enum {
+	WL_HC_XTLV_ID_ERR       = 0x8000,       /* for sub-command  err return */
+	WL_HC_XTLV_ID_IDLIST    = 0x8001,       /* container for uint16 IDs */
+};
+
+/* Health Check: Datapath TX IDs */
+enum {
+	WL_HC_TX_XTLV_ID_VAL_STALL_THRESHOLD   = 1,     /* stall_threshold */
+	WL_HC_TX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 2,     /* stall_sample_size */
+	WL_HC_TX_XTLV_ID_VAL_STALL_TIMEOUT     = 3,     /* stall_timeout */
+	WL_HC_TX_XTLV_ID_VAL_STALL_FORCE       = 4,     /* stall_force */
+	WL_HC_TX_XTLV_ID_VAL_STALL_EXCLUDE     = 5,     /* stall_exclude */
+	WL_HC_TX_XTLV_ID_VAL_FC_TIMEOUT        = 6,     /* flow ctl timeout */
+	WL_HC_TX_XTLV_ID_VAL_FC_FORCE          = 7,     /* flow ctl force failure */
+	WL_HC_TX_XTLV_ID_VAL_DELAY_TO_TRAP     = 8,     /* delay threshold for forced trap */
+	WL_HC_TX_XTLV_ID_VAL_DELAY_TO_RPT      = 9,     /* delay threshold for event log report */
+};
+
+/* Health Check: Datapath RX IDs */
+enum {
+	WL_HC_RX_XTLV_ID_VAL_DMA_STALL_TIMEOUT = 1,     /* dma_stall_timeout */
+	WL_HC_RX_XTLV_ID_VAL_DMA_STALL_FORCE   = 2,     /* dma_stall test trigger */
+	WL_HC_RX_XTLV_ID_VAL_STALL_THRESHOLD   = 3,     /* stall_threshold */
+	WL_HC_RX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 4,     /* stall_sample_size */
+	WL_HC_RX_XTLV_ID_VAL_STALL_FORCE       = 5,     /* stall test trigger */
+};
+
+/* Health Check: Datapath SCAN IDs */
+enum {
+	WL_HC_XTLV_ID_VAL_SCAN_STALL_THRESHOLD	= 1,	/* scan stall threshold */
+};
+
+/* IDs of Health Check report structures for sub types of health checks within WL */
+enum {
+	WL_HC_DD_UNDEFINED = 0,		/* Undefined */
+	WL_HC_DD_RX_DMA_STALL = 1,	/* RX DMA stall check */
+	WL_HC_DD_RX_STALL = 2,		/* RX stall check */
+	WL_HC_DD_TX_STALL = 3,		/* TX stall check */
+	WL_HC_DD_SCAN_STALL = 4,	/* SCAN stall check */
+	WL_HC_DD_MAX
+};
+
+/*
+ * Health Check report structures for sub types of health checks within WL
+ */
+
+/* Health Check report structure for Rx DMA Stall check */
+typedef struct {
+	uint16 type;
+	uint16 length;
+	uint16 timeout;
+	uint16 stalled_dma_bitmap;
+} wl_rx_dma_hc_info_t;
+
+/* Health Check report structure for Tx packet failure check */
+typedef struct {
+	uint16 type;
+	uint16 length;
+	uint32 stall_bitmap;
+	uint32 stall_bitmap1;
+	uint32 failure_ac;
+	uint32 threshold;
+	uint32 tx_all;
+	uint32 tx_failure_all;
+} wl_tx_hc_info_t;
+
+/* Health Check report structure for Rx dropped packet failure check */
+typedef struct {
+	uint16 type;
+	uint16 length;
+	uint32 bsscfg_idx;
+	uint32 rx_hc_pkts;
+	uint32 rx_hc_dropped_all;
+	uint32 rx_hc_alert_th;
+} wl_rx_hc_info_t;
+
+/* HE top level command IDs */
+enum {
+	WL_HE_CMD_ENAB = 0,
+	WL_HE_CMD_FEATURES = 1,
+	WL_HE_CMD_TWT_SETUP = 2,
+	WL_HE_CMD_TWT_TEARDOWN = 3,
+	WL_HE_CMD_TWT_INFO = 4,
+	WL_HE_CMD_BSSCOLOR = 5,
+	WL_HE_CMD_PARTIAL_BSSCOLOR = 6,
+	WL_HE_CMD_LAST
+};
+
+#define WL_HEB_VERSION	0
+
+/* HEB top level command IDs */
+enum {
+	WL_HEB_CMD_ENAB = 0,
+	WL_HEB_CMD_NUM_HEB = 1,
+	WL_HEB_CMD_COUNTERS = 1,
+	WL_HEB_CMD_CLEAR_COUNTERS = 2,
+	WL_HEB_CMD_LAST
+};
+
+/* HEB counters structures */
+typedef struct {
+	uint16 pre_event;
+	uint16 start_event;
+	uint16 end_event;
+	uint16 missed;
+} wl_heb_int_cnt_t;
+
+typedef struct {
+	/* structure control */
+	uint16 version;	/* structure version */
+	uint16 length;	/* data length (starting after this field) */
+	wl_heb_int_cnt_t heb_int_cnt[1];
+} wl_heb_cnt_t;
+
+
+/* TWT Setup descriptor */
+typedef struct {
+	/* Setup Command. */
+	uint8 setup_cmd;	/* See TWT_SETUP_CMD_XXXX in 802.11ah.h,
+				 * valid when bcast_twt is FALSE.
+				 */
+	/* Flow attributes */
+	uint8 flow_flags;	/* See WL_TWT_FLOW_FLAG_XXXX below */
+	uint8 flow_id;		/* must be between 0 and 7 */
+	/* Target Wake Time */
+	uint8 wake_type;	/* See WL_TWT_TIME_TYPE_XXXX below */
+	uint32 wake_time_h;	/* target wake time - BSS TSF (us) */
+	uint32 wake_time_l;
+	uint32 wake_dur;	/* target wake duration in us units */
+	uint32 wake_int;	/* target wake interval */
+} wl_twt_sdesc_t;
+
+/* Flow flags */
+#define WL_TWT_FLOW_FLAG_BROADCAST	(1<<0)
+#define WL_TWT_FLOW_FLAG_IMPLICIT	(1<<1)
+#define WL_TWT_FLOW_FLAG_UNANNOUNCED	(1<<2)
+#define WL_TWT_FLOW_FLAG_TRIGGER	(1<<3)
+
+/* Flow id */
+#define WL_TWT_FLOW_ID_FID	0x07	/* flow id */
+#define WL_TWT_FLOW_ID_GID_MASK	0x70	/* group id - broadcast TWT only */
+#define WL_TWT_FLOW_ID_GID_SHIFT 4
+
+/* Wake type */
+/* TODO: not yet finalized */
+#define WL_TWT_TIME_TYPE_BSS	0	/* The time specified in wake_time_h/l is
+					 * the BSS TSF time.
+					 */
+#define WL_TWT_TIME_TYPE_OFFSET	1	/* The time specified in wake_time_h/l is an offset
+					 * of the TSF time when the iovar is processed.
+					 */
+
+#define WL_TWT_SETUP_VER	0
+
+/* HE TWT Setup command */
+typedef struct {
+	/* structure control */
+	uint16 version;	/* structure version */
+	uint16 length;	/* data length (starting after this field) */
+	/* peer address */
+	struct ether_addr peer;	/* leave it all 0s for AP */
+	/* session id */
+	uint8 dialog;	/* an arbitrary number to identify the session */
+	uint8 pad;
+	/* setup descriptor */
+	wl_twt_sdesc_t desc;
+} wl_twt_setup_t;
+
+#define WL_TWT_TEARDOWN_VER	0
+
+/* HE TWT Teardown command */
+typedef struct {
+	/* structure control */
+	uint16 version;	/* structure version */
+	uint16 length;	/* data length (starting after this field) */
+	/* peer address */
+	struct ether_addr peer;	/* leave it all 0s for AP */
+	/* flow attributes */
+	uint8 flow_flags;	/* See WL_TWT_FLOW_FLAG_XXXX above.
+				 * (only BROADCAST is applicable)
+				 */
+	uint8 flow_id;		/* must be between 0 and 7 */
+} wl_twt_teardown_t;
+
+/* twt information descriptor */
+typedef struct {
+	uint8 flow_flags;	/* See WL_TWT_INFO_FLAG_XXX below */
+	uint8 flow_id;
+	uint8 pad[2];
+	uint32 next_twt_h;
+	uint32 next_twt_l;
+} wl_twt_idesc_t;
+
+/* Flow flags */
+#define WL_TWT_INFO_FLAG_RESP_REQ	(1<<0)	/* Request response */
+
+#define WL_TWT_INFO_VER	0
+
+/* HE TWT Information command */
+typedef struct {
+	/* structure control */
+	uint16 version;	/* structure version */
+	uint16 length;	/* data length (starting after this field) */
+	/* peer address */
+	struct ether_addr peer;	/* leave it all 0s for AP */
+	uint8 pad[2];
+	/* information descriptor */
+	wl_twt_idesc_t desc;
+} wl_twt_info_t;
+
+/* Current version for wlc_clm_power_limits_req_t structure and flags */
+#define WLC_CLM_POWER_LIMITS_REQ_VERSION 1
+/* "clm_power_limits" iovar request structure */
+typedef struct wlc_clm_power_limits_req {
+	/* Input. Structure and flags version */
+	uint32 version;
+	/* Full length of buffer (includes this structure and space for TLV-encoded PPR) */
+	uint32 buflen;
+	/* Input. Flags (see WLC_CLM_POWER_LIMITS_INPUT_FLAG_... below) */
+	uint32 input_flags;
+	/* Input. CC of region whose data is being requested */
+	char cc[WLC_CNTRY_BUF_SZ];
+	/* Input. Channel/subchannel in chanspec_t format */
+	uint32 chanspec;
+	/* Subchannel encoded as clm_limits_type_t */
+	uint32 clm_subchannel;
+	/* Input. 0-based antenna index */
+	uint32 antenna_idx;
+	/* Output. General flags (see WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_... below) */
+	uint32 output_flags;
+	/* Output. 2.4G country flags, encoded as clm_flags_t enum */
+	uint32 clm_country_flags_2g;
+	/* Output. 5G country flags, encoded as clm_flags_t enum */
+	uint32 clm_country_flags_5g;
+	/* Output. Length of TLV-encoded PPR data that follows this structure */
+	uint32 ppr_tlv_size;
+	/* Output. Beginning of buffer for TLV-encoded PPR data */
+	uint8 ppr_tlv[1];
+} wlc_clm_power_limits_req_t;
+
+/* Input. Do not apply SAR limits */
+#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_SAR				0x00000001
+/* Input. Do not apply board limits */
+#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_BOARD			0x00000002
+/* Output. Limits taken from product-specific country data */
+#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_PRODUCT_LIMITS			0x00000001
+/* Output. Limits taken from product-specific worldwide data */
+#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_WORLDWIDE_LIMITS		0x00000002
+/* Output. Limits taken from country-default (all-product) data */
+#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_DEFAULT_COUNTRY_LIMITS		0x00000004
+
+/*
+ * WOG (Wake On Googlecast)
+ */
+
+#define MAX_GCAST_APPID_CNT_LIMIT 50
+#define MAX_DNS_LABEL 63
+
+typedef struct wog_appid {
+	uint8 appID[MAX_DNS_LABEL+1];
+} wog_appid_t;
+
+enum {
+	WOG_APPID_ADD,
+	WOG_APPID_DEL,
+	WOG_APPID_CLEAR,
+	WOG_APPID_LIST,
+	WOG_MAX_APPID_CNT
+};
+
+#define WOG_APPID_IOV_VER 1
+typedef struct wog_appid_iov {
+	/* version for iovar */
+	uint32 ver;
+	/* add/del/clear/list operation */
+	uint32 operation;
+	/* for adding or deleting multiple items */
+	/* for WOG_MAX_APPID_CNT, this value is used as the max AppID count */
+	uint32 cnt;
+	/* Application IDs */
+	/* If FW finds an AppID from this list, FW will respond to discovery */
+	/* without waking up the host */
+	wog_appid_t appids[1];
+} wog_appid_iov_t;
+
+/* dns service record */
+/* service name : _googlecast */
+typedef struct wog_srv_record {
+	uint32 ttl;
+	uint16 port; /* tcp 8008 or 8009 */
+	uint8 PAD[2];
+} wog_srv_record_t;
+
+#define GCAST_MAX_MODEL_NAME_LEN 16
+#define GCAST_MAX_FNAME_LEN 64
+#define GCAST_MAX_RS_LEN 60
+
+#define GCAST_UUID_LEN 32
+#define GCAST_PUBLICKEY_ID_LEN 64
+#define GCAST_VER_LEN 2
+typedef struct wog_txt_record {
+	uint32 ttl;
+	/* id : UUID for the receiver */
+	char id[GCAST_UUID_LEN+1];
+
+	/* Cast protocol version supported. Begins at 2 */
+	/* and is incremented by 1 with each version */
+	char ver[GCAST_VER_LEN+1];
+
+	/* 256bit receiver Subject Public Key Identifier from the SSL cert */
+	char public_key[GCAST_PUBLICKEY_ID_LEN+1];
+
+	/* A bitfield of device capabilities. */
+	/* bit 0 : video_out (1:has video out, 0:no video) */
+	/* bit 1 : video_in */
+	/* bit 2 : audio_out */
+	/* bit 3 : audio_in */
+	/* bit 4 : dev_mode */
+	/*	 (1:dev mode enabled, 0: not enabled) */
+	char capability;
+
+	/* Receiver status flag 0:IDLE, 1(BUSY/JOIN) */
+	/* IDLE : The receiver is idle */
+	/*	   and doesn't need to be connected now. */
+	/* BUSY/JOIN : The receiver is hosting an activity */
+	/*	 and invites the sender to join */
+	char receiver_status_flag;
+
+	uint8 PAD0[1];
+
+	char friendly_name[GCAST_MAX_FNAME_LEN+1];
+	uint8 PAD1[3];
+
+	char model_name[GCAST_MAX_MODEL_NAME_LEN+1];
+	uint8 PAD2[3];
+
+	/* Receiver Status text for Cast Protocol v2 */
+	/* Spec says that if the status text exceeds 60 characters in length, */
+	/* it is truncated at 60 characters and */
+	/* a UTF-8 ellipsis character is appended to indicate truncation. */
+	/* But our dongle won't use UTF-8 ellipsis. It's not a big deal. */
+	char receiver_status[GCAST_MAX_RS_LEN+1];
+	uint8 PAD3[3];
+} wog_txt_record_t;
+
+/* ip will be taken from the ip of wog_info_t */
+typedef struct wog_a_record {
+	uint32 ttl;
+} wog_a_record_t;
+
+/* Google Cast protocol uses mDNS SD for its discovery */
+#define WOG_SD_RESP_VER 1
+typedef struct wog_sd_resp {
+	/* version for iovar */
+	int32 ver;
+	/* device name of Google Cast receiver */
+	char device_name[MAX_DNS_LABEL+1];
+	/* IP address of Google Cast receiver */
+	uint8 ip[4];
+	/* ttl of PTR response */
+	uint32 ptr_ttl;
+	/* DNS TXT record */
+	wog_txt_record_t txt;
+	/* DNS SRV record */
+	wog_srv_record_t srv;
+	/* DNS A record */
+	wog_a_record_t a;
+} wog_sd_resp_t;
+
+enum wl_mbo_cmd_ids {
+	WL_MBO_CMD_ADD_CHAN_PREF = 1,
+	WL_MBO_CMD_DEL_CHAN_PREF = 2,
+	WL_MBO_CMD_LIST_CHAN_PREF = 3,
+	WL_MBO_CMD_CELLULAR_DATA_CAP = 4,
+	WL_MBO_CMD_DUMP_COUNTERS = 5,
+	WL_MBO_CMD_CLEAR_COUNTERS = 6,
+	WL_MBO_CMD_FORCE_ASSOC = 7,
+	WL_MBO_CMD_BSSTRANS_REJECT = 8,
+	WL_MBO_CMD_SEND_NOTIF = 9,
+	/* Add before this !! */
+	WL_MBO_CMD_LAST
+};
+
+enum wl_mbo_xtlv_id {
+	WL_MBO_XTLV_OPCLASS            = 0x1,
+	WL_MBO_XTLV_CHAN               = 0x2,
+	WL_MBO_XTLV_PREFERENCE         = 0x3,
+	WL_MBO_XTLV_REASON_CODE        = 0x4,
+	WL_MBO_XTLV_CELL_DATA_CAP      = 0x5,
+	WL_MBO_XTLV_COUNTERS           = 0x6,
+	WL_MBO_XTLV_ENABLE             = 0x7,
+	WL_MBO_XTLV_SUB_ELEM_TYPE      = 0x8
+};
+
+typedef struct wl_mbo_counters {
+	/* No of transition req recvd */
+	uint16 trans_req_rcvd;
+	/* No of transition req with disassoc imminent */
+	uint16 trans_req_disassoc;
+	/* No of transition req with BSS Termination */
+	uint16 trans_req_bss_term;
+	/* No of trans req w/ unspecified reason */
+	uint16 trans_resn_unspec;
+	/* No of trans req w/ reason frame loss */
+	uint16 trans_resn_frm_loss;
+	/* No of trans req w/ reason traffic delay */
+	uint16 trans_resn_traffic_delay;
+	/* No of trans req w/ reason insufficient buffer */
+	uint16 trans_resn_insuff_bw;
+	/* No of trans req w/ reason load balance */
+	uint16 trans_resn_load_bal;
+	/* No of trans req w/ reason low rssi */
+	uint16 trans_resn_low_rssi;
+	/* No of trans req w/ reason excessive retransmission */
+	uint16 trans_resn_xcess_retransmn;
+	/* No of trans req w/ reason gray zone */
+	uint16 trans_resn_gray_zone;
+	/* No of trans req w/ reason switch to premium AP */
+	uint16 trans_resn_prem_ap_sw;
+	/* No of transition rejection sent */
+	uint16 trans_rejn_sent;
+	/* No of trans rejn reason excessive frame loss */
+	uint16 trans_rejn_xcess_frm_loss;
+	/* No of trans rejn reason excessive traffic delay */
+	uint16 trans_rejn_xcess_traffic_delay;
+	/* No of trans rejn reason insufficient QoS capability */
+	uint16 trans_rejn_insuffic_qos_cap;
+	/* No of trans rejn reason low RSSI */
+	uint16 trans_rejn_low_rssi;
+	/* No of trans rejn reason high interference */
+	uint16 trans_rejn_high_interference;
+	/* No of trans rejn reason service unavailable */
+	uint16 trans_rejn_service_unavail;
+	/* No of beacon request rcvd */
+	uint16 bcn_req_rcvd;
+	/* No of beacon report sent */
+	uint16 bcn_rep_sent;
+	/* No of null beacon report sent */
+	uint16 null_bcn_rep_sent;
+	/* No of wifi to cell switch */
+	uint16 wifi_to_cell;
+} wl_mbo_counters_t;
+
+/* otpread command */
+#define WL_OTPREAD_VER 1
+
+typedef struct {
+	uint16 version;		/* cmd structure version */
+	uint16 cmd_len;		/* cmd struct len */
+	uint32 rdmode;		/* otp read mode */
+	uint32 rdoffset;	/* byte offset into otp to start read */
+	uint32 rdsize;		/* number of bytes to read */
+} wl_otpread_cmd_t;
+
+/* "otpecc_rows" command */
+typedef struct {
+	uint16 version;		/* version of this structure */
+	uint16 len;			/* len in bytes of this structure */
+	uint32 cmdtype;		/* command type : 0 : read row data, 1 : ECC lock  */
+	uint32 rowoffset;	/* start row offset */
+	uint32 numrows;		/* number of rows */
+	uint8  rowdata[];	/* read rows data */
+} wl_otpecc_rows_t;
+
+#define WL_OTPECC_ROWS_VER  1
+
+#define WL_OTPECC_ROWS_CMD_READ  0
+#define WL_OTPECC_ROWS_CMD_LOCK  1
+
+#define WL_OTPECC_ARGIDX_CMDTYPE		0	/* command type */
+#define WL_OTPECC_ARGIDX_ROWOFFSET		1	/* start row offset */
+#define WL_OTPECC_ARGIDX_NUMROWS		2	/* number of rows */
+
+/* "otpeccrows" raw data size per row */
+#define WL_ECCDUMP_ROW_SIZE_BYTE	6 /* 4 bytes row data + 2 bytes ECC status */
+#define WL_ECCDUMP_ROW_SIZE_WORD	3
+
+/* otpECCstatus */
+#define OTP_ECC_ENAB_SHIFT		13
+#define OTP_ECC_ENAB_MASK		0x7
+#define OTP_ECC_CORR_ST_SHIFT	12
+#define OTP_ECC_CORR_ST_MASK	0x1
+#define OTP_ECC_DBL_ERR_SHIFT	11
+#define OTP_ECC_DBL_ERR_MASK	0x1
+#define OTP_ECC_DED_ST_SHIFT	10
+#define OTP_ECC_DED_ST_MASK		0x1
+#define OTP_ECC_SEC_ST_SHIFT	9
+#define OTP_ECC_SEC_ST_MASK		0x1
+#define OTP_ECC_DATA_SHIFT		0
+#define OTP_ECC_DATA_MASK		0x7f
+
+/* OTP_ECC_CORR_ST field */
+#define OTP_ECC_MODE		1
+#define OTP_NO_ECC_MODE		0
+
+/* OTP_ECC_ENAB field (bit15:13) :
+ * When 2 or 3 bits are set,
+ * it indicates that OTP ECC is enabled on the last row read.
+ * Otherwise, ECC is disabled
+ */
+#define OTP_ECC_ENAB(val) \
+	(bcm_bitcount((uint8 *)&(val), sizeof(uint8)) > 1)
+
+#define WL_LEAKY_AP_STATS_GT_TYPE	0
+#define WL_LEAKY_AP_STATS_PKT_TYPE	1
+typedef struct wlc_leaked_infra_guard_marker {
+	/* type field for this TLV: WL_LEAKY_AP_STATS_GT_TYPE */
+	uint16  type;
+	/* length field for this TLV */
+	uint16  len;
+	/* guard sample sequence number; incremented by 1 on every guard sample */
+	uint32  seq_number;
+	/* Guard time start time (tsf; PS indicated and acked) */
+	uint32  start_time;
+	/* tsf timestamp for the GT end event */
+	uint32  gt_tsf_l;
+	/* Guard time period in ms */
+	uint16  guard_duration;
+	/* Number of PPDUs in the notification */
+	uint16  num_pkts;
+	/* Flags to indicate some states see below */
+	uint8   flag;
+	/* pad for 32-bit alignment */
+	uint8   reserved[3];
+} wlc_leaked_infra_guard_marker_t;
+
+/* Flag information */
+#define WL_LEAKED_GUARD_TIME_NONE	0               /* Not in any guard time */
+#define WL_LEAKED_GUARD_TIME_FRTS	(0x01 << 0)     /* Normal FRTS power save */
+#define WL_LEAKED_GUARD_TIME_SCAN	(0x01 << 1)     /* Channel switch due to scanning */
+#define WL_LEAKED_GUARD_TIME_AWDL_PSF	(0x01 << 2)     /* Channel switch due to AWDL PSF */
+#define WL_LEAKED_GUARD_TIME_AWDL_AW	(0x01 << 3)     /* Channel switch due to AWDL AW */
+#define WL_LEAKED_GUARD_TIME_INFRA_STA	(0x01 << 4)	/* generic type infra sta channel switch */
+#define WL_LEAKED_GUARD_TIME_TERMINATED (0x01 << 7)     /* indicate a GT is terminated early */
+
+typedef struct wlc_leaked_infra_packet_stat {
+	uint16  type;			/* type field for this TLV: WL_LEAKY_AP_STATS_PKT_TYPE */
+	uint16  len;			/* length field for this TLV */
+	uint16  ppdu_len_bytes;		/* PPDU packet length in bytes */
+	uint16  num_mpdus;		/* number of the MPDUs in the PPDU */
+	uint32  ppdu_time;		/* PPDU arrival time at the beginning of the guard time */
+	uint32  rate;			/* PPDU packet rate; Received packet's data rate */
+	uint16  seq_number;		/* sequence number */
+	int8    rssi;			/* RSSI */
+	uint8   tid;			/* tid */
+} wlc_leaked_infra_packet_stat_t;
+
+/* Wake timer structure definition */
+#define WAKE_TIMER_VERSION 1
+#define WAKE_TIMER_NOLIMIT 0xFFFF
+
+typedef struct wake_timer {
+	uint16 ver;
+	uint16 len;
+	uint16 limit;   /* number of events to deliver
+			* 0-disable, 0xffff-indefinite, num_events otherwise
+			*/
+	uint16 count;	/* number of events delivered since enable (get only) */
+	uint16 period;	/* timeout/period in milliseconds */
+} wake_timer_t;
+
+typedef struct wl_desense_restage_gain {
+	uint16 version;
+	uint16 length;
+	uint32 band;
+	uint8 num_cores;
+	uint8 desense_array[WL_TX_CHAINS_MAX];
+	uint8 PAD[3];
+} wl_desense_restage_gain_t;
+
+#define MAX_UCM_CHAINS 5
+#define MAX_UCM_PROFILES 4
+#define UCM_PROFILE_VERSION_1 1
+
+/* UCM per chain attribute struct */
+typedef struct wlc_btcx_chain_attr {
+	uint16 length;			/* chain attr length, version is same as profile version */
+	int8 desense_level;		/* per chain desense level */
+	int8 ack_pwr_strong_rssi;	/* per chain ack power at strong rssi */
+	int8 ack_pwr_weak_rssi;		/* per chain ack power at weak rssi */
+	int8 tx_pwr_strong_rssi;	/* per chain tx power at strong rssi */
+	int8 tx_pwr_weak_rssi;		/* per chain tx power at weak rssi */
+	uint8 PAD[1];			/* additional bytes for alignment */
+} wlc_btcx_chain_attr_t;
+
+typedef struct wlc_btcx_profile_v1 {
+	uint16 version;			/* UCM profile version */
+	uint16 length;			/* profile size */
+	uint16 fixed_length;		/* size of the fixed portion of the profile */
+	uint8 init;			/* profile initialized or not */
+	uint8 chain_attr_count;		/* Number of elements in chain_attr array */
+	uint8 profile_index;		/* profile index */
+	uint8 mode_strong_wl_bt;	/* Mode under strong WLAN and BT RSSI */
+	uint8 mode_weak_wl;		/* Mode under weak WLAN RSSI */
+	uint8 mode_weak_bt;		/* Mode under weak BT RSSI */
+	uint8 mode_weak_wl_bt;		/* Mode under weak BT and WLAN RSSI */
+	int8 mode_wl_hi_lo_rssi_thresh;	/* Strong to weak WLAN RSSI threshold for mode selection */
+	int8 mode_wl_lo_hi_rssi_thresh;	/* Weak to strong WLAN RSSI threshold for mode selection */
+	int8 mode_bt_hi_lo_rssi_thresh;	/* Strong to weak BT RSSI threshold for mode selection */
+	int8 mode_bt_lo_hi_rssi_thresh;	/* Weak to strong BT RSSI threshold for mode selection */
+	int8 desense_wl_hi_lo_rssi_thresh;	/* Strong to weak RSSI threshold for desense */
+	int8 desense_wl_lo_hi_rssi_thresh;	/* Weak to strong RSSI threshold for desense */
+	int8 ack_pwr_wl_hi_lo_rssi_thresh;	/* Strong to weak RSSI threshold for ACK power */
+	int8 ack_pwr_wl_lo_hi_rssi_thresh;	/* Weak to strong RSSI threshold for ACK power */
+	int8 tx_pwr_wl_hi_lo_rssi_thresh;	/* Strong to weak RSSI threshold for Tx power */
+	int8 tx_pwr_wl_lo_hi_rssi_thresh;	/* Weak to strong RSSI threshold for Tx power */
+	uint8 PAD[1];				/* additional bytes for 4 byte alignment */
+	wlc_btcx_chain_attr_t chain_attr[];	/* variable length array with chain attributes */
+} wlc_btcx_profile_v1_t;
+
+#define SSSR_D11_RESET_SEQ_STEPS   5
+#define SSSR_REG_INFO_VER   0
+
+typedef struct sssr_reg_info {
+	uint16 version;
+	uint16 length;  /* length of the structure validated at host */
+	struct {
+		struct {
+			uint32 pmuintmask0;
+			uint32 pmuintmask1;
+			uint32 resreqtimer;
+			uint32 macresreqtimer;
+			uint32 macresreqtimer1;
+		} base_regs;
+	} pmu_regs;
+	struct {
+		struct {
+			uint32 intmask;
+			uint32 powerctrl;
+			uint32 clockcontrolstatus;
+			uint32 powerctrl_mask;
+		} base_regs;
+	} chipcommon_regs;
+	struct {
+		struct {
+			uint32 clockcontrolstatus;
+			uint32 clockcontrolstatus_val;
+		} base_regs;
+		struct {
+			uint32 resetctrl;
+			uint32 itopoobb;
+		} wrapper_regs;
+	} arm_regs;
+	struct {
+		struct {
+			uint32 ltrstate;
+			uint32 clockcontrolstatus;
+			uint32 clockcontrolstatus_val;
+		} base_regs;
+		struct {
+			uint32 itopoobb;
+		} wrapper_regs;
+	} pcie_regs;
+	struct {
+		struct {
+			uint32 ioctrl;
+		} wrapper_regs;
+		uint32 vasip_sr_addr;
+		uint32 vasip_sr_size;
+	} vasip_regs;
+	struct {
+		struct {
+			uint32 xmtaddress;
+			uint32 xmtdata;
+			uint32 clockcontrolstatus;
+			uint32 clockcontrolstatus_val;
+		} base_regs;
+		struct {
+			uint32 resetctrl;
+			uint32 itopoobb;
+			uint32 ioctrl;
+			uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
+		} wrapper_regs;
+		uint32 sr_size;
+	} mac_regs[MAX_NUM_D11CORES];
+} sssr_reg_info_t;
+
+/* ADaptive Power Save (ADPS) structure definition */
+#define WL_ADPS_IOV_MAJOR_VER	1
+#define WL_ADPS_IOV_MINOR_VER	0
+#define WL_ADPS_IOV_MAJOR_VER_SHIFT	8
+#define WL_ADPS_IOV_VER \
+	((WL_ADPS_IOV_MAJOR_VER << WL_ADPS_IOV_MAJOR_VER_SHIFT) | WL_ADPS_IOV_MINOR_VER)
+
+#define ADPS_NUM_DIR	2
+#define ADPS_RX		0
+#define ADPS_TX		1
+
+#define WL_ADPS_IOV_MODE	0x0001
+#define WL_ADPS_IOV_RSSI	0x0002
+#define WL_ADPS_IOV_DUMP	0x0003
+#define WL_ADPS_IOV_DUMP_CLEAR	0x0004
+
+#define ADPS_SUMMARY_STEP_NUM   2
+#define ADPS_SUMMARY_STEP_LOW	0
+#define ADPS_SUMMARY_STEP_HIGH	1
+
+#define ADPS_SUB_IOV_VERSION_1	1
+#define ADPS_SUB_IOV_VERSION_2	2
+
+typedef struct wl_adps_params_v1 {
+	uint16 version;
+	uint16 length;
+	uint8 band;		/* band - 2G or 5G */
+	uint8 mode;		/* operation mode, default = 0 (ADPS disable) */
+	uint16 padding;
+} wl_adps_params_v1_t;
+
+typedef struct wl_adps_rssi {
+	int32 thresh_hi;	/* rssi threshold to resume ADPS operation */
+	int32 thresh_lo;	/* rssi threshold to suspend ADPS operation */
+} wl_adps_rssi_t;
+
+typedef struct wl_adps_rssi_params_v1 {
+	uint16 version;
+	uint16 length;
+	uint8 band;
+	uint8 padding[3];
+	wl_adps_rssi_t rssi;
+} wl_adps_rssi_params_v1_t;
+
+typedef struct adps_stat_elem {
+	uint32 duration;	/* each step duration time (mSec) */
+	uint32 counts;		/* each step hit count number */
+} adps_stat_elem_t;
+
+typedef struct wl_adps_dump_summary_v1 {
+	uint16 version;
+	uint16 length;
+	uint8 mode;					/* operation mode: On/Off */
+	uint8 flags;					/* restrict flags */
+	uint8 current_step;				/* current step */
+	uint8 padding;
+	adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM];	/* statistics */
+} wl_adps_dump_summary_v1_t;
+
+typedef struct wlc_btc_2gchain_dis {
+	uint16 ver;
+	uint16 len;
+	uint8 chain_dis;
+	uint8 flag;
+} wlc_btc_2gchain_dis_t;
+
+#define WLC_BTC_2GCHAIN_DIS_REASSOC	0x1
+#define WLC_BTC_2GCHAIN_DIS_VER1	0x1
+#define WLC_BTC_2GCHAIN_DIS_VER1_LEN	6
+
+enum wl_rpsnoa_cmd_ids {
+	WL_RPSNOA_CMD_ENABLE = 1,
+	WL_RPSNOA_CMD_STATUS,
+	WL_RPSNOA_CMD_PARAMS,
+	WL_RPSNOA_CMD_LAST
+};
+
+typedef struct rpsnoa_cmnhdr {
+	uint16 ver;		/* cmd structure version */
+	uint16 len;		/* cmd structure len */
+	uint32 subcmd;
+	uint32 cnt;
+} rpsnoa_cmnhdr_t;
+
+typedef struct rpsnoa_data {
+	int16 band;
+	int16 value;
+} rpsnoa_data_t;
+
+typedef struct rpsnoa_param {
+	uint16 band;
+	uint8 level;
+	uint8 stas_assoc_check;
+	uint32 pps;
+	uint32 quiet_time;
+} rpsnoa_param_t;
+
+typedef struct rpsnoa_iovar {
+	rpsnoa_cmnhdr_t hdr;
+	rpsnoa_data_t data[1];
+} rpsnoa_iovar_t;
+
+typedef struct rpsnoa_iovar_params {
+	rpsnoa_cmnhdr_t hdr;
+	rpsnoa_param_t param[1];
+} rpsnoa_iovar_params_t;
+
+/* Per-interface reportable stats types */
+enum wl_ifstats_xtlv_id {
+	/* global */
+	WL_IFSTATS_XTLV_SLICE_INDEX = 1,
+	WL_IFSTATS_XTLV_IF_INDEX = 2,
+	WL_IFSTATS_XTLV_MAC_ADDR = 3,
+	WL_IFSTATS_XTLV_REPORT_CMD = 4,	/* Comes in an iovar */
+	WL_IFSTATS_XTLV_BUS_PCIE = 5,
+
+	/* Report data across all SCBs using ecounters */
+	WL_IFSTATS_XTLV_WL_STA_INFO_ECOUNTERS = 0x100,
+
+	/* Per-slice information
+	 * Per-interface reporting could also include slice specific data
+	 */
+	/* xtlv container for reporting */
+	WL_IFSTATS_XTLV_WL_SLICE = 0x301,
+	/* Per-slice AMPDU stats */
+	WL_IFSTATS_XTLV_WL_SLICE_AMPDU_DUMP = 0x302,
+	/* Per-slice BTCOEX stats */
+	WL_IFSTATS_XTLV_WL_SLICE_BTCOEX = 0x303,
+	/* V11_WLCNTRS used in ecounters */
+	WL_IFSTATS_XTLV_WL_SLICE_V11_WLCNTRS = 0x304,
+	/* V30_WLCNTRS Used in ecounters */
+	WL_IFSTATS_XTLV_WL_SLICE_V30_WLCNTRS = 0x305,
+
+	/* Per-interface */
+	/* XTLV container for reporting */
+	WL_IFSTATS_XTLV_IF = 0x501,
+	/* Generic stats applicable to all IFs */
+	WL_IFSTATS_XTLV_GENERIC = 0x502,
+	/* Infra specific */
+	WL_IFSTATS_XTLV_INFRA_SPECIFIC = 0x503,
+	/* MGT counters infra and softAP */
+	WL_IFSTATS_XTLV_MGT_CNT = 0x504,
+	/* AMPDU stats on per-IF */
+	WL_IFSTATS_XTLV_AMPDU_DUMP = 0x505,
+	WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506
+};
+
+/* interface specific mgt count */
+#define WL_MGT_STATS_VERSION_V1	1
+/* Associated stats type: WL_IFSTATS_MGT_CNT */
+typedef struct {
+	uint16	version;
+	uint8   pad[2];
+
+	/* detailed control/management frames */
+	uint32	txnull;
+	uint32	rxnull;
+	uint32	txqosnull;
+	uint32	rxqosnull;
+	uint32	txassocreq;
+	uint32	rxassocreq;
+	uint32	txreassocreq;
+	uint32	rxreassocreq;
+	uint32	txdisassoc;
+	uint32	rxdisassoc;
+	uint32	txassocrsp;
+	uint32	rxassocrsp;
+	uint32	txreassocrsp;
+	uint32	rxreassocrsp;
+	uint32	txauth;
+	uint32	rxauth;
+	uint32	txdeauth;
+	uint32	rxdeauth;
+	uint32	txprobereq;
+	uint32	rxprobereq;
+	uint32	txprobersp;
+	uint32	rxprobersp;
+	uint32	txaction;
+	uint32	rxaction;
+	uint32	txpspoll;
+	uint32	rxpspoll;
+} wl_if_mgt_stats_t;
+
+#define WL_INFRA_STATS_VERSION_V1	1
+/* Associated stats type: WL_IFSTATS_INFRA_SPECIFIC */
+typedef struct wl_infra_stats {
+	uint16 version;             /**< version of the structure */
+	uint8  pad[2];
+	uint32 rxbeaconmbss;
+	uint32 tbtt;
+} wl_if_infra_stats_t;
+
+typedef struct csa_event_data {
+	chanspec_t chan_old;
+	dot11_ext_csa_ie_t ecsa;
+	dot11_mesh_csp_ie_t mcsp;
+	dot11_wide_bw_chan_switch_ie_t wbcs;
+	uint8 PAD;
+} csa_event_data_t;
+
+#endif /* _wlioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl_defs.h b/drivers/net/wireless/bcmdhd/include/wlioctl_defs.h
new file mode 100644
index 0000000..f1c1bf8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl_defs.h
@@ -0,0 +1,2241 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlioctl_defs.h 677667 2017-01-04 07:43:05Z $
+ */
+
+
+#ifndef wlioctl_defs_h
+#define wlioctl_defs_h
+
+
+
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#ifndef USE_NEW_RSPEC_DEFS
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK		0x000000FF      /* rate or HT MCS value */
+#define WL_RSPEC_HE_MCS_MASK		0x0000000F      /* HE MCS value */
+#define WL_RSPEC_HE_NSS_MASK		0x000000F0      /* HE Nss value */
+#define WL_RSPEC_HE_NSS_SHIFT		4               /* HE Nss value shift */
+#define WL_RSPEC_VHT_MCS_MASK		0x0000000F      /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK		0x000000F0      /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT		4               /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK		0x00000300
+#define WL_RSPEC_TXEXP_SHIFT		8
+#define WL_RSPEC_BW_MASK		0x00070000      /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT		16              /* bandwidth shift */
+#define WL_RSPEC_STBC			0x00100000      /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_TXBF			0x00200000      /* bit indicates TXBF mode */
+#define WL_RSPEC_LDPC			0x00400000      /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI			0x00800000      /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK		0x03000000      /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE		0x40000000      /* bit indicates override of mcs only */
+#define WL_RSPEC_OVERRIDE_MODE		0x80000000      /* bit indicates override rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE	0x00000000      /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT	0x01000000      /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT	0x02000000      /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HE	0x03000000      /* HE MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ       0x00010000
+#define WL_RSPEC_BW_40MHZ       0x00020000
+#define WL_RSPEC_BW_80MHZ       0x00030000
+#define WL_RSPEC_BW_160MHZ      0x00040000
+#define WL_RSPEC_BW_10MHZ	0x00050000
+#define WL_RSPEC_BW_5MHZ	0x00060000
+#define WL_RSPEC_BW_2P5MHZ      0x00070000
+
+#define HIGHEST_SINGLE_STREAM_MCS	7 /* MCS values greater than this enable multiple streams */
+
+#endif /* !USE_NEW_RSPEC_DEFS */
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE         0x00000080 /* MCS in use, indicates b0-6 holds an mcs */
+#define OLD_NRATE_RATE_MASK         0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK          0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT         8          /* stf mode shift */
+#define OLD_NRATE_OVERRIDE          0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicates override of mcs only */
+#define OLD_NRATE_SGI               0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING       0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO	0		/* stf mode SISO */
+#define OLD_NRATE_STF_CDD	1		/* stf mode CDD */
+#define OLD_NRATE_STF_STBC	2		/* stf mode STBC */
+#define OLD_NRATE_STF_SDM	3		/* stf mode SDM */
+
+#define WLC_11N_N_PROP_MCS	6
+#define WLC_11N_FIRST_PROP_MCS	87
+#define WLC_11N_LAST_PROP_MCS	102
+
+#define MAX_CCA_CHANNELS 38	/* Max number of 20 MHz wide channels */
+#define MAX_CCA_SECS	60	/* CCA keeps this many seconds history */
+
+#define IBSS_MED        15	/* Medium in-bss congestion percentage */
+#define IBSS_HI         25	/* Hi in-bss congestion percentage */
+#define OBSS_MED        12
+#define OBSS_HI         25
+#define INTERFER_MED    5
+#define INTERFER_HI     10
+
+#define  CCA_FLAG_2G_ONLY		0x01	/* Return a channel from 2.4 GHz band */
+#define  CCA_FLAG_5G_ONLY		0x02	/* Return a channel from 5 GHz band */
+#define  CCA_FLAG_IGNORE_DURATION	0x04	/* Ignore dwell time for each channel */
+#define  CCA_FLAGS_PREFER_1_6_11	0x10
+#define  CCA_FLAG_IGNORE_INTERFER 	0x20 /* do not exclude channel based on interference level */
+
+#define CCA_ERRNO_BAND 		1	/* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION	2	/* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN	3	/* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER	4	/* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW	5	/* Only 1 channel was input */
+
+#define WL_STA_AID(a)		((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM		0x00000001	/* Running a Broadcom driver */
+#define WL_STA_WME		0x00000002	/* WMM association */
+#define WL_STA_NONERP		0x00000004	/* No ERP */
+#define WL_STA_AUTHE		0x00000008	/* Authenticated */
+#define WL_STA_ASSOC		0x00000010	/* Associated */
+#define WL_STA_AUTHO		0x00000020	/* Authorized */
+#define WL_STA_WDS		0x00000040	/* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP	0x00000080	/* WDS traffic/probes flowing properly */
+#define WL_STA_PS		0x00000100	/* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE		0x00000200	/* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK		0x00000400	/* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI		0x00000800	/* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO		0x00001000	/* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP		0x00002000	/* STA 802.11n capable */
+#define WL_STA_SCBSTATS		0x00004000	/* Per STA debug stats */
+#define WL_STA_AMPDU_CAP	0x00008000	/* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP	0x00010000	/* STA AMSDU capable */
+#define WL_STA_MIMO_PS		0x00020000	/* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS		0x00040000	/* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP		0x00080000	/* rifs enabled */
+#define WL_STA_VHT_CAP		0x00100000	/* STA VHT(11ac) capable */
+#define WL_STA_WPS		0x00200000	/* WPS state */
+#define WL_STA_DWDS_CAP		0x01000000	/* DWDS CAP */
+#define WL_STA_DWDS		0x02000000	/* DWDS active */
+#define WL_WDS_LINKUP		WL_STA_WDS_LINKUP	/* deprecated */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING		0x0001	/* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ		0x0002  /* FALSE:20MHz, TRUE:20/40MHz supported */
+#define WL_STA_CAP_MIMO_PS_MASK		0x000C  /* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF		0x0003	/* Mimo PS, no restriction */
+#define WL_STA_CAP_MIMO_PS_RTS		0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define WL_STA_CAP_MIMO_PS_ON		0x0000	/* Mimo PS, MIMO disallowed */
+#define WL_STA_CAP_GF			0x0010	/* Greenfield preamble support */
+#define WL_STA_CAP_SHORT_GI_20		0x0020	/* 20MHz short guard interval support */
+#define WL_STA_CAP_SHORT_GI_40		0x0040	/* 40MHz short guard interval support */
+#define WL_STA_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define WL_STA_CAP_RX_STBC_MASK		0x0300	/* Rx STBC mask */
+#define WL_STA_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define WL_STA_CAP_DELAYED_BA		0x0400	/* delayed BA support */
+#define WL_STA_CAP_MAX_AMSDU		0x0800	/* Max AMSDU size in bytes, 0=3839, 1=7935 */
+#define WL_STA_CAP_DSSS_CCK		0x1000	/* DSSS/CCK supported by the BSS */
+#define WL_STA_CAP_PSMP			0x2000	/* Power Save Multi Poll support */
+#define WL_STA_CAP_40MHZ_INTOLERANT	0x4000	/* 40MHz Intolerant */
+#define WL_STA_CAP_LSIG_TXOP		0x8000	/* L-SIG TXOP protection support */
+
+#define WL_STA_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define WL_STA_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define WL_STA_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define WL_STA_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
+
+/* scb vht flags */
+#define WL_STA_VHT_LDPCCAP	0x0001
+#define WL_STA_SGI80		0x0002
+#define WL_STA_SGI160		0x0004
+#define WL_STA_VHT_TX_STBCCAP	0x0008
+#define WL_STA_VHT_RX_STBCCAP	0x0010
+#define WL_STA_SU_BEAMFORMER	0x0020
+#define WL_STA_SU_BEAMFORMEE	0x0040
+#define WL_STA_MU_BEAMFORMER	0x0080
+#define WL_STA_MU_BEAMFORMEE	0x0100
+#define WL_STA_VHT_TXOP_PS	0x0200
+#define WL_STA_HTC_VHT_CAP	0x0400
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+#define WL_IOCTL_ACTION_GET		0x0
+#define WL_IOCTL_ACTION_SET		0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK	0x1e
+#define WL_IOCTL_ACTION_OVL_RSV		0x20
+#define WL_IOCTL_ACTION_OVL		0x40
+#define WL_IOCTL_ACTION_MASK		0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT	1
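+
+/*
+ * Illustrative decode of an ioctl action word using the masks above
+ * (example only): the set/get direction and overlay index are
+ *	is_set  = (action & WL_IOCTL_ACTION_SET) != 0;
+ *	ovl_idx = (action & WL_IOCTL_ACTION_OVL_IDX_MASK) >> WL_IOCTL_ACTION_OVL_SHIFT;
+ */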
+
+/* For WLC_SET_INFRA ioctl & infra_configuration iovar SET/GET operations */
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_ANY   2	/* deprecated */
+#define WL_BSSTYPE_MESH  3
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE	0x01	/* force passive scan */
+#define WL_SCANFLAGS_RESERVED	0x02	/* Reserved */
+#define WL_SCANFLAGS_PROHIBITED	0x04	/* allow scanning prohibited channels */
+#define WL_SCANFLAGS_OFFCHAN	0x08	/* allow scanning/reporting off-channel APs */
+#define WL_SCANFLAGS_HOTSPOT	0x10	/* automatic ANQP to hotspot APs */
+#define WL_SCANFLAGS_SWTCHAN	0x20	/* Force channel switch for different bandwidth */
+#define WL_SCANFLAGS_FORCE_PARALLEL 0x40 /* Force parallel scan even when actcb_fn_t is on.
+					  * By default parallel scan will be disabled if actcb_fn_t
+					  * is provided.
+					  */
+#define WL_SCANFLAGS_SISO	0x40	/* Use 1 RX chain for scanning */
+#define WL_SCANFLAGS_MIMO	0x80	/* Force MIMO scanning */
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS	0
+#define WL_SCAN_RESULTS_PARTIAL	1
+#define WL_SCAN_RESULTS_PENDING	2
+#define WL_SCAN_RESULTS_ABORTED	3
+#define WL_SCAN_RESULTS_NO_MEM  4
+
+#define SCANOL_ENABLED			(1 << 0)
+#define SCANOL_BCAST_SSID		(1 << 1)
+#define SCANOL_NOTIFY_BCAST_SSID	(1 << 2)
+#define SCANOL_RESULTS_PER_CYCLE	(1 << 3)
+
+/* scan times in milliseconds */
+#define SCANOL_HOME_TIME		45	/* for home channel processing */
+#define SCANOL_ASSOC_TIME		20	/* dwell on a channel while associated */
+#define SCANOL_UNASSOC_TIME		40	/* dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME		110	/* listen on a channel for passive scan */
+#define SCANOL_AWAY_LIMIT		100	/* max time to be away from home channel */
+#define SCANOL_IDLE_REST_TIME		40
+#define SCANOL_IDLE_REST_MULTIPLIER	0
+#define SCANOL_ACTIVE_REST_TIME		20
+#define SCANOL_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_CYCLE_IDLE_REST_TIME	300000	/* Idle Rest Time between Scan Cycle (msec) */
+#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER	0	/* Idle Rest Time Multiplier */
+#define SCANOL_CYCLE_ACTIVE_REST_TIME	200
+#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_MAX_REST_TIME		3600000	/* max rest time between scan cycle (msec) */
+#define SCANOL_CYCLE_DEFAULT		0	/* default for Max Scan Cycle, 0 = forever */
+#define SCANOL_CYCLE_MAX		864000	/* Max Scan Cycle */
+						/* 10 sec/scan cycle => 100 days */
+#define SCANOL_NPROBES			2	/* for Active scan; send n probes on each channel */
+#define SCANOL_NPROBES_MAX		5	/* for Active scan; send n probes on each channel */
+#define SCANOL_SCAN_START_DLY		10	/* delay start of offload scan (sec) */
+#define SCANOL_SCAN_START_DLY_MAX	240	/* delay start of offload scan (sec) */
+#define SCANOL_MULTIPLIER_MAX		10	/* Max Multiplier */
+#define SCANOL_UNASSOC_TIME_MAX		100	/* max dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME_MAX		500	/* max listen on a channel for passive scan */
+#define SCANOL_SSID_MAX			16	/* max supported preferred SSID */
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START      1
+#define WL_SCAN_ACTION_CONTINUE   2
+#define WL_SCAN_ACTION_ABORT      3
+
+#define ANTENNA_NUM_1	1		/* total number of antennas to be used */
+#define ANTENNA_NUM_2	2
+#define ANTENNA_NUM_3	3
+#define ANTENNA_NUM_4	4
+
+#define ANT_SELCFG_AUTO		0x80	/* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK		0x33	/* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST	0	/* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST	1	/* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF	2	/* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF	3	/* default rx antenna configuration */
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE	0	/* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE	1	/* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE	2	/* enable auto detection */
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED	1	/* interference detected */
+#define ITFR_HOME_CHANNEL	2	/* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT	4	/* noisy environment so feature stopped */
+
+#define WL_NUM_RPI_BINS		8
+#define WL_RM_TYPE_BASIC	1
+#define WL_RM_TYPE_CCA		2
+#define WL_RM_TYPE_RPI		3
+#define WL_RM_TYPE_ABORT	-1	/* ABORT any in-progress RM request */
+
+#define WL_RM_FLAG_PARALLEL	(1<<0)
+
+#define WL_RM_FLAG_LATE		(1<<1)
+#define WL_RM_FLAG_INCAPABLE	(1<<2)
+#define WL_RM_FLAG_REFUSED	(1<<3)
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+#define WLC_CIS_DEFAULT	0	/* built-in default */
+#define WLC_CIS_SROM	1	/* source is sprom */
+#define WLC_CIS_OTP	2	/* source is otp */
+
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF	0	/* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON			1	/* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF		2	/* turn off PCL. */
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF	-2	/* turn off PCL.  */
+#define WL_PWRIDX_PCL_ON	-1	/* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT	-2	/* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT	63	/* upper limit */
+/* value >= 0 causes
+ *	- input to be set to that value
+ *	- PCL to be off
+ */
+
+#define BCM_MAC_STATUS_INDICATION	(0x40010200L)
+
+/* magic pattern used to detect a driver/wl mismatch */
+#define WL_TXFIFO_SZ_MAGIC	0xa5a5
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC		0x14e46c77
+
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON	0x01	/* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE		0x02	/* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL	0x04	/* rssi info received on channel (vs offchannel) */
+#define WL_BSS_FLAGS_HS20		0x08	/* hotspot 2.0 capable */
+#define WL_BSS_FLAGS_RSSI_INVALID	0x10	/* BSS contains invalid RSSI */
+#define WL_BSS_FLAGS_RSSI_INACCURATE	0x20	/* BSS contains inaccurate RSSI */
+#define WL_BSS_FLAGS_SNR_INVALID	0x40	/* BSS contains invalid SNR */
+#define WL_BSS_FLAGS_NF_INVALID		0x80	/* BSS contains invalid noise floor */
+
+/* bit definitions for bcnflags in wl_bss_info */
+#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT	0x01 /* beacon had IE, accessnet valid */
+#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ			0x00000100
+#define VHT_BI_80MHZ			    0x00000200
+#define VHT_BI_160MHZ			    0x00000400
+#define VHT_BI_8080MHZ			    0x00000800
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype	set		/* subtype param */
+#define ioctl_pid	used		/* pid param */
+#define ioctl_status	needed		/* status param */
+
+
+/* Enumerate crypto algorithms */
+#define	CRYPTO_ALGO_OFF			0
+#define	CRYPTO_ALGO_WEP1		1
+#define	CRYPTO_ALGO_TKIP		2
+#define	CRYPTO_ALGO_WEP128		3
+#define CRYPTO_ALGO_AES_CCM		4
+#define CRYPTO_ALGO_AES_OCB_MSDU	5
+#define CRYPTO_ALGO_AES_OCB_MPDU	6
+#if !defined(BCMEXTCCX)
+#define CRYPTO_ALGO_NALG		7
+#else
+#define CRYPTO_ALGO_CKIP		7
+#define CRYPTO_ALGO_CKIP_MMH	8
+#define CRYPTO_ALGO_WEP_MMH		9
+#define CRYPTO_ALGO_NALG		10
+#endif /* !BCMEXTCCX */
+
+#define CRYPTO_ALGO_SMS4		11
+#define CRYPTO_ALGO_PMK			12	/* for 802.1x supp to set PMK before 4-way */
+#define CRYPTO_ALGO_BIP			13  /* 802.11w BIP (aes cmac) */
+
+#define CRYPTO_ALGO_AES_GCM     14  /* 128 bit GCM */
+#define CRYPTO_ALGO_AES_CCM256  15  /* 256 bit CCM */
+#define CRYPTO_ALGO_AES_GCM256  16  /* 256 bit GCM */
+#define CRYPTO_ALGO_BIP_CMAC256 17  /* 256 bit BIP CMAC */
+#define CRYPTO_ALGO_BIP_GMAC    18  /* 128 bit BIP GMAC */
+#define CRYPTO_ALGO_BIP_GMAC256 19  /* 256 bit BIP GMAC */
+
+#define CRYPTO_ALGO_NONE        CRYPTO_ALGO_OFF
+
+/* algo bit vector */
+#define KEY_ALGO_MASK(_algo)	(1 << _algo)
+
+#if defined(BCMEXTCCX)
+#define KEY_ALGO_MASK_CCX		(KEY_ALGO_MASK(CRYPTO_ALGO_CKIP) | \
+					KEY_ALGO_MASK(CRYPTO_ALGO_CKIP_MMH) | \
+					KEY_ALGO_MASK(CRYPTO_ALGO_WEP_MMH))
+#endif /* BCMEXTCCX */
+
+#define KEY_ALGO_MASK_WEP		(KEY_ALGO_MASK(CRYPTO_ALGO_WEP1) | \
+					KEY_ALGO_MASK(CRYPTO_ALGO_WEP128) | \
+					KEY_ALGO_MASK(CRYPTO_ALGO_NALG))
+
+#define KEY_ALGO_MASK_AES		(KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM) | \
+					KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM256) | \
+					KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM) | \
+					KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256))
+#define KEY_ALGO_MASK_TKIP		(KEY_ALGO_MASK(CRYPTO_ALGO_TKIP))
+#define KEY_ALGO_MASK_WAPI		(KEY_ALGO_MASK(CRYPTO_ALGO_SMS4))
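+
+/*
+ * Illustrative expansion of the algo bit-vector macros above (example
+ * only): with the default (non-BCMEXTCCX) numbering, KEY_ALGO_MASK_WEP is
+ *	(1 << CRYPTO_ALGO_WEP1) | (1 << CRYPTO_ALGO_WEP128) | (1 << CRYPTO_ALGO_NALG) == 0x8a
+ * and a key's algorithm can be tested with
+ *	(KEY_ALGO_MASK(key_algo) & KEY_ALGO_MASK_WEP) != 0
+ */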
+
+#define WSEC_GEN_MIC_ERROR	0x0001
+#define WSEC_GEN_REPLAY		0x0002
+#define WSEC_GEN_ICV_ERROR	0x0004
+#define WSEC_GEN_MFP_ACT_ERROR	0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR	0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR	0x0020
+
+#define WL_SOFT_KEY	(1 << 0)	/* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY	(1 << 1)	/* Indicates this key is the primary (i.e. tx) key */
+#if defined(BCMEXTCCX)
+#define WL_CKIP_KP	(1 << 4)	/* CMIC */
+#define WL_CKIP_MMH	(1 << 5)	/* CKIP */
+#else
+#define WL_KF_RES_4	(1 << 4)	/* Reserved for backward compat */
+#define WL_KF_RES_5	(1 << 5)	/* Reserved for backward compat */
+#endif /* BCMEXTCCX */
+#define WL_IBSS_PEER_GROUP_KEY	(1 << 6)	/* Indicates a group key for an IBSS peer */
+
+/* wireless security bitvec */
+#define WEP_ENABLED		0x0001
+#define TKIP_ENABLED		0x0002
+#define AES_ENABLED		0x0004
+#define WSEC_SWFLAG		0x0008
+#define SES_OW_ENABLED		0x0040	/* to go into transition mode without setting wep */
+#ifdef BCMWAPI_WPI
+#define SMS4_ENABLED		0x0100
+#endif /* BCMWAPI_WPI */
+
+#define WSEC_WEP_ENABLED(wsec)	((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec)	((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec)	((wsec) & AES_ENABLED)
+
+#ifdef BCMCCX
+#define WSEC_CKIP_KP_ENABLED(wsec)	((wsec) & CKIP_KP_ENABLED)
+#define WSEC_CKIP_MIC_ENABLED(wsec)	((wsec) & CKIP_MIC_ENABLED)
+#define WSEC_CKIP_ENABLED(wsec)	((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED))
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) \
+	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED |	\
+	  CKIP_MIC_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) \
+		((wsec) & \
+		 (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED))
+#endif /* BCMWAPI_WPI */
+#else /* defined BCMCCX */
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+#endif /* BCMCCX */
+
+#define WSEC_SES_OW_ENABLED(wsec)	((wsec) & SES_OW_ENABLED)
+#ifdef BCMWAPI_WAI
+#define WSEC_SMS4_ENABLED(wsec)	((wsec) & SMS4_ENABLED)
+#endif /* BCMWAPI_WAI */
+
+/* Following macros are not used any more. Just kept here to
+ * avoid build issue in BISON/CARIBOU branch
+ */
+#define MFP_CAPABLE		0x0200
+#define MFP_REQUIRED	0x0400
+#define MFP_SHA256		0x0800 /* a special configuration for STA for WIFI test tool */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED	0x0000	/* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE		0x0001	/* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED	0x0002	/* over 802.1x */
+#define WPA_AUTH_PSK		0x0004	/* Pre-shared key */
+#if defined(BCMEXTCCX)
+#define WPA_AUTH_CCKM		0x0008	/* CCKM */
+#define WPA2_AUTH_CCKM		0x0010	/* CCKM2 */
+#endif /* BCMEXTCCX */
+/* #define WPA_AUTH_8021X 0x0020 */	/* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED	0x0040	/* over 802.1x */
+#define WPA2_AUTH_PSK		0x0080	/* Pre-shared key */
+#define BRCM_AUTH_PSK           0x0100  /* BRCM specific PSK */
+#define BRCM_AUTH_DPT		0x0200	/* DPT PSK without group keys */
+#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
+#define WPA_AUTH_WAPI           0x0400
+#define WAPI_AUTH_NONE		WPA_AUTH_NONE	/* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED	0x0400	/* over AS */
+#define WAPI_AUTH_PSK		0x0800	/* Pre-shared key */
+#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
+#define WPA2_AUTH_1X_SHA256	0x1000  /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_TPK		0x2000	/* TDLS Peer Key */
+#define WPA2_AUTH_FT		0x4000	/* Fast Transition. */
+#define WPA2_AUTH_PSK_SHA256	0x8000	/* PSK with SHA256 key derivation */
+/* WPA2_AUTH_SHA256 not used anymore. Just kept here to avoid build issue in DINGO */
+#define WPA2_AUTH_SHA256	0x8000
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+/* pmkid */
+#define	MAXPMKID		16
+
+/* SROM12 changes */
+#define	WLC_IOCTL_MAXLEN		8192	/* max length ioctl buffer required */
+
+
+#define	WLC_IOCTL_SMLEN			256	/* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN		1536    /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN	8192	/* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN	10240	/* Max Sample Collect buffer for two cores */
+#endif
+#define WLC_SAMPLECOLLECT_MAXLEN_LCN40  8192
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC				0
+#define WLC_GET_VERSION				1
+#define WLC_UP					2
+#define WLC_DOWN				3
+#define WLC_GET_LOOP				4
+#define WLC_SET_LOOP				5
+#define WLC_DUMP				6
+#define WLC_GET_MSGLEVEL			7
+#define WLC_SET_MSGLEVEL			8
+#define WLC_GET_PROMISC				9
+#define WLC_SET_PROMISC				10
+/* #define WLC_OVERLAY_IOCTL			11 */ /* not supported */
+#define WLC_GET_RATE				12
+#define WLC_GET_MAX_RATE			13
+#define WLC_GET_INSTANCE			14
+/* #define WLC_GET_FRAG				15 */ /* no longer supported */
+/* #define WLC_SET_FRAG				16 */ /* no longer supported */
+/* #define WLC_GET_RTS				17 */ /* no longer supported */
+/* #define WLC_SET_RTS				18 */ /* no longer supported */
+#define WLC_GET_INFRA				19
+#define WLC_SET_INFRA				20
+#define WLC_GET_AUTH				21
+#define WLC_SET_AUTH				22
+#define WLC_GET_BSSID				23
+#define WLC_SET_BSSID				24
+#define WLC_GET_SSID				25
+#define WLC_SET_SSID				26
+#define WLC_RESTART				27
+#define WLC_TERMINATED				28
+/* #define WLC_DUMP_SCB				28 */ /* no longer supported */
+#define WLC_GET_CHANNEL				29
+#define WLC_SET_CHANNEL				30
+#define WLC_GET_SRL				31
+#define WLC_SET_SRL				32
+#define WLC_GET_LRL				33
+#define WLC_SET_LRL				34
+#define WLC_GET_PLCPHDR				35
+#define WLC_SET_PLCPHDR				36
+#define WLC_GET_RADIO				37
+#define WLC_SET_RADIO				38
+#define WLC_GET_PHYTYPE				39
+#define WLC_DUMP_RATE				40
+#define WLC_SET_RATE_PARAMS			41
+#define WLC_GET_FIXRATE				42
+#define WLC_SET_FIXRATE				43
+/* #define WLC_GET_WEP				42 */ /* no longer supported */
+/* #define WLC_SET_WEP				43 */ /* no longer supported */
+#define WLC_GET_KEY				44
+#define WLC_SET_KEY				45
+#define WLC_GET_REGULATORY			46
+#define WLC_SET_REGULATORY			47
+#define WLC_GET_PASSIVE_SCAN			48
+#define WLC_SET_PASSIVE_SCAN			49
+#define WLC_SCAN				50
+#define WLC_SCAN_RESULTS			51
+#define WLC_DISASSOC				52
+#define WLC_REASSOC				53
+#define WLC_GET_ROAM_TRIGGER			54
+#define WLC_SET_ROAM_TRIGGER			55
+#define WLC_GET_ROAM_DELTA			56
+#define WLC_SET_ROAM_DELTA			57
+#define WLC_GET_ROAM_SCAN_PERIOD		58
+#define WLC_SET_ROAM_SCAN_PERIOD		59
+#define WLC_EVM					60	/* diag */
+#define WLC_GET_TXANT				61
+#define WLC_SET_TXANT				62
+#define WLC_GET_ANTDIV				63
+#define WLC_SET_ANTDIV				64
+/* #define WLC_GET_TXPWR			65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR			66 */ /* no longer supported */
+#define WLC_GET_CLOSED				67
+#define WLC_SET_CLOSED				68
+#define WLC_GET_MACLIST				69
+#define WLC_SET_MACLIST				70
+#define WLC_GET_RATESET				71
+#define WLC_SET_RATESET				72
+/* #define WLC_GET_LOCALE			73 */ /* no longer supported */
+#define WLC_LONGTRAIN				74
+#define WLC_GET_BCNPRD				75
+#define WLC_SET_BCNPRD				76
+#define WLC_GET_DTIMPRD				77
+#define WLC_SET_DTIMPRD				78
+#define WLC_GET_SROM				79
+#define WLC_SET_SROM				80
+#define WLC_GET_WEP_RESTRICT			81
+#define WLC_SET_WEP_RESTRICT			82
+#define WLC_GET_COUNTRY				83
+#define WLC_SET_COUNTRY				84
+#define WLC_GET_PM				85
+#define WLC_SET_PM				86
+#define WLC_GET_WAKE				87
+#define WLC_SET_WAKE				88
+/* #define WLC_GET_D11CNTS			89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK			90	/* ndis only */
+#define WLC_SET_FORCELINK			91	/* ndis only */
+#define WLC_FREQ_ACCURACY			92	/* diag */
+#define WLC_CARRIER_SUPPRESS			93	/* diag */
+#define WLC_GET_PHYREG				94
+#define WLC_SET_PHYREG				95
+#define WLC_GET_RADIOREG			96
+#define WLC_SET_RADIOREG			97
+#define WLC_GET_REVINFO				98
+#define WLC_GET_UCANTDIV			99
+#define WLC_SET_UCANTDIV			100
+#define WLC_R_REG				101
+#define WLC_W_REG				102
+/* #define WLC_DIAG_LOOPBACK			103	old tray diag */
+/* #define WLC_RESET_D11CNTS			104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE				105
+#define WLC_SET_MACMODE				106
+#define WLC_GET_MONITOR				107
+#define WLC_SET_MONITOR				108
+#define WLC_GET_GMODE				109
+#define WLC_SET_GMODE				110
+#define WLC_GET_LEGACY_ERP			111
+#define WLC_SET_LEGACY_ERP			112
+#define WLC_GET_RX_ANT				113
+#define WLC_GET_CURR_RATESET			114	/* current rateset */
+#define WLC_GET_SCANSUPPRESS			115
+#define WLC_SET_SCANSUPPRESS			116
+#define WLC_GET_AP				117
+#define WLC_SET_AP				118
+#define WLC_GET_EAP_RESTRICT			119
+#define WLC_SET_EAP_RESTRICT			120
+#define WLC_SCB_AUTHORIZE			121
+#define WLC_SCB_DEAUTHORIZE			122
+#define WLC_GET_WDSLIST				123
+#define WLC_SET_WDSLIST				124
+#define WLC_GET_ATIM				125
+#define WLC_SET_ATIM				126
+#define WLC_GET_RSSI				127
+#define WLC_GET_PHYANTDIV			128
+#define WLC_SET_PHYANTDIV			129
+#define WLC_AP_RX_ONLY				130
+#define WLC_GET_TX_PATH_PWR			131
+#define WLC_SET_TX_PATH_PWR			132
+#define WLC_GET_WSEC				133
+#define WLC_SET_WSEC				134
+#define WLC_GET_PHY_NOISE			135
+#define WLC_GET_BSS_INFO			136
+#define WLC_GET_PKTCNTS				137
+#define WLC_GET_LAZYWDS				138
+#define WLC_SET_LAZYWDS				139
+#define WLC_GET_BANDLIST			140
+
+#define WLC_GET_BAND				141
+#define WLC_SET_BAND				142
+#define WLC_SCB_DEAUTHENTICATE			143
+#define WLC_GET_SHORTSLOT			144
+#define WLC_GET_SHORTSLOT_OVERRIDE		145
+#define WLC_SET_SHORTSLOT_OVERRIDE		146
+#define WLC_GET_SHORTSLOT_RESTRICT		147
+#define WLC_SET_SHORTSLOT_RESTRICT		148
+#define WLC_GET_GMODE_PROTECTION		149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE	150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE	151
+#define WLC_UPGRADE				152
+/* #define WLC_GET_MRATE			153 */ /* no longer supported */
+/* #define WLC_SET_MRATE			154 */ /* no longer supported */
+#define WLC_GET_IGNORE_BCNS			155
+#define WLC_SET_IGNORE_BCNS			156
+#define WLC_GET_SCB_TIMEOUT			157
+#define WLC_SET_SCB_TIMEOUT			158
+#define WLC_GET_ASSOCLIST			159
+#define WLC_GET_CLK				160
+#define WLC_SET_CLK				161
+#define WLC_GET_UP				162
+#define WLC_OUT					163
+#define WLC_GET_WPA_AUTH			164
+#define WLC_SET_WPA_AUTH			165
+#define WLC_GET_UCFLAGS				166
+#define WLC_SET_UCFLAGS				167
+#define WLC_GET_PWRIDX				168
+#define WLC_SET_PWRIDX				169
+#define WLC_GET_TSSI				170
+#define WLC_GET_SUP_RATESET_OVERRIDE		171
+#define WLC_SET_SUP_RATESET_OVERRIDE		172
+/* #define WLC_SET_FAST_TIMER			173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER			174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER			175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER			176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS			177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL		178
+#define WLC_SET_PROTECTION_CONTROL		179
+#define WLC_GET_PHYLIST				180
+#define WLC_ENCRYPT_STRENGTH			181	/* ndis only */
+#define WLC_DECRYPT_STATUS			182	/* ndis only */
+#define WLC_GET_KEY_SEQ				183
+#define WLC_GET_SCAN_CHANNEL_TIME		184
+#define WLC_SET_SCAN_CHANNEL_TIME		185
+#define WLC_GET_SCAN_UNASSOC_TIME		186
+#define WLC_SET_SCAN_UNASSOC_TIME		187
+#define WLC_GET_SCAN_HOME_TIME			188
+#define WLC_SET_SCAN_HOME_TIME			189
+#define WLC_GET_SCAN_NPROBES			190
+#define WLC_SET_SCAN_NPROBES			191
+#define WLC_GET_PRB_RESP_TIMEOUT		192
+#define WLC_SET_PRB_RESP_TIMEOUT		193
+#define WLC_GET_ATTEN				194
+#define WLC_SET_ATTEN				195
+#define WLC_GET_SHMEM				196	/* diag */
+#define WLC_SET_SHMEM				197	/* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS		198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS		199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST			200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON	201
+#define WLC_TKIP_COUNTERMEASURES		202
+#define WLC_GET_PIOMODE				203
+#define WLC_SET_PIOMODE				204
+#define WLC_SET_ASSOC_PREFER			205
+#define WLC_GET_ASSOC_PREFER			206
+#define WLC_SET_ROAM_PREFER			207
+#define WLC_GET_ROAM_PREFER			208
+#define WLC_SET_LED				209
+#define WLC_GET_LED				210
+#define WLC_GET_INTERFERENCE_MODE		211
+#define WLC_SET_INTERFERENCE_MODE		212
+#define WLC_GET_CHANNEL_QA			213
+#define WLC_START_CHANNEL_QA			214
+#define WLC_GET_CHANNEL_SEL			215
+#define WLC_START_CHANNEL_SEL			216
+#define WLC_GET_VALID_CHANNELS			217
+#define WLC_GET_FAKEFRAG			218
+#define WLC_SET_FAKEFRAG			219
+#define WLC_GET_PWROUT_PERCENTAGE		220
+#define WLC_SET_PWROUT_PERCENTAGE		221
+#define WLC_SET_BAD_FRAME_PREEMPT		222
+#define WLC_GET_BAD_FRAME_PREEMPT		223
+#define WLC_SET_LEAP_LIST			224
+#define WLC_GET_LEAP_LIST			225
+#define WLC_GET_CWMIN				226
+#define WLC_SET_CWMIN				227
+#define WLC_GET_CWMAX				228
+#define WLC_SET_CWMAX				229
+#define WLC_GET_WET				230
+#define WLC_SET_WET				231
+#define WLC_GET_PUB				232
+/* #define WLC_SET_GLACIAL_TIMER		233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER		234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY			235
+#define WLC_SET_KEY_PRIMARY			236
+
+
+/* #define WLC_DUMP_RADIOREGS			237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS			238
+#define WLC_SET_ACI_ARGS			239
+#define WLC_UNSET_CALLBACK			240
+#define WLC_SET_CALLBACK			241
+#define WLC_GET_RADAR				242
+#define WLC_SET_RADAR				243
+#define WLC_SET_SPECT_MANAGMENT			244
+#define WLC_GET_SPECT_MANAGMENT			245
+#define WLC_WDS_GET_REMOTE_HWADDR		246	/* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP			247
+#define WLC_SET_CS_SCAN_TIMER			248
+#define WLC_GET_CS_SCAN_TIMER			249
+#define WLC_MEASURE_REQUEST			250
+#define WLC_INIT				251
+#define WLC_SEND_QUIET				252
+#define WLC_KEEPALIVE			253
+#define WLC_SEND_PWR_CONSTRAINT			254
+#define WLC_UPGRADE_STATUS			255
+#define WLC_CURRENT_PWR				256
+#define WLC_GET_SCAN_PASSIVE_TIME		257
+#define WLC_SET_SCAN_PASSIVE_TIME		258
+#define WLC_LEGACY_LINK_BEHAVIOR		259
+#define WLC_GET_CHANNELS_IN_COUNTRY		260
+#define WLC_GET_COUNTRY_LIST			261
+#define WLC_GET_VAR				262	/* get value of named variable */
+#define WLC_SET_VAR				263	/* set named variable to value */
+#define WLC_NVRAM_GET				264	/* deprecated */
+#define WLC_NVRAM_SET				265
+#define WLC_NVRAM_DUMP				266
+#define WLC_REBOOT				267
+#define WLC_SET_WSEC_PMK			268
+#define WLC_GET_AUTH_MODE			269
+#define WLC_SET_AUTH_MODE			270
+#define WLC_GET_WAKEENTRY			271
+#define WLC_SET_WAKEENTRY			272
+#define WLC_NDCONFIG_ITEM			273	/* currently handled in wl_oid.c */
+#define WLC_NVOTPW				274
+#define WLC_OTPW				275
+#define WLC_IOV_BLOCK_GET			276
+#define WLC_IOV_MODULES_GET			277
+#define WLC_SOFT_RESET				278
+#define WLC_GET_ALLOW_MODE			279
+#define WLC_SET_ALLOW_MODE			280
+#define WLC_GET_DESIRED_BSSID			281
+#define WLC_SET_DESIRED_BSSID			282
+#define	WLC_DISASSOC_MYAP			283
+#define WLC_GET_NBANDS				284	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES			285	/* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO			286	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO			287	/* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY				288	/* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY				289	/* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME			290	/* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID			291	/* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC			292	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE			293	/* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE			294	/* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING			295	/* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING			296	/* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON		297	/* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON		298	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI			299	/* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE			300	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN			301	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN			302	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS			303	/* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC			304	/* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS			305     /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS				306	/* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT			307	/* phy sample collect mode */
+/* #define WLC_UM_PRIV				308 */	/* Deprecated: usermode driver */
+#define WLC_GET_CMD				309
+/* #define WLC_LAST				310 */	/* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE	311	/* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE	312	/* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT	313 */
+/* #define WLC_SET_WAI_RESTRICT	314 */
+/* #define WLC_SET_WAI_REKEY	315 */
+#define WLC_SET_NAT_CONFIG			316	/* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE			317
+#define WLC_GET_TXBF_RATESET			318
+#define WLC_SET_TXBF_RATESET			319
+#define WLC_SCAN_CQ				320
+#define WLC_GET_RSSI_QDB			321 /* qdB portion of the RSSI */
+#define WLC_DUMP_RATESET			322
+#define WLC_ECHO				323
+#define WLC_LAST				324
+#define WLC_SPEC_FLAG			0x80000000 /* For some special IOCTL */
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE		0xABADCEDE
+#endif
+
+/* vx wlc ioctl's offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* begin 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE		0xFFE41420
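+
+/*
+ * Worked example of the encoding above: WL_OID_BASE 0xFFE41420 carries
+ * 0xFF (implementation specific), 0xE4 and 0x14 (the Broadcom PCI vendor
+ * ID bytes) and 0x20 as the base of the custom OID numbers; a custom OID
+ * is then formed by adding the ioctl number, e.g.
+ *	OID_WL_GETINSTANCE == WL_OID_BASE + WLC_GET_INSTANCE == 0xFFE4142E
+ */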
+
+/* NDIS overrides */
+#define OID_WL_GETINSTANCE	(WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK	(WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK	(WL_OID_BASE + WLC_SET_FORCELINK)
+#define	OID_WL_ENCRYPT_STRENGTH	(WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS	(WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM	(WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle support */
+#define OID_STA_CHANSPEC	(WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS		(WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY		(WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY		(WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME	(WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID	(WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE	(WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING	(WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE	(WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC	(WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS	(WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS	(WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG	(WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE	(WL_OID_BASE + WLC_GET_NAT_STATE)
+
+#define WL_DECRYPT_STATUS_SUCCESS	1
+#define WL_DECRYPT_STATUS_FAILURE	2
+#define WL_DECRYPT_STATUS_UNKNOWN	3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS			0
+#define WLC_UPGRADE_PENDING			1
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM		0	/* d11 open authentication */
+#define WL_AUTH_SHARED_KEY		1	/* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED		2	/* try open, then shared if open failed w/rc 13 */
+
+/* a large TX Power as an init value to factor out of MIN() calculations,
+ * keep low enough to fit in an int8, units are .25 dBm
+ */
+#define WLC_TXPWR_MAX		(127)	/* ~32 dBm = 1,500 mW */
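+
+/*
+ * Since the units are quarter dBm, 127 corresponds to 127/4 = 31.75 dBm
+ * (~1.5 W), which is why it can seed expressions such as (illustrative
+ * only; tx_limit_qdbm is a hypothetical variable):
+ *	limit = MIN(WLC_TXPWR_MAX, tx_limit_qdbm);
+ */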
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT			1	/* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK			2	/* d11 loopback data test */
+#define WL_DIAG_MEMORY				3	/* d11 memory test */
+#define WL_DIAG_LED				4	/* LED test */
+#define WL_DIAG_REG				5	/* d11/phy register test */
+#define WL_DIAG_SROM				6	/* srom read/crc test */
+#define WL_DIAG_DMA				7	/* DMA test */
+#define WL_DIAG_LOOPBACK_EXT			8	/* enhanced d11 loopback data test */
+
+#define WL_DIAGERR_SUCCESS			0
+#define WL_DIAGERR_FAIL_TO_RUN			1	/* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED		2	/* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL		3	/* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL		4	/* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL			5	/* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC			6	/* srom crc failed */
+#define WL_DIAGERR_REG_FAIL			7	/* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL			8	/* d11 memory test failed */
+#define WL_DIAGERR_NOMEM			9	/* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL			10	/* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT		11	/* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN		12	/* d11 memory test result in bad pattern */
+
+/* band types */
+#define	WLC_BAND_AUTO		0	/* auto-select */
+#define	WLC_BAND_5G		1	/* 5 GHz */
+#define	WLC_BAND_2G		2	/* 2.4 GHz */
+#define	WLC_BAND_ALL		3	/* all bands */
+#define WLC_BAND_INVALID	-1	/* Invalid band */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G      0
+#define WL_CHAN_FREQ_RANGE_5GL     1
+#define WL_CHAN_FREQ_RANGE_5GM     2
+#define WL_CHAN_FREQ_RANGE_5GH     3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_5BAND    4
+#define WL_CHAN_FREQ_RANGE_5GLH_5BAND    5
+#define WL_CHAN_FREQ_RANGE_5GML_5BAND    6
+#define WL_CHAN_FREQ_RANGE_5GMH_5BAND    7
+#define WL_CHAN_FREQ_RANGE_5GH_5BAND     8
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0     1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1     2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2     3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3     4
+#define WL_CHAN_FREQ_RANGE_5G_4BAND     5
+
+/* SROM12 */
+#define WL_CHAN_FREQ_RANGE_5G_BAND4 5
+#define WL_CHAN_FREQ_RANGE_2G_40 6
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16
+
+#define WL_CHAN_FREQ_RANGE_5G_5BAND	18
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_40	19
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_80	20
+
+#define WLC_MACMODE_DISABLED	0	/* MAC list disabled */
+#define WLC_MACMODE_DENY	1	/* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW	2	/* Allow specified (i.e. deny unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B			Rateset: 1b, 2b, 5.5, 11
+ *					Preamble: Long
+ *					Shortslot: Off
+ * GMODE_AUTO				Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: Auto
+ * GMODE_ONLY				Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ *					Extended Rateset: 6b, 9, 12b, 48
+ *					Preamble: Short required
+ *					Shortslot: Auto
+ * GMODE_B_DEFERRED			Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: On
+ * GMODE_PERFORMANCE			Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ *					Preamble: Short required
+ *					Shortslot: On and required
+ * GMODE_LRS				Rateset: 1b, 2b, 5.5b, 11b
+ *					Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ *					Preamble: Long
+ *					Shortslot: Auto
+ */
+#define GMODE_LEGACY_B		0
+#define GMODE_AUTO		1
+#define GMODE_ONLY		2
+#define GMODE_B_DEFERRED	3
+#define GMODE_PERFORMANCE	4
+#define GMODE_LRS		5
+#define GMODE_MAX		6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO	-1
+#define WLC_PLCP_SHORT	0
+#define WLC_PLCP_LONG	1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO		-1
+#define WLC_PROTECTION_OFF		0
+#define WLC_PROTECTION_ON		1
+#define WLC_PROTECTION_MMHDR_ONLY	2
+#define WLC_PROTECTION_CTS_ONLY		3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF		0
+#define WLC_PROTECTION_CTL_LOCAL	1
+#define WLC_PROTECTION_CTL_OVERLAP	2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF		0
+#define WLC_N_PROTECTION_OPTIONAL	1
+#define WLC_N_PROTECTION_20IN40		2
+#define WLC_N_PROTECTION_MIXEDMODE	3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE	0
+#define WLC_N_PREAMBLE_GF		1
+#define WLC_N_PREAMBLE_GF_BRCM          2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL			0
+#define WLC_N_BW_40ALL			1
+#define WLC_N_BW_20IN2G_40IN5G		2
+
+#define WLC_BW_20MHZ_BIT		(1<<0)
+#define WLC_BW_40MHZ_BIT		(1<<1)
+#define WLC_BW_80MHZ_BIT		(1<<2)
+#define WLC_BW_160MHZ_BIT		(1<<3)
+#define WLC_BW_10MHZ_BIT		(1<<4)
+#define WLC_BW_5MHZ_BIT			(1<<5)
+#define WLC_BW_2P5MHZ_BIT		(1<<6)
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ		(WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ		(WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ		(WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ		(WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+	WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_2P5MHZ		(WLC_BW_2P5MHZ_BIT)
+#define WLC_BW_CAP_5MHZ			(WLC_BW_5MHZ_BIT)
+#define WLC_BW_CAP_10MHZ		(WLC_BW_10MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED		0xFF
+
+#define WL_BW_CAP_20MHZ(bw_cap)	(((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap)	(((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap)	(((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_2P5MHZ(bw_cap)(((bw_cap) & WLC_BW_2P5MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_5MHZ(bw_cap)	(((bw_cap) & WLC_BW_5MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_10MHZ(bw_cap)	(((bw_cap) & WLC_BW_10MHZ_BIT) ? TRUE : FALSE)
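+
+/*
+ * Example reading of the encodings above (illustrative only): each of the
+ * 20-160 MHz cap values includes all narrower widths, e.g. WLC_BW_CAP_80MHZ
+ * expands to 0x07, so WL_BW_CAP_40MHZ(WLC_BW_CAP_80MHZ) and
+ * WL_BW_CAP_20MHZ(WLC_BW_CAP_80MHZ) both evaluate to TRUE.
+ */
+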
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0		0
+#define WLC_N_TXRX_CHAIN1		1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20			0x01
+#define WLC_N_SGI_40			0x02
+#define WLC_VHT_SGI_80			0x04
+#define WLC_VHT_SGI_160			0x08
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL				0x02
+
+#define LISTEN_INTERVAL			10
+/* interference mitigation options */
+#define	INTERFERE_OVRRIDE_OFF	-1	/* interference override off */
+#define	INTERFERE_NONE	0	/* off */
+#define	NON_WLAN	1	/* foreign/non 802.11 interference, no auto detect */
+#define	WLAN_MANUAL	2	/* ACI: no auto detection */
+#define	WLAN_AUTO	3	/* ACI: auto detect */
+#define	WLAN_AUTO_W_NOISE	4	/* ACI: auto - detect and non 802.11 interference */
+#define AUTO_ACTIVE	(1 << 7) /* Auto is currently active */
+
+/* interference mode bit-masks (ACPHY) */
+#define ACPHY_ACI_GLITCHBASED_DESENSE 1   /* bit 0 */
+#define ACPHY_ACI_HWACI_PKTGAINLMT 2      /* bit 1 */
+#define ACPHY_ACI_W2NB_PKTGAINLMT 4       /* bit 2 */
+#define ACPHY_ACI_PREEMPTION 8            /* bit 3 */
+#define ACPHY_HWACI_MITIGATION 16         /* bit 4 */
+#define ACPHY_LPD_PREEMPTION 32           /* bit 5 */
+#define ACPHY_HWOBSS_MITIGATION 64        /* bit 6 */
+#define ACPHY_ACI_MAX_MODE 127
+
+/* AP environment */
+#define AP_ENV_DETECT_NOT_USED		0 /* We aren't using AP environment detection */
+#define AP_ENV_DENSE			1 /* "Corporate" or other AP dense environment */
+#define AP_ENV_SPARSE			2 /* "Home" or other sparse environment */
+#define AP_ENV_INDETERMINATE		3 /* AP environment hasn't been identified */
+
+#define TRIGGER_NOW				0
+#define TRIGGER_CRS				0x01
+#define TRIGGER_CRSDEASSERT			0x02
+#define TRIGGER_GOODFCS				0x04
+#define TRIGGER_BADFCS				0x08
+#define TRIGGER_BADPLCP				0x10
+#define TRIGGER_CRSGLITCH			0x20
+
+#define	WL_SAMPLEDATA_HEADER_TYPE	1
+#define WL_SAMPLEDATA_HEADER_SIZE	80	/* sample collect header size (bytes) */
+#define	WL_SAMPLEDATA_TYPE		2
+#define	WL_SAMPLEDATA_SEQ		0xff	/* sequence # */
+#define	WL_SAMPLEDATA_MORE_DATA		0x100	/* more data mask */
+
+/* WL_OTA START */
+#define WL_OTA_ARG_PARSE_BLK_SIZE	1200
+#define WL_OTA_TEST_MAX_NUM_RATE	30
+#define WL_OTA_TEST_MAX_NUM_SEQ		100
+#define WL_OTA_TEST_MAX_NUM_RSSI	85
+#define WL_THRESHOLD_LO_BAND	70	/* range from 5250 MHz to 5350 MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF		0	/* radar detector off */
+#define WL_RADAR_DETECTOR_ON		1	/* radar detector on */
+#define WL_RADAR_SIMULATED		2	/* force radar detector to declare
+						 * detection once
+						 */
+#define WL_RADAR_SIMULATED_SC		3	/* force radar detector to declare
+						 * detection once on scan core
+						 * if available and active
+						 */
+#define WL_RSSI_ANT_VERSION	1	/* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX		2	/* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX	4	/* max 4 receive antennas/cores */
+#define WL_ANT_IDX_1		0	/* antenna index 1 */
+#define WL_ANT_IDX_2		1	/* antenna index 2 */
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX		4	/* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE		0	/* state for operating in non-radar channel */
+#define	WL_DFS_CACSTATE_PREISM_CAC	1	/* CAC in progress */
+#define WL_DFS_CACSTATE_ISM		2	/* ISM in progress */
+#define WL_DFS_CACSTATE_CSA		3	/* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC	4	/* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC	5	/* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC	6	/* POSTISM OOC */
+#define WL_DFS_CACSTATES		7	/* this many states exist */
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ		0
+#define WL_BW_40MHZ		1
+#define WL_BW_80MHZ		2
+#define WL_BW_160MHZ		3
+#define WL_BW_8080MHZ		4
+#define WL_BW_2P5MHZ		5
+#define WL_BW_5MHZ		6
+#define WL_BW_10MHZ		7
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED	1
+#define WL_TX_POWER_F_HW		2
+#define WL_TX_POWER_F_MIMO		4
+#define WL_TX_POWER_F_SISO		8
+#define WL_TX_POWER_F_HT		0x10
+#define WL_TX_POWER_F_VHT		0x20
+#define WL_TX_POWER_F_OPENLOOP		0x40
+#define WL_TX_POWER_F_PROP11NRATES	0x80
+#define WL_TX_POWER_F_UNIT_QDBM		0x100
+/* Message levels */
+#define WL_ERROR_VAL		0x00000001
+#define WL_TRACE_VAL		0x00000002
+#define WL_PRHDRS_VAL		0x00000004
+#define WL_PRPKT_VAL		0x00000008
+#define WL_INFORM_VAL		0x00000010
+#define WL_TMP_VAL		0x00000020
+#define WL_OID_VAL		0x00000040
+#define WL_RATE_VAL		0x00000080
+#define WL_ASSOC_VAL		0x00000100
+#define WL_PRUSR_VAL		0x00000200
+#define WL_PS_VAL		0x00000400
+#define WL_TXPWR_VAL		0x00000000	/* retired in TOT on 6/10/2009 */
+#define WL_MODE_SWITCH_VAL	0x00000800 /* Using retired TXPWR val */
+#define WL_PORT_VAL		0x00001000
+#define WL_DUAL_VAL		0x00002000
+#define WL_WSEC_VAL		0x00004000
+#define WL_WSEC_DUMP_VAL	0x00008000
+#define WL_LOG_VAL		0x00010000
+#define WL_NRSSI_VAL		0x00000000	/* retired in TOT on 6/10/2009 */
+#define WL_BCNTRIM_VAL		0x00020000	/* Using retired NRSSI VAL */
+#define WL_LOFT_VAL		0x00000000	/* retired in TOT on 6/10/2009 */
+#define WL_PFN_VAL		0x00040000 /* Using retired LOFT_VAL */
+#define WL_REGULATORY_VAL	0x00080000
+#define WL_CSA_VAL		0x00080000  /* Reusing REGULATORY_VAL due to lack of bits */
+#define WL_TAF_VAL		0x00100000
+#define WL_RADAR_VAL		0x00000000	/* retired in TOT on 6/10/2009 */
+#define WL_WDI_VAL		0x00200000	/* Using retired WL_RADAR_VAL */
+#define WL_MPC_VAL		0x00400000
+#define WL_APSTA_VAL		0x00800000
+#define WL_DFS_VAL		0x01000000
+#define WL_BA_VAL		0x00000000	/* retired in TOT on 6/14/2010 */
+#define WL_MUMIMO_VAL		0x02000000      /* Using retired WL_BA_VAL */
+#define WL_ACI_VAL		0x04000000
+#define WL_PRMAC_VAL		0x04000000
+#define WL_MBSS_VAL		0x04000000
+#define WL_CAC_VAL		0x08000000
+#define WL_AMSDU_VAL		0x10000000
+#define WL_AMPDU_VAL		0x20000000
+#define WL_FFPLD_VAL		0x40000000
+#define WL_ROAM_EXP_VAL		0x80000000
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL		0x00000001
+/* re-using WL_DPT_VAL */
+/* re-using WL_MESH_VAL */
+#define WL_NATOE_VAL		0x00000001
+#define WL_MESH_VAL		0x00000001
+#define WL_SCAN_VAL		0x00000002
+#define WL_WOWL_VAL		0x00000004
+#define WL_COEX_VAL		0x00000008
+#define WL_RTDC_VAL		0x00000010
+#define WL_PROTO_VAL		0x00000020
+#define WL_SWDIV_VAL		0x00000040
+#define WL_CHANINT_VAL		0x00000080
+#define WL_WMF_VAL		0x00000100
+#define WL_P2P_VAL		0x00000200
+#define WL_ITFR_VAL		0x00000400
+#define WL_MCHAN_VAL		0x00000800
+#define WL_TDLS_VAL		0x00001000
+#define WL_MCNX_VAL		0x00002000
+#define WL_PROT_VAL		0x00004000
+#define WL_PSTA_VAL		0x00008000
+#define WL_TSO_VAL		0x00010000
+#define WL_TRF_MGMT_VAL		0x00020000
+#define WL_LPC_VAL	        0x00040000
+#define WL_L2FILTER_VAL		0x00080000
+#define WL_TXBF_VAL		0x00100000
+#define WL_P2PO_VAL		0x00200000
+#define WL_TBTT_VAL		0x00400000
+#define WL_FBT_VAL		0x00800000
+#define WL_RRM_VAL		0x00800000 /* reuse */
+#define WL_MQ_VAL		0x01000000
+
+/* This level is currently used in Phoenix2 only */
+#define WL_SRSCAN_VAL		0x02000000
+
+#define WL_WNM_VAL		0x04000000
+/* re-using WL_WNM_VAL for MBO */
+#define WL_MBO_VAL		0x04000000
+#define WL_PWRSEL_VAL		0x10000000
+#define WL_NET_DETECT_VAL	0x20000000
+#define WL_OCE_VAL  0x20000000 /* reuse */
+#define WL_PCIE_VAL		0x40000000
+#define WL_PMDUR_VAL	0x80000000
+
+
+/* use top-bit for WL_TIMESTAMP_VAL because this is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL        0x80000000
+
+/* max # of leds supported by GPIO (gpio pin# == led index#) */
+#define	WL_LED_NUMGPIO		32	/* gpio 0-31 */
+
+/* led per-pin behaviors */
+#define	WL_LED_OFF		0		/* always off */
+#define	WL_LED_ON		1		/* always on */
+#define	WL_LED_ACTIVITY		2		/* activity */
+#define	WL_LED_RADIO		3		/* radio enabled */
+#define	WL_LED_ARADIO		4		/* 5 GHz radio enabled */
+#define	WL_LED_BRADIO		5		/* 2.4 GHz radio enabled */
+#define	WL_LED_BGMODE		6		/* on if gmode, off if bmode */
+#define	WL_LED_WI1		7
+#define	WL_LED_WI2		8
+#define	WL_LED_WI3		9
+#define	WL_LED_ASSOC		10		/* associated state indicator */
+#define	WL_LED_INACTIVE		11		/* null behavior (clears default behavior) */
+#define	WL_LED_ASSOCACT		12		/* on when associated; blink fast for activity */
+#define WL_LED_WI4		13
+#define WL_LED_WI5		14
+#define	WL_LED_BLINKSLOW	15		/* blink slow */
+#define	WL_LED_BLINKMED		16		/* blink med */
+#define	WL_LED_BLINKFAST	17		/* blink fast */
+#define	WL_LED_BLINKCUSTOM	18		/* blink custom */
+#define	WL_LED_BLINKPERIODIC	19		/* blink periodic (custom 1000ms / off 400ms) */
+#define WL_LED_ASSOC_WITH_SEC	20		/* when connected with security */
+						/* keep on for 300 sec */
+#define WL_LED_START_OFF	21		/* off upon boot, could be turned on later */
+#define WL_LED_WI6		22
+#define WL_LED_WI7		23
+#define WL_LED_WI8		24
+#define	WL_LED_NUMBEHAVIOR	25
+
+/* led behavior numeric value format */
+#define	WL_LED_BEH_MASK		0x3f		/* behavior mask */
+#define	WL_LED_PMU_OVERRIDE	0x40		/* need to set PMU Override bit for the GPIO */
+#define	WL_LED_AL_MASK		0x80		/* activelow (polarity) bit */
+
+/* number of bytes needed to define a proper bit mask for MAC event reporting */
+#define BCMIO_ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
+#define BCMIO_NBBY		8
+#define WL_EVENTING_MASK_LEN	(16+4)
+
+#define WL_EVENTING_MASK_EXT_LEN \
+    MAX(WL_EVENTING_MASK_LEN, (ROUNDUP(WLC_E_LAST, NBBY)/NBBY))
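+
+/*
+ * Worked example (assuming ROUNDUP, MAX and NBBY come from the usual
+ * Broadcom utility headers and WLC_E_LAST from the event definitions):
+ * WL_EVENTING_MASK_LEN is 16 + 4 = 20 bytes, i.e. 160 event bits; the
+ * extended length grows to ROUNDUP(WLC_E_LAST, NBBY)/NBBY bytes once the
+ * event count exceeds 160.
+ */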
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI	1	/* by RSSI */
+#define WL_JOIN_PREF_WPA	2	/* by akm and ciphers */
+#define WL_JOIN_PREF_BAND	3	/* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA	4	/* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF	5	/* defined by requesting AP */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF	255	/* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY	"\x00\x00\x00\x00"
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC			1
+#define WLC_MEASURE_CHANNEL_BASIC	2
+#define WLC_MEASURE_CHANNEL_CCA		3
+#define WLC_MEASURE_CHANNEL_RPI		4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF			0		/* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H		1		/* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H		2		/* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D		3		/* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
+ * adoption is done regardless of capability spectrum_management
+ */
+#define SPECT_MNGMT_LOOSE_11H_D		4		/* operation defined above */
+
+/* bit position in per_chan_info; these depend on current country/regulatory domain */
+#define WL_CHAN_VALID_HW		(1 << 0)	/* valid with current HW */
+#define WL_CHAN_VALID_SW		(1 << 1)	/* valid with current country setting */
+#define WL_CHAN_BAND_5G			(1 << 2)	/* 5GHz-band channel */
+#define WL_CHAN_RADAR			(1 << 3)	/* radar sensitive channel */
+#define WL_CHAN_INACTIVE		(1 << 4)	/* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE			(1 << 5)	/* channel is in passive mode */
+#define WL_CHAN_RESTRICTED		(1 << 6)	/* restricted use channel */
+#define WL_CHAN_RADAR_EU_WEATHER	(1 << 7)	/* EU Radar weather channel. Implies an
+							 * EU Radar channel.
+							 */
+#define WL_CHAN_CLM_RESTRICTED		(1 << 8)	/* channel restricted in CLM
+							 * (i.e. by default)
+							 */
+
+/* following definition is for precommit; will be removed once wl, acsd switch to the new def */
+#define WL_CHAN_WEATHER_RADAR		WL_CHAN_RADAR_EU_WEATHER
+
+/* BTC mode used by "btc_mode" iovar */
+#define	WL_BTC_DISABLE		0	/* disable BT coexistence */
+#define WL_BTC_FULLTDM      1	/* full TDM COEX */
+#define WL_BTC_ENABLE       1	/* full TDM COEX to maintain backward compatibility */
+#define WL_BTC_PREMPT      2    /* full TDM COEX with preemption */
+#define WL_BTC_LITE        3	/* lightweight coex for large isolation platform */
+#define WL_BTC_PARALLEL		4   /* BT and WLAN run in parallel with separate antenna  */
+#define WL_BTC_HYBRID		5   /* hybrid coex, only ack is allowed to transmit in BT slot */
+#define WL_BTC_DEFAULT		8	/* set the default mode for the device */
+#define WL_INF_BTC_DISABLE      0
+#define WL_INF_BTC_ENABLE       1
+#define WL_INF_BTC_AUTO         3
+
+/* BTC wire used by "btc_wire" iovar */
+#define	WL_BTC_DEFWIRE		0	/* use default wire setting */
+#define WL_BTC_2WIRE		2	/* use 2-wire BTC */
+#define WL_BTC_3WIRE		3	/* use 3-wire BTC */
+#define WL_BTC_4WIRE		4	/* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT               (1 << 0)
+#define WL_BTC_FLAG_BT_DEF               (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT          (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP              (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT           (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP	         (1 << 5)
+#define WL_BTC_FLAG_ECI                  (1 << 6)
+#define WL_BTC_FLAG_LIGHT                (1 << 7)
+#define WL_BTC_FLAG_PARALLEL             (1 << 8)
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#ifdef WL11AC_80P80
+#define WL_NUMCHANSPECS 206
+#else
+#define WL_NUMCHANSPECS 110
+#endif
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH	0	/* authenticator */
+#define WL_WDS_WPA_ROLE_SUP	1	/* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO	255	/* auto, based on mac addr value */
+
+/* Base offset values */
+#define WL_PKT_FILTER_BASE_PKT   0
+#define WL_PKT_FILTER_BASE_END   1
+#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */
+#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */
+#define WL_PKT_FILTER_BASE_ETH_H 4
+#define WL_PKT_FILTER_BASE_ETH_D 5
+#define WL_PKT_FILTER_BASE_ARP_H 6
+#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */
+#define WL_PKT_FILTER_BASE_IP4_H 8
+#define WL_PKT_FILTER_BASE_IP4_D 9
+#define WL_PKT_FILTER_BASE_IP6_H 10
+#define WL_PKT_FILTER_BASE_IP6_D 11
+#define WL_PKT_FILTER_BASE_TCP_H 12
+#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */
+#define WL_PKT_FILTER_BASE_UDP_H 14
+#define WL_PKT_FILTER_BASE_UDP_D 15
+#define WL_PKT_FILTER_BASE_IP6_P 16
+#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */
+
+/* String mapping for bases that may be used by applications or debug */
+#define WL_PKT_FILTER_BASE_NAMES \
+	{ "START", WL_PKT_FILTER_BASE_PKT },   \
+	{ "END",   WL_PKT_FILTER_BASE_END },   \
+	{ "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \
+	{ "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \
+	{ "D11_H", WL_PKT_FILTER_BASE_D11_H }, \
+	{ "D11_D", WL_PKT_FILTER_BASE_D11_D }, \
+	{ "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \
+	{ "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \
+	{ "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \
+	{ "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \
+	{ "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \
+	{ "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \
+	{ "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \
+	{ "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \
+	{ "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \
+	{ "UDP_D", WL_PKT_FILTER_BASE_UDP_D }
+
+/* Flags for a pattern list element */
+#define WL_PKT_FILTER_MFLAG_NEG 0x0001
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START			0x01
+#define WL_PKTENG_PER_TX_STOP			0x02
+#define WL_PKTENG_PER_RX_START			0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START		0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START		0x06
+#define WL_PKTENG_PER_RX_STOP			0x08
+#define WL_PKTENG_PER_RU_TX_START		0x09
+#define WL_PKTENG_PER_MASK			0xff
+
+#define WL_PKTENG_SYNCHRONOUS			0x100	/* synchronous flag */
+#define WL_PKTENG_SYNCHRONOUS_UNBLK		0x200	/* synchronous unblock flag */
+#ifdef PKTENG_LONGPKTSZ
+/* max pktsz limit for pkteng */
+#define WL_PKTENG_MAXPKTSZ				PKTENG_LONGPKTSZ
+#else
+#define WL_PKTENG_MAXPKTSZ				16384
+#endif
+
+#define NUM_80211b_RATES	4
+#define NUM_80211ag_RATES	8
+#define NUM_80211n_RATES	32
+#define NUM_80211_RATES		(NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+
+/*
+ * WOWL capability/override settings
+ */
+#define WL_WOWL_MAGIC           (1 << 0)    /* Wakeup on Magic packet */
+#define WL_WOWL_NET             (1 << 1)    /* Wakeup on Netpattern */
+#define WL_WOWL_DIS             (1 << 2)    /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR            (1 << 3)    /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN             (1 << 4)    /* Wakeup on loss of beacon */
+#define WL_WOWL_TST             (1 << 5)    /* Wakeup after test */
+#define WL_WOWL_M1              (1 << 6)    /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID           (1 << 7)    /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO        (1 << 8)    /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_ULP_BAILOUT     (1 << 8)    /* wakeind via unknown pkt by basic ULP-offloads -
+ * WL_WOWL_ULP_BAILOUT - same as WL_WOWL_PME_GPIO used only for DONGLE BUILDS
+ */
+#define WL_WOWL_NEEDTKIP1       (1 << 9)    /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE     (1 << 10)   /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT       (1 << 11)   /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD      (1 << 12)   /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2            (1 << 13)   /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT          (1 << 14)   /* If the bit is set, use key rotation */
+#define WL_WOWL_BCAST           (1 << 15)   /* If the bit is set, frm received was bcast frame */
+#define WL_WOWL_SCANOL          (1 << 16)   /* If the bit is set, scan offload is enabled */
+#define WL_WOWL_TCPKEEP_TIME    (1 << 17)   /* Wakeup on tcpkeep alive timeout */
+#define WL_WOWL_MDNS_CONFLICT   (1 << 18)   /* Wakeup on mDNS Conflict Resolution */
+#define WL_WOWL_MDNS_SERVICE    (1 << 19)   /* Wakeup on mDNS Service Connect */
+#define WL_WOWL_TCPKEEP_DATA    (1 << 20)   /* tcp keepalive got data */
+#define WL_WOWL_FW_HALT         (1 << 21)   /* Firmware died in wowl mode */
+#define WL_WOWL_ENAB_HWRADIO    (1 << 22)   /* Enable detection of radio button changes */
+#define WL_WOWL_MIC_FAIL        (1 << 23)   /* Offloads detected MIC failure(s) */
+#define WL_WOWL_UNASSOC         (1 << 24)   /* Wakeup in Unassociated state (Net/Magic Pattern) */
+#define WL_WOWL_SECURE          (1 << 25)   /* Wakeup if received matched secured pattern */
+#define WL_WOWL_EXCESS_WAKE     (1 << 26)   /* Excess wake */
+#define WL_WOWL_LINKDOWN        (1 << 31)   /* Link Down indication in WoWL mode */
+
+#define WL_WOWL_TCPKEEP         (1 << 20)   /* temp copy to satisfy automerger */
+#define MAGIC_PKT_MINLEN 102    /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+
+#define WOWL_PATTEN_TYPE_ARP	(1 << 0)	/* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA	(1 << 1)	/* NA offload Pattern */
+
+#define MAGIC_PKT_MINLEN	102    /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+#define MAGIC_PKT_NUM_MAC_ADDRS	16
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT		20	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN			5	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT		10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN			10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT	300	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN		10	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX		900	/* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX	100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT	25	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN		0	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX		100	/* unit percent */
+
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7	/* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK		0x07
+#define WL_COEX_INFO_REQ		0x01
+#define	WL_COEX_40MHZ_INTOLERANT	0x02
+#define	WL_COEX_WIDTH20			0x04
+
+#define	WLC_RSSI_INVALID	 0	/* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER		0x0100
+
+#define MAX_ARGSTR_LEN		18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON	0x0001
+#define LOG_MODULE_ASSOC	0x0002
+#define LOG_MODULE_EVENT	0x0004
+#define LOG_MODULE_MAX		3			/* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE	0
+#define WL_LOG_LEVEL_ERR	1
+#define WL_LOG_LEVEL_WARN	2
+#define WL_LOG_LEVEL_INFO	3
+#define WL_LOG_LEVEL_MAX	WL_LOG_LEVEL_INFO	/* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT		1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL	0
+#define LOG_ARGTYPE_STR		1	/* %s */
+#define LOG_ARGTYPE_INT		2	/* %d */
+#define LOG_ARGTYPE_INT_STR	3	/* %d...%s */
+#define LOG_ARGTYPE_STR_INT	4	/* %s...%d */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG	0x1
+#define VNDR_IE_PRBRSP_FLAG	0x2
+#define VNDR_IE_ASSOCRSP_FLAG	0x4
+#define VNDR_IE_AUTHRSP_FLAG	0x8
+#define VNDR_IE_PRBREQ_FLAG	0x10
+#define VNDR_IE_ASSOCREQ_FLAG	0x20
+#define VNDR_IE_IWAPID_FLAG	0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_AUTHREQ_FLAG	0x80
+#define VNDR_IE_CUSTOM_FLAG	0x100 /* allow custom IE id */
+#define VNDR_IE_DISASSOC_FLAG	0x200
+
+#if defined(WLP2P)
+/* P2P Action Frames flags (spec ordered) */
+#define VNDR_IE_GONREQ_FLAG     0x001000
+#define VNDR_IE_GONRSP_FLAG     0x002000
+#define VNDR_IE_GONCFM_FLAG     0x004000
+#define VNDR_IE_INVREQ_FLAG     0x008000
+#define VNDR_IE_INVRSP_FLAG     0x010000
+#define VNDR_IE_DISREQ_FLAG     0x020000
+#define VNDR_IE_DISRSP_FLAG     0x040000
+#define VNDR_IE_PRDREQ_FLAG     0x080000
+#define VNDR_IE_PRDRSP_FLAG     0x100000
+
+#define VNDR_IE_P2PAF_SHIFT	12
+#endif /* WLP2P */
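The P2P action-frame flags above occupy consecutive bits starting at VNDR_IE_P2PAF_SHIFT, in spec order (GO negotiation request first). A hedged helper showing how the shift and the flags relate:

    /* Map a spec-ordered P2P action-frame index (0 = GON request, 1 = GON
     * response, ...) to its vendor-IE flag. Sketch only. */
    static unsigned int p2paf_flag(unsigned int index)
    {
            return 1u << (VNDR_IE_P2PAF_SHIFT + index);
    }
    /* p2paf_flag(0) == VNDR_IE_GONREQ_FLAG, p2paf_flag(1) == VNDR_IE_GONRSP_FLAG */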
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE	0	/* disabled */
+#define CHANIM_DETECT	1	/* detection only */
+#define CHANIM_EXT		2	/* external state machine */
+#define CHANIM_ACT		3	/* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT		0
+#define APCS_IOCTL		1
+#define APCS_CHANIM		2
+#define APCS_CSTIMER		3
+#define APCS_TXDLY		5
+#define APCS_NONACSD		6
+#define APCS_DFS_REENTRY	7
+#define APCS_TXFAIL		8
+#define APCS_MAX		9
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD			10
+
+/* CHANIM */
+#define CCASTATS_TXDUR  0
+#define CCASTATS_INBSS  1
+#define CCASTATS_OBSS   2
+#define CCASTATS_NOCTG  3
+#define CCASTATS_NOPKT  4
+#define CCASTATS_DOZE   5
+#define CCASTATS_TXOP	6
+#define CCASTATS_GDTXDUR        7
+#define CCASTATS_BDTXDUR        8
+
+#ifndef WLCHANIM_V2
+#define CCASTATS_MAX    9
+#else /* WLCHANIM_V2 */
+#define CCASTATS_MYRX      9
+#define CCASTATS_MAX    10
+#endif /* WLCHANIM_V2 */
+
+#define WL_CHANIM_COUNT_ALL	0xff
+#define WL_CHANIM_COUNT_ONE	0x1
+
+/* ap tpc modes */
+#define	AP_TPC_OFF		0
+#define	AP_TPC_BSS_PWR		1	/* BSS power control */
+#define AP_TPC_AP_PWR		2	/* AP power control */
+#define	AP_TPC_AP_BSS_PWR	3	/* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN	127
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN	0
+#define WL_P2P_DISC_ST_LISTEN	1
+#define WL_P2P_DISC_ST_SEARCH	2
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT	0
+#define WL_P2P_IF_GO		1
+#define WL_P2P_IF_DYNBCN_GO	2
+#define WL_P2P_IF_DEV		3
+
+/* count */
+#define WL_P2P_SCHED_RSVD	0
+#define WL_P2P_SCHED_REPEAT	255	/* anything > 255 will be treated as 255 */
+
+#define WL_P2P_SCHED_FIXED_LEN		3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS		0	/* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS	1	/* Requested Absence */
+
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE	0	/* no action */
+#define WL_P2P_SCHED_ACTION_DOZE	1	/* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF	2	/* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET	255	/* reset */
+
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL	0	/* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT	1	/* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS	2	/* normal start/interval/duration/count with
+						 * start being an offset of the 'current' TSF
+						 */
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA	(1 << 0)	/* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY	(1 << 1)	/* GO does not probe respond to non-p2p probe
+						 * requests
+						 */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2)	/* Restrict p2p dev interface from responding */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices;
+ * the value 2 is reserved for a future define, for when 1x1 & 2x2 need to be
+ * separated and controlled independently
+ */
+#define WL_11N_2x2			1
+#define WL_11N_3x3			3
+#define WL_11N_4x4			4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N		0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX	0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX	0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX	0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX	0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX	0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX	0x00000040
+#define WLFEATURE_DISABLE_11N_GF	0x00000080
+
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED		0
+#define PSTA_MODE_PROXY			1
+#define PSTA_MODE_REPEATER		2
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE		1	/* enable NAT on given interface */
+#define NAT_OP_DISABLE		2	/* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL	3	/* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED	1	/* NAT is enabled */
+#define NAT_STATE_DISABLED	2	/* NAT is disabled */
+
+#define CHANNEL_5G_LOW_START	36	/* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START	52	/* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START	100	/* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START	149	/* 5G upper (149..161) CDD enable/disable bit mask */
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER		0x0001
+#define IPV4_NETBT_FILTER	0x0002
+#define IPV4_LLMNR_FILTER	0x0004
+#define IPV4_SSDP_FILTER	0x0008
+#define IPV4_WSD_FILTER		0x0010
+#define IPV6_NETBT_FILTER	0x0200
+#define IPV6_LLMNR_FILTER	0x0400
+#define IPV6_SSDP_FILTER	0x0800
+#define IPV6_WSD_FILTER		0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE		0x00000001
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES                 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP                  0x0001  /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING           0x0002  /* Don't shape traffic */
+#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC      0x0008  /* Manage traffic over our local subnet */
+#define TRF_MGMT_FLAG_FILTER_ON_MACADDR         0x0010  /* filter on MAC address */
+#define TRF_MGMT_FLAG_NO_RX                     0x0020  /* do not apply filters to rx packets */
+
+#define TRF_FILTER_MAC_ADDR              0x0001 /* L2 filter use dst mac address for filtering */
+#define TRF_FILTER_IP_ADDR               0x0002 /* L3 filter use IP address for filtering */
+#define TRF_FILTER_L4                    0x0004 /* L4 filter use tcp/udp for filtering */
+#define TRF_FILTER_DWM                   0x0008 /* L3 filter use DSCP for filtering */
+#define TRF_FILTER_FAVORED               0x0010 /* Tag the packet FAVORED */
+
+/* WNM/NPS subfeatures mask */
+#define WL_WNM_BSSTRANS		0x00000001
+#define WL_WNM_PROXYARP		0x00000002
+#define WL_WNM_MAXIDLE		0x00000004
+#define WL_WNM_TIMBC		0x00000008
+#define WL_WNM_TFS		0x00000010
+#define WL_WNM_SLEEP		0x00000020
+#define WL_WNM_DMS		0x00000040
+#define WL_WNM_FMS		0x00000080
+#define WL_WNM_NOTIF		0x00000100
+#define WL_WNM_WBTEXT	0x00000200
+#define WL_WNM_MAX		0x00000400
+#ifdef WLWNM_BRCM
+#define BRCM_WNM_FEATURE_SET\
+					(WL_WNM_PROXYARP | \
+					WL_WNM_SLEEP | \
+					WL_WNM_FMS | \
+					WL_WNM_TFS | \
+					WL_WNM_TIMBC | \
+					WL_WNM_BSSTRANS | \
+					WL_WNM_DMS | \
+					WL_WNM_NOTIF | \
+					0)
+#endif /* WLWNM_BRCM */
+#ifndef ETHER_MAX_DATA
+#define ETHER_MAX_DATA	1500
+#endif /* ETHER_MAX_DATA */
+
+/* Different discovery modes for dpt */
+#define	DPT_DISCOVERY_MANUAL	0x01	/* manual discovery mode */
+#define	DPT_DISCOVERY_AUTO	0x02	/* auto discovery mode */
+#define	DPT_DISCOVERY_SCAN	0x04	/* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO	0	/* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT	1	/* always use direct DPT path */
+#define DPT_PATHSEL_APPATH	2	/* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD	1	/* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE	2	/* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+
+/* flags to indicate DPT status */
+#define	DPT_STATUS_ACTIVE	0x01	/* link active (though may be suspended) */
+#define	DPT_STATUS_AES		0x02	/* link secured through AES encryption */
+#define	DPT_STATUS_FAILED	0x04	/* DPT link failed */
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define TDLS_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define TDLS_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+#define TDLS_MANUAL_EP_PM		4	/*  put dpt endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE		5	/* wake up dpt endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY	6	/* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW		7	/* channel switch */
+#define TDLS_MANUAL_EP_WFD_TPQ	8	/* WiFi-Display Tunneled Probe reQuest */
+
+/* modes */
+#define TDLS_WFD_IE_TX			0
+#define TDLS_WFD_IE_RX			1
+#define TDLS_WFD_PROBE_IE_TX	2
+#define TDLS_WFD_PROBE_IE_RX	3
+#endif /* WLTDLS */
+
+/* define for flag */
+#define TSPEC_PENDING		0	/* TSPEC pending */
+#define TSPEC_ACCEPTED		1	/* TSPEC accepted */
+#define TSPEC_REJECTED		2	/* TSPEC rejected */
+#define TSPEC_UNKNOWN		3	/* TSPEC unknown */
+#define TSPEC_STATUS_MASK	7	/* TSPEC status mask */
+
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL       0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE    0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO	0x0004
+#define WL_SWFL_FLOWCONTROL     0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT	0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+#define CSA_BROADCAST_ACTION_FRAME	0	/* csa broadcast action frame */
+#define CSA_UNICAST_ACTION_FRAME	  1 /* csa unicast action frame */
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0)   value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ *                      the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT	0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH	1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE	2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO		3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE	3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER	(-100) /* Avoid roaming by setting the trigger very low (-100 dBm) */
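Per the comment above, WLC_SET_ROAM_TRIGGER accepts either a raw dBm threshold or a small logical level. A minimal validity check illustrating the two ranges (sketch only, not the driver's actual argument parsing):

    #include <stdbool.h>

    static bool roam_trigger_arg_valid(int value)
    {
            if (value > -100 && value < 0)
                    return true;    /* used directly as a trigger in dBm */
            return value >= WLC_ROAM_TRIGGER_DEFAULT &&
                   value <= WLC_ROAM_TRIGGER_MAX_VALUE;     /* logical level */
    }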
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT		11
+#define BESTN_BSSID_ONLY_BIT		12
+
+#define SORT_CRITERIA_MASK		0x0001
+#define AUTO_NET_SWITCH_MASK		0x0002
+#define ENABLE_BKGRD_SCAN_MASK		0x0004
+#define IMMEDIATE_SCAN_MASK		0x0008
+#define	AUTO_CONNECT_MASK		0x0010
+
+#define ENABLE_BD_SCAN_MASK		0x0020
+#define ENABLE_ADAPTSCAN_MASK		0x00c0
+#define IMMEDIATE_EVENT_MASK		0x0100
+#define SUPPRESS_SSID_MASK		0x0200
+#define ENABLE_NET_OFFLOAD_MASK		0x0400
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK		0x0800
+#define BESTN_BSSID_ONLY_MASK		0x1000
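The *_BIT values above are bit positions and each *_MASK is that bit shifted into place (ENABLE_ADAPTSCAN spans two bits). A small sketch composing a PFN flags word from a few of them (illustrative only, assuming the defines above are in scope):

    #include <stdbool.h>

    static unsigned int pfn_build_flags(bool sort, bool bkgrd_scan, bool immediate)
    {
            unsigned int flags = 0;

            if (sort)
                    flags |= 1u << SORT_CRITERIA_BIT;       /* == SORT_CRITERIA_MASK */
            if (bkgrd_scan)
                    flags |= 1u << ENABLE_BKGRD_SCAN_BIT;   /* == ENABLE_BKGRD_SCAN_MASK */
            if (immediate)
                    flags |= 1u << IMMEDIATE_SCAN_BIT;      /* == IMMEDIATE_SCAN_MASK */
            return flags;
    }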
+
+#define PFN_VERSION			2
+#ifdef PFN_SCANRESULT_2
+#define PFN_SCANRESULT_VERSION		2
+#else
+#define PFN_SCANRESULT_VERSION		1
+#endif /* PFN_SCANRESULT_2 */
+#ifndef MAX_PFN_LIST_COUNT
+#define MAX_PFN_LIST_COUNT		16
+#endif /* MAX_PFN_LIST_COUNT */
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP				2
+
+#define PFN_PARTIAL_SCAN_BIT		0
+#define PFN_PARTIAL_SCAN_MASK		1
+
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_SSID_A_BAND_TRIG		0x20
+#define WL_PFN_SSID_BG_BAND_TRIG	0x40
+#define WL_PFN_SSID_IMPRECISE_MATCH	0x80
+#define WL_PFN_SSID_SAME_NETWORK	0x10000
+#define WL_PFN_SUPPRESS_AGING_MASK	0x20000
+#define WL_PFN_FLUSH_ALL_SSIDS		0x40000
+#define WL_PFN_RSSI_MASK		0xff00
+#define WL_PFN_RSSI_SHIFT		8
+
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_HISTORY_OFF	0x00000002	/* Scan history suppressed */
+
+#define WL_PFN_HIDDEN_BIT		2
+#define PNO_SCAN_MAX_FW			508*1000	/* max scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC		PNO_SCAN_MAX_FW/1000 /* max scan time in sec */
+#define PNO_SCAN_MIN_FW_SEC		10			/* min scan time in sec */
+#define WL_PFN_HIDDEN_MASK		0x4
+#define MAX_SSID_WHITELIST_NUM         4
+#define MAX_BSSID_PREF_LIST_NUM        32
+#define MAX_BSSID_BLACKLIST_NUM        32
+
+#ifndef BESTN_MAX
+#define BESTN_MAX			10
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX			32
+#endif
+
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM	0x00000001
+#define TOE_ERRTEST_RX_CSUM	0x00000002
+#define TOE_ERRTEST_RX_CSUM2	0x00000004
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT		0x00000001
+#define ARP_OL_SNOOP		0x00000002
+#define ARP_OL_HOST_AUTO_REPLY	0x00000004
+#define ARP_OL_PEER_AUTO_REPLY	0x00000008
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER	0x1
+#define ARP_ERRTEST_REPLY_HOST	0x2
+
+#define ARP_MULTIHOMING_MAX	8	/* Maximum local host IP addresses */
+#define ND_MULTIHOMING_MAX 10	/* Maximum local host IP addresses */
+#define ND_REQUEST_MAX		5	/* Max set of offload params */
+/* AOAC wake event flag */
+#define WAKE_EVENT_NLO_DISCOVERY_BIT		1
+#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT	2
+#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4
+#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8
+#define WAKE_EVENT_NET_PACKET_BIT 0x10
+
+#define MAX_NUM_WOL_PATTERN	22 /* LOGO requirements min 22 */
+
+
+/* Packet filter operation mode */
+/* True: 1; False: 0 */
+#define PKT_FILTER_MODE_FORWARD_ON_MATCH		1
+/* Enable and disable pkt_filter as a whole */
+#define PKT_FILTER_MODE_DISABLE					2
+/* Cache first matched rx pkt(be queried by host later) */
+#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH		4
+/* If pkt_filter is enabled and no filter is set, don't forward anything */
+#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
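The packet-filter mode values above are independent bits that get OR'ed into a single mode word. A hedged example of composing one (sketch only, not the dhd iovar plumbing):

    #include <stdbool.h>

    static unsigned int pkt_filter_mode(bool forward_on_match, bool cache_first_match)
    {
            unsigned int mode = 0;

            if (forward_on_match)
                    mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
            if (cache_first_match)
                    mode |= PKT_FILTER_MODE_PKT_CACHE_ON_MATCH;
            return mode;
    }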
+
+#ifdef DONGLEOVERLAYS
+#define OVERLAY_IDX_MASK		0x000000ff
+#define OVERLAY_IDX_SHIFT		0
+#define OVERLAY_FLAGS_MASK		0xffffff00
+#define OVERLAY_FLAGS_SHIFT		8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD	0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL	0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP	0x400
+#define OVERLAY_DOWNLOAD_CHUNKSIZE	1024
+#endif /* DONGLEOVERLAYS */
+
+/* reuse two numbers in the sc/rc space */
+#define	SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED	0xFFFD
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE		0x77
+#define BCM_ACTION_RFAWARE_DCS  0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR		0x1
+#define BCM_DCS_UNKNOWN		0xFF
+
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS			0x0001
+
+/* enable (if/mac_open, if/mac_close, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS		0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS	0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE	0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE	0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE	0x0020
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE	0x0040
+#define WLFC_FLAGS_PKT_STAMP_SIGNALS		0x0080
+
+#endif /* PROP_TXSTATUS */
+
+#define WL_TIMBC_STATUS_AP_UNKNOWN	255	/* AP status for internal use only */
+
+#define WL_DFRTS_LOGIC_OFF	0	/* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/* AND all non-zero threshold conditions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RELMCAST_MAX_CLIENT		32
+#define WL_RELMCAST_FLAG_INBLACKLIST	1
+#define WL_RELMCAST_FLAG_ACTIVEACKER	2
+#define WL_RELMCAST_FLAG_RELMCAST	4
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE	0
+#define WL_PROXD_MODE_NEUTRAL	1
+#define WL_PROXD_MODE_INITIATOR	2
+#define WL_PROXD_MODE_TARGET	3
+#define WL_PROXD_RANDOM_WAKEUP	0x8000
+
+
+#ifdef NET_DETECT
+#define NET_DETECT_MAX_WAKE_DATA_SIZE	2048
+#define NET_DETECT_MAX_PROFILES		16
+#define NET_DETECT_MAX_CHANNELS		50
+#endif /* NET_DETECT */
+
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE		(1<<0)
+#define WL_RADIO_HW_DISABLE		(1<<1)
+#define WL_RADIO_MPC_DISABLE		(1<<2)
+#define WL_RADIO_COUNTRY_DISABLE	(1<<3)	/* some countries don't support any channel */
+#define WL_RADIO_PERCORE_DISABLE	(1<<4)	/* Radio disable per core for DVT */
+
+#define	WL_SPURAVOID_OFF	0
+#define	WL_SPURAVOID_ON1	1
+#define	WL_SPURAVOID_ON2	2
+
+
+#define WL_4335_SPURAVOID_ON1	1
+#define WL_4335_SPURAVOID_ON2	2
+#define WL_4335_SPURAVOID_ON3	3
+#define WL_4335_SPURAVOID_ON4	4
+#define WL_4335_SPURAVOID_ON5	5
+#define WL_4335_SPURAVOID_ON6	6
+#define WL_4335_SPURAVOID_ON7	7
+#define WL_4335_SPURAVOID_ON8	8
+#define WL_4335_SPURAVOID_ON9	9
+
+/* Override bit for WLC_SET_TXPWR. If set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE	(1U<<31)
+#define WL_TXPWR_2G		(1U<<30)
+#define WL_TXPWR_5G		(1U<<29)
+#define WL_TXPWR_NEG   (1U<<28)
+
+#define WL_TXPWR_MASK		(~(0x7<<29))
+#define WL_TXPWR_CORE_MAX	(3)
+#define WL_TXPWR_CORE0_MASK	(0x000000FF)
+#define WL_TXPWR_CORE0_SHIFT	(0)
+#define WL_TXPWR_CORE1_MASK	(0x0000FF00)
+#define WL_TXPWR_CORE1_SHIFT	(8)
+#define WL_TXPWR_CORE2_MASK	(0x00FF0000)
+#define WL_TXPWR_CORE2_SHIFT	(16)
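The core masks/shifts above pack one byte of tx power per core into the 32-bit value used with WLC_SET_TXPWR, with bit 31 reserved for the override. A pack/unpack sketch under that assumption (not a statement about the exact ioctl payload):

    #include <stdint.h>

    static uint32_t txpwr_pack(uint8_t c0, uint8_t c1, uint8_t c2, int override)
    {
            uint32_t v = ((uint32_t)c0 << WL_TXPWR_CORE0_SHIFT) |
                         ((uint32_t)c1 << WL_TXPWR_CORE1_SHIFT) |
                         ((uint32_t)c2 << WL_TXPWR_CORE2_SHIFT);

            if (override)
                    v |= WL_TXPWR_OVERRIDE;     /* ignore other level limits */
            return v;
    }

    static uint8_t txpwr_core0(uint32_t v)
    {
            return (uint8_t)((v & WL_TXPWR_CORE0_MASK) >> WL_TXPWR_CORE0_SHIFT);
    }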
+
+/* phy types (returned by WLC_GET_PHYTYPE) */
+#define	WLC_PHY_TYPE_A		0
+#define	WLC_PHY_TYPE_B		1
+#define	WLC_PHY_TYPE_G		2
+#define	WLC_PHY_TYPE_N		4
+#define	WLC_PHY_TYPE_LP		5
+#define	WLC_PHY_TYPE_SSN	6
+#define	WLC_PHY_TYPE_HT		7
+#define	WLC_PHY_TYPE_LCN	8
+#define	WLC_PHY_TYPE_LCN40	10
+#define WLC_PHY_TYPE_AC		11
+#define	WLC_PHY_TYPE_LCN20	12
+#define	WLC_PHY_TYPE_NULL	0xf
+
+/* Values for PM */
+#define PM_OFF	0
+#define PM_MAX	1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3		/* use this bit to force PM off even when BT is active */
+
+#define WL_WME_CNT_VERSION	1	/* current version of wl_wme_cnt_t */
+
+/* fbt_cap: FBT assoc / reassoc modes. */
+#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC  1 /* Driver 4-way handshake & reassoc (WLFBT). */
+
+/* monitor_promisc_level bits */
+#define WL_MONPROMISC_PROMISC 0x0001
+#define WL_MONPROMISC_CTRL 0x0002
+#define WL_MONPROMISC_FCS 0x0004
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL		0x00000001
+#define TOE_RX_CSUM_OL		0x00000002
+
+/* Wi-Fi Display Services (WFDS) */
+#define WL_P2P_SOCIAL_CHANNELS_MAX  WL_NUMCHANNELS
+#define MAX_WFDS_SEEK_SVC 4	/* Max # of wfds services to seek */
+#define MAX_WFDS_ADVERT_SVC 4	/* Max # of wfds services to advertise */
+#define MAX_WFDS_SVC_NAME_LEN 200	/* maximum service_name length */
+#define MAX_WFDS_ADV_SVC_INFO_LEN 65000	/* maximum adv service_info length */
+#define P2P_WFDS_HASH_LEN 6		/* Length of a WFDS service hash */
+#define MAX_WFDS_SEEK_SVC_INFO_LEN 255	/* maximum seek service_info req length */
+#define MAX_WFDS_SEEK_SVC_NAME_LEN 200	/* maximum service_name length */
+
+/* ap_isolate bitmaps */
+#define AP_ISOLATE_DISABLED		0x0
+#define AP_ISOLATE_SENDUP_ALL		0x01
+#define AP_ISOLATE_SENDUP_MCAST		0x02
+
+/* Type values for the wl_pwrstats_t data field */
+#define WL_PWRSTATS_TYPE_PHY		0 /**< struct wl_pwr_phy_stats */
+#define WL_PWRSTATS_TYPE_SCAN		1 /**< struct wl_pwr_scan_stats */
+#define WL_PWRSTATS_TYPE_USB_HSIC	2 /**< struct wl_pwr_usb_hsic_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE1	3 /**< struct wl_pwr_pm_awake_stats_v1 */
+#define WL_PWRSTATS_TYPE_CONNECTION	4 /* struct wl_pwr_connect_stats; assoc and key-exch time */
+#define WL_PWRSTATS_TYPE_PCIE		6 /**< struct wl_pwr_pcie_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE2	7 /**< struct wl_pwr_pm_awake_stats_v2 */
+#define WL_PWRSTATS_TYPE_SDIO		8 /* struct wl_pwr_sdio_stats */
+#define WL_PWRSTATS_TYPE_MIMO_PS_METRICS 9 /* struct wl_mimo_meas_metrics_t */
+
+#endif /* wlioctl_defs_h */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h b/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h
new file mode 100644
index 0000000..797531c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h
@@ -0,0 +1,61 @@
+/*
+ * Custom OID/ioctl related helper functions.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlioctl_utils.h 614820 2016-01-23 17:16:17Z $
+ */
+
+#ifndef _wlioctl_utils_h_
+#define _wlioctl_utils_h_
+
+#include <wlioctl.h>
+
+#ifndef BCMDRIVER
+#define CCA_THRESH_MILLI	14
+#define CCA_THRESH_INTERFERE	6
+
+extern cca_congest_channel_req_t * cca_per_chan_summary(cca_congest_channel_req_t *input,
+	cca_congest_channel_req_t *avg, bool percent);
+
+extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans,
+	uint flags, chanspec_t *answer);
+#endif /* BCMDRIVER */
+
+extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf,
+	int buflen, uint32 corerev);
+
+extern const char * wl_get_reinit_rc_name(int rc);
+
+/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */
+#define GET_WLCCNT_FROM_CNTBUF(cntbuf)						\
+		bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)cntbuf)->data,	\
+		((wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC,		\
+		NULL, BCM_XTLV_OPTION_ALIGN32)
+
+#define CHK_CNTBUF_DATALEN(cntbuf, ioctl_buflen) do {					\
+	if (((wl_cnt_info_t *)cntbuf)->datalen +			\
+		OFFSETOF(wl_cnt_info_t, data) > ioctl_buflen)	\
+		printf("%s: IOVAR buffer short!\n", __FUNCTION__);	\
+} while (0)
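A hedged usage sketch for the two helpers above; wl_cnt_info_t, WL_CNT_XTLV_WLC and bcm_get_data_from_xtlv_buf() come from other bcmdhd headers, so this only shows the intended call pattern:

    /* cntbuf is assumed to hold the response of the "counters" iovar. */
    static const void *get_wlc_counters(void *cntbuf, int ioctl_buflen)
    {
            CHK_CNTBUF_DATALEN(cntbuf, ioctl_buflen);   /* warns if the buffer is short */
            return GET_WLCCNT_FROM_CNTBUF(cntbuf);      /* NULL if the WLC tuple is absent */
    }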
+
+#endif /* _wlioctl_utils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wpa.h b/drivers/net/wireless/bcmdhd/include/wpa.h
new file mode 100644
index 0000000..fbb9c29
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wpa.h
@@ -0,0 +1,217 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wpa.h 700076 2017-05-17 14:42:22Z $
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <ethernet.h>
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Reason Codes */
+
+/* 13 through 23 taken from IEEE Std 802.11i-2004 */
+#define DOT11_RC_INVALID_WPA_IE		13	/* Invalid info. element */
+#define DOT11_RC_MIC_FAILURE		14	/* Michael failure */
+#define DOT11_RC_4WH_TIMEOUT		15	/* 4-way handshake timeout */
+#define DOT11_RC_GTK_UPDATE_TIMEOUT	16	/* Group key update timeout */
+#define DOT11_RC_WPA_IE_MISMATCH	17	/* WPA IE in 4-way handshake differs from
+						 * (re-)assoc. request/probe response
+						 */
+#define DOT11_RC_INVALID_MC_CIPHER	18	/* Invalid multicast cipher */
+#define DOT11_RC_INVALID_UC_CIPHER	19	/* Invalid unicast cipher */
+#define DOT11_RC_INVALID_AKMP		20	/* Invalid authenticated key management protocol */
+#define DOT11_RC_BAD_WPA_VERSION	21	/* Unsupported WPA version */
+#define DOT11_RC_INVALID_WPA_CAP	22	/* Invalid WPA IE capabilities */
+#define DOT11_RC_8021X_AUTH_FAIL	23	/* 802.1X authentication failure */
+
+#define WPA2_PMKID_LEN	16
+
+/* WPA IE fixed portion */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	uint8 oui[3];	/* IE OUI */
+	uint8 oui_type;	/* OUI type */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN	4
+#define WPA_IE_FIXED_LEN	8
+#define WPA_IE_TAG_FIXED_LEN	6
+
+#define BIP_OUI_TYPE WPA2_OUI "\x06"
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN	4
+#define WPA_RSN_IE_TAG_FIXED_LEN	2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+#define WFA_OSEN_IE_FIXED_LEN	6
+
+/* WPA suite/multicast suite */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 oui[3];
+	uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN	4
+
+/* WPA unicast suite list/key management suite list */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN	2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
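The low/high byte pairs in the packed structures above are 16-bit counts stored least-significant byte first. A tiny endian-safe reader (sketch only):

    static unsigned short wpa_count16(unsigned char low, unsigned char high)
    {
            return (unsigned short)(low | ((unsigned short)high << 8));
    }
    /* e.g. PMKID count = wpa_count16(pmkid_list->count.low, pmkid_list->count.high) */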
+
+/* WPA cipher suites */
+#define WPA_CIPHER_NONE		0	/* None */
+#define WPA_CIPHER_WEP_40	1	/* WEP (40-bit) */
+#define WPA_CIPHER_TKIP		2	/* TKIP: default for WPA */
+#define WPA_CIPHER_AES_OCB	3	/* AES (OCB) */
+#define WPA_CIPHER_AES_CCM	4	/* AES (CCM) */
+#define WPA_CIPHER_WEP_104	5	/* WEP (104-bit) */
+#define WPA_CIPHER_BIP		6	/* BIP (group management frame protection) */
+#define WPA_CIPHER_TPK		7	/* Group addressed traffic not allowed */
+
+#define WPA_CIPHER_AES_GCM	8	/* AES (GCM) */
+#define WPA_CIPHER_AES_GCM256	9	/* AES (GCM256) */
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CIPHER_NONE	WPA_CIPHER_NONE
+#define WAPI_CIPHER_SMS4	11
+
+#define WAPI_CSE_WPI_SMS4	1
+#endif /* BCMWAPI_WAI */
+
+#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
+				 (cipher) == WPA_CIPHER_WEP_40 || \
+				 (cipher) == WPA_CIPHER_WEP_104 || \
+				 (cipher) == WPA_CIPHER_TKIP || \
+				 (cipher) == WPA_CIPHER_AES_OCB || \
+				 (cipher) == WPA_CIPHER_AES_CCM || \
+				 (cipher) == WPA_CIPHER_AES_GCM || \
+				 (cipher) == WPA_CIPHER_AES_GCM256 || \
+				 (cipher) == WPA_CIPHER_TPK)
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_CIPHER(cipher)	((cipher) == WAPI_CIPHER_NONE || \
+				 (cipher) == WAPI_CSE_WPI_SMS4)
+
+/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */
+#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
+				WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
+
+#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
+				WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
+#endif /* BCMWAPI_WAI */
+
+/* WPA TKIP countermeasures parameters */
+#define WPA_TKIP_CM_DETECT	60	/* multiple MIC failure window (seconds) */
+#define WPA_TKIP_CM_BLOCK	60	/* countermeasures active window (seconds) */
+
+/* RSN IE defines */
+#define RSN_CAP_LEN		2	/* Length of RSN capabilities field (2 octets) */
+
+/* RSN Capabilities defined in 802.11i */
+#define RSN_CAP_PREAUTH			0x0001
+#define RSN_CAP_NOPAIRWISE		0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
+#define RSN_CAP_1_REPLAY_CNTR		0
+#define RSN_CAP_2_REPLAY_CNTRS		1
+#define RSN_CAP_4_REPLAY_CNTRS		2
+#define RSN_CAP_16_REPLAY_CNTRS		3
+#define RSN_CAP_MFPR			0x0040
+#define RSN_CAP_MFPC			0x0080
+#define RSN_CAP_SPPC			0x0400
+#define RSN_CAP_SPPR			0x0800
+
+/* WPA capabilities defined in 802.11i */
+#define WPA_CAP_4_REPLAY_CNTRS		RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS		RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+/* WPA capabilities defined in 802.11zD9.0 */
+#define WPA_CAP_PEER_KEY_ENABLE		(0x1 << 1)	/* bit 9 */
+
+/* WPA Specific defines */
+#define WPA_CAP_LEN	RSN_CAP_LEN	/* Length of RSN capabilities in RSN IE (2 octets) */
+#define WPA_PMKID_CNT_LEN	2	/* Length of RSN PMKID count (2 octets) */
+
+#define	WPA_CAP_WPA2_PREAUTH		RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN	2
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CAP_PREAUTH		RSN_CAP_PREAUTH
+
+/* Other WAI definition */
+#define WAPI_WAI_REQUEST		0x00F1
+#define WAPI_UNICAST_REKEY		0x00F2
+#define WAPI_STA_AGING			0x00F3
+#define WAPI_MUTIL_REKEY		0x00F4
+#define WAPI_STA_STATS			0x00F5
+
+#define WAPI_USK_REKEY_COUNT		0x4000000 /* 0xA00000 */
+#define WAPI_MSK_REKEY_COUNT		0x4000000 /* 0xA00000 */
+#endif /* BCMWAPI_WAI */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _proto_wpa_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wps.h b/drivers/net/wireless/bcmdhd/include/wps.h
new file mode 100644
index 0000000..aa4cc1b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wps.h
@@ -0,0 +1,389 @@
+/*
+ * WPS IE definitions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _WPS_
+#define _WPS_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Data Element Definitions */
+#define WPS_ID_AP_CHANNEL         0x1001
+#define WPS_ID_ASSOC_STATE        0x1002
+#define WPS_ID_AUTH_TYPE          0x1003
+#define WPS_ID_AUTH_TYPE_FLAGS    0x1004
+#define WPS_ID_AUTHENTICATOR      0x1005
+#define WPS_ID_CONFIG_METHODS     0x1008
+#define WPS_ID_CONFIG_ERROR       0x1009
+#define WPS_ID_CONF_URL4          0x100A
+#define WPS_ID_CONF_URL6          0x100B
+#define WPS_ID_CONN_TYPE          0x100C
+#define WPS_ID_CONN_TYPE_FLAGS    0x100D
+#define WPS_ID_CREDENTIAL         0x100E
+#define WPS_ID_DEVICE_NAME        0x1011
+#define WPS_ID_DEVICE_PWD_ID      0x1012
+#define WPS_ID_E_HASH1            0x1014
+#define WPS_ID_E_HASH2            0x1015
+#define WPS_ID_E_SNONCE1          0x1016
+#define WPS_ID_E_SNONCE2          0x1017
+#define WPS_ID_ENCR_SETTINGS      0x1018
+#define WPS_ID_ENCR_TYPE          0x100F
+#define WPS_ID_ENCR_TYPE_FLAGS    0x1010
+#define WPS_ID_ENROLLEE_NONCE     0x101A
+#define WPS_ID_FEATURE_ID         0x101B
+#define WPS_ID_IDENTITY           0x101C
+#define WPS_ID_IDENTITY_PROOF     0x101D
+#define WPS_ID_KEY_WRAP_AUTH      0x101E
+#define WPS_ID_KEY_IDENTIFIER     0x101F
+#define WPS_ID_MAC_ADDR           0x1020
+#define WPS_ID_MANUFACTURER       0x1021
+#define WPS_ID_MSG_TYPE           0x1022
+#define WPS_ID_MODEL_NAME         0x1023
+#define WPS_ID_MODEL_NUMBER       0x1024
+#define WPS_ID_NW_INDEX           0x1026
+#define WPS_ID_NW_KEY             0x1027
+#define WPS_ID_NW_KEY_INDEX       0x1028
+#define WPS_ID_NEW_DEVICE_NAME    0x1029
+#define WPS_ID_NEW_PWD            0x102A
+#define WPS_ID_OOB_DEV_PWD        0x102C
+#define WPS_ID_OS_VERSION         0x102D
+#define WPS_ID_POWER_LEVEL        0x102F
+#define WPS_ID_PSK_CURRENT        0x1030
+#define WPS_ID_PSK_MAX            0x1031
+#define WPS_ID_PUBLIC_KEY         0x1032
+#define WPS_ID_RADIO_ENABLED      0x1033
+#define WPS_ID_REBOOT             0x1034
+#define WPS_ID_REGISTRAR_CURRENT  0x1035
+#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036
+#define WPS_ID_REGISTRAR_LIST     0x1037
+#define WPS_ID_REGISTRAR_MAX      0x1038
+#define WPS_ID_REGISTRAR_NONCE    0x1039
+#define WPS_ID_REQ_TYPE           0x103A
+#define WPS_ID_RESP_TYPE          0x103B
+#define WPS_ID_RF_BAND            0x103C
+#define WPS_ID_R_HASH1            0x103D
+#define WPS_ID_R_HASH2            0x103E
+#define WPS_ID_R_SNONCE1          0x103F
+#define WPS_ID_R_SNONCE2          0x1040
+#define WPS_ID_SEL_REGISTRAR      0x1041
+#define WPS_ID_SERIAL_NUM         0x1042
+#define WPS_ID_SC_STATE           0x1044
+#define WPS_ID_SSID               0x1045
+#define WPS_ID_TOT_NETWORKS       0x1046
+#define WPS_ID_UUID_E             0x1047
+#define WPS_ID_UUID_R             0x1048
+#define WPS_ID_VENDOR_EXT         0x1049
+#define WPS_ID_VERSION            0x104A
+#define WPS_ID_X509_CERT_REQ      0x104B
+#define WPS_ID_X509_CERT          0x104C
+#define WPS_ID_EAP_IDENTITY       0x104D
+#define WPS_ID_MSG_COUNTER        0x104E
+#define WPS_ID_PUBKEY_HASH        0x104F
+#define WPS_ID_REKEY_KEY          0x1050
+#define WPS_ID_KEY_LIFETIME       0x1051
+#define WPS_ID_PERM_CFG_METHODS   0x1052
+#define WPS_ID_SEL_REG_CFG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE      0x1054
+#define WPS_ID_SEC_DEV_TYPE_LIST  0x1055
+#define WPS_ID_PORTABLE_DEVICE    0x1056
+#define WPS_ID_AP_SETUP_LOCKED    0x1057
+#define WPS_ID_APP_LIST           0x1058
+#define WPS_ID_EAP_TYPE           0x1059
+#define WPS_ID_INIT_VECTOR        0x1060
+#define WPS_ID_KEY_PROVIDED_AUTO  0x1061
+#define WPS_ID_8021X_ENABLED      0x1062
+#define WPS_ID_WEP_TRANSMIT_KEY   0x1064
+#define WPS_ID_REQ_DEV_TYPE       0x106A
+
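The Data Element IDs above identify WPS attributes, which are carried as TLVs with a 16-bit ID and a 16-bit length, both big-endian, followed by the value. A minimal attribute lookup over such a buffer (sketch only; real parsers also handle fragmented attributes):

    #include <stddef.h>
    #include <stdint.h>

    static const uint8_t *wps_find_attr(const uint8_t *buf, size_t len,
                                        uint16_t id, uint16_t *out_len)
    {
            while (len >= 4) {
                    uint16_t t = (uint16_t)((buf[0] << 8) | buf[1]);
                    uint16_t l = (uint16_t)((buf[2] << 8) | buf[3]);

                    if (len - 4 < l)
                            break;              /* truncated attribute */
                    if (t == id) {
                            *out_len = l;
                            return buf + 4;     /* start of the value */
                    }
                    buf += 4 + (size_t)l;
                    len -= 4 + (size_t)l;
            }
            return NULL;
    }
    /* e.g. wps_find_attr(wps_ie_data, wps_ie_len, WPS_ID_DEVICE_NAME, &dlen) */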
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WFA_VENDOR_EXT_ID                 "\x00\x37\x2A"
+#define WPS_WFA_SUBID_VERSION2            0x00
+#define WPS_WFA_SUBID_AUTHORIZED_MACS     0x01
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE    0x02
+#define WPS_WFA_SUBID_REQ_TO_ENROLL       0x03
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04
+#define WPS_WFA_SUBID_REG_CFG_METHODS     0x05
+
+
+/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */
+#define MS_VENDOR_EXT_ID           "\x00\x01\x37"
+#define WPS_MS_ID_VPI               0x1001	/* Vertical Pairing Identifier TLV */
+#define WPS_MS_ID_TRANSPORT_UUID    0x1002      /* Transport UUID TLV */
+
+/* Vertical Pairing Identifier TLV Definitions */
+#define WPS_MS_VPI_TRANSPORT_NONE   0x00        /* None */
+#define WPS_MS_VPI_TRANSPORT_DPWS   0x01        /* Devices Profile for Web Services */
+#define WPS_MS_VPI_TRANSPORT_UPNP   0x02        /* uPnP */
+#define WPS_MS_VPI_TRANSPORT_SDNWS  0x03        /* Secure Devices Profile for Web Services */
+#define WPS_MS_VPI_NO_PROFILE_REQ   0x00        /* Wi-Fi profile not requested.
+						 * Not supported in Windows 7
+						 */
+#define WPS_MS_VPI_PROFILE_REQ      0x01        /* Wi-Fi profile requested.  */
+
+/* sizes of the fixed size elements */
+#define WPS_ID_AP_CHANNEL_S       2
+#define WPS_ID_ASSOC_STATE_S      2
+#define WPS_ID_AUTH_TYPE_S        2
+#define WPS_ID_AUTH_TYPE_FLAGS_S  2
+#define WPS_ID_AUTHENTICATOR_S    8
+#define WPS_ID_CONFIG_METHODS_S   2
+#define WPS_ID_CONFIG_ERROR_S     2
+#define WPS_ID_CONN_TYPE_S          1
+#define WPS_ID_CONN_TYPE_FLAGS_S    1
+#define WPS_ID_DEVICE_PWD_ID_S      2
+#define WPS_ID_ENCR_TYPE_S          2
+#define WPS_ID_ENCR_TYPE_FLAGS_S    2
+#define WPS_ID_FEATURE_ID_S         4
+#define WPS_ID_MAC_ADDR_S           6
+#define WPS_ID_MSG_TYPE_S           1
+#define WPS_ID_SC_STATE_S           1
+#define WPS_ID_RF_BAND_S            1
+#define WPS_ID_OS_VERSION_S         4
+#define WPS_ID_VERSION_S            1
+#define WPS_ID_SEL_REGISTRAR_S      1
+#define WPS_ID_SEL_REG_CFG_METHODS_S 2
+#define WPS_ID_REQ_TYPE_S           1
+#define WPS_ID_RESP_TYPE_S          1
+#define WPS_ID_AP_SETUP_LOCKED_S    1
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WPS_WFA_SUBID_VERSION2_S            1
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S    1
+#define WPS_WFA_SUBID_REQ_TO_ENROLL_S       1
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1
+#define WPS_WFA_SUBID_REG_CFG_METHODS_S     2
+
+/* Association states */
+#define WPS_ASSOC_NOT_ASSOCIATED  0
+#define WPS_ASSOC_CONN_SUCCESS    1
+#define WPS_ASSOC_CONFIG_FAIL     2
+#define WPS_ASSOC_ASSOC_FAIL      3
+#define WPS_ASSOC_IP_FAIL         4
+
+/* Authentication types */
+#define WPS_AUTHTYPE_OPEN        0x0001
+#define WPS_AUTHTYPE_WPAPSK      0x0002	/* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_SHARED      0x0004	/* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA         0x0008	/* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA2        0x0010
+#define WPS_AUTHTYPE_WPA2PSK     0x0020
+
+/* Config methods */
+#define WPS_CONFMET_USBA            0x0001	/* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_ETHERNET        0x0002	/* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_LABEL           0x0004
+#define WPS_CONFMET_DISPLAY         0x0008
+#define WPS_CONFMET_EXT_NFC_TOK     0x0010
+#define WPS_CONFMET_INT_NFC_TOK     0x0020
+#define WPS_CONFMET_NFC_INTF        0x0040
+#define WPS_CONFMET_PBC             0x0080
+#define WPS_CONFMET_KEYPAD          0x0100
+/* WSC 2.0 */
+#define WPS_CONFMET_VIRT_PBC        0x0280
+#define WPS_CONFMET_PHY_PBC         0x0480
+#define WPS_CONFMET_VIRT_DISPLAY    0x2008
+#define WPS_CONFMET_PHY_DISPLAY     0x4008
+
+/* WPS error messages */
+#define WPS_ERROR_NO_ERROR                0
+#define WPS_ERROR_OOB_INT_READ_ERR        1
+#define WPS_ERROR_DECRYPT_CRC_FAIL        2
+#define WPS_ERROR_CHAN24_NOT_SUPP         3
+#define WPS_ERROR_CHAN50_NOT_SUPP         4
+#define WPS_ERROR_SIGNAL_WEAK             5	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_AUTH_FAIL            6	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_ASSOC_FAIL           7	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NO_DHCP_RESP            8	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAILED_DHCP_CONF        9	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_IP_ADDR_CONFLICT        10	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAIL_CONN_REGISTRAR     11
+#define WPS_ERROR_MULTI_PBC_DETECTED      12
+#define WPS_ERROR_ROGUE_SUSPECTED         13
+#define WPS_ERROR_DEVICE_BUSY             14
+#define WPS_ERROR_SETUP_LOCKED            15
+#define WPS_ERROR_MSG_TIMEOUT             16	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_REG_SESSION_TIMEOUT     17	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_DEV_PWD_AUTH_FAIL       18
+#define WPS_ERROR_60GHZ_NOT_SUPPORT       19
+#define WPS_ERROR_PKH_MISMATCH            20	/* Public Key Hash Mismatch */
+
+/* Connection types */
+#define WPS_CONNTYPE_ESS    0x01
+#define WPS_CONNTYPE_IBSS   0x02
+
+/* Device password ID */
+#define WPS_DEVICEPWDID_DEFAULT          0x0000
+#define WPS_DEVICEPWDID_USER_SPEC        0x0001
+#define WPS_DEVICEPWDID_MACHINE_SPEC     0x0002
+#define WPS_DEVICEPWDID_REKEY            0x0003
+#define WPS_DEVICEPWDID_PUSH_BTN         0x0004
+#define WPS_DEVICEPWDID_REG_SPEC         0x0005
+#define WPS_DEVICEPWDID_IBSS             0x0006
+#define WPS_DEVICEPWDID_NFC_CHO          0x0007	/* NFC-Connection-Handover */
+#define WPS_DEVICEPWDID_WFDS             0x0008	/* Wi-Fi Direct Services Specification */
+
+/* Encryption type */
+#define WPS_ENCRTYPE_NONE    0x0001
+#define WPS_ENCRTYPE_WEP     0x0002	/* Deprecated in WSC 2.0 */
+#define WPS_ENCRTYPE_TKIP    0x0004	/* Deprecated in version 2.0. TKIP can only
+					  * be advertised on the AP when Mixed Mode
+					  * is enabled (Encryption Type is 0x000c).
+					  */
+#define WPS_ENCRTYPE_AES     0x0008
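The "Mixed Mode" value mentioned in the TKIP comment above is simply TKIP and AES advertised together (0x0004 | 0x0008 == 0x000C). A one-line check (sketch):

    static int wps_encrtype_is_mixed(unsigned int encrtype)
    {
            return encrtype == (WPS_ENCRTYPE_TKIP | WPS_ENCRTYPE_AES);
    }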
+
+
+/* WPS Message Types */
+#define WPS_ID_BEACON            0x01
+#define WPS_ID_PROBE_REQ         0x02
+#define WPS_ID_PROBE_RESP        0x03
+#define WPS_ID_MESSAGE_M1        0x04
+#define WPS_ID_MESSAGE_M2        0x05
+#define WPS_ID_MESSAGE_M2D       0x06
+#define WPS_ID_MESSAGE_M3        0x07
+#define WPS_ID_MESSAGE_M4        0x08
+#define WPS_ID_MESSAGE_M5        0x09
+#define WPS_ID_MESSAGE_M6        0x0A
+#define WPS_ID_MESSAGE_M7        0x0B
+#define WPS_ID_MESSAGE_M8        0x0C
+#define WPS_ID_MESSAGE_ACK       0x0D
+#define WPS_ID_MESSAGE_NACK      0x0E
+#define WPS_ID_MESSAGE_DONE      0x0F
+
+/* WPS private ID for local use */
+#define WPS_PRIVATE_ID_IDENTITY		(WPS_ID_MESSAGE_DONE + 1)
+#define WPS_PRIVATE_ID_WPS_START	(WPS_ID_MESSAGE_DONE + 2)
+#define WPS_PRIVATE_ID_FAILURE		(WPS_ID_MESSAGE_DONE + 3)
+#define WPS_PRIVATE_ID_FRAG		(WPS_ID_MESSAGE_DONE + 4)
+#define WPS_PRIVATE_ID_FRAG_ACK		(WPS_ID_MESSAGE_DONE + 5)
+#define WPS_PRIVATE_ID_EAPOL_START	(WPS_ID_MESSAGE_DONE + 6)
+
+
+/* Device Type categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_CAT_COMPUTER        1
+#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE    2
+#define WPS_DEVICE_TYPE_CAT_PRINTER         3
+#define WPS_DEVICE_TYPE_CAT_CAMERA          4
+#define WPS_DEVICE_TYPE_CAT_STORAGE         5
+#define WPS_DEVICE_TYPE_CAT_NW_INFRA        6
+#define WPS_DEVICE_TYPE_CAT_DISPLAYS        7
+#define WPS_DEVICE_TYPE_CAT_MM_DEVICES      8
+#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES    9
+#define WPS_DEVICE_TYPE_CAT_TELEPHONE       10
+#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES   11	/* WSC 2.0 */
+
+/* Device Type sub categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC         1
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER     2
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR  3
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC      4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK   5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP    6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID        7	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK    8	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard    1	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE       2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK    3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL   4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL    5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE      6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER  8	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER  9	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER    1
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER    2
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX        3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER     4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE   5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL  1
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM   2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM     3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM    4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS        1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP           1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER       2
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH       3
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY      4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE       5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV         1
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME  2
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR  3
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR    4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR          1
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR          2
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX          3
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB          4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME        5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP          6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX        1
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360    2
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS          3
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC          4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD         5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM        1
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM       2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM       3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM       4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM       5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER     1	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS  2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP       3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET   4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE    5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE    6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS       7	/* WSC 2.0 */
+
+
+/* Device request/response type */
+#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY    0x00
+#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X   0x01
+#define WPS_MSGTYPE_REGISTRAR             0x02
+#define WPS_MSGTYPE_AP_WLAN_MGR           0x03
+
+/* RF Band */
+#define WPS_RFBAND_24GHZ    0x01
+#define WPS_RFBAND_50GHZ    0x02
+
+/* Simple Config state */
+#define WPS_SCSTATE_UNCONFIGURED    0x01
+#define WPS_SCSTATE_CONFIGURED      0x02
+#define WPS_SCSTATE_OFF 11
+
+/* WPS Vendor extension key */
+#define WPS_OUI_HEADER_LEN 2
+#define WPS_OUI_HEADER_SIZE 4
+#define WPS_OUI_FIXED_HEADER_OFF 16
+#define WPS_WFA_SUBID_V2_OFF 3
+#define WPS_WFA_V2_OFF 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WPS_ */
diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c
new file mode 100644
index 0000000..5de253d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -0,0 +1,2781 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: linux_osl.c 680580 2017-01-20 11:49:58Z $
+ */
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+
+
+#if !defined(STBLINUX)
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
+#include <asm/cacheflush.h>
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+#endif /* STBLINUX */
+
+#include <linux/random.h>
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <pcicfg.h>
+
+
+#ifdef BCM_SECURE_DMA
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <asm/io.h>
+#include <linux/skbuff.h>
+#include <stbutils.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <asm/memory.h>
+#endif /* BCM_SECURE_DMA */
+
+#include <linux/fs.h>
+
+#if defined(STB)
+#include <linux/spinlock.h>
+extern spinlock_t l2x0_reg_lock;
+#endif 
+
+#ifdef BCM_OBJECT_TRACE
+#include <bcmutils.h>
+#endif /* BCM_OBJECT_TRACE */
+
+#define PCI_CFG_RETRY		10
+
+#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
+#define DUMPBUFSZ 1024
+
+/* dependency check */
+#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
+#error "DHD_USE_STATIC_CTRLBUF supported on PCIE targets only"
+#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define DHD_SKB_1PAGE_BUFSIZE	(PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE	(PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE	(PAGE_SIZE*4)
+
+#define PREALLOC_FREE_MAGIC	0xFEDC
+#define PREALLOC_USED_MAGIC	0xFCDE
+#else
+#define DHD_SKB_HDRSIZE		336
+#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#define STATIC_BUF_MAX_NUM	16
+#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+	spinlock_t static_lock;
+	unsigned char *buf_ptr;
+	unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_4PAGE_NUM	0
+#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
+#elif defined(ENHANCED_STATIC_BUF)
+#define STATIC_PKT_4PAGE_NUM	1
+#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
+#else
+#define STATIC_PKT_4PAGE_NUM	0
+#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_1PAGE_NUM	0
+#define STATIC_PKT_2PAGE_NUM	128
+#else
+#define STATIC_PKT_1PAGE_NUM	8
+#define STATIC_PKT_2PAGE_NUM	8
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#define STATIC_PKT_1_2PAGE_NUM	\
+	((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
+#define STATIC_PKT_MAX_NUM	\
+	((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
+
+typedef struct bcm_static_pkt {
+#ifdef DHD_USE_STATIC_CTRLBUF
+	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+	unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
+	spinlock_t osl_pkt_lock;
+	uint32 last_allocated_index;
+#else
+	struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
+	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+#ifdef ENHANCED_STATIC_BUF
+	struct sk_buff *skb_16k;
+#endif /* ENHANCED_STATIC_BUF */
+	struct semaphore osl_pkt_sem;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+	unsigned char pkt_use[STATIC_PKT_MAX_NUM];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+
+void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+typedef struct bcm_mem_link {
+	struct bcm_mem_link *prev;
+	struct bcm_mem_link *next;
+	uint	size;
+	int	line;
+	void 	*osh;
+	char	file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_cmn_info {
+	atomic_t malloced;
+	atomic_t pktalloced;    /* Number of allocated packet buffers */
+	spinlock_t dbgmem_lock;
+	bcm_mem_link_t *dbgmem_list;
+	bcm_mem_link_t *dbgvmem_list;
+	spinlock_t pktalloc_lock;
+	atomic_t refcount; /* Number of references to this shared structure. */
+};
+typedef struct osl_cmn_info osl_cmn_t;
+
+struct osl_info {
+	osl_pubinfo_t pub;
+	uint32  flags;		/* Flags for specific cases to be handled in the OSL */
+#ifdef CTFPOOL
+	ctfpool_t *ctfpool;
+#endif /* CTFPOOL */
+	uint magic;
+	void *pdev;
+	uint failed;
+	uint bustype;
+	osl_cmn_t *cmn; /* Common OSL related data shared between two OSHs */
+
+	void *bus_handle;
+#ifdef BCMDBG_CTRACE
+	spinlock_t ctrace_lock;
+	struct list_head ctrace_list;
+	int ctrace_num;
+#endif /* BCMDBG_CTRACE */
+#ifdef	BCM_SECURE_DMA
+	struct sec_mem_elem *sec_list_4096;
+	struct sec_mem_elem *sec_list_base_4096;
+	phys_addr_t  contig_base;
+	void *contig_base_va;
+	phys_addr_t  contig_base_alloc;
+	void *contig_base_alloc_va;
+	phys_addr_t contig_base_alloc_coherent;
+	void *contig_base_alloc_coherent_va;
+	void *contig_base_coherent_va;
+	void *contig_delta_va_pa;
+	struct {
+		phys_addr_t pa;
+		void *va;
+		bool avail;
+	} sec_cma_coherent[SEC_CMA_COHERENT_MAX];
+	int stb_ext_params;
+#endif /* BCM_SECURE_DMA */
+};
+#ifdef BCM_SECURE_DMA
+static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
+	bool iscache, bool isdecr);
+static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size);
+static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max,
+	sec_mem_elem_t **list);
+static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max,
+	void *sec_list_base);
+static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size,
+	int direction, struct sec_cma_info *ptr_cma_info, uint offset);
+static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem);
+static void osl_sec_dma_init_consistent(osl_t *osh);
+static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits,
+	ulong *pap);
+static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
+#endif /* BCM_SECURE_DMA */
+
+#ifdef BCM_OBJECT_TRACE
+/* don't clear the first 4 byte that is the pkt sn */
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+#else
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+#endif /* BCM_OBJECT_TRACE */
+
+/* PCMCIA attribute space access macros */
+
+uint32 g_assert_type = 0; /* By Default Kernel Panic */
+
+module_param(g_assert_type, int, 0);
+#ifdef	BCM_SECURE_DMA
+#define	SECDMA_MODULE_PARAMS	0
+#define	SECDMA_EXT_FILE	1
+unsigned long secdma_addr = 0;
+unsigned long secdma_addr2 = 0;
+u32 secdma_size = 0;
+u32 secdma_size2 = 0;
+module_param(secdma_addr, ulong, 0);
+module_param(secdma_size, int, 0);
+module_param(secdma_addr2, ulong, 0);
+module_param(secdma_size2, int, 0);
+static int secdma_found = 0;
+#endif /* BCM_SECURE_DMA */
+
+static int16 linuxbcmerrormap[] =
+{	0,				/* 0 */
+	-EINVAL,		/* BCME_ERROR */
+	-EINVAL,		/* BCME_BADARG */
+	-EINVAL,		/* BCME_BADOPTION */
+	-EINVAL,		/* BCME_NOTUP */
+	-EINVAL,		/* BCME_NOTDOWN */
+	-EINVAL,		/* BCME_NOTAP */
+	-EINVAL,		/* BCME_NOTSTA */
+	-EINVAL,		/* BCME_BADKEYIDX */
+	-EINVAL,		/* BCME_RADIOOFF */
+	-EINVAL,		/* BCME_NOTBANDLOCKED */
+	-EINVAL, 		/* BCME_NOCLK */
+	-EINVAL, 		/* BCME_BADRATESET */
+	-EINVAL, 		/* BCME_BADBAND */
+	-E2BIG,			/* BCME_BUFTOOSHORT */
+	-E2BIG,			/* BCME_BUFTOOLONG */
+	-EBUSY, 		/* BCME_BUSY */
+	-EINVAL, 		/* BCME_NOTASSOCIATED */
+	-EINVAL, 		/* BCME_BADSSIDLEN */
+	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
+	-EINVAL, 		/* BCME_BADCHAN */
+	-EFAULT, 		/* BCME_BADADDR */
+	-ENOMEM, 		/* BCME_NORESOURCE */
+	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
+	-EMSGSIZE,		/* BCME_BADLENGTH */
+	-EINVAL,		/* BCME_NOTREADY */
+	-EPERM,			/* BCME_EPERM */
+	-ENOMEM, 		/* BCME_NOMEM */
+	-EINVAL, 		/* BCME_ASSOCIATED */
+	-ERANGE, 		/* BCME_RANGE */
+	-EINVAL, 		/* BCME_NOTFOUND */
+	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
+	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
+	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
+	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
+	-EIO,			/* BCME_SDIO_ERROR */
+	-ENODEV,		/* BCME_DONGLE_DOWN */
+	-EINVAL,		/* BCME_VERSION */
+	-EIO,			/* BCME_TXFAIL */
+	-EIO,			/* BCME_RXFAIL */
+	-ENODEV,		/* BCME_NODEVICE */
+	-EINVAL,		/* BCME_NMODE_DISABLED */
+	-ENODATA,		/* BCME_NONRESIDENT */
+	-EINVAL,		/* BCME_SCANREJECT */
+	-EINVAL,		/* BCME_USAGE_ERROR */
+	-EIO,     		/* BCME_IOCTL_ERROR */
+	-EIO,			/* BCME_SERIAL_PORT_ERR */
+	-EOPNOTSUPP,	/* BCME_DISABLED, BCME_NOTENABLED */
+	-EIO,			/* BCME_DECERR */
+	-EIO,			/* BCME_ENCERR */
+	-EIO,			/* BCME_MICERR */
+	-ERANGE,		/* BCME_REPLAY */
+	-EINVAL,		/* BCME_IE_NOTFOUND */
+	-EINVAL,		/* BCME_DATA_NOTFOUND */
+	-EINVAL,        /* BCME_NOT_GC */
+	-EINVAL,        /* BCME_PRS_REQ_FAILED */
+	-EINVAL,        /* BCME_NO_P2P_SE */
+	-EINVAL,        /* BCME_NOA_PND */
+	-EINVAL,        /* BCME_FRAG_Q_FAILED */
+	-EINVAL,        /* BCME_GET_AF_FAILED */
+	-EINVAL,		/* BCME_MSCH_NOTREADY */
+
+/* When a new error code is added to bcmutils.h, add an OS-specific
+ * error translation here as well.
+ */
+/* check if BCME_LAST changed since the last time this function was updated */
+#if BCME_LAST != -60
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+	for new error code defined in bcmutils.h"
+#endif
+};
+uint lmtest = FALSE;
+
+/* translate bcmerrors into linux errors */
+int
+osl_error(int bcmerror)
+{
+	if (bcmerror > 0)
+		bcmerror = 0;
+	else if (bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+
+	/* Array bounds covered by ASSERT in osl_attach */
+	return linuxbcmerrormap[-bcmerror];
+}
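+
+/*
+ * For example (per the table above), osl_error(BCME_NOMEM) returns -ENOMEM
+ * and osl_error(BCME_UNSUPPORTED) returns -EOPNOTSUPP; any positive bcmerror
+ * maps to 0, and anything below BCME_LAST is collapsed to the BCME_ERROR
+ * entry (-EINVAL).
+ */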
+
+osl_t *
+#ifdef SHARED_OSL_CMN
+osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
+#else
+osl_attach(void *pdev, uint bustype, bool pkttag)
+#endif /* SHARED_OSL_CMN */
+{
+#ifndef SHARED_OSL_CMN
+	void **osl_cmn = NULL;
+#endif /* SHARED_OSL_CMN */
+	osl_t *osh;
+	gfp_t flags;
+#ifdef BCM_SECURE_DMA
+	u32 secdma_memsize;
+#endif
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	if (!(osh = kmalloc(sizeof(osl_t), flags)))
+		return osh;
+
+	ASSERT(osh);
+
+	bzero(osh, sizeof(osl_t));
+
+	if (osl_cmn == NULL || *osl_cmn == NULL) {
+		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
+			kfree(osh);
+			return NULL;
+		}
+		bzero(osh->cmn, sizeof(osl_cmn_t));
+		if (osl_cmn)
+			*osl_cmn = osh->cmn;
+		atomic_set(&osh->cmn->malloced, 0);
+		osh->cmn->dbgmem_list = NULL;
+		spin_lock_init(&(osh->cmn->dbgmem_lock));
+
+		spin_lock_init(&(osh->cmn->pktalloc_lock));
+
+	} else {
+		osh->cmn = *osl_cmn;
+	}
+	atomic_add(1, &osh->cmn->refcount);
+
+	bcm_object_trace_init();
+
+	/* Check that error map has the right number of entries in it */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+	osh->failed = 0;
+	osh->pdev = pdev;
+	osh->pub.pkttag = pkttag;
+	osh->bustype = bustype;
+	osh->magic = OS_HANDLE_MAGIC;
+#ifdef BCM_SECURE_DMA
+
+	if ((secdma_addr != 0) && (secdma_size != 0)) {
+		printk("linux_osl.c: Buffer info passed via module params, using it.\n");
+		if (secdma_found == 0) {
+			osh->contig_base_alloc = (phys_addr_t)secdma_addr;
+			secdma_memsize = secdma_size;
+		} else if (secdma_found == 1) {
+			osh->contig_base_alloc = (phys_addr_t)secdma_addr2;
+			secdma_memsize = secdma_size2;
+		} else {
+			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
+			kfree(osh);
+			return NULL;
+		}
+		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
+		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
+		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
+			(unsigned int)osh->contig_base_alloc);
+		osh->stb_ext_params = SECDMA_MODULE_PARAMS;
+	}
+	else if (stbpriv_init(osh) == 0) {
+		printk("linux_osl.c: stbpriv.txt found. Get buffer info.\n");
+		if (secdma_found == 0) {
+			osh->contig_base_alloc =
+				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0);
+			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0);
+		} else if (secdma_found == 1) {
+			osh->contig_base_alloc =
+				(phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0);
+			secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0);
+		} else {
+			printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found);
+			kfree(osh);
+			return NULL;
+		}
+		osh->contig_base = (phys_addr_t)osh->contig_base_alloc;
+		printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize);
+		printf("linux_osl.c: secdma_cma_addr = 0x%x \n",
+			(unsigned int)osh->contig_base_alloc);
+		osh->stb_ext_params = SECDMA_EXT_FILE;
+	}
+	else {
+		printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n");
+		kfree(osh);
+		return NULL;
+	}
+	secdma_found++;
+	osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
+		phys_to_page((u32)osh->contig_base_alloc),
+		CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);
+
+	if (osh->contig_base_alloc_coherent_va == NULL) {
+		if (osh->cmn)
+			kfree(osh->cmn);
+	    kfree(osh);
+	    return NULL;
+	}
+	osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va;
+	osh->contig_base_alloc_coherent = osh->contig_base_alloc;
+	osl_sec_dma_init_consistent(osh);
+
+	osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK;
+
+	osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh,
+		phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE);
+	if (osh->contig_base_alloc_va == NULL) {
+		osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
+		if (osh->cmn)
+			kfree(osh->cmn);
+		kfree(osh);
+		return NULL;
+	}
+	osh->contig_base_va = osh->contig_base_alloc_va;
+
+	if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
+		CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
+	    osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
+	    osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
+		if (osh->cmn)
+			kfree(osh->cmn);
+		kfree(osh);
+		return NULL;
+	}
+	osh->sec_list_base_4096 = osh->sec_list_4096;
+
+#endif /* BCM_SECURE_DMA */
+
+	switch (bustype) {
+		case PCI_BUS:
+		case SI_BUS:
+		case PCMCIA_BUS:
+			osh->pub.mmbus = TRUE;
+			break;
+		case JTAG_BUS:
+		case SDIO_BUS:
+		case USB_BUS:
+		case SPI_BUS:
+		case RPC_BUS:
+			osh->pub.mmbus = FALSE;
+			break;
+		default:
+			ASSERT(FALSE);
+			break;
+	}
+
+#ifdef BCMDBG_CTRACE
+	spin_lock_init(&osh->ctrace_lock);
+	INIT_LIST_HEAD(&osh->ctrace_list);
+	osh->ctrace_num = 0;
+#endif /* BCMDBG_CTRACE */
+
+
+	return osh;
+}
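+
+/*
+ * Callers are expected to pair osl_attach() with osl_detach() and, when
+ * preallocated buffers are used, osl_static_mem_init() with
+ * osl_static_mem_deinit(). An illustrative (not prescriptive) sequence:
+ *     osh = osl_attach(pdev, PCI_BUS, TRUE);
+ *     osl_static_mem_init(osh, adapter);
+ *     ...
+ *     osl_static_mem_deinit(osh, adapter);
+ *     osl_detach(osh);
+ */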
+
+int osl_static_mem_init(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+		if (!bcm_static_buf && adapter) {
+			if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+				3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+				printk("cannot alloc static buf!\n");
+				bcm_static_skb = NULL;
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				return -ENOMEM;
+			} else {
+				printk("alloc static buf at %p!\n", bcm_static_buf);
+			}
+
+			spin_lock_init(&bcm_static_buf->static_lock);
+
+			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+		}
+
+#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
+		if (!bcm_static_skb && adapter) {
+			int i;
+			void *skb_buff_ptr = 0;
+			bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+			skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+			if (!skb_buff_ptr) {
+				printk("cannot alloc static buf!\n");
+				bcm_static_buf = NULL;
+				bcm_static_skb = NULL;
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				return -ENOMEM;
+			}
+
+			bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+				(STATIC_PKT_MAX_NUM));
+			for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+				bcm_static_skb->pkt_use[i] = 0;
+			}
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+			spin_lock_init(&bcm_static_skb->osl_pkt_lock);
+			bcm_static_skb->last_allocated_index = 0;
+#else
+			sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+		}
+#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	return 0;
+}
+
+void osl_set_bus_handle(osl_t *osh, void *bus_handle)
+{
+	osh->bus_handle = bus_handle;
+}
+
+void* osl_get_bus_handle(osl_t *osh)
+{
+	return osh->bus_handle;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+	if (osh == NULL)
+		return;
+
+#ifdef BCM_SECURE_DMA
+	if (osh->stb_ext_params == SECDMA_EXT_FILE)
+		stbpriv_exit(osh);
+	osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
+	osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
+	osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
+	secdma_found--;
+#endif /* BCM_SECURE_DMA */
+
+
+	bcm_object_trace_deinit();
+
+	ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	atomic_sub(1, &osh->cmn->refcount);
+	if (atomic_read(&osh->cmn->refcount) == 0) {
+			kfree(osh->cmn);
+	}
+	kfree(osh);
+}
+
+int osl_static_mem_deinit(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+#ifdef BCMSDIO
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	return 0;
+}
+
+/* APIs to set/get specific quirks in OSL layer */
+void BCMFASTPATH
+osl_flag_set(osl_t *osh, uint32 mask)
+{
+	osh->flags |= mask;
+}
+
+void
+osl_flag_clr(osl_t *osh, uint32 mask)
+{
+	osh->flags &= ~mask;
+}
+
+#if defined(STB)
+inline bool BCMFASTPATH
+#else
+bool
+#endif 
+osl_is_flag_set(osl_t *osh, uint32 mask)
+{
+	return (osh->flags & mask);
+}
+
+
+#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
+
+inline int BCMFASTPATH
+osl_arch_is_coherent(void)
+{
+	return 0;
+}
+
+inline int BCMFASTPATH
+osl_acp_war_enab(void)
+{
+	return 0;
+}
+
+inline void BCMFASTPATH
+osl_cache_flush(void *va, uint size)
+{
+
+	if (size > 0)
+		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TO_DEVICE);
+}
+
+inline void BCMFASTPATH
+osl_cache_inv(void *va, uint size)
+{
+
+	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
+}
+
+inline void BCMFASTPATH
+osl_prefetch(const void *ptr)
+{
+	__asm__ __volatile__("pld\t%0" :: "o"(*(char *)ptr) : "cc");
+}
+
+#endif 
+
+/*
+ * To avoid ACP latency, a fwder buf will be sent directly to DDR using
+ * DDR aliasing into non-ACP address space. Such Fwder buffers must be
+ * explicitly managed from a coherency perspective.
+ */
+static inline void BCMFASTPATH
+osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
+{
+}
+
+static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
+{
+	struct sk_buff *skb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+#ifdef DHD_USE_ATOMIC_PKTGET
+	flags = GFP_ATOMIC;
+#endif /* DHD_USE_ATOMIC_PKTGET */
+	skb = __dev_alloc_skb(len, flags);
+#else
+	skb = dev_alloc_skb(len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
+	return skb;
+}
+
+#ifdef CTFPOOL
+
+#ifdef CTFPOOL_SPINLOCK
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
+#else
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
+#endif /* CTFPOOL_SPINLOCK */
+/*
+ * Allocate and add an object to packet pool.
+ */
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+	/* No need to allocate more objects */
+	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Allocate a new skb and add it to the ctfpool */
+	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
+	if (skb == NULL) {
+		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+		       osh->ctfpool->obj_size);
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Add to ctfpool */
+	skb->next = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = skb;
+	osh->ctfpool->fast_frees++;
+	osh->ctfpool->curr_obj++;
+
+	/* Hijack a skb member to store ptr to ctfpool */
+	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+	/* Use bit flag to indicate skb from fast ctfpool */
+	PKTFAST(osh, skb) = FASTBUF;
+
+	/* If ctfpool's osh is a fwder osh, reset the fwder buf */
+	osl_fwderbuf_reset(osh->ctfpool->osh, skb);
+
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	return skb;
+}
+
+/*
+ * Add new objects to the pool.
+ */
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	/* Replenish only while refills are pending, adding at most thresh objects */
+	while ((osh->ctfpool->refills > 0) && (thresh--)) {
+		osl_ctfpool_add(osh);
+		osh->ctfpool->refills--;
+	}
+}
+
+/*
+ * Initialize the packet pool with specified number of objects.
+ */
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+	gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
+	ASSERT(osh->ctfpool);
+
+	osh->ctfpool->osh = osh;
+
+	osh->ctfpool->max_obj = numobj;
+	osh->ctfpool->obj_size = size;
+
+	spin_lock_init(&osh->ctfpool->lock);
+
+	while (numobj--) {
+		if (!osl_ctfpool_add(osh))
+			return -1;
+		osh->ctfpool->fast_frees--;
+	}
+
+	return 0;
+}
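+
+/*
+ * Illustrative ctfpool lifecycle: a driver would typically call
+ * osl_ctfpool_init(osh, numobj, size) once, osl_ctfpool_replenish() whenever
+ * clones have raised the refill count, and osl_ctfpool_cleanup() on teardown.
+ * osl_pktfastget()/osl_pktfastfree() then serve allocations from the pool.
+ */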
+
+/*
+ * Cleanup the packet pool objects.
+ */
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+	struct sk_buff *skb, *nskb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+
+	skb = osh->ctfpool->head;
+
+	while (skb != NULL) {
+		nskb = skb->next;
+		dev_kfree_skb(skb);
+		skb = nskb;
+		osh->ctfpool->curr_obj--;
+	}
+
+	ASSERT(osh->ctfpool->curr_obj == 0);
+	osh->ctfpool->head = NULL;
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	kfree(osh->ctfpool);
+	osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+	struct bcmstrbuf *bb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+#ifdef BCMSDIO
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	bb = b;
+
+	ASSERT((osh != NULL) && (bb != NULL));
+
+	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
+	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+	            osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	/* Try a fast allocation. Return NULL if the ctfpool is not in use
+	 * or if there are no items left in it.
+	 */
+	if (osh->ctfpool == NULL)
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	if (osh->ctfpool->head == NULL) {
+		ASSERT(osh->ctfpool->curr_obj == 0);
+		osh->ctfpool->slow_allocs++;
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	if (len > osh->ctfpool->obj_size) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	ASSERT(len <= osh->ctfpool->obj_size);
+
+	/* Get an object from ctfpool */
+	skb = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = (void *)skb->next;
+
+	osh->ctfpool->fast_allocs++;
+	osh->ctfpool->curr_obj--;
+	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	/* Init skb struct */
+	skb->next = skb->prev = NULL;
+#if defined(__ARM_ARCH_7A__)
+	skb->data = skb->head + NET_SKB_PAD;
+	skb->tail = skb->head + NET_SKB_PAD;
+#else
+	skb->data = skb->head + 16;
+	skb->tail = skb->head + 16;
+#endif /* __ARM_ARCH_7A__ */
+	skb->len = 0;
+	skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+	skb->list = NULL;
+#endif
+	atomic_set(&skb->users, 1);
+
+	PKTSETCLINK(skb, NULL);
+	PKTCCLRATTR(skb);
+	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
+
+	return skb;
+}
+#endif /* CTFPOOL */
+
+#if defined(BCM_GMAC3)
+/* Account for a packet delivered to downstream forwarder.
+ * Decrement a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt)
+{
+
+	atomic_sub(skb_cnt, &osh->cmn->pktalloced);
+}
+
+/* Account for a downstream forwarder delivered packet to a WL/DHD driver.
+ * Increment a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+#ifdef BCMDBG_CTRACE
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file)
+#else
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
+#endif /* BCMDBG_CTRACE */
+{
+#if defined(BCMDBG_CTRACE)
+	int i;
+	struct sk_buff *skb;
+#endif 
+
+#if defined(BCMDBG_CTRACE)
+	if (skb_cnt > 1) {
+		struct sk_buff **skb_array = (struct sk_buff **)skbs;
+		for (i = 0; i < skb_cnt; i++) {
+			skb = skb_array[i];
+#if defined(BCMDBG_CTRACE)
+			ASSERT(!PKTISCHAINED(skb));
+			ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+		}
+	} else {
+		skb = (struct sk_buff *)skbs;
+#if defined(BCMDBG_CTRACE)
+		ASSERT(!PKTISCHAINED(skb));
+		ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+	}
+#endif 
+
+	atomic_add(skb_cnt, &osh->cmn->pktalloced);
+}
+
+#endif /* BCM_GMAC3 */
+
+/* Convert a driver packet to a native (OS) packet.
+ * In the process, the packet tag is zeroed out before the packet is sent up:
+ * the IP code depends on skb->cb being set up correctly with various options,
+ * and in our case that means it should be 0.
+ */
+struct sk_buff * BCMFASTPATH
+osl_pkt_tonative(osl_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+#ifdef BCMDBG_CTRACE
+	struct sk_buff *nskb1, *nskb2;
+#endif
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* Decrement the packet counter */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+#ifdef BCMDBG_CTRACE
+		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
+			if (PKTISCHAINED(nskb1)) {
+				nskb2 = PKTCLINK(nskb1);
+			} else {
+				nskb2 = NULL;
+			}
+
+			DEL_CTRACE(osh, nskb1);
+		}
+#endif /* BCMDBG_CTRACE */
+	}
+	return (struct sk_buff *)pkt;
+}
+
+/* Convert a native (OS) packet to a driver packet.
+ * In the process, the native packet is consumed rather than copied, and the
+ * packet tag is zeroed out.
+ */
+void * BCMFASTPATH
+#ifdef BCMDBG_CTRACE
+osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
+#else
+osl_pkt_frmnative(osl_t *osh, void *pkt)
+#endif /* BCMDBG_CTRACE */
+{
+	struct sk_buff *cskb;
+	struct sk_buff *nskb;
+	unsigned long pktalloced = 0;
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* walk the PKTCLINK() list */
+	for (cskb = (struct sk_buff *)pkt;
+	     cskb != NULL;
+	     cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {
+
+		/* walk the pkt buffer list */
+		for (nskb = cskb; nskb; nskb = nskb->next) {
+
+			/* Increment the packet counter */
+			pktalloced++;
+
+			/* clean the 'prev' pointer
+			 * Kernel 3.18 is leaving skb->prev pointer set to skb
+			 * to indicate a non-fragmented skb
+			 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+			nskb->prev = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
+
+
+#ifdef BCMDBG_CTRACE
+			ADD_CTRACE(osh, nskb, file, line);
+#endif /* BCMDBG_CTRACE */
+		}
+	}
+
+	/* Increment the packet counter */
+	atomic_add(pktalloced, &osh->cmn->pktalloced);
+
+	return (void *)pkt;
+}
+
+/* Return a new packet. zero out pkttag */
+void * BCMFASTPATH
+#ifdef BCMDBG_CTRACE
+osl_pktget(osl_t *osh, uint len, int line, char *file)
+#else
+#ifdef BCM_OBJECT_TRACE
+osl_pktget(osl_t *osh, uint len, int line, const char *caller)
+#else
+osl_pktget(osl_t *osh, uint len)
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+{
+	struct sk_buff *skb;
+	uchar num = 0;
+	if (lmtest != FALSE) {
+		get_random_bytes(&num, sizeof(uchar));
+		if ((num + 1) <= (256 * lmtest / 100))
+			return NULL;
+	}
+
+#ifdef CTFPOOL
+	/* Allocate from local pool */
+	skb = osl_pktfastget(osh, len);
+	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL))
+#else /* CTFPOOL */
+	if ((skb = osl_alloc_skb(osh, len)))
+#endif /* CTFPOOL */
+	{
+		skb->tail += len;
+		skb->len  += len;
+		skb->priority = 0;
+
+#ifdef BCMDBG_CTRACE
+		ADD_CTRACE(osh, skb, file, line);
+#endif
+		atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+	}
+
+	return ((void*) skb);
+}
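+
+/*
+ * In short: osl_pktget() first tries the ctfpool fast path (when CTFPOOL is
+ * enabled) and falls back to osl_alloc_skb(); on success the skb is extended
+ * by len bytes and the shared packet counter is incremented. A non-zero
+ * lmtest randomly fails roughly that percentage of allocations, presumably
+ * as a low-memory test hook.
+ */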
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+	ctfpool_t *ctfpool;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+	skb->tstamp.tv.sec = 0;
+#else
+	skb->stamp.tv_sec = 0;
+#endif
+
+	/* We only need to init the fields that we change */
+	skb->dev = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	skb->dst = NULL;
+#endif
+	OSL_PKTTAG_CLEAR(skb);
+	skb->ip_summed = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	skb_orphan(skb);
+#else
+	skb->destructor = NULL;
+#endif
+
+	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+	ASSERT(ctfpool != NULL);
+
+	/* if osh is a fwder osh, reset the fwder buf */
+	osl_fwderbuf_reset(ctfpool->osh, skb);
+
+	/* Add object to the ctfpool */
+	CTFPOOL_LOCK(ctfpool, flags);
+	skb->next = (struct sk_buff *)ctfpool->head;
+	ctfpool->head = (void *)skb;
+
+	ctfpool->fast_frees++;
+	ctfpool->curr_obj++;
+
+	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+	CTFPOOL_UNLOCK(ctfpool, flags);
+}
+#endif /* CTFPOOL */
+
+/* Free the driver packet. Free the tag if present */
+void BCMFASTPATH
+#ifdef BCM_OBJECT_TRACE
+osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
+#else
+osl_pktfree(osl_t *osh, void *p, bool send)
+#endif /* BCM_OBJECT_TRACE */
+{
+	struct sk_buff *skb, *nskb;
+	if (osh == NULL)
+		return;
+
+	skb = (struct sk_buff*) p;
+
+	if (send && osh->pub.tx_fn)
+		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
+	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
+		printk("%s: pkt %p is from static pool\n",
+			__FUNCTION__, p);
+		dump_stack();
+		return;
+	}
+
+	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
+		printk("%s: pkt %p is from static pool and not in use\n",
+			__FUNCTION__, p);
+		dump_stack();
+		return;
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
+
+	/* perversion: we use skb->next to chain multi-skb packets */
+	while (skb) {
+		nskb = skb->next;
+		skb->next = NULL;
+
+#ifdef BCMDBG_CTRACE
+		DEL_CTRACE(osh, skb);
+#endif
+
+
+#ifdef BCM_OBJECT_TRACE
+		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
+#ifdef CTFPOOL
+		if (PKTISFAST(osh, skb)) {
+			if (atomic_read(&skb->users) == 1)
+				smp_rmb();
+			else if (!atomic_dec_and_test(&skb->users))
+				goto next_skb;
+			osl_pktfastfree(osh, skb);
+		} else
+#endif
+		{
+			dev_kfree_skb_any(skb);
+		}
+#ifdef CTFPOOL
+next_skb:
+#endif
+		atomic_dec(&osh->cmn->pktalloced);
+		skb = nskb;
+	}
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+	int i = 0;
+	struct sk_buff *skb;
+#ifdef DHD_USE_STATIC_CTRLBUF
+	unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+	if (!bcm_static_skb)
+		return osl_pktget(osh, len);
+
+	if (len > DHD_SKB_MAX_BUFSIZE) {
+		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+		return osl_pktget(osh, len);
+	}
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+
+	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+		uint32 index;
+		for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+			index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
+			bcm_static_skb->last_allocated_index++;
+			if (bcm_static_skb->skb_8k[index] &&
+				bcm_static_skb->pkt_use[index] == 0) {
+				break;
+			}
+		}
+
+		if ((i != STATIC_PKT_2PAGE_NUM) &&
+			(index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) {
+			bcm_static_skb->pkt_use[index] = 1;
+			skb = bcm_static_skb->skb_8k[index];
+			skb->data = skb->head;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+			skb_set_tail_pointer(skb, NET_SKB_PAD);
+#else
+			skb->tail = skb->data + NET_SKB_PAD;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+			skb->data += NET_SKB_PAD;
+			skb->cloned = 0;
+			skb->priority = 0;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+			skb_set_tail_pointer(skb, len);
+#else
+			skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+			skb->len = len;
+			skb->mac_len = PREALLOC_USED_MAGIC;
+			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+			return skb;
+		}
+	}
+
+	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+	printk("%s: all static pkt in use!\n", __FUNCTION__);
+	return NULL;
+#else
+	down(&bcm_static_skb->osl_pkt_sem);
+
+	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
+		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
+			if (bcm_static_skb->skb_4k[i] &&
+				bcm_static_skb->pkt_use[i] == 0) {
+				break;
+			}
+		}
+
+		if (i != STATIC_PKT_1PAGE_NUM) {
+			bcm_static_skb->pkt_use[i] = 1;
+
+			skb = bcm_static_skb->skb_4k[i];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+			skb_set_tail_pointer(skb, len);
+#else
+			skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+			skb->len = len;
+
+			up(&bcm_static_skb->osl_pkt_sem);
+			return skb;
+		}
+	}
+
+	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+			if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
+				bcm_static_skb->pkt_use[i] == 0) {
+				break;
+			}
+		}
+
+		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
+			bcm_static_skb->pkt_use[i] = 1;
+			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+			skb_set_tail_pointer(skb, len);
+#else
+			skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+			skb->len = len;
+
+			up(&bcm_static_skb->osl_pkt_sem);
+			return skb;
+		}
+	}
+
+#if defined(ENHANCED_STATIC_BUF)
+	if (bcm_static_skb->skb_16k &&
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
+
+		skb = bcm_static_skb->skb_16k;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+		skb_set_tail_pointer(skb, len);
+#else
+		skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+		skb->len = len;
+
+		up(&bcm_static_skb->osl_pkt_sem);
+		return skb;
+	}
+#endif /* ENHANCED_STATIC_BUF */
+
+	up(&bcm_static_skb->osl_pkt_sem);
+	printk("%s: all static pkt in use!\n", __FUNCTION__);
+	return osl_pktget(osh, len);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+}
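+
+/*
+ * Packets handed out by osl_pktget_static() must be returned through
+ * osl_pktfree_static() below so that the corresponding pkt_use[] slot is
+ * released. When no static pool exists, both routines simply defer to the
+ * dynamic osl_pktget()/osl_pktfree() paths.
+ */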
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+	int i;
+#ifdef DHD_USE_STATIC_CTRLBUF
+	struct sk_buff *skb = (struct sk_buff *)p;
+	unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+	if (!p) {
+		return;
+	}
+
+	if (!bcm_static_skb) {
+		osl_pktfree(osh, p, send);
+		return;
+	}
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+
+	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+		if (p == bcm_static_skb->skb_8k[i]) {
+			if (bcm_static_skb->pkt_use[i] == 0) {
+				printk("%s: static pkt idx %d(%p) is double free\n",
+					__FUNCTION__, i, p);
+			} else {
+				bcm_static_skb->pkt_use[i] = 0;
+			}
+
+			if (skb->mac_len != PREALLOC_USED_MAGIC) {
+				printk("%s: static pkt idx %d(%p) is not in use\n",
+					__FUNCTION__, i, p);
+			}
+
+			skb->mac_len = PREALLOC_FREE_MAGIC;
+			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+			return;
+		}
+	}
+
+	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
+#else
+	down(&bcm_static_skb->osl_pkt_sem);
+	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
+		if (p == bcm_static_skb->skb_4k[i]) {
+			bcm_static_skb->pkt_use[i] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+			return;
+		}
+	}
+
+	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
+			bcm_static_skb->pkt_use[i] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+			return;
+		}
+	}
+#ifdef ENHANCED_STATIC_BUF
+	if (p == bcm_static_skb->skb_16k) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
+		up(&bcm_static_skb->osl_pkt_sem);
+		return;
+	}
+#endif
+	up(&bcm_static_skb->osl_pkt_sem);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+	osl_pktfree(osh, p, send);
+}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+	uint val = 0;
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
+	do {
+		pci_read_config_dword(osh->pdev, offset, &val);
+		if (val != 0xffffffff)
+			break;
+	} while (retry--);
+
+
+	return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
+	do {
+		pci_write_config_dword(osh->pdev, offset, val);
+		if (offset != PCI_BAR0_WIN)
+			break;
+		if (osl_pci_read_config(osh, offset, size) == val)
+			break;
+	} while (retry--);
+
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pci_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+#else
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+#endif
+}
+
+/* return slot # for the pci device pointed by osh->pdev */
+uint
+osl_pci_slot(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
+#else
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+#endif
+}
+
+/* return domain # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_domain(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+/* return the pci device pointed by osh->pdev */
+struct pci_dev *
+osl_pci_device(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return osh->pdev;
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+	void *addr;
+	gfp_t flags;
+
+	/* only ASSERT if osh is defined */
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		unsigned long irq_flags;
+		int i = 0;
+		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
+		{
+			spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags);
+
+			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
+			{
+				if (bcm_static_buf->buf_use[i] == 0)
+					break;
+			}
+
+			if (i == STATIC_BUF_MAX_NUM)
+			{
+				spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
+				printk("all static bufs in use!\n");
+				goto original;
+			}
+
+			bcm_static_buf->buf_use[i] = 1;
+			spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags);
+
+			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+			if (osh)
+				atomic_add(size, &osh->cmn->malloced);
+
+			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+		}
+	}
+original:
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	if ((addr = kmalloc(size, flags)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh && osh->cmn)
+		atomic_add(size, &osh->cmn->malloced);
+
+	return (addr);
+}
+
+void *
+osl_mallocz(osl_t *osh, uint size)
+{
+	void *ptr;
+
+	ptr = osl_malloc(osh, size);
+
+	if (ptr != NULL) {
+		bzero(ptr, size);
+	}
+
+	return ptr;
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	unsigned long flags;
+
+	if (bcm_static_buf)
+	{
+		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+		{
+			int buf_idx = 0;
+
+			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+			spin_lock_irqsave(&bcm_static_buf->static_lock, flags);
+			bcm_static_buf->buf_use[buf_idx] = 0;
+			spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags);
+
+			if (osh && osh->cmn) {
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				atomic_sub(size, &osh->cmn->malloced);
+			}
+			return;
+		}
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	if (osh && osh->cmn) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+		ASSERT(size <= osl_malloced(osh));
+
+		atomic_sub(size, &osh->cmn->malloced);
+	}
+	kfree(addr);
+}
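+
+/*
+ * osl_malloc()/osl_mfree() (and the vmalloc variants below) keep a running
+ * byte count in osh->cmn->malloced; osl_check_memleak() returns that count
+ * when only one handle still references the shared osl_cmn structure.
+ */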
+
+void *
+osl_vmalloc(osl_t *osh, uint size)
+{
+	void *addr;
+
+	/* only ASSERT if osh is defined */
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	if ((addr = vmalloc(size)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh && osh->cmn)
+		atomic_add(size, &osh->cmn->malloced);
+
+	return (addr);
+}
+
+void *
+osl_vmallocz(osl_t *osh, uint size)
+{
+	void *ptr;
+
+	ptr = osl_vmalloc(osh, size);
+
+	if (ptr != NULL) {
+		bzero(ptr, size);
+	}
+
+	return ptr;
+}
+
+void
+osl_vmfree(osl_t *osh, void *addr, uint size)
+{
+	if (osh && osh->cmn) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+		ASSERT(size <= osl_malloced(osh));
+
+		atomic_sub(size, &osh->cmn->malloced);
+	}
+	vfree(addr);
+}
+
+uint
+osl_check_memleak(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->malloced));
+	else
+		return 0;
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (atomic_read(&osh->cmn->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (osh->failed);
+}
+
+
+uint
+osl_dma_consistent_align(void)
+{
+	return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
+{
+	void *va;
+	uint16 align = (1 << align_bits);
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+		size += align;
+	*alloced = size;
+
+#ifndef	BCM_SECURE_DMA
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
+	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
+	if (va)
+		*pap = (ulong)__virt_to_phys((ulong)va);
+#else
+	{
+		dma_addr_t pap_lin;
+		struct pci_dev *hwdev = osh->pdev;
+		gfp_t flags;
+#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
+		flags = GFP_ATOMIC;
+#else
+		flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
+		va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
+#ifdef BCMDMA64OSL
+		PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
+		PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
+#else
+		*pap = (dmaaddr_t)pap_lin;
+#endif /* BCMDMA64OSL */
+	}
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+#else
+	va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
+#endif /* BCM_SECURE_DMA */
+	return va;
+}
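+
+/*
+ * Note: if DMA_CONSISTENT_ALIGN does not already satisfy the requested
+ * alignment, the size is padded by one alignment unit (presumably so the
+ * caller can align the returned address itself), and *alloced reports the
+ * padded size.
+ */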
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
+{
+#ifdef BCMDMA64OSL
+	dma_addr_t paddr;
+#endif /* BCMDMA64OSL */
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+#ifndef BCM_SECURE_DMA
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
+	kfree(va);
+#else
+#ifdef BCMDMA64OSL
+	PHYSADDRTOULONG(pa, paddr);
+	pci_free_consistent(osh->pdev, size, va, paddr);
+#else
+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+#endif /* BCMDMA64OSL */
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+#else
+	osl_sec_dma_free_consistent(osh, va, size, pa);
+#endif /* BCM_SECURE_DMA */
+}
+
+dmaaddr_t BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
+{
+	int dir;
+	dmaaddr_t ret_addr;
+	dma_addr_t map_addr;
+	int ret;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+
+
+
+
+	map_addr = pci_map_single(osh->pdev, va, size, dir);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	ret = pci_dma_mapping_error(osh->pdev, map_addr);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
+	ret = pci_dma_mapping_error(map_addr);
+#else
+	ret = 0;
+#endif
+	if (ret) {
+		printk("%s: Failed to map memory\n", __FUNCTION__);
+		PHYSADDRLOSET(ret_addr, 0);
+		PHYSADDRHISET(ret_addr, 0);
+	} else {
+		PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff);
+		PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
+	}
+
+	return ret_addr;
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
+{
+	int dir;
+#ifdef BCMDMA64OSL
+	dma_addr_t paddr;
+#endif /* BCMDMA64OSL */
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+#ifdef BCMDMA64OSL
+	PHYSADDRTOULONG(pa, paddr);
+	pci_unmap_single(osh->pdev, paddr, size, dir);
+#else
+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+#endif /* BCMDMA64OSL */
+}
+
+/* OSL function for CPU relax */
+inline void BCMFASTPATH
+osl_cpu_relax(void)
+{
+	cpu_relax();
+}
+
+extern void osl_preempt_disable(osl_t *osh)
+{
+	preempt_disable();
+}
+
+extern void osl_preempt_enable(osl_t *osh)
+{
+	preempt_enable();
+}
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+	char tempbuf[256];
+	const char *basename;
+
+	basename = strrchr(file, '/');
+	/* skip the '/' */
+	if (basename)
+		basename++;
+
+	if (!basename)
+		basename = file;
+
+#ifdef BCMASSERT_LOG
+	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
+		exp, basename, line);
+#endif /* BCMASSERT_LOG */
+
+
+	switch (g_assert_type) {
+	case 0:
+		panic("%s", tempbuf);
+		break;
+	case 1:
+		/* fall through */
+	case 3:
+		printk("%s", tempbuf);
+		break;
+	case 2:
+		printk("%s", tempbuf);
+		BUG();
+		break;
+	default:
+		break;
+	}
+}
+#endif 
+
+void
+osl_delay(uint usec)
+{
+	uint d;
+
+	while (usec > 0) {
+		d = MIN(usec, 1000);
+		udelay(d);
+		usec -= d;
+	}
+}
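+
+/*
+ * osl_delay() busy-waits via udelay() in chunks of at most 1000 us, whereas
+ * osl_sleep() below may yield the CPU (usleep_range()/msleep()), so only the
+ * latter is suitable when the caller can sleep.
+ */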
+
+void
+osl_sleep(uint ms)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if (ms < 20)
+		usleep_range(ms*1000, ms*1000 + 1000);
+	else
+#endif
+	msleep(ms);
+}
+
+uint64
+osl_sysuptime_us(void)
+{
+	struct timespec64 tv;
+	uint64 usec;
+
+	ktime_get_real_ts64(&tv);
+	/* tv_nsec holds the fractional part of the second, in nanoseconds */
+	usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_nsec / NSEC_PER_USEC;
+	return usec;
+}
+
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+void *
+#ifdef BCMDBG_CTRACE
+osl_pktdup(osl_t *osh, void *skb, int line, char *file)
+#else
+#ifdef BCM_OBJECT_TRACE
+osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
+#else
+osl_pktdup(osl_t *osh, void *skb)
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+{
+	void * p;
+
+	ASSERT(!PKTISCHAINED(skb));
+
+	/* clear the CTFBUF flag if set and map the rest of the buffer
+	 * before cloning.
+	 */
+	PKTCTFMAP(osh, skb);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#else
+	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#endif
+		return NULL;
+
+#ifdef CTFPOOL
+	if (PKTISFAST(osh, skb)) {
+		ctfpool_t *ctfpool;
+
+		/* If a buffer allocated from the ctfpool is cloned, we can't be
+		 * sure when it will be freed. Since there is a chance we will
+		 * lose a buffer from our pool, we increment the refill count so
+		 * that a replacement object is allocated later.
+		 */
+		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+		ASSERT(ctfpool != NULL);
+		PKTCLRFAST(osh, p);
+		PKTCLRFAST(osh, skb);
+		ctfpool->refills++;
+	}
+#endif /* CTFPOOL */
+
+	/* Clear PKTC  context */
+	PKTSETCLINK(p, NULL);
+	PKTCCLRFLAGS(p);
+	PKTCSETCNT(p, 1);
+	PKTCSETLEN(p, PKTLEN(osh, skb));
+
+	/* skb_clone copies skb->cb; we don't want that */
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(p);
+
+	/* Increment the packet counter */
+	atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+	bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
+#ifdef BCMDBG_CTRACE
+	ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
+#endif
+	return (p);
+}
+
+#ifdef BCMDBG_CTRACE
+int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int ck = FALSE;
+
+	spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+		if (pkt == skb) {
+			ck = TRUE;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+	return ck;
+}
+
+void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int idx = 0;
+	int i, j;
+
+	spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+	if (b != NULL)
+		bcm_bprintf(b, " Total %d skb not free\n", osh->ctrace_num);
+	else
+		printk(" Total %d skb not free\n", osh->ctrace_num);
+
+	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+		if (b != NULL)
+			bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
+		else
+			printk("[%d] skb %p:\n", ++idx, skb);
+
+		for (i = 0; i < skb->ctrace_count; i++) {
+			j = (skb->ctrace_start + i) % CTRACE_NUM;
+			if (b != NULL)
+				bcm_bprintf(b, "    [%s(%d)]\n", skb->func[j], skb->line[j]);
+			else
+				printk("    [%s(%d)]\n", skb->func[j], skb->line[j]);
+		}
+		if (b != NULL)
+			bcm_bprintf(b, "\n");
+		else
+			printk("\n");
+	}
+
+	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+
+	return;
+}
+#endif /* BCMDBG_CTRACE */
+
+
+/*
+ * OSLREGOPS specifies that the osl_XXX routines are used for register access.
+ */
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */
+
+uint
+osl_pktalloced(osl_t *osh)
+{
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->pktalloced));
+	else
+		return 0;
+}
+
+uint32
+osl_rand(void)
+{
+	uint32 rand;
+
+	get_random_bytes(&rand, sizeof(rand));
+
+	return rand;
+}
+
+/* Linux Kernel: File Operations: start */
+void *
+osl_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
+#else
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+#endif
+
+	return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+int
+osl_os_image_size(void *image)
+{
+	int len = 0, curroffset;
+
+	if (image) {
+		/* store the current offset */
+		curroffset = generic_file_llseek(image, 0, 1);
+		/* goto end of file to get length */
+		len = generic_file_llseek(image, 0, 2);
+		/* restore back the offset */
+		generic_file_llseek(image, curroffset, 0);
+	}
+	return len;
+}
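+
+/*
+ * Illustrative usage of the file helpers above (a firmware download path
+ * might do something like this; the path and buf are placeholders):
+ *     void *img = osl_os_open_image("/path/to/fw.bin");
+ *     int n = osl_os_get_image_block(buf, sizeof(buf), img);
+ *     osl_os_close_image(img);
+ */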
+
+/* Linux Kernel: File Operations: end */
+
+#if (defined(STB) && defined(__arm__))
+inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
+{
+	unsigned long flags = 0;
+	int pci_access = 0;
+#if defined(BCM_GMAC3)
+	const int acp_war_enab = 1;
+#else  /* !BCM_GMAC3 */
+	int acp_war_enab = ACP_WAR_ENAB();
+#endif /* !BCM_GMAC3 */
+
+	if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
+		pci_access = 1;
+
+	if (pci_access && acp_war_enab)
+		spin_lock_irqsave(&l2x0_reg_lock, flags);
+
+	switch (size) {
+	case sizeof(uint8):
+		*(uint8*)v = readb((volatile uint8*)(addr));
+		break;
+	case sizeof(uint16):
+		*(uint16*)v = readw((volatile uint16*)(addr));
+		break;
+	case sizeof(uint32):
+		*(uint32*)v = readl((volatile uint32*)(addr));
+		break;
+	case sizeof(uint64):
+		*(uint64*)v = *((volatile uint64*)(addr));
+		break;
+	}
+
+	if (pci_access && acp_war_enab)
+		spin_unlock_irqrestore(&l2x0_reg_lock, flags);
+}
+#endif 
+
+#ifdef BCM_SECURE_DMA
+static void *
+osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr)
+{
+
+	struct page **map;
+	int order, i;
+	void *addr = NULL;
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC);
+
+	if (map == NULL)
+		return NULL;
+
+	for (i = 0; i < (size >> PAGE_SHIFT); i++)
+		map[i] = page + i;
+
+	if (iscache) {
+		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL));
+		if (isdecr) {
+			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
+		}
+	} else {
+
+#if defined(__ARM_ARCH_7A__)
+		addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
+			pgprot_noncached(__pgprot(PAGE_KERNEL)));
+#endif
+		if (isdecr) {
+			osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
+		}
+	}
+
+	kfree(map);
+	return (void *)addr;
+}
+
+static void
+osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size)
+{
+	vunmap(contig_base_va);
+}
+
+static int
+osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
+{
+	int i;
+	int ret = BCME_OK;
+	sec_mem_elem_t *sec_mem_elem;
+
+	if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) {
+
+		*list = sec_mem_elem;
+		bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max));
+		for (i = 0; i < max-1; i++) {
+			sec_mem_elem->next = (sec_mem_elem + 1);
+			sec_mem_elem->size = mbsize;
+			sec_mem_elem->pa_cma = osh->contig_base_alloc;
+			sec_mem_elem->vac = osh->contig_base_alloc_va;
+
+			sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
+			osh->contig_base_alloc += mbsize;
+			osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
+
+			sec_mem_elem = sec_mem_elem + 1;
+		}
+		sec_mem_elem->next = NULL;
+		sec_mem_elem->size = mbsize;
+		sec_mem_elem->pa_cma = osh->contig_base_alloc;
+		sec_mem_elem->vac = osh->contig_base_alloc_va;
+
+		sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
+		osh->contig_base_alloc += mbsize;
+		osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va +  mbsize);
+
+	} else {
+		printf("%s sec mem elem kmalloc failed\n", __FUNCTION__);
+		ret = BCME_ERROR;
+	}
+	return ret;
+}
+
+
+static void
+osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
+{
+	if (sec_list_base)
+		kfree(sec_list_base);
+}
+
+static sec_mem_elem_t * BCMFASTPATH
+osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
+	struct sec_cma_info *ptr_cma_info, uint offset)
+{
+	sec_mem_elem_t *sec_mem_elem = NULL;
+
+	ASSERT(osh->sec_list_4096);
+	sec_mem_elem = osh->sec_list_4096;
+	osh->sec_list_4096 = sec_mem_elem->next;
+
+	sec_mem_elem->next = NULL;
+
+	if (ptr_cma_info->sec_alloc_list_tail) {
+		ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem;
+		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
+	}
+	else {
+		/* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */
+		ASSERT(ptr_cma_info->sec_alloc_list == NULL);
+		ptr_cma_info->sec_alloc_list = sec_mem_elem;
+		ptr_cma_info->sec_alloc_list_tail = sec_mem_elem;
+	}
+	return sec_mem_elem;
+}
+
+static void BCMFASTPATH
+osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem)
+{
+	sec_mem_elem->dma_handle = 0x0;
+	sec_mem_elem->va = NULL;
+	sec_mem_elem->next = osh->sec_list_4096;
+	osh->sec_list_4096 = sec_mem_elem;
+}
+
+static sec_mem_elem_t * BCMFASTPATH
+osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
+{
+	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
+	sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list;
+
+	if (sec_mem_elem->dma_handle == dma_handle) {
+
+		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
+
+		if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) {
+			ptr_cma_info->sec_alloc_list_tail = NULL;
+			ASSERT(ptr_cma_info->sec_alloc_list == NULL);
+		}
+
+		return sec_mem_elem;
+	}
+	sec_mem_elem = sec_mem_elem->next;
+
+	while (sec_mem_elem != NULL) {
+
+		if (sec_mem_elem->dma_handle == dma_handle) {
+
+			sec_prv_elem->next = sec_mem_elem->next;
+			if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail)
+				ptr_cma_info->sec_alloc_list_tail = sec_prv_elem;
+
+			return sec_mem_elem;
+		}
+		sec_prv_elem = sec_mem_elem;
+		sec_mem_elem = sec_mem_elem->next;
+	}
+	return NULL;
+}
+
+static sec_mem_elem_t *
+osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
+{
+	sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list;
+
+	if (sec_mem_elem) {
+
+		ptr_cma_info->sec_alloc_list = sec_mem_elem->next;
+
+		if (ptr_cma_info->sec_alloc_list == NULL)
+			ptr_cma_info->sec_alloc_list_tail = NULL;
+
+		return sec_mem_elem;
+
+	} else
+		return NULL;
+}
+
+static void * BCMFASTPATH
+osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info)
+{
+	return ptr_cma_info->sec_alloc_list_tail;
+}
+
+dma_addr_t BCMFASTPATH
+osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *dmah, void *ptr_cma_info)
+{
+	sec_mem_elem_t *sec_mem_elem;
+	struct page *pa_cma_page;
+	uint loffset;
+	void *vaorig = ((uint8 *)va + size);
+	dma_addr_t dma_handle = 0x0;
+	/* packet will be the one added with osl_sec_dma_map() just before this call */
+
+	sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info);
+
+	if (sec_mem_elem && sec_mem_elem->va == vaorig) {
+
+		pa_cma_page = phys_to_page(sec_mem_elem->pa_cma);
+		loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
+
+		dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
+			(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
+
+	} else {
+		printf("%s: error orig va not found va = 0x%p \n",
+			__FUNCTION__, vaorig);
+	}
+	return dma_handle;
+}
+
+dma_addr_t BCMFASTPATH
+osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset)
+{
+
+	sec_mem_elem_t *sec_mem_elem;
+	struct page *pa_cma_page;
+	void *pa_cma_kmap_va = NULL;
+	uint buflen = 0;
+	dma_addr_t dma_handle = 0x0;
+	uint loffset;
+
+	ASSERT((direction == DMA_RX) || (direction == DMA_TX));
+	sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
+
+	sec_mem_elem->va = va;
+	sec_mem_elem->direction = direction;
+	pa_cma_page = sec_mem_elem->pa_cma_page;
+
+	loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1));
+	/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
+	* pa_cma_kmap_va += loffset;
+	*/
+
+	pa_cma_kmap_va = sec_mem_elem->vac;
+	pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset);
+	buflen = size;
+
+	if (direction == DMA_TX) {
+		memcpy((uint8*)pa_cma_kmap_va+offset, va, size);
+
+		if (dmah) {
+			dmah->nsegs = 1;
+			dmah->origsize = buflen;
+		}
+	}
+	else
+	{
+		if ((p != NULL) && (dmah != NULL)) {
+			dmah->nsegs = 1;
+			dmah->origsize = buflen;
+		}
+		*(uint32 *)(pa_cma_kmap_va) = 0x0;
+	}
+
+	if (direction == DMA_RX) {
+		flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
+	}
+	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen,
+		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
+	if (dmah) {
+		dmah->segs[0].addr = dma_handle;
+		dmah->segs[0].length = buflen;
+	}
+	sec_mem_elem->dma_handle = dma_handle;
+	/* kunmap_atomic(pa_cma_kmap_va-loffset); */
+	return dma_handle;
+}
+
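+/* Map a buffer that already resides in the contiguous secure-DMA region (no
+ * bounce copy): derive its physical address from the recorded va-to-pa delta
+ * and DMA-map the containing page at the buffer's offset.
+ */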
+dma_addr_t BCMFASTPATH
+osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map)
+{
+
+	struct page *pa_cma_page;
+	phys_addr_t pa_cma;
+	dma_addr_t dma_handle = 0x0;
+	uint loffset;
+
+	pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa);
+	pa_cma_page = phys_to_page(pa_cma);
+	loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1));
+
+	dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size,
+		(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
+
+	return dma_handle;
+}
+
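+/* Unmap a secure-DMA bounce buffer: look up and remove the element matching
+ * dma_handle; for RX without a packet (p == NULL), poll the first word of the
+ * bounce buffer for a nonzero length (up to ~200 us), unmap, and copy the data
+ * back to the original buffer; for TX, just unmap. The element is then freed.
+ */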
+void BCMFASTPATH
+osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction,
+void *p, hnddma_seg_map_t *map,	void *ptr_cma_info, uint offset)
+{
+	sec_mem_elem_t *sec_mem_elem;
+	void *pa_cma_kmap_va = NULL;
+	uint buflen = 0;
+	dma_addr_t pa_cma;
+	void *va;
+	int read_count = 0;
+	BCM_REFERENCE(buflen);
+	BCM_REFERENCE(read_count);
+
+	sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle);
+	ASSERT(sec_mem_elem);
+
+	va = sec_mem_elem->va;
+	va = (uint8 *)va - offset;
+	pa_cma = sec_mem_elem->pa_cma;
+
+
+	if (direction == DMA_RX) {
+
+		if (p == NULL) {
+
+			/* pa_cma_kmap_va = kmap_atomic(pa_cma_page);
+			* pa_cma_kmap_va += loffset;
+			*/
+
+			pa_cma_kmap_va = sec_mem_elem->vac;
+
+			do {
+				invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int));
+
+				buflen = *(uint *)(pa_cma_kmap_va);
+				if (buflen)
+					break;
+
+				OSL_DELAY(1);
+				read_count++;
+			} while (read_count < 200);
+			dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE);
+			memcpy(va, pa_cma_kmap_va, size);
+			/* kunmap_atomic(pa_cma_kmap_va); */
+		}
+	} else {
+		dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
+	}
+
+	osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
+}
+
+void
+osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info)
+{
+
+	sec_mem_elem_t *sec_mem_elem;
+
+	sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
+
+	while (sec_mem_elem != NULL) {
+
+		dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size,
+			sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		osl_sec_dma_free_mem_elem(osh, sec_mem_elem);
+
+		sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info);
+	}
+}
+
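+/* Coherent allocations are served from a small static pool: the coherent CMA
+ * area is carved into SEC_CMA_COHERENT_MAX blocks of SEC_CMA_COHERENT_BLK
+ * bytes, and the alloc/free routines below simply scan this table for a free
+ * or matching entry.
+ */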
+static void
+osl_sec_dma_init_consistent(osl_t *osh)
+{
+	int i;
+	void *temp_va = osh->contig_base_alloc_coherent_va;
+	phys_addr_t temp_pa = osh->contig_base_alloc_coherent;
+
+	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
+		osh->sec_cma_coherent[i].avail = TRUE;
+		osh->sec_cma_coherent[i].va = temp_va;
+		osh->sec_cma_coherent[i].pa = temp_pa;
+		temp_va = ((uint8 *)temp_va)+SEC_CMA_COHERENT_BLK;
+		temp_pa += SEC_CMA_COHERENT_BLK;
+	}
+}
+
+static void *
+osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap)
+{
+
+	void *temp_va = NULL;
+	ulong temp_pa = 0;
+	int i;
+
+	if (size > SEC_CMA_COHERENT_BLK) {
+		printf("%s unsupported size\n", __FUNCTION__);
+		return NULL;
+	}
+
+	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
+		if (osh->sec_cma_coherent[i].avail == TRUE) {
+			temp_va = osh->sec_cma_coherent[i].va;
+			temp_pa = osh->sec_cma_coherent[i].pa;
+			osh->sec_cma_coherent[i].avail = FALSE;
+			break;
+		}
+	}
+
+	if (i == SEC_CMA_COHERENT_MAX)
+		printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
+			temp_va, (ulong)temp_pa, size);
+
+	*pap = (unsigned long)temp_pa;
+	return temp_va;
+}
+
+static void
+osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
+{
+	int i = 0;
+
+	for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) {
+		if (osh->sec_cma_coherent[i].va == va) {
+			osh->sec_cma_coherent[i].avail = TRUE;
+			break;
+		}
+	}
+	if (i == SEC_CMA_COHERENT_MAX)
+		printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
+			va, (ulong)pa, size);
+}
+
+#endif /* BCM_SECURE_DMA */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
+#include <linux/kallsyms.h>
+#include <net/sock.h>
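+/* TSQ (TCP Small Queues) workaround: once the skb destructor is identified as
+ * tcp_wfree (resolved once via sprint_symbol), charge only a fraction of the
+ * skb truesize to the socket's write-memory accounting and orphan the skb, so
+ * that buffering inside the WiFi driver does not throttle the TCP sender.
+ */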
+void
+osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
+{
+	uint32 fraction;
+	static void *p_tcp_wfree = NULL;
+
+	if (tsq <= 0)
+		return;
+
+	if (!skb->destructor || skb->destructor == sock_wfree)
+		return;
+
+	if (unlikely(!p_tcp_wfree)) {
+		char sym[KSYM_SYMBOL_LEN];
+		sprint_symbol(sym, (unsigned long)skb->destructor);
+		sym[9] = 0;
+		if (!strcmp(sym, "tcp_wfree"))
+			p_tcp_wfree = skb->destructor;
+		else
+			return;
+	}
+
+	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
+		return;
+
+	/* Deduct a portion of the skb truesize from the socket's
+	 * sk_wmem_alloc so that more skbs can be allocated for this
+	 * socket, providing a better cushion to meet WiFi device requirements.
+	 */
+	fraction = skb->truesize * (tsq - 1) / tsq;
+	skb->truesize -= fraction;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+	atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
+#else
+	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
+#endif /* LINUX_VERSION >= 4.13.0 */
+	skb_orphan(skb);
+}
+#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
+
+/* timer apis */
+/* Note: All timer APIs are not thread-safe and must be protected with locks by the caller */
+
+#ifdef REPORT_FATAL_TIMEOUTS
+osl_timer_t *
+osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
+{
+	osl_timer_t *t;
+	BCM_REFERENCE(fn);
+	if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
+		printk(KERN_ERR "osl_timer_init: malloc failed for osl_timer_t\n");
+		return (NULL);
+	}
+	bzero(t, sizeof(osl_timer_t));
+	if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
+		printk(KERN_ERR "osl_timer_init: malloc failed\n");
+		MFREE(NULL, t, sizeof(osl_timer_t));
+		return (NULL);
+	}
+	t->timer->data = (ulong)arg;
+	t->timer->function = (linux_timer_fn)fn;
+	t->set = TRUE;
+
+	init_timer(t->timer);
+
+	return (t);
+}
+
+void
+osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
+{
+
+	if (t == NULL) {
+		printf("%s: Timer handle is NULL\n", __FUNCTION__);
+		return;
+	}
+	ASSERT(!t->set);
+
+	t->set = TRUE;
+	if (periodic) {
+		printf("Periodic timers are not supported by Linux timer APIs\n");
+	}
+	t->timer->expires = jiffies + ms*HZ/1000;
+
+	add_timer(t->timer);
+
+	return;
+}
+
+void
+osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
+{
+
+	if (t == NULL) {
+		printf("%s: Timer handle is NULL\n", __FUNCTION__);
+		return;
+	}
+	if (periodic) {
+		printf("Periodic timers are not supported by Linux timer APIs\n");
+	}
+	t->set = TRUE;
+	t->timer->expires = jiffies + ms*HZ/1000;
+
+	mod_timer(t->timer, t->timer->expires);
+
+	return;
+}
+
+/*
+ * Return TRUE if the timer was successfully deleted, FALSE if the handle is NULL
+ */
+bool
+osl_timer_del(osl_t *osh, osl_timer_t *t)
+{
+	if (t == NULL) {
+		printf("%s: Timer handle is NULL\n", __FUNCTION__);
+		return (FALSE);
+	}
+	if (t->set) {
+		t->set = FALSE;
+		if (t->timer) {
+			del_timer(t->timer);
+			MFREE(NULL, t->timer, sizeof(struct timer_list));
+		}
+		MFREE(NULL, t, sizeof(osl_timer_t));
+	}
+	return (TRUE);
+}
+#endif /* REPORT_FATAL_TIMEOUTS */
diff --git a/drivers/net/wireless/bcmdhd/pcie_core.c b/drivers/net/wireless/bcmdhd/pcie_core.c
new file mode 100644
index 0000000..63f955c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/pcie_core.c
@@ -0,0 +1,135 @@
+/** @file pcie_core.c
+ *
+ * Contains PCIe related functions that are shared between different driver models (e.g. firmware
+ * builds, DHD builds, BMAC builds), in order to avoid code duplication.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: pcie_core.c 658668 2016-09-09 00:42:11Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "pcie_core.h"
+
+/* local prototypes */
+
+/* local variables */
+
+/* function definitions */
+
+#ifdef BCMDRIVER
+
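+/* Reset the chip via the chipcommon watchdog while preserving PCIe link state:
+ * ASPM is disabled across the reset and restored afterwards, and for bus core
+ * revisions <= 13 the listed config registers are read back and rewritten to
+ * repopulate the shadow registers cleared by the reset.
+ */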
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
+{
+	uint32 val, i, lsc;
+	uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR,
+		PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L,
+		PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA,
+		PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL,
+		PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG,
+		PCIECFGREG_REG_BAR3_CONFIG};
+	sbpcieregs_t *pcie = NULL;
+	uint32 origidx = si_coreidx(sih);
+
+	/* Switch to PCIE2 core */
+	pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+	BCM_REFERENCE(pcie);
+	ASSERT(pcie != NULL);
+
+	/* Disable/restore ASPM Control to protect the watchdog reset */
+	W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+	lsc = R_REG(osh, &sbpcieregs->configdata);
+	val = lsc & (~PCIE_ASPM_ENAB);
+	W_REG(osh, &sbpcieregs->configdata, val);
+
+	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
+	OSL_DELAY(100000);
+
+	W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+	W_REG(osh, &sbpcieregs->configdata, lsc);
+
+	if (sih->buscorerev <= 13) {
+		/* Write the configuration registers back to the shadow registers,
+		 * because the shadow registers are cleared out after a watchdog reset.
+		 */
+		for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
+			W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]);
+			val = R_REG(osh, &sbpcieregs->configdata);
+			W_REG(osh, &sbpcieregs->configdata, val);
+		}
+	}
+	si_setcoreidx(sih, origidx);
+}
+
+
+/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled
+ * by the L12 state from MAC to save power by putting the
+ * SerDes analog in IDDQ mode
+ */
+void  pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
+{
+	sbpcieregs_t *pcie = NULL;
+	uint crwlpciegen2_117_disable = 0;
+	uint32 origidx = si_coreidx(sih);
+
+	crwlpciegen2_117_disable = PCIE_PipeIddqDisable0 | PCIE_PipeIddqDisable1;
+	/* Switch to PCIE2 core */
+	pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+	BCM_REFERENCE(pcie);
+	ASSERT(pcie != NULL);
+
+	OR_REG(osh, &sbpcieregs->control,
+		crwlpciegen2_117_disable);
+
+	si_setcoreidx(sih, origidx);
+}
+
+#define PCIE_PMCR_REFUP_MASK 0x3f0001e0
+#define PCIE_PMCR_REFEXT_MASK 0x400000
+#define PCIE_PMCR_REFUP_100US 0x38000080
+#define PCIE_PMCR_REFEXT_100US 0x400000
+
+/* Set PCIE TRefUp time to 100us */
+void pcie_set_trefup_time_100us(si_t *sih)
+{
+	si_corereg(sih, sih->buscoreidx,
+		OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP);
+	si_corereg(sih, sih->buscoreidx,
+		OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFUP_MASK, PCIE_PMCR_REFUP_100US);
+
+	si_corereg(sih, sih->buscoreidx,
+		OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP_EXT);
+	si_corereg(sih, sih->buscoreidx,
+		OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFEXT_MASK, PCIE_PMCR_REFEXT_100US);
+}
+#endif /* BCMDRIVER */
diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c
new file mode 100644
index 0000000..975ee6a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/sbutils.c
@@ -0,0 +1,1101 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbutils.c 599296 2015-11-13 06:36:13Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
+                     uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+#define	SET_SBREG(sii, r, mask, val)	\
+		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define	REGS2SB(va)	(sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
+#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
+#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+	uint8 tmp;
+	uint32 val, intr_val = 0;
+
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	val = R_REG(sii->osh, sbr);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+	uint8 tmp;
+	volatile uint32 dummy;
+	uint32 intr_val = 0;
+
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
+	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
+	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+	} else
+		W_REG(sii->osh, sbr, v);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	volatile void *corereg;
+	sbconfig_t *sb;
+	uint origidx, intflag, intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+	corereg = si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(corereg != NULL);
+	sb = REGS2SB(corereg);
+	intflag = R_SBREG(sii, &sb->sbflagst);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+
+	return intflag;
+}
+
+uint
+sb_flag(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 vec;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	if (siflag == -1)
+		vec = 0;
+	else
+		vec = 1 << siflag;
+	W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	for (i = 0; i < sii->numcores; i ++)
+		if (sba == cores_info->coresba[i])
+			return i;
+	return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+	uint32 sbaddr;
+
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS: {
+		sbconfig_t *sb = REGS2SB(sii->curmap);
+		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+		break;
+	}
+
+	case PCI_BUS:
+		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = 0;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		sbaddr  = (uint32)tmp << 12;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		sbaddr |= (uint32)tmp << 16;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		sbaddr |= (uint32)tmp << 24;
+		break;
+	}
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		sbaddr = (uint32)(uintptr)sii->curmap;
+		break;
+#endif
+
+
+	default:
+		sbaddr = BADCOREADDR;
+		break;
+	}
+
+	return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint sbidh;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+	sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+	return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+	        (val << SBTML_SICF_SHIFT);
+	W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+		        (val << SBTML_SICF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatelow, w);
+	}
+
+	/* Return the new value.
+	 * For a write operation, the following readback ensures completion of the write operation.
+	 */
+	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+		        (val << SBTMH_SISF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatehigh, w);
+	}
+
+	/* return the new value */
+	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbtmstatelow) &
+	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	volatile uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (volatile uint32 *)((volatile char *)sii->curmap +
+			               PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
+		               regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		if (regoff >= SBCONFIGOFF) {
+			w = (R_SBREG(sii, r) & ~mask) | val;
+			W_SBREG(sii, r, w);
+		} else {
+			w = (R_REG(sii->osh, r) & ~mask) | val;
+			W_REG(sii->osh, r, w);
+		}
+	}
+
+	/* readback */
+	if (regoff >= SBCONFIGOFF)
+		w = R_SBREG(sii, r);
+	else {
+		w = R_REG(sii->osh, r);
+	}
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			sb_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * translates the register offset on this core into a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+volatile uint32 *
+sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	volatile uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (volatile uint32 *)((volatile char *)sii->curmap +
+			               PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (volatile uint32 *)((volatile char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast)
+		return 0;
+
+	return (r);
+}
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES	2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
+	uint32 sbba, uint numcores)
+{
+	uint next;
+	uint ncc = 0;
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (bus >= SB_MAXBUSES) {
+		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+		return 0;
+	}
+	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+	/* Scan all cores on the bus starting from core 0.
+	 * Core addresses must be contiguous on each bus.
+	 */
+	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+		/* keep and reuse the initial register mapping */
+		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
+			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+			cores_info->regs[next] = regs;
+		}
+
+		/* change core to 'next' and read its coreid */
+		sii->curmap = _sb_setcoreidx(sii, next);
+		sii->curidx = next;
+
+		cores_info->coreid[next] = sb_coreid(&sii->pub);
+
+		/* core specific processing... */
+		/* chipc provides # cores */
+		if (cores_info->coreid[next] == CC_CORE_ID) {
+			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+			uint32 ccrev = sb_corerev(&sii->pub);
+
+			/* determine numcores - this is the total # cores in the chip */
+			if (((ccrev == 4) || (ccrev >= 6))) {
+				ASSERT(cc);
+				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+				        CID_CC_SHIFT;
+			} else {
+				/* Older chips */
+				uint chip = CHIPID(sii->pub.chip);
+
+				if (chip == BCM4704_CHIP_ID)
+					numcores = 9;
+				else if (chip == BCM5365_CHIP_ID)
+					numcores = 7;
+				else {
+					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+					          chip));
+					ASSERT(0);
+					numcores = 1;
+				}
+			}
+			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+				sii->pub.issim ? "QT" : ""));
+		}
+		/* scan bridged SB(s) and add results to the end of the list */
+		else if (cores_info->coreid[next] == OCP_CORE_ID) {
+			sbconfig_t *sb = REGS2SB(sii->curmap);
+			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+			uint nsbcc;
+
+			sii->numcores = next + 1;
+
+			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+				continue;
+			nsbba &= 0xfffff000;
+			if (_sb_coreidx(sii, nsbba) != BADIDX)
+				continue;
+
+			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+			if (sbba == SI_ENUM_BASE)
+				numcores -= nsbcc;
+			ncc += nsbcc;
+		}
+	}
+
+	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+	sii->numcores = i + ncc;
+	return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, volatile void *regs, uint devid)
+{
+	uint32 origsba;
+	sbconfig_t *sb;
+	si_info_t *sii = SI_INFO(sih);
+	BCM_REFERENCE(devid);
+
+	sb = REGS2SB(sii->curmap);
+
+	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+	/* Save the current core info and validate it later till we know
+	 * for sure what is good and what is bad.
+	 */
+	origsba = _sb_coresba(sii);
+
+	/* scan all SB(s) starting from SI_ENUM_BASE */
+	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
+ */
+volatile void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	sii->curmap = _sb_setcoreidx(sii, coreidx);
+	sii->curidx = coreidx;
+
+	return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static volatile void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 sbaddr = cores_info->coresba[coreidx];
+	volatile void *regs;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+		regs = sii->curmap;
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = (sbaddr >> 12) & 0x0f;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		tmp = (sbaddr >> 16) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		tmp = (sbaddr >> 24) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		regs = sii->curmap;
+		break;
+	}
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+#endif	/* BCMSDIO */
+
+
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+	sbconfig_t *sb;
+	volatile uint32 *addrm;
+
+	sb = REGS2SB(sii->curmap);
+
+	switch (asidx) {
+	case 0:
+		addrm =  &sb->sbadmatch0;
+		break;
+
+	case 1:
+		addrm =  &sb->sbadmatch1;
+		break;
+
+	case 2:
+		addrm =  &sb->sbadmatch2;
+		break;
+
+	case 3:
+		addrm =  &sb->sbadmatch3;
+		break;
+
+	default:
+		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+		return 0;
+	}
+
+	return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	/* + 1 because of enumeration space */
+	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+
+	origidx = sii->curidx;
+	ASSERT(GOODIDX(origidx));
+
+	INTR_OFF(sii, intr_val);
+
+	/* switch over to chipcommon core if there is one, else use pci */
+	if (sii->pub.ccrev != NOREV) {
+		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		ASSERT(ccregs != NULL);
+
+		/* do the buffer registers update */
+		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+	} else
+		ASSERT(0);
+
+	/* restore core index */
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+
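+/* Put the current core into reset: if its clock is running, first assert target
+ * reject (and initiator reject, if applicable) and wait for busy to clear, then
+ * leave the core with reset and reject asserted.
+ */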
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/* if core is already in reset, just return */
+	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+		return;
+
+	/* if clocks are not enabled, put into reset and return */
+	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+		goto disable;
+
+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
+	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+		dummy = R_SBREG(sii, &sb->sbimstate);
+		BCM_REFERENCE(dummy);
+		OSL_DELAY(1);
+		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+	}
+
+	/* set reset and reject while enabling the clocks */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_REJ | SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+
+	/* don't forget to clear the initiator reject bit */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+	/* leave reset and reject asserted */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	sb_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+
+	/* set reset while enabling the clock and forcing them on throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+		W_SBREG(sii, &sb->sbtmstatehigh, 0);
+	}
+	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	/* leave clock enabled */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ *	SI_BUS => mips
+ *	JTAG_BUS => chipc
+ *	PCI_BUS => pci or pcie
+ *	PCMCIA_BUS => pcmcia
+ *	SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	uint32 tmp, ret = 0xffffffff;
+	sbconfig_t *sb;
+
+
+	if ((to & ~TO_MASK) != 0)
+		return ret;
+
+	/* Figure out the master core */
+	if (idx == BADIDX) {
+		switch (BUSTYPE(sii->pub.bustype)) {
+		case PCI_BUS:
+			idx = sii->pub.buscoreidx;
+			break;
+		case JTAG_BUS:
+			idx = SI_CC_IDX;
+			break;
+		case PCMCIA_BUS:
+#ifdef BCMSDIO
+		case SDIO_BUS:
+#endif
+			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+			break;
+		case SI_BUS:
+			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+			break;
+		default:
+			ASSERT(0);
+		}
+		if (idx == BADIDX)
+			return ret;
+	}
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+	tmp = R_SBREG(sii, &sb->sbimconfiglow);
+	ret = tmp & TO_MASK;
+	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+	sb_commit(sih);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret;
+}
+
+uint32
+sb_base(uint32 admatch)
+{
+	uint32 base;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	base = 0;
+
+	if (type == 0) {
+		base = admatch & SBAM_BASE0_MASK;
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE1_MASK;
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE2_MASK;
+	}
+
+	return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+	uint32 size;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	size = 0;
+
+	if (type == 0) {
+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+	}
+
+	return (size);
+}
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting sbconfig registers */
+void
+sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+	sbconfig_t *sb;
+	uint origidx, i, intr_val = 0;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	origidx = sii->curidx;
+
+	INTR_OFF(sii, intr_val);
+
+	for (i = 0; i < sii->numcores; i++) {
+		sb = REGS2SB(sb_setcoreidx(sih, i));
+
+		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+
+		if (sii->pub.socirev > SONICS_2_2)
+			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
+			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
+			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));
+
+		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
+		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
+		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
+		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
+		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
+	}
+
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c
new file mode 100644
index 0000000..edc3df0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils.c
@@ -0,0 +1,3724 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: siutils.c 668442 2016-11-03 08:42:43Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <sbgci.h>
+#ifdef BCMPCIEDEV
+#include <pciedev.h>
+#endif /* BCMPCIEDEV */
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsysmem.h>
+#include <sbsocram.h>
+#ifdef BCMSDIO
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#endif /* BCMSDIO */
+#include <hndpmu.h>
+#include <dhd_config.h>
+
+#ifdef BCM_SDRBL
+#include <hndcpu.h>
+#endif /* BCM_SDRBL */
+#ifdef HNDGCI
+#include <hndgci.h>
+#endif /* HNDGCI */
+#ifdef BCMULP
+#include <ulp.h>
+#endif /* BCMULP */
+
+
+#include "siutils_priv.h"
+#ifdef SECI_UART
+/* Defines the set of GPIOs to be used for SECI UART if not specified in NVRAM */
+#define DEFAULT_SECI_UART_PINMUX_43430	0x0102
+static bool force_seci_clk = 0;
+#endif /* SECI_UART */
+
+/**
+ * A set of PMU registers is clocked in the ILP domain, which has an implication on register write
+ * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb
+ * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set.
+ */
+#define PMUREGS_ILP_SENSITIVE(regoff) \
+	((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \
+	 (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \
+	 (regoff) == OFFSETOF(pmuregs_t, res_req_timer))
+
+#define CHIPCREGS_ILP_SENSITIVE(regoff) \
+	((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \
+	 (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \
+	 (regoff) == OFFSETOF(chipcregs_t, res_req_timer))
+
+#define GCI_FEM_CTRL_WAR 0x11111111
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs,
+                              uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, volatile void *regs);
+
+
+static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff);
+
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+static bool si_onetimeinit = FALSE;
+
+#ifdef SR_DEBUG
+static const uint32 si_power_island_test_array[] = {
+	0x0000, 0x0001, 0x0010, 0x0011,
+	0x0100, 0x0101, 0x0110, 0x0111,
+	0x1000, 0x1001, 0x1010, 0x1011,
+	0x1100, 0x1101, 0x1110, 0x1111
+};
+#endif /* SR_DEBUG */
+
+int do_4360_pcie2_war = 0;
+
+#ifdef BCMULP
+/* Variable to store boot_type: warm_boot/cold_boot/etc. */
+static int boot_type = 0;
+#endif
+
+/* global kernel resource */
+static si_info_t ksii;
+static si_cores_info_t ksii_cores_info;
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ *        function set 'vars' to NULL, making dereferencing of this parameter undesired.
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, volatile void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	si_info_t *sii;
+	si_cores_info_t *cores_info;
+	/* alloc si_info_t */
+	if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		return (NULL);
+	}
+
+	/* alloc si_cores_info_t */
+	if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		MFREE(osh, sii, sizeof(si_info_t));
+		return (NULL);
+	}
+	sii->cores_info = cores_info;
+
+	if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+		MFREE(osh, sii, sizeof(si_info_t));
+		MFREE(osh, cores_info, sizeof(si_cores_info_t));
+		return (NULL);
+	}
+	sii->vars = vars ? *vars : NULL;
+	sii->varsz = varsz ? *varsz : 0;
+
+	return (si_t *)sii;
+}
+
+
+static uint32	wd_msticks;		/**< watchdog timer ticks normalized to ms */
+
+/** generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+	static bool ksii_attached = FALSE;
+	si_cores_info_t *cores_info;
+
+	if (!ksii_attached) {
+		void *regs = NULL;
+		regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+		cores_info = (si_cores_info_t *)&ksii_cores_info;
+		ksii.cores_info = cores_info;
+
+		ASSERT(osh);
+		if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+		                SI_BUS, NULL,
+		                osh != SI_OSH ? &(ksii.vars) : NULL,
+		                osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
+			SI_ERROR(("si_kattach: si_doattach failed\n"));
+			REG_UNMAP(regs);
+			return NULL;
+		}
+		REG_UNMAP(regs);
+
+		/* save ticks normalized to ms for si_watchdog_ms() */
+		if (PMUCTL_ENAB(&ksii.pub)) {
+			{
+				/* based on 32KHz ILP clock */
+				wd_msticks = 32;
+			}
+		} else {
+			wd_msticks = ALP_CLOCK / 1000;
+		}
+
+		ksii_attached = TRUE;
+		SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+		        CCREV(ksii.pub.ccrev), wd_msticks));
+	}
+
+	return &ksii.pub;
+}
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	BCM_REFERENCE(sdh);
+	BCM_REFERENCE(devid);
+	/* need to set the memseg flag for CF cards first, before any sb register access */
+	if (BUSTYPE(bustype) == PCMCIA_BUS)
+		sii->memseg = TRUE;
+
+
+#if defined(BCMSDIO)
+	if (BUSTYPE(bustype) == SDIO_BUS) {
+		int err;
+		uint8 clkset;
+
+		/* Try forcing SDIO core to do ALPAvail request only */
+		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+		if (!err) {
+			uint8 clkval;
+
+			/* If register supported, wait for ALPAvail and then force ALP */
+			clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+			if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+				SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+					PMU_MAX_TRANSITION_DLY);
+				if (!SBSDIO_ALPAV(clkval)) {
+					SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+						clkval));
+					return FALSE;
+				}
+				clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					clkset, &err);
+				OSL_DELAY(65);
+			}
+		}
+
+		/* Also, disable the extra SDIO pull-ups */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	}
+
+#endif /* BCMSDIO */
+
+	return TRUE;
+}
+
+uint32
+si_get_pmu_reg_addr(si_t *sih, uint32 offset)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 pmuaddr = INVALID_ADDR;
+	uint origidx = 0;
+
+	SI_MSG(("%s: pmu access, offset: %x\n", __FUNCTION__, offset));
+	if (!(sii->pub.cccaps & CC_CAP_PMU)) {
+		goto done;
+	}
+	if (AOB_ENAB(&sii->pub)) {
+		uint pmucoreidx;
+		pmuregs_t *pmu;
+		SI_MSG(("%s: AOBENAB: %x\n", __FUNCTION__, offset));
+		origidx = sii->curidx;
+		pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+		pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+		pmuaddr = (uint32)(uintptr)((volatile uint8*)pmu + offset);
+		si_setcoreidx(sih, origidx);
+	} else
+		pmuaddr = SI_ENUM_BASE + offset;
+
+done:
+	printf("%s: addrRET: %x\n", __FUNCTION__, pmuaddr);
+	return pmuaddr;
+}
+
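+/* Attach-time bus/core discovery: read chipcommon rev, capabilities and chip
+ * status, pick up PMU and GCI capabilities when present, then walk every
+ * enumerated core to identify the bus core (PCI/PCIe/PCMCIA/SDIO) and record
+ * its type, revision and index before restoring the original core index.
+ */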
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, volatile void *regs)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	bool pci, pcie, pcie_gen2 = FALSE;
+	uint i;
+	uint pciidx, pcieidx, pcirev, pcierev;
+
+#if defined(BCM_BACKPLANE_TIMEOUT) || defined(AXI_TIMEOUTS)
+	/* first, enable backplane timeouts */
+	si_slave_wrapper_add(&sii->pub);
+#endif
+	sii->curidx = 0;
+
+	cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+	/* get chipcommon chipstatus */
+	if (CCREV(sii->pub.ccrev) >= 11)
+		sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilities */
+	sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+	/* get chipcommon extended capabilities */
+
+	if (CCREV(sii->pub.ccrev) >= 35)
+		sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+	/* get pmu rev and caps */
+	if (sii->pub.cccaps & CC_CAP_PMU) {
+		if (AOB_ENAB(&sii->pub)) {
+			uint pmucoreidx;
+			pmuregs_t *pmu;
+			struct si_pub *sih = &sii->pub;
+
+			pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+			if (!GOODIDX(pmucoreidx)) {
+				SI_ERROR(("si_buscore_setup: si_findcoreidx failed\n"));
+				return FALSE;
+			}
+
+			pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+			sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities);
+			si_setcoreidx(&sii->pub, SI_CC_IDX);
+
+			sii->pub.gcirev = si_corereg(sih,
+					GCI_CORE_IDX(sih),
+					GCI_OFFSETOF(sih, gci_corecaps0), 0, 0) & GCI_CAP0_REV_MASK;
+		} else
+			sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+
+		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+	}
+
+	SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+		CCREV(sii->pub.ccrev), sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+		sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+	sii->pub.buscoretype = NODEV_CORE_ID;
+	sii->pub.buscorerev = (uint)NOREV;
+	sii->pub.buscoreidx = BADIDX;
+
+	pci = pcie = FALSE;
+	pcirev = pcierev = (uint)NOREV;
+	pciidx = pcieidx = BADIDX;
+
+	for (i = 0; i < sii->numcores; i++) {
+		uint cid, crev;
+
+		si_setcoreidx(&sii->pub, i);
+		cid = si_coreid(&sii->pub);
+		crev = si_corerev(&sii->pub);
+
+		/* Display cores found */
+		SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+			i, cid, crev, cores_info->coresba[i], cores_info->regs[i]));
+
+		if (BUSTYPE(bustype) == SI_BUS) {
+			/* now look at the chipstatus register to figure out the package */
+			/* for SDIO but downloaded on PCIE dev */
+			if (cid == PCIE2_CORE_ID) {
+				if (BCM43602_CHIP(sii->pub.chip) ||
+					(CHIPID(sii->pub.chip) == BCM4365_CHIP_ID) ||
+					(CHIPID(sii->pub.chip) == BCM4347_CHIP_ID) ||
+					(CHIPID(sii->pub.chip) == BCM4366_CHIP_ID) ||
+					((BCM4345_CHIP(sii->pub.chip) ||
+					BCM4349_CHIP(sii->pub.chip)) &&
+					CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
+					pcieidx = i;
+					pcierev = crev;
+					pcie = TRUE;
+					pcie_gen2 = TRUE;
+				}
+			}
+
+		} else if (BUSTYPE(bustype) == PCI_BUS) {
+			if (cid == PCI_CORE_ID) {
+				pciidx = i;
+				pcirev = crev;
+				pci = TRUE;
+			} else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+				pcieidx = i;
+				pcierev = crev;
+				pcie = TRUE;
+				if (cid == PCIE2_CORE_ID)
+					pcie_gen2 = TRUE;
+			}
+		} else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+		           (cid == PCMCIA_CORE_ID)) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+#ifdef BCMSDIO
+		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+		          (BUSTYPE(bustype) == SPI_BUS)) &&
+		         ((cid == PCMCIA_CORE_ID) ||
+		          (cid == SDIOD_CORE_ID))) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+#endif /* BCMSDIO */
+
+		/* find the core idx before entering this func. */
+		if ((savewin && (savewin == cores_info->coresba[i])) ||
+		    (regs == cores_info->regs[i]))
+			*origidx = i;
+	}
+
+
+#if defined(PCIE_FULL_DONGLE)
+	if (pcie) {
+		if (pcie_gen2)
+			sii->pub.buscoretype = PCIE2_CORE_ID;
+		else
+			sii->pub.buscoretype = PCIE_CORE_ID;
+		sii->pub.buscorerev = pcierev;
+		sii->pub.buscoreidx = pcieidx;
+	}
+	BCM_REFERENCE(pci);
+	BCM_REFERENCE(pcirev);
+	BCM_REFERENCE(pciidx);
+#else
+	if (pci) {
+		sii->pub.buscoretype = PCI_CORE_ID;
+		sii->pub.buscorerev = pcirev;
+		sii->pub.buscoreidx = pciidx;
+	} else if (pcie) {
+		if (pcie_gen2)
+			sii->pub.buscoretype = PCIE2_CORE_ID;
+		else
+			sii->pub.buscoretype = PCIE_CORE_ID;
+		sii->pub.buscorerev = pcierev;
+		sii->pub.buscoreidx = pcieidx;
+	}
+#endif /* defined(PCIE_FULL_DONGLE) */
+
+	SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+	         sii->pub.buscorerev));
+
+
+#if defined(BCMSDIO)
+	/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
+	 * already running.
+	 */
+	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+		    si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+			si_core_disable(&sii->pub, 0);
+	}
+#endif /* BCMSDIO */
+
+	/* return to the original core */
+	si_setcoreidx(&sii->pub, *origidx);
+
+	return TRUE;
+}
+
+
+
+
+
+uint16
+si_chipid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	return (sii->chipnew) ? sii->chipnew : sih->chip;
+}
+
+/* CHIP_ID's being mapped here should not be used anywhere else in the code */
+static void
+si_chipid_fixup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	ASSERT(sii->chipnew == 0);
+	switch (sih->chip) {
+		case BCM43567_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM43570_CHIP_ID; /* chip class */
+		break;
+		case BCM43562_CHIP_ID:
+		case BCM4358_CHIP_ID:
+		case BCM43566_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM43569_CHIP_ID; /* chip class */
+		break;
+		case BCM4356_CHIP_ID:
+		case BCM4371_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM4354_CHIP_ID; /* chip class */
+		break;
+		case BCM4357_CHIP_ID:
+		case BCM4361_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM4347_CHIP_ID; /* chip class */
+		break;
+		default:
+		break;
+	}
+}
+
+#ifdef BCMULP
+static void
+si_check_boot_type(si_t *sih, osl_t *osh)
+{
+	if (sih->pmurev >= 30) {
+		boot_type = PMU_REG_NEW(sih, swscratch, 0, 0);
+	} else {
+		boot_type = CHIPC_REG(sih, flashdata, 0, 0);
+	}
+
+	SI_ERROR(("%s: boot_type: 0x%08x\n", __func__, boot_type));
+}
+#endif /* BCMULP */
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * vars - pointer to a to-be-created pointer area for "environment" variables. Some callers of this
+ *        function set 'vars' to NULL.
+ */
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	struct si_pub *sih = &sii->pub;
+	uint32 w, savewin;
+	chipcregs_t *cc;
+	char *pvars = NULL;
+	uint origidx;
+#if !defined(_CFEZ_) || defined(CFG_WL)
+#endif /* !_CFEZ_ || CFG_WL */
+
+	ASSERT(GOODREGS(regs));
+
+	savewin = 0;
+
+	sih->buscoreidx = BADIDX;
+	sii->device_removed = FALSE;
+
+	sii->curmap = regs;
+	sii->sdh = sdh;
+	sii->osh = osh;
+	sii->second_bar0win = ~0x0;
+
+#if defined(BCM_BACKPLANE_TIMEOUT)
+	sih->err_info = MALLOCZ(osh, sizeof(si_axi_error_info_t));
+	if (sih->err_info == NULL) {
+		SI_ERROR(("%s: %zu bytes MALLOC FAILED",
+			__FUNCTION__, sizeof(si_axi_error_info_t)));
+		return NULL;
+	}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+
+	/* check to see if we are a si core mimic'ing a pci core */
+	if ((bustype == PCI_BUS) &&
+	    (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) {
+		SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI "
+		          "devid:0x%x\n", __FUNCTION__, devid));
+		bustype = SI_BUS;
+	}
+
+	/* find Chipcommon address */
+	if (bustype == PCI_BUS) {
+		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+			savewin = SI_ENUM_BASE;
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+		if (!regs)
+			return NULL;
+		cc = (chipcregs_t *)regs;
+#ifdef BCMSDIO
+	} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+		cc = (chipcregs_t *)sii->curmap;
+#endif
+	} else {
+		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+	}
+
+	sih->bustype = bustype;
+#ifdef BCMBUSTYPE
+	if (bustype != BUSTYPE(bustype)) {
+		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+			bustype, BUSTYPE(bustype)));
+		return NULL;
+	}
+#endif
+
+	/* bus/core/clk setup for register access */
+	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+		SI_ERROR(("si_doattach: si_buscore_prep failed %d\n", bustype));
+		return NULL;
+	}
+
+	/* ChipID recognition.
+	*   We assume we can read chipid at offset 0 from the regs arg.
+	*   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+	*   some way of recognizing them needs to be added here.
+	*/
+	if (!cc) {
+		SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
+		return NULL;
+	}
+	w = R_REG(osh, &cc->chipid);
+	if ((w & 0xfffff) == 148277) w -= 65532;
+	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as well fill in chip id rev & pkg */
+	sih->chip = w & CID_ID_MASK;
+	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+#if defined(BCMSDIO) && (defined(HW_OOB) || defined(FORCE_WOWLAN))
+	dhd_conf_set_hw_oob_intr(sdh, sih->chip);
+#endif
+
+	si_chipid_fixup(sih);
+
+	sih->issim = IS_SIM(sih->chippkg);
+
+	/* scan for cores */
+	if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+		SI_MSG(("Found chip type SB (0x%08x)\n", w));
+		sb_scan(&sii->pub, regs, devid);
+	} else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
+		(CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) {
+		if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+			SI_MSG(("Found chip type AI (0x%08x)\n", w));
+		else
+			SI_MSG(("Found chip type NAI (0x%08x)\n", w));
+		/* pass chipc address instead of original core base */
+
+		sii->axi_wrapper = (axi_wrapper_t *)MALLOCZ(sii->osh,
+			(sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
+
+		if (sii->axi_wrapper == NULL) {
+			SI_ERROR(("%s: %zu  bytes MALLOC Failed", __FUNCTION__,
+				(sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS)));
+			return NULL;
+		}
+
+		ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+		SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+		/* pass chipc address instead of original core base */
+		ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else {
+		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+		return NULL;
+	}
+	/* no cores found, bail out */
+	if (sii->numcores == 0) {
+		SI_ERROR(("si_doattach: could not find any cores\n"));
+		return NULL;
+	}
+	/* bus/core/clk setup */
+	origidx = SI_CC_IDX;
+	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+		goto exit;
+	}
+#ifdef BCMULP
+	si_check_boot_type(sih, osh);
+
+	if (ulp_module_init(osh, sih) != BCME_OK) {
+		ULP_ERR(("%s: err in ulp_module_init\n", __FUNCTION__));
+		goto exit;
+	}
+#endif /* BCMULP */
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+	/* assume current core is CC */
+	if ((CCREV(sii->pub.ccrev) == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43234_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+	                                 (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+		if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+			uint clkdiv;
+			clkdiv = R_REG(osh, &cc->clkdiv);
+			/* otp_clk_div is even number, 120/14 < 9mhz */
+			clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+			W_REG(osh, &cc->clkdiv, clkdiv);
+			SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+		}
+		OSL_DELAY(10);
+	}
+
+	if (bustype == PCI_BUS) {
+
+	}
+#endif /* !_CFEZ_ || CFG_WL */
+#ifdef BCM_SDRBL
+	/* 4360 rom bootloader in PCIE case: if the SDR is enabled but protection is
+	 * not turned on, then we want to hold arm in reset.
+	 * Bottom line: in the sdrenable case, we allow arm to boot only when protection is
+	 * turned on.
+	 */
+	if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
+		uint32 sflags = si_arm_sflags(&(sii->pub));
+
+		/* If SDR is enabled but protection is not turned on
+		* then we want to force arm to WFI.
+		*/
+		if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
+			disable_arm_irq();
+			while (1) {
+				hnd_cpu_wait(sih);
+			}
+		}
+	}
+#endif /* BCM_SDRBL */
+
+	pvars = NULL;
+	BCM_REFERENCE(pvars);
+
+	if (!si_onetimeinit) {
+
+
+		if (CCREV(sii->pub.ccrev) >= 20) {
+			uint32 gpiopullup = 0, gpiopulldown = 0;
+			cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+			ASSERT(cc != NULL);
+
+			/* 4314/43142 has pin muxing, don't clear gpio bits */
+			if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+				(CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+				gpiopullup |= 0x402e0;
+				gpiopulldown |= 0x20500;
+			}
+
+
+			W_REG(osh, &cc->gpiopullup, gpiopullup);
+			W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+			si_setcoreidx(sih, origidx);
+		}
+
+	}
+
+	/* clear any previous epidiag-induced target abort */
+	ASSERT(!si_taclear(sih, FALSE));
+
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+	/* Enable console prints */
+	si_muxenab(sii, 3);
+#endif
+
+	return (sii);
+
+exit:
+
+	return NULL;
+}
+
+/** may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+
+
+	if (BUSTYPE(sih->bustype) == SI_BUS)
+		for (idx = 0; idx < SI_MAXCORES; idx++)
+			if (cores_info->regs[idx]) {
+				REG_UNMAP(cores_info->regs[idx]);
+				cores_info->regs[idx] = NULL;
+			}
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (cores_info != &ksii_cores_info)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, cores_info, sizeof(si_cores_info_t));
+
+#if defined(BCM_BACKPLANE_TIMEOUT)
+	if (sih->err_info) {
+		MFREE(sii->osh, sih->err_info, sizeof(si_axi_error_info_t));
+		sii->pub.err_info = NULL;
+	}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+	if (sii->axi_wrapper) {
+		MFREE(sii->osh, sii->axi_wrapper,
+			(sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
+		sii->axi_wrapper = NULL;
+	}
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (sii != &ksii)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sii->osh != NULL) {
+		SI_ERROR(("osh is already set....\n"));
+		ASSERT(!sii->osh);
+	}
+	sii->osh = osh;
+}
+
+/** register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+                          void *intrsenabled_fn, void *intr_arg)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	sii->intr_arg = intr_arg;
+	sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+	sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+	sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* save current core id.  when this function is called, the current core
+	 * must be the core which provides driver functions (il, et, wl, etc.)
+	 */
+	sii->dev_coreid = cores_info->coreid[sii->curidx];
+}
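+
+/*
+ * Illustrative sketch (not part of the original source): how a driver might hook up its
+ * interrupt gating callbacks right after switching to its own core, so the core id recorded
+ * by si_register_intr_callback() is the driver core. The callback names (mydrv_intrsoff etc.)
+ * and the 'ctx' argument are hypothetical placeholders.
+ *
+ *	si_setcore(sih, D11_CORE_ID, 0);
+ *	si_register_intr_callback(sih, (void *)mydrv_intrsoff, (void *)mydrv_intrsrestore,
+ *	                          (void *)mydrv_intrsenabled, ctx);
+ *	...
+ *	si_deregister_intr_callback(sih);
+ */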
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intrsoff_fn = NULL;
+	sii->intrsrestore_fn = NULL;
+	sii->intrsenabled_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_intflag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return R_REG(sii->osh, ((uint32 *)(uintptr)
+			    (sii->oob_router + OOB_STATUSA)));
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_flag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_flag(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag_alt(si_t *sih)
+{
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag_alt(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_setint(sih, siflag);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_setint(sih, siflag);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_setint(sih, siflag);
+	else
+		ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	return cores_info->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->curidx;
+}
+
+volatile void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit)
+{
+	return si_setcore(sih,  D11_CORE_ID, coreunit);
+}
+
+/** return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+	uint coreid;
+	uint coreunit;
+	uint i;
+
+	coreunit = 0;
+
+	idx = sii->curidx;
+
+	ASSERT(GOODREGS(sii->curmap));
+	coreid = si_coreid(sih);
+
+	/* count the cores of our type */
+	for (i = 0; i < idx; i++)
+		if (cores_info->coreid[i] == coreid)
+			coreunit++;
+
+	return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corevendor(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corevendor(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corevendor(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+	return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corerev(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corerev(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corerev(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint found;
+	uint i;
+
+
+	found = 0;
+
+	for (i = 0; i < sii->numcores; i++)
+		if (cores_info->coreid[i] == coreid) {
+			if (found == coreunit)
+				return (i);
+			found++;
+		}
+
+	return (BADIDX);
+}
+
+/** return total coreunit of coreid or zero if not found */
+uint
+si_numcoreunits(si_t *sih, uint coreid)
+{
+	if ((CHIPID(sih->chip) == BCM4347_CHIP_ID) &&
+		(CHIPREV(sih->chiprev) == 0)) {
+		/*
+		 * 4347TC2 does not have an Aux core.
+		 * Fixed to 1 here because the EROM (using the 4349 EROM) has two entries.
+		 */
+		return 1;
+	} else	{
+		si_info_t *sii = SI_INFO(sih);
+		si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+		uint found = 0;
+		uint i;
+
+		for (i = 0; i < sii->numcores; i++) {
+			if (cores_info->coreid[i] == coreid) {
+				found++;
+			}
+		}
+
+		return found;
+	}
+}
+
+/** return total D11 coreunits */
+uint
+BCMRAMFN(si_numd11coreunits)(si_t *sih)
+{
+	uint found = 0;
+
+	found = si_numcoreunits(sih, D11_CORE_ID);
+
+#if defined(WLRSDB) && defined(WLRSDB_DISABLED)
+	/* If RSDB functionality is compiled out,
+	 * then ignore any D11 cores beyond the first
+	 * Used in norsdb dongle build variants for rsdb chip.
+	 */
+	found = 1;
+#endif /* defined(WLRSDB) && defined(WLRSDB_DISABLED) */
+
+	return found;
+}
+
+/** return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+	return (sii->numcores);
+}
+
+/** return current wrapper mapping */
+void *
+si_wrapperregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+
+	return (sii->curwrap);
+}
+
+/** return current register mapping */
+volatile void *
+si_coreregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+
+	return (sii->curmap);
+}
+
+
+/**
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+volatile void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+	uint idx;
+
+	idx = si_findcoreidx(sih, coreid, coreunit);
+	if (!GOODIDX(idx))
+		return (NULL);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, idx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, idx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, idx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+volatile void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, coreidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, coreidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, coreidx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+/** Turn off interrupt as required by sb_setcore, before switch core */
+volatile void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+	volatile void *cc;
+	si_info_t *sii = SI_INFO(sih);
+
+	if (SI_FAST(sii)) {
+		/* Overloading the origidx variable to remember the coreid,
+		 * this works because the core ids cannot be confused with
+		 * core indices.
+		 */
+		*origidx = coreid;
+		if (coreid == CC_CORE_ID)
+			return (volatile void *)CCREGS_FAST(sii);
+		else if (coreid == BUSCORETYPE(sih->buscoretype))
+			return (volatile void *)PCIEREGS(sii);
+	}
+	INTR_OFF(sii, *intr_val);
+	*origidx = sii->curidx;
+	cc = si_setcore(sih, coreid, 0);
+	ASSERT(cc != NULL);
+
+	return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == BUSCORETYPE(sih->buscoretype))))
+		return;
+
+	si_setcoreidx(sih, coreid);
+	INTR_RESTORE(sii, intr_val);
+}
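+
+/*
+ * Illustrative usage sketch (not from the original source): the usual pattern for touching a
+ * register on another core is to switch with si_switch_core(), which turns interrupts off for
+ * the non-fast path, then undo everything with si_restore_core(). The 0x120 offset is a
+ * hypothetical register offset used only for illustration.
+ *
+ *	uint origidx, intr_val;
+ *	volatile void *regs = si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ *	uint32 val = R_REG(si_osh(sih), (volatile uint32 *)((volatile uint8 *)regs + 0x120));
+ *	si_restore_core(sih, origidx, intr_val);
+ */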
+
+int
+si_numaddrspaces(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_numaddrspaces(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_numaddrspaces(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_numaddrspaces(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspace(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspace(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspace(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspacesize(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspacesize(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspacesize(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	/* Only supported for SOCI_AI */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_coreaddrspaceX(sih, asidx, addr, size);
+	else
+		*size = 0;
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_cflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_cflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_cflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_cflags_wo(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_cflags_wo(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_cflags_wo(sih, mask, val);
+	else
+		ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_sflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_sflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_sflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_commit(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_commit(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_AI || CHIPTYPE(sih->socitype) == SOCI_NAI)
+		;
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		;
+	else {
+		ASSERT(0);
+	}
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_iscoreup(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_iscoreup(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_iscoreup(sih);
+	else {
+		ASSERT(0);
+		return FALSE;
+	}
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	/* only for AI backplane chips */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return (ai_wrap_reg(sih, offset, mask, val));
+	return 0;
+}
+/* si_backplane_access is used to access a full 32-bit backplane address from the host for PCIE FD.
+ * It uses the secondary bar-0 window, which lies at an offset of 16K from the primary bar-0.
+ * Provides support for read/write of 1/2/4 bytes at a backplane address.
+ * Can be used to read/write
+ *	1. core regs
+ *	2. Wrapper regs
+ *	3. memory
+ *	4. BT area
+ * The caller passes the full 32 bit backplane address; internally, bits [31 : 12] select the
+ * "region" (window) and bits [11 : 0] are the register offset within that window.
+ * e.g. for reading 4 bytes from reg 0x200 of the d11 core (base 0x18001000):
+ *	si_backplane_access(sih, 0x18001200, 4, &val, TRUE)
+ */
+static int si_backplane_addr_sane(uint addr, uint size)
+{
+	int bcmerror = BCME_OK;
+
+	/* For 2 byte access, address has to be 2 byte aligned */
+	if (size == 2) {
+		if (addr & 0x1) {
+			bcmerror = BCME_ERROR;
+		}
+	}
+	/* For 4 byte access, address has to be 4 byte aligned */
+	if (size == 4) {
+		if (addr & 0x3) {
+			bcmerror = BCME_ERROR;
+		}
+	}
+	return bcmerror;
+}
+
+void
+si_invalidate_second_bar0win(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sii->second_bar0win = ~0x0;
+}
+
+uint
+si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
+{
+	volatile uint32 *r = NULL;
+	uint32 region = 0;
+	si_info_t *sii = SI_INFO(sih);
+
+	/* Valid only for pcie bus */
+	if (BUSTYPE(sih->bustype) != PCI_BUS) {
+		SI_ERROR(("Valid only for pcie bus \n"));
+		return BCME_ERROR;
+	}
+
+	/* Split addr into region and address offset */
+	region = (addr & (0xFFFFF << 12));
+	addr = addr & 0xFFF;
+
+	/* check for address and size sanity */
+	if (si_backplane_addr_sane(addr, size) != BCME_OK)
+		return BCME_ERROR;
+
+	/* Update window if required */
+	if (sii->second_bar0win != region) {
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region);
+		sii->second_bar0win = region;
+	}
+
+	/* Compute the effective address
+	 * sii->curmap   : bar-0 virtual address
+	 * PCI_SECOND_BAR0_OFFSET  : secondary bar-0 offset
+	 * addr : actual reg offset within the window
+	 */
+	r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr);
+
+	SI_VMSG(("si curmap %p  region %x regaddr %x effective addr %p READ %d\n",
+		(volatile char*)sii->curmap, region, addr, r, read));
+
+	switch (size) {
+		case sizeof(uint8) :
+			if (read)
+				*val = R_REG(sii->osh, (volatile uint8*)r);
+			else
+				W_REG(sii->osh, (volatile uint8*)r, *val);
+			break;
+		case sizeof(uint16) :
+			if (read)
+				*val = R_REG(sii->osh, (volatile uint16*)r);
+			else
+				W_REG(sii->osh, (volatile uint16*)r, *val);
+			break;
+		case sizeof(uint32) :
+			if (read)
+				*val = R_REG(sii->osh, (volatile uint32*)r);
+			else
+				W_REG(sii->osh, (volatile uint32*)r, *val);
+			break;
+		default :
+			SI_ERROR(("Invalid  size %d \n", size));
+			return (BCME_ERROR);
+			break;
+	}
+
+	return (BCME_OK);
+}
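+
+/*
+ * Illustrative sketch (not part of the original source): reading a 32-bit register at backplane
+ * address 0x18001200 (reg 0x200 of a core enumerated at 0x18001000) over PCIE. 'val' is a
+ * hypothetical local.
+ *
+ *	uint val = 0;
+ *	if (si_backplane_access(sih, 0x18001200, sizeof(uint32), &val, TRUE) == BCME_OK)
+ *		;	// use val
+ */
+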
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg(sih, coreidx, regoff, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg(sih, coreidx, regoff, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corereg(sih, coreidx, regoff, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/** ILP sensitive register access needs special treatment to avoid backplane stalls */
+bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff)
+{
+	if (idx == SI_CC_IDX) {
+		if (CHIPCREGS_ILP_SENSITIVE(regoff))
+			return TRUE;
+	} else if (PMUREGS_ILP_SENSITIVE(regoff)) {
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/** 'idx' should refer either to the chipcommon core or the PMU core */
+uint
+si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val)
+{
+	int pmustatus_offset;
+
+	/* prevent backplane stall on double write to 'ILP domain' registers in the PMU */
+	if (mask != 0 && PMUREV(sih->pmurev) >= 22 &&
+	    si_pmu_is_ilp_sensitive(idx, regoff)) {
+		pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) :
+			OFFSETOF(chipcregs_t, pmustatus);
+
+		while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING)
+			{};
+	}
+
+	return si_corereg(sih, idx, regoff, mask, val);
+}
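+
+/*
+ * Illustrative sketch (not part of the original source): ILP-sensitive PMU registers should be
+ * written through si_pmu_corereg() so the PST_SLOW_WR_PENDING polling above is applied. 'ticks'
+ * is a hypothetical value; pmuwatchdog is the register written by si_watchdog() below.
+ *
+ *	si_pmu_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+ */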
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * translates the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+volatile uint32 *
+si_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg_addr(sih, coreidx, regoff);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg_addr(sih, coreidx, regoff);
+	else {
+		return 0;
+	}
+}
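+
+/*
+ * Illustrative sketch (not part of the original source): for registers that need no core switch
+ * (e.g. chipcommon), si_corereg_addr() gives a pointer usable with R_REG/W_REG directly, avoiding
+ * the mask/val read-modify-write path of si_corereg().
+ *
+ *	volatile uint32 *r = si_corereg_addr(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clkdiv));
+ *	if (r != NULL)
+ *		(void)R_REG(si_osh(sih), r);
+ */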
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_disable(sih, bits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_disable(sih, bits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_reset(sih, bits, resetbits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_reset(sih, bits, resetbits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_reset(sih, bits, resetbits);
+}
+
+/** Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+	uint32 cflags;
+	int result = 0;
+
+	/* Read core control flags */
+	cflags = si_core_cflags(sih, 0, 0);
+
+	/* Set bist & fgc */
+	si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+	/* Wait for bist done */
+	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+		result = BCME_ERROR;
+
+	/* Reset core control flags */
+	si_core_cflags(sih, 0xffff, cflags);
+
+	return result;
+}
+
+uint
+si_num_slaveports(si_t *sih, uint coreid)
+{
+	uint idx = si_findcoreidx(sih, coreid, 0);
+	uint num = 0;
+
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI))
+		num = ai_num_slaveports(sih, idx);
+
+	return num;
+}
+
+uint32
+si_get_slaveport_addr(si_t *sih, uint asidx, uint core_id, uint coreunit)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx = sii->curidx;
+	uint32 addr = 0x0;
+
+	if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)))
+		goto done;
+
+	si_setcore(sih, core_id, coreunit);
+
+	addr = ai_addrspace(sih, asidx);
+
+	si_setcoreidx(sih, origidx);
+
+done:
+	return addr;
+}
+
+uint32
+si_get_d11_slaveport_addr(si_t *sih, uint asidx, uint coreunit)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx = sii->curidx;
+	uint32 addr = 0x0;
+
+	if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)))
+		goto done;
+
+	si_setcore(sih, D11_CORE_ID, coreunit);
+
+	addr = ai_addrspace(sih, asidx);
+
+	si_setcoreidx(sih, origidx);
+
+done:
+	return addr;
+}
+
+static uint32
+factor6(uint32 x)
+{
+	switch (x) {
+	case CC_F6_2:	return 2;
+	case CC_F6_3:	return 3;
+	case CC_F6_4:	return 4;
+	case CC_F6_5:	return 5;
+	case CC_F6_6:	return 6;
+	case CC_F6_7:	return 7;
+	default:	return 0;
+	}
+}
+
+/*
+ * Divide the clock by the divisor with protection for
+ * a zero divisor.
+ */
+static uint32
+divide_clock(uint32 clock, uint32 div)
+{
+	return div ? clock / div : 0;
+}
+
+
+/** calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+	uint32 n1, n2, clock, m1, m2, m3, mc;
+
+	n1 = n & CN_N1_MASK;
+	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+	if (pll_type == PLL_TYPE6) {
+		if (m & CC_T6_MMASK)
+			return CC_T6_M1;
+		else
+			return CC_T6_M0;
+	} else if ((pll_type == PLL_TYPE1) ||
+	           (pll_type == PLL_TYPE3) ||
+	           (pll_type == PLL_TYPE4) ||
+	           (pll_type == PLL_TYPE7)) {
+		n1 = factor6(n1);
+		n2 += CC_F5_BIAS;
+	} else if (pll_type == PLL_TYPE2) {
+		n1 += CC_T2_BIAS;
+		n2 += CC_T2_BIAS;
+		ASSERT((n1 >= 2) && (n1 <= 7));
+		ASSERT((n2 >= 5) && (n2 <= 23));
+	} else if (pll_type == PLL_TYPE5) {
+		return (100000000);
+	} else
+		ASSERT(0);
+	/* PLL types 3 and 7 use BASE2 (25Mhz) */
+	if ((pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE7)) {
+		clock = CC_CLOCK_BASE2 * n1 * n2;
+	} else
+		clock = CC_CLOCK_BASE1 * n1 * n2;
+
+	if (clock == 0)
+		return 0;
+
+	m1 = m & CC_M1_MASK;
+	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+	if ((pll_type == PLL_TYPE1) ||
+	    (pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE4) ||
+	    (pll_type == PLL_TYPE7)) {
+		m1 = factor6(m1);
+		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+			m2 += CC_F5_BIAS;
+		else
+			m2 = factor6(m2);
+		m3 = factor6(m3);
+
+		switch (mc) {
+		case CC_MC_BYPASS:	return (clock);
+		case CC_MC_M1:		return divide_clock(clock, m1);
+		case CC_MC_M1M2:	return divide_clock(clock, m1 * m2);
+		case CC_MC_M1M2M3:	return divide_clock(clock, m1 * m2 * m3);
+		case CC_MC_M1M3:	return divide_clock(clock, m1 * m3);
+		default:		return (0);
+		}
+	} else {
+		ASSERT(pll_type == PLL_TYPE2);
+
+		m1 += CC_T2_BIAS;
+		m2 += CC_T2M2_BIAS;
+		m3 += CC_T2_BIAS;
+		ASSERT((m1 >= 2) && (m1 <= 7));
+		ASSERT((m2 >= 3) && (m2 <= 10));
+		ASSERT((m3 >= 2) && (m3 <= 7));
+
+		if ((mc & CC_T2MC_M1BYP) == 0)
+			clock /= m1;
+		if ((mc & CC_T2MC_M2BYP) == 0)
+			clock /= m2;
+		if ((mc & CC_T2MC_M3BYP) == 0)
+			clock /= m3;
+
+		return (clock);
+	}
+}
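+
+/*
+ * Worked example (illustrative, not from the original source): for PLL_TYPE1 with
+ * mc == CC_MC_M1M2 the computation above reduces to
+ *
+ *	rate = CC_CLOCK_BASE1 * factor6(n1) * (n2 + CC_F5_BIAS)
+ *	       / (factor6(m1) * (m2 + CC_F5_BIAS))
+ *
+ * with divide_clock() guarding against a zero divisor.
+ */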
+
+/**
+ * Some chips could have multiple host interfaces, however only one will be active
+ * for a given chip. Return the active host interface depending on pkgopt and cc_chipst.
+ */
+uint
+si_chip_hostif(si_t *sih)
+{
+	uint hosti = 0;
+
+	switch (CHIPID(sih->chip)) {
+	case BCM43018_CHIP_ID:
+	case BCM43430_CHIP_ID:
+		hosti = CHIP_HOSTIF_SDIOMODE;
+		break;
+	case BCM43012_CHIP_ID:
+		hosti = CHIP_HOSTIF_SDIOMODE;
+		break;
+	CASE_BCM43602_CHIP:
+		hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4360_CHIP_ID:
+		/* chippkg bit-0 == 0 is PCIE only pkgs
+		 * chippkg bit-0 == 1 has both PCIE and USB cores enabled
+		 */
+		if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+
+		break;
+
+	case BCM4335_CHIP_ID:
+		/* TBD: like in 4360, do we need to check pkg? */
+		if (CST4335_CHIPMODE_USB20D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4335_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	CASE_BCM4345_CHIP:
+		if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4345_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4345_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4349_CHIP_GRPID:
+	case BCM53573_CHIP_GRPID:
+		if (CST4349_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4349_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+	case BCM4347_CHIP_ID:
+		 if (CST4347_CHIPMODE_SDIOD(sih->chipst))
+			 hosti = CHIP_HOSTIF_SDIOMODE;
+		 else if (CST4347_CHIPMODE_PCIE(sih->chipst))
+			 hosti = CHIP_HOSTIF_PCIEMODE;
+		 break;
+
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+	case BCM43570_CHIP_ID:
+	case BCM4358_CHIP_ID:
+		if (CST4350_CHIPMODE_USB20D(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC20D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D_WL(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC30D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4350_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4350_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	default:
+		break;
+	}
+
+	return hosti;
+}
+
+
+/** set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+	uint nb, maxt;
+	uint pmu_wdt = 1;
+
+
+	if (PMUCTL_ENAB(sih) && pmu_wdt) {
+		nb = (CCREV(sih->ccrev) < 26) ? 16 : ((CCREV(sih->ccrev) >= 37) ? 32 : 24);
+		/* The mips compiler uses the sllv instruction,
+		 * so we specially handle the 32-bit case.
+		 */
+		if (nb == 32)
+			maxt = 0xffffffff;
+		else
+			maxt = ((1 << nb) - 1);
+
+		if (ticks == 1)
+			ticks = 2;
+		else if (ticks > maxt)
+			ticks = maxt;
+		if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
+			PMU_REG_NEW(sih, min_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK);
+			PMU_REG_NEW(sih, watchdog_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK);
+			PMU_REG_NEW(sih, pmustatus, PST_WDRESET, PST_WDRESET);
+			PMU_REG_NEW(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_SWENAB, 0);
+			SPINWAIT((PMU_REG(sih, pmustatus, 0, 0) & PST_ILPFASTLPO),
+				PMU_MAX_TRANSITION_DLY);
+		}
+
+		pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks);
+	} else {
+		maxt = (1 << 28) - 1;
+		if (ticks > maxt)
+			ticks = maxt;
+
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+	}
+}
+
+/** trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+	si_watchdog(sih, wd_msticks * ms);
+}
+
+uint32 si_watchdog_msticks(void)
+{
+	return wd_msticks;
+}
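+
+/*
+ * Illustrative sketch (not part of the original source): a caller that wants the chip to reset
+ * itself if not serviced within roughly one second could arm the watchdog as below; writing 0
+ * is commonly used to disarm it (an assumption about the hardware, not stated here).
+ *
+ *	si_watchdog_ms(sih, 1000);	// fires after ~1000 ms (wd_msticks * 1000 ticks)
+ *	...
+ *	si_watchdog_ms(sih, 0);		// cancel
+ */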
+
+bool
+si_taclear(si_t *sih, bool details)
+{
+	return FALSE;
+}
+
+
+
+/** return the slow clock source - LPO, XTAL, or PCI */
+static uint
+si_slowclk_src(si_info_t *sii)
+{
+	chipcregs_t *cc;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	if (CCREV(sii->pub.ccrev) < 6) {
+		if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+		    (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
+		     PCI_CFG_GPIO_SCS))
+			return (SCC_SS_PCI);
+		else
+			return (SCC_SS_XTAL);
+	} else if (CCREV(sii->pub.ccrev) < 10) {
+		cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
+		ASSERT(cc);
+		return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+	} else	/* Insta-clock */
+		return (SCC_SS_XTAL);
+}
+
+/** return the ILP (slowclock) min or max frequency */
+static uint
+si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+{
+	uint32 slowclk;
+	uint div;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	/* shouldn't be here unless we've established the chip has dynamic clk control */
+	ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+	slowclk = si_slowclk_src(sii);
+	if (CCREV(sii->pub.ccrev) < 6) {
+		if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
+		else
+			return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
+	} else if (CCREV(sii->pub.ccrev) < 10) {
+		div = 4 *
+		        (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
+		if (slowclk == SCC_SS_LPO)
+			return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+		else if (slowclk == SCC_SS_XTAL)
+			return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
+		else if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
+		else
+			ASSERT(0);
+	} else {
+		/* Chipc rev 10 is InstaClock */
+		div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+		div = 4 * (div + 1);
+		return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+	}
+	return (0);
+}
+
+static void
+si_clkctl_setdelay(si_info_t *sii, void *chipcregs)
+{
+	chipcregs_t *cc = (chipcregs_t *)chipcregs;
+	uint slowmaxfreq, pll_delay, slowclk;
+	uint pll_on_delay, fref_sel_delay;
+
+	pll_delay = PLL_DELAY;
+
+	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
+	 * since the xtal will also be powered down by dynamic clk control logic.
+	 */
+
+	slowclk = si_slowclk_src(sii);
+	if (slowclk != SCC_SS_XTAL)
+		pll_delay += XTAL_ON_DELAY;
+
+	/* Starting with 4318 it is ILP that is used for the delays */
+	slowmaxfreq = si_slowclk_freq(sii, (CCREV(sii->pub.ccrev) >= 10) ? FALSE : TRUE, cc);
+
+	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+	W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+	W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
+
+/** initialize power control delay registers */
+void
+si_clkctl_init(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx = 0;
+	chipcregs_t *cc;
+	bool fast;
+
+	if (!CCCTL_ENAB(sih))
+		return;
+
+	sii = SI_INFO(sih);
+	fast = SI_FAST(sii);
+	if (!fast) {
+		origidx = sii->curidx;
+		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+			return;
+	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+		return;
+	ASSERT(cc != NULL);
+
+	/* set all Instaclk chip ILP to 1 MHz */
+	if (CCREV(sih->ccrev) >= 10)
+		SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+	si_clkctl_setdelay(sii, (void *)(uintptr)cc);
+
+	OSL_DELAY(20000);
+
+	if (!fast)
+		si_setcoreidx(sih, origidx);
+}
+
+
+/** change logical "focus" to the gpio core for optimized access */
+volatile void *
+si_gpiosetcore(si_t *sih)
+{
+	return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/**
+ * mask & set gpiocontrol bits.
+ * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
+ * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
+ *   to some chip-specific purpose.
+ */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
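+
+/*
+ * Illustrative sketch (not part of the original source): handing GPIO 5 over to its chip-specific
+ * function and later returning it to chipcommon control. GPIO 5 is an arbitrary example pin.
+ *
+ *	si_gpiocontrol(sih, 1 << 5, 1 << 5, GPIO_HI_PRIORITY);	// dedicate pin 5
+ *	si_gpiocontrol(sih, 1 << 5, 0, GPIO_HI_PRIORITY);	// back to chipcommon GPIO
+ */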
+
+/** mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioouten);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioout);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIO's and only application users need to
+	 * reserve/release GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already reserved */
+	if (si_gpioreservation & gpio_bitmask)
+		return 0xffffffff;
+	/* set reservation */
+	si_gpioreservation |= gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/**
+ * release one gpio.
+ *
+ * releasing the gpio doesn't change the current value on the GPIO; the last write value
+ * persists till someone overwrites it.
+ */
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIO's and only application users need to
+	 * reserve/release GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already released */
+	if (!(si_gpioreservation & gpio_bitmask))
+		return 0xffffffff;
+
+	/* clear reservation */
+	si_gpioreservation &= ~gpio_bitmask;
+
+	return si_gpioreservation;
+}
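+
+/*
+ * Illustrative sketch (not part of the original source): a typical reserve/use/release sequence
+ * for a non-zero-priority user on SI_BUS, driving GPIO 3 high as an output. GPIO 3 is an
+ * arbitrary example pin and GPIO_APP_PRIORITY is assumed to be the application-priority constant.
+ *
+ *	if (si_gpioreserve(sih, 1 << 3, GPIO_APP_PRIORITY) != 0xffffffff) {
+ *		si_gpioouten(sih, 1 << 3, 1 << 3, GPIO_APP_PRIORITY);
+ *		si_gpioout(sih, 1 << 3, 1 << 3, GPIO_APP_PRIORITY);
+ *		si_gpiorelease(sih, 1 << 3, GPIO_APP_PRIORITY);
+ *	}
+ */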
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+	uint regoff;
+
+	regoff = OFFSETOF(chipcregs_t, gpioin);
+	return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointmask);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+uint32
+si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+	regoff = OFFSETOF(chipcregs_t, gpioeventintmask);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CCREV(sih->ccrev) < 16)
+		return 0xffffffff;
+
+	/* gpio led powersave reg */
+	return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+	if (CCREV(sih->ccrev) < 16)
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX,
+		OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (CCREV(sih->ccrev) < 20)
+		return 0xffffffff;
+
+	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (CCREV(sih->ccrev) < 11)
+		return 0xffffffff;
+
+	if (regtype == GPIO_REGEVT)
+		offs = OFFSETOF(chipcregs_t, gpioevent);
+	else if (regtype == GPIO_REGEVT_INTMSK)
+		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+	else if (regtype == GPIO_REGEVT_INTPOL)
+		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+	else
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+	uint offs;
+
+	if (CCREV(sih->ccrev) < 11)
+		return 0xffffffff;
+
+	offs = OFFSETOF(chipcregs_t, intmask);
+	return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
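+
+/*
+ * Illustrative sketch (not part of the original source): routing an active-low GPIO 7 into the
+ * chipcommon interrupt. GPIO 7 and the polarity choice are arbitrary examples.
+ *
+ *	si_gpiointpolarity(sih, 1 << 7, 1 << 7, GPIO_HI_PRIORITY);	// treat low level as asserted
+ *	si_gpiointmask(sih, 1 << 7, 1 << 7, GPIO_HI_PRIORITY);		// unmask pin 7
+ *	si_gpio_int_enable(sih, TRUE);					// gate CI_GPIO into intmask
+ */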
+
+/** Return the size of the specified SYSMEM bank */
+static uint
+sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx)
+{
+	uint banksize, bankinfo;
+	uint bankidx = idx;
+
+	W_REG(sii->osh, &regs->bankidx, bankidx);
+	bankinfo = R_REG(sii->osh, &regs->bankinfo);
+	banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1);
+	return banksize;
+}
+
+/** Return the RAM size of the SYSMEM core */
+uint32
+si_sysmem_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+
+	sysmemregs_t *regs;
+	bool wasup;
+	uint32 coreinfo;
+	uint memsize = 0;
+	uint8 i;
+	uint nb, nrb;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SYSMEM core */
+	if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Number of ROM banks; SW needs to skip the ROM banks. */
+	nrb = (coreinfo & SYSMEM_SRCI_ROMNB_MASK) >> SYSMEM_SRCI_ROMNB_SHIFT;
+
+	nb = (coreinfo & SYSMEM_SRCI_SRNB_MASK) >> SYSMEM_SRCI_SRNB_SHIFT;
+	for (i = 0; i < nb; i++)
+		memsize += sysmem_banksize(sii, regs, i + nrb);
+
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+/** Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+	uint banksize, bankinfo;
+	uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+	ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+	W_REG(sii->osh, &regs->bankidx, bankidx);
+	bankinfo = R_REG(sii->osh, &regs->bankinfo);
+	banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+	return banksize;
+}
+
+void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		W_REG(sii->osh, &regs->bankidx, bankidx);
+		W_REG(sii->osh, &regs->bankpda, bankpda);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	if (!set)
+		*enable = *protect = *remap = 0;
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+		uint32 bankidx, bankinfo;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (set) {
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
+				if (*enable) {
+					bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+					if (*protect)
+						bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+					if ((corerev >= 16) && *remap)
+						bankinfo |=
+							(1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
+				}
+				W_REG(sii->osh, &regs->bankinfo, bankinfo);
+			} else if (i == 0) {
+				if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+					*enable = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+						*protect = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
+						*remap = 1;
+				}
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup, remap = FALSE;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				remap = TRUE;
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+	return remap;
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+	if (si_socdevram_size(sih) > 0)
+		return TRUE;
+	else
+		return FALSE;
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+uint32
+si_socdevram_remap_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0, banksz;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+
+		/*
+		 * FIX: A0 Issue: Max addressable is 512KB, instead of 640KB.
+		 * Only four banks are accessible to ARM
+		 */
+		if ((corerev == 16) && (nb == 5))
+			nb = 4;
+
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+				memsize += banksz;
+			} else {
+				/* Account only consecutive banks for now */
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+/** Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev == 0)
+		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+	else if (corerev < 3) {
+		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+	} else if ((corerev <= 7) || (corerev == 12)) {
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+		if (lss != 0)
+			nb --;
+		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+		if (lss != 0)
+			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+	} else {
+		uint8 i;
+		uint nb;
+		/* the SRAM bank count field is wider for corerev 23 and greater */
+		if (corerev >= 23) {
+			nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT)) >> SRCI_SRNB_SHIFT;
+		} else {
+			nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		}
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+uint32
+si_tcm_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	volatile uint8 *regs;
+	bool wasup;
+	uint32 corecap;
+	uint memsize = 0;
+	uint32 nab = 0;
+	uint32 nbb = 0;
+	uint32 totb = 0;
+	uint32 bxinfo = 0;
+	uint32 idx = 0;
+	volatile uint32 *arm_cap_reg;
+	volatile uint32 *arm_bidx;
+	volatile uint32 *arm_binfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to CR4 core */
+	if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size. If in reset, come out of reset,
+	 * but remain in halt
+	 */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
+
+	arm_cap_reg = (volatile uint32 *)(regs + SI_CR4_CAP);
+	corecap = R_REG(sii->osh, arm_cap_reg);
+
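+	/* bank counts for the two TCM regions, taken from the capability register */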
+	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+	totb = nab + nbb;
+
+	arm_bidx = (volatile uint32 *)(regs + SI_CR4_BANKIDX);
+	arm_binfo = (volatile uint32 *)(regs + SI_CR4_BANKINFO);
+	for (idx = 0; idx < totb; idx++) {
+		W_REG(sii->osh, arm_bidx, idx);
+
+		bxinfo = R_REG(sii->osh, arm_binfo);
+		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+bool
+si_has_flops(si_t *sih)
+{
+	uint origidx, cr4_rev;
+
+	/* Find out CR4 core revision */
+	origidx = si_coreidx(sih);
+	if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
+		cr4_rev = si_corerev(sih);
+		si_setcoreidx(sih, origidx);
+
+		if (cr4_rev == 1 || cr4_rev >= 3)
+			return TRUE;
+	}
+	return FALSE;
+}
+
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+		return (32 * 1024);
+	}
+
+	if (CHIPID(sih->chip) == BCM43430_CHIP_ID ||
+		CHIPID(sih->chip) == BCM43018_CHIP_ID) {
+		return (64 * 1024);
+	}
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev >= 16) {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++) {
+			W_REG(sii->osh, &regs->bankidx, i);
+			if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+				memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+void
+si_btcgpiowar(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint origidx;
+	uint intr_val = 0;
+	chipcregs_t *cc;
+
+	/* Make sure that a ChipCommon core is present and
+	 * UART_TX is strapped to 1
+	 */
+	if (!(sih->cccaps & CC_CAP_UARTGPIO))
+		return;
+
+	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(cc != NULL);
+
+	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	uint intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		INTR_RESTORE(sii, intr_val);
+		return;
+	}
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	/* bt_shd0 controls are same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */
+	if (on) {
+		/* Enable bt_shd0 on gpio4: */
+		val |= (CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_restore(si_t *sih, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+	W_REG(sii->osh, &cc->chipcontrol, val);
+	si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_chipcontrl_read(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return -1;
+	}
+	val = R_REG(sii->osh, &cc->chipcontrol);
+	si_setcoreidx(sih, origidx);
+	return val;
+}
+
+void
+si_chipcontrl_epa4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+			val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+			/* Ext PA Controls for 4331 12x9 Package */
+			W_REG(sii->osh, &cc->chipcontrol, val);
+		} else {
+			/* Ext PA Controls for 4331 12x12 Package */
+			if (CHIPREV(sih->chiprev) > 0) {
+				W_REG(sii->osh, &cc->chipcontrol, val |
+				      (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
+			} else {
+				W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
+			}
+		}
+	} else {
+		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
+void
+si_chipcontrl_srom4360(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		val &= ~(CCTRL4360_SECI_MODE |
+			CCTRL4360_BTSWCTRL_MODE |
+			CCTRL4360_EXTRA_FEMCTRL_MODE |
+			CCTRL4360_BT_LGCY_MODE |
+			CCTRL4360_CORE2FEMCTRL4_ON);
+
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		/* off: no register change is made */
+	}
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_clk_srom4365(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+	val = R_REG(sii->osh, &cc->clkdiv2);
+	W_REG(sii->osh, &cc->clkdiv2, ((val&~0xf) | 0x4));
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
+{
+	si_info_t *sii;
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	bool sel_chip;
+
+	sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43431_CHIP_ID);
+	sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));
+
+	if (!sel_chip)
+		return;
+
+	sii = SI_INFO(sih);
+	origidx = si_coreidx(sih);
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (enter_wowl) {
+		val |= CCTRL4331_EXTPA_EN;
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+	si_setcoreidx(sih, origidx);
+}
+#endif /* !_CFEZ_ || CFG_WL */
+
+uint
+si_pll_reset(si_t *sih)
+{
+	uint err = 0;
+
+	return (err);
+}
+
+/** Enable BT-COEX & Ex-PA for 4313 */
+void
+si_epa_4313war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+
+	/* EPA Fix */
+	W_REG(sii->osh, &cc->gpiocontrol,
+	R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
+{
+}
+
+void
+si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag)
+{
+}
+
+/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */
+void
+si_pmu_synth_pwrsw_4313_war(si_t *sih)
+{
+}
+
+/** WL/BT control for 4313 btcombo boards >= P250 */
+void
+si_btcombo_p250_4313_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+	W_REG(sii->osh, &cc->gpiocontrol,
+		R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
+
+	W_REG(sii->osh, &cc->gpioouten,
+		R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+void
+si_btc_enable_chipcontrol(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+
+	/* BT fix */
+	W_REG(sii->osh, &cc->chipcontrol,
+		R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+void
+si_btcombo_43228_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+		SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+		return;
+	}
+
+	W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
+	W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** cache device removed state */
+void si_set_device_removed(si_t *sih, bool status)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	sii->device_removed = status;
+}
+
+/** check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+	uint32 w;
+	si_info_t *sii = SI_INFO(sih);
+
+	if (sii->device_removed) {
+		return TRUE;
+	}
+
+	switch (BUSTYPE(sih->bustype)) {
+	case PCI_BUS:
+		ASSERT(SI_INFO(sih)->osh != NULL);
+		w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32));
+		if ((w & 0xFFFF) != VENDOR_BROADCOM)
+			return TRUE;
+		break;
+	}
+	return FALSE;
+}
+
+bool
+si_is_warmboot(void)
+{
+
+#ifdef BCMULP
+	return (boot_type == WARM_BOOT);
+#else
+	return FALSE;
+#endif
+}
+
+bool
+si_is_sprom_available(si_t *sih)
+{
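+	/* For ccrev >= 31 the SRC_PRESENT bit in chipcommon sromcontrol reports
+	 * SPROM presence directly (provided the SROM capability bit is set).
+	 */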
+	if (CCREV(sih->ccrev) >= 31) {
+		si_info_t *sii;
+		uint origidx;
+		chipcregs_t *cc;
+		uint32 sromctrl;
+
+		if ((sih->cccaps & CC_CAP_SROM) == 0)
+			return FALSE;
+
+		sii = SI_INFO(sih);
+		origidx = sii->curidx;
+		cc = si_setcoreidx(sih, SI_CC_IDX);
+		ASSERT(cc);
+		sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+		si_setcoreidx(sih, origidx);
+		return (sromctrl & SRC_PRESENT);
+	}
+
+	switch (CHIPID(sih->chip)) {
+	case BCM43018_CHIP_ID:
+	case BCM43430_CHIP_ID:
+		return FALSE;
+	case BCM4336_CHIP_ID:
+	case BCM43362_CHIP_ID:
+		return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+	case BCM4330_CHIP_ID:
+		return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+	case BCM4313_CHIP_ID:
+		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+	case BCM4331_CHIP_ID:
+	case BCM43431_CHIP_ID:
+		return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+	case BCM43239_CHIP_ID:
+		return ((sih->chipst & CST43239_SPROM_MASK) &&
+			!(sih->chipst & CST43239_SFLASH_MASK));
+	case BCM4324_CHIP_ID:
+	case BCM43242_CHIP_ID:
+		return ((sih->chipst & CST4324_SPROM_MASK) &&
+			!(sih->chipst & CST4324_SFLASH_MASK));
+	case BCM4335_CHIP_ID:
+	CASE_BCM4345_CHIP:
+		return ((sih->chipst & CST4335_SPROM_MASK) &&
+			!(sih->chipst & CST4335_SFLASH_MASK));
+	case BCM4349_CHIP_GRPID:
+		return (sih->chipst & CST4349_SPROM_PRESENT) != 0;
+	case BCM53573_CHIP_GRPID:
+		return FALSE; /* SPROM PRESENT is not defined for 53573 as of now */
+	case BCM4347_CHIP_ID:
+		return (sih->chipst & CST4347_SPROM_PRESENT) != 0;
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+	case BCM43570_CHIP_ID:
+	case BCM4358_CHIP_ID:
+		return (sih->chipst & CST4350_SPROM_PRESENT) != 0;
+	CASE_BCM43602_CHIP:
+		return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
+	case BCM43131_CHIP_ID:
+	case BCM43217_CHIP_ID:
+	case BCM43227_CHIP_ID:
+	case BCM43228_CHIP_ID:
+	case BCM43428_CHIP_ID:
+		return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
+	case BCM43012_CHIP_ID:
+		return FALSE;
+	default:
+		return TRUE;
+	}
+}
+
+
+uint32 si_get_sromctl(si_t *sih)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 sromctl;
+	osl_t *osh = si_osh(sih);
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	sromctl = R_REG(osh, &cc->sromcontrol);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return sromctl;
+}
+
+int si_set_sromctl(si_t *sih, uint32 value)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	osl_t *osh = si_osh(sih);
+	int ret = BCME_OK;
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	if (si_corerev(sih) >= 32) {
+		/* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and
+		 * SpromPresent is 1.
+		 */
+		if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 &&
+		     (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) {
+			W_REG(osh, &cc->sromcontrol, value);
+		} else {
+			ret = BCME_NODEVICE;
+		}
+	} else {
+		ret = BCME_UNSUPPORTED;
+	}
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+
+	return ret;
+}
+
+uint
+si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
+{
+	uint origidx, intr_val = 0;
+	uint ret_val;
+	si_info_t *sii = SI_INFO(sih);
+
+	origidx = si_coreidx(sih);
+
+	INTR_OFF(sii, intr_val);
+	si_setcoreidx(sih, coreidx);
+
+	ret_val = si_wrapperreg(sih, offset, mask, val);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret_val;
+}
+
+
+/* Clean up the timer from the host when the ARM has been halted
+ * without a chance to clean up its own resources.
+ * If not cleaned up, an interrupt from a software timer can still
+ * request the HT clock while the ARM is halted.
+ */
+uint32
+si_pmu_res_req_timer_clr(si_t *sih)
+{
+	uint32 mask;
+
+	mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ;
+	mask <<= 14;
+	/* clear mask bits */
+	pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0);
+	/* readback to ensure write completes */
+	return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0);
+}
+
+/** turn on/off rfldo */
+void
+si_pmu_rfldo(si_t *sih, bool on)
+{
+}
+
+
+#ifdef SURVIVE_PERST_ENAB
+static uint32
+si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (!PCIE(sii))
+		return (0);
+
+	return pcie_survive_perst(sii->pch, mask, val);
+}
+
+static void
+si_watchdog_reset(si_t *sih)
+{
+	uint32 i;
+
+	/* issue a watchdog reset */
+	pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, 2, 2);
+	/* do busy wait for 20ms */
+	for (i = 0; i < 2000; i++) {
+		OSL_DELAY(10);
+	}
+}
+#endif /* SURVIVE_PERST_ENAB */
+
+void
+si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val)
+{
+#ifdef SURVIVE_PERST_ENAB
+	if (BUSTYPE(sih->bustype) != PCI_BUS)
+		  return;
+
+	if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) ||
+	    (CHIPREV(sih->chiprev) >= 4))
+		return;
+
+	if (reset) {
+		si_info_t *sii = SI_INFO(sih);
+		uint32 bar0win, bar0win_after;
+
+		/* save the bar0win */
+		bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+
+		si_watchdog_reset(sih);
+
+		bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (bar0win_after != bar0win) {
+			SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n",
+				__FUNCTION__, bar0win, bar0win_after));
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win);
+		}
+	}
+	if (sperst_mask) {
+		/* enable survive perst */
+		si_pcie_survive_perst(sih, sperst_mask, sperst_val);
+	}
+#endif /* SURVIVE_PERST_ENAB */
+}
+
+/* The caller of this function must make sure it is on the PCIE core.
+ * Used in pciedev.c.
+ */
+void
+si_pcie_disable_oobselltr(si_t *sih)
+{
+	ASSERT(si_coreid(sih) == PCIE2_CORE_ID);
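+	/* per the function name, writing 0 to AI_OOBSELIND30 disables LTR OOB signalling */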
+	si_wrapperreg(sih, AI_OOBSELIND30, ~0, 0);
+}
+
+void
+si_pcie_ltr_war(si_t *sih)
+{
+}
+
+void
+si_pcie_hw_LTR_war(si_t *sih)
+{
+}
+
+void
+si_pciedev_reg_pm_clk_period(si_t *sih)
+{
+}
+
+void
+si_pciedev_crwlpciegen2(si_t *sih)
+{
+}
+
+void
+si_pcie_prep_D3(si_t *sih, bool enter_D3)
+{
+}
+
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+uint32
+si_clear_backplane_to_fast(si_t *sih, void * addr)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+		return ai_clear_backplane_to_fast(sih, addr);
+	}
+
+	return 0;
+}
+
+const si_axi_error_info_t * si_get_axi_errlog_info(si_t * sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+		return (const si_axi_error_info_t *)sih->err_info;
+	}
+
+	return NULL;
+}
+
+void si_reset_axi_errlog_info(si_t * sih)
+{
+	sih->err_info->count = 0;
+}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
+uint32
+si_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+		return ai_clear_backplane_to_per_core(sih, coreid, coreunit, wrap);
+	}
+
+	return AXI_WRAP_STS_NONE;
+}
+#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
+
+uint32
+si_clear_backplane_to(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+		return ai_clear_backplane_to(sih);
+	}
+
+	return 0;
+}
+
+/*
+ * This routine adds the AXI timeouts for
+ * chipcommon, pcie and ARM slave wrappers
+ */
+void
+si_slave_wrapper_add(si_t *sih)
+{
+#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
+	/* Enable only for AXI */
+	if (CHIPTYPE(sih->socitype) != SOCI_AI) {
+		return;
+	}
+
+	if (CHIPID(sih->chip) == BCM4345_CHIP_ID && CHIPREV(sih->chiprev) >= 6) {
+		si_info_t *sii = SI_INFO(sih);
+
+		int wrapper_idx = (int)sii->axi_num_wrappers - 1;
+
+		ASSERT(wrapper_idx >= 0);		/* axi_wrapper[] not initialised */
+		do {
+			if (sii->axi_wrapper[wrapper_idx].wrapper_type == AI_SLAVE_WRAPPER &&
+				sii->axi_wrapper[wrapper_idx].cid == 0xfff) {
+				sii->axi_wrapper[wrapper_idx].wrapper_addr = 0x1810b000;
+				break;
+			}
+		} while (wrapper_idx-- > 0);
+		ASSERT(wrapper_idx >= 0);	/* all addresses valid for the chiprev under test */
+	}
+
+	/* All required slave wrappers are added in ai_scan */
+	ai_enable_backplane_timeouts(sih);
+#endif /* AXI_TIMEOUTS  || BCM_BACKPLANE_TIMEOUT */
+}
+
+
+void
+si_pll_sr_reinit(si_t *sih)
+{
+}
+
+
+/* Program the d11 core OOB settings for the 4364.
+ * WARs for HW4364-237 and HW4364-166.
+ */
+void
+si_config_4364_d11_oob(si_t *sih, uint coreid)
+{
+	uint save_idx;
+
+	save_idx = si_coreidx(sih);
+	si_setcore(sih, coreid, 0);
+	si_wrapperreg(sih, AI_OOBSELINC30, ~0, 0x81828180);
+	si_wrapperreg(sih, AI_OOBSELINC74, ~0, 0x87868183);
+	si_wrapperreg(sih, AI_OOBSELOUTB74, ~0, 0x84858484);
+	si_setcore(sih, coreid, 1);
+	si_wrapperreg(sih, AI_OOBSELINC30, ~0, 0x81828180);
+	si_wrapperreg(sih, AI_OOBSELINC74, ~0, 0x87868184);
+	si_wrapperreg(sih, AI_OOBSELOUTB74, ~0, 0x84868484);
+	si_setcoreidx(sih, save_idx);
+}
+
+void
+si_pll_closeloop(si_t *sih)
+{
+#if defined(SAVERESTORE)
+	uint32 data;
+
+	/* disable PLL open loop operation */
+	switch (CHIPID(sih->chip)) {
+#ifdef SAVERESTORE
+		case BCM43018_CHIP_ID:
+		case BCM43430_CHIP_ID:
+			if (SR_ENAB() && sr_isenab(sih)) {
+				/* read back the pll openloop state */
+				data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0);
+				/* current mode is openloop (possible POR) */
+				if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) {
+					si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8,
+						PMU1_PLLCTL8_OPENLOOP_MASK, 0);
+					si_pmu_pllupd(sih);
+				}
+			}
+			break;
+#endif /* SAVERESTORE */
+		default:
+			/* any unsupported chip bail */
+			return;
+	}
+#endif /* SAVERESTORE */
+}
+
+void
+si_update_macclk_mul_fact(si_t *sih, uint32 mul_fact)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sii->macclk_mul_fact = mul_fact;
+}
+
+uint32
+si_get_macclk_mul_fact(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	return sii->macclk_mul_fact;
+}
+
+
+#if defined(BCMSRPWR) && !defined(BCMSRPWR_DISABLED)
+bool _bcmsrpwr = TRUE;
+#else
+bool _bcmsrpwr = FALSE;
+#endif
+
+uint32
+si_srpwr_request(si_t *sih, uint32 mask, uint32 val)
+{
+	uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
+	uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+
+	if (mask || val) {
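+		/* read-modify-write the request bits of the powerctl register */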
+		mask <<= SRPWR_REQON_SHIFT;
+		val  <<= SRPWR_REQON_SHIFT;
+
+		r = ((si_corereg(sih, cidx, offset, 0, 0) & ~mask) | val);
+		r = si_corereg(sih, cidx, offset, ~0, r);
+	} else {
+		r = si_corereg(sih, cidx, offset, 0, 0);
+	}
+
+	return r;
+}
+
+uint32
+si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val)
+{
+	uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
+	uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+
+	ASSERT(mask);
+	ASSERT(val);
+
+	/* spinwait on pwrstatus */
+	mask <<= SRPWR_STATUS_SHIFT;
+	val <<= SRPWR_STATUS_SHIFT;
+
+	SPINWAIT(((si_corereg(sih, cidx, offset, 0, 0) & mask) != val),
+		PMU_MAX_TRANSITION_DLY);
+	ASSERT((si_corereg(sih, cidx, offset, 0, 0) & mask) == val);
+
+	r = si_corereg(sih, cidx, offset, 0, 0) & mask;
+	r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
+
+	return r;
+}
+
+uint32
+si_srpwr_stat(si_t *sih)
+{
+	uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
+	uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+
+	r = si_corereg(sih, cidx, offset, 0, 0);
+	r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
+
+	return r;
+}
+
+uint32
+si_srpwr_domain(si_t *sih)
+{
+	uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
+	uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
+
+	r = si_corereg(sih, cidx, offset, 0, 0);
+	r = (r >> SRPWR_DMN_SHIFT) & SRPWR_DMN_ALL_MASK;
+
+	return r;
+}
+
+/* Utility API to read/write raw registers given an absolute address.
+ * This function can be invoked from either the FW or the host driver.
+ */
+uint32
+si_raw_reg(si_t *sih, uint32 reg, uint32 val, uint32 write_req)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 address_space = reg & ~0xFFF;
+	volatile uint32 * addr = (void*)(uintptr)(reg);
+	uint32 prev_value = 0;
+	uint32 cfg_reg = 0;
+
+	if (sii == NULL) {
+		return 0;
+	}
+
+	/* No need to translate the absolute address on SI bus */
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		goto skip_cfg;
+	}
+
+	/* This API supports only the PCI host interface */
+	if (BUSTYPE(sih->bustype) != PCI_BUS) {
+		return ID32_INVALID;
+	}
+
+	if (PCIE_GEN2(sii)) {
+		/* Use the secondary BAR0 window for PCIe Gen2.
+		 * Point the secondary BAR0 window at the register of interest.
+		 */
+		addr = (volatile uint32*)(((volatile uint8*)sii->curmap) +
+			PCI_SEC_BAR0_WIN_OFFSET + (reg & 0xfff));
+		cfg_reg = PCIE2_BAR0_CORE2_WIN;
+
+	} else {
+		/* PCIe Gen1 does not have a secondary BAR0 window;
+		 * reuse BAR0 WIN2.
+		 */
+		addr = (volatile uint32*)(((volatile uint8*)sii->curmap) +
+			PCI_BAR0_WIN2_OFFSET + (reg & 0xfff));
+		cfg_reg = PCI_BAR0_WIN2;
+	}
+
+	prev_value = OSL_PCI_READ_CONFIG(sii->osh, cfg_reg, 4);
+
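+	/* Retarget the window only if it is not already pointing at the 4KB region
+	 * containing 'reg'; prev_value == 0 means no restore is needed afterwards.
+	 */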
+	if (prev_value != address_space) {
+		OSL_PCI_WRITE_CONFIG(sii->osh, cfg_reg,
+			sizeof(uint32), address_space);
+	} else {
+		prev_value = 0;
+	}
+
+skip_cfg:
+	if (write_req) {
+		W_REG(sii->osh, addr, val);
+	} else {
+		val = R_REG(sii->osh, addr);
+	}
+
+	if (prev_value) {
+		/* Restore BAR0 WIN2 for PCIE GEN1 devices */
+		OSL_PCI_WRITE_CONFIG(sii->osh,
+			cfg_reg, sizeof(uint32), prev_value);
+	}
+
+	return val;
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h
new file mode 100644
index 0000000..4b887df
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -0,0 +1,333 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: siutils_priv.h 625739 2016-03-17 12:28:03Z $
+ */
+
+#ifndef	_siutils_priv_h_
+#define	_siutils_priv_h_
+
+#if defined(SI_ERROR_ENFORCE)
+#define	SI_ERROR(args)	printf args
+#else
+#define	SI_ERROR(args) printf args
+#endif	
+
+#if defined(ENABLE_CORECAPTURE)
+
+#define	SI_PRINT(args)	osl_wificc_logDebug args
+
+#else
+
+#define	SI_PRINT(args)	printf args
+
+#endif /* ENABLE_CORECAPTURE */
+
+
+#define	SI_MSG(args)
+
+#ifdef BCMDBG_SI
+#define	SI_VMSG(args)	printf args
+#else
+#define	SI_VMSG(args)
+#endif
+
+#define	IS_SIM(chippkg)	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+
+#define SI_GPIO_MAX		16
+
+typedef struct gci_gpio_item {
+	void			*arg;
+	uint8			gci_gpio;
+	uint8			status;
+	gci_gpio_handler_t	handler;
+	struct gci_gpio_item	*next;
+} gci_gpio_item_t;
+
+#define AI_SLAVE_WRAPPER        0
+#define AI_MASTER_WRAPPER       1
+
+typedef struct axi_wrapper {
+	uint32  mfg;
+	uint32  cid;
+	uint32  rev;
+	uint32  wrapper_type;
+	uint32  wrapper_addr;
+	uint32  wrapper_size;
+} axi_wrapper_t;
+
+#define SI_MAX_AXI_WRAPPERS		32
+#define AI_REG_READ_TIMEOUT		300 /* in msec */
+
+/* for some combo chips, BT side accesses chipcommon->0x190, as a 16 byte addr */
+/* register at 0x19C doesn't exist, so error is logged at the slave wrapper */
+#define BT_CC_SPROM_BADREG_LO   0x18000190
+#define BT_CC_SPROM_BADREG_HI   0
+#define BCM4350_BT_AXI_ID       6
+#define BCM4345_BT_AXI_ID       6
+
+typedef struct si_cores_info {
+	volatile void	*regs[SI_MAXCORES];	/* other regs va */
+
+	uint	coreid[SI_MAXCORES];	/**< id of each core */
+	uint32	coresba[SI_MAXCORES];	/**< backplane address of each core */
+	void	*regs2[SI_MAXCORES];	/**< va of each core second register set (usbh20) */
+	uint32	coresba2[SI_MAXCORES];	/**< address of each core second register set (usbh20) */
+	uint32	coresba_size[SI_MAXCORES]; /**< backplane address space size */
+	uint32	coresba2_size[SI_MAXCORES]; /**< second address space size */
+
+	void	*wrappers[SI_MAXCORES];	/**< other cores wrapper va */
+	uint32	wrapba[SI_MAXCORES];	/**< address of controlling wrapper */
+
+	void	*wrappers2[SI_MAXCORES];	/**< other cores wrapper va */
+	uint32	wrapba2[SI_MAXCORES];	/**< address of controlling wrapper */
+
+	uint32	cia[SI_MAXCORES];	/**< erom cia entry for each core */
+	uint32	cib[SI_MAXCORES];	/**< erom cib entry for each core */
+} si_cores_info_t;
+
+/** misc si info needed by some of the routines */
+typedef struct si_info {
+	struct si_pub pub;		/**< back plane public state (must be first field) */
+
+	void	*osh;			/**< osl os handle */
+	void	*sdh;			/**< bcmsdh handle */
+
+	uint	dev_coreid;		/**< the core provides driver functions */
+	void	*intr_arg;		/**< interrupt callback function arg */
+	si_intrsoff_t intrsoff_fn;	/**< turns chip interrupts off */
+	si_intrsrestore_t intrsrestore_fn; /**< restore chip interrupts */
+	si_intrsenabled_t intrsenabled_fn; /**< check if interrupts are enabled */
+
+	void *pch;			/**< PCI/E core handle */
+
+	bool	memseg;			/**< flag to toggle MEM_SEG register */
+
+	char *vars;
+	uint varsz;
+
+	volatile void	*curmap;		/* current regs va */
+
+	uint	curidx;			/**< current core index */
+	uint	numcores;		/**< # discovered cores */
+
+	void	*curwrap;		/**< current wrapper va */
+
+	uint32	oob_router;		/**< oob router registers for axi */
+
+	si_cores_info_t *cores_info;
+	gci_gpio_item_t	*gci_gpio_head;	/**< gci gpio interrupts head */
+	uint	chipnew;		/**< new chip number */
+	uint second_bar0win;		/**< Backplane region */
+	uint	num_br;		/**< # discovered bridges */
+	uint32	br_wrapba[SI_MAXBR];	/**< address of bridge controlling wrapper */
+	uint32	xtalfreq;
+	uint32	macclk_mul_fact;	/**< Multiplication factor necessary to adjust the MAC clock
+					 * during ULB mode operation. One instance where this is used
+					 * is configuring the TSF L-frac register.
+					 */
+	bool	device_removed;
+	uint	axi_num_wrappers;
+	axi_wrapper_t   * axi_wrapper;
+} si_info_t;
+
+
+#define	SI_INFO(sih)	((si_info_t *)(uintptr)sih)
+
+#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+		ISALIGNED((x), SI_CORE_SIZE))
+#define	GOODREGS(regs)	((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR	0
+#define	GOODIDX(idx)	(((uint)idx) < SI_MAXCORES)
+#define	NOREV		-1		/**< Invalid rev */
+
+#define PCI(si)		((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si)	(PCIE_GEN1(si) || PCIE_GEN2(si))
+
+#define PCMCIA(si)	((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/** Newer chips can access PCI/PCIE and CC core without requiring to change PCI BAR0 WIN */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define CCREGS_FAST(si) \
+	(((si)->curmap == NULL) ? NULL : \
+	    ((volatile char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+#define PCIEREGS(si) (((volatile char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts before/
+ * after core switching, to avoid invalid register access inside an ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+	if ((si)->intrsoff_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \
+		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+	if ((si)->intrsrestore_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \
+		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
+
+/* dynamic clock control defines */
+#define	LPOMINFREQ		25000		/**< low power oscillator min */
+#define	LPOMAXFREQ		43000		/**< low power oscillator max */
+#define	XTALMINFREQ		19800000	/**< 20 MHz - 1% */
+#define	XTALMAXFREQ		20200000	/**< 20 MHz + 1% */
+#define	PCIMINFREQ		25000000	/**< 25 MHz */
+#define	PCIMAXFREQ		34000000	/**< 33 MHz + fudge */
+
+#define	ILP_DIV_5MHZ		0		/**< ILP = 5 MHz */
+#define	ILP_DIV_1MHZ		4		/**< ILP = 1 MHz */
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME	10		/**< Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME	90		/**< Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, volatile void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern volatile uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool sb_iscoreup(si_t *sih);
+extern volatile void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+#if defined(BCMDBG_PHYDUMP)
+extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern uint ai_flag_alt(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern volatile uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool ai_iscoreup(si_t *sih);
+extern volatile void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern volatile void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
+	uint32 resetbits, void *p, void *s);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits,
+	aidmp_t *pmacai, aidmp_t *smacai);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern void ai_enable_backplane_timeouts(si_t *sih);
+extern uint32 ai_clear_backplane_to(si_t *sih);
+extern uint ai_num_slaveports(si_t *sih, uint coreidx);
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+uint32 ai_clear_backplane_to_fast(si_t *sih, void * addr);
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
+#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
+extern uint32 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap);
+#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
+
+#if defined(BCMDBG_PHYDUMP)
+extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+extern uint32 ai_wrapper_dump_buf_size(si_t *sih);
+extern uint32 ai_wrapper_dump_binary(si_t *sih, uchar *p);
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b)  (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif	/* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 0000000..d78ecd9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,5472 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_android.c 710862 2017-07-14 07:43:59Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlc_types.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_config.h>
+#include <bcmip.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef BCMSDIO
+#include <bcmsdbus.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+#include <dhd_linux.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+
+#if defined(STAT_REPORT)
+#include <wl_statreport.h>
+#endif /* STAT_REPORT */
+
+#ifndef WL_CFG80211
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+#endif
+
+uint android_msg_level = ANDROID_ERROR_LEVEL;
+
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START		"START"
+#define CMD_STOP		"STOP"
+#define	CMD_SCAN_ACTIVE		"SCAN-ACTIVE"
+#define	CMD_SCAN_PASSIVE	"SCAN-PASSIVE"
+#define CMD_RSSI		"RSSI"
+#define CMD_LINKSPEED		"LINKSPEED"
+#define CMD_RXFILTER_START	"RXFILTER-START"
+#define CMD_RXFILTER_STOP	"RXFILTER-STOP"
+#define CMD_RXFILTER_ADD	"RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE	"RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START	"BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP	"BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE		"BTCOEXMODE"
+#define CMD_SETSUSPENDOPT	"SETSUSPENDOPT"
+#define CMD_SETSUSPENDMODE      "SETSUSPENDMODE"
+#define CMD_MAXDTIM_IN_SUSPEND  "MAX_DTIM_IN_SUSPEND"
+#define CMD_P2P_DEV_ADDR	"P2P_DEV_ADDR"
+#define CMD_SETFWPATH		"SETFWPATH"
+#define CMD_SETBAND		"SETBAND"
+#define CMD_GETBAND		"GETBAND"
+#define CMD_COUNTRY		"COUNTRY"
+#ifdef WLMESH
+#define CMD_SAE_SET_PASSWORD "SAE_SET_PASSWORD"
+#define CMD_SET_RSDB_MODE "RSDB_MODE"
+#endif
+#define CMD_P2P_SET_NOA		"P2P_SET_NOA"
+#if !defined WL_ENABLE_P2P_IF
+#define CMD_P2P_GET_NOA			"P2P_GET_NOA"
+#endif /* WL_ENABLE_P2P_IF */
+#define CMD_P2P_SD_OFFLOAD		"P2P_SD_"
+#define CMD_P2P_LISTEN_OFFLOAD		"P2P_LO_"
+#define CMD_P2P_SET_PS		"P2P_SET_PS"
+#define CMD_P2P_ECSA		"P2P_ECSA"
+#define CMD_P2P_INC_BW		"P2P_INCREASE_BW"
+#define CMD_SET_AP_WPS_P2P_IE 		"SET_AP_WPS_P2P_IE"
+#define CMD_SETROAMMODE 	"SETROAMMODE"
+#define CMD_SETIBSSBEACONOUIDATA	"SETIBSSBEACONOUIDATA"
+#define CMD_MIRACAST		"MIRACAST"
+#define CMD_COUNTRY_DELIMITER "/"
+#ifdef WL11ULB
+#define CMD_ULB_MODE "ULB_MODE"
+#define CMD_ULB_BW "ULB_BW"
+#endif /* WL11ULB */
+
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+#define CMD_GET_BEST_CHANNELS	"GET_BEST_CHANNELS"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#define CMD_80211_MODE    "MODE"  /* 802.11 mode a/b/g/n/ac */
+#define CMD_CHANSPEC      "CHANSPEC"
+#define CMD_DATARATE      "DATARATE"
+#define CMD_ASSOC_CLIENTS "ASSOCLIST"
+#define CMD_SET_CSA       "SETCSA"
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CMD_SET_HAPD_AUTO_CHANNEL	"HAPD_AUTO_CHANNEL"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_SET_LPC
+#define CMD_HAPD_LPC_ENABLED		"HAPD_LPC_ENABLED"
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+#define CMD_TEST_FORCE_HANG		"TEST_FORCE_HANG"
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+#ifdef TEST_TX_POWER_CONTROL
+#define CMD_TEST_SET_TX_POWER		"TEST_SET_TX_POWER"
+#define CMD_TEST_GET_TX_POWER		"TEST_GET_TX_POWER"
+#endif /* TEST_TX_POWER_CONTROL */
+#define CMD_SARLIMIT_TX_CONTROL		"SET_TX_POWER_CALLING"
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+#define CMD_KEEP_ALIVE		"KEEPALIVE"
+
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET	"PNOSSIDCLR"
+#define CMD_PNOSETUP_SET	"PNOSETUP "
+#define CMD_PNOENABLE_SET	"PNOFORCE"
+#define CMD_PNODEBUG_SET	"PNODEBUG"
+#define CMD_WLS_BATCHING	"WLS_BATCHING"
+#endif /* PNO_SUPPORT */
+
+#define	CMD_HAPD_MAC_FILTER	"HAPD_MAC_FILTER"
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+#define ENABLE_RANDOM_MAC "ENABLE_RANDOM_MAC"
+#define DISABLE_RANDOM_MAC "DISABLE_RANDOM_MAC"
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+
+#define CMD_CHANGE_RL 	"CHANGE_RL"
+#define CMD_RESTORE_RL  "RESTORE_RL"
+
+#define CMD_SET_RMC_ENABLE			"SETRMCENABLE"
+#define CMD_SET_RMC_TXRATE			"SETRMCTXRATE"
+#define CMD_SET_RMC_ACTPERIOD		"SETRMCACTIONPERIOD"
+#define CMD_SET_RMC_IDLEPERIOD		"SETRMCIDLEPERIOD"
+#define CMD_SET_RMC_LEADER			"SETRMCLEADER"
+#define CMD_SET_RMC_EVENT			"SETRMCEVENT"
+
+#define CMD_SET_SCSCAN		"SETSINGLEANT"
+#define CMD_GET_SCSCAN		"GETSINGLEANT"
+#ifdef WLTDLS
+#define CMD_TDLS_RESET "TDLS_RESET"
+#endif /* WLTDLS */
+
+#ifdef FCC_PWR_LIMIT_2G
+#define CMD_GET_FCC_PWR_LIMIT_2G "GET_FCC_CHANNEL"
+#define CMD_SET_FCC_PWR_LIMIT_2G "SET_FCC_CHANNEL"
+/* CUSTOMER_HW4's value differs from BRCM FW value for enable/disable */
+#define CUSTOMER_HW4_ENABLE		0
+#define CUSTOMER_HW4_DISABLE	-1
+#define CUSTOMER_HW4_EN_CONVERT(i)	(i += 1)
+#endif /* FCC_PWR_LIMIT_2G */
+
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+
+
+#define CMD_ROAM_OFFLOAD			"SETROAMOFFLOAD"
+#define CMD_INTERFACE_CREATE			"INTERFACE_CREATE"
+#define CMD_INTERFACE_DELETE			"INTERFACE_DELETE"
+#define CMD_GET_LINK_STATUS			"GETLINKSTATUS"
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define CMD_GET_BSS_INFO            "GETBSSINFO"
+#define CMD_GET_ASSOC_REJECT_INFO   "GETASSOCREJECTINFO"
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#define CMD_GET_STA_INFO   "GETSTAINFO"
+
+/* related with CMD_GET_LINK_STATUS */
+#define WL_ANDROID_LINK_VHT					0x01
+#define WL_ANDROID_LINK_MIMO					0x02
+#define WL_ANDROID_LINK_AP_VHT_SUPPORT		0x04
+#define WL_ANDROID_LINK_AP_MIMO_SUPPORT	0x08
+
+#ifdef P2PRESP_WFDIE_SRC
+#define CMD_P2P_SET_WFDIE_RESP      "P2P_SET_WFDIE_RESP"
+#define CMD_P2P_GET_WFDIE_RESP      "P2P_GET_WFDIE_RESP"
+#endif /* P2PRESP_WFDIE_SRC */
+
+#define CMD_DFS_AP_MOVE			"DFS_AP_MOVE"
+#define CMD_WBTEXT_ENABLE		"WBTEXT_ENABLE"
+#define CMD_WBTEXT_PROFILE_CONFIG	"WBTEXT_PROFILE_CONFIG"
+#define CMD_WBTEXT_WEIGHT_CONFIG	"WBTEXT_WEIGHT_CONFIG"
+#define CMD_WBTEXT_TABLE_CONFIG		"WBTEXT_TABLE_CONFIG"
+#define CMD_WBTEXT_DELTA_CONFIG		"WBTEXT_DELTA_CONFIG"
+#define CMD_WBTEXT_BTM_TIMER_THRESHOLD	"WBTEXT_BTM_TIMER_THRESHOLD"
+#define CMD_WBTEXT_BTM_DELTA		"WBTEXT_BTM_DELTA"
+
+#ifdef WLWFDS
+#define CMD_ADD_WFDS_HASH	"ADD_WFDS_HASH"
+#define CMD_DEL_WFDS_HASH	"DEL_WFDS_HASH"
+#endif /* WLWFDS */
+
+#ifdef SET_RPS_CPUS
+#define CMD_RPSMODE  "RPSMODE"
+#endif /* SET_RPS_CPUS */
+
+#ifdef BT_WIFI_HANDOVER
+#define CMD_TBOW_TEARDOWN "TBOW_TEARDOWN"
+#endif /* BT_WIFI_HANDOVER */
+
+#define CMD_MURX_BFE_CAP "MURX_BFE_CAP"
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+#define CMD_SET_AP_BEACONRATE				"SET_AP_BEACONRATE"
+#define CMD_GET_AP_BASICRATE				"GET_AP_BASICRATE"
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+#define CMD_SET_AP_RPS						"SET_AP_RPS"
+#define CMD_GET_AP_RPS						"GET_AP_RPS"
+#define CMD_SET_AP_RPS_PARAMS				"SET_AP_RPS_PARAMS"
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef SUPPORT_RSSI_LOGGING
+#define CMD_SET_RSSI_LOGGING				"SET_RSSI_LOGGING"
+#define CMD_GET_RSSI_LOGGING				"GET_RSSI_LOGGING"
+#define CMD_GET_RSSI_PER_ANT				"GET_RSSI_PER_ANT"
+#endif /* SUPPORT_RSSI_LOGGING */
+
+#define CMD_GET_SNR							"GET_SNR"
+
+/* miracast related definition */
+#define MIRACAST_MODE_OFF	0
+#define MIRACAST_MODE_SOURCE	1
+#define MIRACAST_MODE_SINK	2
+
+#ifndef MIRACAST_AMPDU_SIZE
+#define MIRACAST_AMPDU_SIZE	8
+#endif
+
+#ifndef MIRACAST_MCHAN_ALGO
+#define MIRACAST_MCHAN_ALGO     1
+#endif
+
+#ifndef MIRACAST_MCHAN_BW
+#define MIRACAST_MCHAN_BW       25
+#endif
+
+#ifdef CONNECTION_STATISTICS
+#define CMD_GET_CONNECTION_STATS	"GET_CONNECTION_STATS"
+
+struct connection_stats {
+	u32 txframe;
+	u32 txbyte;
+	u32 txerror;
+	u32 rxframe;
+	u32 rxbyte;
+	u32 txfail;
+	u32 txretry;
+	u32 txretrie;
+	u32 txrts;
+	u32 txnocts;
+	u32 txexptime;
+	u32 txrate;
+	u8	chan_idle;
+};
+#endif /* CONNECTION_STATISTICS */
+
+#ifdef SUPPORT_LQCM
+#define CMD_SET_LQCM_ENABLE			"SET_LQCM_ENABLE"
+#define CMD_GET_LQCM_REPORT			"GET_LQCM_REPORT"
+#endif
+
+static LIST_HEAD(miracast_resume_list);
+#ifdef WL_CFG80211
+static u8 miracast_cur_mode;
+#endif
+
+#ifdef DHD_LOG_DUMP
+#define CMD_NEW_DEBUG_PRINT_DUMP			"DEBUG_DUMP"
+extern void dhd_schedule_log_dump(dhd_pub_t *dhdp);
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+#endif /* DHD_LOG_DUMP */
+
+#ifdef DHD_HANG_SEND_UP_TEST
+#define CMD_MAKE_HANG  "MAKE_HANG"
+#endif /* CMD_DHD_HANG_SEND_UP_TEST */
+#ifdef DHD_DEBUG_UART
+extern bool dhd_debug_uart_is_running(struct net_device *dev);
+#endif	/* DHD_DEBUG_UART */
+
+struct io_cfg {
+	s8 *iovar;
+	s32 param;
+	u32 ioctl;
+	void *arg;
+	u32 len;
+	struct list_head list;
+};
+
+#if defined(BCMFW_ROAM_ENABLE)
+#define CMD_SET_ROAMPREF	"SET_ROAMPREF"
+
+#define MAX_NUM_SUITES		10
+#define WIDTH_AKM_SUITE		8
+#define JOIN_PREF_RSSI_LEN		0x02
+#define JOIN_PREF_RSSI_SIZE		4	/* RSSI pref header size in bytes */
+#define JOIN_PREF_WPA_HDR_SIZE		4 /* WPA pref header size in bytes */
+#define JOIN_PREF_WPA_TUPLE_SIZE	12	/* Tuple size in bytes */
+#define JOIN_PREF_MAX_WPA_TUPLES	16
+#define MAX_BUF_SIZE		(JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +	\
+				           (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
+#endif /* BCMFW_ROAM_ENABLE */
+
+#ifdef WL_NATOE
+
+#define CMD_NATOE		"NATOE"
+
+#define NATOE_MAX_PORT_NUM	65535
+
+/* natoe command info structure */
+typedef struct wl_natoe_cmd_info {
+	uint8  *command;        /* pointer to the actual command */
+	uint16 tot_len;        /* total length of the command */
+	uint16 bytes_written;  /* Bytes written for get response */
+} wl_natoe_cmd_info_t;
+
+typedef struct wl_natoe_sub_cmd wl_natoe_sub_cmd_t;
+typedef int (natoe_cmd_handler_t)(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+
+struct wl_natoe_sub_cmd {
+	char *name;
+	uint8  version;              /* cmd  version */
+	uint16 id;                   /* id for the dongle f/w switch/case */
+	uint16 type;                 /* base type of argument */
+	natoe_cmd_handler_t *handler; /* cmd handler  */
+};
+
+#define WL_ANDROID_NATOE_FUNC(suffix) wl_android_natoe_subcmd_ ##suffix
+static int wl_android_process_natoe_cmd(struct net_device *dev,
+		char *command, int total_len);
+static int wl_android_natoe_subcmd_enable(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_config_ips(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_config_ports(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_dbg_stats(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+static int wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info);
+
+static const wl_natoe_sub_cmd_t natoe_cmd_list[] = {
+	/* wl natoe enable [0/1] or new: "wl natoe [0/1]" */
+	{"enable", 0x01, WL_NATOE_CMD_ENABLE,
+	IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(enable)
+	},
+	{"config_ips", 0x01, WL_NATOE_CMD_CONFIG_IPS,
+	IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ips)
+	},
+	{"config_ports", 0x01, WL_NATOE_CMD_CONFIG_PORTS,
+	IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ports)
+	},
+	{"stats", 0x01, WL_NATOE_CMD_DBG_STATS,
+	IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(dbg_stats)
+	},
+	{"tbl_cnt", 0x01, WL_NATOE_CMD_TBL_CNT,
+	IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(tbl_cnt)
+	},
+	{NULL, 0, 0, 0, NULL}
+};
+
+#endif /* WL_NATOE */
+
+#ifdef SET_PCIE_IRQ_CPU_CORE
+#define CMD_PCIE_IRQ_CORE	"PCIE_IRQ_CORE"
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+
+#ifdef WLADPS_PRIVATE_CMD
+#define CMD_SET_ADPS	"SET_ADPS"
+#define CMD_GET_ADPS	"GET_ADPS"
+#endif /* WLADPS_PRIVATE_CMD */
+
+#ifdef DHD_PKT_LOGGING
+#define CMD_PKTLOG_FILTER_ENABLE	"PKTLOG_FILTER_ENABLE"
+#define CMD_PKTLOG_FILTER_DISABLE	"PKTLOG_FILTER_DISABLE"
+#define CMD_PKTLOG_FILTER_PATTERN_ENABLE	"PKTLOG_FILTER_PATTERN_ENABLE"
+#define CMD_PKTLOG_FILTER_PATTERN_DISABLE	"PKTLOG_FILTER_PATTERN_DISABLE"
+#define CMD_PKTLOG_FILTER_ADD	"PKTLOG_FILTER_ADD"
+#define CMD_PKTLOG_FILTER_INFO	"PKTLOG_FILTER_INFO"
+#define CMD_PKTLOG_START	"PKTLOG_START"
+#define CMD_PKTLOG_STOP		"PKTLOG_STOP"
+#define CMD_PKTLOG_FILTER_EXIST "PKTLOG_FILTER_EXIST"
+#endif /* DHD_PKT_LOGGING */
+
+#if defined(STAT_REPORT)
+#define CMD_STAT_REPORT_GET_START	"STAT_REPORT_GET_START"
+#define CMD_STAT_REPORT_GET_NEXT	"STAT_REPORT_GET_NEXT"
+#endif /* STAT_REPORT */
+
+
+#ifdef SUPPORT_LQCM
+#define LQCM_ENAB_MASK			0x000000FF	/* LQCM enable flag mask */
+#define LQCM_TX_INDEX_MASK		0x0000FF00	/* LQCM tx index mask */
+#define LQCM_RX_INDEX_MASK		0x00FF0000	/* LQCM rx index mask */
+
+#define LQCM_TX_INDEX_SHIFT		8	/* LQCM tx index shift */
+#define LQCM_RX_INDEX_SHIFT		16	/* LQCM rx index shift */
+#endif /* SUPPORT_LQCM */
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+int dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command);
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif /* WL_CFG80211 */
+#ifdef WBTEXT
+static int wl_android_wbtext(struct net_device *dev, char *command, int total_len);
+static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev,
+	char *command, int total_len);
+static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev,
+	char *command, int total_len);
+#endif /* WBTEXT */
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+extern bool ap_fw_loaded;
+extern char iface_name[IFNAMSIZ];
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests will be handled by
+ * wl_android_wifi_on
+ */
+int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+
+#ifdef WLWFDS
+static int wl_android_set_wfds_hash(
+	struct net_device *dev, char *command, int total_len, bool enable)
+{
+	int error = 0;
+	wl_p2p_wfds_hash_t *wfds_hash = NULL;
+	char *smbuf = NULL;
+	smbuf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+
+	if (smbuf == NULL) {
+		ANDROID_ERROR(("%s: failed to allocate %d bytes of memory\n",
+			__FUNCTION__, WLC_IOCTL_MAXLEN));
+		return -ENOMEM;
+	}
+
+	if (enable) {
+		wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_ADD_WFDS_HASH) + 1);
+		error = wldev_iovar_setbuf(dev, "p2p_add_wfds_hash", wfds_hash,
+			sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+	}
+	else {
+		wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_DEL_WFDS_HASH) + 1);
+		error = wldev_iovar_setbuf(dev, "p2p_del_wfds_hash", wfds_hash,
+			sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+	}
+
+	if (error) {
+		ANDROID_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error));
+	}
+
+	if (smbuf)
+		kfree(smbuf);
+	return error;
+}
+#endif /* WLWFDS */
+
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+	int link_speed;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_link_speed(net, &link_speed);
+	if (error) {
+		ANDROID_ERROR(("Get linkspeed failed \n"));
+		return -1;
+	}
+
+	/* Convert Kbps to Android Mbps */
+	link_speed = link_speed / 1000;
+	bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+	ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+	return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+	wlc_ssid_t ssid = {0, {0}};
+	int bytes_written = 0;
+	int error = 0;
+	scb_val_t scbval;
+	char *delim = NULL;
+	struct net_device *target_ndev = net;
+#ifdef WL_VIRTUAL_APSTA
+	char *pos = NULL;
+	struct bcm_cfg80211 *cfg;
+#endif /* WL_VIRTUAL_APSTA */
+
+	delim = strchr(command, ' ');
+	/* For AP mode the rssi command is
+	 * "driver rssi <sta_mac_addr>";
+	 * for STA/GC mode it is
+	 * "driver rssi".
+	 */
+	if (delim) {
+		/* AP/GO mode:
+		 * driver rssi <sta_mac_addr>
+		 */
+		ANDROID_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim));
+		/* skip space from delim after finding char */
+		delim++;
+		if (!(bcm_ether_atoe((delim), &scbval.ea)))
+		{
+			ANDROID_ERROR(("%s:address err\n", __FUNCTION__));
+			return -1;
+		}
+		scbval.val = htod32(0);
+		ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet)));
+#ifdef WL_VIRTUAL_APSTA
+		/* RSDB AP may have another virtual interface
+		 * In this case, the format of the private command is as follows:
+		 * DRIVER rssi <sta_mac_addr> <AP interface name>
+		 */
+
+		/* Current position is start of MAC address string */
+		pos = delim;
+		delim = strchr(pos, ' ');
+		if (delim) {
+			/* skip space from delim after finding char */
+			delim++;
+			if (strnlen(delim, IFNAMSIZ)) {
+				cfg = wl_get_cfg(net);
+				target_ndev = wl_get_ap_netdev(cfg, delim);
+				if (target_ndev == NULL)
+					target_ndev = net;
+			}
+		}
+#endif /* WL_VIRTUAL_APSTA */
+	}
+	else {
+		/* STA/GC mode */
+		memset(&scbval, 0, sizeof(scb_val_t));
+	}
+
+	error = wldev_get_rssi(target_ndev, &scbval);
+	if (error)
+		return -1;
+#if defined(RSSIOFFSET)
+	scbval.val = wl_update_rssi_offset(net, scbval.val);
+#endif
+
+	error = wldev_get_ssid(target_ndev, &ssid);
+	if (error)
+		return -1;
+	if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+		ANDROID_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+	} else if (total_len <= ssid.SSID_len) {
+		return -ENOMEM;
+	} else {
+		memcpy(command, ssid.SSID, ssid.SSID_len);
+		bytes_written = ssid.SSID_len;
+	}
+	if ((total_len - bytes_written) < (strlen(" rssi -XXX") + 1))
+		return -ENOMEM;
+
+	bytes_written += scnprintf(&command[bytes_written], total_len - bytes_written,
+		" rssi %d", scbval.val);
+	command[bytes_written] = '\0';
+
+	ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+	return bytes_written;
+}
+
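+/* SETSUSPENDOPT/SETSUSPENDMODE handlers: the single character following the
+ * command name is parsed as a 0/1 flag and passed to the net_os_set_suspend*()
+ * helpers, which control suspend handling in the DHD layer.
+ */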
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+	if (suspend_flag != 0) {
+		suspend_flag = 1;
+	}
+	ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+	if (ret_now != suspend_flag) {
+		if (!(ret = net_os_set_suspend(dev, ret_now, 1))) {
+			ANDROID_INFO(("%s: Suspend Flag %d -> %d\n",
+				__FUNCTION__, ret_now, suspend_flag));
+		} else {
+			ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+		}
+	}
+
+	return ret;
+}
+
+static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len)
+{
+	int ret = 0;
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND)
+	int suspend_flag;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0';
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+
+	if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
+		ANDROID_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
+	else
+		ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+#endif
+
+	return ret;
+}
+
+#ifdef WL_CFG80211
+int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len)
+{
+	uint8 mode[5];
+	int  error = 0;
+	int bytes_written = 0;
+
+	error = wldev_get_mode(dev, mode, sizeof(mode));
+	if (error)
+		return -1;
+
+	ANDROID_INFO(("%s: mode:%s\n", __FUNCTION__, mode));
+	bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode);
+	ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+	return bytes_written;
+
+}
+
+extern chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec);
+int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int bytes_written = 0;
+	int chsp = {0};
+	uint16 band = 0;
+	uint16 bw = 0;
+	uint16 channel = 0;
+	u32 sb = 0;
+	chanspec_t chanspec;
+
+	/* command is
+	 * driver chanspec
+	 */
+	error = wldev_iovar_getint(dev, "chanspec", &chsp);
+	if (error)
+		return -1;
+
+	chanspec = wl_chspec_driver_to_host(chsp);
+	ANDROID_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec));
+
+	channel = chanspec & WL_CHANSPEC_CHAN_MASK;
+	band = chanspec & WL_CHANSPEC_BAND_MASK;
+	bw = chanspec & WL_CHANSPEC_BW_MASK;
+
+	ANDROID_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw));
+
+	if (bw == WL_CHANSPEC_BW_80)
+		bw = WL_CH_BANDWIDTH_80MHZ;
+	else if (bw == WL_CHANSPEC_BW_40)
+		bw = WL_CH_BANDWIDTH_40MHZ;
+	else if	(bw == WL_CHANSPEC_BW_20)
+		bw = WL_CH_BANDWIDTH_20MHZ;
+	else
+		bw = WL_CH_BANDWIDTH_20MHZ;
+
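+	/* Derive the control (primary 20 MHz) channel from the chanspec center
+	 * channel: for 40 MHz the control channel is +/-10 MHz from the center,
+	 * for 80 MHz it is 10 or 30 MHz away depending on the WL_CHANSPEC_CTL_SB_*
+	 * sideband position.
+	 */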
+	if (bw == WL_CH_BANDWIDTH_40MHZ) {
+		if (CHSPEC_SB_UPPER(chanspec)) {
+			channel += CH_10MHZ_APART;
+		} else {
+			channel -= CH_10MHZ_APART;
+		}
+	}
+	else if (bw == WL_CH_BANDWIDTH_80MHZ) {
+		sb = chanspec & WL_CHANSPEC_CTL_SB_MASK;
+		if (sb == WL_CHANSPEC_CTL_SB_LL) {
+			channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+		} else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+			channel -= CH_10MHZ_APART;
+		} else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+			channel += CH_10MHZ_APART;
+		} else {
+			/* WL_CHANSPEC_CTL_SB_UU */
+			channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+		}
+	}
+	bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC,
+		channel, band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw);
+
+	ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+	return bytes_written;
+
+}
+#endif /* WL_CFG80211 */
+
+/* Returns the current datarate; rates reported by firmware are in units of 500 kbps. */
+int wl_android_get_datarate(struct net_device *dev, char *command, int total_len)
+{
+	int  error = 0;
+	int datarate = 0;
+	int bytes_written = 0;
+
+	error = wldev_get_datarate(dev, &datarate);
+	if (error)
+		return -1;
+
+	ANDROID_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate));
+
+	bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2));
+	return bytes_written;
+}
+
+int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len)
+{
+	int  error = 0;
+	int bytes_written = 0;
+	uint i;
+	char mac_buf[MAX_NUM_OF_ASSOCLIST *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+	ANDROID_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+
+	error = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf));
+	if (error)
+		return -1;
+
+	assoc_maclist->count = dtoh32(assoc_maclist->count);
+	bytes_written = snprintf(command, total_len, "%s listcount: %d Stations:",
+		CMD_ASSOC_CLIENTS, assoc_maclist->count);
+
+	for (i = 0; i < assoc_maclist->count; i++) {
+		bytes_written += snprintf(command + bytes_written,
+			total_len - bytes_written, " " MACDBG,
+			MAC2STRDBG(assoc_maclist->ea[i].octet));
+	}
+	return bytes_written;
+
+}
+
+#ifdef WL_CFG80211
+extern chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec);
+static int wl_android_set_csa(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+	wl_chan_switch_t csa_arg;
+	u32 chnsp = 0;
+	int err = 0;
+
+	ANDROID_INFO(("%s: command:%s\n", __FUNCTION__, command));
+
+	command = (command + strlen(CMD_SET_CSA));
+	/* Argument order is: mode, count, channel */
+	if (!*++command) {
+		ANDROID_ERROR(("%s:error missing arguments\n", __FUNCTION__));
+		return -1;
+	}
+	csa_arg.mode = bcm_atoi(command);
+
+	if (csa_arg.mode != 0 && csa_arg.mode != 1) {
+		ANDROID_ERROR(("Invalid mode\n"));
+		return -1;
+	}
+
+	if (!*++command) {
+		ANDROID_ERROR(("%s:error missing count\n", __FUNCTION__));
+		return -1;
+	}
+	command++;
+	csa_arg.count = bcm_atoi(command);
+
+	csa_arg.reg = 0;
+	csa_arg.chspec = 0;
+	command += 2;
+	if (!*command) {
+		ANDROID_ERROR(("%s:error missing channel\n", __FUNCTION__));
+		return -1;
+	}
+
+	chnsp = wf_chspec_aton(command);
+	if (chnsp == 0)	{
+		ANDROID_ERROR(("%s:chsp is not correct\n", __FUNCTION__));
+		return -1;
+	}
+	chnsp = wl_chspec_host_to_driver(chnsp);
+	csa_arg.chspec = chnsp;
+
+	if (chnsp & WL_CHANSPEC_BAND_5G) {
+		u32 chanspec = chnsp;
+		err = wldev_iovar_getint(dev, "per_chan_info", &chanspec);
+		if (!err) {
+			if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) {
+				ANDROID_ERROR(("Channel is radar sensitive\n"));
+				return -1;
+			}
+			if (chanspec == 0) {
+				ANDROID_ERROR(("Invalid hw channel\n"));
+				return -1;
+			}
+		} else  {
+			ANDROID_ERROR(("does not support per_chan_info\n"));
+			return -1;
+		}
+		ANDROID_INFO(("non radar sensitivity\n"));
+	}
+	error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
+		smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		ANDROID_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error));
+		return -1;
+	}
+	return 0;
+}
+#endif /* WL_CFG80211 */
+
+static int
+wl_android_set_max_dtim(struct net_device *dev, char *command, int total_len)
+{
+	int ret = 0;
+	int dtim_flag;
+
+	dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0';
+
+	if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) {
+		ANDROID_TRACE(("%s: use Max bcn_li_dtim in suspend %s\n",
+			__FUNCTION__, (dtim_flag ? "Enable" : "Disable")));
+	} else {
+		ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+	uint band;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_band(dev, &band);
+	if (error)
+		return -1;
+	bytes_written = snprintf(command, total_len, "Band %d", band);
+	return bytes_written;
+}
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+#ifdef WLTDLS
+int wl_android_tdls_reset(struct net_device *dev)
+{
+	int ret = 0;
+	ret = dhd_tdls_enable(dev, false, false, NULL);
+	if (ret < 0) {
+		ANDROID_ERROR(("Disable tdls failed. %d\n", ret));
+		return ret;
+	}
+	ret = dhd_tdls_enable(dev, true, true, NULL);
+	if (ret < 0) {
+		ANDROID_ERROR(("enable tdls failed. %d\n", ret));
+		return ret;
+	}
+	return 0;
+}
+#endif /* WLTDLS */
+#ifdef FCC_PWR_LIMIT_2G
+int
+wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int enable = 0;
+
+	sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable);
+
+	if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) {
+		ANDROID_ERROR(("%s: Invalid data\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	CUSTOMER_HW4_EN_CONVERT(enable);
+
+	ANDROID_ERROR(("%s: fccpwrlimit2g set (%d)\n", __FUNCTION__, enable));
+	error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable);
+	if (error) {
+		ANDROID_ERROR(("%s: fccpwrlimit2g set returned (%d)\n", __FUNCTION__, error));
+		return BCME_ERROR;
+	}
+
+	return error;
+}
+
+int
+wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int enable = 0;
+	int bytes_written = 0;
+
+	error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable);
+	if (error) {
+		ANDROID_ERROR(("%s: fccpwrlimit2g get error (%d)\n", __FUNCTION__, error));
+		return BCME_ERROR;
+	}
+	ANDROID_ERROR(("%s: fccpwrlimit2g get (%d)\n", __FUNCTION__, enable));
+
+	bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable);
+
+	return bytes_written;
+}
+#endif /* FCC_PWR_LIMIT_2G */
+
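+/* GETSTAINFO private command handler. Expected format (from the parsing below):
+ * "GETSTAINFO <xx:xx:xx:xx:xx:xx|all>"; a MAC address queries the per-STA
+ * "sta_info" iovar, while "all"/"ALL" falls back to the global "counters"
+ * iovar for the rx retry / multicast counts.
+ */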
+s32
+wl_cfg80211_get_sta_info(struct net_device *dev, char* command, int total_len)
+{
+	static char iovar_buf[WLC_IOCTL_MAXLEN];
+	int bytes_written = -1, ret = 0;
+	char *pcmd = command;
+	char *str;
+	sta_info_t *sta = NULL;
+	wl_cnt_wlc_t* wlc_cnt = NULL;
+	struct ether_addr mac;
+
+	/* Client information */
+	uint16 cap = 0;
+	uint32 rxrtry = 0;
+	uint32 rxmulti = 0;
+
+	ANDROID_TRACE(("%s\n", command));
+	str = bcmstrtok(&pcmd, " ", NULL);
+	if (str) {
+		str = bcmstrtok(&pcmd, " ", NULL);
+		/* If GETSTAINFO subcmd name is not provided, return error */
+		if (str == NULL) {
+			ANDROID_ERROR(("%s: GETSTAINFO subcmd not provided\n", __FUNCTION__));
+			goto error;
+		}
+
+		memset(&mac, 0, ETHER_ADDR_LEN);
+		if ((bcm_ether_atoe((str), &mac))) {
+			/* get the sta info */
+			ret = wldev_iovar_getbuf(dev, "sta_info",
+				(struct ether_addr *)mac.octet,
+				ETHER_ADDR_LEN, iovar_buf, WLC_IOCTL_SMLEN, NULL);
+			if (ret < 0) {
+				ANDROID_ERROR(("Get sta_info ERR %d\n", ret));
+				goto error;
+			}
+
+			sta = (sta_info_t *)iovar_buf;
+			cap = dtoh16(sta->cap);
+			rxrtry = dtoh32(sta->rx_pkts_retried);
+			rxmulti = dtoh32(sta->rx_mcast_pkts);
+		} else if ((!strncmp(str, "all", 3)) || (!strncmp(str, "ALL", 3))) {
+			/* get counters info */
+			ret = wldev_iovar_getbuf(dev, "counters", NULL, 0,
+				iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+			if (unlikely(ret)) {
+				ANDROID_ERROR(("counters error (%d) - size = %zu\n",
+					ret, sizeof(wl_cnt_wlc_t)));
+				goto error;
+			}
+			ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
+			if (ret != BCME_OK) {
+				ANDROID_ERROR(("wl_cntbuf_to_xtlv_format ERR %d\n", ret));
+				goto error;
+			}
+			if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
+				ANDROID_ERROR(("wlc_cnt NULL!\n"));
+				goto error;
+			}
+
+			rxrtry = dtoh32(wlc_cnt->rxrtry);
+			rxmulti = dtoh32(wlc_cnt->rxmulti);
+		} else {
+			ANDROID_ERROR(("Get address fail\n"));
+			goto error;
+		}
+	} else {
+		ANDROID_ERROR(("Command ERR\n"));
+		goto error;
+	}
+
+	bytes_written = snprintf(command, total_len,
+		"%s %s Rx_Retry_Pkts=%d Rx_BcMc_Pkts=%d CAP=%04x\n",
+		CMD_GET_STA_INFO, str, rxrtry, rxmulti, cap);
+
+	ANDROID_TRACE(("%s", command));
+
+error:
+	return bytes_written;
+}
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#ifdef WBTEXT
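+/* WBTEXT private command handler (summary of the code below): with no argument
+ * it reports whether the firmware "wnm_bsstrans_resp" policy is set to the
+ * WBTEXT product policy; a non-zero argument enables WBTEXT and resets the
+ * roam profile, while 0 disables it and restores the legacy roam trigger.
+ */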
+static int wl_android_wbtext(struct net_device *dev, char *command, int total_len)
+{
+	int error = BCME_OK, argc = 0;
+	int data, bytes_written;
+	int roam_trigger[2];
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+	argc = sscanf(command+sizeof(CMD_WBTEXT_ENABLE), "%d", &data);
+	if (!argc) {
+		error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data);
+		if (error) {
+			ANDROID_ERROR(("%s: Failed to get wbtext error = %d\n",
+				__FUNCTION__, error));
+			return error;
+		}
+		bytes_written = snprintf(command, total_len, "WBTEXT %s\n",
+				(data == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT)?
+				"ENABLED" : "DISABLED");
+		return bytes_written;
+	} else {
+		if (data) {
+			data = WL_BSSTRANS_POLICY_PRODUCT_WBTEXT;
+		}
+
+		if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data)) != BCME_OK) {
+			ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n",
+				__FUNCTION__, error));
+			return error;
+		}
+
+		if (data) {
+			/* reset roam_prof when wbtext is on */
+			if ((error = wl_cfg80211_wbtext_set_default(dev)) != BCME_OK) {
+				return error;
+			}
+			dhdp->wbtext_support = TRUE;
+		} else {
+			/* reset legacy roam trigger when wbtext is off */
+			roam_trigger[0] = DEFAULT_ROAM_TRIGGER_VALUE;
+			roam_trigger[1] = WLC_BAND_ALL;
+			if ((error = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+					sizeof(roam_trigger))) != BCME_OK) {
+				ANDROID_ERROR(("%s: Failed to reset roam trigger = %d\n",
+					__FUNCTION__, error));
+				return error;
+			}
+			dhdp->wbtext_support = FALSE;
+		}
+	}
+	return error;
+}
+
+static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev,
+	char *command, int total_len)
+{
+	int error = BCME_OK, argc = 0;
+	int data, bytes_written;
+
+	argc = sscanf(command, CMD_WBTEXT_BTM_TIMER_THRESHOLD " %d\n", &data);
+	if (!argc) {
+		error = wldev_iovar_getint(dev, "wnm_bsstrans_timer_threshold", &data);
+		if (error) {
+			ANDROID_ERROR(("Failed to get wnm_bsstrans_timer_threshold (%d)\n", error));
+			return error;
+		}
+		bytes_written = snprintf(command, total_len, "%d\n", data);
+		return bytes_written;
+	} else {
+		if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_timer_threshold",
+				data)) != BCME_OK) {
+			ANDROID_ERROR(("Failed to set wnm_bsstrans_timer_threshold (%d)\n", error));
+			return error;
+		}
+	}
+	return error;
+}
+
+static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev,
+	char *command, int total_len)
+{
+	int error = BCME_OK, argc = 0;
+	int data = 0, bytes_written;
+
+	argc = sscanf(command, CMD_WBTEXT_BTM_DELTA " %d\n", &data);
+	if (!argc) {
+		error = wldev_iovar_getint(dev, "wnm_btmdelta", &data);
+		if (error) {
+			ANDROID_ERROR(("Failed to get wnm_btmdelta (%d)\n", error));
+			return error;
+		}
+		bytes_written = snprintf(command, total_len, "%d\n", data);
+		return bytes_written;
+	} else {
+		if ((error = wldev_iovar_setint(dev, "wnm_btmdelta",
+				data)) != BCME_OK) {
+			ANDROID_ERROR(("Failed to set wnm_btmdelta (%d)\n", error));
+			return error;
+		}
+	}
+	return error;
+}
+
+#endif /* WBTEXT */
+
+#ifdef PNO_SUPPORT
+#define PNO_PARAM_SIZE 50
+#define VALUE_SIZE 50
+#define LIMIT_STR_FMT  ("%50s %50s")
+
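+/* WLS_BATCHING private command parser. Based on the parsing below, the grammar
+ * is roughly:
+ *   WLS_BATCHING SET <param>=<value> ... (SCANFREQ/BESTN/MSCAN/CHANNEL/RTT)
+ *   WLS_BATCHING GET
+ *   WLS_BATCHING STOP
+ * where the exact keywords are defined by the PNO_PARAM_* / PNO_BATCHING_*
+ * macros.
+ */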
+static int
+wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len)
+{
+	int err = BCME_OK;
+	uint i, tokens;
+	char *pos, *pos2, *token, *token2, *delim;
+	char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1];
+	struct dhd_pno_batch_params batch_params;
+
+	ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+	if (total_len < strlen(CMD_WLS_BATCHING)) {
+		ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+		err = BCME_ERROR;
+		goto exit;
+	}
+	pos = command + strlen(CMD_WLS_BATCHING) + 1;
+	memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params));
+
+	if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
+		pos += strlen(PNO_BATCHING_SET) + 1;
+		while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
+			memset(param, 0, sizeof(param));
+			memset(value, 0, sizeof(value));
+			if (token == NULL || !*token)
+				break;
+			if (*token == '\0')
+				continue;
+			delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER);
+			if (delim != NULL)
+				*delim = ' ';
+
+			tokens = sscanf(token, LIMIT_STR_FMT, param, value);
+			if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) {
+				batch_params.scan_fr = simple_strtol(value, NULL, 0);
+				ANDROID_INFO(("scan_freq : %d\n", batch_params.scan_fr));
+			} else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) {
+				batch_params.bestn = simple_strtol(value, NULL, 0);
+				ANDROID_INFO(("bestn : %d\n", batch_params.bestn));
+			} else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) {
+				batch_params.mscan = simple_strtol(value, NULL, 0);
+				ANDROID_INFO(("mscan : %d\n", batch_params.mscan));
+			} else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) {
+				i = 0;
+				pos2 = value;
+				tokens = sscanf(value, "<%s>", value);
+				if (tokens != 1) {
+					err = BCME_ERROR;
+					ANDROID_ERROR(("%s : invalid format for channel"
+					" <> params\n", __FUNCTION__));
+					goto exit;
+				}
+				while ((token2 = strsep(&pos2,
+						PNO_PARAM_CHANNEL_DELIMETER)) != NULL) {
+					if (token2 == NULL || !*token2)
+						break;
+					if (*token2 == '\0')
+						continue;
+					if (*token2 == 'A' || *token2 == 'B') {
+						batch_params.band = (*token2 == 'A')?
+							WLC_BAND_5G : WLC_BAND_2G;
+						ANDROID_INFO(("band : %s\n",
+							(*token2 == 'A')? "A" : "B"));
+					} else {
+						if ((batch_params.nchan >= WL_NUMCHANNELS) ||
+							(i >= WL_NUMCHANNELS)) {
+							ANDROID_ERROR(("Too many nchan %d\n",
+								batch_params.nchan));
+							err = BCME_BUFTOOSHORT;
+							goto exit;
+						}
+						batch_params.chan_list[i++] =
+							simple_strtol(token2, NULL, 0);
+						batch_params.nchan++;
+						ANDROID_INFO(("channel :%d\n",
+							batch_params.chan_list[i-1]));
+					}
+				 }
+			} else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) {
+				batch_params.rtt = simple_strtol(value, NULL, 0);
+				ANDROID_INFO(("rtt : %d\n", batch_params.rtt));
+			} else {
+				ANDROID_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param));
+				err = BCME_ERROR;
+				goto exit;
+			}
+		}
+		err = dhd_dev_pno_set_for_batch(dev, &batch_params);
+		if (err < 0) {
+			ANDROID_ERROR(("failed to configure batch scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = snprintf(command, total_len, "%d", err);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
+		err = dhd_dev_pno_get_for_batch(dev, command, total_len);
+		if (err < 0) {
+			ANDROID_ERROR(("failed to get batching results\n"));
+		} else {
+			err = strlen(command);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) {
+		err = dhd_dev_pno_stop_for_batch(dev);
+		if (err < 0) {
+			ANDROID_ERROR(("failed to stop batching scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = snprintf(command, total_len, "OK");
+		}
+	} else {
+		ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__));
+		err = BCME_ERROR;
+		goto exit;
+	}
+exit:
+	return err;
+}
+
+#ifndef WL_SCHED_SCAN
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+	wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+	int res = -1;
+	int nssid = 0;
+	cmd_tlv_t *cmd_tlv_temp;
+	char *str_ptr;
+	int tlv_size_left;
+	int pno_time = 0;
+	int pno_repeat = 0;
+	int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+	int i;
+	char pno_in_example[] = {
+		'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+		'S', '1', '2', '0',
+		'S',
+		0x05,
+		'd', 'l', 'i', 'n', 'k',
+		'S',
+		0x04,
+		'G', 'O', 'O', 'G',
+		'T',
+		'0', 'B',
+		'R',
+		'2',
+		'M',
+		'2',
+		0x00
+		};
+#endif /* PNO_SET_DEBUG */
+	ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+		ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+		goto exit_proc;
+	}
+#ifdef PNO_SET_DEBUG
+	memcpy(command, pno_in_example, sizeof(pno_in_example));
+	total_len = sizeof(pno_in_example);
+#endif
+	str_ptr = command + strlen(CMD_PNOSETUP_SET);
+	tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+	cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+
+	if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+		(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+		(cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) {
+
+		str_ptr += sizeof(cmd_tlv_t);
+		tlv_size_left -= sizeof(cmd_tlv_t);
+
+		if ((nssid = wl_parse_ssid_list_tlv(&str_ptr, ssids_local,
+			MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+			ANDROID_ERROR(("SSID is not present or is corrupted, ret=%d\n", nssid));
+			goto exit_proc;
+		} else {
+			if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+				ANDROID_ERROR(("%s scan duration corrupted field size %d\n",
+					__FUNCTION__, tlv_size_left));
+				goto exit_proc;
+			}
+			str_ptr++;
+			pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+			ANDROID_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+			if (str_ptr[0] != 0) {
+				if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+					ANDROID_ERROR(("%s pno repeat : corrupted field\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+				ANDROID_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+				if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+					ANDROID_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+				ANDROID_INFO(("%s: pno_freq_expo_max=%d\n",
+					__FUNCTION__, pno_freq_expo_max));
+			}
+		}
+	} else {
+		ANDROID_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat,
+		pno_freq_expo_max, NULL, 0);
+exit_proc:
+	return res;
+}
+#endif /* !WL_SCHED_SCAN */
+#endif /* PNO_SUPPORT  */
+
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+	int ret;
+	int bytes_written = 0;
+
+	ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+	if (ret)
+		return 0;
+	bytes_written = sizeof(struct ether_addr);
+	return bytes_written;
+}
+
+
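+/* Apply a MAC filter list to the SoftAP: program WLC_SET_MACMODE and
+ * WLC_SET_MACLIST, then deauthenticate any already-associated STA that the
+ * new allow/deny policy no longer permits.
+ */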
+int
+wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
+{
+	int i, j, match;
+	int ret	= 0;
+	char mac_buf[MAX_NUM_OF_ASSOCLIST *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+	/* set filtering mode */
+	if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode)) != 0)) {
+		ANDROID_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	if (macmode != MACLIST_MODE_DISABLED) {
+		/* set the MAC filter list */
+		if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist,
+			sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) {
+			ANDROID_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* get the current list of associated STAs */
+		assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
+		if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist,
+			sizeof(mac_buf))) != 0) {
+			ANDROID_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* do we have any STA associated?  */
+		if (assoc_maclist->count) {
+			/* iterate each associated STA */
+			for (i = 0; i < assoc_maclist->count; i++) {
+				match = 0;
+				/* compare with each entry */
+				for (j = 0; j < maclist->count; j++) {
+					ANDROID_INFO(("%s : associated="MACDBG " list="MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet),
+					MAC2STRDBG(maclist->ea[j].octet)));
+					if (memcmp(assoc_maclist->ea[i].octet,
+						maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
+						match = 1;
+						break;
+					}
+				}
+				/* do conditional deauth */
+				/*   "if not in the allow list" or "if in the deny list" */
+				if ((macmode == MACLIST_MODE_ALLOW && !match) ||
+					(macmode == MACLIST_MODE_DENY && match)) {
+					scb_val_t scbval;
+
+					scbval.val = htod32(1);
+					memcpy(&scbval.ea, &assoc_maclist->ea[i],
+						ETHER_ADDR_LEN);
+					if ((ret = wldev_ioctl_set(dev,
+						WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+						&scbval, sizeof(scb_val_t))) != 0)
+						ANDROID_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n",
+							__FUNCTION__, ret));
+				}
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2 ...
+ */
+static int
+wl_android_set_mac_address_filter(struct net_device *dev, char* str)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	const char *token;
+
+	/* string should look like below (macmode/macnum/maclist) */
+	/*   1 2 00:11:22:33:44:55 00:11:22:33:44:ff  */
+
+	/* get the MAC filter mode */
+	token = strsep((char**)&str, " ");
+	if (!token) {
+		return -1;
+	}
+	macmode = bcm_atoi(token);
+
+	if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
+		ANDROID_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
+		return -1;
+	}
+
+	token = strsep((char**)&str, " ");
+	if (!token) {
+		return -1;
+	}
+	macnum = bcm_atoi(token);
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		ANDROID_ERROR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		ANDROID_ERROR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	for (i = 0; i < list->count; i++) {
+		token = strsep((char**)&str, " ");
+		if (!token) {
+			ANDROID_ERROR(("%s : missing MAC address entry %d\n",
+				__FUNCTION__, i));
+			list->count = i;
+			break;
+		}
+		strncpy(eabuf, token, ETHER_ADDR_STR_LEN - 1);
+		if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
+			ANDROID_ERROR(("%s : mac parsing err index=%d, addr=%s\n",
+				__FUNCTION__, i, eabuf));
+			list->count--;
+			break;
+		}
+		ANDROID_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf));
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
+		ANDROID_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return 0;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+int wl_android_wifi_on(struct net_device *dev)
+{
+	int ret = 0;
+	int retry = POWERUP_MAX_RETRY;
+
+	if (!dev) {
+		ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	printf("%s in 1\n", __FUNCTION__);
+	dhd_net_if_lock(dev);
+	printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on);
+	if (!g_wifi_on) {
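+		/* Power up the WLAN chip, retrying up to POWERUP_MAX_RETRY times;
+		 * on success bring the bus back up and (for SDIO/DBUS) run the
+		 * init ioctls before marking g_wifi_on.
+		 */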
+		do {
+			if (!dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY)) {
+#ifdef BCMSDIO
+				ret = dhd_net_bus_resume(dev, 0);
+#endif /* BCMSDIO */
+			}
+#ifdef BCMPCIE
+			ret = dhd_net_bus_devreset(dev, FALSE);
+#endif /* BCMPCIE */
+			if (ret == 0) {
+				break;
+			}
+			ANDROID_ERROR(("failed to power up wifi chip, retrying (%d attempts left)\n",
+				retry));
+#ifdef BCMPCIE
+			dhd_net_bus_devreset(dev, TRUE);
+#endif /* BCMPCIE */
+			dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		} while (retry-- > 0);
+		if (ret != 0) {
+			ANDROID_ERROR(("failed to power up wifi chip, max retries reached\n"));
+			goto exit;
+		}
+#if defined(BCMSDIO) || defined(BCMDBUS)
+		ret = dhd_net_bus_devreset(dev, FALSE);
+		if (ret)
+			goto err;
+#ifdef BCMSDIO
+		dhd_net_bus_resume(dev, 1);
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMDBUS */
+#if defined(BCMSDIO) || defined(BCMDBUS)
+		if (!ret) {
+			if (dhd_dev_init_ioctl(dev) < 0) {
+				ret = -EFAULT;
+				goto err;
+			}
+		}
+#endif /* BCMSDIO || BCMDBUS */
+		g_wifi_on = TRUE;
+	}
+
+exit:
+	printf("%s: Success\n", __FUNCTION__);
+	dhd_net_if_unlock(dev);
+	return ret;
+
+#if defined(BCMSDIO) || defined(BCMDBUS)
+err:
+	dhd_net_bus_devreset(dev, TRUE);
+#ifdef BCMSDIO
+	dhd_net_bus_suspend(dev);
+#endif /* BCMSDIO */
+	dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+	printf("%s: Failed\n", __FUNCTION__);
+	dhd_net_if_unlock(dev);
+	return ret;
+#endif /* BCMSDIO || BCMDBUS */
+}
+
+int wl_android_wifi_off(struct net_device *dev, bool on_failure)
+{
+	int ret = 0;
+
+	if (!dev) {
+		ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	printf("%s in 1\n", __FUNCTION__);
+#if defined(BCMPCIE) && defined(DHD_DEBUG_UART)
+	ret = dhd_debug_uart_is_running(dev);
+	if (ret) {
+		ANDROID_ERROR(("%s - Debug UART App is running\n", __FUNCTION__));
+		return -EBUSY;
+	}
+#endif	/* BCMPCIE && DHD_DEBUG_UART */
+	dhd_net_if_lock(dev);
+	printf("%s in 2: g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure);
+	if (g_wifi_on || on_failure) {
+#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
+		ret = dhd_net_bus_devreset(dev, TRUE);
+#if  defined(BCMSDIO)
+		dhd_net_bus_suspend(dev);
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
+		dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		g_wifi_on = FALSE;
+	}
+	printf("%s out\n", __FUNCTION__);
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+	if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+		return -1;
+	return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
+}
+
+#ifdef CONNECTION_STATISTICS
+static int
+wl_chanim_stats(struct net_device *dev, u8 *chan_idle)
+{
+	int err;
+	wl_chanim_stats_t *list;
+	/* Parameter _and_ returned buffer of chanim_stats. */
+	wl_chanim_stats_t param;
+	u8 result[WLC_IOCTL_SMLEN];
+	chanim_stats_t *stats;
+
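+	/* Query a single chanim_stats record (WL_CHANIM_COUNT_ONE) and extract
+	 * the channel idle ratio for the current chanspec.
+	 */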
+	memset(&param, 0, sizeof(param));
+
+	param.buflen = htod32(sizeof(wl_chanim_stats_t));
+	param.count = htod32(WL_CHANIM_COUNT_ONE);
+
+	if ((err = wldev_iovar_getbuf(dev, "chanim_stats", (char*)&param, sizeof(wl_chanim_stats_t),
+		(char*)result, sizeof(result), 0)) < 0) {
+		ANDROID_ERROR(("Failed to get chanim results %d \n", err));
+		return err;
+	}
+
+	list = (wl_chanim_stats_t*)result;
+
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	if (list->buflen == 0) {
+		list->version = 0;
+		list->count = 0;
+	} else if (list->version != WL_CHANIM_STATS_VERSION) {
+		ANDROID_ERROR(("Sorry, firmware has wl_chanim_stats version %d "
+			"but driver supports only version %d.\n",
+				list->version, WL_CHANIM_STATS_VERSION));
+		list->buflen = 0;
+		list->count = 0;
+	}
+
+	stats = list->stats;
+	stats->glitchcnt = dtoh32(stats->glitchcnt);
+	stats->badplcp = dtoh32(stats->badplcp);
+	stats->chanspec = dtoh16(stats->chanspec);
+	stats->timestamp = dtoh32(stats->timestamp);
+	stats->chan_idle = dtoh32(stats->chan_idle);
+
+	ANDROID_INFO(("chanspec: 0x%4x glitch: %d badplcp: %d idle: %d timestamp: %d\n",
+		stats->chanspec, stats->glitchcnt, stats->badplcp, stats->chan_idle,
+		stats->timestamp));
+
+	*chan_idle = stats->chan_idle;
+
+	return (err);
+}
+
+static int
+wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len)
+{
+	static char iovar_buf[WLC_IOCTL_MAXLEN];
+	wl_cnt_wlc_t* wlc_cnt = NULL;
+#ifndef DISABLE_IF_COUNTERS
+	wl_if_stats_t* if_stats = NULL;
+#endif /* DISABLE_IF_COUNTERS */
+
+	int link_speed = 0;
+	struct connection_stats *output;
+	unsigned int bufsize = 0;
+	int bytes_written = -1;
+	int ret = 0;
+
+	ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__));
+
+	if (total_len <= 0) {
+		ANDROID_ERROR(("%s: invalid buffer size %d\n", __FUNCTION__, total_len));
+		goto error;
+	}
+
+	bufsize = total_len;
+	if (bufsize < sizeof(struct connection_stats)) {
+		ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%zu\n",
+			__FUNCTION__, bufsize,
+			sizeof(struct connection_stats)));
+		goto error;
+	}
+
+	output = (struct connection_stats *)command;
+
+#ifndef DISABLE_IF_COUNTERS
+	if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) {
+		ANDROID_ERROR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__));
+		goto error;
+	}
+	memset(if_stats, 0, sizeof(*if_stats));
+
+	ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+		(char *)if_stats, sizeof(*if_stats), NULL);
+	if (ret) {
+		ANDROID_ERROR(("%s: if_counters not supported ret=%d\n",
+			__FUNCTION__, ret));
+
+		/* In case if_stats IOVAR is not supported, get information from counters. */
+#endif /* DISABLE_IF_COUNTERS */
+		ret = wldev_iovar_getbuf(dev, "counters", NULL, 0,
+			iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+		if (unlikely(ret)) {
+			ANDROID_ERROR(("counters error (%d) - size = %zu\n", ret, sizeof(wl_cnt_wlc_t)));
+			goto error;
+		}
+		ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("%s wl_cntbuf_to_xtlv_format ERR %d\n",
+			__FUNCTION__, ret));
+			goto error;
+		}
+
+		if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
+			ANDROID_ERROR(("%s wlc_cnt NULL!\n", __FUNCTION__));
+			goto error;
+		}
+
+		output->txframe   = dtoh32(wlc_cnt->txframe);
+		output->txbyte    = dtoh32(wlc_cnt->txbyte);
+		output->txerror   = dtoh32(wlc_cnt->txerror);
+		output->rxframe   = dtoh32(wlc_cnt->rxframe);
+		output->rxbyte    = dtoh32(wlc_cnt->rxbyte);
+		output->txfail    = dtoh32(wlc_cnt->txfail);
+		output->txretry   = dtoh32(wlc_cnt->txretry);
+		output->txretrie  = dtoh32(wlc_cnt->txretrie);
+		output->txrts     = dtoh32(wlc_cnt->txrts);
+		output->txnocts   = dtoh32(wlc_cnt->txnocts);
+		output->txexptime = dtoh32(wlc_cnt->txexptime);
+#ifndef DISABLE_IF_COUNTERS
+	} else {
+		/* Populate from if_stats. */
+		if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) {
+			ANDROID_ERROR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n",
+				__FUNCTION__,  WL_IF_STATS_T_VERSION, if_stats->version));
+			goto error;
+		}
+
+		output->txframe   = (uint32)dtoh64(if_stats->txframe);
+		output->txbyte    = (uint32)dtoh64(if_stats->txbyte);
+		output->txerror   = (uint32)dtoh64(if_stats->txerror);
+		output->rxframe   = (uint32)dtoh64(if_stats->rxframe);
+		output->rxbyte    = (uint32)dtoh64(if_stats->rxbyte);
+		output->txfail    = (uint32)dtoh64(if_stats->txfail);
+		output->txretry   = (uint32)dtoh64(if_stats->txretry);
+		output->txretrie  = (uint32)dtoh64(if_stats->txretrie);
+		if (dtoh16(if_stats->length) > OFFSETOF(wl_if_stats_t, txexptime)) {
+			output->txexptime = (uint32)dtoh64(if_stats->txexptime);
+			output->txrts     = (uint32)dtoh64(if_stats->txrts);
+			output->txnocts   = (uint32)dtoh64(if_stats->txnocts);
+		} else {
+			output->txexptime = 0;
+			output->txrts     = 0;
+			output->txnocts   = 0;
+		}
+	}
+#endif /* DISABLE_IF_COUNTERS */
+
+	/* link_speed is in kbps */
+	ret = wldev_get_link_speed(dev, &link_speed);
+	if (ret || link_speed < 0) {
+		ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
+			__FUNCTION__, ret, link_speed));
+		goto error;
+	}
+
+	output->txrate    = link_speed;
+
+	/* Channel idle ratio. */
+	if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) {
+		output->chan_idle = 0;
+	}
+
+	bytes_written = sizeof(struct connection_stats);
+
+error:
+#ifndef DISABLE_IF_COUNTERS
+	if (if_stats) {
+		kfree(if_stats);
+	}
+#endif /* DISABLE_IF_COUNTERS */
+
+	return bytes_written;
+}
+#endif /* CONNECTION_STATISTICS */
+
+#ifdef WL_NATOE
+static int
+wl_android_process_natoe_cmd(struct net_device *dev, char *command, int total_len)
+{
+	int ret = BCME_ERROR;
+	char *pcmd = command;
+	char *str = NULL;
+	wl_natoe_cmd_info_t cmd_info;
+	const wl_natoe_sub_cmd_t *natoe_cmd = &natoe_cmd_list[0];
+
+	/* skip to cmd name after "natoe" */
+	str = bcmstrtok(&pcmd, " ", NULL);
+
+	/* If natoe subcmd name is not provided, return error */
+	if (*pcmd == '\0') {
+		ANDROID_ERROR(("%s: natoe subcmd not provided\n", __FUNCTION__));
+		ret = -EINVAL;
+		return ret;
+	}
+
+	/* get the natoe command name to str */
+	str = bcmstrtok(&pcmd, " ", NULL);
+
+	while (natoe_cmd->name != NULL) {
+		if (strcmp(natoe_cmd->name, str) == 0)  {
+			/* dispatch cmd to the appropriate handler */
+			if (natoe_cmd->handler) {
+				cmd_info.command = command;
+				cmd_info.tot_len = total_len;
+				ret = natoe_cmd->handler(dev, natoe_cmd, pcmd, &cmd_info);
+			}
+			return ret;
+		}
+		natoe_cmd++;
+	}
+	return ret;
+}
+
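+/* XTLV unpack callback used by wl_natoe_get_ioctl(): formats each natoe TLV
+ * (enable flag, IP/port config, debug stats, table count) into the private
+ * command response buffer.
+ */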
+static int
+wlu_natoe_set_vars_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len)
+{
+	int res = BCME_OK;
+	wl_natoe_cmd_info_t *cmd_info = (wl_natoe_cmd_info_t *)ctx;
+	uint8 *command = cmd_info->command;
+	uint16 total_len = cmd_info->tot_len;
+	uint16 bytes_written = 0;
+
+	UNUSED_PARAMETER(len);
+
+	switch (type) {
+
+	case WL_NATOE_XTLV_ENABLE:
+	{
+		bytes_written = snprintf(command, total_len, "natoe: %s\n",
+				*data?"enabled":"disabled");
+		cmd_info->bytes_written = bytes_written;
+		break;
+	}
+
+	case WL_NATOE_XTLV_CONFIG_IPS:
+	{
+		wl_natoe_config_ips_t *config_ips;
+		uint8 buf[16];
+
+		config_ips = (wl_natoe_config_ips_t *)data;
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_ip, buf);
+		bytes_written = snprintf(command, total_len, "sta ip: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_netmask, buf);
+		bytes_written += snprintf(command + bytes_written, total_len,
+				"sta netmask: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_router_ip, buf);
+		bytes_written += snprintf(command + bytes_written, total_len,
+				"sta router ip: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_dnsip, buf);
+		bytes_written += snprintf(command + bytes_written, total_len,
+				"sta dns ip: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_ip, buf);
+		bytes_written += snprintf(command + bytes_written, total_len,
+				"ap ip: %s\n", buf);
+		bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_netmask, buf);
+		bytes_written += snprintf(command + bytes_written, total_len,
+				"ap netmask: %s\n", buf);
+		cmd_info->bytes_written = bytes_written;
+		break;
+	}
+
+	case WL_NATOE_XTLV_CONFIG_PORTS:
+	{
+		wl_natoe_ports_config_t *ports_config;
+
+		ports_config = (wl_natoe_ports_config_t *)data;
+		bytes_written = snprintf(command, total_len, "starting port num: %d\n",
+				dtoh16(ports_config->start_port_num));
+		bytes_written += snprintf(command + bytes_written, total_len,
+				"number of ports: %d\n", dtoh16(ports_config->no_of_ports));
+		cmd_info->bytes_written = bytes_written;
+		break;
+	}
+
+	case WL_NATOE_XTLV_DBG_STATS:
+	{
+		char *stats_dump = (char *)data;
+
+		bytes_written = snprintf(command, total_len, "%s\n", stats_dump);
+		cmd_info->bytes_written = bytes_written;
+		break;
+	}
+
+	case WL_NATOE_XTLV_TBL_CNT:
+	{
+		bytes_written = snprintf(command, total_len, "natoe max tbl entries: %d\n",
+				dtoh32(*(uint32 *)data));
+		cmd_info->bytes_written = bytes_written;
+		break;
+	}
+
+	default:
+		/* ignore */
+		break;
+	}
+
+	return res;
+}
+
+/*
+ *   --- common for all natoe get commands ----
+ */
+static int
+wl_natoe_get_ioctl(struct net_device *dev, wl_natoe_ioc_t *natoe_ioc,
+		uint16 iocsz, uint8 *buf, uint16 buflen, wl_natoe_cmd_info_t *cmd_info)
+{
+	/* for gets we only need to pass ioc header */
+	wl_natoe_ioc_t *iocresp = (wl_natoe_ioc_t *)buf;
+	int res;
+
+	/*  send getbuf natoe iovar */
+	res = wldev_iovar_getbuf(dev, "natoe", natoe_ioc, iocsz, buf,
+			buflen, NULL);
+
+	/* check the response buffer */
+	if (res == BCME_OK) {
+		/* scan the ioctl tlv buffer and invoke the cbfn for processing */
+		res = bcm_unpack_xtlv_buf(cmd_info, iocresp->data, iocresp->len,
+				BCM_XTLV_OPTION_ALIGN32, wlu_natoe_set_vars_cbfn);
+
+		if (res == BCME_OK) {
+			res = cmd_info->bytes_written;
+		}
+	}
+	else
+	{
+		ANDROID_ERROR(("%s: get command failed code %d\n", __FUNCTION__, res));
+		res = BCME_ERROR;
+	}
+
+	return res;
+}
+
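+/* The natoe subcommand handlers below share a common pattern: build a
+ * wl_natoe_ioc_t header, then either issue a GET via wl_natoe_get_ioctl() or
+ * pack the new value as an XTLV with bcm_pack_xtlv_entry() and set it through
+ * the "natoe" iovar.
+ */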
+static int
+wl_android_natoe_subcmd_enable(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd,
+		char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+	int ret = BCME_OK;
+	wl_natoe_ioc_t *natoe_ioc;
+	char *pcmd = command;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+	uint16 buflen = WL_NATOE_IOC_BUFSZ;
+	bcm_xtlv_t *pxtlv = NULL;
+	char *ioctl_buf = NULL;
+
+	ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
+	if (!ioctl_buf) {
+		ANDROID_ERROR(("ioctl memory alloc failed\n"));
+		return -ENOMEM;
+	}
+
+	/* alloc mem for ioctl header + tlv data */
+	natoe_ioc = kzalloc(iocsz, kflags);
+	if (!natoe_ioc) {
+		ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+		kfree(ioctl_buf);
+		return -ENOMEM;
+	}
+
+	/* make up natoe cmd ioctl header */
+	natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+	natoe_ioc->id = htod16(cmd->id);
+	natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+	pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+		iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+		ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+				WLC_IOCTL_MEDLEN, cmd_info);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("%s: failed to get natoe iovar\n", __FUNCTION__));
+			ret = -EINVAL;
+		}
+	} else {	/* set */
+		uint8 val = bcm_atoi(pcmd);
+
+		/* buflen is max tlv data we can write, it will be decremented as we pack */
+		/* save buflen at start */
+		uint16 buflen_at_start = buflen;
+
+		/* we'll adjust final ioc size at the end */
+		ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE,
+			sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32);
+
+		if (ret != BCME_OK) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* adjust iocsz to the end of last data record */
+		natoe_ioc->len = (buflen_at_start - buflen);
+		iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+		ret = wldev_iovar_setbuf(dev, "natoe",
+				natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("Fail to set iovar %d\n", ret));
+			ret = -EINVAL;
+		}
+	}
+
+exit:
+	kfree(ioctl_buf);
+	kfree(natoe_ioc);
+
+	return ret;
+}
+
+static int
+wl_android_natoe_subcmd_config_ips(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+	int ret = BCME_OK;
+	wl_natoe_config_ips_t config_ips;
+	wl_natoe_ioc_t *natoe_ioc;
+	char *pcmd = command;
+	char *str;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+	uint16 buflen = WL_NATOE_IOC_BUFSZ;
+	bcm_xtlv_t *pxtlv = NULL;
+	char *ioctl_buf = NULL;
+
+	ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
+	if (!ioctl_buf) {
+		ANDROID_ERROR(("ioctl memory alloc failed\n"));
+		return -ENOMEM;
+	}
+
+	/* alloc mem for ioctl header + tlv data */
+	natoe_ioc = kzalloc(iocsz, kflags);
+	if (!natoe_ioc) {
+		ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+		kfree(ioctl_buf);
+		return -ENOMEM;
+	}
+
+	/* make up natoe cmd ioctl header */
+	natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+	natoe_ioc->id = htod16(cmd->id);
+	natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+	pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+		iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+		ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+				WLC_IOCTL_MEDLEN, cmd_info);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("%s: failed to get natoe iovar\n", __FUNCTION__));
+			ret = -EINVAL;
+		}
+	} else {	/* set */
+		/* buflen is max tlv data we can write, it will be decremented as we pack */
+		/* save buflen at start */
+		uint16 buflen_at_start = buflen;
+
+		memset(&config_ips, 0, sizeof(config_ips));
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) {
+			ANDROID_ERROR(("Invalid STA IP addr %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_netmask)) {
+			ANDROID_ERROR(("Invalid STA netmask %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_router_ip)) {
+			ANDROID_ERROR(("Invalid STA router IP addr %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_dnsip)) {
+			ANDROID_ERROR(("Invalid STA DNS IP addr %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_ip)) {
+			ANDROID_ERROR(("Invalid AP IP addr %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_netmask)) {
+			ANDROID_ERROR(("Invalid AP netmask %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		ret = bcm_pack_xtlv_entry((uint8**)&pxtlv,
+				&buflen, WL_NATOE_XTLV_CONFIG_IPS, sizeof(config_ips),
+				&config_ips, BCM_XTLV_OPTION_ALIGN32);
+
+		if (ret != BCME_OK) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* adjust iocsz to the end of last data record */
+		natoe_ioc->len = (buflen_at_start - buflen);
+		iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+		ret = wldev_iovar_setbuf(dev, "natoe",
+				natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("Fail to set iovar %d\n", ret));
+			ret = -EINVAL;
+		}
+	}
+
+exit:
+	kfree(ioctl_buf);
+	kfree(natoe_ioc);
+
+	return ret;
+}
+
+static int
+wl_android_natoe_subcmd_config_ports(struct net_device *dev,
+		const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+	int ret = BCME_OK;
+	wl_natoe_ports_config_t ports_config;
+	wl_natoe_ioc_t *natoe_ioc;
+	char *pcmd = command;
+	char *str;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+	uint16 buflen = WL_NATOE_IOC_BUFSZ;
+	bcm_xtlv_t *pxtlv = NULL;
+	char *ioctl_buf = NULL;
+
+	ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
+	if (!ioctl_buf) {
+		ANDROID_ERROR(("ioctl memory alloc failed\n"));
+		return -ENOMEM;
+	}
+
+	/* alloc mem for ioctl header + tlv data */
+	natoe_ioc = kzalloc(iocsz, kflags);
+	if (!natoe_ioc) {
+		ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+		kfree(ioctl_buf);
+		return -ENOMEM;
+	}
+
+	/* make up natoe cmd ioctl header */
+	natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+	natoe_ioc->id = htod16(cmd->id);
+	natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+	pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+		iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+		ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+				WLC_IOCTL_MEDLEN, cmd_info);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("%s: failed to get natoe iovar\n", __FUNCTION__));
+			ret = -EINVAL;
+		}
+	} else {	/* set */
+		/* buflen is max tlv data we can write, it will be decremented as we pack */
+		/* save buflen at start */
+		uint16 buflen_at_start = buflen;
+
+		memset(&ports_config, 0, sizeof(ports_config));
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str) {
+			ANDROID_ERROR(("Invalid port string %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+		ports_config.start_port_num = htod16(bcm_atoi(str));
+
+		str = bcmstrtok(&pcmd, " ", NULL);
+		if (!str) {
+			ANDROID_ERROR(("Invalid port string %s\n", str));
+			ret = -EINVAL;
+			goto exit;
+		}
+		ports_config.no_of_ports = htod16(bcm_atoi(str));
+
+		if ((uint32)(ports_config.start_port_num + ports_config.no_of_ports) >
+				NATOE_MAX_PORT_NUM) {
+			ANDROID_ERROR(("Invalid port configuration\n"));
+			ret = -EINVAL;
+			goto exit;
+		}
+		ret = bcm_pack_xtlv_entry((uint8**)&pxtlv,
+				&buflen, WL_NATOE_XTLV_CONFIG_PORTS, sizeof(ports_config),
+				&ports_config, BCM_XTLV_OPTION_ALIGN32);
+
+		if (ret != BCME_OK) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* adjust iocsz to the end of last data record */
+		natoe_ioc->len = (buflen_at_start - buflen);
+		iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+		ret = wldev_iovar_setbuf(dev, "natoe",
+				natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("Fail to set iovar %d\n", ret));
+			ret = -EINVAL;
+		}
+	}
+
+exit:
+	kfree(ioctl_buf);
+	kfree(natoe_ioc);
+
+	return ret;
+}
+
+static int
+wl_android_natoe_subcmd_dbg_stats(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd,
+		char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+	int ret = BCME_OK;
+	wl_natoe_ioc_t *natoe_ioc;
+	char *pcmd = command;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ;
+	uint16 buflen = WL_NATOE_DBG_STATS_BUFSZ;
+	bcm_xtlv_t *pxtlv = NULL;
+	char *ioctl_buf = NULL;
+
+	ioctl_buf = kzalloc(WLC_IOCTL_MAXLEN, kflags);
+	if (!ioctl_buf) {
+		ANDROID_ERROR(("ioctl memory alloc failed\n"));
+		return -ENOMEM;
+	}
+
+	/* alloc mem for ioctl header + tlv data */
+	natoe_ioc = kzalloc(iocsz, kflags);
+	if (!natoe_ioc) {
+		ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+		kfree(ioctl_buf);
+		return -ENOMEM;
+	}
+
+	/* make up natoe cmd ioctl header */
+	natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+	natoe_ioc->id = htod16(cmd->id);
+	natoe_ioc->len = htod16(WL_NATOE_DBG_STATS_BUFSZ);
+	pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+		iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+		ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+				WLC_IOCTL_MAXLEN, cmd_info);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("%s: failed to get natoe iovar\n", __FUNCTION__));
+			ret = -EINVAL;
+		}
+	} else {	/* set */
+		uint8 val = bcm_atoi(pcmd);
+
+		/* buflen is max tlv data we can write, it will be decremented as we pack */
+		/* save buflen at start */
+		uint16 buflen_at_start = buflen;
+
+		/* we'll adjust final ioc size at the end */
+		ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE,
+			sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32);
+
+		if (ret != BCME_OK) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* adjust iocsz to the end of last data record */
+		natoe_ioc->len = (buflen_at_start - buflen);
+		iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+		ret = wldev_iovar_setbuf(dev, "natoe",
+				natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("Fail to set iovar %d\n", ret));
+			ret = -EINVAL;
+		}
+	}
+
+exit:
+	kfree(ioctl_buf);
+	kfree(natoe_ioc);
+
+	return ret;
+}
+
+static int
+wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd,
+		char *command, wl_natoe_cmd_info_t *cmd_info)
+{
+	int ret = BCME_OK;
+	wl_natoe_ioc_t *natoe_ioc;
+	char *pcmd = command;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
+	uint16 buflen = WL_NATOE_IOC_BUFSZ;
+	bcm_xtlv_t *pxtlv = NULL;
+	char *ioctl_buf = NULL;
+
+	ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
+	if (!ioctl_buf) {
+		ANDROID_ERROR(("ioctl memory alloc failed\n"));
+		return -ENOMEM;
+	}
+
+	/* alloc mem for ioctl header + tlv data */
+	natoe_ioc = kzalloc(iocsz, kflags);
+	if (!natoe_ioc) {
+		ANDROID_ERROR(("ioctl header memory alloc failed\n"));
+		kfree(ioctl_buf);
+		return -ENOMEM;
+	}
+
+	/* make up natoe cmd ioctl header */
+	natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION);
+	natoe_ioc->id = htod16(cmd->id);
+	natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ);
+	pxtlv = (bcm_xtlv_t *)natoe_ioc->data;
+
+	if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */
+		iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv);
+		ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
+				WLC_IOCTL_MEDLEN, cmd_info);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("%s: failed to get natoe iovar\n", __FUNCTION__));
+			ret = -EINVAL;
+		}
+	} else {	/* set */
+		uint32 val = bcm_atoi(pcmd);
+
+		/* buflen is max tlv data we can write, it will be decremented as we pack */
+		/* save buflen at start */
+		uint16 buflen_at_start = buflen;
+
+		/* we'll adjust final ioc size at the end */
+		ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_TBL_CNT,
+			sizeof(uint32), &val, BCM_XTLV_OPTION_ALIGN32);
+
+		if (ret != BCME_OK) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* adjust iocsz to the end of last data record */
+		natoe_ioc->len = (buflen_at_start - buflen);
+		iocsz = sizeof(*natoe_ioc) + natoe_ioc->len;
+
+		ret = wldev_iovar_setbuf(dev, "natoe",
+				natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (ret != BCME_OK) {
+			ANDROID_ERROR(("Fail to set iovar %d\n", ret));
+			ret = -EINVAL;
+		}
+	}
+
+exit:
+	kfree(ioctl_buf);
+	kfree(natoe_ioc);
+
+	return ret;
+}
+
+#endif /* WL_NATOE */
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+/* SoftAP feature */
+#define APCS_BAND_2G_LEGACY1	20
+#define APCS_BAND_2G_LEGACY2	0
+#define APCS_BAND_AUTO		"band=auto"
+#define APCS_BAND_2G		"band=2g"
+#define APCS_BAND_5G		"band=5g"
+#define APCS_MAX_2G_CHANNELS	11
+#define APCS_MAX_RETRY		10
+#define APCS_DEFAULT_2G_CH	1
+#define APCS_DEFAULT_5G_CH	149
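+/* HAPD_AUTO_CHANNEL (ACS) handler: pick a band from the command argument,
+ * collect the candidate chanspecs, start WLC_START_CHANNEL_SEL and poll
+ * WLC_GET_CHANNEL_SEL up to APCS_MAX_RETRY times; on failure fall back to
+ * APCS_DEFAULT_2G_CH / APCS_DEFAULT_5G_CH.
+ */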
+static int
+wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str,
+	char* command, int total_len)
+{
+	int channel = 0;
+	int chosen = 0;
+	int retry = 0;
+	int ret = 0;
+	int spect = 0;
+	u8 *reqbuf = NULL;
+	uint32 band = WLC_BAND_2G;
+	uint32 buf_size;
+	char *pos = command;
+	int band_new, band_cur = WLC_BAND_2G;
+
+	if (cmd_str) {
+		ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str)));
+		if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
+			band = WLC_BAND_AUTO;
+		} else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
+			band = WLC_BAND_5G;
+		} else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
+			band = WLC_BAND_2G;
+		} else {
+			/*
+			 * For backward compatibility: Some platforms used to issue argument 20 or 0
+			 * to enforce the 2G channel selection
+			 */
+			channel = bcm_atoi(cmd_str);
+			if ((channel == APCS_BAND_2G_LEGACY1) ||
+				(channel == APCS_BAND_2G_LEGACY2)) {
+				band = WLC_BAND_2G;
+			} else {
+				ANDROID_ERROR(("%s: Invalid argument\n", __FUNCTION__));
+				return -EINVAL;
+			}
+		}
+	} else {
+		/* If no argument is provided, default to 2G */
+		ANDROID_ERROR(("%s: No argument given default to 2.4G scan\n", __FUNCTION__));
+		band = WLC_BAND_2G;
+	}
+	ANDROID_INFO(("%s : HAPD_AUTO_CHANNEL = %d, band=%d \n", __FUNCTION__, channel, band));
+
+	ret = wldev_ioctl_set(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur));
+
+	if ((ret =
+	     wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect))) < 0) {
+		ANDROID_ERROR(("%s: ACS: error getting the spect\n", __FUNCTION__));
+		goto done;
+	}
+
+	if (spect > 0) {
+		/* If a STA is connected, return the STA channel; otherwise ACS can be
+		 * issued, so set spect to 0 and proceed with ACS
+		 */
+		channel = wl_cfg80211_get_sta_channel(dev);
+		if (channel) {
+			channel = (channel <= CH_MAX_2G_CHANNEL) ? channel : APCS_DEFAULT_2G_CH;
+			goto done2;
+		}
+
+		if ((ret = wl_cfg80211_set_spect(dev, 0)) < 0) {
+			ANDROID_ERROR(("ACS: error while setting spect\n"));
+			goto done;
+		}
+	}
+
+	reqbuf = kzalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+	if (reqbuf == NULL) {
+		ANDROID_ERROR(("%s: failed to allocate chanspec buffer\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	if (band == WLC_BAND_AUTO) {
+		ANDROID_INFO(("%s: ACS full channel scan \n", __func__));
+		reqbuf[0] = htod32(0);
+	} else if (band == WLC_BAND_5G) {
+		band_new = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G;
+		ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_new, sizeof(band_new));
+		if (ret < 0)
+			WL_ERR(("WLC_SET_BAND error %d\n", ret));
+		ANDROID_INFO(("%s: ACS 5G band scan \n", __func__));
+		if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
+			ANDROID_ERROR(("ACS 5g chanspec retreival failed! \n"));
+			goto done;
+		}
+	} else if (band == WLC_BAND_2G) {
+		/*
+		 * If the channel argument is not provided or argument 20 is provided,
+		 * restrict the channel to 2 GHz, 20 MHz BW, no SB
+		 */
+		ANDROID_INFO(("%s: ACS 2G band scan \n", __func__));
+		if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
+			ANDROID_ERROR(("ACS 2g chanspec retreival failed! \n"));
+			goto done;
+		}
+	} else {
+		ANDROID_ERROR(("ACS: No band chosen\n"));
+		goto done2;
+	}
+
+	buf_size = CHANSPEC_BUF_SIZE;
+	ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf,
+		buf_size);
+	if (ret < 0) {
+		ANDROID_ERROR(("%s: can't start auto channel scan, err = %d\n",
+			__FUNCTION__, ret));
+		channel = 0;
+		goto done;
+	}
+
+	/* Wait for auto channel selection, max 3000 ms */
+	if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) {
+		OSL_SLEEP(500);
+	} else {
+		/*
+		 * A full channel scan takes at least 1.2 s even with
+		 * parallel scan; max wait time: 3500 ms
+		 */
+		OSL_SLEEP(1000);
+	}
+
+	retry = APCS_MAX_RETRY;
+	while (retry--) {
+		ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &chosen,
+			sizeof(chosen));
+		if (ret < 0) {
+			chosen = 0;
+		} else {
+			chosen = dtoh32(chosen);
+		}
+
+		if ((ret == 0) && (dtoh32(chosen) != 0)) {
+			uint chip;
+			chip = dhd_conf_get_chip(dhd_get_pub(dev));
+			if (chip != BCM43143_CHIP_ID) {
+				u32 chanspec = 0;
+				chanspec = wl_chspec_driver_to_host(chosen);
+				ANDROID_INFO(("%s: selected chanspec = 0x%x\n", __FUNCTION__, chanspec));
+				chosen = wf_chspec_ctlchan(chanspec);
+				ANDROID_INFO(("%s: selected chosen = 0x%x\n", __FUNCTION__, chosen));
+			}
+		}
+
+		if (chosen) {
+			int chosen_band;
+			int apcs_band;
+#ifdef D11AC_IOTYPES
+			if (wl_cfg80211_get_ioctl_version() == 1) {
+				channel = LCHSPEC_CHANNEL((chanspec_t)chosen);
+			} else {
+				channel = CHSPEC_CHANNEL((chanspec_t)chosen);
+			}
+#else
+			channel = CHSPEC_CHANNEL((chanspec_t)chosen);
+#endif /* D11AC_IOTYPES */
+			apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band;
+			chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G;
+			if (band == WLC_BAND_AUTO) {
+				printf("%s: selected channel = %d\n", __FUNCTION__, channel);
+				break;
+			} else if (apcs_band == chosen_band) {
+				printf("%s: selected channel = %d\n", __FUNCTION__, channel);
+				break;
+			}
+		}
+		ANDROID_INFO(("%s: %d tried, ret = %d, chosen = 0x%x\n", __FUNCTION__,
+			(APCS_MAX_RETRY - retry), ret, chosen));
+		OSL_SLEEP(250);
+	}
+
+done:
+	if ((retry == 0) || (ret < 0)) {
+		/* On failure, fallback to a default channel */
+		if (band == WLC_BAND_5G) {
+			channel = APCS_DEFAULT_5G_CH;
+		} else {
+			channel = APCS_DEFAULT_2G_CH;
+		}
+		ANDROID_ERROR(("%s: ACS failed."
+			" Fall back to default channel (%d) \n", __FUNCTION__, channel));
+	}
+done2:
+	ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur));
+	if (ret < 0)
+		WL_ERR(("WLC_SET_BAND error %d\n", ret));
+	if (spect > 0) {
+		if ((ret = wl_cfg80211_set_spect(dev, spect)) < 0) {
+			ANDROID_ERROR(("%s: ACS: error while setting spect\n", __FUNCTION__));
+		}
+	}
+
+	if (reqbuf) {
+		kfree(reqbuf);
+	}
+
+	if (channel) {
+		if (channel < 15)
+			pos += snprintf(pos, total_len, "2g=");
+		else
+			pos += snprintf(pos, total_len, "5g=");
+		pos += snprintf(pos, total_len, "%d", channel);
+		ANDROID_INFO(("%s: command result is %s \n", __FUNCTION__, command));
+		return strlen(command);
+	} else {
+		return ret;
+	}
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+
+#ifdef SUPPORT_SET_LPC
+static int
+wl_android_set_lpc(struct net_device *dev, const char* string_num)
+{
+	int lpc_enabled, ret;
+	s32 val = 1;
+
+	lpc_enabled = bcm_atoi(string_num);
+	ANDROID_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled));
+
+	ret = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32));
+	if (ret < 0)
+		ANDROID_ERROR(("WLC_DOWN error %d\n", ret));
+
+	wldev_iovar_setint(dev, "lpc", lpc_enabled);
+
+	ret = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(s32));
+	if (ret < 0)
+		ANDROID_ERROR(("WLC_UP error %d\n", ret));
+
+	return 1;
+}
+#endif /* SUPPORT_SET_LPC */
+
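+/* Set the short/long retry limits (SRL/LRL): 7/4 by default, or 4/2 when
+ * 'change' is requested.
+ */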
+static int
+wl_android_ch_res_rl(struct net_device *dev, bool change)
+{
+	int error = 0;
+	s32 srl = 7;
+	s32 lrl = 4;
+	printk("%s enter\n", __FUNCTION__);
+	if (change) {
+		srl = 4;
+		lrl = 2;
+	}
+	error = wldev_ioctl_set(dev, WLC_SET_SRL, &srl, sizeof(s32));
+	if (error) {
+		ANDROID_ERROR(("Failed to set SRL, error = %d\n", error));
+	}
+	error = wldev_ioctl_set(dev, WLC_SET_LRL, &lrl, sizeof(s32));
+	if (error) {
+		ANDROID_ERROR(("Failed to set LRL, error = %d\n", error));
+	}
+	return error;
+}
+
+
+#ifdef WL_RELMCAST
+static int
+wl_android_rmc_enable(struct net_device *net, int rmc_enable)
+{
+	int err;
+
+	err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable);
+	return err;
+}
+
+static int
+wl_android_rmc_set_leader(struct net_device *dev, const char* straddr)
+{
+	int error  = BCME_OK;
+	char smbuf[WLC_IOCTL_SMLEN];
+	wl_rmc_entry_t rmc_entry;
+	ANDROID_INFO(("%s: Set new RMC leader %s\n", __FUNCTION__, straddr));
+
+	memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+	if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) {
+		if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) {
+			ANDROID_INFO(("%s: Set auto leader selection mode\n", __FUNCTION__));
+			memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+		} else {
+			ANDROID_ERROR(("%s: No valid mac address provided\n",
+				__FUNCTION__));
+			return BCME_ERROR;
+		}
+	}
+
+	error = wldev_iovar_setbuf(dev, "rmc_ar", &rmc_entry, sizeof(wl_rmc_entry_t),
+		smbuf, sizeof(smbuf), NULL);
+
+	if (error != BCME_OK) {
+		ANDROID_ERROR(("%s: Unable to set RMC leader, error = %d\n",
+			__FUNCTION__, error));
+	}
+
+	return error;
+}
+
+static int wl_android_set_rmc_event(struct net_device *dev, char *command, int total_len)
+{
+	int err = 0;
+	int pid = 0;
+
+	if (sscanf(command, CMD_SET_RMC_EVENT " %d", &pid) <= 0) {
+		ANDROID_ERROR(("Failed to get Parameter from : %s\n", command));
+		return -1;
+	}
+
+	/* set pid; if the event happens, send a notification through netlink */
+	wl_cfg80211_set_rmc_pid(dev, pid);
+
+	ANDROID_TRACE(("RMC pid=%d\n", pid));
+
+	return err;
+}
+#endif /* WL_RELMCAST */
+
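+/* Report the current single-core scan setting ("scan_ps" iovar) in the
+ * command buffer.
+ */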
+int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int bytes_written = 0;
+	int mode = 0;
+
+	error = wldev_iovar_getint(dev, "scan_ps", &mode);
+	if (error) {
+		ANDROID_ERROR(("%s: Failed to get single core scan Mode, error = %d\n",
+			__FUNCTION__, error));
+		return -1;
+	}
+
+	bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_SCSCAN, mode);
+
+	return bytes_written;
+}
+
+int wl_android_set_singlecore_scan(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int mode = 0;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	error = wldev_iovar_setint(dev, "scan_ps", mode);
+	if (error) {
+		ANDROID_ERROR(("%s[1]: Failed to set Mode %d, error = %d\n",
+		__FUNCTION__, mode, error));
+		return -1;
+	}
+
+	return error;
+}
+#ifdef TEST_TX_POWER_CONTROL
+static int
+wl_android_set_tx_power(struct net_device *dev, const char* string_num)
+{
+	int err = 0;
+	s32 dbm;
+	enum nl80211_tx_power_setting type;
+
+	dbm = bcm_atoi(string_num);
+
+	if (dbm < -1) {
+		ANDROID_ERROR(("%s: dbm is negative...\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	if (dbm == -1)
+		type = NL80211_TX_POWER_AUTOMATIC;
+	else
+		type = NL80211_TX_POWER_FIXED;
+
+	err = wl_set_tx_power(dev, type, dbm);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+		return err;
+	}
+
+	return 1;
+}
+
+static int
+wl_android_get_tx_power(struct net_device *dev, char *command, int total_len)
+{
+	int err;
+	int bytes_written;
+	s32 dbm = 0;
+
+	err = wl_get_tx_power(dev, &dbm);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+		return err;
+	}
+
+	bytes_written = snprintf(command, total_len, "%s %d",
+		CMD_TEST_GET_TX_POWER, dbm);
+
+	ANDROID_ERROR(("%s: GET_TX_POWER: dBm=%d\n", __FUNCTION__, dbm));
+
+	return bytes_written;
+}
+#endif /* TEST_TX_POWER_CONTROL */
+
+static int
+wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num)
+{
+	int err = 0;
+	int setval = 0;
+	s32 mode = bcm_atoi(string_num);
+
+	/* '0' means activate sarlimit
+	 * and '-1' means back to normal state (deactivate sarlimit)
+	 */
+	if (mode == 0) {
+		ANDROID_INFO(("%s: SAR limit control activated\n", __FUNCTION__));
+		setval = 1;
+	} else if (mode == -1) {
+		ANDROID_INFO(("%s: SAR limit control deactivated\n", __FUNCTION__));
+		setval = 0;
+	} else {
+		return -EINVAL;
+	}
+
+	err = wldev_iovar_setint(dev, "sar_enable", setval);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+		return err;
+	}
+	return 1;
+}
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int mode = 0;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	error = wldev_iovar_setint(dev, "roam_off", mode);
+	if (error) {
+		ANDROID_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n",
+		__FUNCTION__, mode, error));
+		return -1;
+	}
+	else
+		ANDROID_ERROR(("%s: succeeded to set roaming Mode %d, error = %d\n",
+		__FUNCTION__, mode, error));
+	return 0;
+}
+
+#ifdef WL_CFG80211
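+/* Parse an OUI plus hex payload from the command and install it as a
+ * vendor-specific IE in IBSS beacons and probe responses via the "vndr_ie"
+ * iovar; any previously installed IBSS VSIE is deleted first.
+ */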
+int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len)
+{
+	char ie_buf[VNDR_IE_MAX_LEN];
+	char *ioctl_buf = NULL;
+	char hex[] = "XX";
+	char *pcmd = NULL;
+	int ielen = 0, datalen = 0, idx = 0, tot_len = 0;
+	vndr_ie_setbuf_t *vndr_ie = NULL;
+	s32 iecount;
+	uint32 pktflag;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	s32 err = BCME_OK, bssidx;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	/* Check whether a VSIE (Vendor Specific IE) was already added.
+	 * If it exists, send an IOVAR to delete it
+	 */
+	if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) {
+		return -EINVAL;
+	}
+
+	if (total_len < (strlen(CMD_SETIBSSBEACONOUIDATA) + 1)) {
+		ANDROID_ERROR(("error. total_len:%d\n", total_len));
+		return -EINVAL;
+	}
+
+	pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1;
+	for (idx = 0; idx < DOT11_OUI_LEN; idx++) {
+		if (*pcmd == '\0') {
+			ANDROID_ERROR(("error while parsing OUI.\n"));
+			return -EINVAL;
+		}
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx] =  (uint8)simple_strtoul(hex, NULL, 16);
+	}
+	pcmd++;
+	while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) {
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx++] =  (uint8)simple_strtoul(hex, NULL, 16);
+		datalen++;
+	}
+
+	if (datalen <= 0) {
+		ANDROID_ERROR(("error. vndr ie len:%d\n", datalen));
+		return -EINVAL;
+	}
+
+	tot_len = (int)(sizeof(vndr_ie_setbuf_t) + (datalen - 1));
+	vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
+	if (!vndr_ie) {
+		ANDROID_ERROR(("IE memory alloc failed\n"));
+		return -ENOMEM;
+	}
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
+	vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Set packet flags: beacons and probe responses will contain this IE */
+	pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+	/* Set the IE ID */
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID;
+
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf,
+		DOT11_OUI_LEN);
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data,
+		&ie_buf[DOT11_OUI_LEN], datalen);
+
+	ielen = DOT11_OUI_LEN + datalen;
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
+
+	ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+	if (!ioctl_buf) {
+		ANDROID_ERROR(("ioctl memory alloc failed\n"));
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+		return -ENOMEM;
+	}
+	memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);	/* init the buffer */
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		ANDROID_ERROR(("Find index failed\n"));
+		err = BCME_ERROR;
+		goto end;
+	}
+	err = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie", vndr_ie, tot_len, ioctl_buf,
+		WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync);
+
+end:
+	if (err != BCME_OK) {
+		err = -EINVAL;
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+	}
+	else {
+		/* do NOT free 'vndr_ie' for the next process */
+		wl_cfg80211_ibss_vsie_set_buffer(dev, vndr_ie, tot_len);
+	}
+
+	if (ioctl_buf) {
+		kfree(ioctl_buf);
+	}
+
+	return err;
+}
+#endif
+
+#if defined(BCMFW_ROAM_ENABLE)
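+/* Build a "join_pref" buffer from the AKM and unicast cipher suites passed
+ * in the command: an RSSI preference TLV followed by one WPA tuple per
+ * AKM/cipher combination (multicast cipher wildcarded).
+ */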
+static int
+wl_android_set_roampref(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+	uint8 buf[MAX_BUF_SIZE];
+	uint8 *pref = buf;
+	char *pcmd;
+	int num_ucipher_suites = 0;
+	int num_akm_suites = 0;
+	wpa_suite_t ucipher_suites[MAX_NUM_SUITES];
+	wpa_suite_t akm_suites[MAX_NUM_SUITES];
+	int num_tuples = 0;
+	int total_bytes = 0;
+	int total_len_left;
+	int i, j;
+	char hex[] = "XX";
+
+	pcmd = command + strlen(CMD_SET_ROAMPREF) + 1;
+	total_len_left = total_len - (strlen(CMD_SET_ROAMPREF) + 1);
+
+	num_akm_suites = simple_strtoul(pcmd, NULL, 16);
+	if (num_akm_suites > MAX_NUM_SUITES) {
+		ANDROID_ERROR(("too many AKM suites = %d\n", num_akm_suites));
+		return -1;
+	}
+
+	/* Increment for number of AKM suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	/* check to make sure pcmd does not overrun */
+	if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	memset(buf, 0, sizeof(buf));
+	memset(akm_suites, 0, sizeof(akm_suites));
+	memset(ucipher_suites, 0, sizeof(ucipher_suites));
+
+	/* Save the AKM suites passed in the command */
+	for (i = 0; i < num_akm_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32));
+	}
+
+	total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE);
+	num_ucipher_suites = simple_strtoul(pcmd, NULL, 16);
+	/* Increment for number of cipher suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	/* Save the cipher suites passed in the command */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32));
+	}
+
+	/* Join preference for RSSI
+	 * Type	  : 1 byte (0x01)
+	 * Length : 1 byte (0x02)
+	 * Value  : 2 bytes	(reserved)
+	 */
+	*pref++ = WL_JOIN_PREF_RSSI;
+	*pref++ = JOIN_PREF_RSSI_LEN;
+	*pref++ = 0;
+	*pref++ = 0;
+
+	/* Join preference for WPA
+	 * Type	  : 1 byte (0x02)
+	 * Length : 1 byte (not used)
+	 * Value  : (variable length)
+	 *		reserved: 1 byte
+	 *      count	: 1 byte (no of tuples)
+	 *		Tuple1	: 12 bytes
+	 *			akm[4]
+	 *			ucipher[4]
+	 *			mcipher[4]
+	 *		Tuple2	: 12 bytes
+	 *		Tuplen	: 12 bytes
+	 */
+	num_tuples = num_akm_suites * num_ucipher_suites;
+	if (num_tuples != 0) {
+		if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) {
+			*pref++ = WL_JOIN_PREF_WPA;
+			*pref++ = 0;
+			*pref++ = 0;
+			*pref++ = (uint8)num_tuples;
+			total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
+				(JOIN_PREF_WPA_TUPLE_SIZE * num_tuples);
+		} else {
+			ANDROID_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__));
+			return -1;
+		}
+	} else {
+		/* No WPA config, configure only RSSI preference */
+		total_bytes = JOIN_PREF_RSSI_SIZE;
+	}
+
+	/* akm-ucipher-mcipher tuples in the format required for join_pref */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		for (j = 0; j < num_akm_suites; j++) {
+			memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			/* Set to 0 to match any available multicast cipher */
+			memset(pref, 0, WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+		}
+	}
+
+	prhex("join pref", (uint8 *)buf, total_bytes);
+	error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		ANDROID_ERROR(("Failed to set join_pref, error = %d\n", error));
+	}
+	return error;
+}
+#endif /* defined(BCMFW_ROAM_ENABLE) */
+
+#ifdef WL_CFG80211
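+/* Apply one iovar or ioctl setting and save its previous value on 'head' so
+ * wl_android_iolist_resume() can restore it later (used for miracast mode
+ * switching).
+ */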
+static int
+wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config)
+{
+	struct io_cfg *resume_cfg;
+	s32 ret;
+
+	resume_cfg = kzalloc(sizeof(struct io_cfg), GFP_KERNEL);
+	if (!resume_cfg)
+		return -ENOMEM;
+
+	if (config->iovar) {
+		ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param);
+		if (ret) {
+			ANDROID_ERROR(("%s: Failed to get current %s value\n",
+				__FUNCTION__, config->iovar));
+			goto error;
+		}
+
+		ret = wldev_iovar_setint(dev, config->iovar, config->param);
+		if (ret) {
+			ANDROID_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+
+		resume_cfg->iovar = config->iovar;
+	} else {
+		resume_cfg->arg = kzalloc(config->len, GFP_KERNEL);
+		if (!resume_cfg->arg) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ret = wldev_ioctl_get(dev, config->ioctl, resume_cfg->arg, config->len);
+		if (ret) {
+			ANDROID_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__,
+				config->ioctl));
+			goto error;
+		}
+		ret = wldev_ioctl_set(dev, config->ioctl + 1, config->arg, config->len);
+		if (ret) {
+			ANDROID_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+		if (config->ioctl + 1 == WLC_SET_PM)
+			wl_cfg80211_update_power_mode(dev);
+		resume_cfg->ioctl = config->ioctl;
+		resume_cfg->len = config->len;
+	}
+
+	list_add(&resume_cfg->list, head);
+
+	return 0;
+error:
+	kfree(resume_cfg->arg);
+	kfree(resume_cfg);
+	return ret;
+}
+
+static void
+wl_android_iolist_resume(struct net_device *dev, struct list_head *head)
+{
+	struct io_cfg *config;
+	struct list_head *cur, *q;
+	s32 ret = 0;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_safe(cur, q, head) {
+		config = list_entry(cur, struct io_cfg, list);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		if (config->iovar) {
+			if (!ret)
+				ret = wldev_iovar_setint(dev, config->iovar,
+					config->param);
+		} else {
+			if (!ret)
+				ret = wldev_ioctl_set(dev, config->ioctl + 1,
+					config->arg, config->len);
+			if (config->ioctl + 1 == WLC_SET_PM)
+				wl_cfg80211_update_power_mode(dev);
+			kfree(config->arg);
+		}
+		list_del(cur);
+		kfree(config);
+	}
+}
+#ifdef WL11ULB
+static int
+wl_android_set_ulb_mode(struct net_device *dev, char *command, int total_len)
+{
+	int mode = 0;
+
+	ANDROID_INFO(("set ulb mode (%s) \n", command));
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+	return wl_cfg80211_set_ulb_mode(dev, mode);
+}
+static int
+wl_android_set_ulb_bw(struct net_device *dev, char *command, int total_len)
+{
+	int bw = 0;
+	u8 *pos;
+	char *ifname = NULL;
+	ANDROID_INFO(("set ulb bw (%s) \n", command));
+
+	/*
+	 * For sta/ap: IFNAME=<ifname> DRIVER ULB_BW <bw> ifname
+	 * For p2p:    IFNAME=wlan0 DRIVER ULB_BW <bw> p2p-dev-wlan0
+	 */
+	if (total_len < strlen(CMD_ULB_BW) + 2)
+		return -EINVAL;
+
+	pos = command + strlen(CMD_ULB_BW) + 1;
+	bw = bcm_atoi(pos);
+
+	if ((strlen(pos) >= 5)) {
+		ifname = pos + 2;
+	}
+
+	ANDROID_INFO(("[ULB] ifname:%s ulb_bw:%d \n", ifname, bw));
+	return wl_cfg80211_set_ulb_bw(dev, bw, ifname);
+}
+#endif /* WL11ULB */
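+/* Switch miracast tuning: source mode adjusts mchan_algo, mchan_bw and
+ * ampdu_mpdu and then falls through to sink mode, which disables roaming and
+ * power management. All changes are queued on miracast_resume_list so they
+ * can be reverted when miracast is turned off or an error occurs.
+ */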
+static int
+wl_android_set_miracast(struct net_device *dev, char *command, int total_len)
+{
+	int mode, val;
+	int ret = 0;
+	struct io_cfg config;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	ANDROID_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode));
+
+	if (miracast_cur_mode == mode) {
+		return 0;
+	}
+
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	miracast_cur_mode = MIRACAST_MODE_OFF;
+
+	switch (mode) {
+	case MIRACAST_MODE_SOURCE:
+		/* setting mchan_algo to platform specific value */
+		config.iovar = "mchan_algo";
+
+		ret = wldev_ioctl_get(dev, WLC_GET_BCNPRD, &val, sizeof(int));
+		if (!ret && val > 100) {
+			config.param = 0;
+			ANDROID_ERROR(("%s: Connected station's beacon interval: "
+				"%d and set mchan_algo to %d \n",
+				__FUNCTION__, val, config.param));
+		} else {
+			config.param = MIRACAST_MCHAN_ALGO;
+		}
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret) {
+			goto resume;
+		}
+
+		/* setting mchan_bw to platform specific value */
+		config.iovar = "mchan_bw";
+		config.param = MIRACAST_MCHAN_BW;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret) {
+			goto resume;
+		}
+
+		/* setting ampdu_mpdu to platform specific value */
+		config.iovar = "ampdu_mpdu";
+		config.param = MIRACAST_AMPDU_SIZE;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret) {
+			goto resume;
+		}
+		/* FALLTHROUGH */
+		/* Source mode shares most configurations with sink mode.
+		 * Fall through here to avoid code duplication
+		 */
+	case MIRACAST_MODE_SINK:
+		/* disable internal roaming */
+		config.iovar = "roam_off";
+		config.param = 1;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret) {
+			goto resume;
+		}
+
+		/* turn off PM */
+		ret = wldev_ioctl_get(dev, WLC_GET_PM, &val, sizeof(val));
+		if (ret) {
+			goto resume;
+		}
+
+		if (val != PM_OFF) {
+			val = PM_OFF;
+			config.iovar = NULL;
+			config.ioctl = WLC_GET_PM;
+			config.arg = &val;
+			config.len = sizeof(int);
+			ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+			if (ret) {
+				goto resume;
+			}
+		}
+		break;
+	case MIRACAST_MODE_OFF:
+	default:
+		break;
+	}
+	miracast_cur_mode = mode;
+
+	return 0;
+
+resume:
+	ANDROID_ERROR(("%s: turnoff miracast mode because of err%d\n", __FUNCTION__, ret));
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	return ret;
+}
+#endif
+
+#define NETLINK_OXYGEN     30
+#define AIBSS_BEACON_TIMEOUT	10
+
+static struct sock *nl_sk = NULL;
+
+static void wl_netlink_recv(struct sk_buff *skb)
+{
+	ANDROID_ERROR(("netlink_recv called\n"));
+}
+
+static int wl_netlink_init(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	struct netlink_kernel_cfg cfg = {
+		.input	= wl_netlink_recv,
+	};
+#endif
+
+	if (nl_sk != NULL) {
+		ANDROID_ERROR(("nl_sk already exist\n"));
+		return BCME_ERROR;
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN,
+		0, wl_netlink_recv, NULL, THIS_MODULE);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, THIS_MODULE, &cfg);
+#else
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg);
+#endif
+
+	if (nl_sk == NULL) {
+		ANDROID_ERROR(("nl_sk is not ready\n"));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+static void wl_netlink_deinit(void)
+{
+	if (nl_sk) {
+		netlink_kernel_release(nl_sk);
+		nl_sk = NULL;
+	}
+}
+
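+/* Send a unicast netlink message of 'size' bytes to the given pid over the
+ * NETLINK_OXYGEN socket.
+ */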
+s32
+wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size)
+{
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh = NULL;
+	int ret = -1;
+
+	if (nl_sk == NULL) {
+		ANDROID_ERROR(("nl_sk was not initialized\n"));
+		goto nlmsg_failure;
+	}
+
+	skb = alloc_skb(NLMSG_SPACE(size), GFP_ATOMIC);
+	if (skb == NULL) {
+		ANDROID_ERROR(("failed to allocate memory\n"));
+		goto nlmsg_failure;
+	}
+
+	nlh = nlmsg_put(skb, 0, 0, 0, size, 0);
+	if (nlh == NULL) {
+		ANDROID_ERROR(("failed to build nlmsg, skb_tailroom:%d, nlmsg_total_size:%d\n",
+			skb_tailroom(skb), nlmsg_total_size(size)));
+		dev_kfree_skb(skb);
+		goto nlmsg_failure;
+	}
+
+	memcpy(nlmsg_data(nlh), data, size);
+	nlh->nlmsg_seq = seq;
+	nlh->nlmsg_type = type;
+
+	/* netlink_unicast() takes ownership of the skb and frees it itself. */
+	ret = netlink_unicast(nl_sk, skb, pid, 0);
+	ANDROID_TRACE(("netlink_unicast() pid=%d, ret=%d\n", pid, ret));
+
+nlmsg_failure:
+	return ret;
+}
+
+
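+/* Configure the firmware "mkeep_alive" iovar with keep-alive ID 0 and the
+ * requested period in milliseconds; a zero-length payload requests NULL
+ * keep-alive packets.
+ */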
+int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len)
+{
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt;
+	int ret;
+	uint period_msec = 0;
+	char *buf;
+
+	if (extra == NULL) {
+		 ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__));
+		 return -1;
+	}
+	if (sscanf(extra, "%d", &period_msec) != 1) {
+		 ANDROID_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__));
+		 return -EINVAL;
+	}
+	ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
+
+	memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+
+	mkeep_alive_pkt.period_msec = period_msec;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+	/* Set up keep-alive ID zero for NULL packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+
+	buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+	if (!buf) {
+		ANDROID_ERROR(("%s: buffer alloc failed\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+	ret = wldev_iovar_setbuf(dev, "mkeep_alive", (char *)&mkeep_alive_pkt,
+			WL_MKEEP_ALIVE_FIXED_LEN, buf, WLC_IOCTL_SMLEN, NULL);
+	if (ret < 0)
+		ANDROID_ERROR(("%s:keep_alive set failed:%d\n", __FUNCTION__, ret));
+	else
+		ANDROID_TRACE(("%s:keep_alive set ok\n", __FUNCTION__));
+	kfree(buf);
+	return ret;
+}
+
+#ifdef P2PRESP_WFDIE_SRC
+static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int bytes_written = 0;
+	int only_resp_wfdsrc = 0;
+
+	error = wldev_iovar_getint(dev, "p2p_only_resp_wfdsrc", &only_resp_wfdsrc);
+	if (error) {
+		ANDROID_ERROR(("%s: Failed to get the mode for only_resp_wfdsrc, error = %d\n",
+			__FUNCTION__, error));
+		return -1;
+	}
+
+	bytes_written = snprintf(command, total_len, "%s %d",
+		CMD_P2P_GET_WFDIE_RESP, only_resp_wfdsrc);
+
+	return bytes_written;
+}
+
+static int wl_android_set_wfdie_resp(struct net_device *dev, int only_resp_wfdsrc)
+{
+	int error = 0;
+
+	error = wldev_iovar_setint(dev, "p2p_only_resp_wfdsrc", only_resp_wfdsrc);
+	if (error) {
+		ANDROID_ERROR(("%s: Failed to set only_resp_wfdsrc %d, error = %d\n",
+			__FUNCTION__, only_resp_wfdsrc, error));
+		return -1;
+	}
+
+	return 0;
+}
+#endif /* P2PRESP_WFDIE_SRC */
+
+#ifdef BT_WIFI_HANDOVER
+static int
+wl_tbow_teardown(struct net_device *dev, char *command, int total_len)
+{
+	int err = BCME_OK;
+	char buf[WLC_IOCTL_SMLEN];
+	tbow_setup_netinfo_t netinfo;
+	memset(&netinfo, 0, sizeof(netinfo));
+	netinfo.opmode = TBOW_HO_MODE_TEARDOWN;
+
+	err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo,
+			sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL);
+	if (err < 0) {
+		ANDROID_ERROR(("tbow_doho iovar error %d\n", err));
+		return err;
+	}
+	return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+#ifdef SET_RPS_CPUS
+static int
+wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len)
+{
+	int error, enable;
+
+	enable = command[strlen(CMD_RPSMODE) + 1] - '0';
+	error = dhd_rps_cpus_enable(dev, enable);
+
+#if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE) && defined(WL_CFG80211)
+	if (!error) {
+		void *dhdp = wl_cfg80211_get_dhdp(dev);
+		if (enable) {
+			ANDROID_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
+		} else {
+			ANDROID_TRACE(("%s : clear ack suppress.\n", __FUNCTION__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+	}
+#endif /* DHDTCPACK_SUPPRESS && BCMPCIE && WL_CFG80211 */
+
+	return error;
+}
+#endif /* SET_RPS_CPUS */
+
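+/* Build the link status bitmap (AP VHT/MIMO support, VHT/MIMO in use) from
+ * the current BSS info and the "nrate" rspec, and write it to the command
+ * buffer.
+ */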
+static int wl_android_get_link_status(struct net_device *dev, char *command,
+	int total_len)
+{
+	int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map;
+	uint32 rspec;
+	uint encode, rate, txexp;
+	struct wl_bss_info *bi;
+	int datalen = sizeof(uint32) + sizeof(wl_bss_info_t);
+	char buf[datalen];
+
+	/* get BSS information */
+	*(u32 *) buf = htod32(datalen);
+	error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen);
+	if (unlikely(error)) {
+		ANDROID_ERROR(("Could not get bss info %d\n", error));
+		return -1;
+	}
+
+	bi = (struct wl_bss_info *) (buf + sizeof(uint32));
+
+	for (i = 0; i < ETHER_ADDR_LEN; i++) {
+		if (bi->BSSID.octet[i] > 0) {
+			break;
+		}
+	}
+
+	if (i == ETHER_ADDR_LEN) {
+		ANDROID_TRACE(("No BSSID\n"));
+		return -1;
+	}
+
+	/* check VHT capability at beacon */
+	if (bi->vht_cap) {
+		if (CHSPEC_IS5G(bi->chanspec)) {
+			result |= WL_ANDROID_LINK_AP_VHT_SUPPORT;
+		}
+	}
+
+	/* get a rspec (radio spectrum) rate */
+	error = wldev_iovar_getint(dev, "nrate", &rspec);
+	if (unlikely(error) || rspec == 0) {
+		ANDROID_ERROR(("get link status error (%d)\n", error));
+		return -1;
+	}
+
+	encode = (rspec & WL_RSPEC_ENCODING_MASK);
+	rate = (rspec & WL_RSPEC_RATE_MASK);
+	txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT;
+
+	switch (encode) {
+	case WL_RSPEC_ENCODE_HT:
+		/* check Rx MCS Map for HT */
+		for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+			int8 bitmap = 0xFF;
+			if (i == MAX_STREAMS_SUPPORTED-1) {
+				bitmap = 0x7F;
+			}
+			if (bi->basic_mcs[i] & bitmap) {
+				nss++;
+			}
+		}
+		break;
+	case WL_RSPEC_ENCODE_VHT:
+		/* check Rx MCS Map for VHT */
+		for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+			mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+			if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+				nss++;
+			}
+		}
+		break;
+	}
+
+	/* check MIMO capability with nss in beacon */
+	if (nss > 1) {
+		result |= WL_ANDROID_LINK_AP_MIMO_SUPPORT;
+	}
+
+	single_stream = (encode == WL_RSPEC_ENCODE_RATE) ||
+		((encode == WL_RSPEC_ENCODE_HT) && rate < 8) ||
+		((encode == WL_RSPEC_ENCODE_VHT) &&
+		((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1);
+
+	if (txexp == 0) {
+		if ((rspec & WL_RSPEC_STBC) && single_stream) {
+			stf = OLD_NRATE_STF_STBC;
+		} else {
+			stf = (single_stream) ? OLD_NRATE_STF_SISO : OLD_NRATE_STF_SDM;
+		}
+	} else if (txexp == 1 && single_stream) {
+		stf = OLD_NRATE_STF_CDD;
+	}
+
+	/* check 11ac (VHT) */
+	if (encode == WL_RSPEC_ENCODE_VHT) {
+		if (CHSPEC_IS5G(bi->chanspec)) {
+			result |= WL_ANDROID_LINK_VHT;
+		}
+	}
+
+	/* check MIMO */
+	if (result & WL_ANDROID_LINK_AP_MIMO_SUPPORT) {
+		switch (stf) {
+		case OLD_NRATE_STF_SISO:
+			break;
+		case OLD_NRATE_STF_CDD:
+		case OLD_NRATE_STF_STBC:
+			result |= WL_ANDROID_LINK_MIMO;
+			break;
+		case OLD_NRATE_STF_SDM:
+			if (!single_stream) {
+				result |= WL_ANDROID_LINK_MIMO;
+			}
+			break;
+		}
+	}
+
+	ANDROID_TRACE(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n",
+		__FUNCTION__, result, stf, single_stream, nss));
+
+	bytes_written = sprintf(command, "%s %d", CMD_GET_LINK_STATUS, result);
+
+	return bytes_written;
+}
+
+#ifdef P2P_LISTEN_OFFLOADING
+s32
+wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len)
+{
+	int ret = 0;
+
+	ANDROID_ERROR(("Entry cmd:%s arg_len:%d \n", cmd, len));
+
+	if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) {
+		ret = wl_cfg80211_p2plo_listen_start(dev, buf, len);
+	} else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) {
+		ret = wl_cfg80211_p2plo_listen_stop(dev);
+	} else {
+		ANDROID_ERROR(("Request for Unsupported CMD:%s \n", buf));
+		ret = -EINVAL;
+	}
+	return ret;
+}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#if defined(BCM4359_CHIP) && defined(WL_CFG80211)
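+/* Set the MU-RX beamformee capability ("murx_bfe_cap") and reassociate to
+ * the current BSSID so the change takes effect; only allowed when a single
+ * interface is up.
+ */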
+int
+wl_android_murx_bfe_cap(struct net_device *dev, int val)
+{
+	int err = BCME_OK;
+	int iface_count = wl_cfg80211_iface_count(dev);
+	struct ether_addr bssid;
+	wl_reassoc_params_t params;
+
+	if (iface_count > 1) {
+		ANDROID_ERROR(("murx_bfe_cap change is not allowed when "
+				"there are multiple interfaces\n"));
+		return -EINVAL;
+	}
+	/* Now there is only single interface */
+	err = wldev_iovar_setint(dev, "murx_bfe_cap", val);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Failed to set murx_bfe_cap IOVAR to %d,"
+				"error %d\n", val, err));
+		return err;
+	}
+
+	/* If successful, initiate a reassoc */
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+	if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) {
+		ANDROID_ERROR(("Failed to get bssid, error=%d\n", err));
+		return err;
+	}
+
+	bzero(&params, sizeof(wl_reassoc_params_t));
+	memcpy(&params.bssid, &bssid, ETHER_ADDR_LEN);
+
+	if ((err = wldev_ioctl_set(dev, WLC_REASSOC, &params,
+		sizeof(wl_reassoc_params_t))) < 0) {
+		ANDROID_ERROR(("reassoc failed err:%d \n", err));
+	} else {
+		ANDROID_TRACE(("reassoc issued successfully\n"));
+	}
+
+	return err;
+}
+#endif /* BCM4359_CHIP */
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+int
+wl_android_set_ap_beaconrate(struct net_device *dev, char *command)
+{
+	int rate = 0;
+	char *pos, *token;
+	char *ifname = NULL;
+	int err = BCME_OK;
+
+	/*
+	 * DRIVER SET_AP_BEACONRATE <rate> <ifname>
+	 */
+	pos = command;
+
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+
+	/* Rate */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	rate = bcm_atoi(token);
+
+	/* get the interface name */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	ifname = token;
+
+	ANDROID_TRACE(("rate %d, ifacename %s\n", rate, ifname));
+
+	err = wl_set_ap_beacon_rate(dev, rate, ifname);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Failed to set ap beacon rate to %d, error = %d\n", rate, err));
+	}
+
+	return err;
+}
+
+int wl_android_get_ap_basicrate(struct net_device *dev, char *command, int total_len)
+{
+	char *pos, *token;
+	char *ifname = NULL;
+	int bytes_written = 0;
+	/*
+	 * DRIVER GET_AP_BASICRATE <ifname>
+	 */
+	pos = command;
+
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+
+	/* get the interface name */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	ifname = token;
+
+	ANDROID_TRACE(("ifacename %s\n", ifname));
+
+	bytes_written = wl_get_ap_basic_rate(dev, command, ifname, total_len);
+	if (bytes_written < 1) {
+		ANDROID_ERROR(("Failed to get ap basic rate, error = %d\n", bytes_written));
+		return -EPROTO;
+	}
+
+	return bytes_written;
+
+}
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+int
+wl_android_get_ap_rps(struct net_device *dev, char *command, int total_len)
+{
+	char *pos, *token;
+	char *ifname = NULL;
+	int bytes_written = 0;
+	/*
+	 * DRIVER GET_AP_RPS <ifname>
+	 */
+	pos = command;
+
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+
+	/* get the interface name */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	ifname = token;
+
+	ANDROID_TRACE(("ifacename %s\n", ifname));
+
+	bytes_written = wl_get_ap_rps(dev, command, ifname, total_len);
+	if (bytes_written < 1) {
+		ANDROID_ERROR(("Failed to get rps, error = %d\n", bytes_written));
+		return -EPROTO;
+	}
+
+	return bytes_written;
+
+}
+
+int
+wl_android_set_ap_rps(struct net_device *dev, char *command, int total_len)
+{
+	int enable = 0;
+	char *pos, *token;
+	char *ifname = NULL;
+	int err = BCME_OK;
+
+	/*
+	 * DRIVER SET_AP_RPS <0/1> <ifname>
+	 */
+	pos = command;
+
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+
+	/* Enable */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	enable = bcm_atoi(token);
+
+	/* get the interface name */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	ifname = token;
+
+	ANDROID_TRACE(("enable %d, ifacename %s\n", enable, ifname));
+
+	err = wl_set_ap_rps(dev, enable? TRUE: FALSE, ifname);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Failed to set rps, enable %d, error = %d\n", enable, err));
+	}
+
+	return err;
+}
+
+int
+wl_android_set_ap_rps_params(struct net_device *dev, char *command, int total_len)
+{
+	ap_rps_info_t rps;
+	char *pos, *token;
+	char *ifname = NULL;
+	int err = BCME_OK;
+
+	memset(&rps, 0, sizeof(rps));
+	/*
+	 * DRIVER SET_AP_RPS_PARAMS <pps> <level> <quiettime> <assoccheck> <ifname>
+	 */
+	pos = command;
+
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+
+	/* pps */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	rps.pps = bcm_atoi(token);
+
+	/* level */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	rps.level = bcm_atoi(token);
+
+	/* quiettime */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	rps.quiet_time = bcm_atoi(token);
+
+	/* sta assoc check */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	rps.sta_assoc_check = bcm_atoi(token);
+
+	/* get the interface name */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token)
+		return -EINVAL;
+	ifname = token;
+
+	ANDROID_TRACE(("pps %d, level %d, quiettime %d, sta_assoc_check %d, "
+		"ifacename %s\n", rps.pps, rps.level, rps.quiet_time,
+		rps.sta_assoc_check, ifname));
+
+	err = wl_update_ap_rps_params(dev, &rps, ifname);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Failed to update rps, pps %d, level %d, quiettime %d, "
+			"sta_assoc_check %d, err = %d\n", rps.pps, rps.level, rps.quiet_time,
+			rps.sta_assoc_check, err));
+	}
+
+	return err;
+}
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef SUPPORT_RSSI_LOGGING
+int
+wl_android_get_rssi_per_ant(struct net_device *dev, char *command, int total_len)
+{
+	wl_rssi_ant_mimo_t rssi_ant_mimo;
+	char *ifname = NULL;
+	char *peer_mac = NULL;
+	char *mimo_cmd = "mimo";
+	char *pos, *token;
+	int err = BCME_OK;
+	int bytes_written = 0;
+	bool mimo_rssi = FALSE;
+
+	memset(&rssi_ant_mimo, 0, sizeof(wl_rssi_ant_mimo_t));
+	/*
+	 * STA I/F: DRIVER GET_RSSI_PER_ANT <ifname> <mimo>
+	 * AP/GO I/F: DRIVER GET_RSSI_PER_ANT <ifname> <Peer MAC addr> <mimo>
+	 */
+	pos = command;
+
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+
+	/* get the interface name */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token) {
+		ANDROID_ERROR(("Invalid arguments\n"));
+		return -EINVAL;
+	}
+	ifname = token;
+
+	/* Optional: Check the MIMO RSSI mode or peer MAC address */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (token) {
+		/* Check the MIMO RSSI mode */
+		if (strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) {
+			mimo_rssi = TRUE;
+		} else {
+			peer_mac = token;
+		}
+	}
+
+	/* Optional: Check the MIMO RSSI mode - RSSI sum across antennas */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (token && strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) {
+		mimo_rssi = TRUE;
+	}
+
+	err = wl_get_rssi_per_ant(dev, ifname, peer_mac, &rssi_ant_mimo);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Failed to get RSSI info, err=%d\n", err));
+		return err;
+	}
+
+	/* Parse the results */
+	ANDROID_TRACE(("ifname %s, version %d, count %d, mimo rssi %d\n",
+		ifname, rssi_ant_mimo.version, rssi_ant_mimo.count, mimo_rssi));
+	if (mimo_rssi) {
+		ANDROID_TRACE(("MIMO RSSI: %d\n", rssi_ant_mimo.rssi_sum));
+		bytes_written = snprintf(command, total_len, "%s MIMO %d",
+			CMD_GET_RSSI_PER_ANT, rssi_ant_mimo.rssi_sum);
+	} else {
+		int cnt;
+		bytes_written = snprintf(command, total_len, "%s PER_ANT ", CMD_GET_RSSI_PER_ANT);
+		for (cnt = 0; cnt < rssi_ant_mimo.count; cnt++) {
+			ANDROID_TRACE(("RSSI[%d]: %d\n", cnt, rssi_ant_mimo.rssi_ant[cnt]));
+			bytes_written += snprintf(command + bytes_written,
+				total_len - bytes_written, "%d ",
+				rssi_ant_mimo.rssi_ant[cnt]);
+		}
+	}
+
+	return bytes_written;
+}
+
+int
+wl_android_set_rssi_logging(struct net_device *dev, char *command, int total_len)
+{
+	rssilog_set_param_t set_param;
+	char *pos, *token;
+	int err = BCME_OK;
+
+	memset(&set_param, 0, sizeof(rssilog_set_param_t));
+	/*
+	 * DRIVER SET_RSSI_LOGGING <enable/disable> <RSSI Threshold> <Time Threshold>
+	 */
+	pos = command;
+
+	/* drop command */
+	token = bcmstrtok(&pos, " ", NULL);
+
+	/* enable/disable */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token) {
+		ANDROID_ERROR(("Invalid arguments\n"));
+		return -EINVAL;
+	}
+	set_param.enable = bcm_atoi(token);
+
+	/* RSSI Threshold */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token) {
+		ANDROID_ERROR(("Invalid arguments\n"));
+		return -EINVAL;
+	}
+	set_param.rssi_threshold = bcm_atoi(token);
+
+	/* Time Threshold */
+	token = bcmstrtok(&pos, " ", NULL);
+	if (!token) {
+		ANDROID_ERROR(("Invalid arguments\n"));
+		return -EINVAL;
+	}
+	set_param.time_threshold = bcm_atoi(token);
+
+	ANDROID_TRACE(("enable %d, RSSI threshold %d, Time threshold %d\n", set_param.enable,
+		set_param.rssi_threshold, set_param.time_threshold));
+
+	err = wl_set_rssi_logging(dev, (void *)&set_param);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Failed to configure RSSI logging: enable %d, RSSI Threshold %d,"
+			" Time Threshold %d\n", set_param.enable, set_param.rssi_threshold,
+			set_param.time_threshold));
+	}
+
+	return err;
+}
+
+int
+wl_android_get_rssi_logging(struct net_device *dev, char *command, int total_len)
+{
+	rssilog_get_param_t get_param;
+	int err = BCME_OK;
+	int bytes_written = 0;
+
+	err = wl_get_rssi_logging(dev, (void *)&get_param);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Failed to get RSSI logging info\n"));
+		return BCME_ERROR;
+	}
+
+	ANDROID_TRACE(("report_count %d, enable %d, rssi_threshold %d, time_threshold %d\n",
+		get_param.report_count, get_param.enable, get_param.rssi_threshold,
+		get_param.time_threshold));
+
+	/* Parse the parameter */
+	if (!get_param.enable) {
+		ANDROID_TRACE(("RSSI LOGGING: Feature is disables\n"));
+		bytes_written = snprintf(command, total_len,
+			"%s FEATURE DISABLED\n", CMD_GET_RSSI_LOGGING);
+	} else if (get_param.enable &
+		(RSSILOG_FLAG_FEATURE_SW | RSSILOG_FLAG_REPORT_READY)) {
+		if (!get_param.report_count) {
+			ANDROID_TRACE(("[PASS] RSSI difference across antennas is within"
+				" threshold limits\n"));
+			bytes_written = snprintf(command, total_len, "%s PASS\n",
+				CMD_GET_RSSI_LOGGING);
+		} else {
+			ANDROID_TRACE(("[FAIL] RSSI difference across antennas found "
+				"to be greater than %3d dB\n", get_param.rssi_threshold));
+			ANDROID_TRACE(("[FAIL] RSSI difference check have failed for "
+				"%d out of %d times\n", get_param.report_count,
+				get_param.time_threshold));
+			ANDROID_TRACE(("[FAIL] RSSI difference is being monitored once "
+				"per second, for a %d secs window\n", get_param.time_threshold));
+			bytes_written = snprintf(command, total_len, "%s FAIL - RSSI Threshold "
+				"%d dBm for %d out of %d times\n", CMD_GET_RSSI_LOGGING,
+				get_param.rssi_threshold, get_param.report_count,
+				get_param.time_threshold);
+		}
+	} else {
+		ANDROID_TRACE(("[BUSY] Reprot is not ready\n"));
+		bytes_written = snprintf(command, total_len, "%s BUSY - NOT READY\n",
+			CMD_GET_RSSI_LOGGING);
+	}
+
+	return bytes_written;
+}
+#endif /* SUPPORT_RSSI_LOGGING */
+
+#ifdef SET_PCIE_IRQ_CPU_CORE
+void
+wl_android_set_irq_cpucore(struct net_device *net, int set)
+{
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+	if (!dhdp) {
+		ANDROID_ERROR(("dhd is NULL\n"));
+		return;
+	}
+	dhd_set_irq_cpucore(dhdp, set);
+}
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+void
+wl_android_make_hang_with_reason(struct net_device *dev, const char *string_num)
+{
+	dhd_make_hang_with_reason(dev, string_num);
+}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#ifdef WL_CFG80211
+#ifdef WLMESH
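+/* Set the firmware RSDB (real simultaneous dual band) mode via the
+ * "rsdb_mode" iovar; the interface is brought down and back up around the
+ * change.
+ */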
+static int
+wl_android_set_rsdb_mode(struct net_device *dev, char *command, int total_len)
+{
+	int ret;
+	wl_config_t rsdb_mode_cfg = {-1, 0};
+	char smbuf[WLC_IOCTL_SMLEN];
+	s32 val = 1;
+
+	if (sscanf(command, "%*s %d", &rsdb_mode_cfg.config) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+	DHD_INFO(("%s : RSDB_MODE = %d\n", __FUNCTION__, rsdb_mode_cfg.config));
+
+	ret = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32));
+	if (ret < 0)
+		DHD_ERROR(("WLC_DOWN error %d\n", ret));
+
+	ret = wldev_iovar_setbuf(dev, "rsdb_mode", &rsdb_mode_cfg, sizeof(rsdb_mode_cfg),
+		smbuf, sizeof(smbuf), NULL);
+	if (ret < 0)
+		DHD_ERROR(("%s : set rsdb_mode error=%d\n", __FUNCTION__, ret));
+
+	ret = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(s32));
+	if (ret < 0)
+		DHD_ERROR(("WLC_UP error %d\n", ret));
+
+	return ret;
+}
+#endif /* WLMESH */
+#endif /* WL_CFG80211 */
+
+#ifdef SUPPORT_LQCM
+static int
+wl_android_lqcm_enable(struct net_device *net, int lqcm_enable)
+{
+	int err = 0;
+
+	err = wldev_iovar_setint(net, "lqcm", lqcm_enable);
+	if (err != BCME_OK) {
+		ANDROID_ERROR(("failed to set lqcm enable %d, error = %d\n", lqcm_enable, err));
+		return -EIO;
+	}
+	return err;
+}
+
+static int wl_android_get_lqcm_report(
+	struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written, err = 0;
+	uint32 lqcm_report = 0;
+	uint32 lqcm_enable, tx_lqcm_idx, rx_lqcm_idx;
+
+	err = wldev_iovar_getint(dev, "lqcm", &lqcm_report);
+	if (err != BCME_OK) {
+		ANDROID_ERROR(("failed to get lqcm report, error = %d\n", err));
+		return -EIO;
+	}
+	lqcm_enable = lqcm_report & LQCM_ENAB_MASK;
+	tx_lqcm_idx = (lqcm_report & LQCM_TX_INDEX_MASK) >> LQCM_TX_INDEX_SHIFT;
+	rx_lqcm_idx = (lqcm_report & LQCM_RX_INDEX_MASK) >> LQCM_RX_INDEX_SHIFT;
+
+	ANDROID_ERROR(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
+
+	bytes_written = snprintf(command, total_len, "%s %d",
+		CMD_GET_LQCM_REPORT, lqcm_report);
+
+	return bytes_written;
+}
+#endif /* SUPPORT_LQCM */
+
+int
+wl_android_get_snr(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written, error = 0;
+	s32 snr = 0;
+
+	error = wldev_iovar_getint(dev, "snr", &snr);
+	if (error) {
+		ANDROID_ERROR(("%s: Failed to get SNR %d, error = %d\n",
+			__FUNCTION__, snr, error));
+		return -EIO;
+	}
+
+	bytes_written = snprintf(command, total_len, "snr %d", snr);
+	ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+	return bytes_written;
+}
+#ifdef WLADPS_PRIVATE_CMD
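+/* Enable or disable ADPS (adaptive power save) through dhd_enable_adps();
+ * valid modes are 0 and 1.
+ */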
+static int
+wl_android_set_adps_mode(struct net_device *dev, const char* string_num)
+{
+	int err = 0, adps_mode;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+	adps_mode = bcm_atoi(string_num);
+
+	if ((adps_mode < 0) || (adps_mode > 1)) {
+		ANDROID_ERROR(("%s: Invalid value %d.\n", __FUNCTION__, adps_mode));
+		return -EINVAL;
+	}
+
+	err = dhd_enable_adps(dhdp, adps_mode);
+	if (err != BCME_OK) {
+		ANDROID_ERROR(("failed to set adps mode %d, error = %d\n", adps_mode, err));
+		return -EIO;
+	}
+	return err;
+}
+static int
+wl_android_get_adps_mode(
+	struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written, err = 0;
+	int len;
+	char buf[WLC_IOCTL_SMLEN];
+
+	bcm_iov_buf_t iov_buf;
+	bcm_iov_buf_t *ptr = NULL;
+	wl_adps_params_v1_t *data = NULL;
+
+	uint8 *pdata = NULL;
+	uint8 band, mode = 0;
+
+	memset(&iov_buf, 0, sizeof(iov_buf));
+
+	len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+
+	iov_buf.version = WL_ADPS_IOV_VER;
+	iov_buf.len = sizeof(band);
+	iov_buf.id = WL_ADPS_IOV_MODE;
+
+	pdata = (uint8 *)&iov_buf.data;
+
+	for (band = 1; band <= MAX_BANDS; band++) {
+		pdata[0] = band;
+		err = wldev_iovar_getbuf(dev, "adps", &iov_buf, len,
+			buf, WLC_IOCTL_SMLEN, NULL);
+		if (err != BCME_OK) {
+			ANDROID_ERROR(("%s fail to get adps band %d(%d).\n",
+					__FUNCTION__, band, err));
+			return -EIO;
+		}
+		ptr = (bcm_iov_buf_t *) buf;
+		data = (wl_adps_params_v1_t *) ptr->data;
+		mode = data->mode;
+		if (mode != OFF) {
+			break;
+		}
+	}
+
+	bytes_written = snprintf(command, total_len, "%s %d",
+		CMD_GET_ADPS, mode);
+	return bytes_written;
+}
+#endif /* WLADPS_PRIVATE_CMD */
+
+#ifdef DHD_PKT_LOGGING
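+/* The pktlog helpers below enable/disable the TX, TX-status and RX packet
+ * log filters and rings, manage filter patterns, and report "OK" in the
+ * command buffer on success.
+ */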
+static int
+wl_android_pktlog_filter_enable(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+	dhd_pktlog_filter_t *filter;
+	int err = BCME_OK;
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	filter = dhdp->pktlog->pktlog_filter;
+
+	err = dhd_pktlog_filter_enable(filter, PKTLOG_TXPKT_CASE, TRUE);
+	err = dhd_pktlog_filter_enable(filter, PKTLOG_TXSTATUS_CASE, TRUE);
+	err = dhd_pktlog_filter_enable(filter, PKTLOG_RXPKT_CASE, TRUE);
+
+	if (err == BCME_OK) {
+		bytes_written = snprintf(command, total_len, "OK");
+		ANDROID_ERROR(("%s: pktlog filter enable success\n", __FUNCTION__));
+	} else {
+		ANDROID_ERROR(("%s: pktlog filter enable fail\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_disable(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+	dhd_pktlog_filter_t *filter;
+	int err = BCME_OK;
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	filter = dhdp->pktlog->pktlog_filter;
+
+	err = dhd_pktlog_filter_enable(filter, PKTLOG_TXPKT_CASE, FALSE);
+	err = dhd_pktlog_filter_enable(filter, PKTLOG_TXSTATUS_CASE, FALSE);
+	err = dhd_pktlog_filter_enable(filter, PKTLOG_RXPKT_CASE, FALSE);
+
+	if (err == BCME_OK) {
+		bytes_written = snprintf(command, total_len, "OK");
+		ANDROID_ERROR(("%s: pktlog filter disable success\n", __FUNCTION__));
+	} else {
+		ANDROID_ERROR(("%s: pktlog filter disable fail\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_pattern_enable(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+	dhd_pktlog_filter_t *filter;
+	int err = BCME_OK;
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	filter = dhdp->pktlog->pktlog_filter;
+
+	if (strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE) + 1 > total_len) {
+		return BCME_ERROR;
+	}
+
+	err = dhd_pktlog_filter_pattern_enable(filter,
+			command + strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE) + 1, TRUE);
+
+	if (err == BCME_OK) {
+		bytes_written = snprintf(command, total_len, "OK");
+		ANDROID_ERROR(("%s: pktlog filter pattern enable success\n", __FUNCTION__));
+	} else {
+		ANDROID_ERROR(("%s: pktlog filter pattern enable fail\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_pattern_disable(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+	dhd_pktlog_filter_t *filter;
+	int err = BCME_OK;
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	filter = dhdp->pktlog->pktlog_filter;
+
+	if (strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE) + 1 > total_len) {
+		return BCME_ERROR;
+	}
+
+	err = dhd_pktlog_filter_pattern_enable(filter,
+			command + strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE) + 1, FALSE);
+
+	if (err == BCME_OK) {
+		bytes_written = snprintf(command, total_len, "OK");
+		ANDROID_ERROR(("%s: pktlog filter pattern disable success\n", __FUNCTION__));
+	} else {
+		ANDROID_ERROR(("%s: pktlog filter pattern disable fail\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_add(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+	dhd_pktlog_filter_t *filter;
+	int err = BCME_OK;
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	filter = dhdp->pktlog->pktlog_filter;
+
+	if (strlen(CMD_PKTLOG_FILTER_ADD) + 1 > total_len) {
+		return BCME_ERROR;
+	}
+
+	err = dhd_pktlog_filter_add(filter, command + strlen(CMD_PKTLOG_FILTER_ADD) + 1);
+
+	if (err == BCME_OK) {
+		bytes_written = snprintf(command, total_len, "OK");
+		ANDROID_ERROR(("%s: pktlog filter add success\n", __FUNCTION__));
+	} else {
+		ANDROID_ERROR(("%s: pktlog filter add fail\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_info(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+	dhd_pktlog_filter_t *filter;
+	int err = BCME_OK;
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	filter = dhdp->pktlog->pktlog_filter;
+
+	err = dhd_pktlog_filter_info(filter);
+
+	if (err == BCME_OK) {
+		bytes_written = snprintf(command, total_len, "OK");
+		ANDROID_ERROR(("%s: pktlog filter info success\n", __FUNCTION__));
+	} else {
+		ANDROID_ERROR(("%s: pktlog filter info fail\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_start(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
+		DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
+			__FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
+		return -EINVAL;
+	}
+
+	dhdp->pktlog->tx_pktlog_ring->start = TRUE;
+	dhdp->pktlog->rx_pktlog_ring->start = TRUE;
+
+	bytes_written = snprintf(command, total_len, "OK");
+
+	ANDROID_ERROR(("%s: pktlog start success\n", __FUNCTION__));
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_stop(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
+		DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
+			__FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
+		return -EINVAL;
+	}
+
+	dhdp->pktlog->tx_pktlog_ring->start = FALSE;
+	dhdp->pktlog->rx_pktlog_ring->start = FALSE;
+
+	bytes_written = snprintf(command, total_len, "OK");
+
+	ANDROID_ERROR(("%s: pktlog stop success\n", __FUNCTION__));
+
+	return bytes_written;
+}
+
+static int
+wl_android_pktlog_filter_exist(struct net_device *dev, char *command, int total_len)
+{
+	int bytes_written = 0;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+	dhd_pktlog_filter_t *filter;
+	uint32 id;
+	bool exist = FALSE;
+
+	if (!dhdp || !dhdp->pktlog) {
+		DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+			__FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+		return -EINVAL;
+	}
+
+	filter = dhdp->pktlog->pktlog_filter;
+
+	if (strlen(CMD_PKTLOG_FILTER_EXIST) + 1 > total_len) {
+		return BCME_ERROR;
+	}
+
+	exist = dhd_pktlog_filter_existed(filter, command + strlen(CMD_PKTLOG_FILTER_EXIST) + 1,
+			&id);
+
+	if (exist) {
+		bytes_written = snprintf(command, total_len, "TRUE");
+		ANDROID_ERROR(("%s: pktlog filter pattern id: %d is existed\n", __FUNCTION__, id));
+	} else {
+		bytes_written = snprintf(command, total_len, "FALSE");
+		ANDROID_ERROR(("%s: pktlog filter pattern id: %d is not existed\n", __FUNCTION__, id));
+	}
+
+	return bytes_written;
+}
+#endif /* DHD_PKT_LOGGING */
+
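+/* ioctl entry point for Android "DRIVER" private commands: copy the command
+ * from user space (handling 32-bit compat tasks), dispatch it through
+ * wl_handle_private_cmd() and copy the response back to the caller.
+ */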
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+#define PRIVATE_COMMAND_MAX_LEN	8192
+#define PRIVATE_COMMAND_DEF_LEN	4096
+	int ret = 0;
+	char *command = NULL;
+	int bytes_written = 0;
+	android_wifi_priv_cmd priv_cmd;
+	int buf_size = 0;
+
+	net_os_wake_lock(net);
+
+	if (!capable(CAP_NET_ADMIN)) {
+		ret = -EPERM;
+		goto exit;
+	}
+
+	if (!ifr->ifr_data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+	if (in_compat_syscall())
+#else
+	if (is_compat_task())
+#endif
+	{
+		compat_android_wifi_priv_cmd compat_priv_cmd;
+		if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+			sizeof(compat_android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+		}
+		priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+		priv_cmd.used_len = compat_priv_cmd.used_len;
+		priv_cmd.total_len = compat_priv_cmd.total_len;
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+		}
+	}
+	if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+		ANDROID_ERROR(("%s: buf length invalid:%d\n", __FUNCTION__,
+			priv_cmd.total_len));
+		ret = -EINVAL;
+		goto exit;
+	}
+
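+	/* Allocate at least the default command buffer size, plus one byte for a terminating NUL */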
+	buf_size = max(priv_cmd.total_len, PRIVATE_COMMAND_DEF_LEN);
+	command = kmalloc((buf_size + 1), GFP_KERNEL);
+
+	if (!command)
+	{
+		ANDROID_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+	command[priv_cmd.total_len] = '\0';
+
+	ANDROID_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+	bytes_written = wl_handle_private_cmd(net, command, priv_cmd.total_len);
+	if (bytes_written >= 0) {
+		if ((bytes_written == 0) && (priv_cmd.total_len > 0)) {
+			command[0] = '\0';
+		}
+		if (bytes_written >= priv_cmd.total_len) {
+			ANDROID_ERROR(("%s: err. bytes_written:%d >= total_len:%d\n",
+				__FUNCTION__, bytes_written, priv_cmd.total_len));
+			ret = BCME_BUFTOOSHORT;
+			goto exit;
+		}
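+		/* Include the terminating NUL in the data copied back to userspace */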
+		bytes_written++;
+		priv_cmd.used_len = bytes_written;
+		if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+			ANDROID_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+			ret = -EFAULT;
+		}
+	}
+	else {
+		/* Propagate the error */
+		ret = bytes_written;
+	}
+
+exit:
+	net_os_wake_unlock(net);
+	kfree(command);
+	return ret;
+}
+
+int
+wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len)
+{
+	int bytes_written = 0;
+	android_wifi_priv_cmd priv_cmd;
+
+	bzero(&priv_cmd, sizeof(android_wifi_priv_cmd));
+	priv_cmd.total_len = cmd_len;
+
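+	/*
+	 * Dispatch the private command by matching its leading keyword. Each handler
+	 * writes its reply into 'command' and returns the number of bytes written,
+	 * or a negative/BCME error code on failure.
+	 */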
+	if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+		ANDROID_INFO(("%s, Received regular START command\n", __FUNCTION__));
+#ifdef  BT_OVER_SDIO
+		bytes_written = dhd_net_bus_get(net);
+#else
+		bytes_written = wl_android_wifi_on(net);
+#endif /* BT_OVER_SDIO */
+	}
+	else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+		bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+	}
+
+	if (!g_wifi_on) {
+		ANDROID_ERROR(("%s: Ignore private cmd \"%s\" - iface is down\n",
+			__FUNCTION__, command));
+		return 0;
+	}
+
+	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+#ifdef  BT_OVER_SDIO
+		bytes_written = dhd_net_bus_put(net);
+#else
+		bytes_written = wl_android_wifi_off(net, FALSE);
+#endif /* BT_OVER_SDIO */
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+		wl_cfg80211_set_passive_scan(net, command);
+	}
+	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+		wl_cfg80211_set_passive_scan(net, command);
+	}
+#endif
+	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+		bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+		bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+	}
+#ifdef PKT_FILTER_SUPPORT
+	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 1);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 0);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+	}
+#endif /* PKT_FILTER_SUPPORT */
+	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+		/* TBD: BTCOEXSCAN-START */
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+		/* TBD: BTCOEXSCAN-STOP */
+	}
+	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+#ifdef WL_CFG80211
+		void *dhdp = wl_cfg80211_get_dhdp(net);
+		bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command);
+#else
+#ifdef PKT_FILTER_SUPPORT
+		uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+		if (mode == 1)
+			net_os_enable_packet_filter(net, 0); /* DHCP starts */
+		else
+			net_os_enable_packet_filter(net, 1); /* DHCP ends */
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* WL_CFG80211 */
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+		bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_MAXDTIM_IN_SUSPEND, strlen(CMD_MAXDTIM_IN_SUSPEND)) == 0) {
+		bytes_written = wl_android_set_max_dtim(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+#ifdef DISABLE_SETBAND
+		bytes_written = BCME_DISABLED;
+#else	/* DISABLE_SETBAND */
+		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+		if (dhd_conf_get_band(dhd_get_pub(net)) >= WLC_BAND_AUTO) {
+			printf("%s: Band is fixed in config.txt\n", __FUNCTION__);
+		} else
+			bytes_written = wl_cfg80211_set_if_band(net, band);
+#endif /* DISABLE_SETBAND */
+	}
+#endif
+	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+		bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) {
+		bytes_written = wl_android_set_csa(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) {
+		bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) {
+		bytes_written = wl_android_get_chanspec(net, command, priv_cmd.total_len);
+	}
+#endif /* WL_CFG80211 */
+	/* CUSTOMER_SET_COUNTRY feature is defined only for the GGSM model */
+	else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+		/*
+		 * Usage examples:
+		 * DRIVER COUNTRY US
+		 * DRIVER COUNTRY US/7
+		 */
+		char *country_code = command + strlen(CMD_COUNTRY) + 1;
+		char *rev_info_delim = country_code + 2; /* 2 bytes of country code */
+		int revinfo = -1;
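+		/* An optional revision may follow the two-letter country code (e.g. "US/7"); parse it into revinfo */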
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+		dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+
+		if (dhdp->is_blob) {
+			revinfo = 0;
+		} else
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+		if ((rev_info_delim) &&
+			(strnicmp(rev_info_delim, CMD_COUNTRY_DELIMITER,
+			strlen(CMD_COUNTRY_DELIMITER)) == 0) &&
+			(rev_info_delim + 1)) {
+			revinfo  = bcm_atoi(rev_info_delim + 1);
+		}
+		bytes_written = wldev_set_country(net, country_code, true, true, revinfo);
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef FCC_PWR_LIMIT_2G
+		if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
+			ANDROID_ERROR(("%s: fccpwrlimit2g deactivation is failed\n", __FUNCTION__));
+		} else {
+			ANDROID_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__));
+		}
+#endif /* FCC_PWR_LIMIT_2G */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+	}
+	else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) {
+		bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_ASSOC_CLIENTS,	strlen(CMD_ASSOC_CLIENTS)) == 0) {
+		bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len);
+	}
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef WLTDLS
+	else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) {
+		bytes_written = wl_android_tdls_reset(net);
+	}
+#endif /* WLTDLS */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#ifdef PNO_SUPPORT
+	else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+		bytes_written = dhd_dev_pno_stop_for_ssid(net);
+	}
+#ifndef WL_SCHED_SCAN
+	else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+		bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+	}
+#endif /* !WL_SCHED_SCAN */
+	else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+		int enable = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+		bytes_written = (enable)? 0 : dhd_dev_pno_stop_for_ssid(net);
+	}
+	else if (strnicmp(command, CMD_WLS_BATCHING, strlen(CMD_WLS_BATCHING)) == 0) {
+		bytes_written = wls_parse_batching_cmd(net, command, priv_cmd.total_len);
+	}
+#endif /* PNO_SUPPORT */
+	else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+		bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+#ifdef WLMESH
+	else if (strnicmp(command, CMD_SAE_SET_PASSWORD, strlen(CMD_SAE_SET_PASSWORD)) == 0) {
+		int skip = strlen(CMD_SAE_SET_PASSWORD) + 1;
+		bytes_written = wl_cfg80211_set_sae_password(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+	else if (strnicmp(command, CMD_SET_RSDB_MODE, strlen(CMD_SET_RSDB_MODE)) == 0) {
+		bytes_written = wl_android_set_rsdb_mode(net, command, priv_cmd.total_len);
+	}
+#endif
+#endif /* WL_CFG80211 */
+	else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+		int skip = strlen(CMD_P2P_SET_NOA) + 1;
+		bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#ifdef P2P_LISTEN_OFFLOADING
+	else if (strnicmp(command, CMD_P2P_LISTEN_OFFLOAD, strlen(CMD_P2P_LISTEN_OFFLOAD)) == 0) {
+		u8 *sub_command = strchr(command, ' ');
+		bytes_written = wl_cfg80211_p2plo_offload(net, command, sub_command,
+				sub_command ? strlen(sub_command) : 0);
+	}
+#endif /* P2P_LISTEN_OFFLOADING */
+#if !defined WL_ENABLE_P2P_IF
+	else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
+		bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+		int skip = strlen(CMD_P2P_SET_PS) + 1;
+		bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+	else if (strnicmp(command, CMD_P2P_ECSA, strlen(CMD_P2P_ECSA)) == 0) {
+		int skip = strlen(CMD_P2P_ECSA) + 1;
+		bytes_written = wl_cfg80211_set_p2p_ecsa(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+	else if (strnicmp(command, CMD_P2P_INC_BW, strlen(CMD_P2P_INC_BW)) == 0) {
+		int skip = strlen(CMD_P2P_INC_BW) + 1;
+		bytes_written = wl_cfg80211_increase_p2p_bw(net,
+				command + skip, priv_cmd.total_len - skip);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+		strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+		int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+		bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+			priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+	}
+#endif /* WL_CFG80211 */
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+	else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
+		strlen(CMD_GET_BEST_CHANNELS)) == 0) {
+		bytes_written = wl_cfg80211_get_best_channels(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+	else if (strnicmp(command, CMD_SET_HAPD_AUTO_CHANNEL,
+		strlen(CMD_SET_HAPD_AUTO_CHANNEL)) == 0) {
+		int skip = strlen(CMD_SET_HAPD_AUTO_CHANNEL) + 1;
+		bytes_written = wl_android_set_auto_channel(net, (const char*)command+skip, command,
+			priv_cmd.total_len);
+	}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_SET_LPC
+	else if (strnicmp(command, CMD_HAPD_LPC_ENABLED,
+		strlen(CMD_HAPD_LPC_ENABLED)) == 0) {
+		int skip = strlen(CMD_HAPD_LPC_ENABLED) + 3;
+		wl_android_set_lpc(net, (const char*)command+skip);
+	}
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+	else if (strnicmp(command, CMD_TEST_FORCE_HANG,
+		strlen(CMD_TEST_FORCE_HANG)) == 0) {
+		int skip = strlen(CMD_TEST_FORCE_HANG) + 1;
+		net_os_send_hang_message_reason(net, (const char*)command+skip);
+	}
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+	else if (strnicmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0)
+		bytes_written = wl_android_ch_res_rl(net, true);
+	else if (strnicmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0)
+		bytes_written = wl_android_ch_res_rl(net, false);
+#ifdef WL_RELMCAST
+	else if (strnicmp(command, CMD_SET_RMC_ENABLE, strlen(CMD_SET_RMC_ENABLE)) == 0) {
+		int rmc_enable = *(command + strlen(CMD_SET_RMC_ENABLE) + 1) - '0';
+		bytes_written = wl_android_rmc_enable(net, rmc_enable);
+	}
+	else if (strnicmp(command, CMD_SET_RMC_TXRATE, strlen(CMD_SET_RMC_TXRATE)) == 0) {
+		int rmc_txrate;
+		sscanf(command, "%*s %10d", &rmc_txrate);
+		bytes_written = wldev_iovar_setint(net, "rmc_txrate", rmc_txrate * 2);
+	}
+	else if (strnicmp(command, CMD_SET_RMC_ACTPERIOD, strlen(CMD_SET_RMC_ACTPERIOD)) == 0) {
+		int actperiod;
+		sscanf(command, "%*s %10d", &actperiod);
+		bytes_written = wldev_iovar_setint(net, "rmc_actf_time", actperiod);
+	}
+	else if (strnicmp(command, CMD_SET_RMC_IDLEPERIOD, strlen(CMD_SET_RMC_IDLEPERIOD)) == 0) {
+		int acktimeout;
+		sscanf(command, "%*s %10d", &acktimeout);
+		acktimeout *= 1000;
+		bytes_written = wldev_iovar_setint(net, "rmc_acktmo", acktimeout);
+	}
+	else if (strnicmp(command, CMD_SET_RMC_LEADER, strlen(CMD_SET_RMC_LEADER)) == 0) {
+		int skip = strlen(CMD_SET_RMC_LEADER) + 1;
+		bytes_written = wl_android_rmc_set_leader(net, (const char*)command+skip);
+	}
+	else if (strnicmp(command, CMD_SET_RMC_EVENT,
+		strlen(CMD_SET_RMC_EVENT)) == 0) {
+		bytes_written = wl_android_set_rmc_event(net, command, priv_cmd.total_len);
+	}
+#endif /* WL_RELMCAST */
+	else if (strnicmp(command, CMD_GET_SCSCAN, strlen(CMD_GET_SCSCAN)) == 0) {
+		bytes_written = wl_android_get_singlecore_scan(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SET_SCSCAN, strlen(CMD_SET_SCSCAN)) == 0) {
+		bytes_written = wl_android_set_singlecore_scan(net, command, priv_cmd.total_len);
+	}
+#ifdef TEST_TX_POWER_CONTROL
+	else if (strnicmp(command, CMD_TEST_SET_TX_POWER,
+		strlen(CMD_TEST_SET_TX_POWER)) == 0) {
+		int skip = strlen(CMD_TEST_SET_TX_POWER) + 1;
+		wl_android_set_tx_power(net, (const char*)command+skip);
+	}
+	else if (strnicmp(command, CMD_TEST_GET_TX_POWER,
+		strlen(CMD_TEST_GET_TX_POWER)) == 0) {
+		wl_android_get_tx_power(net, command, priv_cmd.total_len);
+	}
+#endif /* TEST_TX_POWER_CONTROL */
+	else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL,
+		strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) {
+		int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1;
+		wl_android_set_sarlimit_txctrl(net, (const char*)command+skip);
+	}
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+	else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
+		int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
+		wl_android_set_mac_address_filter(net, command+skip);
+	}
+	else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
+		bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len);
+#if defined(BCMFW_ROAM_ENABLE)
+	else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) {
+		bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len);
+	}
+#endif /* BCMFW_ROAM_ENABLE */
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
+		bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
+#ifdef WL11ULB
+	else if (strnicmp(command, CMD_ULB_MODE, strlen(CMD_ULB_MODE)) == 0)
+		bytes_written = wl_android_set_ulb_mode(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_ULB_BW, strlen(CMD_ULB_BW)) == 0)
+		bytes_written = wl_android_set_ulb_bw(net, command, priv_cmd.total_len);
+#endif /* WL11ULB */
+	else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
+		bytes_written = wl_android_set_ibss_beacon_ouidata(net,
+		command, priv_cmd.total_len);
+#endif
+	else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+		int skip = strlen(CMD_KEEP_ALIVE) + 1;
+		bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) {
+		int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
+		bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
+	}
+#endif
+#if defined(WL_VIRTUAL_APSTA)
+	else if (strnicmp(command, CMD_INTERFACE_CREATE, strlen(CMD_INTERFACE_CREATE)) == 0) {
+		char *name = (command + strlen(CMD_INTERFACE_CREATE) +1);
+		ANDROID_INFO(("Creating %s interface\n", name));
+		bytes_written = wl_cfg80211_interface_create(net, name);
+	}
+	else if (strnicmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) {
+		char *name = (command + strlen(CMD_INTERFACE_DELETE) +1);
+		ANDROID_INFO(("Deleting %s interface\n", name));
+		bytes_written = wl_cfg80211_interface_delete(net, name);
+	}
+#endif /* defined (WL_VIRTUAL_APSTA) */
+	else if (strnicmp(command, CMD_GET_LINK_STATUS, strlen(CMD_GET_LINK_STATUS)) == 0) {
+		bytes_written = wl_android_get_link_status(net, command, priv_cmd.total_len);
+	}
+#ifdef P2PRESP_WFDIE_SRC
+	else if (strnicmp(command, CMD_P2P_SET_WFDIE_RESP,
+		strlen(CMD_P2P_SET_WFDIE_RESP)) == 0) {
+		int mode = *(command + strlen(CMD_P2P_SET_WFDIE_RESP) + 1) - '0';
+		bytes_written = wl_android_set_wfdie_resp(net, mode);
+	} else if (strnicmp(command, CMD_P2P_GET_WFDIE_RESP,
+		strlen(CMD_P2P_GET_WFDIE_RESP)) == 0) {
+		bytes_written = wl_android_get_wfdie_resp(net, command, priv_cmd.total_len);
+	}
+#endif /* P2PRESP_WFDIE_SRC */
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_DFS_AP_MOVE, strlen(CMD_DFS_AP_MOVE)) == 0) {
+		char *data = (command + strlen(CMD_DFS_AP_MOVE) +1);
+		bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len);
+	}
+#endif
+#ifdef WBTEXT
+	else if (strnicmp(command, CMD_WBTEXT_ENABLE, strlen(CMD_WBTEXT_ENABLE)) == 0) {
+		bytes_written = wl_android_wbtext(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_WBTEXT_PROFILE_CONFIG,
+			strlen(CMD_WBTEXT_PROFILE_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_config(net, data, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_WEIGHT_CONFIG,
+			strlen(CMD_WBTEXT_WEIGHT_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_weight_config(net, data,
+				command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_TABLE_CONFIG,
+			strlen(CMD_WBTEXT_TABLE_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_table_config(net, data,
+				command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_DELTA_CONFIG,
+			strlen(CMD_WBTEXT_DELTA_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_DELTA_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_delta_config(net, data,
+				command, priv_cmd.total_len);
+	}
+#endif
+	else if (strnicmp(command, CMD_WBTEXT_BTM_TIMER_THRESHOLD,
+			strlen(CMD_WBTEXT_BTM_TIMER_THRESHOLD)) == 0) {
+		bytes_written = wl_cfg80211_wbtext_btm_timer_threshold(net, command,
+			priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_BTM_DELTA,
+			strlen(CMD_WBTEXT_BTM_DELTA)) == 0) {
+		bytes_written = wl_cfg80211_wbtext_btm_delta(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* WBTEXT */
+#ifdef SET_RPS_CPUS
+	else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) {
+		bytes_written = wl_android_set_rps_cpus(net, command, priv_cmd.total_len);
+	}
+#endif /* SET_RPS_CPUS */
+#ifdef WLWFDS
+	else if (strnicmp(command, CMD_ADD_WFDS_HASH, strlen(CMD_ADD_WFDS_HASH)) == 0) {
+		bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 1);
+	}
+	else if (strnicmp(command, CMD_DEL_WFDS_HASH, strlen(CMD_DEL_WFDS_HASH)) == 0) {
+		bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 0);
+	}
+#endif /* WLWFDS */
+#ifdef BT_WIFI_HANDOVER
+	else if (strnicmp(command, CMD_TBOW_TEARDOWN, strlen(CMD_TBOW_TEARDOWN)) == 0) {
+		bytes_written = wl_tbow_teardown(net, command, priv_cmd.total_len);
+	}
+#endif /* BT_WIFI_HANDOVER */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef FCC_PWR_LIMIT_2G
+	else if (strnicmp(command, CMD_GET_FCC_PWR_LIMIT_2G,
+		strlen(CMD_GET_FCC_PWR_LIMIT_2G)) == 0) {
+		bytes_written = wl_android_get_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SET_FCC_PWR_LIMIT_2G,
+		strlen(CMD_SET_FCC_PWR_LIMIT_2G)) == 0) {
+		bytes_written = wl_android_set_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+	}
+#endif /* FCC_PWR_LIMIT_2G */
+	else if (strnicmp(command, CMD_GET_STA_INFO, strlen(CMD_GET_STA_INFO)) == 0) {
+		bytes_written = wl_cfg80211_get_sta_info(net, command, priv_cmd.total_len);
+	}
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+	else if (strnicmp(command, CMD_MURX_BFE_CAP,
+			strlen(CMD_MURX_BFE_CAP)) == 0) {
+#if defined(BCM4359_CHIP) && defined(WL_CFG80211)
+		uint val = *(command + strlen(CMD_MURX_BFE_CAP) + 1) - '0';
+		bytes_written = wl_android_murx_bfe_cap(net, val);
+#else
+		return BCME_UNSUPPORTED;
+#endif /* BCM4359_CHIP */
+	}
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+	else if (strnicmp(command, CMD_GET_AP_BASICRATE, strlen(CMD_GET_AP_BASICRATE)) == 0) {
+		bytes_written = wl_android_get_ap_basicrate(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SET_AP_BEACONRATE, strlen(CMD_SET_AP_BEACONRATE)) == 0) {
+		bytes_written = wl_android_set_ap_beaconrate(net, command);
+	}
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+	else if (strnicmp(command, CMD_SET_AP_RPS_PARAMS, strlen(CMD_SET_AP_RPS_PARAMS)) == 0) {
+		bytes_written = wl_android_set_ap_rps_params(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SET_AP_RPS, strlen(CMD_SET_AP_RPS)) == 0) {
+		bytes_written = wl_android_set_ap_rps(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_GET_AP_RPS, strlen(CMD_GET_AP_RPS)) == 0) {
+		bytes_written = wl_android_get_ap_rps(net, command, priv_cmd.total_len);
+	}
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+#ifdef SUPPORT_RSSI_LOGGING
+	else if (strnicmp(command, CMD_SET_RSSI_LOGGING, strlen(CMD_SET_RSSI_LOGGING)) == 0) {
+		bytes_written = wl_android_set_rssi_logging(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_GET_RSSI_LOGGING, strlen(CMD_GET_RSSI_LOGGING)) == 0) {
+		bytes_written = wl_android_get_rssi_logging(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_GET_RSSI_PER_ANT, strlen(CMD_GET_RSSI_PER_ANT)) == 0) {
+		bytes_written = wl_android_get_rssi_per_ant(net, command, priv_cmd.total_len);
+	}
+#endif /* SUPPORT_RSSI_LOGGING */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+	else if (strnicmp(command, CMD_GET_BSS_INFO, strlen(CMD_GET_BSS_INFO)) == 0) {
+		bytes_written = wl_cfg80211_get_bss_info(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_GET_ASSOC_REJECT_INFO, strlen(CMD_GET_ASSOC_REJECT_INFO))
+			== 0) {
+		bytes_written = wl_cfg80211_get_connect_failed_status(net, command,
+				priv_cmd.total_len);
+	}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+	else if (strnicmp(command, ENABLE_RANDOM_MAC, strlen(ENABLE_RANDOM_MAC)) == 0) {
+		bytes_written = wl_cfg80211_set_random_mac(net, TRUE);
+	} else if (strnicmp(command, DISABLE_RANDOM_MAC, strlen(DISABLE_RANDOM_MAC)) == 0) {
+		bytes_written = wl_cfg80211_set_random_mac(net, FALSE);
+	}
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef WL_NATOE
+	else if (strnicmp(command, CMD_NATOE, strlen(CMD_NATOE)) == 0) {
+		bytes_written = wl_android_process_natoe_cmd(net, command,
+				priv_cmd.total_len);
+	}
+#endif /* WL_NATOE */
+#ifdef CONNECTION_STATISTICS
+	else if (strnicmp(command, CMD_GET_CONNECTION_STATS,
+		strlen(CMD_GET_CONNECTION_STATS)) == 0) {
+		bytes_written = wl_android_get_connection_stats(net, command,
+			priv_cmd.total_len);
+	}
+#endif
+#ifdef DHD_LOG_DUMP
+	else if (strnicmp(command, CMD_NEW_DEBUG_PRINT_DUMP,
+		strlen(CMD_NEW_DEBUG_PRINT_DUMP)) == 0) {
+		dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
+		dhd_schedule_log_dump(dhdp);
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+		dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
+		dhd_bus_mem_dump(dhdp);
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+#ifdef DHD_PKT_LOGGING
+		dhd_schedule_pktlog_dump(dhdp);
+#endif /* DHD_PKT_LOGGING */
+	}
+#endif /* DHD_LOG_DUMP */
+#ifdef SET_PCIE_IRQ_CPU_CORE
+	else if (strnicmp(command, CMD_PCIE_IRQ_CORE, strlen(CMD_PCIE_IRQ_CORE)) == 0) {
+		int set = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
+		wl_android_set_irq_cpucore(net, set);
+	}
+#endif /* SET_PCIE_IRQ_CPU_CORE */
+#if defined(DHD_HANG_SEND_UP_TEST)
+	else if (strnicmp(command, CMD_MAKE_HANG, strlen(CMD_MAKE_HANG)) == 0) {
+		int skip = strlen(CMD_MAKE_HANG) + 1;
+		wl_android_make_hang_with_reason(net, (const char*)command+skip);
+	}
+#endif /* DHD_HANG_SEND_UP_TEST */
+#ifdef SUPPORT_LQCM
+	else if (strnicmp(command, CMD_SET_LQCM_ENABLE, strlen(CMD_SET_LQCM_ENABLE)) == 0) {
+		int lqcm_enable = *(command + strlen(CMD_SET_LQCM_ENABLE) + 1) - '0';
+		bytes_written = wl_android_lqcm_enable(net, lqcm_enable);
+	}
+	else if (strnicmp(command, CMD_GET_LQCM_REPORT,
+		strlen(CMD_GET_LQCM_REPORT)) == 0) {
+		bytes_written = wl_android_get_lqcm_report(net, command,
+		priv_cmd.total_len);
+	}
+#endif
+	else if (strnicmp(command, CMD_GET_SNR, strlen(CMD_GET_SNR)) == 0) {
+		bytes_written = wl_android_get_snr(net, command, priv_cmd.total_len);
+	}
+#ifdef WLADPS_PRIVATE_CMD
+	else if (strnicmp(command, CMD_SET_ADPS, strlen(CMD_SET_ADPS)) == 0) {
+		int skip = strlen(CMD_SET_ADPS) + 1;
+		bytes_written = wl_android_set_adps_mode(net, (const char*)command+skip);
+	}
+	else if (strnicmp(command, CMD_GET_ADPS, strlen(CMD_GET_ADPS)) == 0) {
+		bytes_written = wl_android_get_adps_mode(net, command, priv_cmd.total_len);
+	}
+#endif /* WLADPS_PRIVATE_CMD */
+#ifdef DHD_PKT_LOGGING
+	else if (strnicmp(command, CMD_PKTLOG_FILTER_ENABLE,
+		strlen(CMD_PKTLOG_FILTER_ENABLE)) == 0) {
+		bytes_written = wl_android_pktlog_filter_enable(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_FILTER_DISABLE,
+		strlen(CMD_PKTLOG_FILTER_DISABLE)) == 0) {
+		bytes_written = wl_android_pktlog_filter_disable(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_FILTER_PATTERN_ENABLE,
+		strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE)) == 0) {
+		bytes_written =
+			wl_android_pktlog_filter_pattern_enable(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_FILTER_PATTERN_DISABLE,
+		strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE)) == 0) {
+		bytes_written =
+			wl_android_pktlog_filter_pattern_disable(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_FILTER_ADD, strlen(CMD_PKTLOG_FILTER_ADD)) == 0) {
+		bytes_written = wl_android_pktlog_filter_add(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_FILTER_INFO, strlen(CMD_PKTLOG_FILTER_INFO)) == 0) {
+		bytes_written = wl_android_pktlog_filter_info(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_START, strlen(CMD_PKTLOG_START)) == 0) {
+		bytes_written = wl_android_pktlog_start(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_STOP, strlen(CMD_PKTLOG_STOP)) == 0) {
+		bytes_written = wl_android_pktlog_stop(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PKTLOG_FILTER_EXIST, strlen(CMD_PKTLOG_FILTER_EXIST)) == 0) {
+		bytes_written = wl_android_pktlog_filter_exist(net, command, priv_cmd.total_len);
+	}
+#endif /* DHD_PKT_LOGGING */
+#if defined(STAT_REPORT)
+	else if (strnicmp(command, CMD_STAT_REPORT_GET_START,
+		strlen(CMD_STAT_REPORT_GET_START)) == 0) {
+		bytes_written = wl_android_stat_report_get_start(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_STAT_REPORT_GET_NEXT,
+		strlen(CMD_STAT_REPORT_GET_NEXT)) == 0) {
+		bytes_written = wl_android_stat_report_get_next(net, command, priv_cmd.total_len);
+	}
+#endif /* STAT_REPORT */
+	else if (wl_android_ext_priv_cmd(net, command, priv_cmd.total_len, &bytes_written) == 0) {
+	}
+	else {
+		ANDROID_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+		bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+	}
+
+	return bytes_written;
+}
+
+int wl_android_init(void)
+{
+	int ret = 0;
+
+#if defined(ENABLE_INSMOD_NO_FW_LOAD) || defined(BUS_POWER_RESTORE)
+	dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD || BUS_POWER_RESTORE */
+	if (!iface_name[0]) {
+		memset(iface_name, 0, IFNAMSIZ);
+		bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+	}
+
+	wl_netlink_init();
+
+	return ret;
+}
+
+int wl_android_exit(void)
+{
+	int ret = 0;
+	struct io_cfg *cur, *q;
+
+	wl_netlink_deinit();
+
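+	/* Free any configs still queued on the miracast resume list */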
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+		list_del(&cur->list);
+		kfree(cur);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	return ret;
+}
+
+void wl_android_post_init(void)
+{
+
+#ifdef ENABLE_4335BT_WAR
+	bcm_bt_unlock(lock_cookie_wifi);
+	printk("%s: btlock released\n", __FUNCTION__);
+#endif /* ENABLE_4335BT_WAR */
+
+	if (!dhd_download_fw_on_driverload)
+		g_wifi_on = FALSE;
+}
+
+int wl_fatal_error(void * wl, int rc)
+{
+	return FALSE;
+}
+
+#if defined(BT_OVER_SDIO)
+void
+wl_android_set_wifi_on_flag(bool enable)
+{
+	g_wifi_on = enable;
+}
+#endif /* BT_OVER_SDIO */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 0000000..5abe79a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.h
@@ -0,0 +1,290 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_android.h 607319 2015-12-18 14:16:55Z $
+ */
+
+#ifndef _wl_android_
+#define _wl_android_
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
+ * automatically
+ */
+#if defined(BT_WIFI_HANDOVER)
+#define WL_GENL
+#endif
+
+typedef struct _android_wifi_priv_cmd {
+    char *buf;
+    int used_len;
+    int total_len;
+} android_wifi_priv_cmd;
+
+#ifdef CONFIG_COMPAT
+typedef struct _compat_android_wifi_priv_cmd {
+    compat_caddr_t buf;
+    int used_len;
+    int total_len;
+} compat_android_wifi_priv_cmd;
+#endif /* CONFIG_COMPAT */
+
+/**
+ * Android platform-dependent functions. Feel free to add Android-specific functions here
+ * (save for the macros, which live in dhd). Please do NOT declare functions that are NOT
+ * exposed to dhd or cfg; define them as static in wl_android.c instead.
+ */
+
+/* message levels */
+#define ANDROID_ERROR_LEVEL	0x0001
+#define ANDROID_TRACE_LEVEL	0x0002
+#define ANDROID_INFO_LEVEL	0x0004
+#define ANDROID_EVENT_LEVEL	0x0008
+
+#define ANDROID_MSG(x) \
+	do { \
+		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+			printk(KERN_ERR "ANDROID-MSG) ");	\
+			printk x; \
+		} \
+	} while (0)
+#define ANDROID_ERROR(x) \
+	do { \
+		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+			printk(KERN_ERR "ANDROID-ERROR) ");	\
+			printk x; \
+		} \
+	} while (0)
+#define ANDROID_TRACE(x) \
+	do { \
+		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+			printk(KERN_ERR "ANDROID-TRACE) ");	\
+			printk x; \
+		} \
+	} while (0)
+#define ANDROID_INFO(x) \
+	do { \
+		if (android_msg_level & ANDROID_INFO_LEVEL) { \
+			printk(KERN_ERR "ANDROID-INFO) ");	\
+			printk x; \
+		} \
+	} while (0)
+#define ANDROID_EVENT(x) \
+	do { \
+		if (android_msg_level & ANDROID_EVENT_LEVEL) { \
+			printk(KERN_ERR "ANDROID-EVENT) ");	\
+			printk x; \
+		} \
+	} while (0)
+
+/**
+ * wl_android_init is called from the module init function (currently dhd_module_init);
+ * similarly, wl_android_exit is called from the module exit function (currently dhd_module_cleanup).
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+void wl_android_post_init(void);
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev, bool on_failure);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+int wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len);
+
+s32 wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size);
+#ifdef WL_EXT_IAPSTA
+int wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx);
+int wl_ext_iapsta_attach_name(struct net_device *net, int ifidx);
+int wl_ext_iapsta_dettach_netdev(struct net_device *net, int ifidx);
+u32 wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel);
+int wl_ext_iapsta_alive_preinit(struct net_device *dev);
+int wl_ext_iapsta_alive_postinit(struct net_device *dev);
+int wl_ext_iapsta_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+int wl_ext_iapsta_attach(dhd_pub_t *pub);
+void wl_ext_iapsta_dettach(dhd_pub_t *pub);
+bool wl_ext_check_mesh_creating(struct net_device *net);
+extern int op_mode;
+#endif
+typedef struct bcol_gtk_para {
+	int enable;
+	int ptk_len;
+	char ptk[64];
+	char replay[8];
+} bcol_gtk_para_t;
+int wl_android_ext_priv_cmd(struct net_device *net, char *command, int total_len,
+	int *bytes_written);
+enum wl_ext_status {
+	WL_EXT_STATUS_DISCONNECTING = 0,
+	WL_EXT_STATUS_DISCONNECTED,
+	WL_EXT_STATUS_SCAN,
+	WL_EXT_STATUS_CONNECTING,
+	WL_EXT_STATUS_CONNECTED,
+	WL_EXT_STATUS_ADD_KEY,
+	WL_EXT_STATUS_AP_ENABLED,
+	WL_EXT_STATUS_DELETE_STA,
+	WL_EXT_STATUS_STA_DISCONNECTED,
+	WL_EXT_STATUS_STA_CONNECTED,
+	WL_EXT_STATUS_AP_DISABLED
+};
+typedef struct wl_conn_info {
+	struct net_device *dev;
+	dhd_pub_t *dhd;
+	uint8 bssidx;
+	wlc_ssid_t ssid;
+	struct ether_addr bssid;
+	uint16 channel;
+	struct delayed_work pm_enable_work;
+	struct mutex pm_sync;
+} wl_conn_info_t;
+#if defined(WL_WIRELESS_EXT)
+s32 wl_ext_connect(struct net_device *dev, wl_conn_info_t *conn_info);
+void wl_ext_pm_work_handler(struct work_struct * work);
+void wl_ext_add_remove_pm_enable_work(struct wl_conn_info *conn_info, bool add);
+#endif /* defined(WL_WIRELESS_EXT) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len))
+#endif
+
+/* hostap mac mode */
+#define MACLIST_MODE_DISABLED   0
+#define MACLIST_MODE_DENY       1
+#define MACLIST_MODE_ALLOW      2
+
+/* max number of assoc list */
+#define MAX_NUM_OF_ASSOCLIST    64
+
+/* Bandwidth */
+#define WL_CH_BANDWIDTH_20MHZ 20
+#define WL_CH_BANDWIDTH_40MHZ 40
+#define WL_CH_BANDWIDTH_80MHZ 80
+/* max number of entries in the MAC filter list;
+ * restricted to 10 because the maximum cmd string size is 255
+ */
+#define MAX_NUM_MAC_FILT        10
+
+int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
+
+/* terence:
+ * BSSCACHE: Cache bss list
+ * RSSAVG: Average RSSI of BSS list
+ * RSSIOFFSET: RSSI offset
+ * SORT_BSS_BY_RSSI: Sort BSS by RSSI
+ */
+//#define BSSCACHE
+//#define RSSIAVG
+//#define RSSIOFFSET
+//#define RSSIOFFSET_NEW
+//#define SORT_BSS_BY_RSSI
+
+#define RSSI_MAXVAL -2
+#define RSSI_MINVAL -200
+
+#if defined(ESCAN_RESULT_PATCH)
+#define REPEATED_SCAN_RESULT_CNT	2
+#else
+#define REPEATED_SCAN_RESULT_CNT	1
+#endif
+
+#if defined(RSSIAVG) || defined(RSSIOFFSET)
+extern int g_wifi_on;
+#endif
+
+#if defined(RSSIAVG)
+#define RSSIAVG_LEN (4*REPEATED_SCAN_RESULT_CNT)
+#define RSSICACHE_TIMEOUT 15
+
+typedef struct wl_rssi_cache {
+	struct wl_rssi_cache *next;
+	int dirty;
+	struct timeval tv;
+	struct ether_addr BSSID;
+	int16 RSSI[RSSIAVG_LEN];
+} wl_rssi_cache_t;
+
+typedef struct wl_rssi_cache_ctrl {
+	wl_rssi_cache_t *m_cache_head;
+} wl_rssi_cache_ctrl_t;
+
+void wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl);
+void wl_delete_dirty_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl);
+void wl_delete_disconnected_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, u8 *bssid);
+void wl_reset_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl);
+void wl_update_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, wl_scan_results_t *ss_list);
+int wl_update_connected_rssi_cache(struct net_device *net, wl_rssi_cache_ctrl_t *rssi_cache_ctrl, int *rssi_avg);
+int16 wl_get_avg_rssi(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, void *addr);
+#endif
+
+#if defined(RSSIOFFSET)
+#define RSSI_OFFSET	5
+#if defined(RSSIOFFSET_NEW)
+#define RSSI_OFFSET_MAXVAL -80
+#define RSSI_OFFSET_MINVAL -94
+#define RSSI_OFFSET_INTVAL ((RSSI_OFFSET_MAXVAL-RSSI_OFFSET_MINVAL)/RSSI_OFFSET)
+#endif
+#define BCM4330_CHIP_ID		0x4330
+#define BCM4330B2_CHIP_REV      4
+int wl_update_rssi_offset(struct net_device *net, int rssi);
+#endif
+
+#if defined(BSSCACHE)
+#define BSSCACHE_TIMEOUT	15
+
+typedef struct wl_bss_cache {
+	struct wl_bss_cache *next;
+	int dirty;
+	struct timeval tv;
+	wl_scan_results_t results;
+} wl_bss_cache_t;
+
+typedef struct wl_bss_cache_ctrl {
+	wl_bss_cache_t *m_cache_head;
+} wl_bss_cache_ctrl_t;
+
+void wl_free_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+void wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+void wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, u8 *bssid);
+void wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#if defined(RSSIAVG)
+	wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif
+	wl_scan_results_t *ss_list);
+void wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl);
+#endif
+int wl_ext_get_best_channel(struct net_device *net,
+#if defined(BSSCACHE)
+	wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#else
+	struct wl_scan_results *bss_list,
+#endif
+	int ioctl_ver, int *best_2g_ch, int *best_5g_ch
+);
+#endif /* _wl_android_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_android_ext.c b/drivers/net/wireless/bcmdhd/wl_android_ext.c
new file mode 100644
index 0000000..951a27b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android_ext.c
@@ -0,0 +1,4597 @@
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+
+#include <wl_android.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+#include <linux/wireless.h>
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_config.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
+
+#if defined(WL_WIRELESS_EXT)
+#define WL_PM_ENABLE_TIMEOUT 10000
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop")
+#else
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member);
+#endif /* STRICT_GCC_WARNINGS */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifndef WL_CFG80211
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+#define IEEE80211_BAND_2GHZ 0
+#define IEEE80211_BAND_5GHZ 1
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 		20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 	320
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 	400
+#endif
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */
+#endif /* IW_CUSTOM_MAX */
+
+#define CMD_CHANNEL				"CHANNEL"
+#define CMD_CHANNELS			"CHANNELS"
+#define CMD_ROAM_TRIGGER		"ROAM_TRIGGER"
+#define CMD_KEEP_ALIVE			"KEEP_ALIVE"
+#define CMD_PM					"PM"
+#define CMD_MONITOR				"MONITOR"
+#define CMD_SET_SUSPEND_BCN_LI_DTIM		"SET_SUSPEND_BCN_LI_DTIM"
+
+#ifdef WL_EXT_IAPSTA
+#include <net/rtnetlink.h>
+#define CMD_IAPSTA_INIT			"IAPSTA_INIT"
+#define CMD_IAPSTA_CONFIG		"IAPSTA_CONFIG"
+#define CMD_IAPSTA_ENABLE		"IAPSTA_ENABLE"
+#define CMD_IAPSTA_DISABLE		"IAPSTA_DISABLE"
+#define CMD_ISAM_INIT			"ISAM_INIT"
+#define CMD_ISAM_CONFIG			"ISAM_CONFIG"
+#define CMD_ISAM_ENABLE			"ISAM_ENABLE"
+#define CMD_ISAM_DISABLE		"ISAM_DISABLE"
+#define CMD_ISAM_STATUS			"ISAM_STATUS"
+#ifdef PROP_TXSTATUS
+#ifdef PROP_TXSTATUS_VSDB
+#include <dhd_wlfc.h>
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+#endif
+#endif
+#ifdef IDHCP
+#define CMD_DHCPC_ENABLE	"DHCPC_ENABLE"
+#define CMD_DHCPC_DUMP		"DHCPC_DUMP"
+#endif
+#define CMD_AUTOCHANNEL		"AUTOCHANNEL"
+#define CMD_WL		"WL"
+
+int wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+	int ret;
+
+	ret = wldev_ioctl(dev, cmd, arg, len, set);
+	if (ret)
+		ANDROID_ERROR(("%s: cmd=%d ret=%d\n", __FUNCTION__, cmd, ret));
+	return ret;
+}
+
+int wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val)
+{
+	int ret;
+
+	ret = wldev_iovar_getint(dev, iovar, val);
+	if (ret)
+		ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret));
+
+	return ret;
+}
+
+int wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val)
+{
+	int ret;
+
+	ret = wldev_iovar_setint(dev, iovar, val);
+	if (ret)
+		ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret));
+
+	return ret;
+}
+
+int wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	int ret;
+
+	ret = wldev_iovar_getbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+	if (ret != 0)
+		ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret));
+
+	return ret;
+}
+
+int wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	int ret;
+
+	ret = wldev_iovar_setbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+	if (ret != 0)
+		ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret));
+
+	return ret;
+}
+
+static int wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx,
+	struct mutex* buf_sync)
+{
+	int ret;
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, iovar_name, param, paramlen,
+		buf, buflen, bsscfg_idx, buf_sync);
+	if (ret < 0)
+		ANDROID_ERROR(("%s: iovar_name=%s ret=%d\n", __FUNCTION__, iovar_name, ret));
+
+	return ret;
+}
+
+#ifdef WL_EXT_IAPSTA
+typedef enum IF_STATE {
+	IF_STATE_INIT = 1,
+	IF_STATE_DISALBE,
+	IF_STATE_ENABLE
+} if_state_t;
+
+typedef enum APSTAMODE {
+	ISTAONLY_MODE = 1,
+	IAPONLY_MODE,
+	ISTAAP_MODE,
+	ISTAGO_MODE,
+	ISTASTA_MODE,
+	IDUALAP_MODE,
+	ISTAAPAP_MODE,
+	IMESHONLY_MODE,
+	IMESHSTA_MODE,
+	IMESHAP_MODE,
+	IMESHAPSTA_MODE,
+	IMESHAPAP_MODE
+} apstamode_t;
+
+typedef enum IFMODE {
+	ISTA_MODE = 1,
+	IAP_MODE,
+	IMESH_MODE
+} ifmode_t;
+
+typedef enum BGNMODE {
+	IEEE80211B = 1,
+	IEEE80211G,
+	IEEE80211BG,
+	IEEE80211BGN,
+	IEEE80211BGNAC
+} bgnmode_t;
+
+typedef enum AUTHMODE {
+	AUTH_OPEN,
+	AUTH_SHARED,
+	AUTH_WPAPSK,
+	AUTH_WPA2PSK,
+	AUTH_WPAWPA2PSK,
+	AUTH_SAE
+} authmode_t;
+
+typedef enum ENCMODE {
+	ENC_NONE,
+	ENC_WEP,
+	ENC_TKIP,
+	ENC_AES,
+	ENC_TKIPAES
+} encmode_t;
+
+enum wl_if_list {
+	IF_PIF,
+	IF_VIF,
+	IF_VIF2,
+	MAX_IF_NUM
+};
+
+typedef enum WL_PRIO {
+	PRIO_AP,
+	PRIO_MESH,
+	PRIO_STA
+} wl_prio_t;
+
+typedef struct wl_if_info {
+	struct net_device *dev;
+	if_state_t ifstate;
+	ifmode_t ifmode;
+	char prefix;
+	wl_prio_t prio;
+	int ifidx;
+	uint8 bssidx;
+	char ifname[IFNAMSIZ+1];
+	char ssid[DOT11_MAX_SSID_LEN];
+	struct ether_addr bssid;
+	bgnmode_t bgnmode;
+	int hidden;
+	int maxassoc;
+	uint16 channel;
+	authmode_t amode;
+	encmode_t emode;
+	char key[100];
+} wl_if_info_t;
+
+#define CSA_FW_BIT		(1<<0)
+#define CSA_DRV_BIT		(1<<1)
+
+typedef struct wl_apsta_params {
+	struct wl_if_info if_info[MAX_IF_NUM];
+	struct dhd_pub *dhd;
+	int ioctl_ver;
+	bool init;
+	bool rsdb;
+	bool vsdb;
+	uint csa;
+	apstamode_t apstamode;
+	bool netif_change;
+	bool mesh_creating;
+	wait_queue_head_t netif_change_event;
+} wl_apsta_params_t;
+
+static int wl_ext_enable_iface(struct net_device *dev, char *ifname);
+#endif
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_ext_chspec_to_legacy(chanspec_t chspec)
+{
+	chanspec_t lchspec;
+
+	if (wf_chspec_malformed(chspec)) {
+		ANDROID_ERROR(("wl_ext_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	/* get the channel number */
+	lchspec = CHSPEC_CHANNEL(chspec);
+
+	/* convert the band */
+	if (CHSPEC_IS2G(chspec)) {
+		lchspec |= WL_LCHANSPEC_BAND_2G;
+	} else {
+		lchspec |= WL_LCHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (CHSPEC_IS20(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_20;
+		lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+	} else if (CHSPEC_IS40(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_40;
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+			lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+		} else {
+			lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+		}
+	} else {
+		/* cannot express the bandwidth */
+		char chanbuf[CHANSPEC_STR_LEN];
+		ANDROID_ERROR((
+		        "wl_ext_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+		        "to pre-11ac format\n",
+		        wf_chspec_ntoa(chspec, chanbuf), chspec));
+		return INVCHANSPEC;
+	}
+
+	return lchspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_ext_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec)
+{
+	if (ioctl_ver == 1) {
+		chanspec = wl_ext_chspec_to_legacy(chanspec);
+		if (chanspec == INVCHANSPEC) {
+			return chanspec;
+		}
+	}
+	chanspec = htodchanspec(chanspec);
+
+	return chanspec;
+}
+
+static void
+wl_ext_ch_to_chanspec(int ioctl_ver, int ch,
+	struct wl_join_params *join_params, size_t *join_params_size)
+{
+	chanspec_t chanspec = 0;
+
+	if (ch != 0) {
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		join_params->params.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+			wl_ext_chspec_host_to_driver(ioctl_ver,
+				join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num =
+			htod32(join_params->params.chanspec_num);
+		ANDROID_TRACE(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+			join_params->params.chanspec_list[0],
+			join_params->params.chanspec_num));
+	}
+}
+
+#if defined(WL_EXT_IAPSTA) || defined(WL_CFG80211) || defined(WL_ESCAN)
+static chanspec_t
+wl_ext_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+	chanspec_t chspec;
+
+	/* get the channel number */
+	chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+	/* convert the band */
+	if (LCHSPEC_IS2G(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BAND_2G;
+	} else {
+		chspec |= WL_CHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (LCHSPEC_IS20(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BW_20;
+	} else {
+		chspec |= WL_CHANSPEC_BW_40;
+		if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+			chspec |= WL_CHANSPEC_CTL_SB_L;
+		} else {
+			chspec |= WL_CHANSPEC_CTL_SB_U;
+		}
+	}
+
+	if (wf_chspec_malformed(chspec)) {
+		ANDROID_ERROR(("wl_ext_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	return chspec;
+}
+
+static chanspec_t
+wl_ext_chspec_driver_to_host(int ioctl_ver, chanspec_t chanspec)
+{
+	chanspec = dtohchanspec(chanspec);
+	if (ioctl_ver == 1) {
+		chanspec = wl_ext_chspec_from_legacy(chanspec);
+	}
+
+	return chanspec;
+}
+#endif
+
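+/*
+ * Query WLC_GET_VERSION to learn whether the firmware uses the legacy (v1)
+ * or the current ioctl/chanspec format; the result drives chanspec conversion.
+ */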
+static int
+wl_ext_get_ioctl_ver(struct net_device *dev, int *ioctl_ver)
+{
+	int ret = 0;
+	s32 val = 0;
+
+	val = 1;
+	ret = wl_ext_ioctl(dev, WLC_GET_VERSION, &val, sizeof(val), 0);
+	if (ret) {
+		return ret;
+	}
+	val = dtoh32(val);
+	if (val != WLC_IOCTL_VERSION && val != 1) {
+		ANDROID_ERROR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+			val, WLC_IOCTL_VERSION));
+		return BCME_VERSION;
+	}
+	*ioctl_ver = val;
+
+	return ret;
+}
+
+static int
+wl_ext_set_chanspec(struct net_device *dev, int ioctl_ver,
+	uint16 channel, chanspec_t *ret_chspec)
+{
+	s32 _chan = channel;
+	chanspec_t chspec = 0;
+	chanspec_t fw_chspec = 0;
+	u32 bw = WL_CHANSPEC_BW_20;
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	uint band;
+
+	if (_chan <= CH_MAX_2G_CHANNEL)
+		band = IEEE80211_BAND_2GHZ;
+	else
+		band = IEEE80211_BAND_5GHZ;
+
+	if (band == IEEE80211_BAND_5GHZ) {
+		param.band = WLC_BAND_5G;
+		err = wl_ext_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		if (err) {
+			if (err != BCME_UNSUPPORTED) {
+				ANDROID_ERROR(("bw_cap failed, %d\n", err));
+				return err;
+			} else {
+				err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+				if (bw_cap != WLC_N_BW_20ALL)
+					bw = WL_CHANSPEC_BW_40;
+			}
+		} else {
+			if (WL_BW_CAP_80MHZ(iovar_buf[0]))
+				bw = WL_CHANSPEC_BW_80;
+			else if (WL_BW_CAP_40MHZ(iovar_buf[0]))
+				bw = WL_CHANSPEC_BW_40;
+			else
+				bw = WL_CHANSPEC_BW_20;
+
+		}
+	}
+	else if (band == IEEE80211_BAND_2GHZ)
+		bw = WL_CHANSPEC_BW_20;
+
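+	/* Build a chanspec at the selected bandwidth; if it is invalid or rejected by
+	 * the firmware, step the bandwidth down (80 -> 40 -> 20 MHz) and retry.
+	 */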
+set_channel:
+	chspec = wf_channel2chspec(_chan, bw);
+	if (wf_chspec_valid(chspec)) {
+		fw_chspec = wl_ext_chspec_host_to_driver(ioctl_ver, chspec);
+		if (fw_chspec != INVCHANSPEC) {
+			if ((err = wl_ext_iovar_setint(dev, "chanspec", fw_chspec)) == BCME_BADCHAN) {
+				if (bw == WL_CHANSPEC_BW_80)
+					goto change_bw;
+				wl_ext_ioctl(dev, WLC_SET_CHANNEL, &_chan, sizeof(_chan), 1);
+				ANDROID_MSG(("%s: channel %d\n", __FUNCTION__, _chan));
+			} else if (err) {
+				ANDROID_ERROR(("%s: failed to set chanspec error %d\n",
+					__FUNCTION__, err));
+			} else
+				ANDROID_MSG(("%s: %s channel %d, 0x%x\n", __FUNCTION__,
+					dev->name, channel, chspec));
+		} else {
+			ANDROID_ERROR(("%s: failed to convert host chanspec to fw chanspec\n",
+				__FUNCTION__));
+			err = BCME_ERROR;
+		}
+	} else {
+change_bw:
+		if (bw == WL_CHANSPEC_BW_80)
+			bw = WL_CHANSPEC_BW_40;
+		else if (bw == WL_CHANSPEC_BW_40)
+			bw = WL_CHANSPEC_BW_20;
+		else
+			bw = 0;
+		if (bw)
+			goto set_channel;
+		ANDROID_ERROR(("%s: Invalid chanspec 0x%x\n", __FUNCTION__, chspec));
+		err = BCME_ERROR;
+	}
+	*ret_chspec = fw_chspec;
+
+	return err;
+}
+
+int
+wl_ext_channel(struct net_device *dev, char* command, int total_len)
+{
+	int ret;
+	int channel=0;
+	channel_info_t ci;
+	int bytes_written = 0;
+	chanspec_t fw_chspec;
+	int ioctl_ver = 0;
+
+	ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+
+	sscanf(command, "%*s %d", &channel);
+
+	if (channel > 0) {
+		wl_ext_get_ioctl_ver(dev, &ioctl_ver);
+		ret = wl_ext_set_chanspec(dev, ioctl_ver, channel, &fw_chspec);
+	} else {
+		if (!(ret = wl_ext_ioctl(dev, WLC_GET_CHANNEL, &ci,
+				sizeof(channel_info_t), FALSE))) {
+			ANDROID_TRACE(("hw_channel %d\n", ci.hw_channel));
+			ANDROID_TRACE(("target_channel %d\n", ci.target_channel));
+			ANDROID_TRACE(("scan_channel %d\n", ci.scan_channel));
+			bytes_written = snprintf(command, sizeof(channel_info_t)+2,
+				"channel %d", ci.hw_channel);
+			ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+			ret = bytes_written;
+		}
+	}
+
+	return ret;
+}
+
+int
+wl_ext_channels(struct net_device *dev, char* command, int total_len)
+{
+	int ret, i;
+	int bytes_written = -1;
+	u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	wl_uint32_list_t *list;
+
+	ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+
+	memset(valid_chan_list, 0, sizeof(valid_chan_list));
+	list = (wl_uint32_list_t *)(void *) valid_chan_list;
+	list->count = htod32(WL_NUMCHANNELS);
+	ret = wl_ext_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
+		sizeof(valid_chan_list), 0);
+	if (ret<0) {
+		ANDROID_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, ret));
+	} else {
+		bytes_written = snprintf(command, total_len, "channels");
+		for (i = 0; i < dtoh32(list->count); i++) {
+			bytes_written += snprintf(command+bytes_written, total_len, " %d",
+				dtoh32(list->element[i]));
+		}
+		ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+		ret = bytes_written;
+	}
+
+	return ret;
+}
+
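+/*
+ * "ROAM_TRIGGER <dBm>": a non-zero value sets the roam trigger for all bands;
+ * zero (or no value) reads back the current 2.4 GHz and 5 GHz triggers.
+ */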
+int
+wl_ext_roam_trigger(struct net_device *dev, char* command, int total_len)
+{
+	int ret = 0;
+	int roam_trigger[2] = {0, 0};
+	int trigger[2]= {0, 0};
+	int bytes_written=-1;
+
+	sscanf(command, "%*s %10d", &roam_trigger[0]);
+
+	if (roam_trigger[0]) {
+		roam_trigger[1] = WLC_BAND_ALL;
+		ret = wl_ext_ioctl(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+			sizeof(roam_trigger), 1);
+	} else {
+		roam_trigger[1] = WLC_BAND_2G;
+		ret = wl_ext_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
+			sizeof(roam_trigger), 0);
+		if (!ret)
+			trigger[0] = roam_trigger[0];
+
+		roam_trigger[1] = WLC_BAND_5G;
+		ret = wl_ext_ioctl(dev, WLC_GET_ROAM_TRIGGER, &roam_trigger,
+			sizeof(roam_trigger), 0);
+		if (!ret)
+			trigger[1] = roam_trigger[0];
+
+		ANDROID_TRACE(("roam_trigger %d %d\n", trigger[0], trigger[1]));
+		bytes_written = snprintf(command, total_len, "%d %d", trigger[0], trigger[1]);
+		ret = bytes_written;
+	}
+
+	return ret;
+}
+
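+/* Convert a hex pattern string of the form "0x1122..." into raw bytes in dst;
+ * returns the number of bytes written, or -1 on a malformed pattern.
+ */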
+static int
+wl_ext_pattern_atoh(char *src, char *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 &&
+	    strncmp(src, "0X", 2) != 0) {
+		ANDROID_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2; /* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		bcm_strncpy_s(num, sizeof(num), src, 2);
+		num[2] = '\0';
+		dst[i] = (uint8)strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+
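+/*
+ * Handle the "keep_alive" private command. A non-negative period
+ * programs an "mkeep_alive" entry (id, period in ms, optional hex
+ * payload); a negative period queries entry 'id' and reports its
+ * period and payload back through 'command'.
+ */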
+int
+wl_ext_keep_alive(struct net_device *dev, char *command, int total_len)
+{
+	wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+	int ret = -1, i;
+	int	id, period=-1, len_bytes=0, buf_len=0;
+	char data[200]="\0";
+	char buf[WLC_IOCTL_SMLEN]="\0", iovar_buf[WLC_IOCTL_SMLEN]="\0";
+	int bytes_written = -1;
+
+	ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, command));
+	sscanf(command, "%*s %d %d %s", &id, &period, data);
+	ANDROID_TRACE(("%s: id=%d, period=%d, data=%s\n", __FUNCTION__, id, period, data));
+
+	if (period >= 0) {
+		mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf;
+		mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION);
+		mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+		mkeep_alive_pktp->keep_alive_id = id;
+		buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+		mkeep_alive_pktp->period_msec = period;
+		if (strlen(data)) {
+			len_bytes = wl_ext_pattern_atoh(data, (char *) mkeep_alive_pktp->data);
+			buf_len += len_bytes;
+		}
+		mkeep_alive_pktp->len_bytes = htod16(len_bytes);
+
+		ret = wl_ext_iovar_setbuf(dev, "mkeep_alive", buf, buf_len,
+			iovar_buf, sizeof(iovar_buf), NULL);
+	} else {
+		if (id < 0)
+			id = 0;
+		ret = wl_ext_iovar_getbuf(dev, "mkeep_alive", &id, sizeof(id), buf,
+			sizeof(buf), NULL);
+		if (ret) {
+			goto exit;
+		} else {
+			mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) buf;
+			if (android_msg_level & ANDROID_INFO_LEVEL) {
+				printf("Id            :%d\n"
+					"Period (msec) :%d\n"
+					"Length        :%d\n"
+					"Packet        :0x",
+					mkeep_alive_pktp->keep_alive_id,
+					dtoh32(mkeep_alive_pktp->period_msec),
+					dtoh16(mkeep_alive_pktp->len_bytes));
+				for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+					printf("%02x", mkeep_alive_pktp->data[i]);
+				}
+				printf("\n");
+			}
+		}
+		bytes_written = snprintf(command, total_len, "mkeep_alive_period_msec %d ",
+			dtoh32(mkeep_alive_pktp->period_msec));
+		bytes_written += snprintf(command+bytes_written, total_len, "0x");
+		for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+			bytes_written += snprintf(command+bytes_written, total_len, "%02x",
+				mkeep_alive_pktp->data[i]);
+		}
+		ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+		ret = bytes_written;
+	}
+
+exit:
+	return ret;
+}
+
+int
+wl_ext_pm(struct net_device *dev, char *command, int total_len)
+{
+	int pm=-1, ret = -1;
+	char *pm_local;
+	int bytes_written=-1;
+
+	ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+
+	sscanf(command, "%*s %d", &pm);
+
+	if (pm >= 0) {
+		ret = wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+	} else {
+		ret = wl_ext_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), 0);
+		if (!ret) {
+			ANDROID_TRACE(("%s: PM = %d\n", __func__, pm));
+			if (pm == PM_OFF)
+				pm_local = "PM_OFF";
+			else if(pm == PM_MAX)
+				pm_local = "PM_MAX";
+			else if(pm == PM_FAST)
+				pm_local = "PM_FAST";
+			else {
+				pm = 0;
+				pm_local = "Invalid";
+			}
+			bytes_written = snprintf(command, total_len, "PM %s", pm_local);
+			ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+			ret = bytes_written;
+		}
+	}
+
+	return ret;
+}
+
+static int
+wl_ext_monitor(struct net_device *dev, char *command, int total_len)
+{
+	int val, ret = -1;
+	int bytes_written=-1;
+
+	sscanf(command, "%*s %d", &val);
+
+	if (val >=0) {
+		ret = wl_ext_ioctl(dev, WLC_SET_MONITOR, &val, sizeof(val), 1);
+	} else {
+		ret = wl_ext_ioctl(dev, WLC_GET_MONITOR, &val, sizeof(val), 0);
+		if (!ret) {
+			ANDROID_TRACE(("%s: monitor = %d\n", __FUNCTION__, val));
+			bytes_written = snprintf(command, total_len, "monitor %d", val);
+			ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+			ret = bytes_written;
+		}
+	}
+
+	return ret;
+}
+
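+/*
+ * Issue a join request built from 'conn_info'. The "join" iovar with
+ * extended parameters (BSSID, optional single-channel hint, tuned scan
+ * dwell times) is tried first; if the firmware reports BCME_UNSUPPORTED
+ * the legacy WLC_SET_SSID path is used instead.
+ */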
+s32
+wl_ext_connect(struct net_device *dev, struct wl_conn_info *conn_info)
+{
+	wl_extjoin_params_t *ext_join_params;
+	struct wl_join_params join_params;
+	size_t join_params_size;
+	s32 err = 0;
+	u32 chan_cnt = 0;
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	int ioctl_ver = 0;
+
+	if (conn_info->channel) {
+		chan_cnt = 1;
+	}
+
+	/*
+	 *	Join with specific BSSID and cached SSID
+	 *	If SSID is zero join based on BSSID only
+	 */
+	join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+		chan_cnt * sizeof(chanspec_t);
+	ext_join_params =  (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+	if (ext_join_params == NULL) {
+		err = -ENOMEM;
+		goto exit;
+	}
+	wl_ext_get_ioctl_ver(dev, &ioctl_ver);
+	ext_join_params->ssid.SSID_len = min((uint32)sizeof(ext_join_params->ssid.SSID),
+		conn_info->ssid.SSID_len);
+	memcpy(&ext_join_params->ssid.SSID, conn_info->ssid.SSID, ext_join_params->ssid.SSID_len);
+	ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+	/* increase dwell time to receive probe response or detect beacon
+	* from the target AP in a noisy environment, only during the connect command
+	*/
+	ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+	ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+	/* Set up join scan parameters */
+	ext_join_params->scan.scan_type = -1;
+	ext_join_params->scan.nprobes = chan_cnt ?
+		(ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+	ext_join_params->scan.home_time = -1;
+
+	if (memcmp(&ether_null, &conn_info->bssid, ETHER_ADDR_LEN))
+		memcpy(&ext_join_params->assoc.bssid, &conn_info->bssid, ETH_ALEN);
+	else
+		memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+	ext_join_params->assoc.chanspec_num = chan_cnt;
+	if (chan_cnt) {
+		u16 band, bw, ctl_sb;
+		chanspec_t chspec;
+		band = (conn_info->channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+			: WL_CHANSPEC_BAND_5G;
+		bw = WL_CHANSPEC_BW_20;
+		ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+		chspec = (conn_info->channel | band | bw | ctl_sb);
+		ext_join_params->assoc.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		ext_join_params->assoc.chanspec_list[0] |= chspec;
+		ext_join_params->assoc.chanspec_list[0] =
+			wl_ext_chspec_host_to_driver(ioctl_ver,
+				ext_join_params->assoc.chanspec_list[0]);
+	}
+	ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+
+	err = wl_ext_iovar_setbuf_bsscfg(dev, "join", ext_join_params,
+		join_params_size, iovar_buf, WLC_IOCTL_SMLEN, conn_info->bssidx, NULL);
+
+	ANDROID_MSG(("Connecting with " MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n",
+		MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), conn_info->channel,
+		ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len));
+
+	kfree(ext_join_params);
+	if (err) {
+		if (err == BCME_UNSUPPORTED) {
+			ANDROID_TRACE(("join iovar is not supported\n"));
+			goto set_ssid;
+		} else {
+			ANDROID_ERROR(("error (%d)\n", err));
+			goto exit;
+		}
+	} else
+		goto exit;
+
+set_ssid:
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	join_params.ssid.SSID_len = min((uint32)sizeof(join_params.ssid.SSID), conn_info->ssid.SSID_len);
+	memcpy(&join_params.ssid.SSID, conn_info->ssid.SSID, join_params.ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+	if (memcmp(&ether_null, &conn_info->bssid, ETHER_ADDR_LEN))
+		memcpy(&join_params.params.bssid, &conn_info->bssid, ETH_ALEN);
+	else
+		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+	wl_ext_ch_to_chanspec(ioctl_ver, conn_info->channel, &join_params, &join_params_size);
+	ANDROID_TRACE(("join_param_size %zu\n", join_params_size));
+
+	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		ANDROID_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+			join_params.ssid.SSID_len));
+	}
+	err = wl_ext_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, 1);
+
+exit:
+	return err;
+
+}
+
+#if defined(WL_WIRELESS_EXT)
+void wl_ext_pm_work_handler(struct work_struct * work)
+{
+	struct wl_conn_info *conn_info;
+	s32 pm = PM_FAST;
+	dhd_pub_t *dhd;
+	BCM_SET_CONTAINER_OF(conn_info, work, struct wl_conn_info, pm_enable_work.work);
+
+	ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+
+	dhd = (dhd_pub_t *)(conn_info->dhd);
+	if (!dhd || !conn_info->dhd->up) {
+		ANDROID_TRACE(("%s: dhd is null or not up\n", __FUNCTION__));
+		return;
+	}
+	if (dhd_conf_get_pm(dhd) >= 0)
+		pm = dhd_conf_get_pm(dhd);
+	wl_ext_ioctl(conn_info->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	DHD_PM_WAKE_UNLOCK(conn_info->dhd);
+
+}
+
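+/*
+ * When 'add' is true, force PM off (or the value configured in
+ * dhd_conf) now and schedule pm_enable_work to restore power save after
+ * WL_PM_ENABLE_TIMEOUT; when false, only cancel any pending restore.
+ */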
+void wl_ext_add_remove_pm_enable_work(struct wl_conn_info *conn_info,
+	bool add)
+{
+	u16 wq_duration = 0;
+	s32 pm = PM_OFF;
+
+	if (conn_info == NULL || conn_info->dhd == NULL)
+		return;
+
+	mutex_lock(&conn_info->pm_sync);
+	/*
+	 * Make cancel and schedule work part mutually exclusive
+	 * so that while cancelling, we are sure that there is no
+	 * work getting scheduled.
+	 */
+
+	if (delayed_work_pending(&conn_info->pm_enable_work)) {
+		cancel_delayed_work_sync(&conn_info->pm_enable_work);
+		DHD_PM_WAKE_UNLOCK(conn_info->dhd);
+	}
+
+	if (add) {
+		wq_duration = (WL_PM_ENABLE_TIMEOUT);
+	}
+
+	/* It should schedule work item only if driver is up */
+	if (wq_duration && conn_info->dhd->up) {
+		if (dhd_conf_get_pm(conn_info->dhd) >= 0)
+			pm = dhd_conf_get_pm(conn_info->dhd);
+		wl_ext_ioctl(conn_info->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+		if (schedule_delayed_work(&conn_info->pm_enable_work,
+				msecs_to_jiffies((const unsigned int)wq_duration))) {
+			DHD_PM_WAKE_LOCK_TIMEOUT(conn_info->dhd, wq_duration);
+		} else {
+			ANDROID_ERROR(("%s: Can't schedule pm work handler\n", __FUNCTION__));
+		}
+	}
+	mutex_unlock(&conn_info->pm_sync);
+
+}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef WL_EXT_IAPSTA
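+/*
+ * Parse a WEP/PSK key string: 5/13/16 character keys are taken as raw
+ * ASCII, 10/26/32/64 digit strings (optionally "0x"-prefixed) as hex,
+ * and the crypto algorithm is chosen from the resulting key length.
+ */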
+static int
+wl_ext_parse_wep(char *key, struct wl_wsec_key *wsec_key)
+{
+	char hex[] = "XX";
+	unsigned char *data = wsec_key->data;
+	char *keystr = key;
+
+	switch (strlen(keystr)) {
+	case 5:
+	case 13:
+	case 16:
+		wsec_key->len = strlen(keystr);
+		memcpy(data, keystr, wsec_key->len + 1);
+		break;
+	case 12:
+	case 28:
+	case 34:
+	case 66:
+		/* strip leading 0x */
+		if (!strnicmp(keystr, "0x", 2))
+			keystr += 2;
+		else
+			return -1;
+		/* fall through */
+	case 10:
+	case 26:
+	case 32:
+	case 64:
+		wsec_key->len = strlen(keystr) / 2;
+		while (*keystr) {
+			strncpy(hex, keystr, 2);
+			*data++ = (char) strtoul(hex, NULL, 16);
+			keystr += 2;
+		}
+		break;
+	default:
+		return -1;
+	}
+
+	switch (wsec_key->len) {
+	case 5:
+		wsec_key->algo = CRYPTO_ALGO_WEP1;
+		break;
+	case 13:
+		wsec_key->algo = CRYPTO_ALGO_WEP128;
+		break;
+	case 16:
+		/* default to AES-CCM */
+		wsec_key->algo = CRYPTO_ALGO_AES_CCM;
+		break;
+	case 32:
+		wsec_key->algo = CRYPTO_ALGO_TKIP;
+		break;
+	default:
+		return -1;
+	}
+
+	/* Set as primary wsec_key by default */
+	wsec_key->flags |= WL_PRIMARY_KEY;
+
+	return 0;
+}
+
+static int
+wl_ext_set_bgnmode(struct wl_if_info *cur_if)
+{
+	struct net_device *dev = cur_if->dev;
+	bgnmode_t bgnmode = cur_if->bgnmode;
+	int val;
+
+	if (bgnmode == 0)
+		return 0;
+
+	wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+	if (bgnmode == IEEE80211B) {
+		wl_ext_iovar_setint(dev, "nmode", 0);
+		val = 0;
+		wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+		ANDROID_TRACE(("%s: Network mode: B only\n", __FUNCTION__));
+	} else if (bgnmode == IEEE80211G) {
+		wl_ext_iovar_setint(dev, "nmode", 0);
+		val = 2;
+		wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+		ANDROID_TRACE(("%s: Network mode: G only\n", __FUNCTION__));
+	} else if (bgnmode == IEEE80211BG) {
+		wl_ext_iovar_setint(dev, "nmode", 0);
+		val = 1;
+		wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+		ANDROID_TRACE(("%s: Network mode: B/G mixed\n", __FUNCTION__));
+	} else if (bgnmode == IEEE80211BGN) {
+		wl_ext_iovar_setint(dev, "nmode", 0);
+		wl_ext_iovar_setint(dev, "nmode", 1);
+		wl_ext_iovar_setint(dev, "vhtmode", 0);
+		val = 1;
+		wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+		ANDROID_TRACE(("%s: Network mode: B/G/N mixed\n", __FUNCTION__));
+	} else if (bgnmode == IEEE80211BGNAC) {
+		wl_ext_iovar_setint(dev, "nmode", 0);
+		wl_ext_iovar_setint(dev, "nmode", 1);
+		wl_ext_iovar_setint(dev, "vhtmode", 1);
+		val = 1;
+		wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
+		ANDROID_TRACE(("%s: Network mode: B/G/N/AC mixed\n", __FUNCTION__));
+	}
+	wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+
+	return 0;
+}
+
+static void
+wl_ext_get_amode(struct wl_if_info *cur_if, char *amode)
+{
+	struct net_device *dev = cur_if->dev;
+	int auth=-1, wpa_auth=-1;
+
+	wl_ext_iovar_getint(dev, "auth", &auth);
+	wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
+
+#ifdef WLMESH
+	if (cur_if->ifmode == IMESH_MODE) {
+		if (auth == 0 && wpa_auth == 0) {
+			strcpy(amode, "open");
+		} else if (auth == 0 && wpa_auth == 128) {
+			strcpy(amode, "sae");
+		}
+	} else
+#endif
+	if (auth == 0 && wpa_auth == 0) {
+		strcpy(amode, "open");
+	} else if (auth == 1 && wpa_auth == 0) {
+		strcpy(amode, "shared");
+	} else if (auth == 0 && wpa_auth == 4) {
+		strcpy(amode, "wpapsk");
+	} else if (auth == 0 && wpa_auth == 128) {
+		strcpy(amode, "wpa2psk");
+	} else if (auth == 0 && wpa_auth == 132) {
+		strcpy(amode, "wpawpa2psk");
+	}
+}
+
+static void
+wl_ext_get_emode(struct wl_if_info *cur_if, char *emode)
+{
+	struct net_device *dev = cur_if->dev;
+	int wsec=0;
+
+	wl_ext_iovar_getint(dev, "wsec", &wsec);
+
+#ifdef WLMESH
+	if (cur_if->ifmode == IMESH_MODE) {
+		if (wsec == 0) {
+			strcpy(emode, "none");
+		} else {
+			strcpy(emode, "sae");
+		}
+	} else
+#endif
+	if (wsec == 0) {
+		strcpy(emode, "none");
+	} else if (wsec == 1) {
+		strcpy(emode, "wep");
+	} else if (wsec == 2 || wsec == 10) {
+		strcpy(emode, "tkip");
+	} else if (wsec == 4 || wsec == 12) {
+		strcpy(emode, "aes");
+	} else if (wsec == 6 || wsec == 14) {
+		strcpy(emode, "tkipaes");
+	}
+}
+
+static int
+wl_ext_set_amode(struct wl_if_info *cur_if)
+{
+	struct net_device *dev = cur_if->dev;
+	authmode_t amode = cur_if->amode;
+	int auth=0, wpa_auth=0;
+
+#ifdef WLMESH
+	if (cur_if->ifmode == IMESH_MODE) {
+		if (amode == AUTH_SAE) {
+			auth = 0;
+			wpa_auth = 128;
+			ANDROID_INFO(("%s: Authentication: SAE\n", __FUNCTION__));
+		} else {
+			auth = 0;
+			wpa_auth = 0;
+			ANDROID_INFO(("%s: Authentication: Open System\n", __FUNCTION__));
+		}
+	} else
+#endif
+	if (amode == AUTH_OPEN) {
+		auth = 0;
+		wpa_auth = 0;
+		ANDROID_INFO(("%s: Authentication: Open System\n", __FUNCTION__));
+	} else if (amode == AUTH_SHARED) {
+		auth = 1;
+		wpa_auth = 0;
+		ANDROID_INFO(("%s: Authentication: Shared Key\n", __FUNCTION__));
+	} else if (amode == AUTH_WPAPSK) {
+		auth = 0;
+		wpa_auth = 4;
+		ANDROID_INFO(("%s: Authentication: WPA-PSK\n", __FUNCTION__));
+	} else if (amode == AUTH_WPA2PSK) {
+		auth = 0;
+		wpa_auth = 128;
+		ANDROID_INFO(("%s: Authentication: WPA2-PSK\n", __FUNCTION__));
+	} else if (amode == AUTH_WPAWPA2PSK) {
+		auth = 0;
+		wpa_auth = 132;
+		ANDROID_INFO(("%s: Authentication: WPA/WPA2-PSK\n", __FUNCTION__));
+	}
+#ifdef WLMESH
+	if (cur_if->ifmode == IMESH_MODE) {
+		s32 val = WL_BSSTYPE_MESH;
+		wl_ext_ioctl(dev, WLC_SET_INFRA, &val, sizeof(val), 1);
+	} else
+#endif
+	if (cur_if->ifmode == ISTA_MODE) {
+		s32 val = WL_BSSTYPE_INFRA;
+		wl_ext_ioctl(dev, WLC_SET_INFRA, &val, sizeof(val), 1);
+	}
+	wl_ext_iovar_setint(dev, "auth", auth);
+
+	wl_ext_iovar_setint(dev, "wpa_auth", wpa_auth);
+
+	return 0;
+}
+
+static int
+wl_ext_set_emode(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+	struct net_device *dev = cur_if->dev;
+	int wsec=0;
+	struct wl_wsec_key wsec_key;
+	wsec_pmk_t psk;
+	authmode_t amode = cur_if->amode;
+	encmode_t emode = cur_if->emode;
+	char *key = cur_if->key;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+
+	memset(&wsec_key, 0, sizeof(wsec_key));
+	memset(&psk, 0, sizeof(psk));
+
+#ifdef WLMESH
+	if (cur_if->ifmode == IMESH_MODE) {
+		if (amode == AUTH_SAE) {
+			wsec = 4;
+			ANDROID_INFO(("%s: Encryption: AES\n", __FUNCTION__));
+		} else {
+			wsec = 0;
+			ANDROID_INFO(("%s: Encryption: No security\n", __FUNCTION__));
+		}
+	} else
+#endif
+	if (emode == ENC_NONE) {
+		wsec = 0;
+		ANDROID_INFO(("%s: Encryption: No security\n", __FUNCTION__));
+	} else if (emode == ENC_WEP) {
+		wsec = 1;
+		wl_ext_parse_wep(key, &wsec_key);
+		ANDROID_INFO(("%s: Encryption: WEP\n", __FUNCTION__));
+		ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, wsec_key.data));
+	} else if (emode == ENC_TKIP) {
+		wsec = 2;
+		psk.key_len = strlen(key);
+		psk.flags = WSEC_PASSPHRASE;
+		memcpy(psk.key, key, strlen(key));
+		ANDROID_INFO(("%s: Encryption: TKIP\n", __FUNCTION__));
+		ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
+	} else if (emode == ENC_AES || amode == AUTH_SAE) {
+		wsec = 4;
+		psk.key_len = strlen(key);
+		psk.flags = WSEC_PASSPHRASE;
+		memcpy(psk.key, key, strlen(key));
+		ANDROID_INFO(("%s: Encryption: AES\n", __FUNCTION__));
+		ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
+	} else if (emode == ENC_TKIPAES) {
+		wsec = 6;
+		psk.key_len = strlen(key);
+		psk.flags = WSEC_PASSPHRASE;
+		memcpy(psk.key, key, strlen(key));
+		ANDROID_INFO(("%s: Encryption: TKIP/AES\n", __FUNCTION__));
+		ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
+	}
+	if (dhd->conf->chip == BCM43430_CHIP_ID && cur_if->ifidx > 0 && wsec >= 2 &&
+			apsta_params->apstamode == ISTAAP_MODE) {
+		wsec |= 0x8; // terence 20180628: fix me, this is a workaround
+	}
+
+	wl_ext_iovar_setint(dev, "wsec", wsec);
+
+#ifdef WLMESH
+	if (cur_if->ifmode == IMESH_MODE) {
+		if (amode == AUTH_SAE) {
+			s8 iovar_buf[WLC_IOCTL_SMLEN];
+			ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, key));
+			wl_ext_iovar_setint(dev, "mesh_auth_proto", 1);
+			wl_ext_iovar_setint(dev, "mfp", WL_MFP_REQUIRED);
+			wl_ext_iovar_setbuf(dev, "sae_password", key, strlen(key),
+				iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		} else {
+			wl_ext_iovar_setint(dev, "mesh_auth_proto", 0);
+			wl_ext_iovar_setint(dev, "mfp", WL_MFP_NONE);
+		}
+	} else
+#endif
+	if (emode == ENC_WEP) {
+		wl_ext_ioctl(dev, WLC_SET_KEY, &wsec_key, sizeof(wsec_key), 1);
+	} else if (emode == ENC_TKIP || emode == ENC_AES || emode == ENC_TKIPAES) {
+		if (dev) {
+			if (cur_if->ifmode == ISTA_MODE)
+				wl_ext_iovar_setint(dev, "sup_wpa", 1);
+			wl_ext_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk), 1);
+		} else {
+			ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
+		}
+	}
+
+	return 0;
+}
+
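+/*
+ * Return the control channel of the currently associated BSS, or 0 if
+ * the interface is not associated or the chanspec query fails.
+ */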
+static uint16
+wl_ext_get_chan(struct wl_apsta_params *apsta_params, struct net_device *dev)
+{
+	int ret = 0;
+	uint16 chan = 0, ctl_chan;
+	struct ether_addr bssid;
+	u32 chanspec = 0;
+
+	ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+	if (ret != BCME_NOTASSOCIATED && memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {
+		if (wl_ext_iovar_getint(dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
+			chanspec = wl_ext_chspec_driver_to_host(apsta_params->ioctl_ver, chanspec);
+			ctl_chan = wf_chspec_ctlchan(chanspec);
+			chan = (u16)(ctl_chan & 0x00FF);
+			return chan;
+		}
+	}
+
+	return 0;
+}
+
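+/*
+ * Build a firmware chanspec for 'channel'. On 5 GHz the widest
+ * bandwidth reported by "bw_cap" (or "mimo_bw_cap" on older firmware)
+ * is tried first, stepping down 80 -> 40 -> 20 MHz until a valid
+ * chanspec is found; 2.4 GHz always uses 20 MHz.
+ */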
+static chanspec_t
+wl_ext_get_chanspec(struct wl_apsta_params *apsta_params,
+	struct net_device *dev, uint16 channel)
+{
+	s32 _chan = channel;
+	chanspec_t chspec = 0;
+	chanspec_t fw_chspec = 0;
+	u32 bw = WL_CHANSPEC_BW_20;
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	uint band;
+
+	if (_chan <= CH_MAX_2G_CHANNEL)
+		band = IEEE80211_BAND_2GHZ;
+	else
+		band = IEEE80211_BAND_5GHZ;
+
+	if (band == IEEE80211_BAND_5GHZ) {
+		param.band = WLC_BAND_5G;
+		err = wl_ext_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		if (err) {
+			if (err != BCME_UNSUPPORTED) {
+				ANDROID_ERROR(("bw_cap failed, %d\n", err));
+				return err;
+			} else {
+				err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+				if (bw_cap != WLC_N_BW_20ALL)
+					bw = WL_CHANSPEC_BW_40;
+			}
+		} else {
+			if (WL_BW_CAP_80MHZ(iovar_buf[0]))
+				bw = WL_CHANSPEC_BW_80;
+			else if (WL_BW_CAP_40MHZ(iovar_buf[0]))
+				bw = WL_CHANSPEC_BW_40;
+			else
+				bw = WL_CHANSPEC_BW_20;
+		}
+	}
+	else if (band == IEEE80211_BAND_2GHZ)
+		bw = WL_CHANSPEC_BW_20;
+
+set_channel:
+	chspec = wf_channel2chspec(_chan, bw);
+	if (wf_chspec_valid(chspec)) {
+		fw_chspec = wl_ext_chspec_host_to_driver(apsta_params->ioctl_ver, chspec);
+		if (fw_chspec == INVCHANSPEC) {
+			ANDROID_ERROR(("%s: failed to convert host chanspec to fw chanspec\n",
+				__FUNCTION__));
+			fw_chspec = 0;
+		}
+	} else {
+		if (bw == WL_CHANSPEC_BW_80)
+			bw = WL_CHANSPEC_BW_40;
+		else if (bw == WL_CHANSPEC_BW_40)
+			bw = WL_CHANSPEC_BW_20;
+		else
+			bw = 0;
+		if (bw)
+			goto set_channel;
+		ANDROID_ERROR(("%s: Invalid chanspec 0x%x\n", __FUNCTION__, chspec));
+		err = BCME_ERROR;
+	}
+
+	return fw_chspec;
+}
+
+static void
+wl_ext_wait_netif_change(struct wl_apsta_params *apsta_params,
+	bool need_rtnl_unlock)
+{
+	if (need_rtnl_unlock)
+		rtnl_unlock();
+	wait_event_interruptible_timeout(apsta_params->netif_change_event,
+		apsta_params->netif_change, msecs_to_jiffies(1500));
+	if (need_rtnl_unlock)
+		rtnl_lock();
+}
+
+bool
+wl_ext_check_mesh_creating(struct net_device *net)
+{
+	struct dhd_pub *dhd = dhd_get_pub(net);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+
+	if (apsta_params)
+		return apsta_params->mesh_creating;
+	return FALSE;
+}
+
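+/*
+ * One-time firmware setup for the selected apstamode: fill in default
+ * ifname/SSID/priority for each interface, disable DFS channels when an
+ * AP or mesh interface is configured (non-VSDB), pick the rsdb_mode,
+ * and create the additional AP/STA/mesh interfaces through
+ * "interface_create" (or "p2p_ifadd" for GO).
+ */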
+static void
+wl_ext_iapsta_preinit(struct net_device *dev, struct wl_apsta_params *apsta_params)
+{
+	struct dhd_pub *dhd;
+	apstamode_t apstamode = apsta_params->apstamode;
+	wl_interface_create_t iface;
+	struct wl_if_info *cur_if;
+	wlc_ssid_t ssid = { 0, {0} };
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	wl_country_t cspec = {{0}, 0, {0}};
+	wl_p2p_if_t ifreq;
+	s32 val = 0;
+	int i, dfs = 1;
+
+	dhd = dhd_get_pub(dev);
+
+	for (i=0; i<MAX_IF_NUM; i++) {
+		cur_if = &apsta_params->if_info[i];
+		if (i == 1 && !strlen(cur_if->ifname))
+			strcpy(cur_if->ifname, "wlan1");
+		if (i == 2 && !strlen(cur_if->ifname))
+			strcpy(cur_if->ifname, "wlan2");
+		if (cur_if->ifmode == ISTA_MODE) {
+			cur_if->channel = 0;
+			cur_if->maxassoc = -1;
+			cur_if->ifstate = IF_STATE_INIT;
+			cur_if->prio = PRIO_STA;
+			cur_if->prefix = 'S';
+			snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
+		} else if (cur_if->ifmode == IAP_MODE) {
+			cur_if->channel = 1;
+			cur_if->maxassoc = -1;
+			cur_if->ifstate = IF_STATE_INIT;
+			cur_if->prio = PRIO_AP;
+			cur_if->prefix = 'A';
+			snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
+			dfs = 0;
+#ifdef WLMESH
+		} else if (cur_if->ifmode == IMESH_MODE) {
+			cur_if->channel = 1;
+			cur_if->maxassoc = -1;
+			cur_if->ifstate = IF_STATE_INIT;
+			cur_if->prio = PRIO_MESH;
+			cur_if->prefix = 'M';
+			snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
+			dfs = 0;
+#endif
+		}
+	}
+
+	if (!dfs && !apsta_params->vsdb) {
+		dhd_conf_get_country(dhd, &cspec);
+		if (!dhd_conf_map_country_list(dhd, &cspec)) {
+			dhd_conf_set_country(dhd, &cspec);
+			dhd_bus_country_set(dev, &cspec, TRUE);
+		}
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "dfs_chan_disable", 1);
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+	}
+
+	if (FW_SUPPORTED(dhd, rsdb)) {
+		if (apstamode == IDUALAP_MODE)
+			apsta_params->rsdb = TRUE;
+		else if (apstamode == ISTAAPAP_MODE)
+			apsta_params->rsdb = FALSE;
+		if (apstamode == IDUALAP_MODE || apstamode == ISTAAPAP_MODE ||
+			apstamode == IMESHONLY_MODE || apstamode == IMESHSTA_MODE ||
+			apstamode == IMESHAP_MODE || apstamode == IMESHAPSTA_MODE ||
+			apstamode == IMESHAPAP_MODE) {
+			wl_config_t rsdb_mode_cfg = {0, 0};
+			if (apsta_params->rsdb)
+				rsdb_mode_cfg.config = 1;
+			ANDROID_MSG(("%s: set rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config));
+			wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+			wl_ext_iovar_setbuf(dev, "rsdb_mode", &rsdb_mode_cfg,
+				sizeof(rsdb_mode_cfg), iovar_buf, sizeof(iovar_buf), NULL);
+			wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		}
+	} else {
+		apsta_params->rsdb = FALSE;
+	}
+
+	if (apstamode == ISTAONLY_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+		// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+	} else if (apstamode == IAPONLY_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+#ifdef ARP_OFFLOAD_SUPPORT
+		/* IF SoftAP is enabled, disable arpoe */
+		dhd_arp_offload_set(dhd, 0);
+		dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "apsta", 0);
+		val = 1;
+		wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (!apsta_params->rsdb && !disable_proptx) {
+			bool enabled;
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (!enabled) {
+				dhd_wlfc_init(dhd);
+				wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+			}
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+	else if (apstamode == ISTAAP_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "apsta", 1);
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		apsta_params->netif_change = FALSE;
+		if (apsta_params->rsdb) {
+			bzero(&iface, sizeof(wl_interface_create_t));
+			iface.ver = WL_INTERFACE_CREATE_VER;
+			iface.flags = WL_INTERFACE_CREATE_AP;
+			wl_ext_iovar_getbuf(dev, "interface_create", &iface,
+				sizeof(iface), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		} else {
+			wl_ext_iovar_setbuf_bsscfg(dev, "ssid", &ssid, sizeof(ssid),
+				iovar_buf, WLC_IOCTL_SMLEN, 1, NULL);
+		}
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+	}
+	else if (apstamode == ISTAGO_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "apsta", 1);
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		bzero(&ifreq, sizeof(wl_p2p_if_t));
+		ifreq.type = htod32(WL_P2P_IF_GO);
+		apsta_params->netif_change = FALSE;
+		wl_ext_iovar_setbuf(dev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+	}
+	else if (apstamode == ISTASTA_MODE) {
+		apsta_params->netif_change = FALSE;
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_STA;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface,
+			sizeof(iface), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+	}
+	else if (apstamode == IDUALAP_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		/* If SoftAP is enabled, disable arpoe or pings via wlan1 will fail */
+#ifdef ARP_OFFLOAD_SUPPORT
+		/* IF SoftAP is enabled, disable arpoe */
+		dhd_arp_offload_set(dhd, 0);
+		dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "mbcn", 1);
+		wl_ext_iovar_setint(dev, "apsta", 0);
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		val = 1;
+		wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_AP;
+		apsta_params->netif_change = FALSE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+	}
+	else if (apstamode == ISTAAPAP_MODE) {
+		u8 rand_bytes[2] = {0, };
+		get_random_bytes(&rand_bytes, sizeof(rand_bytes));
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "mbss", 1);
+		wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE;
+		memcpy(&iface.mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+		iface.mac_addr.octet[0] |= 0x02;
+		iface.mac_addr.octet[5] += 0x01;
+		memcpy(&iface.mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+		apsta_params->netif_change = FALSE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE;
+		memcpy(&iface.mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+		iface.mac_addr.octet[0] |= 0x02;
+		iface.mac_addr.octet[5] += 0x02;
+		memcpy(&iface.mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+		apsta_params->netif_change = FALSE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+	}
+#ifdef WLMESH
+	else if (apstamode == IMESHONLY_MODE) {
+		int pm = 0;
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+		wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+	}
+	else if (apstamode == IMESHSTA_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "mbcn", 1);
+		wl_ext_iovar_setint(dev, "apsta", 1);
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_STA;
+		apsta_params->netif_change = FALSE;
+		apsta_params->mesh_creating = TRUE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+		apsta_params->mesh_creating = FALSE;
+	}
+	else if (apstamode == IMESHAP_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "mbcn", 1);
+		wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_STA;
+		apsta_params->netif_change = FALSE;
+		apsta_params->mesh_creating = TRUE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+		apsta_params->mesh_creating = FALSE;
+	}
+	else if (apstamode == IMESHAPSTA_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "mbcn", 1);
+		wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_AP;
+		apsta_params->netif_change = FALSE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_STA;
+		apsta_params->netif_change = FALSE;
+		apsta_params->mesh_creating = TRUE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+		apsta_params->mesh_creating = FALSE;
+	}
+	else if (apstamode == IMESHAPAP_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		wl_ext_iovar_setint(dev, "mbcn", 1);
+		wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_AP;
+		apsta_params->netif_change = FALSE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+		bzero(&iface, sizeof(wl_interface_create_t));
+		iface.ver = WL_INTERFACE_CREATE_VER;
+		iface.flags = WL_INTERFACE_CREATE_AP;
+		apsta_params->netif_change = FALSE;
+		wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_wait_netif_change(apsta_params, TRUE);
+	}
+#endif
+
+	wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
+	apsta_params->init = TRUE;
+
+	ANDROID_MSG(("%s: apstamode=%d\n", __FUNCTION__, apstamode));
+}
+
+static int
+wl_ext_isam_init(struct net_device *dev, char *command, int total_len)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	char *pch, *pick_tmp, *pick_tmp2, *param;
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	int i;
+
+	if (apsta_params->init) {
+		ANDROID_ERROR(("%s: don't init twice\n", __FUNCTION__));
+		return -1;
+	}
+
+	ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	pick_tmp = command;
+	param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_init
+	param = bcmstrtok(&pick_tmp, " ", 0);
+	while (param != NULL) {
+		if (!strcmp(param, "mode")) {
+			pch = NULL;
+			pick_tmp2 = bcmstrtok(&pick_tmp, " ", 0);
+			if (pick_tmp2) {
+				if (!strcmp(pick_tmp2, "sta")) {
+					apsta_params->apstamode = ISTAONLY_MODE;
+				} else if (!strcmp(pick_tmp2, "ap")) {
+					apsta_params->apstamode = IAPONLY_MODE;
+				} else if (!strcmp(pick_tmp2, "sta-ap")) {
+					apsta_params->apstamode = ISTAAP_MODE;
+				} else if (!strcmp(pick_tmp2, "sta-sta")) {
+					apsta_params->apstamode = ISTASTA_MODE;
+					apsta_params->vsdb = TRUE;
+				} else if (!strcmp(pick_tmp2, "ap-ap")) {
+					apsta_params->apstamode = IDUALAP_MODE;
+				} else if (!strcmp(pick_tmp2, "sta-ap-ap")) {
+					apsta_params->apstamode = ISTAAPAP_MODE;
+#ifdef WLMESH
+				} else if (!strcmp(pick_tmp2, "mesh")) {
+					apsta_params->apstamode = IMESHONLY_MODE;
+				} else if (!strcmp(pick_tmp2, "mesh-sta") ||
+						!strcmp(pick_tmp2, "sta-mesh")) {
+					apsta_params->apstamode = IMESHSTA_MODE;
+				} else if (!strcmp(pick_tmp2, "mesh-ap") ||
+						!strcmp(pick_tmp2, "ap-mesh")) {
+					apsta_params->apstamode = IMESHAP_MODE;
+				} else if (!strcmp(pick_tmp2, "mesh-ap-sta") ||
+						!strcmp(pick_tmp2, "sta-ap-mesh") ||
+						!strcmp(pick_tmp2, "sta-mesh-ap")) {
+					apsta_params->apstamode = IMESHAPSTA_MODE;
+				} else if (!strcmp(pick_tmp2, "mesh-ap-ap") ||
+						!strcmp(pick_tmp2, "ap-ap-mesh")) {
+					apsta_params->apstamode = IMESHAPAP_MODE;
+#endif
+				} else if (!strcmp(pick_tmp2, "apsta")) {
+					apsta_params->apstamode = ISTAAP_MODE;
+					apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+					apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+				} else if (!strcmp(pick_tmp2, "dualap")) {
+					apsta_params->apstamode = IDUALAP_MODE;
+					apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
+					apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+				} else if (!strcmp(pick_tmp2, "gosta")) {
+					if (!FW_SUPPORTED(dhd, p2p)) {
+						return -1;
+					}
+					apsta_params->apstamode = ISTAGO_MODE;
+					apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+					apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+				} else {
+					ANDROID_ERROR(("%s: mode [sta|ap|sta-ap|ap-ap]\n", __FUNCTION__));
+					return -1;
+				}
+				pch = bcmstrtok(&pick_tmp2, " -", 0);
+				for (i=0; i<MAX_IF_NUM && pch; i++) {
+					if (!strcmp(pch, "sta"))
+						apsta_params->if_info[i].ifmode = ISTA_MODE;
+					else if (!strcmp(pch, "ap"))
+						apsta_params->if_info[i].ifmode = IAP_MODE;
+#ifdef WLMESH
+					else if (!strcmp(pch, "mesh")) {
+						if (dhd->conf->fw_type != FW_TYPE_MESH) {
+							ANDROID_ERROR(("%s: wrong fw type\n", __FUNCTION__));
+							return -1;
+						}
+						apsta_params->if_info[i].ifmode = IMESH_MODE;
+					}
+#endif
+					pch = bcmstrtok(&pick_tmp2, " -", 0);
+				}
+			}
+		}
+		else if (!strcmp(param, "rsdb")) {
+			pch = bcmstrtok(&pick_tmp, " ", 0);
+			if (pch) {
+				if (!strcmp(pch, "y")) {
+					apsta_params->rsdb = TRUE;
+				} else if (!strcmp(pch, "n")) {
+					apsta_params->rsdb = FALSE;
+				} else {
+					ANDROID_ERROR(("%s: rsdb [y|n]\n", __FUNCTION__));
+					return -1;
+				}
+			}
+		} else if (!strcmp(param, "vsdb")) {
+			pch = bcmstrtok(&pick_tmp, " ", 0);
+			if (pch) {
+				if (!strcmp(pch, "y")) {
+					apsta_params->vsdb = TRUE;
+				} else if (!strcmp(pch, "n")) {
+					apsta_params->vsdb = FALSE;
+				} else {
+					ANDROID_ERROR(("%s: vsdb [y|n]\n", __FUNCTION__));
+					return -1;
+				}
+			}
+		} else if (!strcmp(param, "csa")) {
+			pch = bcmstrtok(&pick_tmp, " ", 0);
+			if (pch) {
+				apsta_params->csa = (int)simple_strtol(pch, NULL, 0);
+			}
+		} else if (!strcmp(param, "ifname")) {
+			pch = NULL;
+			pick_tmp2 = bcmstrtok(&pick_tmp, " ", 0);
+			if (pick_tmp2)
+				pch = bcmstrtok(&pick_tmp2, " -", 0);
+			for (i=0; i<MAX_IF_NUM && pch; i++) {
+				strcpy(apsta_params->if_info[i].ifname, pch);
+				pch = bcmstrtok(&pick_tmp2, " -", 0);
+			}
+		} else if (!strcmp(param, "vifname")) {
+			pch = bcmstrtok(&pick_tmp, " ", 0);
+			if (pch)
+				strcpy(apsta_params->if_info[IF_VIF].ifname, pch);
+			else {
+				ANDROID_ERROR(("%s: vifname [wlan1]\n", __FUNCTION__));
+				return -1;
+			}
+		}
+		param = bcmstrtok(&pick_tmp, " ", 0);
+	}
+
+	if (apsta_params->apstamode == 0) {
+		ANDROID_ERROR(("%s: mode [sta|ap|sta-ap|ap-ap]\n", __FUNCTION__));
+		return -1;
+	}
+
+	wl_ext_iapsta_preinit(dev, apsta_params);
+
+	return 0;
+}
+
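+/*
+ * Parse one interface block of the iapsta_config command. Each known
+ * keyword (" ssid ", " chan ", " amode ", ...) is located in the
+ * command string and its value runs up to the next keyword; parsing
+ * stops at the next " ifname ", whose position is returned through
+ * 'pick_next'.
+ */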
+static int
+wl_ext_parse_config(struct wl_if_info *cur_if, char *command, char **pick_next)
+{
+	char *pch, *pick_tmp;
+	char name[20], data[100];
+	int i, j, len;
+	char *ifname_head = NULL;
+
+	typedef struct config_map_t {
+		char name[20];
+		char *head;
+		char *tail;
+	} config_map_t;
+
+	config_map_t config_map [] = {
+		{" ifname ",	NULL, NULL},
+		{" ssid ",		NULL, NULL},
+		{" bssid ", 	NULL, NULL},
+		{" bgnmode ",	NULL, NULL},
+		{" hidden ",	NULL, NULL},
+		{" maxassoc ",	NULL, NULL},
+		{" chan ",		NULL, NULL},
+		{" amode ", 	NULL, NULL},
+		{" emode ", 	NULL, NULL},
+		{" key ",		NULL, NULL},
+	};
+	config_map_t *row, *row_prev;
+
+	pick_tmp = command;
+
+	// reset head and tail
+	for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+		row = &config_map[i];
+		row->head = NULL;
+		row->tail = pick_tmp + strlen(pick_tmp);
+	}
+
+	// pick head
+	for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+		row = &config_map[i];
+		pch = strstr(pick_tmp, row->name);
+		if (pch) {
+			row->head = pch;
+		}
+	}
+
+	// sort by head
+	for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]) - 1; i++) {
+		row_prev = &config_map[i];
+		for (j = i+1; j < sizeof(config_map)/sizeof(config_map[0]); j++) {
+			row = &config_map[j];
+			if (row->head < row_prev->head) {
+				strcpy(name, row_prev->name);
+				strcpy(row_prev->name, row->name);
+				strcpy(row->name, name);
+				pch = row_prev->head;
+				row_prev->head = row->head;
+				row->head = pch;
+			}
+		}
+	}
+
+	// pick tail
+	for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]) - 1; i++) {
+		row_prev = &config_map[i];
+		row = &config_map[i+1];
+		if (row_prev->head) {
+			row_prev->tail = row->head;
+		}
+	}
+
+	// remove name from head
+	for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+		row = &config_map[i];
+		if (row->head) {
+			if (!strcmp(row->name, " ifname ")) {
+				ifname_head = row->head + 1;
+				break;
+			}
+			row->head += strlen(row->name);
+		}
+	}
+
+	for (i = 0; i < sizeof(config_map)/sizeof(config_map[0]); i++) {
+		row = &config_map[i];
+		if (row->head) {
+			memset(data, 0, sizeof(data));
+			if (row->tail && row->tail > row->head) {
+				strncpy(data, row->head, row->tail-row->head);
+			} else {
+				strcpy(data, row->head);
+			}
+			pick_tmp = data;
+
+			if (!strcmp(row->name, " ifname ")) {
+				break;
+			} else if (!strcmp(row->name, " ssid ")) {
+				len = strlen(pick_tmp);
+				memset(cur_if->ssid, 0, sizeof(cur_if->ssid));
+				if (pick_tmp[0] == '"' && pick_tmp[len-1] == '"')
+					strncpy(cur_if->ssid, &pick_tmp[1], len-2);
+				else
+					strcpy(cur_if->ssid, pick_tmp);
+			} else if (!strcmp(row->name, " bssid ")) {
+				pch = bcmstrtok(&pick_tmp, ": ", 0);
+				for (j=0; j<6 && pch; j++) {
+					((u8 *)&cur_if->bssid)[j] = (int)simple_strtol(pch, NULL, 16);
+					pch = bcmstrtok(&pick_tmp, ": ", 0);
+				}
+			} else if (!strcmp(row->name, " bgnmode ")) {
+				if (!strcmp(pick_tmp, "b"))
+					cur_if->bgnmode = IEEE80211B;
+				else if (!strcmp(pick_tmp, "g"))
+					cur_if->bgnmode = IEEE80211G;
+				else if (!strcmp(pick_tmp, "bg"))
+					cur_if->bgnmode = IEEE80211BG;
+				else if (!strcmp(pick_tmp, "bgn"))
+					cur_if->bgnmode = IEEE80211BGN;
+				else if (!strcmp(pick_tmp, "bgnac"))
+					cur_if->bgnmode = IEEE80211BGNAC;
+				else {
+					ANDROID_ERROR(("%s: bgnmode [b|g|bg|bgn|bgnac]\n", __FUNCTION__));
+					return -1;
+				}
+			} else if (!strcmp(row->name, " hidden ")) {
+				if (!strcmp(pick_tmp, "n"))
+					cur_if->hidden = 0;
+				else if (!strcmp(pick_tmp, "y"))
+					cur_if->hidden = 1;
+				else {
+					ANDROID_ERROR(("%s: hidden [y|n]\n", __FUNCTION__));
+					return -1;
+				}
+			} else if (!strcmp(row->name, " maxassoc ")) {
+				cur_if->maxassoc = (int)simple_strtol(pick_tmp, NULL, 10);
+			} else if (!strcmp(row->name, " chan ")) {
+				cur_if->channel = (int)simple_strtol(pick_tmp, NULL, 10);
+			} else if (!strcmp(row->name, " amode ")) {
+				if (!strcmp(pick_tmp, "open"))
+					cur_if->amode = AUTH_OPEN;
+				else if (!strcmp(pick_tmp, "shared"))
+					cur_if->amode = AUTH_SHARED;
+				else if (!strcmp(pick_tmp, "wpapsk"))
+					cur_if->amode = AUTH_WPAPSK;
+				else if (!strcmp(pick_tmp, "wpa2psk"))
+					cur_if->amode = AUTH_WPA2PSK;
+				else if (!strcmp(pick_tmp, "wpawpa2psk"))
+					cur_if->amode = AUTH_WPAWPA2PSK;
+				else if (!strcmp(pick_tmp, "sae"))
+					cur_if->amode = AUTH_SAE;
+				else {
+					ANDROID_ERROR(("%s: amode [open|shared|wpapsk|wpa2psk|wpawpa2psk|sae]\n",
+						__FUNCTION__));
+					return -1;
+				}
+			} else if (!strcmp(row->name, " emode ")) {
+				if (!strcmp(pick_tmp, "none"))
+					cur_if->emode = ENC_NONE;
+				else if (!strcmp(pick_tmp, "wep"))
+					cur_if->emode = ENC_WEP;
+				else if (!strcmp(pick_tmp, "tkip"))
+					cur_if->emode = ENC_TKIP;
+				else if (!strcmp(pick_tmp, "aes"))
+					cur_if->emode = ENC_AES;
+				else if (!strcmp(pick_tmp, "tkipaes"))
+					cur_if->emode = ENC_TKIPAES;
+				else {
+					ANDROID_ERROR(("%s: emode [none|wep|tkip|aes|tkipaes]\n",
+						__FUNCTION__));
+					return -1;
+				}
+			} else if (!strcmp(row->name, " key ")) {
+				len = strlen(pick_tmp);
+				memset(cur_if->key, 0, sizeof(cur_if->key));
+				if (pick_tmp[0] == '"' && pick_tmp[len-1] == '"')
+					strncpy(cur_if->key, &pick_tmp[1], len-2);
+				else
+					strcpy(cur_if->key, pick_tmp);
+			}
+		}
+	}
+
+	*pick_next = ifname_head;
+	return 0;
+}
+
+static int
+wl_ext_iapsta_config(struct net_device *dev, char *command, int total_len)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	int ret=0, i;
+	char *pch, *pch2, *pick_tmp, *pick_next=NULL, *param;
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	char ifname[IFNAMSIZ+1];
+	struct wl_if_info *cur_if = NULL;
+
+	if (!apsta_params->init) {
+		ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
+		return -1;
+	}
+
+	ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	pick_tmp = command;
+	param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_config
+
+	while (pick_tmp != NULL) {
+		memset(ifname, 0, IFNAMSIZ+1);
+		if (!strncmp(pick_tmp, "ifname ", strlen("ifname "))) {
+			pch = pick_tmp + strlen("ifname ");
+			pch2 = strchr(pch, ' ');
+			if (pch && pch2) {
+				strncpy(ifname, pch, pch2-pch);
+			} else {
+				ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
+				return -1;
+			}
+			for (i=0; i<MAX_IF_NUM; i++) {
+				if (apsta_params->if_info[i].dev &&
+						!strcmp(apsta_params->if_info[i].dev->name, ifname)) {
+					cur_if = &apsta_params->if_info[i];
+					break;
+				}
+			}
+			if (!cur_if) {
+				ANDROID_ERROR(("%s: wrong ifname=%s in apstamode=%d\n",
+					__FUNCTION__, ifname, apsta_params->apstamode));
+				return -1;
+			}
+			ret = wl_ext_parse_config(cur_if, pick_tmp, &pick_next);
+			if (ret)
+				return -1;
+			pick_tmp = pick_next;
+		} else {
+			ANDROID_ERROR(("%s: first arg must be ifname\n", __FUNCTION__));
+			return -1;
+		}
+
+	}
+
+	return 0;
+}
+
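+/*
+ * Dump the state of every managed interface (mode prefix, enabled or
+ * disabled, channel, authentication/encryption mode, SSID) to the log
+ * and, when 'command' is not NULL, into the command buffer.
+ */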
+static int
+wl_ext_isam_status(struct net_device *dev, char *command, int total_len)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	int i;
+	bool now_if;
+	struct wl_if_info *tmp_if;
+	uint16 chan = 0;
+	wlc_ssid_t ssid = { 0, {0} };
+	char amode[16], emode[16];
+	int bytes_written=0;
+
+	if (apsta_params->init == FALSE) {
+		return 0;
+	}
+
+	ANDROID_INFO(("****************************\n"));
+	ANDROID_INFO(("%s: apstamode=%d\n", __FUNCTION__, apsta_params->apstamode));
+	for (i=0; i<MAX_IF_NUM; i++) {
+		now_if = FALSE;
+		memset(&ssid, 0, sizeof(ssid));
+		memset(amode, 0, sizeof(amode));
+		memset(emode, 0, sizeof(emode));
+		tmp_if = &apsta_params->if_info[i];
+		if (dev == tmp_if->dev)
+			now_if = TRUE;
+		if (tmp_if->dev) {
+			chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
+			if (chan) {
+				wl_ext_ioctl(tmp_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
+				wl_ext_get_amode(tmp_if, amode);
+				wl_ext_get_emode(tmp_if, emode);
+				ANDROID_INFO(("%s[%c-%c%s]: chan %3d, amode %s, emode %s, SSID \"%s\"\n",
+					tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
+					now_if?"*":" ", chan, amode, emode, ssid.SSID));
+				if (command)
+					bytes_written += snprintf(command+bytes_written, total_len,
+						"%s[%c-%c%s]: chan %3d, amode %s, emode %s, SSID \"%s\"\n",
+						tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
+						now_if?"*":" ", chan, amode, emode, ssid.SSID);
+			} else {
+				ANDROID_INFO(("%s[%c-%c%s]:\n",
+					tmp_if->ifname, tmp_if->prefix, chan?'E':'D', now_if?"*":" "));
+				if (command)
+					bytes_written += snprintf(command+bytes_written, total_len, "%s[%c-%c%s]:\n",
+						tmp_if->ifname, tmp_if->prefix, chan?'E':'D', now_if?"*":" ");
+			}
+		}
+	}
+	ANDROID_INFO(("****************************\n"));
+
+	return bytes_written;
+}
+
+#ifdef WLMESH
+static int
+wl_mesh_print_peer_info(mesh_peer_info_ext_t *mpi_ext,
+	uint32 peer_results_count, char *command, int total_len)
+{
+	char *peering_map[] = MESH_PEERING_STATE_STRINGS;
+	uint32 count = 0;
+	int bytes_written = 0;
+
+	bytes_written += snprintf(command+bytes_written, total_len,
+		"%2s: %12s : %6s : %-6s : %6s :"
+		" %5s : %4s : %4s : %11s : %4s",
+		"no", "------addr------ ", "l.aid", "state", "p.aid",
+		"mppid", "llid", "plid", "entry_state", "rssi");
+	for (count = 0; count < peer_results_count; count++) {
+		if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT) {
+			bytes_written += snprintf(command+bytes_written, total_len,
+				"\n%2d: %pM : 0x%4x : %6s : 0x%4x :"
+				" %5d : %4d : %4d : %11s : %4d",
+				count, &mpi_ext->ea, mpi_ext->local_aid,
+				peering_map[mpi_ext->peer_info.state],
+				mpi_ext->peer_info.peer_aid,
+				mpi_ext->peer_info.mesh_peer_prot_id,
+				mpi_ext->peer_info.local_link_id,
+				mpi_ext->peer_info.peer_link_id,
+				(mpi_ext->entry_state == MESH_SELF_PEER_ENTRY_STATE_ACTIVE) ?
+				"ACTIVE" :
+				"EXTERNAL",
+				mpi_ext->rssi);
+		} else {
+			bytes_written += snprintf(command+bytes_written, total_len,
+				"\n%2d: %pM : %6s : %5s : %6s :"
+				" %5s : %4s : %4s : %11s : %4s",
+				count, &mpi_ext->ea, "  NA  ", "  NA  ", "  NA  ",
+				"  NA ", " NA ", " NA ", "  TIMEDOUT ", " NA ");
+		}
+		mpi_ext++;
+	}
+	ANDROID_INFO(("\n%s\n", command));
+
+	return bytes_written;
+}
+
+static int
+wl_ext_mesh_peer_status(struct net_device *dev, char *data, char *command,
+	int total_len)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	int i;
+	struct wl_if_info *cur_if;
+	int bytes_written = -1;
+	int indata, inlen;
+	char *dump_buf;
+	mesh_peer_info_dump_t *peer_results;
+	mesh_peer_info_ext_t *mpi_ext;
+
+	if (apsta_params->init == FALSE) {
+		ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
+		return -1;
+	}
+
+	if (!data) {
+		dump_buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+		if (dump_buf == NULL) {
+			ANDROID_ERROR(("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_MAXLEN));
+			return -1;
+		}
+		memset(dump_buf, 0, WLC_IOCTL_MAXLEN);
+
+		for (i=0; i<MAX_IF_NUM; i++) {
+			cur_if = &apsta_params->if_info[i];
+			if (dev == cur_if->dev && cur_if->ifmode == IMESH_MODE) {
+				peer_results = (mesh_peer_info_dump_t *)dump_buf;
+				indata = htod32(WLC_IOCTL_MAXLEN);
+				inlen = 4;
+				bytes_written = wl_ext_iovar_getbuf(dev, "mesh_peer_status",
+					&indata, inlen, dump_buf, WLC_IOCTL_MAXLEN, NULL);
+				if (!bytes_written) {
+					peer_results = (mesh_peer_info_dump_t *)dump_buf;
+					mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
+					bytes_written = wl_mesh_print_peer_info(mpi_ext, peer_results->count,
+						command, total_len);
+				}
+			} else if (dev == cur_if->dev) {
+				ANDROID_ERROR(("%s: %s[%c] is not mesh interface\n",
+					__FUNCTION__, cur_if->ifname, cur_if->prefix));
+			}
+		}
+		kfree(dump_buf);
+	}
+
+	return bytes_written;
+}
+#endif
+
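+/*
+ * Take one interface down: STAs are disassociated/deauthenticated and
+ * the BSS is brought down (whole radio down for AP- or mesh-only modes).
+ */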
+static int
+wl_ext_if_down(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	scb_val_t scbval;
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+	apstamode_t apstamode = apsta_params->apstamode;
+
+	ANDROID_MSG(("%s: %s[%c] Turning off\n", __FUNCTION__,
+		cur_if->ifname, cur_if->prefix));
+
+	if (cur_if->ifmode == ISTA_MODE) {
+		wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
+	} else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+		// deauthenticate all STA first
+		memcpy(scbval.ea.octet, &ether_bcast, ETHER_ADDR_LEN);
+		wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1);
+	}
+
+	if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+		wl_ext_ioctl(cur_if->dev, WLC_DOWN, NULL, 0, 1);
+	} else {
+		bss_setbuf.cfg = 0xffffffff;
+		bss_setbuf.val = htod32(0);
+		wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+	}
+
+	return 0;
+}
+
+static int
+wl_ext_if_up(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+	apstamode_t apstamode = apsta_params->apstamode;
+	chanspec_t fw_chspec;
+
+	if (cur_if->ifmode != IAP_MODE) {
+		ANDROID_ERROR(("%s: Wrong ifmode on %s[%c]\n", __FUNCTION__,
+			cur_if->ifname, cur_if->prefix));
+		return 0;
+	}
+
+	if (cur_if->channel >= 52 && cur_if->channel <= 148) {
+		ANDROID_MSG(("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
+			cur_if->ifname, cur_if->prefix, cur_if->channel));
+		return 0;
+	}
+
+	ANDROID_MSG(("%s: %s[%c] Turning on\n", __FUNCTION__,
+		cur_if->ifname, cur_if->prefix));
+
+	wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, cur_if->channel,
+		&fw_chspec);
+
+	if (apstamode == IAPONLY_MODE) {
+		wl_ext_ioctl(cur_if->dev, WLC_UP, NULL, 0, 1);
+	} else {
+		bss_setbuf.cfg = 0xffffffff;
+		bss_setbuf.val = htod32(1);
+		wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf,
+			sizeof(bss_setbuf), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+	}
+
+	OSL_SLEEP(500);
+	wl_ext_isam_status(cur_if->dev, NULL, 0);
+
+	return 0;
+}
+
+static int
+wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	char *pch, *pick_tmp, *param;
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	wlc_ssid_t ssid = { 0, {0} };
+	scb_val_t scbval;
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	apstamode_t apstamode = apsta_params->apstamode;
+	char ifname[IFNAMSIZ+1];
+	struct wl_if_info *cur_if = NULL;
+	int i;
+
+	if (!apsta_params->init) {
+		ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
+		return -1;
+	}
+
+	ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	pick_tmp = command;
+	param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_disable
+	param = bcmstrtok(&pick_tmp, " ", 0);
+	while (param != NULL) {
+		if (!strcmp(param, "ifname")) {
+			pch = bcmstrtok(&pick_tmp, " ", 0);
+			if (pch)
+				strcpy(ifname, pch);
+			else {
+				ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
+				return -1;
+			}
+		}
+		param = bcmstrtok(&pick_tmp, " ", 0);
+	}
+
+	for (i=0; i<MAX_IF_NUM; i++) {
+		if (apsta_params->if_info[i].dev &&
+				!strcmp(apsta_params->if_info[i].dev->name, ifname)) {
+			cur_if = &apsta_params->if_info[i];
+			break;
+		}
+	}
+	if (!cur_if) {
+		ANDROID_ERROR(("%s: wrong ifname=%s or dev not ready\n", __FUNCTION__, ifname));
+		return -1;
+	}
+
+	ANDROID_MSG(("%s: %s[%c] Disabling\n", __FUNCTION__, ifname, cur_if->prefix));
+
+	if (cur_if->ifmode == ISTA_MODE) {
+		wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
+	} else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+		// deauthenticate all STA first
+		memcpy(scbval.ea.octet, &ether_bcast, ETHER_ADDR_LEN);
+		wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1);
+	}
+
+	if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+		wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+		wl_ext_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); // reset ssid
+		wl_ext_iovar_setint(dev, "mpc", 1);
+	} else if ((apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) &&
+			cur_if->ifmode == IAP_MODE) {
+		bss_setbuf.cfg = 0xffffffff;
+		bss_setbuf.val = htod32(0);
+		wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		wl_ext_iovar_setint(dev, "mpc", 1);
+#ifdef ARP_OFFLOAD_SUPPORT
+		/* IF SoftAP is disabled, enable arpoe back for STA mode. */
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+		dhd_arp_offload_enable(dhd, TRUE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (dhd->conf->disable_proptx!=0) {
+			bool enabled;
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (enabled) {
+				dhd_wlfc_deinit(dhd);
+			}
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+	else if (apstamode == IDUALAP_MODE) {
+		bss_setbuf.cfg = 0xffffffff;
+		bss_setbuf.val = htod32(0);
+		wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+#ifdef WLMESH
+	} else if (apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
+			apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE ||
+			apstamode == ISTAAPAP_MODE) {
+		bss_setbuf.cfg = 0xffffffff;
+		bss_setbuf.val = htod32(0);
+		wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+#endif
+	}
+#ifdef WLMESH
+	if ((cur_if->ifmode == IMESH_MODE) &&
+			(apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
+			apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE)) {
+		int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+		for (i=0; i<MAX_IF_NUM; i++) {
+			if (apsta_params->if_info[i].dev &&
+					apsta_params->if_info[i].ifmode == ISTA_MODE) {
+				struct wl_if_info *tmp_if = &apsta_params->if_info[i];
+				wl_ext_ioctl(tmp_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
+					&scan_assoc_time, sizeof(scan_assoc_time), 1);
+			}
+		}
+	}
+#endif
+
+	cur_if->ifstate = IF_STATE_DISALBE;
+
+	ANDROID_MSG(("%s: %s[%c] disabled\n", __FUNCTION__, ifname, cur_if->prefix));
+
+	return 0;
+}
+
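+/* TRUE when chan1 and chan2 fall in different bands (2.4 GHz vs 5 GHz). */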
+static bool
+wl_ext_diff_band(uint16 chan1, uint16 chan2)
+{
+	if ((chan1 <= CH_MAX_2G_CHANNEL && chan2 > CH_MAX_2G_CHANNEL) ||
+		(chan1 > CH_MAX_2G_CHANNEL && chan2 <= CH_MAX_2G_CHANNEL)) {
+		return TRUE;
+	}
+	return FALSE;
+}
+
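+/*
+ * Return the channel target_if is actually on when sharing the radio with
+ * cur_if would need VSDB: a different band only matters on non-RSDB chips,
+ * a different channel in the same band always does. Returns 0 otherwise.
+ */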
+static uint16
+wl_ext_get_vsdb_chan(struct wl_apsta_params *apsta_params,
+	struct wl_if_info *cur_if, struct wl_if_info *target_if)
+{
+	uint16 target_chan = 0, cur_chan = cur_if->channel;
+
+	target_chan = wl_ext_get_chan(apsta_params, target_if->dev);
+	if (target_chan) {
+		ANDROID_INFO(("%s: cur_chan=%d, target_chan=%d\n", __FUNCTION__,
+			cur_chan, target_chan));
+		if (wl_ext_diff_band(cur_chan, target_chan)) {
+			if (!apsta_params->rsdb)
+				return target_chan;
+		} else {
+			if (target_chan != cur_chan)
+				return target_chan;
+		}
+	}
+
+	return 0;
+}
+
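+/*
+ * Ask the firmware to announce a channel switch (CSA) on an AP/mesh
+ * interface. DFS channels (52-148) are never switched to; the interface is
+ * brought down instead.
+ */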
+static int
+wl_ext_triger_csa(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	if (apsta_params->csa & CSA_DRV_BIT &&
+			(cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE)) {
+		if (!cur_if->channel) {
+			ANDROID_MSG(("%s: %s[%c] skip channel %d\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix, cur_if->channel));
+		} else if (cur_if->channel >= 52 && cur_if->channel <= 148) {
+			ANDROID_MSG(("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix, cur_if->channel));
+			wl_ext_if_down(apsta_params, cur_if);
+		} else {
+			wl_chan_switch_t csa_arg;
+			memset(&csa_arg, 0, sizeof(csa_arg));
+			csa_arg.mode = 1;
+			csa_arg.count = 3;
+			csa_arg.chspec = wl_ext_get_chanspec(apsta_params, cur_if->dev,
+				cur_if->channel);
+			if (csa_arg.chspec) {
+				ANDROID_MSG(("%s: Trigger CSA to channel %d(0x%x)\n", __FUNCTION__,
+					cur_if->channel, csa_arg.chspec));
+				wl_ext_iovar_setbuf(cur_if->dev, "csa", &csa_arg, sizeof(csa_arg),
+					iovar_buf, sizeof(iovar_buf), NULL);
+				OSL_SLEEP(500);
+				wl_ext_isam_status(cur_if->dev, NULL, 0);
+			} else {
+				ANDROID_ERROR(("%s: fail to get chanspec\n", __FUNCTION__));
+			}
+		}
+	}
+
+	return 0;
+}
+
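+/*
+ * Work out the channel cur_if should come up on: without VSDB it has to
+ * follow the channel of any conflicting higher-priority interface, and
+ * AP/mesh interfaces never start on a DFS channel (52-148). Returns the
+ * (possibly cleared) channel.
+ */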
+static uint16
+wl_ext_move_cur_channel(struct wl_apsta_params *apsta_params,
+	struct net_device *dev, struct wl_if_info *cur_if)
+{
+	struct wl_if_info *tmp_if, *target_if = NULL;
+	uint16 tmp_chan, target_chan = 0;
+	wl_prio_t max_prio;
+	int i;
+
+	if (apsta_params->vsdb) {
+		target_chan = cur_if->channel;
+		goto exit;
+	}
+
+	// find the max prio
+	max_prio = cur_if->prio;
+	for (i=0; i<MAX_IF_NUM; i++) {
+		tmp_if = &apsta_params->if_info[i];
+		if (tmp_if->ifstate >= IF_STATE_INIT && cur_if != tmp_if &&
+				tmp_if->prio > max_prio) {
+			tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
+			if (tmp_chan) {
+				target_if = tmp_if;
+				target_chan = tmp_chan;
+				max_prio = tmp_if->prio;
+			}
+		}
+	}
+	if (target_chan) {
+		tmp_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+		if (apsta_params->rsdb && tmp_chan &&
+				wl_ext_diff_band(tmp_chan, target_chan)) {
+			ANDROID_MSG(("%s: %s[%c] keep on current channel %d\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix, tmp_chan));
+			cur_if->channel = 0;
+		} else {
+			ANDROID_MSG(("%s: %s[%c] channel=%d => %s[%c] channel=%d\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix, cur_if->channel,
+				target_if->ifname, target_if->prefix, target_chan));
+			cur_if->channel = target_chan;
+		}
+	}
+exit:
+	if ((cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) &&
+			(cur_if->channel >= 52 && cur_if->channel <= 148)) {
+		ANDROID_MSG(("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
+			cur_if->ifname, cur_if->prefix, cur_if->channel));
+		cur_if->channel = 0;
+	}
+
+	return cur_if->channel;
+}
+
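+/*
+ * Move conflicting interfaces of lower (or equal) priority onto cur_if's
+ * channel, either by down/up cycling them (csa == 0) or by triggering a CSA.
+ */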
+static void
+wl_ext_move_other_channel(struct wl_apsta_params *apsta_params,
+	struct net_device *dev, struct wl_if_info *cur_if)
+{
+	struct wl_if_info *tmp_if, *target_if=NULL;
+	uint16 tmp_chan, target_chan = 0;
+	wl_prio_t max_prio = 0, cur_prio;
+	int i;
+
+	if (apsta_params->vsdb || !cur_if->channel) {
+		return;
+	}
+
+	// find the max prio, but lower than cur_if
+	cur_prio = cur_if->prio;
+	for (i=0; i<MAX_IF_NUM; i++) {
+		tmp_if = &apsta_params->if_info[i];
+		if (tmp_if->ifstate >= IF_STATE_INIT && cur_if != tmp_if &&
+				tmp_if->prio >= max_prio && tmp_if->prio <= cur_prio) {
+			tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
+			if (tmp_chan) {
+				target_if = tmp_if;
+				target_chan = tmp_chan;
+				max_prio = tmp_if->prio;
+			}
+		}
+	}
+
+	if (target_if) {
+		ANDROID_MSG(("%s: %s channel=%d => %s channel=%d\n", __FUNCTION__,
+			target_if->ifname, target_chan, cur_if->ifname, cur_if->channel));
+		target_if->channel = cur_if->channel;
+		if (apsta_params->csa == 0) {
+			wl_ext_if_down(apsta_params, target_if);
+			wl_ext_move_other_channel(apsta_params, dev, target_if);
+			if (target_if->ifmode == ISTA_MODE || target_if->ifmode == IMESH_MODE) {
+				wl_ext_enable_iface(target_if->dev, target_if->ifname);
+			} else if (target_if->ifmode == IAP_MODE) {
+				wl_ext_if_up(apsta_params, target_if);
+			}
+		} else {
+			wl_ext_triger_csa(apsta_params, target_if);
+		}
+	}
+}
+
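+/*
+ * Bring an interface up by name: settle its channel against the other
+ * interfaces, relocate the ones it conflicts with, then program SSID,
+ * chanspec, auth/crypto and finally start the STA/AP/mesh role.
+ */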
+static int
+wl_ext_enable_iface(struct net_device *dev, char *ifname)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	int i;
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	wlc_ssid_t ssid = { 0, {0} };
+	chanspec_t fw_chspec;
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	apstamode_t apstamode = apsta_params->apstamode;
+	struct wl_if_info *cur_if = NULL;
+	uint16 cur_chan;
+	struct wl_conn_info conn_info;
+
+	for (i=0; i<MAX_IF_NUM; i++) {
+		if (apsta_params->if_info[i].dev &&
+				!strcmp(apsta_params->if_info[i].dev->name, ifname)) {
+			cur_if = &apsta_params->if_info[i];
+			break;
+		}
+	}
+	if (!cur_if) {
+		ANDROID_ERROR(("%s: wrong ifname=%s or dev not ready\n", __FUNCTION__, ifname));
+		return -1;
+	}
+
+	wl_ext_isam_status(cur_if->dev, NULL, 0);
+	ANDROID_MSG(("%s: %s[%c] Enabling\n", __FUNCTION__, ifname, cur_if->prefix));
+
+	wl_ext_move_cur_channel(apsta_params, dev, cur_if);
+	if (!cur_if->channel && cur_if->ifmode != ISTA_MODE) {
+		return 0;
+	}
+
+	cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+	if (cur_chan) {
+		ANDROID_INFO(("%s: Associated!\n", __FUNCTION__));
+		if (cur_chan != cur_if->channel) {
+			wl_ext_triger_csa(apsta_params, cur_if);
+		}
+		return 0;
+	}
+
+	wl_ext_move_other_channel(apsta_params, dev, cur_if);
+
+	if (cur_if->ifidx > 0) {
+		wl_ext_iovar_setbuf(cur_if->dev, "cur_etheraddr", (u8 *)cur_if->dev->dev_addr,
+			ETHER_ADDR_LEN, iovar_buf, WLC_IOCTL_SMLEN, NULL);
+	}
+
+	// set ssid for AP
+	ssid.SSID_len = strlen(cur_if->ssid);
+	memcpy(ssid.SSID, cur_if->ssid, ssid.SSID_len);
+	if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+		wl_ext_iovar_setint(dev, "mpc", 0);
+		if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+			wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+		} else if (apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) {
+			wl_ext_iovar_setbuf_bsscfg(cur_if->dev, "ssid", &ssid, sizeof(ssid),
+				iovar_buf, WLC_IOCTL_SMLEN, cur_if->bssidx, NULL);
+		}
+	}
+
+	if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+		wl_ext_set_bgnmode(cur_if);
+		if (!cur_if->channel) {
+#ifdef WL_CFG80211
+			char *pick_tmp, *param;
+			char cmd[128];
+			uint16 cur_chan;
+			cur_chan = 1;
+			snprintf(cmd, 128, "get_best_channels");
+			wl_cfg80211_get_best_channels(dev, cmd, strlen(cmd));
+			pick_tmp = cmd;
+			param = bcmstrtok(&pick_tmp, " ", 0);
+			while (param != NULL) {
+				if (!strnicmp(param, "2g=", strlen("2g="))) {
+					cur_chan = (int)simple_strtol(param+strlen("2g="), NULL, 10);
+				} else if (!strnicmp(param, "5g=", strlen("5g="))) {
+					cur_chan = (int)simple_strtol(param+strlen("5g="), NULL, 10);
+				}
+				param = bcmstrtok(&pick_tmp, " ", 0);
+			}
+			cur_if->channel = cur_chan;
+#else
+			cur_if->channel = 1;
+#endif
+		}
+		wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, cur_if->channel,
+			&fw_chspec);
+	}
+
+	wl_ext_set_amode(cur_if);
+	wl_ext_set_emode(apsta_params, cur_if);
+
+	if (cur_if->ifmode == ISTA_MODE) {
+		conn_info.bssidx = cur_if->bssidx;
+		conn_info.channel = cur_if->channel;
+		memcpy(conn_info.ssid.SSID, cur_if->ssid, strlen(cur_if->ssid));
+		conn_info.ssid.SSID_len = strlen(cur_if->ssid);
+		memcpy(&conn_info.bssid, &cur_if->bssid, ETHER_ADDR_LEN);
+	}
+	if (cur_if->ifmode == IAP_MODE) {
+		if (cur_if->maxassoc >= 0)
+			wl_ext_iovar_setint(dev, "maxassoc", cur_if->maxassoc);
+		// terence: fix me, hidden does not work in dualAP mode
+		if (cur_if->hidden > 0) {
+			wl_ext_ioctl(cur_if->dev, WLC_SET_CLOSED, &cur_if->hidden,
+				sizeof(cur_if->hidden), 1);
+			ANDROID_MSG(("%s: Broadcast SSID: %s\n", __FUNCTION__,
+				cur_if->hidden ? "OFF":"ON"));
+		}
+	}
+
+	if (apstamode == ISTAONLY_MODE) {
+		wl_ext_connect(cur_if->dev, &conn_info);
+	} else if (apstamode == IAPONLY_MODE) {
+		wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+		wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+	} else if (apstamode == ISTAAP_MODE || apstamode == ISTAGO_MODE) {
+		if (cur_if->ifmode == ISTA_MODE) {
+			wl_ext_connect(cur_if->dev, &conn_info);
+		} else {
+			if (apsta_params->rsdb) {
+				wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+			} else {
+				bss_setbuf.cfg = htod32(cur_if->bssidx);
+				bss_setbuf.val = htod32(1);
+				wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf,
+					sizeof(bss_setbuf), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+			}
+#ifdef ARP_OFFLOAD_SUPPORT
+			/* IF SoftAP is enabled, disable arpoe */
+			dhd_arp_offload_set(dhd, 0);
+			dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+			if (!apsta_params->rsdb && !disable_proptx) {
+				bool enabled;
+				dhd_wlfc_get_enable(dhd, &enabled);
+				if (!enabled) {
+					dhd_wlfc_init(dhd);
+					wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+				}
+			}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+		}
+	}
+	else if (apstamode == IDUALAP_MODE) {
+		wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+	} else if (apstamode == ISTAAPAP_MODE) {
+		if (cur_if->ifmode == ISTA_MODE) {
+			wl_ext_connect(cur_if->dev, &conn_info);
+		} else if (cur_if->ifmode == IAP_MODE) {
+			wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+		} else {
+			ANDROID_ERROR(("%s: wrong ifmode %d\n", __FUNCTION__, cur_if->ifmode));
+		}
+#ifdef WLMESH
+	} else if (apstamode == IMESHONLY_MODE ||
+			apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
+			apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE) {
+		if (cur_if->ifmode == ISTA_MODE) {
+			wl_ext_connect(cur_if->dev, &conn_info);
+		} else if (cur_if->ifmode == IAP_MODE) {
+			wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
+		} else if (cur_if->ifmode == IMESH_MODE) {
+			struct wl_join_params join_params;
+			// need to bring the interface up before setting the ssid
+			memset(&join_params, 0, sizeof(join_params));
+			join_params.ssid.SSID_len = strlen(cur_if->ssid);
+			memcpy((void *)join_params.ssid.SSID, cur_if->ssid, strlen(cur_if->ssid));
+			join_params.params.chanspec_list[0] = fw_chspec;
+			join_params.params.chanspec_num = 1;
+			wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &join_params, sizeof(join_params), 1);
+		} else {
+			ANDROID_ERROR(("%s: wrong ifmode %d\n", __FUNCTION__, cur_if->ifmode));
+		}
+#endif
+	}
+#ifdef WLMESH
+	if ((cur_if->ifmode == IMESH_MODE) &&
+			(apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
+			apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE)) {
+		int scan_assoc_time = 80;
+		for (i=0; i<MAX_IF_NUM; i++) {
+			if (apsta_params->if_info[i].dev &&
+					apsta_params->if_info[i].ifmode == ISTA_MODE) {
+				struct wl_if_info *tmp_if = &apsta_params->if_info[i];
+				wl_ext_ioctl(tmp_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
+					&scan_assoc_time, sizeof(scan_assoc_time), 1);
+			}
+		}
+	}
+#endif
+
+	OSL_SLEEP(500);
+	ANDROID_MSG(("%s: %s[%c] enabled with SSID: \"%s\"\n", __FUNCTION__,
+		ifname, cur_if->prefix, cur_if->ssid));
+	wl_ext_isam_status(cur_if->dev, NULL, 0);
+
+	cur_if->ifstate = IF_STATE_ENABLE;
+
+	return 0;
+}
+
+static int
+wl_ext_iapsta_enable(struct net_device *dev, char *command, int total_len)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	int ret = 0;
+	char *pch, *pick_tmp, *param;
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	char ifname[IFNAMSIZ+1];
+
+	if (!apsta_params->init) {
+		ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
+		return -1;
+	}
+
+	ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	pick_tmp = command;
+	param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_enable
+	param = bcmstrtok(&pick_tmp, " ", 0);
+	while (param != NULL) {
+		if (!strcmp(param, "ifname")) {
+			pch = bcmstrtok(&pick_tmp, " ", 0);
+			if (pch) {
+				strcpy(ifname, pch);
+				ret = wl_ext_enable_iface(dev, ifname);
+				if (ret)
+					return ret;
+			} else {
+				ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
+				return -1;
+			}
+		}
+		param = bcmstrtok(&pick_tmp, " ", 0);
+	}
+
+	return ret;
+}
+
+int
+wl_ext_iapsta_alive_preinit(struct net_device *dev)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	struct wl_if_info *cur_if;
+	int i;
+
+	if (apsta_params->init == TRUE) {
+		ANDROID_ERROR(("%s: don't init twice\n", __FUNCTION__));
+		return -1;
+	}
+
+	ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	for (i=0; i<MAX_IF_NUM; i++) {
+		cur_if = &apsta_params->if_info[i];
+		if (i == 1 && !strlen(cur_if->ifname))
+			strcpy(cur_if->ifname, "wlan1");
+		if (i == 2 && !strlen(cur_if->ifname))
+			strcpy(cur_if->ifname, "wlan2");
+		if (cur_if->ifmode == ISTA_MODE) {
+			cur_if->channel = 0;
+			cur_if->maxassoc = -1;
+			cur_if->ifstate = IF_STATE_INIT;
+			cur_if->prio = PRIO_STA;
+			cur_if->prefix = 'S';
+			snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
+		} else if (cur_if->ifmode == IAP_MODE) {
+			cur_if->channel = 1;
+			cur_if->maxassoc = -1;
+			cur_if->ifstate = IF_STATE_INIT;
+			cur_if->prio = PRIO_AP;
+			cur_if->prefix = 'A';
+			snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
+#ifdef WLMESH
+		} else if (cur_if->ifmode == IMESH_MODE) {
+			cur_if->channel = 1;
+			cur_if->maxassoc = -1;
+			cur_if->ifstate = IF_STATE_INIT;
+			cur_if->prio = PRIO_MESH;
+			cur_if->prefix = 'M';
+			snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
+#endif
+		}
+	}
+
+	apsta_params->init = TRUE;
+
+	return 0;
+}
+
+int
+wl_ext_iapsta_alive_postinit(struct net_device *dev)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	s32 apsta = 0;
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+
+	wl_ext_iovar_getint(dev, "apsta", &apsta);
+	if (apsta == 1) {
+		apsta_params->apstamode = ISTAONLY_MODE;
+		apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+		op_mode = DHD_FLAG_STA_MODE;
+	} else {
+		apsta_params->apstamode = IAPONLY_MODE;
+		apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
+		op_mode = DHD_FLAG_HOSTAP_MODE;
+	}
+	// fix me: how to check it's ISTAAP_MODE or IDUALAP_MODE?
+
+	wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
+	ANDROID_MSG(("%s: apstamode=%d\n", __FUNCTION__, apsta_params->apstamode));
+
+	return op_mode;
+}
+
+#if defined(WL_WIRELESS_EXT)
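+/*
+ * Translate selected firmware events into the payload sent up to user space
+ * as an IWEVCUSTOM wireless-extensions event; returns the payload length,
+ * or 0 when the event is not reported.
+ */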
+static uint32
+wl_ext_event_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen, void* data, uint32 datalen)
+{
+	int i;
+	uint32 event_len = 0;
+
+#ifdef SENDPROB
+	uint32 event_id[] = {DOT11_MNG_VS_ID};
+	typedef struct probreq_event_map_t {
+		uint8 event;
+		uint8 mgmt[DOT11_MGMT_HDR_LEN];
+		uint8 data[1]; // IE list
+	} probreq_event_map_t;
+#endif
+
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			/* input: event type to match */
+		uint32 inStatus;		/* input: event status code to match */
+		uint32 inReason;		/* input: event reason code to match */
+	} conn_fail_event_map_t;
+
+	/* Map of WLC_E events to connection failure strings */
+#define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map[] = {
+		/* inEvent				inStatus				inReason         */
+		{WLC_E_LINK,			WL_IW_DONT_CARE,	WL_IW_DONT_CARE},
+		{WLC_E_DEAUTH,			WL_IW_DONT_CARE,	WL_IW_DONT_CARE},
+		{WLC_E_DEAUTH_IND,		WL_IW_DONT_CARE,	WL_IW_DONT_CARE},
+		{WLC_E_DISASSOC,		WL_IW_DONT_CARE,	WL_IW_DONT_CARE},
+		{WLC_E_DISASSOC_IND,	WL_IW_DONT_CARE,	WL_IW_DONT_CARE},
+		{WLC_E_OVERLAY_REQ,		WL_IW_DONT_CARE,	WL_IW_DONT_CARE},
+		{WLC_E_ASSOC_IND,		WL_IW_DONT_CARE,	DOT11_SC_SUCCESS},
+		{WLC_E_REASSOC_IND,		WL_IW_DONT_CARE,	DOT11_SC_SUCCESS},
+	};
+
+#ifdef SENDPROB
+	if (event_type == WLC_E_PROBREQ_MSG) {
+		bcm_tlv_t *ie;
+		probreq_event_map_t *pevent = (probreq_event_map_t *)stringBuf;
+		char *pbuf;
+		int buf_len = buflen;
+		int num_ie = 0;
+		int totlen;
+
+		pevent->event = event_type;
+		memcpy(pevent->mgmt, data, DOT11_MGMT_HDR_LEN);
+		buf_len -= (DOT11_MGMT_HDR_LEN+1);
+		datalen -= DOT11_MGMT_HDR_LEN;
+		data += DOT11_MGMT_HDR_LEN;
+
+		pbuf = pevent->data;
+#if 1 // non-sort by id
+		ie = (bcm_tlv_t*)data;
+		totlen = datalen;
+		while (ie && totlen >= TLV_HDR_LEN) {
+			int ie_id = -1;
+			int ie_len = ie->len + TLV_HDR_LEN;
+			for (i=0; i<sizeof(event_id)/sizeof(event_id[0]); i++) {
+				if (ie->id == event_id[i]) {
+					ie_id = ie->id;
+					break;
+				}
+			}
+			if ((ie->id == ie_id) && (totlen >= ie_len) && (buf_len >= ie_len)) {
+				memcpy(pbuf, ie, ie_len);
+				pbuf += ie_len;
+				buf_len -= ie_len;
+				num_ie++;
+			}
+			ie = (bcm_tlv_t*)((uint8*)ie + ie_len);
+			totlen -= ie_len;
+		}
+#else // sort by id
+		for (i = 0; i < sizeof(event_id)/sizeof(event_id[0]); i++) {
+			void *pdata = data;
+			int data_len = datalen;
+			while (buf_len > 0) {
+				ie = bcm_parse_tlvs(pdata, data_len, event_id[i]);
+				if (!ie)
+					break;
+				if (buf_len < (ie->len+TLV_HDR_LEN)) {
+					ANDROID_TRACE(("%s: buffer is not enough\n", __FUNCTION__));
+					break;
+				}
+				memcpy(pbuf, ie, min(ie->len+TLV_HDR_LEN, buf_len));
+				pbuf += (ie->len+TLV_HDR_LEN);
+				buf_len -= (ie->len+TLV_HDR_LEN);
+				data_len -= (((void *)ie-pdata) + (ie->len+TLV_HDR_LEN));
+				pdata = (char *)ie + (ie->len+TLV_HDR_LEN);
+				num_ie++;
+			}
+		}
+#endif
+		if (num_ie)
+			event_len = buflen - buf_len;
+		return event_len;
+	}
+#endif
+
+	/* Search the event map table for a matching event */
+	for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			memset(stringBuf, 0, buflen);
+			event_len += snprintf(stringBuf, buflen, "isam_event event=%d reason=%d",
+				event_type, reason);
+			return event_len;
+		}
+	}
+
+	return event_len;
+}
+#endif /* WL_WIRELESS_EXT */
+
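+/*
+ * Per-interface event hook: log link and (dis)association changes and, with
+ * wireless extensions enabled, forward the event to user space.
+ */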
+int
+wl_ext_iapsta_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	struct wl_if_info *cur_if = NULL;
+	int i;
+#if defined(WL_WIRELESS_EXT)
+	char extra[IW_CUSTOM_MAX];
+	union iwreq_data wrqu;
+	uint32 datalen = ntoh32(e->datalen);
+#endif
+	uint32 event_type = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+	uint16 flags =  ntoh16(e->flags);
+
+	if (!apsta_params->init) {
+		return -1;
+	}
+
+	for (i=0; i<MAX_IF_NUM; i++) {
+		if (apsta_params->if_info[i].ifidx == e->ifidx) {
+			cur_if = &apsta_params->if_info[i];
+			break;
+		}
+	}
+	if (!cur_if || !cur_if->dev) {
+		ANDROID_ERROR(("%s: %s ifidx %d is not ready\n", __FUNCTION__,
+			dev->name, e->ifidx));
+		return -1;
+	}
+
+	if (cur_if->ifmode == ISTA_MODE) {
+		if (event_type == WLC_E_LINK) {
+			if (!(flags & WLC_EVENT_MSG_LINK)) {
+				ANDROID_MSG(("%s: %s[%c] Link Down with %pM\n", __FUNCTION__,
+					cur_if->ifname, cur_if->prefix, &e->addr));
+			} else {
+				ANDROID_MSG(("%s: %s[%c] Link UP with %pM\n", __FUNCTION__,
+					cur_if->ifname, cur_if->prefix, &e->addr));
+			}
+		}
+	}
+	else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+		if ((event_type == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
+				(event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+				reason == WLC_E_REASON_INITIAL_ASSOC)) {
+			ANDROID_MSG(("%s: %s[%c] Link up\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix));
+		} else if ((event_type == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
+				(event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
+				reason == WLC_E_REASON_DEAUTH)) {
+			ANDROID_MSG(("%s: %s[%c] Link down\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix));
+		}
+		else if ((event_type == WLC_E_ASSOC_IND || event_type == WLC_E_REASSOC_IND) &&
+				reason == DOT11_SC_SUCCESS) {
+			ANDROID_MSG(("%s: %s[%c] connected device %pM\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix, &e->addr));
+		} else if (event_type == WLC_E_DISASSOC_IND) {
+			ANDROID_MSG(("%s: %s[%c] disassociated device %pM\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix, &e->addr));
+		} else if (event_type == WLC_E_DEAUTH_IND ||
+				(event_type == WLC_E_DEAUTH && reason != DOT11_RC_RESERVED)) {
+			ANDROID_MSG(("%s: %s[%c] deauthenticated device %pM\n", __FUNCTION__,
+				cur_if->ifname, cur_if->prefix, &e->addr));
+		}
+	}
+
+#if defined(WL_WIRELESS_EXT)
+	memset(extra, 0, sizeof(extra));
+	memset(&wrqu, 0, sizeof(wrqu));
+	memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+	wrqu.data.length = wl_ext_event_str(event_type, status, reason,
+		extra, sizeof(extra), data, datalen);
+	if (wrqu.data.length) {
+		wireless_send_event(cur_if->dev, IWEVCUSTOM, &wrqu, extra);
+		ANDROID_EVENT(("%s: %s[%c] event=%d, status=%d, reason=%d, flags=%d sent up\n",
+			__FUNCTION__, cur_if->ifname, cur_if->prefix, event_type, status,
+			reason, flags));
+	} else
+#endif /* WL_WIRELESS_EXT */
+	{
+		ANDROID_EVENT(("%s: %s[%c] event=%d, status=%d, reason=%d, flags=%d\n",
+			__FUNCTION__, cur_if->ifname, cur_if->prefix, event_type, status,
+			reason, flags));
+	}
+
+	return 0;
+}
+
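+/*
+ * Record the channel just chosen for this interface and rerun the channel
+ * arbitration: cur_if may be forced onto another channel and lower-priority
+ * interfaces may be moved out of its way. Returns the channel to use.
+ */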
+u32
+wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
+	int i;
+
+	for (i=0; i<MAX_IF_NUM; i++) {
+		tmp_if = &apsta_params->if_info[i];
+		if (tmp_if->dev && tmp_if->dev == dev) {
+			cur_if = tmp_if;
+			break;
+		}
+	}
+
+	if (cur_if) {
+		wl_ext_isam_status(cur_if->dev, NULL, 0);
+		cur_if->channel = channel;
+		channel = wl_ext_move_cur_channel(apsta_params,
+			apsta_params->if_info[IF_PIF].dev, cur_if);
+		if (channel)
+			wl_ext_move_other_channel(apsta_params,
+				apsta_params->if_info[IF_PIF].dev, cur_if);
+	}
+
+	return channel;
+}
+
+int
+wl_ext_iapsta_attach_name(struct net_device *net, int ifidx)
+{
+	struct dhd_pub *dhd = dhd_get_pub(net);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	struct wl_if_info *cur_if = NULL;
+
+	ANDROID_TRACE(("%s: ifidx=%d, %s\n", __FUNCTION__, ifidx, net->name));
+	if (ifidx < MAX_IF_NUM) {
+		cur_if = &apsta_params->if_info[ifidx];
+	}
+	if (ifidx == 0) {
+		if (dhd->conf->fw_type == FW_TYPE_MESH) {
+			apsta_params->rsdb = TRUE;
+			apsta_params->csa = CSA_FW_BIT | CSA_DRV_BIT;
+		}
+		strcpy(cur_if->ifname, net->name);
+	} else if (cur_if && cur_if->ifstate == IF_STATE_INIT) {
+		strcpy(cur_if->ifname, net->name);
+		apsta_params->netif_change = TRUE;
+		wake_up_interruptible(&apsta_params->netif_change_event);
+	}
+
+	return 0;
+}
+
+int
+wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
+{
+	struct dhd_pub *dhd = dhd_get_pub(net);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	struct wl_if_info *cur_if = NULL, *primary_if;
+
+	ANDROID_MSG(("%s: ifidx=%d, bssidx=%d\n", __FUNCTION__, ifidx, bssidx));
+	if (ifidx < MAX_IF_NUM) {
+		cur_if = &apsta_params->if_info[ifidx];
+	}
+	if (ifidx == 0) {
+		memset(apsta_params, 0, sizeof(struct wl_apsta_params));
+		apsta_params->dhd = dhd;
+		apsta_params->vsdb = FALSE;
+		cur_if->dev = net;
+		cur_if->ifidx = ifidx;
+		cur_if->bssidx = bssidx;
+		strcpy(cur_if->ifname, net->name);
+		init_waitqueue_head(&apsta_params->netif_change_event);
+	} else if (cur_if && cur_if->ifstate == IF_STATE_INIT) {
+		primary_if = &apsta_params->if_info[IF_PIF];
+		cur_if->dev = net;
+		cur_if->ifidx = ifidx;
+		cur_if->bssidx = bssidx;
+		if (strlen(cur_if->ifname)) {
+			memset(net->name, 0, IFNAMSIZ);
+			strcpy(net->name, cur_if->ifname);
+			net->name[IFNAMSIZ-1] = '\0';
+		}
+		if (apsta_params->apstamode != ISTAAPAP_MODE) {
+			memcpy(net->dev_addr, primary_if->dev->dev_addr, ETHER_ADDR_LEN);
+			net->dev_addr[0] |= 0x02;
+			if (ifidx >= 2) {
+				net->dev_addr[4] ^= 0x80;
+				net->dev_addr[4] += ifidx;
+				net->dev_addr[5] += (ifidx-1);
+			}
+		}
+		if (cur_if->ifmode == ISTA_MODE) {
+			int pm;
+			wl_ext_iovar_setint(net, "roam_off", dhd->conf->roam_off);
+			wl_ext_iovar_setint(net, "bcn_timeout", dhd->conf->bcn_timeout);
+			if (dhd->conf->pm >= 0)
+				pm = dhd->conf->pm;
+			else
+				pm = PM_FAST;
+			wl_ext_ioctl(net, WLC_SET_PM, &pm, sizeof(pm), 1);
+			wl_ext_iovar_setint(net, "assoc_retry_max", 30);
+#ifdef WLMESH
+		} else if (cur_if->ifmode == IMESH_MODE) {
+			int pm = 0;
+			wl_ext_ioctl(net, WLC_SET_PM, &pm, sizeof(pm), 1);
+#endif
+		}
+	}
+
+	return 0;
+}
+
+int
+wl_ext_iapsta_dettach_netdev(struct net_device *net, int ifidx)
+{
+	struct dhd_pub *dhd = dhd_get_pub(net);
+	struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+	struct wl_if_info *cur_if = NULL;
+
+	if (!apsta_params)
+		return 0;
+
+	ANDROID_MSG(("%s: ifidx=%d\n", __FUNCTION__, ifidx));
+	if (ifidx < MAX_IF_NUM) {
+		cur_if = &apsta_params->if_info[ifidx];
+	}
+
+	if (ifidx == 0) {
+		memset(apsta_params, 0, sizeof(struct wl_apsta_params));
+	} else if (cur_if && cur_if->ifstate >= IF_STATE_INIT) {
+		memset(cur_if, 0, sizeof(struct wl_if_info));
+	}
+
+	return 0;
+}
+
+int wl_ext_iapsta_attach(dhd_pub_t *pub)
+{
+	struct wl_apsta_params *iapsta_params;
+
+	iapsta_params = kzalloc(sizeof(struct wl_apsta_params), GFP_KERNEL);
+	if (unlikely(!iapsta_params)) {
+		ANDROID_ERROR(("%s: Could not allocate apsta_params\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+	pub->iapsta_params = (void *)iapsta_params;
+
+	return 0;
+}
+
+void wl_ext_iapsta_dettach(dhd_pub_t *pub)
+{
+	if (pub->iapsta_params) {
+		kfree(pub->iapsta_params);
+		pub->iapsta_params = NULL;
+	}
+}
+#endif
+
+#ifdef IDHCP
+int
+wl_ext_ip_dump(int ip, char *buf)
+{
+	unsigned char bytes[4];
+	int bytes_written=-1;
+
+	bytes[0] = ip & 0xFF;
+	bytes[1] = (ip >> 8) & 0xFF;
+	bytes[2] = (ip >> 16) & 0xFF;
+	bytes[3] = (ip >> 24) & 0xFF;
+	bytes_written = sprintf(buf, "%d.%d.%d.%d", bytes[0], bytes[1], bytes[2], bytes[3]);
+
+	return bytes_written;
+}
+
+/*
+terence 20170215:
+dhd_priv dhcpc_dump ifname [wlan0|wlan1]
+dhd_priv dhcpc_enable [0|1]
+*/
+int
+wl_ext_dhcpc_enable(struct net_device *dev, char *command, int total_len)
+{
+	int enable = -1, ret = -1;
+	int bytes_written = -1;
+
+	ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+
+	sscanf(command, "%*s %d", &enable);
+
+	if (enable >= 0)
+		ret = wl_ext_iovar_setint(dev, "dhcpc_enable", enable);
+	else {
+		ret = wl_ext_iovar_getint(dev, "dhcpc_enable", &enable);
+		if (!ret) {
+			bytes_written = snprintf(command, total_len, "%d", enable);
+			ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+			ret = bytes_written;
+		}
+	}
+
+	return ret;
+}
+
+int
+wl_ext_dhcpc_dump(struct net_device *dev, char *command, int total_len)
+{
+	int ret = 0;
+	int bytes_written = 0;
+	uint32 ip_addr;
+	char buf[20]="";
+
+	ret = wl_ext_iovar_getint(dev, "dhcpc_ip_addr", &ip_addr);
+	if (!ret) {
+		wl_ext_ip_dump(ip_addr, buf);
+		bytes_written += snprintf(command+bytes_written, total_len, "ipaddr %s ", buf);
+	}
+
+	ret = wl_ext_iovar_getint(dev, "dhcpc_ip_mask", &ip_addr);
+	if (!ret) {
+		wl_ext_ip_dump(ip_addr, buf);
+		bytes_written += snprintf(command+bytes_written, total_len, "mask %s ", buf);
+	}
+
+	ret = wl_ext_iovar_getint(dev, "dhcpc_ip_gateway", &ip_addr);
+	if (!ret) {
+		wl_ext_ip_dump(ip_addr, buf);
+		bytes_written += snprintf(command+bytes_written, total_len, "gw %s ", buf);
+	}
+
+	ret = wl_ext_iovar_getint(dev, "dhcpc_ip_dnsserv", &ip_addr);
+	if (!ret) {
+		wl_ext_ip_dump(ip_addr, buf);
+		bytes_written += snprintf(command+bytes_written, total_len, "dnsserv %s ", buf);
+	}
+
+	if (!bytes_written)
+		bytes_written = -1;
+
+	ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+
+	return bytes_written;
+}
+#endif
+
+static int
+wl_ext_rsdb_mode(struct net_device *dev, char *data, char *command,
+	int total_len)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	wl_config_t rsdb_mode_cfg = {1, 0}, *rsdb_p;
+	int ret = 0;
+
+	ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (data) {
+		rsdb_mode_cfg.config = (int)simple_strtol(data, NULL, 0);
+		ret = wl_ext_iovar_setbuf(dev, "rsdb_mode", (char *)&rsdb_mode_cfg,
+			sizeof(rsdb_mode_cfg), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		ANDROID_MSG(("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config));
+	} else {
+		ret = wl_ext_iovar_getbuf(dev, "rsdb_mode", NULL, 0,
+			iovar_buf, WLC_IOCTL_SMLEN, NULL);
+		if (!ret) {
+			rsdb_p = (wl_config_t *) iovar_buf;
+			ret = snprintf(command, total_len, "%d", rsdb_p->config);
+			ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
+				command));
+		}
+	}
+
+	return ret;
+}
+
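+/* Set or clear one event bit in the firmware "event_msgs" mask. */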
+static s32
+wl_ext_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	s32 err = 0;
+
+	if (!ndev)
+		return -ENODEV;
+
+	/* Setup event_msgs */
+	err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Get event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+	if (add) {
+		setbit(eventmask, event);
+	} else {
+		clrbit(eventmask, event);
+	}
+	err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+			sizeof(iovbuf), NULL);
+	if (unlikely(err)) {
+		ANDROID_ERROR(("Set event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+
+eventmsg_out:
+	return err;
+}
+
+static int
+wl_ext_event_msg(struct net_device *dev, char *data,
+	char *command, int total_len)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	int i, bytes_written = 0, add = -1;
+	uint event;
+	char *vbuf;
+	bool skipzeros = TRUE;	/* skip leading zero bytes of the mask */
+
+	/* dhd_priv wl event_msg [offset] [1/0, 1 for add, 0 for remove] */
+	/* dhd_priv wl event_msg 40 1 */
+	if (data) {
+		ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, data));
+		sscanf(data, "%d %d", &event, &add);
+		/* Setup event_msgs */
+		bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
+			sizeof(iovbuf), NULL);
+		if (unlikely(bytes_written)) {
+			ANDROID_ERROR(("Get event_msgs error (%d)\n", bytes_written));
+			goto eventmsg_out;
+		}
+		memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+		if (add == -1) {
+			if (isset(eventmask, event))
+				bytes_written += snprintf(command+bytes_written, total_len, "1");
+			else
+				bytes_written += snprintf(command+bytes_written, total_len, "0");
+			ANDROID_INFO(("%s\n", command));
+			goto eventmsg_out;
+		}
+		bytes_written = wl_ext_add_remove_eventmsg(dev, event, add);
+	}
+	else {
+		/* Setup event_msgs */
+		bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
+			sizeof(iovbuf), NULL);
+		if (bytes_written) {
+			ANDROID_ERROR(("Get event_msgs error (%d)\n", bytes_written));
+			goto eventmsg_out;
+		}
+		vbuf = (char *)iovbuf;
+		bytes_written += snprintf(command+bytes_written, total_len, "0x");
+		for (i = (sizeof(eventmask) - 1); i >= 0; i--) {
+			if (vbuf[i] || (i == 0))
+				skipzeros = FALSE;
+			if (skipzeros)
+				continue;
+			bytes_written += snprintf(command+bytes_written, total_len,
+				"%02x", vbuf[i] & 0xff);
+		}
+		ANDROID_INFO(("%s\n", command));
+	}
+
+eventmsg_out:
+	return bytes_written;
+}
+
+#ifdef SENDPROB
+static int
+wl_ext_send_probresp(struct net_device *dev, char *data, char *command, int total_len)
+{
+	int err = 0, buf_len, ie_data_len = 0;
+	char addr_str[16], addr[6];
+	char buf[WLC_IOCTL_SMLEN]="\0", iovar_buf[WLC_IOCTL_SMLEN]="\0";
+	char ie_data[WLC_IOCTL_SMLEN] = "\0";
+	uint8 add_ie_header[] = {
+		0x61, 0x64, 0x64, 0x00, /* "add" */
+		0x01, 0x00, 0x00, 0x00, /* 0x0000001: 1 element */
+		0x02, 0x00, 0x00, 0x00, /* 0x0000002: apply to probe response only */
+		0xdd /* 221 */
+	};
+	uint8 del[] = {0x64, 0x65, 0x6c};
+
+	/* dhd_priv wl send_probresp [dest. addr] [OUI+VAL] */
+	/* dhd_priv wl send_probresp 0x00904c010203 0x00904c01020304050607 */
+	if (data) {
+		ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, data));
+		sscanf(data, "%s %s", addr_str, ie_data);
+		ANDROID_TRACE(("%s: addr=%s, ie=%s\n", __FUNCTION__, addr_str, ie_data));
+
+		if (strlen(addr_str) != 14 || !strlen(ie_data)) {
+			ANDROID_ERROR(("%s: wrong addr %s or ie %s\n", __FUNCTION__,
+				addr_str, ie_data));
+			goto exit;
+		}
+		wl_ext_pattern_atoh(addr_str, (char *) addr);
+
+		buf_len = sizeof(add_ie_header);
+		memcpy(buf, add_ie_header, buf_len);
+
+		buf[buf_len] = (strlen(ie_data)-2)/2;
+		buf_len++;
+
+		ie_data_len = wl_ext_pattern_atoh(ie_data, (char *) &buf[buf_len]);
+		if (ie_data_len <= 0) {
+			ANDROID_ERROR(("%s: wrong ie_data_len %d\n", __FUNCTION__, (int)(strlen(ie_data)-2)));
+			goto exit;
+		}
+		buf_len += ie_data_len;
+		err = wl_ext_iovar_setbuf(dev, "ie", buf, buf_len,
+			iovar_buf, sizeof(iovar_buf), NULL);
+		if (err) {
+			ANDROID_ERROR(("%s: failed to add ie %s\n", __FUNCTION__, ie_data));
+			goto exit;
+		}
+		err = wl_ext_iovar_setbuf(dev, "send_probresp", addr, sizeof(addr),
+			iovar_buf, sizeof(iovar_buf), NULL);
+
+		memcpy(buf, del, sizeof(del));
+		err = wl_ext_iovar_setbuf(dev, "ie", buf, buf_len,
+			iovar_buf, sizeof(iovar_buf), NULL);
+		if (err) {
+			ANDROID_ERROR(("%s: failed to del ie %s\n", __FUNCTION__, ie_data));
+			goto exit;
+		}
+	}
+
+exit:
+	return err;
+}
+#endif
+
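+/*
+ * Hand the GTK rekey material (KCK, KEK, replay counter) to the firmware,
+ * preferring the bcol_gtk_rekey_ptk iovar and falling back to gtk_key_info.
+ */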
+static int
+wl_ext_gtk_key_info(struct net_device *dev, char *data, char *command, int total_len)
+{
+	int err = 0;
+	char iovar_buf[WLC_IOCTL_SMLEN]="\0";
+	gtk_keyinfo_t keyinfo;
+	bcol_gtk_para_t bcol_keyinfo;
+
+	/* wl gtk_key_info [kck kek replay_ctr] */
+	/* wl gtk_key_info 001122..FF001122..FF00000000000001 */
+	if (data) {
+		memset(&keyinfo, 0, sizeof(keyinfo));
+		memcpy(&keyinfo, data, RSN_KCK_LENGTH+RSN_KEK_LENGTH+RSN_REPLAY_LEN);
+		if (android_msg_level & ANDROID_INFO_LEVEL) {
+			prhex("kck", (uchar *)keyinfo.KCK, RSN_KCK_LENGTH);
+			prhex("kek", (uchar *)keyinfo.KEK, RSN_KEK_LENGTH);
+			prhex("replay_ctr", (uchar *)keyinfo.ReplayCounter, RSN_REPLAY_LEN);
+		}
+
+		memset(&bcol_keyinfo, 0, sizeof(bcol_keyinfo));
+		bcol_keyinfo.enable = 1;
+		bcol_keyinfo.ptk_len = 64;
+		memcpy(&bcol_keyinfo.ptk, data, RSN_KCK_LENGTH+RSN_KEK_LENGTH);
+		err = wl_ext_iovar_setbuf(dev, "bcol_gtk_rekey_ptk", &bcol_keyinfo,
+			sizeof(bcol_keyinfo), iovar_buf, sizeof(iovar_buf), NULL);
+		if (!err) {
+			goto exit;
+		}
+
+		err = wl_ext_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
+			iovar_buf, sizeof(iovar_buf), NULL);
+		if (err) {
+			ANDROID_ERROR(("%s: failed to set gtk_key_info\n", __FUNCTION__));
+			goto exit;
+		}
+	}
+
+exit:
+	return err;
+}
+
+typedef int (wl_ext_tpl_parse_t)(struct net_device *dev, char *data, char *command,
+	int total_len);
+
+typedef struct wl_ext_iovar_tpl_t {
+	int get;
+	int set;
+	char *name;
+	wl_ext_tpl_parse_t *parse;
+} wl_ext_iovar_tpl_t;
+
+const wl_ext_iovar_tpl_t wl_ext_iovar_tpl_list[] = {
+	{WLC_GET_VAR,	WLC_SET_VAR,	"rsdb_mode",	wl_ext_rsdb_mode},
+#ifdef WLMESH
+	{WLC_GET_VAR,	WLC_SET_VAR,	"mesh_peer_status",	wl_ext_mesh_peer_status},
+#endif
+	{WLC_GET_VAR,	WLC_SET_VAR,	"event_msg",	wl_ext_event_msg},
+#ifdef SENDPROB
+	{WLC_GET_VAR,	WLC_SET_VAR,	"send_probresp",	wl_ext_send_probresp},
+#endif
+	{WLC_GET_VAR,	WLC_SET_VAR,	"gtk_key_info",	wl_ext_gtk_key_info},
+};
+
+/*
+Ex: dhd_priv wl [cmd] [val]
+  dhd_priv wl 85
+  dhd_priv wl 86 1
+  dhd_priv wl mpc
+  dhd_priv wl mpc 1
+*/
+int
+wl_ext_wl_iovar(struct net_device *dev, char *command, int total_len)
+{
+	int cmd, val, ret = -1, i;
+	char name[32], *pch, *pick_tmp, *data;
+	int bytes_written=-1;
+	const wl_ext_iovar_tpl_t *tpl = wl_ext_iovar_tpl_list;
+	int tpl_count = ARRAY_SIZE(wl_ext_iovar_tpl_list);
+
+	ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+	pick_tmp = command;
+
+	pch = bcmstrtok(&pick_tmp, " ", 0); // pick wl
+	if (!pch || strncmp(pch, "wl", 2))
+		goto exit;
+
+	pch = bcmstrtok(&pick_tmp, " ", 0); // pick cmd
+	if (!pch)
+		goto exit;
+
+	memset(name, 0 , sizeof (name));
+	cmd = (int)simple_strtol(pch, NULL, 0);
+	if (cmd == 0) {
+		strcpy(name, pch);
+	}
+	data = bcmstrtok(&pick_tmp, "", 0); // pick data
+	if (data && cmd == 0) {
+		cmd = WLC_SET_VAR;
+	} else if (cmd == 0) {
+		cmd = WLC_GET_VAR;
+	}
+
+	/* look for a matching code in the table */
+	for (i = 0; i < tpl_count; i++, tpl++) {
+		if ((tpl->get == cmd || tpl->set == cmd) && !strcmp(tpl->name, name))
+			break;
+	}
+	if (i < tpl_count && tpl->parse) {
+		ret = tpl->parse(dev, data, command, total_len);
+	} else {
+		if (cmd == WLC_SET_VAR) {
+			val = (int)simple_strtol(data, NULL, 0);
+			ANDROID_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val));
+			ret = wl_ext_iovar_setint(dev, name, val);
+		} else if (cmd == WLC_GET_VAR) {
+			ANDROID_TRACE(("%s: get %s\n", __FUNCTION__, name));
+			ret = wl_ext_iovar_getint(dev, name, &val);
+			if (!ret) {
+				bytes_written = snprintf(command, total_len, "%d", val);
+				ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
+					command));
+				ret = bytes_written;
+			}
+		} else if (data) {
+			val = (int)simple_strtol(data, NULL, 0);
+			ANDROID_TRACE(("%s: set %d %d\n", __FUNCTION__, cmd, val));
+			ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), TRUE);
+		} else {
+			ANDROID_TRACE(("%s: get %d\n", __FUNCTION__, cmd));
+			ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), FALSE);
+			if (!ret) {
+				bytes_written = snprintf(command, total_len, "%d", val);
+				ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
+					command));
+				ret = bytes_written;
+			}
+		}
+	}
+
+exit:
+	return ret;
+}
+
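+/*
+ * Dispatcher for the "dhd_priv" extension commands; returns -1 when the
+ * command is not handled here so the caller can try the standard handlers.
+ */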
+int wl_android_ext_priv_cmd(struct net_device *net, char *command,
+	int total_len, int *bytes_written)
+{
+	int ret = 0;
+
+	if (strnicmp(command, CMD_CHANNELS, strlen(CMD_CHANNELS)) == 0) {
+		*bytes_written = wl_ext_channels(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_CHANNEL, strlen(CMD_CHANNEL)) == 0) {
+		*bytes_written = wl_ext_channel(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_ROAM_TRIGGER, strlen(CMD_ROAM_TRIGGER)) == 0) {
+		*bytes_written = wl_ext_roam_trigger(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+		*bytes_written = wl_ext_keep_alive(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_PM, strlen(CMD_PM)) == 0) {
+		*bytes_written = wl_ext_pm(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_MONITOR, strlen(CMD_MONITOR)) == 0) {
+		*bytes_written = wl_ext_monitor(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_SET_SUSPEND_BCN_LI_DTIM, strlen(CMD_SET_SUSPEND_BCN_LI_DTIM)) == 0) {
+		int bcn_li_dtim;
+		bcn_li_dtim = (int)simple_strtol((command + strlen(CMD_SET_SUSPEND_BCN_LI_DTIM) + 1), NULL, 10);
+		*bytes_written = net_os_set_suspend_bcn_li_dtim(net, bcn_li_dtim);
+	}
+#ifdef WL_EXT_IAPSTA
+	else if (strnicmp(command, CMD_IAPSTA_INIT, strlen(CMD_IAPSTA_INIT)) == 0) {
+		*bytes_written = wl_ext_isam_init(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_ISAM_INIT, strlen(CMD_ISAM_INIT)) == 0) {
+		*bytes_written = wl_ext_isam_init(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_IAPSTA_CONFIG, strlen(CMD_IAPSTA_CONFIG)) == 0) {
+		*bytes_written = wl_ext_iapsta_config(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_ISAM_CONFIG, strlen(CMD_ISAM_CONFIG)) == 0) {
+		*bytes_written = wl_ext_iapsta_config(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_IAPSTA_ENABLE, strlen(CMD_IAPSTA_ENABLE)) == 0) {
+		*bytes_written = wl_ext_iapsta_enable(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_ISAM_ENABLE, strlen(CMD_ISAM_ENABLE)) == 0) {
+		*bytes_written = wl_ext_iapsta_enable(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_IAPSTA_DISABLE, strlen(CMD_IAPSTA_DISABLE)) == 0) {
+		*bytes_written = wl_ext_iapsta_disable(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_ISAM_DISABLE, strlen(CMD_ISAM_DISABLE)) == 0) {
+		*bytes_written = wl_ext_iapsta_disable(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_ISAM_STATUS, strlen(CMD_ISAM_STATUS)) == 0) {
+		*bytes_written = wl_ext_isam_status(net, command, total_len);
+	}
+#endif
+#ifdef IDHCP
+	else if (strnicmp(command, CMD_DHCPC_ENABLE, strlen(CMD_DHCPC_ENABLE)) == 0) {
+		*bytes_written = wl_ext_dhcpc_enable(net, command, total_len);
+	}
+	else if (strnicmp(command, CMD_DHCPC_DUMP, strlen(CMD_DHCPC_DUMP)) == 0) {
+		*bytes_written = wl_ext_dhcpc_dump(net, command, total_len);
+	}
+#endif
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
+		*bytes_written = wl_cfg80211_autochannel(net, command, total_len);
+	}
+#endif
+#ifdef WL_ESCAN
+	else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
+		*bytes_written = wl_escan_autochannel(net, command, total_len);
+	}
+#endif
+	else if (strnicmp(command, CMD_WL, strlen(CMD_WL)) == 0) {
+		*bytes_written = wl_ext_wl_iovar(net, command, total_len);
+	}
+	else
+		ret = -1;
+
+	return ret;
+}
+
+#if defined(WL_CFG80211) || defined(WL_ESCAN)
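+/*
+ * Translate the interface's bandwidth capability on a band into the channel
+ * distance (2/4/8/16 channel numbers for 20/40/80/160 MHz) used as the
+ * overlap radius by wl_ext_get_best_channel().
+ */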
+int
+wl_ext_get_distance(struct net_device *net, u32 band)
+{
+	u32 bw = WL_CHANSPEC_BW_20;
+	s32 bw_cap = 0, distance = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	char buf[WLC_IOCTL_SMLEN]="\0";
+	s32 err = BCME_OK;
+
+	param.band = band;
+	err = wl_ext_iovar_getbuf(net, "bw_cap", &param, sizeof(param), buf,
+		sizeof(buf), NULL);
+	if (err) {
+		if (err != BCME_UNSUPPORTED) {
+			ANDROID_ERROR(("bw_cap failed, %d\n", err));
+			return err;
+		} else {
+			err = wl_ext_iovar_getint(net, "mimo_bw_cap", &bw_cap);
+			if (bw_cap != WLC_N_BW_20ALL)
+				bw = WL_CHANSPEC_BW_40;
+		}
+	} else {
+		if (WL_BW_CAP_80MHZ(buf[0]))
+			bw = WL_CHANSPEC_BW_80;
+		else if (WL_BW_CAP_40MHZ(buf[0]))
+			bw = WL_CHANSPEC_BW_40;
+		else
+			bw = WL_CHANSPEC_BW_20;
+	}
+
+	if (bw == WL_CHANSPEC_BW_20)
+		distance = 2;
+	else if (bw == WL_CHANSPEC_BW_40)
+		distance = 4;
+	else if (bw == WL_CHANSPEC_BW_80)
+		distance = 8;
+	else
+		distance = 16;
+	ANDROID_INFO(("%s: bw=0x%x, distance=%d\n", __FUNCTION__, bw, distance));
+
+	return distance;
+}
+
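+/*
+ * Pick the least used 2.4G and 5G channels: for every valid channel, count
+ * the scanned (or cached) BSSes whose bandwidth footprint overlaps it, then
+ * report the channel with the fewest overlaps in each band.
+ */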
+int
+wl_ext_get_best_channel(struct net_device *net,
+#if defined(BSSCACHE)
+	wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#else
+	struct wl_scan_results *bss_list,
+#endif
+	int ioctl_ver, int *best_2g_ch, int *best_5g_ch
+)
+{
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 i, j;
+#if defined(BSSCACHE)
+	wl_bss_cache_t *node;
+#endif
+	int b_band[CH_MAX_2G_CHANNEL]={0}, a_band1[4]={0}, a_band4[5]={0};
+	s32 cen_ch, distance, distance_2g, distance_5g, ch, min_ap=999;
+	u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	wl_uint32_list_t *list;
+	int ret;
+	chanspec_t chanspec;
+
+	memset(b_band, -1, sizeof(b_band));
+	memset(a_band1, -1, sizeof(a_band1));
+	memset(a_band4, -1, sizeof(a_band4));
+
+	memset(valid_chan_list, 0, sizeof(valid_chan_list));
+	list = (wl_uint32_list_t *)(void *) valid_chan_list;
+	list->count = htod32(WL_NUMCHANNELS);
+	ret = wl_ext_ioctl(net, WLC_GET_VALID_CHANNELS, &valid_chan_list,
+		sizeof(valid_chan_list), 0);
+	if (ret<0) {
+		ANDROID_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, ret));
+		return 0;
+	} else {
+		for (i = 0; i < dtoh32(list->count); i++) {
+			ch = dtoh32(list->element[i]);
+			if (ch < CH_MAX_2G_CHANNEL)
+				b_band[ch-1] = 0;
+			else if (ch <= 48)
+				a_band1[(ch-36)/4] = 0;
+			else if (ch >= 149 && ch <= 161)
+				a_band4[(ch-149)/4] = 0;
+		}
+	}
+
+	distance_2g = wl_ext_get_distance(net, WLC_BAND_2G);
+	distance_5g = wl_ext_get_distance(net, WLC_BAND_5G);
+
+#if defined(BSSCACHE)
+	node = bss_cache_ctrl->m_cache_head;
+	for (i=0; node && i<256; i++)
+#else
+	for (i=0; i < bss_list->count; i++)
+#endif
+	{
+#if defined(BSSCACHE)
+		bi = node->results.bss_info;
+#else
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : bss_list->bss_info;
+#endif
+		chanspec = wl_ext_chspec_driver_to_host(ioctl_ver, bi->chanspec);
+		cen_ch = CHSPEC_CHANNEL(bi->chanspec);
+		distance = 0;
+		if (CHSPEC_IS20(chanspec))
+			distance += 2;
+		else if (CHSPEC_IS40(chanspec))
+			distance += 4;
+		else if (CHSPEC_IS80(chanspec))
+			distance += 8;
+		else
+			distance += 16;
+
+		if (CHSPEC_IS2G(chanspec)) {
+			distance += distance_2g;
+			for (j=0; j<ARRAYSIZE(b_band); j++) {
+				if (b_band[j] >= 0 && abs(cen_ch-(1+j)) <= distance)
+					b_band[j] += 1;
+			}
+		} else {
+			distance += distance_5g;
+			if (cen_ch <= 48) {
+				for (j=0; j<ARRAYSIZE(a_band1); j++) {
+					if (a_band1[j] >= 0 && abs(cen_ch-(36+j*4)) <= distance)
+						a_band1[j] += 1;
+				}
+			} else if (cen_ch >= 149) {
+				for (j=0; j<ARRAYSIZE(a_band4); j++) {
+					if (a_band4[j] >= 0 && abs(cen_ch-(149+j*4)) <= distance)
+						a_band4[j] += 1;
+				}
+			}
+		}
+#if defined(BSSCACHE)
+		node = node->next;
+#endif
+	}
+
+	*best_2g_ch = 0;
+	min_ap = 999;
+	for (i=0; i<CH_MAX_2G_CHANNEL; i++) {
+		if(b_band[i] < min_ap && b_band[i] >= 0) {
+			min_ap = b_band[i];
+			*best_2g_ch = i+1;
+		}
+	}
+	*best_5g_ch = 0;
+	min_ap = 999;
+	for (i=0; i<ARRAYSIZE(a_band1); i++) {
+		if(a_band1[i] < min_ap && a_band1[i] >= 0) {
+			min_ap = a_band1[i];
+			*best_5g_ch = i*4 + 36;
+		}
+	}
+	for (i=0; i<ARRAYSIZE(a_band4); i++) {
+		if(a_band4[i] < min_ap && a_band4[i] >= 0) {
+			min_ap = a_band4[i];
+			*best_5g_ch = i*4 + 149;
+		}
+	}
+
+	if (android_msg_level & ANDROID_INFO_LEVEL) {
+		printf("%s: b_band: ", __FUNCTION__);
+		for (j=0; j<ARRAYSIZE(b_band); j++)
+			printf("%d, ", b_band[j]);
+		printf("\n");
+		printf("%s: a_band1: ", __FUNCTION__);
+		for (j=0; j<ARRAYSIZE(a_band1); j++)
+			printf("%d, ", a_band1[j]);
+		printf("\n");
+		printf("%s: a_band4: ", __FUNCTION__);
+		for (j=0; j<ARRAYSIZE(a_band4); j++)
+			printf("%d, ", a_band4[j]);
+		printf("\n");
+		printf("%s: best_2g_ch=%d, best_5g_ch=%d\n", __FUNCTION__,
+			*best_2g_ch, *best_5g_ch);
+	}
+
+	return 0;
+}
+#endif
+
+#if defined(RSSIAVG)
+void
+wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
+{
+	wl_rssi_cache_t *node, *cur, **rssi_head;
+	int i=0;
+
+	rssi_head = &rssi_cache_ctrl->m_cache_head;
+	node = *rssi_head;
+
+	for (;node;) {
+		ANDROID_INFO(("%s: Free %d with BSSID %pM\n",
+			__FUNCTION__, i, &node->BSSID));
+		cur = node;
+		node = cur->next;
+		kfree(cur);
+		i++;
+	}
+	*rssi_head = NULL;
+}
+
+void
+wl_delete_dirty_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
+{
+	wl_rssi_cache_t *node, *prev, **rssi_head;
+	int i = -1, tmp = 0;
+	struct timeval now;
+
+	do_gettimeofday(&now);
+
+	rssi_head = &rssi_cache_ctrl->m_cache_head;
+	node = *rssi_head;
+	prev = node;
+	for (;node;) {
+		i++;
+		if (now.tv_sec > node->tv.tv_sec) {
+			if (node == *rssi_head) {
+				tmp = 1;
+				*rssi_head = node->next;
+			} else {
+				tmp = 0;
+				prev->next = node->next;
+			}
+			ANDROID_INFO(("%s: Del %d with BSSID %pM\n",
+				__FUNCTION__, i, &node->BSSID));
+			kfree(node);
+			if (tmp == 1) {
+				node = *rssi_head;
+				prev = node;
+			} else {
+				node = prev->next;
+			}
+			continue;
+		}
+		prev = node;
+		node = node->next;
+	}
+}
+
+void
+wl_delete_disconnected_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+	u8 *bssid)
+{
+	wl_rssi_cache_t *node, *prev, **rssi_head;
+	int i = -1, tmp = 0;
+
+	rssi_head = &rssi_cache_ctrl->m_cache_head;
+	node = *rssi_head;
+	prev = node;
+	for (;node;) {
+		i++;
+		if (!memcmp(&node->BSSID, bssid, ETHER_ADDR_LEN)) {
+			if (node == *rssi_head) {
+				tmp = 1;
+				*rssi_head = node->next;
+			} else {
+				tmp = 0;
+				prev->next = node->next;
+			}
+			ANDROID_INFO(("%s: Del %d with BSSID %pM\n",
+				__FUNCTION__, i, &node->BSSID));
+			kfree(node);
+			if (tmp == 1) {
+				node = *rssi_head;
+				prev = node;
+			} else {
+				node = prev->next;
+			}
+			continue;
+		}
+		prev = node;
+		node = node->next;
+	}
+}
+
+void
+wl_reset_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
+{
+	wl_rssi_cache_t *node, **rssi_head;
+
+	rssi_head = &rssi_cache_ctrl->m_cache_head;
+
+	/* reset dirty */
+	node = *rssi_head;
+	for (;node;) {
+		node->dirty += 1;
+		node = node->next;
+	}
+}
+
+int
+wl_update_connected_rssi_cache(struct net_device *net,
+	wl_rssi_cache_ctrl_t *rssi_cache_ctrl, int *rssi_avg)
+{
+	wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
+	int j, k=0;
+	int rssi, error=0;
+	struct ether_addr bssid;
+	struct timeval now, timeout;
+	scb_val_t scbval;
+
+	if (!g_wifi_on)
+		return 0;
+
+	error = wldev_ioctl(net, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+	if (error == BCME_NOTASSOCIATED) {
+		ANDROID_INFO(("%s: Not Associated! res:%d\n", __FUNCTION__, error));
+		return 0;
+	}
+	if (error) {
+		ANDROID_ERROR(("Could not get bssid (%d)\n", error));
+	}
+	error = wldev_get_rssi(net, &scbval);
+	if (error) {
+		ANDROID_ERROR(("Could not get rssi (%d)\n", error));
+		return error;
+	}
+	rssi = scbval.val;
+
+	do_gettimeofday(&now);
+	timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
+	if (timeout.tv_sec < now.tv_sec) {
+		/*
+		 * Integer overflow - the timeout is long enough to be treated
+		 * as infinite, i.e., it will never expire.
+		 */
+		ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+			__FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+	}
+
+	/* update RSSI */
+	rssi_head = &rssi_cache_ctrl->m_cache_head;
+	node = *rssi_head;
+	prev = NULL;
+	for (;node;) {
+		if (!memcmp(&node->BSSID, &bssid, ETHER_ADDR_LEN)) {
+			ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%d\n",
+				__FUNCTION__, k, &bssid, rssi));
+			for (j=0; j<RSSIAVG_LEN-1; j++)
+				node->RSSI[j] = node->RSSI[j+1];
+			node->RSSI[j] = rssi;
+			node->dirty = 0;
+			node->tv = timeout;
+			goto exit;
+		}
+		prev = node;
+		node = node->next;
+		k++;
+	}
+
+	leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
+	if (!leaf) {
+		ANDROID_ERROR(("%s: Memory alloc failure %d\n",
+			__FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
+		return 0;
+	}
+	ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n",
+			__FUNCTION__, k, &bssid, rssi));
+
+	leaf->next = NULL;
+	leaf->dirty = 0;
+	leaf->tv = timeout;
+	memcpy(&leaf->BSSID, &bssid, ETHER_ADDR_LEN);
+	for (j=0; j<RSSIAVG_LEN; j++)
+		leaf->RSSI[j] = rssi;
+
+	if (!prev)
+		*rssi_head = leaf;
+	else
+		prev->next = leaf;
+
+exit:
+	*rssi_avg = (int)wl_get_avg_rssi(rssi_cache_ctrl, &bssid);
+
+	return error;
+}
+
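+/*
+ * Fold every BSS in a scan result list into its RSSI history (a sliding
+ * window of RSSIAVG_LEN samples), creating cache entries as needed and
+ * refreshing their timeout.
+ */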
+void
+wl_update_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+	wl_scan_results_t *ss_list)
+{
+	wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
+	wl_bss_info_t *bi = NULL;
+	int i, j, k;
+	struct timeval now, timeout;
+
+	if (!ss_list->count)
+		return;
+
+	do_gettimeofday(&now);
+	timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
+	if (timeout.tv_sec < now.tv_sec) {
+		/*
+		 * Integer overflow - the timeout is long enough to be treated
+		 * as infinite, i.e., it will never expire.
+		 */
+		ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+			__FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+	}
+
+	rssi_head = &rssi_cache_ctrl->m_cache_head;
+
+	/* update RSSI */
+	for (i = 0; i < ss_list->count; i++) {
+		node = *rssi_head;
+		prev = NULL;
+		k = 0;
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+		for (;node;) {
+			if (!memcmp(&node->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+				ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+					__FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+				for (j=0; j<RSSIAVG_LEN-1; j++)
+					node->RSSI[j] = node->RSSI[j+1];
+				node->RSSI[j] = dtoh16(bi->RSSI);
+				node->dirty = 0;
+				node->tv = timeout;
+				break;
+			}
+			prev = node;
+			node = node->next;
+			k++;
+		}
+
+		if (node)
+			continue;
+
+		leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
+		if (!leaf) {
+			ANDROID_ERROR(("%s: Memory alloc failure %d\n",
+				__FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
+			return;
+		}
+		ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n",
+				__FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+
+		leaf->next = NULL;
+		leaf->dirty = 0;
+		leaf->tv = timeout;
+		memcpy(&leaf->BSSID, &bi->BSSID, ETHER_ADDR_LEN);
+		for (j=0; j<RSSIAVG_LEN; j++)
+			leaf->RSSI[j] = dtoh16(bi->RSSI);
+
+		if (!prev)
+			*rssi_head = leaf;
+		else
+			prev->next = leaf;
+	}
+}
+
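+/* Average the cached RSSI samples for a BSSID; RSSI_MINVAL if not cached. */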
+int16
+wl_get_avg_rssi(wl_rssi_cache_ctrl_t *rssi_cache_ctrl, void *addr)
+{
+	wl_rssi_cache_t *node, **rssi_head;
+	int j, rssi_sum, rssi=RSSI_MINVAL;
+
+	rssi_head = &rssi_cache_ctrl->m_cache_head;
+
+	node = *rssi_head;
+	for (;node;) {
+		if (!memcmp(&node->BSSID, addr, ETHER_ADDR_LEN)) {
+			rssi_sum = 0;
+			rssi = 0;
+			for (j=0; j<RSSIAVG_LEN; j++)
+				rssi_sum += node->RSSI[RSSIAVG_LEN-j-1];
+			rssi = rssi_sum / j;
+			break;
+		}
+		node = node->next;
+	}
+	rssi = MIN(rssi, RSSI_MAXVAL);
+	if (rssi == RSSI_MINVAL) {
+		ANDROID_ERROR(("%s: BSSID %pM is not in the RSSI cache\n",
+			__FUNCTION__, addr));
+	}
+	return (int16)rssi;
+}
+#endif
+
+#if defined(RSSIOFFSET)
+int
+wl_update_rssi_offset(struct net_device *net, int rssi)
+{
+#if defined(RSSIOFFSET_NEW)
+	int j;
+#endif
+
+	if (!g_wifi_on)
+		return rssi;
+
+#if defined(RSSIOFFSET_NEW)
+	for (j=0; j<RSSI_OFFSET; j++) {
+		if (rssi - (RSSI_OFFSET_MINVAL+RSSI_OFFSET_INTVAL*(j+1)) < 0)
+			break;
+	}
+	rssi += j;
+#else
+	rssi += RSSI_OFFSET;
+#endif
+	return MIN(rssi, RSSI_MAXVAL);
+}
+#endif
+
+#if defined(BSSCACHE)
+void
+wl_free_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+	wl_bss_cache_t *node, *cur, **bss_head;
+	int i=0;
+
+	ANDROID_TRACE(("%s called\n", __FUNCTION__));
+
+	bss_head = &bss_cache_ctrl->m_cache_head;
+	node = *bss_head;
+
+	for (;node;) {
+		ANDROID_TRACE(("%s: Free %d with BSSID %pM\n",
+			__FUNCTION__, i, &node->results.bss_info->BSSID));
+		cur = node;
+		node = cur->next;
+		kfree(cur);
+		i++;
+	}
+	*bss_head = NULL;
+}
+
+void
+wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+	wl_bss_cache_t *node, *prev, **bss_head;
+	int i = -1, tmp = 0;
+	struct timeval now;
+
+	do_gettimeofday(&now);
+
+	bss_head = &bss_cache_ctrl->m_cache_head;
+	node = *bss_head;
+	prev = node;
+	for (;node;) {
+		i++;
+		if (now.tv_sec > node->tv.tv_sec) {
+			if (node == *bss_head) {
+				tmp = 1;
+				*bss_head = node->next;
+			} else {
+				tmp = 0;
+				prev->next = node->next;
+			}
+			ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+				__FUNCTION__, i, &node->results.bss_info->BSSID,
+				dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
+			kfree(node);
+			if (tmp == 1) {
+				node = *bss_head;
+				prev = node;
+			} else {
+				node = prev->next;
+			}
+			continue;
+		}
+		prev = node;
+		node = node->next;
+	}
+}
+
+void
+wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+	u8 *bssid)
+{
+	wl_bss_cache_t *node, *prev, **bss_head;
+	int i = -1, tmp = 0;
+
+	bss_head = &bss_cache_ctrl->m_cache_head;
+	node = *bss_head;
+	prev = node;
+	for (;node;) {
+		i++;
+		if (!memcmp(&node->results.bss_info->BSSID, bssid, ETHER_ADDR_LEN)) {
+			if (node == *bss_head) {
+				tmp = 1;
+				*bss_head = node->next;
+			} else {
+				tmp = 0;
+				prev->next = node->next;
+			}
+			ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+				__FUNCTION__, i, &node->results.bss_info->BSSID,
+				dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
+			kfree(node);
+			if (tmp == 1) {
+				node = *bss_head;
+				prev = node;
+			} else {
+				node = prev->next;
+			}
+			continue;
+		}
+		prev = node;
+		node = node->next;
+	}
+}
+
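+/* Age every cached entry by bumping its dirty counter; nothing is freed here. */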
+void
+wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+	wl_bss_cache_t *node, **bss_head;
+
+	bss_head = &bss_cache_ctrl->m_cache_head;
+
+	/* reset dirty */
+	node = *bss_head;
+	for (;node;) {
+		node->dirty += 1;
+		node = node->next;
+	}
+}
+
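+/* Log every cached BSS with its RSSI (averaged when RSSIAVG is enabled). */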
+void dump_bss_cache(
+#if defined(RSSIAVG)
+	wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif
+	wl_bss_cache_t *node)
+{
+	int k = 0;
+	int16 rssi;
+
+	for (;node;) {
+#if defined(RSSIAVG)
+		rssi = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID);
+#else
+		rssi = dtoh16(node->results.bss_info->RSSI);
+#endif
+		ANDROID_TRACE(("%s: dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+			__FUNCTION__, k, &node->results.bss_info->BSSID, rssi,
+			node->results.bss_info->SSID));
+		k++;
+		node = node->next;
+	}
+}
+
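+/* Merge the latest scan results into the BSS cache: an existing entry for the
+ * same BSSID is replaced, new entries get a BSSCACHE_TIMEOUT expiry and, when
+ * SORT_BSS_BY_RSSI is defined, are inserted in descending RSSI order.
+ */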
+void
+wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#if defined(RSSIAVG)
+	wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif
+	wl_scan_results_t *ss_list)
+{
+	wl_bss_cache_t *node, *prev, *leaf, **bss_head;
+	wl_bss_info_t *bi = NULL;
+	int i, k=0;
+#if defined(SORT_BSS_BY_RSSI)
+	int16 rssi, rssi_node;
+#endif
+	struct timeval now, timeout;
+
+	if (!ss_list->count)
+		return;
+
+	do_gettimeofday(&now);
+	timeout.tv_sec = now.tv_sec + BSSCACHE_TIMEOUT;
+	if (timeout.tv_sec < now.tv_sec) {
+		/*
+		 * Integer overflow - treat a timeout this long as effectively
+		 * infinite, i.e., it will never expire.
+		 */
+		ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
+			__FUNCTION__, BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+	}
+
+	bss_head = &bss_cache_ctrl->m_cache_head;
+
+	for (i=0; i < ss_list->count; i++) {
+		node = *bss_head;
+		prev = NULL;
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
+
+		for (;node;) {
+			if (!memcmp(&node->results.bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
+				if (node == *bss_head)
+					*bss_head = node->next;
+				else {
+					prev->next = node->next;
+				}
+				break;
+			}
+			prev = node;
+			node = node->next;
+		}
+
+		leaf = kmalloc(dtoh32(bi->length) + sizeof(wl_bss_cache_t), GFP_KERNEL);
+		if (!leaf) {
+			ANDROID_ERROR(("%s: Memory alloc failure %d\n", __FUNCTION__,
+				dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t)));
+			return;
+		}
+		if (node) {
+			kfree(node);
+			node = NULL;
+			ANDROID_TRACE(("%s: Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+				__FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+		} else
+			ANDROID_TRACE(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+				__FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+
+		memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
+		leaf->next = NULL;
+		leaf->dirty = 0;
+		leaf->tv = timeout;
+		leaf->results.count = 1;
+		leaf->results.version = ss_list->version;
+		k++;
+
+		if (*bss_head == NULL)
+			*bss_head = leaf;
+		else {
+#if defined(SORT_BSS_BY_RSSI)
+			node = *bss_head;
+#if defined(RSSIAVG)
+			rssi = wl_get_avg_rssi(rssi_cache_ctrl, &leaf->results.bss_info->BSSID);
+#else
+			rssi = dtoh16(leaf->results.bss_info->RSSI);
+#endif
+			for (;node;) {
+#if defined(RSSIAVG)
+				rssi_node = wl_get_avg_rssi(rssi_cache_ctrl,
+					&node->results.bss_info->BSSID);
+#else
+				rssi_node = dtoh16(node->results.bss_info->RSSI);
+#endif
+				if (rssi > rssi_node) {
+					leaf->next = node;
+					if (node == *bss_head)
+						*bss_head = leaf;
+					else
+						prev->next = leaf;
+					break;
+				}
+				prev = node;
+				node = node->next;
+			}
+			if (node == NULL)
+				prev->next = leaf;
+#else
+			leaf->next = *bss_head;
+			*bss_head = leaf;
+#endif
+		}
+	}
+	dump_bss_cache(
+#if defined(RSSIAVG)
+		rssi_cache_ctrl,
+#endif
+		*bss_head);
+}
+
+void
+wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl)
+{
+	ANDROID_TRACE(("%s:\n", __FUNCTION__));
+	wl_free_bss_cache(bss_cache_ctrl);
+}
+#endif
+
+
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
new file mode 100644
index 0000000..8533e08
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -0,0 +1,22120 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfg80211.c 711110 2017-07-17 04:38:25Z $
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <wlc_types.h>
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <bcmdevs.h>
+#include <wl_android.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+#include <wl_cfgvendor.h>
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+#if !defined(WL_VENDOR_EXT_SUPPORT)
+#undef GSCAN_SUPPORT
+#endif
+
+#if defined(STAT_REPORT)
+#include <wl_statreport.h>
+#endif /* STAT_REPORT */
+#include <dhd_config.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#ifdef WL11U
+#endif /* WL11U */
+
+#ifndef DHD_UNSUPPORT_IF_CNTS
+#define DHD_SUPPORT_IF_CNTS
+#endif /* !DHD_UNSUPPORT_IF_CNTS */
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h in the Linux system header directory */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1  0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4     0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+#ifdef BCMWAPI_WPI
+#define IW_WSEC_ENABLED(wsec)   ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define IW_WSEC_ENABLED(wsec)   ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+
+static struct device *cfg80211_parent_dev = NULL;
+#ifdef CUSTOMER_HW4_DEBUG
+u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION;
+#else
+u32 wl_dbg_level = WL_DBG_ERR;
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#define MAX_WAIT_TIME 1500
+#ifdef WLAIBSS_MCHAN
+#define IBSS_IF_NAME "ibss%d"
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef VSDB
+/* sleep time to keep the STA connection (or connection attempt) alive during continuous AF TX or peer discovery */
+#define DEFAULT_SLEEP_TIME_VSDB		120
+#define OFF_CHAN_TIME_THRESHOLD_MS	200
+#define AF_RETRY_DELAY_TIME			40
+
+/* if the STA is connected or connecting, sleep for a while before retrying AF TX or peer discovery */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)	\
+	do {	\
+		if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||	\
+			wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {	\
+			OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB);			\
+		}	\
+	} while (0)
+#else /* VSDB */
+/* if not VSDB, do nothing */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
+#endif /* VSDB */
+
+#ifdef WL_CFG80211_SYNC_GON
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+	(wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
+#else
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
+#endif /* WL_CFG80211_SYNC_GON */
+
+#define DNGL_FUNC(func, parameters) func parameters
+#define COEX_DHCP
+
+#define WLAN_EID_SSID	0
+#define CH_MIN_5G_CHANNEL 34
+#define CH_MIN_2G_CHANNEL 1
+#define ACTIVE_SCAN 1
+#define PASSIVE_SCAN 0
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
+#ifdef WL_RELMCAST
+enum rmc_event_type {
+	RMC_EVENT_NONE,
+	RMC_EVENT_LEADER_CHECK_FAIL
+};
+#endif /* WL_RELMCAST */
+
+#ifdef WL_LASTEVT
+typedef struct wl_last_event {
+	uint32 current_time;		/* current time */
+	uint32 timestamp;		/* event timestamp */
+	wl_event_msg_t event;	/* Encapsulated event */
+} wl_last_event_t;
+#endif /* WL_LASTEVT */
+
+/* This overrides the regulatory domains defined in the cfg80211 module (reg.c).
+ * By default, the world regulatory domain defined in reg.c sets the flags NL80211_RRF_PASSIVE_SCAN
+ * and NL80211_RRF_NO_IBSS on the 5GHz channels (36..48 and 149..165).
+ * Because of these flags, wpa_supplicant does not start P2P operations on 5GHz channels.
+ * All changes to the world regulatory domain should be made here.
+ *
+ * This definition requires disabling the missing-field-initializer warning,
+ * as the ieee80211_regdomain definition differs between plain Linux and Android.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+#endif
+static const struct ieee80211_regdomain brcm_regdom = {
+	.n_reg_rules = 4,
+	.alpha2 =  "99",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..11 */
+		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		/* If any */
+		/* IEEE 802.11 channel 14 - Only JP enables
+		 * this and for 802.11b only
+		 */
+		REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+		/* IEEE 802.11a, channel 36..64 */
+		REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+		/* IEEE 802.11a, channel 100..165 */
+		REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+static const struct ieee80211_iface_limit common_if_limits[] = {
+	{
+	/*
+	 * Driver can support up to 2 AP's
+	 */
+	.max = 2,
+	.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+	/*
+	 * During P2P-GO removal, the P2P-GO interface is first changed to STA and
+	 * only then removed, so set the maximum possible number of STA interfaces
+	 * according to the kernel version.
+	 *
+	 * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
+	 * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x)
+	 */
+#ifdef WL_ENABLE_P2P_IF
+	.max = 3,
+#else
+	.max = 2,
+#endif /* WL_ENABLE_P2P_IF */
+	.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+	.max = 2,
+	.types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+#ifdef BCM4330_CHIP
+#define NUM_DIFF_CHANNELS 1
+#else
+#define NUM_DIFF_CHANNELS 2
+#endif
+static const struct ieee80211_iface_combination
+common_iface_combinations[] = {
+	{
+	.num_different_channels = NUM_DIFF_CHANNELS,
+	/*
+	 * The maximum number of interfaces is used in the dual P2P case:
+	 * The max no of interfaces will be used in dual p2p case.
+	 * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though we
+	 * will not be using the STA functionality in this case, it
+	 * will remain registered as it is the primary interface.
+	 */
+	.max_interfaces = 4,
+	.limits = common_if_limits,
+	.n_limits = ARRAY_SIZE(common_if_limits),
+	},
+};
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS     0x1008
+#define WPS_ID_REQ_TYPE           0x103A
+#define WPS_ID_DEVICE_NAME        0x1011
+#define WPS_ID_VERSION            0x104A
+#define WPS_ID_DEVICE_PWD_ID      0x1012
+#define WPS_ID_REQ_DEV_TYPE       0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE      0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+#define PM_BLOCK 1
+#define PM_ENABLE 0
+
+
+#define WL_AKM_SUITE_SHA256_1X  0x000FAC05
+#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
+
+#ifndef IBSS_COALESCE_ALLOWED
+#define IBSS_COALESCE_ALLOWED IBSS_COALESCE_DEFAULT
+#endif
+
+#ifndef IBSS_INITIAL_SCAN_ALLOWED
+#define IBSS_INITIAL_SCAN_ALLOWED IBSS_INITIAL_SCAN_ALLOWED_DEFAULT
+#endif
+
+#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
+#define LONG_LISTEN_TIME 2000
+
+#ifdef WBTEXT
+#define CMD_WBTEXT_PROFILE_CONFIG	"WBTEXT_PROFILE_CONFIG"
+#define CMD_WBTEXT_WEIGHT_CONFIG	"WBTEXT_WEIGHT_CONFIG"
+#define CMD_WBTEXT_TABLE_CONFIG		"WBTEXT_TABLE_CONFIG"
+#define CMD_WBTEXT_DELTA_CONFIG		"WBTEXT_DELTA_CONFIG"
+#define DEFAULT_WBTEXT_PROFILE_A		"a -70 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_B		"b -60 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_A	"RSSI a 65"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_B	"RSSI b 65"
+#define DEFAULT_WBTEXT_WEIGHT_CU_A	"CU a 35"
+#define DEFAULT_WBTEXT_WEIGHT_CU_B	"CU b 35"
+#define DEFAULT_WBTEXT_TABLE_RSSI_A	"RSSI a 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_RSSI_B	"RSSI b 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_CU_A	"CU a 0 30 100 30 50 90 \
+50 60 70 60 80 50 80 100 20"
+#define DEFAULT_WBTEXT_TABLE_CU_B	"CU b 0 10 100 10 25 90 \
+25 40 70 40 70 50 70 100 20"
+
+typedef struct wl_wbtext_bssid {
+	struct ether_addr ea;
+	struct list_head list;
+} wl_wbtext_bssid_t;
+
+static void wl_cfg80211_wbtext_update_rcc(struct bcm_cfg80211 *cfg, struct net_device *dev);
+static bool wl_cfg80211_wbtext_check_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea);
+static bool wl_cfg80211_wbtext_add_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea);
+static void wl_cfg80211_wbtext_clear_bssid_list(struct bcm_cfg80211 *cfg);
+static bool wl_cfg80211_wbtext_send_nbr_req(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	struct wl_profile *profile);
+static bool wl_cfg80211_wbtext_send_btm_query(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	struct wl_profile *profile);
+static void wl_cfg80211_wbtext_set_wnm_maxidle(struct bcm_cfg80211 *cfg, struct net_device *dev);
+static int wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, int body_len);
+#endif /* WBTEXT */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+#define RADIO_PWRSAVE_PPS					10
+#define RADIO_PWRSAVE_QUIET_TIME			10
+#define RADIO_PWRSAVE_LEVEL				3
+#define RADIO_PWRSAVE_STAS_ASSOC_CHECK	0
+
+#define RADIO_PWRSAVE_LEVEL_MIN				1
+#define RADIO_PWRSAVE_LEVEL_MAX				5
+#define RADIO_PWRSAVE_PPS_MIN					1
+#define RADIO_PWRSAVE_QUIETTIME_MIN			1
+#define RADIO_PWRSAVE_ASSOCCHECK_MIN		0
+#define RADIO_PWRSAVE_ASSOCCHECK_MAX		1
+
+#define RADIO_PWRSAVE_MAJOR_VER 1
+#define RADIO_PWRSAVE_MINOR_VER 0
+#define RADIO_PWRSAVE_MAJOR_VER_SHIFT 8
+#define RADIO_PWRSAVE_VERSION \
+	((RADIO_PWRSAVE_MAJOR_VER << RADIO_PWRSAVE_MAJOR_VER_SHIFT)| RADIO_PWRSAVE_MINOR_VER)
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef WLADPS_SEAK_AP_WAR
+#define ATHEROS_OUI "\x00\x03\x7F"
+#define CAMEO_MAC_PREFIX "\x00\x18\xE7"
+#define MAC_PREFIX_LEN 3
+static bool
+wl_find_vndr_ies_specific_vender(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, const u8 *vndr_oui);
+static s32 wl_set_adps_mode(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint8 enable_mode);
+#endif /* WLADPS_SEAK_AP_WAR */
+
+#define MAX_VNDR_OUI_STR_LEN	256
+#define VNDR_OUI_STR_LEN	10
+static const uchar *exclude_vndr_oui_list[] = {
+	"\x00\x50\xf2",			/* Microsoft */
+	"\x00\x00\xf0",			/* Samsung Elec */
+	WFA_OUI,			/* WFA */
+	NULL
+};
+
+typedef struct wl_vndr_oui_entry {
+	uchar oui[DOT11_OUI_LEN];
+	struct list_head list;
+} wl_vndr_oui_entry_t;
+
+static int wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg,
+		struct net_device *ndev, char *vndr_oui, u32 vndr_oui_len);
+static void wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg);
+
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
+static s32 bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* WLAIBSS_MCHAN */
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+	struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, const u8 *mac,
+	struct station_info *sinfo);
+#else
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac,
+	struct station_info *sinfo);
+#endif
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+	struct net_device *dev, bool enabled,
+	s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm);
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm);
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	void *cookie, void (*callback) (void *cookie,
+	struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev,	u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+static s32 wl_cfg80211_del_station(
+		struct wiphy *wiphy, struct net_device *ndev,
+		struct station_del_parameters *params);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+	struct net_device *ndev, const u8* mac_addr);
+#else
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+	struct net_device *ndev, u8* mac_addr);
+#endif
+#ifdef WLMESH
+static s32 wl_cfg80211_join_mesh(
+	struct wiphy *wiphy, struct net_device *dev,
+	const struct mesh_config *conf,
+	const struct mesh_setup *setup);
+static s32 wl_cfg80211_leave_mesh(struct wiphy *wiphy,
+	struct net_device *dev);
+#endif /* WLMESH */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+	struct net_device *dev, const u8 *mac, struct station_parameters *params);
+#else
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac, struct station_parameters *params);
+#endif
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+	struct net_device *dev);
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, bool aborted, bool fw_abort);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
+	KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+	u32 peer_capability, const u8 *buf, size_t len);
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+		(LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+	u32 peer_capability, const u8 *buf, size_t len);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+       const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+       u32 peer_capability, bool initiator, const u8 *buf, size_t len);
+#else /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+	const u8 *buf, size_t len);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	const u8 *peer, enum nl80211_tdls_operation oper);
+#else
+static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper);
+#endif
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0) */
+#ifdef WL_SCHED_SCAN
+static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+	, u64 reqid
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+);
+#endif
+static s32 wl_cfg80211_set_ap_role(struct bcm_cfg80211 *cfg, struct net_device *dev);
+#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF)
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype
+		 iface_type, u8 *mac_addr, const char *name);
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */
+
+s32 wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr);
+s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32 wl_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
+#ifdef GTK_OFFLOAD_SUPPORT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+static s32 wl_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_gtk_rekey_data *data);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
+chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
+chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+#ifdef WL11ULB
+static s32 wl_cfg80211_get_ulb_bw(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev);
+static chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(struct bcm_cfg80211 *cfg,
+	struct wireless_dev *wdev, s32 bssidx);
+static s32 wl_cfg80211_ulbbw_to_ulbchspec(u32 ulb_bw);
+#else
+static inline chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(
+		struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, s32 bssidx)
+{
+	return WL_CHANSPEC_BW_20;
+}
+#endif /* WL11ULB */
+static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev);
+
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_event_handler(struct work_struct *work_data);
+static void wl_init_eq(struct bcm_cfg80211 *cfg);
+static void wl_flush_eq(struct bcm_cfg80211 *cfg);
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg);
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags);
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg);
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg);
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg);
+static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
+	const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#ifdef BT_WIFI_HANDOVER
+static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef WL_SCHED_SCAN
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WL_SCHED_SCAN */
+#ifdef PNO_SUPPORT
+static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_handle_roam_exp_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+static s32 wl_handle_rssi_monitor_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* RSSI_MONITOR_SUPPORT */
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set);
+#ifdef CUSTOM_EVENT_PM_WAKE
+static s32 wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif	/* CUSTOM_EVENT_PM_WAKE */
+#ifdef ENABLE_TEMP_THROTTLING
+static s32 wl_check_rx_throttle_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* ENABLE_TEMP_THROTTLING */
+#if defined(DHD_LOSSLESS_ROAMING) || defined(DBG_PKT_MON)
+static s32 wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
+#ifdef DHD_LOSSLESS_ROAMING
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#ifdef WLTDLS
+static s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
+	enum wl_tdls_config state, bool tdls_mode);
+static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+/*
+ * ioctl utilities
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * cfg profile utilities
+ */
+static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, const void *data, s32 item);
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+#ifdef BCMWAPI_WPI
+static s32 wl_set_set_wapi_ie(struct net_device *dev,
+        struct cfg80211_connect_params *sme);
+#endif
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static s32 wl_ch_to_chanspec(struct net_device *dev, int ch,
+	struct wl_join_params *join_params, size_t *join_params_size);
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct bcm_cfg80211 *cfg);
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size,
+	bool roam);
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
+#ifdef MFP
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8** rsn_cap);
+#endif
+
+#ifdef WL11U
+static bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len);
+static s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx);
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len);
+#endif /* WL11U */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, dhd_pub_t *data);
+static void wl_free_wdev(struct bcm_cfg80211 *cfg);
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 11))
+static int
+#else
+static void
+#endif /* kernel version < 3.10.11 */
+wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+s32 wl_cfg80211_channel_to_freq(u32 channel);
+
+
+static void wl_cfg80211_work_handler(struct work_struct *work);
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr,
+	struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * bcm_cfg80211 memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg);
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+
+#ifdef WL_LASTEVT
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data);
+#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e, data)
+#else
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e)
+#endif /* WL_LASTEVT */
+
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
+	struct net_device *ndev);
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static void wl_link_up(struct bcm_cfg80211 *cfg);
+static void wl_link_down(struct bcm_cfg80211 *cfg);
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev);
+int wl_cfg80211_get_ioctl_version(void);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+#ifdef DEBUGFS_CFG80211
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
+#endif
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg,
+	int channel, int nprobes, int *out_params_size);
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+
+#ifdef WL_CFG80211_ACL
+/* ACL */
+static int wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+	const struct cfg80211_acl_data *acl);
+#endif /* WL_CFG80211_ACL */
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(const char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+
+#ifdef P2P_LISTEN_OFFLOADING
+s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif /* PKT_FILTER_SUPPORT */
+
+#ifdef SUPPORT_SET_CAC
+static void wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable);
+#endif /* SUPPORT_SET_CAC */
+
+static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid);
+static s32 __wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
+
+static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
+	WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || (defined(CONFIG_ARCH_MSM) && \
+	defined(CFG80211_DISCONNECTED_V2))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+	cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+	cfg80211_disconnected(dev, reason, ie, len, gfp);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) || (defined(CONFIG_ARCH_MSM) && \
+	defined(CFG80211_DISCONNECTED_V2))
+#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
+	cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len,	\
+			IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+#else
+#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
+	cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len,	\
+			WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || 			\
+				 (akm) == RSN_AKM_UNSPECIFIED || 	\
+				 (akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#ifdef PROP_TXSTATUS_VSDB
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+static int wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
+	struct net_device *dev, uint action, enum wl_ext_status status, void *context);
+
+
+extern int passive_channel_skip;
+
+static s32
+wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32
+wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+struct chan_info {
+	int freq;
+	int chan_type;
+};
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#define CFG80211_PUT_BSS(wiphy, bss) cfg80211_put_bss(wiphy, bss);
+#else
+#define CFG80211_PUT_BSS(wiphy, bss) cfg80211_put_bss(bss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+
+#define CHAN2G(_channel, _freq, _flags) {			\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define CHAN5G(_channel, _flags) {				\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+	{								\
+		.bitrate	= RATE_TO_BASE100KBPS(_rateid),     \
+		.hw_value	= (_rateid),			    \
+		.flags	  = (_flags),			     \
+	}
+
+static struct ieee80211_rate __wl_rates[] = {
+	RATETAB_ENT(DOT11_RATE_1M, 0),
+	RATETAB_ENT(DOT11_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_6M, 0),
+	RATETAB_ENT(DOT11_RATE_9M, 0),
+	RATETAB_ENT(DOT11_RATE_12M, 0),
+	RATETAB_ENT(DOT11_RATE_18M, 0),
+	RATETAB_ENT(DOT11_RATE_24M, 0),
+	RATETAB_ENT(DOT11_RATE_36M, 0),
+	RATETAB_ENT(DOT11_RATE_48M, 0),
+	RATETAB_ENT(DOT11_RATE_54M, 0)
+};
+
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size	8
+#define wl_g_rates		(__wl_rates + 0)
+#define wl_g_rates_size	12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+	CHAN5G(34, 0), CHAN5G(36, 0),
+	CHAN5G(38, 0), CHAN5G(40, 0),
+	CHAN5G(42, 0), CHAN5G(44, 0),
+	CHAN5G(46, 0), CHAN5G(48, 0),
+	CHAN5G(52, 0), CHAN5G(56, 0),
+	CHAN5G(60, 0), CHAN5G(64, 0),
+	CHAN5G(100, 0), CHAN5G(104, 0),
+	CHAN5G(108, 0), CHAN5G(112, 0),
+	CHAN5G(116, 0), CHAN5G(120, 0),
+	CHAN5G(124, 0), CHAN5G(128, 0),
+	CHAN5G(132, 0), CHAN5G(136, 0),
+	CHAN5G(140, 0), CHAN5G(144, 0),
+	CHAN5G(149, 0), CHAN5G(153, 0),
+	CHAN5G(157, 0), CHAN5G(161, 0),
+	CHAN5G(165, 0)
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+	.channels = __wl_2ghz_channels,
+	.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+	.bitrates = wl_g_rates,
+	.n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+	.band = IEEE80211_BAND_5GHZ,
+	.channels = __wl_5ghz_a_channels,
+	.n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+#ifdef MFP
+	/*
+	 * Advertising AES_CMAC cipher suite to userspace would imply that we
+	 * are supporting MFP. So advertise only when MFP support is enabled.
+	 */
+	WLAN_CIPHER_SUITE_AES_CMAC,
+#endif /* MFP */
+#ifdef BCMWAPI_WPI
+	WLAN_CIPHER_SUITE_SMS4,
+#endif
+};
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * The firmware code required for this feature to work is currently under
+ * BCMINTERNAL flag. In the future, if this is to be enabled, we need to bring the
+ * required firmware code out of the BCMINTERNAL flag.
+ */
+struct wl_dump_survey {
+	u32 obss;
+	u32 ibss;
+	u32 no_ctg;
+	u32 no_pckt;
+	u32 tx;
+	u32 idle;
+};
+#endif /* WL_SUPPORT_ACS */
+
+
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+static int maxrxpktglom = 0;
+#endif
+
+/* IOCtl version read from targeted driver */
+int ioctl_version;
+#ifdef DEBUGFS_CFG80211
+#define S_SUBLOGLEVEL 20
+static const struct {
+	u32 log_level;
+	char *sublogname;
+} sublogname_map[] = {
+	{WL_DBG_ERR, "ERR"},
+	{WL_DBG_INFO, "INFO"},
+	{WL_DBG_DBG, "DBG"},
+	{WL_DBG_SCAN, "SCAN"},
+	{WL_DBG_TRACE, "TRACE"},
+	{WL_DBG_P2P_ACTION, "P2PACTION"}
+};
+#endif
+
+#ifdef CUSTOMER_HW4_DEBUG
+uint prev_dhd_console_ms = 0;
+u32 prev_wl_dbg_level = 0;
+bool wl_scan_timeout_dbg_enabled = 0;
+static void wl_scan_timeout_dbg_set(void);
+static void wl_scan_timeout_dbg_clear(void);
+
+static void wl_scan_timeout_dbg_set(void)
+{
+	WL_ERR(("Enter \n"));
+	prev_dhd_console_ms = dhd_console_ms;
+	prev_wl_dbg_level = wl_dbg_level;
+
+	dhd_console_ms = 1;
+	wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN);
+
+	wl_scan_timeout_dbg_enabled = 1;
+}
+static void wl_scan_timeout_dbg_clear(void)
+{
+	WL_ERR(("Enter \n"));
+	dhd_console_ms = prev_dhd_console_ms;
+	wl_dbg_level = prev_wl_dbg_level;
+
+	wl_scan_timeout_dbg_enabled = 0;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */
+uint32 fw_assoc_watchdog_ms = 0;
+bool fw_assoc_watchdog_started = 0;
+#define FW_ASSOC_WATCHDOG_TIME (10 * 1000) /* msec */
+
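+/* Cancel any pending pm_enable work and, depending on the requested type,
+ * reschedule it with a short (WL_PM_ENABLE_TIMEOUT) or long (2x) delay while
+ * holding a PM wakelock for the same duration.
+ */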
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg,
+	enum wl_pm_workq_act_type type)
+{
+	u16 wq_duration = 0;
+	dhd_pub_t *dhd =  NULL;
+
+	if (cfg == NULL)
+		return;
+
+	dhd = (dhd_pub_t *)(cfg->pub);
+
+	mutex_lock(&cfg->pm_sync);
+	/*
+	 * Make cancel and schedule work part mutually exclusive
+	 * so that while cancelling, we are sure that there is no
+	 * work getting scheduled.
+	 */
+	if (delayed_work_pending(&cfg->pm_enable_work)) {
+		cancel_delayed_work_sync(&cfg->pm_enable_work);
+		DHD_PM_WAKE_UNLOCK(cfg->pub);
+	}
+
+	if (type == WL_PM_WORKQ_SHORT) {
+		wq_duration = WL_PM_ENABLE_TIMEOUT;
+	} else if (type == WL_PM_WORKQ_LONG) {
+		wq_duration = (WL_PM_ENABLE_TIMEOUT*2);
+	}
+
+	/* It should schedule work item only if driver is up */
+	if (wq_duration && dhd->up) {
+		if (schedule_delayed_work(&cfg->pm_enable_work,
+				msecs_to_jiffies((const unsigned int)wq_duration))) {
+			DHD_PM_WAKE_LOCK_TIMEOUT(cfg->pub, wq_duration);
+		} else {
+			WL_ERR(("Can't schedule pm work handler\n"));
+		}
+	}
+	mutex_unlock(&cfg->pm_sync);
+}
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+	chanspec_t chspec;
+
+	/* get the channel number */
+	chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+	/* convert the band */
+	if (LCHSPEC_IS2G(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BAND_2G;
+	} else {
+		chspec |= WL_CHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (LCHSPEC_IS20(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BW_20;
+	} else {
+		chspec |= WL_CHANSPEC_BW_40;
+		if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+			chspec |= WL_CHANSPEC_CTL_SB_L;
+		} else {
+			chspec |= WL_CHANSPEC_CTL_SB_U;
+		}
+	}
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	return chspec;
+}
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+	chanspec_t lchspec;
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	/* get the channel number */
+	lchspec = CHSPEC_CHANNEL(chspec);
+
+	/* convert the band */
+	if (CHSPEC_IS2G(chspec)) {
+		lchspec |= WL_LCHANSPEC_BAND_2G;
+	} else {
+		lchspec |= WL_LCHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (CHSPEC_IS20(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_20;
+		lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+	} else if (CHSPEC_IS40(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_40;
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+			lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+		} else {
+			lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+		}
+	} else {
+		/* cannot express the bandwidth */
+		char chanbuf[CHANSPEC_STR_LEN];
+		WL_ERR((
+		        "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+		        "to pre-11ac format\n",
+		        wf_chspec_ntoa(chspec, chanbuf), chspec));
+		return INVCHANSPEC;
+	}
+
+	return lchspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec)
+{
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_to_legacy(chanspec);
+		if (chanspec == INVCHANSPEC) {
+			return chanspec;
+		}
+	}
+	chanspec = htodchanspec(chanspec);
+
+	return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_ch_host_to_driver(struct bcm_cfg80211 *cfg, s32 bssidx, u16 channel)
+{
+	chanspec_t chanspec;
+
+	chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		chanspec |= WL_CHANSPEC_BAND_2G;
+	else
+		chanspec |= WL_CHANSPEC_BAND_5G;
+
+	chanspec |= wl_cfg80211_ulb_get_min_bw_chspec(cfg, NULL, bssidx);
+
+	chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+	return wl_chspec_host_to_driver(chanspec);
+}
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec)
+{
+	chanspec = dtohchanspec(chanspec);
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_from_legacy(chanspec);
+	}
+
+	return chanspec;
+}
+
+/*
+ * convert ASCII string to MAC address (colon-delimited format)
+ * eg: 00:11:22:33:44:55
+ */
+int
+wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n)
+{
+	char *c = NULL;
+	int count = 0;
+
+	memset(n, 0, ETHER_ADDR_LEN);
+	for (;;) {
+		n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
+		if (!*c++ || count == ETHER_ADDR_LEN)
+			break;
+		a = c;
+	}
+	return (count == ETHER_ADDR_LEN);
+}
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+#ifdef WLMESH
+	[NL80211_IFTYPE_MESH_POINT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4)
+	},
+#endif /* WLMESH */
+	[NL80211_IFTYPE_ADHOC] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_AP] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_AP_VLAN] = {
+		/* copy AP */
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+};
+
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+/* Dump the contents of the encoded wps ie buffer and get pbc value */
+static void
+wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc)
+{
+	#define WPS_IE_FIXED_LEN 6
+	s16 len;
+	u8 *subel = NULL;
+	u16 subelt_id;
+	u16 subelt_len;
+	u16 val;
+	u8 *valptr = (uint8*) &val;
+	if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) {
+		WL_ERR(("invalid argument : NULL\n"));
+		return;
+	}
+	len = (s16)wps_ie[TLV_LEN_OFF];
+
+	if (len > wps_ie_len) {
+		WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len));
+		return;
+	}
+	WL_DBG(("wps_ie len=%d\n", len));
+	len -= 4;	/* for the WPS IE's OUI, oui_type fields */
+	subel = wps_ie + WPS_IE_FIXED_LEN;
+	while (len >= 4) {		/* must have attr id, attr len fields */
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_id = HTON16(val);
+
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_len = HTON16(val);
+
+		len -= 4;			/* for the attr id, attr len fields */
+		len -= (s16)subelt_len;	/* for the remaining fields in this attribute */
+		if (len < 0) {
+			break;
+		}
+		WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+			subel, subelt_id, subelt_len));
+
+		if (subelt_id == WPS_ID_VERSION) {
+			WL_DBG(("  attr WPS_ID_VERSION: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_REQ_TYPE) {
+			WL_DBG(("  attr WPS_ID_REQ_TYPE: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_DEVICE_NAME) {
+			char devname[100];
+			int namelen = MIN(subelt_len, (sizeof(devname) - 1));
+
+			if (namelen) {
+				memcpy(devname, subel, namelen);
+				devname[namelen] = '\0';
+				/* Printing len as rx'ed in the IE */
+				WL_DBG(("  attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+					devname, subelt_len));
+			}
+		} else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+			*pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+		} else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+				": cat=%u\n", HTON16(val)));
+		} else {
+			WL_DBG(("  unknown attr 0x%x\n", subelt_id));
+		}
+
+		subel += subelt_len;
+	}
+}
+
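+/* Set the interface transmit power: update the software radio-disable state
+ * via WLC_SET_RADIO, then program the requested power through the "qtxpower"
+ * iovar in quarter-dBm units (clamped to 0xffff).
+ */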
+s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm)
+{
+	s32 err = 0;
+	s32 disable = 0;
+	s32 txpwrqdbm;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = WL_RADIO_SW_DISABLE << 16;
+	disable = htod32(disable);
+	err = wldev_ioctl_set(dev, WLC_SET_RADIO, &disable, sizeof(disable));
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+		return err;
+	}
+
+	if (dbm > 0xffff)
+		dbm = 0xffff;
+	txpwrqdbm = dbm * 4;
+	err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
+		sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (unlikely(err))
+		WL_ERR(("qtxpower error (%d)\n", err));
+	else
+		WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm));
+
+	return err;
+}
+
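+/* Read the current transmit power via the "qtxpower" iovar and convert it
+ * from quarter-dBm to dBm, masking out the WL_TXPWR_OVERRIDE flag.
+ */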
+s32 wl_get_tx_power(struct net_device *dev, s32 *dbm)
+{
+	s32 err = 0;
+	s32 txpwrdbm;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm));
+	txpwrdbm = dtoh32(txpwrdbm);
+	*dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4;
+
+	WL_INFORM(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
+
+	return err;
+}
+
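+/* Return the chanspec currently used by the primary (STA) interface. If the
+ * STA is not associated, or the BSS info cannot be read, fall back to a
+ * temporary P2P channel; the framework applies the real channel later via
+ * set_channel.
+ */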
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+{
+	chanspec_t chspec;
+	int cur_band, err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr bssid;
+	struct wl_bss_info *bss = NULL;
+	s32 bssidx = 0; /* Explicitly set to primary bssidx */
+	char *buf;
+
+	memset(&bssid, 0, sizeof(bssid));
+	if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, sizeof(bssid)))) {
+		/* STA interface is not associated, so start the new interface on a temp
+		 * channel. The proper channel will be applied later by the upper
+		 * framework via set_channel (cfg80211 API).
+		 */
+		WL_DBG(("Not associated. Return a temp channel. \n"));
+		cur_band = 0;
+		err = wldev_ioctl_get(dev, WLC_GET_BAND, &cur_band, sizeof(int));
+		if (unlikely(err)) {
+			WL_ERR(("Get band failed\n"));
+			return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
+		}
+		if (cur_band == WLC_BAND_5G)
+			return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN_5G);
+		else
+			return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
+	}
+
+	buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (!buf) {
+		WL_ERR(("buf alloc failed. use temp channel\n"));
+		return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
+	}
+
+	*(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
+	if ((err = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, buf,
+		WL_EXTRA_BUF_MAX))) {
+			WL_ERR(("Failed to get associated bss info, use temp channel \n"));
+			chspec = wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
+	}
+	else {
+			bss = (struct wl_bss_info *) (buf + 4);
+			chspec =  bss->chanspec;
+
+			WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec));
+	}
+
+	kfree(buf);
+	return chspec;
+}
+
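+/* Create a monitor interface through the DHD layer. Not supported when the
+ * P2P interface paths are compiled in; -EOPNOTSUPP is returned in that case.
+ */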
+static bcm_struct_cfgdev *
+wl_cfg80211_add_monitor_if(const char *name)
+{
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+	WL_INFORM(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
+	return ERR_PTR(-EOPNOTSUPP);
+#else
+	struct net_device* ndev = NULL;
+
+	dhd_add_monitor(name, &ndev);
+	WL_INFORM(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+	return ndev_to_cfgdev(ndev);
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+}
+
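+/* cfg80211 add_virtual_intf handler. Monitor, IBSS, AP and STA requests are
+ * delegated to their helpers; for P2P client/GO the firmware interface is
+ * added via wl_cfgp2p_ifadd() and the IF_ADD event is awaited before the new
+ * net_device is registered. On failure the half-created interface is torn
+ * down and, if required, a HANG event is sent so user space can recover.
+ */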
+static bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	const char *name,
+#else
+	char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	unsigned char name_assign_type,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
+	enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	u32 *flags,
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+	struct vif_params *params)
+{
+	s32 err = -ENODEV;
+	s32 timeout = -1;
+	s32 wlif_type = -1;
+	s32 mode = 0;
+	s32 val = 0;
+	s32 cfg_type;
+	s32 dhd_mode = 0;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *primary_ndev;
+	struct net_device *new_ndev;
+	struct ether_addr primary_mac;
+	bcm_struct_cfgdev *new_cfgdev;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+	s32 up = 1;
+	bool enabled;
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+	dhd_pub_t *dhd;
+	bool hang_required = false;
+
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+	dhd = (dhd_pub_t *)(cfg->pub);
+
+	/* Use primary I/F for sending cmds down to firmware */
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+		WL_ERR(("device is not ready\n"));
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (!name) {
+		WL_ERR(("Interface name not provided \n"));
+		return ERR_PTR(-EINVAL);
+	}
+
+#ifdef WLTDLS
+	/* disable TDLS if number of connected interfaces is >= 1 */
+	wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
+#endif /* WLTDLS */
+
+	mutex_lock(&cfg->if_sync);
+	WL_DBG(("if name: %s, type: %d\n", name, type));
+	switch (type) {
+	case NL80211_IFTYPE_ADHOC:
+#ifdef WLAIBSS_MCHAN
+		new_cfgdev = bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
+		mutex_unlock(&cfg->if_sync);
+		return new_cfgdev;
+#endif /* WLAIBSS_MCHAN */
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		WL_ERR(("Unsupported interface type\n"));
+		mode = WL_MODE_IBSS;
+		err = -EINVAL;
+		goto fail;
+	case NL80211_IFTYPE_MONITOR:
+		new_cfgdev = wl_cfg80211_add_monitor_if(name);
+		mutex_unlock(&cfg->if_sync);
+		return new_cfgdev;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	case NL80211_IFTYPE_P2P_DEVICE:
+		cfg->down_disc_if = FALSE;
+		new_cfgdev = wl_cfgp2p_add_p2p_disc_if(cfg);
+		mutex_unlock(&cfg->if_sync);
+		return new_cfgdev;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	case NL80211_IFTYPE_STATION:
+#ifdef WL_VIRTUAL_APSTA
+#ifdef WLAIBSS_MCHAN
+		if (cfg->ibss_cfgdev) {
+			WL_ERR(("AIBSS is already operational. "
+					"AIBSS & DUALSTA can't be used together\n"));
+			err = -ENOMEM;
+			goto fail;
+		}
+#endif /* WLAIBSS_MCHAN */
+
+		if (wl_cfgp2p_vif_created(cfg)) {
+			WL_ERR(("Could not create new iface. "
+				"A p2p interface is already running\n"));
+			err = -ENOMEM;
+			goto fail;
+		}
+		new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+			NL80211_IFTYPE_STATION, NULL, name);
+		if (!new_cfgdev) {
+			err = -ENOMEM;
+			goto fail;
+		} else {
+			mutex_unlock(&cfg->if_sync);
+			return new_cfgdev;
+		}
+#endif /* WL_VIRTUAL_APSTA */
+	case NL80211_IFTYPE_P2P_CLIENT:
+		wlif_type = WL_P2P_IF_CLIENT;
+		mode = WL_MODE_BSS;
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+		wlif_type = WL_P2P_IF_GO;
+		mode = WL_MODE_AP;
+		break;
+#ifdef WL_VIRTUAL_APSTA
+	case NL80211_IFTYPE_AP:
+		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+		new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+			NL80211_IFTYPE_AP, NULL, name);
+		if (!new_cfgdev) {
+			err = -ENOMEM;
+			goto fail;
+		} else {
+			mutex_unlock(&cfg->if_sync);
+			return new_cfgdev;
+		}
+#endif /* WL_VIRTUAL_APSTA */
+	default:
+		WL_ERR(("Unsupported interface type\n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if (cfg->p2p_supported && (wlif_type != -1)) {
+		ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (!dhd) {
+			err = -EINVAL;
+			goto fail;
+		}
+#endif /* BCMSDIO */
+#endif /* PROP_TXSTATUS_VSDB */
+
+		if (!cfg->p2p) {
+			WL_ERR(("Failed to start p2p"));
+			err = -ENODEV;
+			goto fail;
+		}
+
+		if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+			p2p_on(cfg) = true;
+			wl_cfgp2p_set_firm_p2p(cfg);
+			wl_cfgp2p_init_discovery(cfg);
+			get_primary_mac(cfg, &primary_mac);
+			wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+		}
+
+		strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
+		cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0';
+
+		wl_cfg80211_scan_abort(cfg);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+		if (!cfg->wlfc_on && !disable_proptx) {
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_init(dhd);
+				err = wldev_ioctl_set(primary_ndev, WLC_UP, &up, sizeof(s32));
+				if (err < 0)
+					WL_ERR(("WLC_UP return err:%d\n", err));
+			}
+			cfg->wlfc_on = true;
+		}
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+
+		/* Dual p2p doesn't support multiple P2P GO interfaces;
+		 * p2p_go_count counts GO creation requests.
+		 */
+		if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
+			WL_ERR(("Fw does not support multiple GO\n"));
+			err = -ENOTSUPP;
+			goto fail;
+		}
+		/* In concurrency case, STA may be already associated in a particular channel.
+		 * so retrieve the current channel of primary interface and then start the virtual
+		 * interface on that.
+		 */
+		chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+		/* For P2P mode, use P2P-specific driver features to create the
+		 * bss: "cfg p2p_ifadd"
+		 */
+		wl_set_p2p_status(cfg, IF_ADDING);
+		memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+		cfg_type = wl_cfgp2p_get_conn_idx(cfg);
+		if (cfg_type == BCME_ERROR) {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR(("Failed to get connection idx for p2p interface\n"));
+			err = -ENOTSUPP;
+			goto fail;
+		}
+		err = wl_cfgp2p_ifadd(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type),
+			htod32(wlif_type), chspec);
+		if (unlikely(err)) {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual iface add failed (%d) \n", err));
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+			((wl_get_p2p_status(cfg, IF_ADDING) == false) &&
+			(cfg->if_event_info.valid)),
+			msecs_to_jiffies(MAX_WAIT_TIME));
+
+		if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+			struct wireless_dev *vwdev;
+			int pm_mode = PM_ENABLE;
+			wl_if_event_info *event = &cfg->if_event_info;
+			/* IF_ADD event has come back, so we can proceed to register
+			 * the new interface now. Use the interface name provided by the
+			 * caller (thus ignore the one from wlc).
+			 */
+			new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
+				event->mac, event->bssidx, event->name);
+			if (new_ndev == NULL) {
+				err = -ENODEV;
+				goto fail;
+			}
+
+			wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev;
+			wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx;
+			vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+			if (unlikely(!vwdev)) {
+				WL_ERR(("Could not allocate wireless device\n"));
+				err = -ENOMEM;
+				goto fail;
+			}
+			vwdev->wiphy = cfg->wdev->wiphy;
+			WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
+			if (type == NL80211_IFTYPE_P2P_GO) {
+				cfg->p2p->p2p_go_count++;
+			}
+			vwdev->iftype = type;
+			vwdev->netdev = new_ndev;
+			new_ndev->ieee80211_ptr = vwdev;
+			SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
+			wl_set_drv_status(cfg, READY, new_ndev);
+			if (wl_config_ifmode(cfg, new_ndev, type) < 0) {
+				WL_ERR(("conf ifmode failed\n"));
+				kfree(vwdev);
+				err = -ENOTSUPP;
+				goto fail;
+			}
+
+			if (wl_cfg80211_register_if(cfg,
+				event->ifidx, new_ndev, FALSE) != BCME_OK) {
+				wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+				err = -ENODEV;
+				goto fail;
+			}
+			err = wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode, event->bssidx);
+			if (unlikely(err != 0)) {
+				wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+				WL_ERR(("Allocation of netinfo failed (%d) \n", err));
+				goto fail;
+			}
+			val = 1;
+			/* Disable firmware roaming for P2P interface  */
+			wldev_iovar_setint(new_ndev, "roam_off", val);
+			wldev_iovar_setint(new_ndev, "bcn_timeout", dhd->conf->bcn_timeout);
+#ifdef WL11ULB
+			if (cfg->p2p_wdev && is_p2p_group_iface(new_ndev->ieee80211_ptr)) {
+				u32 ulb_bw = wl_cfg80211_get_ulb_bw(cfg, cfg->p2p_wdev);
+				if (ulb_bw) {
+					/* Apply ULB BW settings on the newly spawned interface */
+					WL_DBG(("[ULB] Applying ULB BW for the newly "
+						"created P2P interface\n"));
+					if (wl_cfg80211_set_ulb_bw(new_ndev,
+						ulb_bw, new_ndev->name) < 0) {
+						/*
+						 * If ulb_bw set failed, fail the iface creation.
+						 * wl_dealloc_netinfo_by_wdev will be called by the
+						 * unregister notifier.
+						 */
+						wl_cfg80211_remove_if(cfg,
+							event->ifidx, new_ndev, FALSE);
+						err = -EINVAL;
+						goto fail;
+					}
+				}
+			}
+#endif /* WL11ULB */
+
+			if (mode != WL_MODE_AP)
+				wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
+
+			WL_ERR((" virtual interface(%s) is created, "
+				"net attach done\n", cfg->p2p->vir_ifname));
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, new_ndev);
+#ifdef SUPPORT_AP_POWERSAVE
+			if (mode == WL_MODE_AP) {
+				dhd_set_ap_powersave(dhd, 0, TRUE);
+			}
+#endif /* SUPPORT_AP_POWERSAVE */
+			if (type == NL80211_IFTYPE_P2P_CLIENT)
+				dhd_mode = DHD_FLAG_P2P_GC_MODE;
+			else if (type == NL80211_IFTYPE_P2P_GO)
+				dhd_mode = DHD_FLAG_P2P_GO_MODE;
+			DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+			/* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+			INIT_COMPLETION(cfg->iface_disable);
+#else
+			init_completion(&cfg->iface_disable);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+#ifdef SUPPORT_SET_CAC
+			wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+			mutex_unlock(&cfg->if_sync);
+			return ndev_to_cfgdev(new_ndev);
+		} else {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
+
+			WL_ERR(("left timeout : %d\n", timeout));
+			WL_ERR(("IF_ADDING status : %d\n", wl_get_p2p_status(cfg, IF_ADDING)));
+			WL_ERR(("event valid : %d\n", cfg->if_event_info.valid));
+
+			wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+			wl_set_p2p_status(cfg, IF_DELETING);
+
+			hang_required = true;
+			if ((err = wl_cfgp2p_ifdel(cfg,
+					wl_to_p2p_bss_macaddr(cfg,
+					cfg_type))) == BCME_OK) {
+				timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+					((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+					(cfg->if_event_info.valid)),
+					msecs_to_jiffies(MAX_WAIT_TIME));
+				if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+					cfg->if_event_info.valid) {
+					hang_required = false;
+					WL_ERR(("IFDEL operation done\n"));
+				} else {
+					WL_ERR(("IFDEL didn't complete properly\n"));
+				}
+				err = -ENODEV;
+#ifdef SUPPORT_SET_CAC
+				wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+			} else {
+				WL_ERR(("IFDEL operation failed, error code = %d\n", err));
+			}
+
+			memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+			wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE && dhd->conf->disable_proptx!=0) {
+				dhd_wlfc_deinit(dhd);
+				cfg->wlfc_on = false;
+			}
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+		}
+	}
+
+fail:
+	mutex_unlock(&cfg->if_sync);
+	if (err) {
+#ifdef WLTDLS
+		/* Enable back TDLS on failure */
+		wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
+#endif /* WLTDLS */
+		if (err != -ENOTSUPP) {
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+			if (dhd->memdump_enabled) {
+				/* Load the dongle side dump to host
+				 * memory and then BUG_ON()
+				 */
+				dhd->memdump_type = DUMP_TYPE_HANG_ON_IFACE_OP_FAIL;
+				dhd_bus_mem_dump(dhd);
+			}
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+			if (hang_required) {
+				/* Notify the user space to recover */
+				struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+				WL_ERR(("if add failed, error %d, sent HANG event to %s\n",
+					err, ndev->name));
+				dhd->hang_reason = HANG_REASON_IFACE_OP_FAILURE;
+				net_os_send_hang_message(ndev);
+			}
+		}
+	}
+	return ERR_PTR(err);
+}
+
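+/* cfg80211 del_virtual_intf handler. P2P interfaces are disabled
+ * (wl_cfgp2p_ifdisable), the link-down completion is awaited and the bsscfg
+ * is deleted (wl_cfgp2p_ifdel); AP/STA virtual interfaces go through
+ * wl_cfg80211_del_iface(). TDLS is re-enabled once the deletion succeeds.
+ */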
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	struct net_device *dev = NULL;
+	struct ether_addr p2p_mac;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 timeout = -1;
+	s32 ret = 0;
+	s32 index = -1;
+	s32 type = -1;
+#if defined(CUSTOM_SET_CPUCORE) || defined(DHD_HANG_SEND_UP_TEST)
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE || DHD_HANG_SEND_UP_TEST */
+	WL_DBG(("Enter\n"));
+
+	memset(&p2p_mac, 0, sizeof(struct ether_addr));
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
+	if (!(dhd->chan_isvht80))
+		dhd_set_cpucore(dhd, FALSE);
+#endif /* CUSTOM_SET_CPUCORE */
+	mutex_lock(&cfg->if_sync);
+#ifdef WL_CFG80211_P2P_DEV_IF
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		if (dhd_download_fw_on_driverload) {
+			ret = wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
+		} else {
+			cfg->down_disc_if = TRUE;
+			ret = 0;
+		}
+		mutex_unlock(&cfg->if_sync);
+		return ret;
+	}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifdef WLAIBSS_MCHAN
+	if (cfgdev == cfg->ibss_cfgdev) {
+		ret = bcm_cfg80211_del_ibss_if(wiphy, cfgdev);
+		goto done;
+	}
+#endif /* WLAIBSS_MCHAN */
+
+	if ((index = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) {
+		WL_ERR(("Find p2p index from wdev failed\n"));
+		ret = -ENODEV;
+		goto done;
+	}
+	if ((cfg->p2p_supported) && index && (wl_cfgp2p_find_type(cfg, index, &type) == BCME_OK)) {
+		/* Handle P2P interface del */
+		memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, ETHER_ADDR_LEN);
+
+		/* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases
+		 */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared "));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		if (wl_cfgp2p_vif_created(cfg)) {
+			if (wl_get_drv_status(cfg, SCANNING, dev)) {
+				wl_notify_escan_complete(cfg, dev, true, true);
+			}
+			/* Delete pm_enable_work */
+			wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+			/* for GC */
+			if (wl_get_drv_status(cfg, DISCONNECTING, dev) &&
+				(wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) {
+				WL_ERR(("Wait for Link Down event for GC !\n"));
+				wait_for_completion_timeout
+					(&cfg->iface_disable, msecs_to_jiffies(500));
+			}
+
+			memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+			wl_set_p2p_status(cfg, IF_DELETING);
+			DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
+
+			/* for GO */
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+				cfg->p2p->p2p_go_count--;
+				/* disable interface before bsscfg free */
+				ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac);
+				/* If fw doesn't support "ifdis", do not wait
+				 * for link down of AP mode.
+				 */
+				if (ret == 0) {
+					WL_ERR(("Wait for Link Down event for GO !!!\n"));
+					wait_for_completion_timeout(&cfg->iface_disable,
+						msecs_to_jiffies(500));
+				} else if (ret != BCME_UNSUPPORTED) {
+					msleep(300);
+				}
+			}
+			wl_cfg80211_clear_per_bss_ies(cfg, index);
+
+			if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)
+				wldev_iovar_setint(dev, "buf_key_b4_m4", 0);
+			memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet,
+			ETHER_ADDR_LEN);
+			CFGP2P_INFO(("primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+			       dev->ifindex, MAC2STRDBG(p2p_mac.octet)));
+
+			/* delete interface after link down */
+			ret = wl_cfgp2p_ifdel(cfg, &p2p_mac);
+#if defined(DHD_HANG_SEND_UP_TEST)
+			if (ret != BCME_OK ||
+				dhd->req_hang_type == HANG_REASON_IFACE_OP_FAILURE)
+#else /* DHD_HANG_SEND_UP_TEST */
+			if (ret != BCME_OK)
+#endif /* DHD_HANG_SEND_UP_TEST */
+			{
+				struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+				dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+				WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
+					ret, ndev->name));
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+				if (dhd->memdump_enabled) {
+					/* Load the dongle side dump to host
+					 * memory and then BUG_ON()
+					 */
+					dhd->memdump_type = DUMP_TYPE_HANG_ON_IFACE_OP_FAIL;
+					dhd_bus_mem_dump(dhd);
+				}
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+				dhd->hang_reason = HANG_REASON_IFACE_OP_FAILURE;
+				net_os_send_hang_message(ndev);
+			} else {
+				/* Wait for IF_DEL operation to be finished */
+				timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+					((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+					(cfg->if_event_info.valid)),
+					msecs_to_jiffies(MAX_WAIT_TIME));
+				if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+					cfg->if_event_info.valid) {
+
+					WL_DBG(("IFDEL operation done\n"));
+					wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev);
+				} else {
+					WL_ERR(("IFDEL didn't complete properly\n"));
+				}
+#ifdef SUPPORT_SET_CAC
+				wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+			}
+
+			ret = dhd_del_monitor(dev);
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub));
+			}
+		}
+	} else if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) ||
+		(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION)) {
+#ifdef WL_VIRTUAL_APSTA
+		ret = wl_cfg80211_del_iface(wiphy, cfgdev);
+#else
+		WL_ERR(("Virtual APSTA not supported!\n"));
+#endif /* WL_VIRTUAL_APSTA */
+	}
+
+done:
+	mutex_unlock(&cfg->if_sync);
+#ifdef WLTDLS
+	if (ret == BCME_OK) {
+		/* If interface del is success, try enabling back TDLS */
+		wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
+	}
+#endif /* WLTDLS */
+	return ret;
+}
+
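+/* cfg80211 change_virtual_intf handler: switch an existing netdev between
+ * STA/IBSS/AP/P2P-GO roles. For a P2P group interface this calls
+ * wl_cfgp2p_ifchange() and waits for the IF_CHANGED event; plain AP creation
+ * is deferred by marking AP_CREATING.
+ */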
+static s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+	enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	u32 *flags,
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+	struct vif_params *params)
+{
+	s32 ap = 0;
+	s32 infra_ibss = 1;
+	s32 wlif_type;
+	s32 mode = 0;
+	s32 err = BCME_OK;
+	s32 index;
+	s32 conn_idx = -1;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+	mutex_lock(&cfg->if_sync);
+	WL_DBG(("Enter type %d\n", type));
+	switch (type) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+#ifndef WLMESH
+	case NL80211_IFTYPE_MESH_POINT:
+#endif /* WLMESH */
+		ap = 1;
+		WL_ERR(("type (%d) : currently we do not support this type\n",
+			type));
+		break;
+#ifdef WLMESH
+	case NL80211_IFTYPE_MESH_POINT:
+		infra_ibss = WL_BSSTYPE_MESH;
+		break;
+#endif /* WLMESH */
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		infra_ibss = 0;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+			s32 bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+			if (bssidx < 0) {
+				/* validate bssidx */
+				WL_ERR(("Wrong bssidx! \n"));
+				err = -EINVAL;
+				goto error;
+			}
+			WL_DBG(("del interface. bssidx:%d", bssidx));
+			/* Downgrade role from AP to STA */
+			if ((err = wl_cfg80211_add_del_bss(cfg, ndev,
+					bssidx, NL80211_IFTYPE_STATION, 0, NULL)) < 0) {
+				WL_ERR(("AP-STA Downgrade failed \n"));
+				err = -EINVAL;
+				goto error;
+			}
+		}
+		mode = WL_MODE_BSS;
+		break;
+	case NL80211_IFTYPE_AP:
+		dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+		/* intentional fall through */
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		ap = 1;
+		break;
+	default:
+		err = -EINVAL;
+		goto error;
+	}
+
+	if (!dhd) {
+		err = -EINVAL;
+		goto error;
+	}
+	if (ap) {
+		wl_set_mode_by_netdev(cfg, ndev, mode);
+		if (is_p2p_group_iface(ndev->ieee80211_ptr) &&
+			cfg->p2p && wl_cfgp2p_vif_created(cfg)) {
+			WL_DBG(("p2p_vif_created p2p_on (%d)\n", p2p_on(cfg)));
+			wl_notify_escan_complete(cfg, ndev, true, true);
+
+			/* Dual p2p doesn't support multiple P2P GO interfaces;
+			 * p2p_go_count counts GO creation requests.
+			 */
+			if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
+				wl_set_mode_by_netdev(cfg, ndev, WL_MODE_BSS);
+				WL_ERR(("Fw does not support multiple GO\n"));
+				err = BCME_ERROR;
+				goto error;
+			}
+			/* In concurrency case, STA may be already associated in a particular
+			 * channel. so retrieve the current channel of primary interface and
+			 * then start the virtual interface on that.
+			 */
+			chspec = wl_cfg80211_get_shared_freq(wiphy);
+			index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+			if (index < 0) {
+				WL_ERR(("Find p2p index from ndev(%p) failed\n", ndev));
+				err = BCME_ERROR;
+				goto error;
+			}
+			if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK) {
+				err = BCME_ERROR;
+				goto error;
+			}
+
+			wlif_type = WL_P2P_IF_GO;
+			printf("%s: %s  ap (%d), infra_ibss (%d), iftype (%d) conn_idx (%d)\n",
+				__FUNCTION__, ndev->name, ap, infra_ibss, type, conn_idx);
+			wl_set_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			wl_cfgp2p_ifchange(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx),
+				htod32(wlif_type), chspec, conn_idx);
+			wait_event_interruptible_timeout(cfg->netif_change_event,
+				(wl_get_p2p_status(cfg, IF_CHANGED) == true),
+				msecs_to_jiffies(MAX_WAIT_TIME));
+			wl_set_mode_by_netdev(cfg, ndev, mode);
+			dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
+			dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
+			wl_clr_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, ndev);
+#ifdef SUPPORT_AP_POWERSAVE
+			dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE */
+		} else if ((type == NL80211_IFTYPE_AP) &&
+			!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+#if 0
+			err = wl_cfg80211_set_ap_role(cfg, ndev);
+			if (unlikely(err)) {
+				WL_ERR(("set ap role failed!\n"));
+				goto error;
+			}
+#else
+			wl_set_drv_status(cfg, AP_CREATING, ndev);
+#endif
+		} else {
+			WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
+			err = -EINVAL;
+			goto error;
+		}
+	} else {
+		/* P2P GO interface deletion is handled on the basis of role type (AP).
+		 * So avoid changing role for p2p type.
+		 */
+		if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+			wl_set_mode_by_netdev(cfg, ndev, mode);
+		WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA\n"));
+#ifdef SUPPORT_AP_POWERSAVE
+		dhd_set_ap_powersave(dhd, 0, FALSE);
+#endif /* SUPPORT_AP_POWERSAVE */
+	}
+
+	if (!infra_ibss) {
+		err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra_ibss, sizeof(s32));
+		if (err < 0) {
+			WL_ERR(("SET INFRA/IBSS  error %d\n", err));
+			err = -EINVAL;
+			goto error;
+		}
+	}
+
+	WL_DBG(("Setting iftype to %d \n", type));
+	ndev->ieee80211_ptr->iftype = type;
+error:
+	mutex_unlock(&cfg->if_sync);
+	return err;
+}
+
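+/* Called from the event path when the firmware reports a new interface. If an
+ * IF_ADD (or a pending bss operation) is expected, the event details are
+ * stored in cfg->if_event_info and the waiter on netif_change_event is woken;
+ * while IF_CHANGING is set the event is handled as an interface change.
+ */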
+s32
+wl_cfg80211_notify_ifadd(struct net_device *dev, int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	bool ifadd_expected = FALSE;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd");
+	 * redirect the IF_ADD event to ifchange as it is not a real "new" interface.
+	 */
+	if (wl_get_p2p_status(cfg, IF_CHANGING))
+		return wl_cfg80211_notify_ifchange(dev, ifidx, name, mac, bssidx);
+
+	/* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
+	if (wl_get_p2p_status(cfg, IF_ADDING)) {
+		ifadd_expected = TRUE;
+		wl_clr_p2p_status(cfg, IF_ADDING);
+	} else if (cfg->bss_pending_op) {
+		ifadd_expected = TRUE;
+		cfg->bss_pending_op = FALSE;
+	}
+
+	if (ifadd_expected) {
+		wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		strncpy(if_event_info->name, name, IFNAMSIZ);
+		if_event_info->name[IFNAMSIZ] = '\0';
+		if (mac)
+			memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
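+/* Interface-delete counterpart of the above: record the ifidx/bssidx and wake
+ * the waiter when an IF_DEL (or a pending bss operation) is expected.
+ */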
+s32
+wl_cfg80211_notify_ifdel(struct net_device *dev, int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	bool ifdel_expected = FALSE;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+	if (wl_get_p2p_status(cfg, IF_DELETING)) {
+		ifdel_expected = TRUE;
+		wl_clr_p2p_status(cfg, IF_DELETING);
+	} else if (cfg->bss_pending_op) {
+		ifdel_expected = TRUE;
+		cfg->bss_pending_op = FALSE;
+	}
+
+	if (ifdel_expected) {
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
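+/* Complete a pending interface role change: set IF_CHANGED and wake the
+ * waiter blocked in wl_cfg80211_change_virtual_iface().
+ */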
+s32
+wl_cfg80211_notify_ifchange(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+	uint8 bssidx)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING)) {
+		wl_set_p2p_status(cfg, IF_CHANGED);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
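+/* Clean up driver state after the firmware has deleted a P2P interface: abort
+ * any pending escan on that netdev, clear the per-connection P2P bookkeeping,
+ * tear down proptx flow control if needed and unregister the net_device.
+ */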
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev)
+{
+	s32 type = -1;
+	s32 bssidx = -1;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	bool enabled;
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+
+	bssidx = if_event_info->bssidx;
+	if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) &&
+		bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2)) {
+		WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx));
+		return BCME_ERROR;
+	}
+
+	if (p2p_is_on(cfg) && wl_cfgp2p_vif_created(cfg)) {
+		if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) {
+			/* Abort any pending scan requests */
+			cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+			WL_DBG(("ESCAN COMPLETED\n"));
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false);
+		}
+
+		memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type) == BCME_OK) {
+			/* Update P2P data */
+			wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
+			wl_to_p2p_bss_ndev(cfg, type) = NULL;
+			wl_to_p2p_bss_bssidx(cfg, type) = -1;
+		} else if (wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr) < 0) {
+			WL_ERR(("bssidx not known for the given ndev as per net_info data \n"));
+			return BCME_ERROR;
+		}
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+		dhd_wlfc_get_enable(dhd, &enabled);
+		if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+			dhd->op_mode != DHD_FLAG_IBSS_MODE && dhd->conf->disable_proptx!=0) {
+			dhd_wlfc_deinit(dhd);
+			cfg->wlfc_on = false;
+		}
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+	dhd_net_if_lock(ndev);
+	wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev, FALSE);
+	dhd_net_if_unlock(ndev);
+
+	return BCME_OK;
+}
+
+/* Find listen channel */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+	const u8 *ie, u32 ie_len)
+{
+	wifi_p2p_ie_t *p2p_ie;
+	u8 *end, *pos;
+	s32 listen_channel;
+
+/* unfortunately const cast required here - function is
+ * a callback so its signature must not be changed
+ * and cascade of changing wl_cfgp2p_find_p2pie
+ * causes need for const cast in other places
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	pos = (u8 *)ie;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+
+	if (p2p_ie == NULL)
+		return 0;
+
+	pos = p2p_ie->subelts;
+	end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+	CFGP2P_DBG((" found p2p ie! length %d\n",
+		p2p_ie->len));
+
+	while (pos < end) {
+		uint16 attr_len;
+		if (pos + 2 >= end) {
+			CFGP2P_DBG((" -- Invalid P2P attribute"));
+			return 0;
+		}
+		attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+		if (pos + 3 + attr_len > end) {
+			CFGP2P_DBG(("P2P: Attribute underflow "
+				   "(len=%u left=%d)",
+				   attr_len, (int) (end - pos - 3)));
+			return 0;
+		}
+
+		/* If the Listen Channel attribute id is 6 and the value is valid,
+		 * return the listen channel.
+		 */
+		if (pos[0] == 6) {
+			/* listen channel subel length format
+			 * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+			 */
+			listen_channel = pos[1 + 2 + 3 + 1];
+
+			if (listen_channel == SOCIAL_CHAN_1 ||
+				listen_channel == SOCIAL_CHAN_2 ||
+				listen_channel == SOCIAL_CHAN_3) {
+				CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+				return listen_channel;
+			}
+		}
+		pos += 3 + attr_len;
+	}
+	return 0;
+}
+
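+/* Translate a cfg80211 scan request into wl_scan_params: broadcast BSSID, the
+ * per-channel chanspec list (DFS/no-IR channels are skipped for secondary
+ * interfaces) and the SSID list appended after the channel array. A NULL
+ * request leaves the defaults, i.e. an all-channel broadcast scan.
+ */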
+static void wl_scan_prep(struct bcm_cfg80211 *cfg, struct wl_scan_params *params,
+	struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u16 channel;
+	chanspec_t chanspec;
+	s32 i = 0, j = 0, offset;
+	char *ptr;
+	wlc_ssid_t ssid;
+	struct wireless_dev *wdev;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+	memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+	WL_SCAN(("Preparing Scan request\n"));
+	WL_SCAN(("nprobes=%d\n", params->nprobes));
+	WL_SCAN(("active_time=%d\n", params->active_time));
+	WL_SCAN(("passive_time=%d\n", params->passive_time));
+	WL_SCAN(("home_time=%d\n", params->home_time));
+	WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+
+	/* If the request is NULL, just exit; it will be an all-channel broadcast scan */
+	if (!request)
+		return;
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	WL_SCAN(("### List of channelspecs to scan ###\n"));
+	if (n_channels > 0) {
+		for (i = 0; i < n_channels; i++) {
+			channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+			/* SKIP DFS channels for Secondary interface */
+			if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+				(request->channels[i]->flags &
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
+#else
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+				continue;
+			if (!dhd_conf_match_channel(cfg->pub, channel))
+				continue;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			wdev = request->wdev;
+#else
+			wdev = request->dev->ieee80211_ptr;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			chanspec = wl_cfg80211_ulb_get_min_bw_chspec(cfg, wdev, -1);
+			if (chanspec == INVCHANSPEC) {
+				WL_ERR(("Invalid chanspec! Skipping channel\n"));
+				continue;
+			}
+
+			if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
+				chanspec |= WL_CHANSPEC_BAND_2G;
+			} else {
+				chanspec |= WL_CHANSPEC_BAND_5G;
+			}
+			params->channel_list[j] = channel;
+			params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+			params->channel_list[j] |= chanspec;
+			WL_SCAN(("Chan : %d, Channel spec: %x \n",
+				channel, params->channel_list[j]));
+			params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]);
+			j++;
+		}
+	} else {
+		WL_SCAN(("Scanning all channels\n"));
+	}
+	n_channels = j;
+	/* Copy ssid array if applicable */
+	WL_SCAN(("### List of SSIDs to scan ###\n"));
+	if (n_ssids > 0) {
+		offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		ptr = (char*)params + offset;
+		for (i = 0; i < n_ssids; i++) {
+			memset(&ssid, 0, sizeof(wlc_ssid_t));
+			ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
+			memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
+			if (!ssid.SSID_len)
+				WL_SCAN(("%d: Broadcast scan\n", i));
+			else
+				WL_SCAN(("%d: scan for %s size=%d\n", i,
+				ssid.SSID, ssid.SSID_len));
+			memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
+			ptr += sizeof(wlc_ssid_t);
+		}
+	} else {
+		WL_SCAN(("Broadcast scan\n"));
+	}
+	/* Adding mask to channel numbers */
+	params->channel_num =
+	        htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	               (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+
+	if (n_channels == 1) {
+		params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+		params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	}
+}
+
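+/* Fetch the list of channels valid under the current regulatory settings via
+ * the WLC_GET_VALID_CHANNELS ioctl.
+ */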
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+	wl_uint32_list_t *list;
+	s32 err = BCME_OK;
+	if (valid_chan_list == NULL || size <= 0)
+		return -ENOMEM;
+
+	memset(valid_chan_list, 0, size);
+	list = (wl_uint32_list_t *)(void *) valid_chan_list;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = wldev_ioctl_get(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size);
+	if (err != 0) {
+		WL_ERR(("get channels failed with %d\n", err));
+	}
+
+	return err;
+}
+
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+bool g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_SHORT_DWELL_TIME */
+
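+/* Build and submit an escan request. The legacy path fills wl_escan_params_t
+ * from the cfg80211 request and issues the "escan" iovar; the P2P path filters
+ * the requested channels against the valid-channel list, picks a scan purpose
+ * (social/connect/normal) and calls wl_cfgp2p_escan().
+ */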
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct cfg80211_scan_request *request, uint16 action)
+{
+	s32 err = BCME_OK;
+	u32 n_channels;
+	u32 n_ssids;
+	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+	wl_escan_params_t *params = NULL;
+	u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	u32 num_chans = 0;
+	s32 channel;
+	u32 n_valid_chan;
+	s32 search_state = WL_P2P_DISC_ST_SCAN;
+	u32 i, j, n_nodfs = 0;
+	u16 *default_chan_list = NULL;
+	wl_uint32_list_t *list;
+	s32 bssidx = -1;
+	struct net_device *dev = NULL;
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+	bool is_first_init_2g_scan = false;
+#endif /* USE_INITIAL_SHORT_DWELL_TIME */
+	p2p_scan_purpose_t	p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+	scb_val_t scbval;
+	static int cnt = 0;
+
+	WL_DBG(("Enter \n"));
+
+	/* The scan request can come in empty: perform the default all-channel scan */
+	if (!cfg) {
+		err = -EINVAL;
+		goto exit;
+	}
+	if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+		/* LEGACY SCAN TRIGGER */
+		WL_SCAN((" LEGACY E-SCAN START\n"));
+
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+		if (!request) {
+			err = -EINVAL;
+			goto exit;
+		}
+		if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+			is_first_init_2g_scan = true;
+			g_first_broadcast_scan = false;
+		}
+#endif /* USE_INITIAL_SHORT_DWELL_TIME */
+
+		/* If the scan request is not empty, parse the scan request parameters */
+		if (request != NULL) {
+			n_channels = request->n_channels;
+			n_ssids = request->n_ssids;
+			if (n_channels % 2)
+				/* If n_channels is odd, add a pad of u16 */
+				params_size += sizeof(u16) * (n_channels + 1);
+			else
+				params_size += sizeof(u16) * n_channels;
+
+			/* Allocate space for populating ssids in wl_escan_params_t struct */
+			params_size += sizeof(struct wlc_ssid) * n_ssids;
+		}
+		params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+		if (params == NULL) {
+			err = -ENOMEM;
+			goto exit;
+		}
+		wl_scan_prep(cfg, &params->params, request);
+
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's the first broadcast scan. */
+		if (is_first_init_2g_scan)
+			params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+#endif /* USE_INITIAL_SHORT_DWELL_TIME */
+
+		params->version = htod32(ESCAN_REQ_VERSION);
+		params->action =  htod16(action);
+		wl_escan_set_sync_id(params->sync_id, cfg);
+		wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+		if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+			WL_ERR(("ioctl buffer length not sufficient\n"));
+			kfree(params);
+			err = -ENOMEM;
+			goto exit;
+		}
+		if (cfg->active_scan == PASSIVE_SCAN) {
+			params->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+			WL_DBG(("Passive scan_type %d \n", params->params.scan_type));
+		}
+
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+
+		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+			cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		printf("%s: LEGACY_SCAN sync ID: %d, bssidx: %d\n", __FUNCTION__, params->sync_id, bssidx);
+		if (unlikely(err)) {
+			if (err == BCME_EPERM)
+				/* Scan Not permitted at this point of time */
+				WL_DBG((" Escan not permitted at this time (%d)\n", err));
+			else
+				WL_ERR((" Escan set error (%d)\n", err));
+		} else {
+			DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
+		}
+		kfree(params);
+	}
+	else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+		/* P2P SCAN TRIGGER */
+		s32 _freq = 0;
+		n_nodfs = 0;
+		if (request && request->n_channels) {
+			num_chans = request->n_channels;
+			WL_SCAN((" channel number: %d\n", num_chans));
+			default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+				GFP_KERNEL);
+			if (default_chan_list == NULL) {
+				WL_ERR(("channel list allocation failed \n"));
+				err = -ENOMEM;
+				goto exit;
+			}
+			if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+#ifdef P2P_SKIP_DFS
+				int is_printed = false;
+#endif /* P2P_SKIP_DFS */
+				list = (wl_uint32_list_t *) chan_buf;
+				n_valid_chan = dtoh32(list->count);
+				if (n_valid_chan > WL_NUMCHANNELS) {
+					WL_ERR(("wrong n_valid_chan:%d\n", n_valid_chan));
+					kfree(default_chan_list);
+					err = -EINVAL;
+					goto exit;
+				}
+
+				for (i = 0; i < num_chans; i++)
+				{
+					_freq = request->channels[i]->center_freq;
+					channel = ieee80211_frequency_to_channel(_freq);
+
+					/* ignore DFS channels */
+					if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+						(IEEE80211_CHAN_NO_IR
+						| IEEE80211_CHAN_RADAR))
+#else
+						(IEEE80211_CHAN_RADAR
+						| IEEE80211_CHAN_PASSIVE_SCAN))
+#endif
+						continue;
+#ifdef P2P_SKIP_DFS
+					if (channel >= 52 && channel <= 144) {
+						if (is_printed == false) {
+							WL_ERR(("SKIP DFS CHANs(52~144)\n"));
+							is_printed = true;
+						}
+						continue;
+					}
+#endif /* P2P_SKIP_DFS */
+
+					for (j = 0; j < n_valid_chan; j++) {
+						/* Allow only channels supported by the
+						 * current regulatory domain.
+						 */
+						if (n_nodfs >= num_chans) {
+							break;
+						}
+						if (channel == (dtoh32(list->element[j]))) {
+							default_chan_list[n_nodfs++] =
+								channel;
+						}
+					}
+
+				}
+			}
+			if (num_chans == SOCIAL_CHAN_CNT && (
+						(default_chan_list[0] == SOCIAL_CHAN_1) &&
+						(default_chan_list[1] == SOCIAL_CHAN_2) &&
+						(default_chan_list[2] == SOCIAL_CHAN_3))) {
+				/* SOCIAL CHANNELS 1, 6, 11 */
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+				WL_INFORM(("P2P SEARCH PHASE START \n"));
+			} else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
+				(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
+				((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
+				(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
+				/* If you are already a GO, then do SEARCH only */
+				WL_INFORM(("Already a GO. Do SEARCH Only"));
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+			} else if (num_chans == 1) {
+				p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+			} else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+				/* SOCIAL_CHAN_CNT + 1 takes care of the progressive scan
+				 * supported by the supplicant.
+				 */
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+			} else {
+				WL_INFORM(("P2P SCAN STATE START \n"));
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+			}
+		} else {
+			err = -EINVAL;
+			goto exit;
+		}
+		err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
+			search_state, action,
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+			p2p_scan_purpose);
+
+		if (!err)
+			cfg->p2p->search_state = search_state;
+
+		kfree(default_chan_list);
+	}
+exit:
+	if (unlikely(err)) {
+		int suppressed = 0;
+		wldev_ioctl(dev, WLC_GET_SCANSUPPRESS, &suppressed, sizeof(int), false);
+		/* Don't print Error incase of Scan suppress */
+		if ((err == BCME_EPERM) && (cfg->scan_suppressed || suppressed)) {
+			cnt = 0;
+			WL_DBG(("Escan failed: Scan Suppressed \n"));
+		} else {
+			cnt++;
+			WL_ERR(("error (%d), cnt=%d\n", err, cnt));
+			// terence 20140111: send disassoc to firmware
+			if (cnt >= 4) {
+				dev = bcmcfg_to_prmry_ndev(cfg);
+				memset(&scbval, 0, sizeof(scb_val_t));
+				wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
+				WL_ERR(("Send disassoc to break the busy dev=%p\n", dev));
+				cnt = 0;
+			}
+		}
+	} else {
+		cnt = 0;
+	}
+	return err;
+}
+
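+/* Prepare escan bookkeeping (result buffer, escan state), program the
+ * active/passive scan mode, optionally zero the passive dwell time when
+ * passive_channel_skip is set, then run the escan and restore the dwell time.
+ */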
+static s32
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+{
+	s32 err = BCME_OK;
+	s32 passive_scan = 0;
+	s32 passive_scan_time = 0;
+	s32 passive_scan_time_org = 0;
+	wl_scan_results_t *results;
+	WL_SCAN(("Enter \n"));
+
+	results = wl_escan_get_buf(cfg, FALSE);
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+	cfg->escan_info.ndev = ndev;
+	cfg->escan_info.wiphy = wiphy;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+	passive_scan = cfg->active_scan ? 0 : 1;
+	err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
+	                      &passive_scan, sizeof(passive_scan));
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto exit;
+	}
+
+	if (passive_channel_skip) {
+
+		err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
+			&passive_scan_time_org, sizeof(passive_scan_time_org));
+		if (unlikely(err)) {
+			WL_ERR(("== error (%d)\n", err));
+			goto exit;
+		}
+
+		WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
+
+		passive_scan_time = 0;
+		err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+			&passive_scan_time, sizeof(passive_scan_time));
+		if (unlikely(err)) {
+			WL_ERR(("== error (%d)\n", err));
+			goto exit;
+		}
+
+		WL_SCAN(("PASSIVE SCAN SKIPPED!! (passive_channel_skip:%d)\n",
+			passive_channel_skip));
+	}
+
+	err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+
+	if (passive_channel_skip) {
+		err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+			&passive_scan_time_org, sizeof(passive_scan_time_org));
+		if (unlikely(err)) {
+			WL_ERR(("== error (%d)\n", err));
+			goto exit;
+		}
+
+		WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
+			passive_scan_time_org));
+	}
+
+exit:
+	return err;
+}
+
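+/* Common scan entry point: validate driver and scan state, distinguish P2P
+ * SSIDs from legacy scans, install interworking/vendor IEs where needed, arm
+ * the scan timeout timer and trigger the escan. Repeated -EBUSY results
+ * eventually force a scan abort (and an optional memdump).
+ */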
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssids;
+	struct ether_addr primary_mac;
+	bool p2p_ssid;
+#ifdef WL11U
+	bcm_tlv_t *interworking_ie;
+#endif
+	s32 err = 0;
+	s32 bssidx = -1;
+	s32 i;
+
+	unsigned long flags;
+	static s32 busy_count = 0;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	struct net_device *remain_on_channel_ndev = NULL;
+#endif
+	uint scan_timer_interval_ms = WL_SCAN_TIMER_INTERVAL_MS;
+
+	/*
+	 * Hostapd triggers scan before starting automatic channel selection
+	 * to collect channel characteristics. However firmware scan engine
+	 * doesn't support any channel characteristics collection along with
+	 * scan. Hence return scan success.
+	 */
+	if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
+		WL_INFORM(("Scan Command on SoftAP Interface. Ignoring...\n"));
+// terence 20161023: let it scan in SoftAP mode
+//		return 0;
+	}
+
+	ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+		WL_ERR(("Sending Action Frames. Try it again.\n"));
+		return -EAGAIN;
+	}
+
+	WL_DBG(("Enter wiphy (%p)\n", wiphy));
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		if (cfg->scan_request == NULL) {
+			wl_clr_drv_status_all(cfg, SCANNING);
+			WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+		} else {
+			WL_ERR(("Scanning already\n"));
+			return -EAGAIN;
+		}
+	}
+	if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+		WL_ERR(("Scanning being aborted\n"));
+		return -EAGAIN;
+	}
+	if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+		WL_ERR(("n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+		return -EOPNOTSUPP;
+	}
+
+#ifdef P2P_LISTEN_OFFLOADING
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+		return -EAGAIN;
+	}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+	if (remain_on_channel_ndev) {
+		WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+		wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+	/* Arm scan timeout timer */
+	mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(scan_timer_interval_ms));
+	if (request) {		/* scan bss */
+		ssids = request->ssids;
+		p2p_ssid = false;
+		for (i = 0; i < request->n_ssids; i++) {
+			if (ssids[i].ssid_len &&
+				IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+				p2p_ssid = true;
+				break;
+			}
+		}
+		if (p2p_ssid) {
+			if (cfg->p2p_supported) {
+				/* p2p scan trigger */
+				if (p2p_on(cfg) == false) {
+					/* p2p on at the first time */
+					p2p_on(cfg) = true;
+					wl_cfgp2p_set_firm_p2p(cfg);
+					get_primary_mac(cfg, &primary_mac);
+					wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#if defined(P2P_IE_MISSING_FIX)
+					cfg->p2p_prb_noti = false;
+#endif
+				}
+				wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+				WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+				p2p_scan(cfg) = true;
+			}
+		} else {
+			/* Legacy scan trigger,
+			 * so we have to disable p2p discovery if p2p discovery is on.
+			 */
+			if (cfg->p2p_supported) {
+				p2p_scan(cfg) = false;
+				/* If the netdevice is not the primary one and p2p is on,
+				 * we will do the p2p scan using P2PAPI_BSSCFG_DEVICE.
+				 */
+
+				if (p2p_scan(cfg) == false) {
+					if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+						err = wl_cfgp2p_discover_enable_search(cfg,
+						false);
+						if (unlikely(err)) {
+							goto scan_out;
+						}
+
+					}
+				}
+			}
+			if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+				if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+					ndev->ieee80211_ptr)) < 0) {
+					WL_ERR(("Find p2p index from ndev(%p) failed\n",
+						ndev));
+					err = BCME_ERROR;
+					goto scan_out;
+				}
+#ifdef WL11U
+				if (request && (interworking_ie =
+						wl_cfg80211_find_interworking_ie(
+						request->ie, request->ie_len)) != NULL) {
+					if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+							VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+							interworking_ie->data,
+							interworking_ie->len)) != BCME_OK) {
+						WL_ERR(("Failed to add interworking IE"));
+					}
+				} else if (cfg->wl11u) {
+					/* we have to clear IW IE and disable gratuitous ARP */
+					wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
+					err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
+					                                0, bssidx);
+					/* we don't care about error here
+					 * because the only failure case is unsupported,
+					 * which is fine
+					 */
+					if (unlikely(err)) {
+						WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
+					}
+					cfg->wl11u = FALSE;
+				}
+#endif /* WL11U */
+				if (request) {
+					err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+							ndev_to_cfgdev(ndev),
+							bssidx, VNDR_IE_PRBREQ_FLAG, request->ie,
+							request->ie_len);
+				}
+
+				if (unlikely(err)) {
+// terence 20161023: let it scan in SoftAP mode
+//					goto scan_out;
+				}
+
+			}
+		}
+	} else {		/* scan in ibss */
+		ssids = this_ssid;
+	}
+
+	if (request && cfg->p2p_supported) {
+		WL_TRACE_HW4(("START SCAN\n"));
+		DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
+			SCAN_WAKE_LOCK_TIMEOUT);
+		DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+	}
+
+	if (cfg->p2p_supported) {
+		if (request && p2p_on(cfg) && p2p_scan(cfg)) {
+
+			/* find my listen channel */
+			cfg->afx_hdl->my_listen_chan =
+				wl_find_listen_channel(cfg, request->ie,
+				request->ie_len);
+			err = wl_cfgp2p_enable_discovery(cfg, ndev,
+			request->ie, request->ie_len);
+
+			if (unlikely(err)) {
+				goto scan_out;
+			}
+		}
+	}
+	err = wl_do_escan(cfg, wiphy, ndev, request);
+	if (likely(!err))
+		goto scan_success;
+	else
+		goto scan_out;
+
+scan_success:
+	busy_count = 0;
+	cfg->scan_request = request;
+	wl_set_drv_status(cfg, SCANNING, ndev);
+
+	return 0;
+
+scan_out:
+	if (err == BCME_BUSY || err == BCME_NOTREADY) {
+		WL_ERR(("Scan err = (%d), busy?%d\n", err, -EBUSY));
+		err = -EBUSY;
+	} else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
+		WL_ERR(("Scan not permitted due to scan suppress\n"));
+		err = -EPERM;
+	} else {
+		/* For all other fw errors, use a generic error code as return
+		 * value to cfg80211 stack
+		 */
+		err = -EAGAIN;
+	}
+
+#define SCAN_EBUSY_RETRY_LIMIT 20
+	if (err == -EBUSY) {
+		if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) {
+			struct ether_addr bssid;
+			s32 ret = 0;
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+			dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+			busy_count = 0;
+			WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+				wl_get_drv_status(cfg, SCANNING, ndev),
+				wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+				wl_get_drv_status(cfg, CONNECTING, ndev),
+				wl_get_drv_status(cfg, CONNECTED, ndev),
+				wl_get_drv_status(cfg, DISCONNECTING, ndev),
+				wl_get_drv_status(cfg, AP_CREATING, ndev),
+				wl_get_drv_status(cfg, AP_CREATED, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+			if (dhdp->memdump_enabled) {
+				dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
+				dhd_bus_mem_dump(dhdp);
+			}
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+
+			bzero(&bssid, sizeof(bssid));
+			if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
+				&bssid, ETHER_ADDR_LEN)) == 0)
+				WL_ERR(("FW is connected with " MACDBG "\n",
+					MAC2STRDBG(bssid.octet)));
+			else
+				WL_ERR(("GET BSSID failed with %d\n", ret));
+
+			wl_cfg80211_scan_abort(cfg);
+
+		} else {
+			/* Hold the context for 400 msec, so that 10 subsequent scans
+			 * give a buffer of 4 sec, which is enough to cover any
+			 * ongoing scan in the firmware.
+			 */
+			WL_DBG(("Enforcing delay for EBUSY case \n"));
+			msleep(400);
+		}
+	} else {
+		busy_count = 0;
+	}
+
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+	DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	cfg->scan_request = NULL;
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	return err;
+}
+
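+/* cfg80211 .scan callback: resolve the target netdev, reject primary-interface
+ * scans while multiple P2P interfaces are operational and run
+ * __wl_cfg80211_scan() under the usr_sync mutex.
+ */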
+static s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Enter\n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef DHD_IFDEBUG
+#ifdef WL_CFG80211_P2P_DEV_IF
+	PRINT_WDEV_INFO(request->wdev);
+#else
+	PRINT_WDEV_INFO(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* DHD_IFDEBUG */
+
+	if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+		if (wl_cfg_multip2p_operational(cfg)) {
+			WL_ERR(("wlan0 scan failed, p2p devices are operational"));
+			return -ENODEV;
+		}
+	}
+	err = wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY,
+		WL_EXT_STATUS_SCAN, NULL);
+	if (err)
+		return err;
+
+	mutex_lock(&cfg->usr_sync);
+	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("scan error (%d)\n", err));
+	}
+	mutex_unlock(&cfg->usr_sync);
+#ifdef WL_DRV_AVOID_SCANCACHE
+	/* Reset roam cache after successful scan request */
+#endif /* WL_DRV_AVOID_SCANCACHE */
+	return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+	s32 err = 0;
+	u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+	retry = htod32(retry);
+	err = wldev_ioctl_set(dev, cmd, &retry, sizeof(retry));
+	if (unlikely(err)) {
+		WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+		(cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+		cfg->conf->rts_threshold = wiphy->rts_threshold;
+		err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+		if (err != BCME_OK)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+		(cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+		cfg->conf->frag_threshold = wiphy->frag_threshold;
+		err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+		if (err != BCME_OK)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG &&
+		(cfg->conf->retry_long != wiphy->retry_long)) {
+		cfg->conf->retry_long = wiphy->retry_long;
+		err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+		if (err != BCME_OK)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_SHORT &&
+		(cfg->conf->retry_short != wiphy->retry_short)) {
+		cfg->conf->retry_short = wiphy->retry_short;
+		err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+		if (err != BCME_OK) {
+			return err;
+		}
+	}
+
+	return err;
+}
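+
+/* Query the firmware "chanspecs" list and return the widest chanspec whose
+ * control channel matches 'channel' without exceeding 'bw_cap'; 2.4GHz
+ * channels are restricted to 20MHz. Returns 0 on failure or if no match is
+ * found.
+ */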
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u8 *buf = NULL;
+	wl_uint32_list_t *list;
+	int err = BCME_OK;
+	chanspec_t c = 0, ret_c = 0;
+	int bw = 0, tmp_bw = 0;
+	int i;
+	u32 tmp_c;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#define LOCAL_BUF_SIZE	1024
+	buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags);
+	if (!buf) {
+		WL_ERR(("buf memory alloc failed\n"));
+		goto exit;
+	}
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+	if (err != BCME_OK) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		goto exit;
+	}
+
+	list = (wl_uint32_list_t *)(void *)buf;
+	for (i = 0; i < dtoh32(list->count); i++) {
+		c = dtoh32(list->element[i]);
+		if (channel <= CH_MAX_2G_CHANNEL) {
+			if (!CHSPEC_IS20(c))
+				continue;
+			if (channel == CHSPEC_CHANNEL(c)) {
+				ret_c = c;
+				bw = 20;
+				goto exit;
+			}
+		}
+		tmp_c = wf_chspec_ctlchan(c);
+		tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
+		if (tmp_c != channel)
+			continue;
+
+		if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+			bw = tmp_bw;
+			ret_c = c;
+			if (bw == bw_cap)
+				goto exit;
+		}
+	}
+exit:
+	if (buf)
+		kfree(buf);
+#undef LOCAL_BUF_SIZE
+	WL_INFORM(("return chanspec %x %d\n", ret_c, bw));
+	return ret_c;
+}
+
+void
+wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
+	int ibss_vsie_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (cfg != NULL && ibss_vsie != NULL) {
+		if (cfg->ibss_vsie != NULL) {
+			kfree(cfg->ibss_vsie);
+		}
+		cfg->ibss_vsie = ibss_vsie;
+		cfg->ibss_vsie_len = ibss_vsie_len;
+	}
+}
+
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+	/* free & initialize VSIE (Vendor Specific IE) */
+	if (cfg->ibss_vsie != NULL) {
+		kfree(cfg->ibss_vsie);
+		cfg->ibss_vsie = NULL;
+		cfg->ibss_vsie_len = 0;
+	}
+}
+
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	char *ioctl_buf = NULL;
+	s32 ret = BCME_OK, bssidx;
+
+	if (cfg != NULL && cfg->ibss_vsie != NULL) {
+		ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+		if (!ioctl_buf) {
+			WL_ERR(("ioctl memory alloc failed\n"));
+			return -ENOMEM;
+		}
+
+		if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+			WL_ERR(("Find index failed\n"));
+			ret = BCME_ERROR;
+			goto end;
+		}
+		/* change the command from "add" to "del" */
+		strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+		cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+		ret = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie",
+			cfg->ibss_vsie, cfg->ibss_vsie_len,
+			ioctl_buf, WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync);
+		WL_ERR(("ret=%d\n", ret));
+
+		if (ret == BCME_OK) {
+			/* free & initialize VSIE */
+			kfree(cfg->ibss_vsie);
+			cfg->ibss_vsie = NULL;
+			cfg->ibss_vsie_len = 0;
+		}
+end:
+		if (ioctl_buf) {
+			kfree(ioctl_buf);
+		}
+	}
+
+	return ret;
+}
+
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wireless_dev* wdev = NULL;
+	struct net_device *new_ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+	wl_aibss_if_t aibss_if;
+	wl_if_event_info *event = NULL;
+
+	if (cfg->ibss_cfgdev != NULL) {
+		WL_ERR(("IBSS interface %s already exists\n", name));
+		return NULL;
+	}
+
+	WL_ERR(("Try to create IBSS interface %s\n", name));
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+	/* generate a new MAC address for the IBSS interface */
+	get_primary_mac(cfg, &cfg->ibss_if_addr);
+	cfg->ibss_if_addr.octet[4] ^= 0x40;
+	memset(&aibss_if, 0, sizeof(aibss_if));
+	memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+	aibss_if.chspec = 0;
+	aibss_if.len = sizeof(aibss_if);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+		sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op)
+		goto fail;
+
+	event = &cfg->if_event_info;
+	/* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
+	 * over this net_device interface to dhd_linux, hence the interface is managed by dhd_linux
+	 * and will be freed by dhd_detach unless it gets unregistered before that. The
+	 * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
+	 * be freed by wl_dealloc_netinfo
+	 */
+	new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+		event->mac, event->bssidx, event->name);
+	if (new_ndev == NULL)
+		goto fail;
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (wdev == NULL)
+		goto fail;
+	wdev->wiphy = wiphy;
+	wdev->iftype = NL80211_IFTYPE_ADHOC;
+	wdev->netdev = new_ndev;
+	new_ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+	/* rtnl lock must have been acquired; if this is not the case, wl_cfg80211_register_if
+	 * needs to be modified to take one parameter (bool need_rtnl_lock)
+	 */
+	ASSERT_RTNL();
+	if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, FALSE) != BCME_OK)
+		goto fail;
+
+	wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE, event->bssidx);
+	cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+	WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+	return cfg->ibss_cfgdev;
+
+fail:
+	WL_ERR(("failed to create IBSS interface %s \n", name));
+	cfg->bss_pending_op = FALSE;
+	if (new_ndev)
+		wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+	if (wdev)
+		kfree(wdev);
+	return NULL;
+}
+
+static s32
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+
+	if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+		return -EINVAL;
+	ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+		sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("timed out waiting for the IF_DEL event\n"));
+		goto fail;
+	}
+
+	wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
+	cfg->ibss_cfgdev = NULL;
+	return 0;
+
+fail:
+	cfg->bss_pending_op = FALSE;
+	return -1;
+}
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef WLMESH
+s32
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+	wl_interface_create_t iface;
+	s32 ret;
+	wl_interface_info_t *info;
+
+	bzero(&iface, sizeof(wl_interface_create_t));
+
+	iface.ver = WL_INTERFACE_CREATE_VER;
+
+	if (iface_type == NL80211_IFTYPE_AP)
+		iface.flags = WL_INTERFACE_CREATE_AP;
+	else
+		iface.flags = WL_INTERFACE_CREATE_STA;
+
+	if (del) {
+		ret = wldev_iovar_setbuf(ndev, "interface_remove",
+			NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+	} else {
+		if (addr) {
+			memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+			iface.flags |= WL_INTERFACE_MAC_USE;
+		}
+		ret = wldev_iovar_getbuf(ndev, "interface_create",
+			&iface, sizeof(wl_interface_create_t),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret == 0) {
+			/* success */
+			info = (wl_interface_info_t *)cfg->ioctl_buf;
+			WL_DBG(("wl interface create success!! bssidx:%d \n",
+				info->bsscfgidx));
+		}
+	}
+
+	if (ret < 0)
+		WL_ERR(("Interface %s failed!! ret %d\n",
+			del ? "remove" : "create", ret));
+
+	return ret;
+}
+#else
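+/* Create or remove a firmware interface through the "interface_create" /
+ * "interface_remove" iovars. Returns the new bsscfg index on successful
+ * creation, 0 on successful removal, or a negative error code otherwise.
+ */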
+s32
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+	s32 ret;
+	struct wl_interface_create_v2 iface;
+	wl_interface_create_v3_t iface_v3;
+	struct wl_interface_info_v1 *info;
+	wl_interface_info_v2_t *info_v2;
+	enum wl_interface_type iftype;
+	uint32 ifflags;
+	bool use_iface_info_v2 = false;
+	u8 ioctl_buf[WLC_IOCTL_SMLEN];
+
+	if (del) {
+		ret = wldev_iovar_setbuf(ndev, "interface_remove",
+			NULL, 0, ioctl_buf, sizeof(ioctl_buf), NULL);
+		if (unlikely(ret))
+			WL_ERR(("Interface remove failed!! ret %d\n", ret));
+		return ret;
+	}
+
+	/* Interface create */
+	bzero(&iface, sizeof(iface));
+	/*
+	 * flags field is still used along with iftype in order to let older
+	 * versions of the FW work with the latest app changes.
+	 */
+	if (iface_type == NL80211_IFTYPE_AP) {
+		iftype = WL_INTERFACE_TYPE_AP;
+		ifflags = WL_INTERFACE_CREATE_AP;
+	} else {
+		iftype = WL_INTERFACE_TYPE_STA;
+		ifflags = WL_INTERFACE_CREATE_STA;
+	}
+	if (addr) {
+		ifflags |= WL_INTERFACE_MAC_USE;
+	}
+
+	/* Pass ver = 0 for fetching the interface_create iovar version */
+	ret = wldev_iovar_getbuf(ndev, "interface_create",
+		&iface, sizeof(struct wl_interface_create_v2),
+		ioctl_buf, sizeof(ioctl_buf), NULL);
+	if (ret == BCME_UNSUPPORTED) {
+		WL_ERR(("interface_create iovar not supported\n"));
+		return ret;
+	} else if ((ret == 0) && *((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3) {
+		WL_DBG(("interface_create version 3\n"));
+		use_iface_info_v2 = true;
+		bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
+		iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
+		iface_v3.iftype = iftype;
+		iface_v3.flags = ifflags;
+		if (addr) {
+			memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
+		}
+		ret = wldev_iovar_getbuf(ndev, "interface_create",
+			&iface_v3, sizeof(wl_interface_create_v3_t),
+			ioctl_buf, sizeof(ioctl_buf), NULL);
+	} else {
+#if 0
+		/* On any other error, attempt with iovar version 2 */
+		WL_DBG(("interface_create version 2. get_ver:%d\n", ret));
+		iface.ver = WL_INTERFACE_CREATE_VER_2;
+		iface.iftype = iftype;
+		iface.flags = ifflags;
+		if (addr) {
+			memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+		}
+		ret = wldev_iovar_getbuf(ndev, "interface_create",
+			&iface, sizeof(struct wl_interface_create_v2),
+			ioctl_buf, sizeof(ioctl_buf), NULL);
+#endif
+	}
+
+	if (unlikely(ret)) {
+		WL_ERR(("Interface create failed!! ret %d\n", ret));
+		return ret;
+	}
+
+	/* success case */
+	if (use_iface_info_v2 == true) {
+		info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
+		ret = info_v2->bsscfgidx;
+	} else {
+		/* Use v1 struct */
+		info = (struct wl_interface_info_v1 *)ioctl_buf;
+		ret = info->bsscfgidx;
+	}
+
+	WL_DBG(("wl interface create success!! bssidx:%d \n", ret));
+	return ret;
+}
+#endif /* WLMESH */
+
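+/* Return true for legacy chips (BCM4350/4355/4345/43430/43362) that need
+ * workarounds such as the "bss" iovar path instead of "interface_create".
+ */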
+bool
+wl_customer6_legacy_chip_check(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev)
+{
+	u32 chipnum;
+	wlc_rev_info_t revinfo;
+	int ret;
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+	if (ret < 0) {
+		WL_ERR(("%s: GET revinfo FAILED. ret:%d\n", __FUNCTION__, ret));
+		ASSERT(0);
+		return false;
+	}
+
+	WL_DBG(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+		dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(revinfo.chipnum)));
+	chipnum = revinfo.chipnum;
+	if ((chipnum == BCM4350_CHIP_ID) || (chipnum == BCM4355_CHIP_ID) ||
+		(chipnum == BCM4345_CHIP_ID) || (chipnum == BCM43430_CHIP_ID) ||
+		(chipnum == BCM43362_CHIP_ID)) {
+		/* WAR required */
+		return true;
+	}
+
+	return false;
+}
+
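+/* Work around firmware branches whose "bss" iovar handling swaps the manual
+ * AP/STA bsscfg-create sub-commands: for the affected chips the two values
+ * are exchanged before being sent to the firmware.
+ */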
+void
+wl_bss_iovar_war(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 *val)
+{
+	u32 chipnum;
+	wlc_rev_info_t revinfo;
+	int ret;
+	bool need_war = false;
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+	if (ret < 0) {
+		WL_ERR(("%s: GET revinfo FAILED. ret:%d\n", __FUNCTION__, ret));
+	} else {
+		WL_DBG(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+			dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(revinfo.chipnum)));
+		chipnum = revinfo.chipnum;
+		if ((chipnum == BCM4359_CHIP_ID) || (chipnum == BCM43596_CHIP_ID)) { 
+			/* WAR required */
+			need_war = true;
+		}
+	}
+
+	if (wl_customer6_legacy_chip_check(cfg, ndev) || need_war) {
+		/* Few firmware branches have issues in bss iovar handling and
+		 * that can't be changed since they are in production.
+		 */
+		if (*val == WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE) {
+			*val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+		} else if (*val == WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE) {
+			*val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+		} else {
+			/* Ignore for other bss enums */
+			return;
+		}
+		WL_ERR(("wl bss %d\n", *val));
+	}
+}
+
+s32
+wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+	s32 ret = BCME_OK;
+	s32 val = 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+		struct ether_addr ea;
+	} bss_setbuf;
+
+	WL_INFORM(("iface_type:%d del:%d \n", iface_type, del));
+
+	bzero(&bss_setbuf, sizeof(bss_setbuf));
+
+	/* AP=2, STA=3, up=1, down=0, val=-1 */
+	if (del) {
+		val = WLC_AP_IOV_OP_DELETE;
+	} else if (iface_type == NL80211_IFTYPE_AP) {
+		/* Add/role change to AP Interface */
+		WL_DBG(("Adding AP Interface \n"));
+		val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+	} else if (iface_type == NL80211_IFTYPE_STATION) {
+		/* Add/role change to STA Interface */
+		WL_DBG(("Adding STA Interface \n"));
+		val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+	} else {
+		WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", iface_type));
+		return -EINVAL;
+	}
+
+	if (!del) {
+		wl_bss_iovar_war(cfg, ndev, &val);
+	}
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+
+	if (addr) {
+		memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+	}
+
+	WL_DBG(("wl bss %d bssidx:%d iface:%s \n", val, bsscfg_idx, ndev->name));
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (ret != 0)
+		WL_ERR(("'bss %d' failed with %d\n", val, ret));
+
+	return ret;
+}
+
+s32
+wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 bss_up)
+{
+	s32 ret = BCME_OK;
+	s32 val = bss_up ? 1 : 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+
+	WL_DBG(("wl bss -C %d %s\n", bsscfg_idx, bss_up ? "up" : "down"));
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (ret != 0) {
+		WL_ERR(("'bss %d' failed with %d\n", bss_up, ret));
+	}
+
+	return ret;
+}
+
+bool
+wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+	s32 result, val;
+	bool isup = false;
+	s8 getbuf[64];
+
+	/* Check if the BSS is up */
+	*(int*)getbuf = -1;
+	result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+		sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+	if (result != 0) {
+		WL_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+		WL_ERR(("NOTE: this ioctl error is normal "
+			"when the BSS has not been created yet.\n"));
+	} else {
+		val = *(int*)getbuf;
+		val = dtoh32(val);
+		WL_DBG(("wl bss -C %d = %d\n", bsscfg_idx, val));
+		isup = (val ? TRUE : FALSE);
+	}
+	return isup;
+}
+
+static s32
+cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode)
+{
+	switch (type) {
+		case NL80211_IFTYPE_STATION:
+			*role = WLC_E_IF_ROLE_STA;
+			*mode = WL_MODE_BSS;
+			break;
+		case NL80211_IFTYPE_AP:
+			*role = WLC_E_IF_ROLE_AP;
+			*mode = WL_MODE_AP;
+			break;
+		case NL80211_IFTYPE_P2P_GO:
+			*role = WLC_E_IF_ROLE_P2P_GO;
+			*mode = WL_MODE_AP;
+			break;
+		case NL80211_IFTYPE_P2P_CLIENT:
+			*role = WLC_E_IF_ROLE_P2P_CLIENT;
+			*mode = WL_MODE_BSS;
+			break;
+		case NL80211_IFTYPE_MONITOR:
+			WL_ERR(("Unsupported mode \n"));
+			return BCME_UNSUPPORTED;
+		case NL80211_IFTYPE_ADHOC:
+			*role = WLC_E_IF_ROLE_IBSS;
+			*mode = WL_MODE_IBSS;
+			break;
+		default:
+			WL_ERR(("Unknown interface type:0x%x\n", type));
+			return BCME_ERROR;
+	}
+	return BCME_OK;
+}
+
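+/* Map a firmware interface role (WLC_E_IF_ROLE_*) to the corresponding
+ * nl80211 interface type; returns BCME_ERROR for unknown roles.
+ */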
+static s32
+wl_if_to_cfg80211_type(uint16 role)
+{
+	switch (role) {
+	case WLC_E_IF_ROLE_STA:
+		return NL80211_IFTYPE_STATION;
+	case WLC_E_IF_ROLE_AP:
+		return NL80211_IFTYPE_AP;
+	case WLC_E_IF_ROLE_P2P_GO:
+		return NL80211_IFTYPE_P2P_GO;
+	case WLC_E_IF_ROLE_P2P_CLIENT:
+		return NL80211_IFTYPE_P2P_CLIENT;
+	case WLC_E_IF_ROLE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	default:
+		WL_ERR(("Unknown interface role:0x%x. Forcing type station\n", role));
+		return BCME_ERROR;
+	}
+}
+
+struct net_device *
+wl_cfg80211_post_ifcreate(struct net_device *ndev,
+	wl_if_event_info *event, u8 *addr,
+	const char *name, bool rtnl_lock_reqd)
+{
+	struct bcm_cfg80211 *cfg;
+	struct net_device *primary_ndev;
+	struct net_device *new_ndev = NULL;
+	struct wireless_dev *wdev = NULL;
+	s32 iface_type;
+	s32 ret;
+	u16 mode;
+	u16 role;
+	u8 mac_addr[ETH_ALEN];
+
+	if (!ndev || !event) {
+		WL_ERR(("Wrong arg\n"));
+		return NULL;
+	}
+
+	cfg = wl_get_cfg(ndev);
+	if (!cfg) {
+		WL_ERR(("cfg null\n"));
+		return NULL;
+	}
+
+	WL_DBG(("Enter. role:%d ifidx:%d bssidx:%d\n",
+		event->role, event->ifidx, event->bssidx));
+	if (!event->ifidx || !event->bssidx) {
+		/* Fw returned primary idx (0) for virtual interface */
+		WL_ERR(("Wrong index. ifidx:%d bssidx:%d \n",
+			event->ifidx, event->bssidx));
+		return NULL;
+	}
+
+	iface_type = wl_if_to_cfg80211_type(event->role);
+	if (iface_type < 0) {
+		/* Unknown iface type */
+		WL_ERR(("Wrong iface type \n"));
+		return NULL;
+	}
+
+#ifdef WL_EXT_IAPSTA
+	if (wl_ext_check_mesh_creating(ndev)) {
+		printf("%s: change iface_type to NL80211_IFTYPE_MESH_POINT\n", __FUNCTION__);
+		iface_type = NL80211_IFTYPE_MESH_POINT;
+	}
+#endif
+	if (cfg80211_to_wl_iftype(iface_type, &role, &mode) < 0) {
+		/* Unsupported operating mode */
+		WL_ERR(("Unsupported operating mode \n"));
+		return NULL;
+	}
+
+	WL_DBG(("mac_ptr:%p name:%s role:%d nl80211_iftype:%d " MACDBG "\n",
+		addr, name, event->role, iface_type, MAC2STRDBG(event->mac)));
+	if (!name) {
+		/* If iface name is not provided, use dongle ifname */
+		name = event->name;
+	}
+
+	if (!addr) {
+		/* If mac address is not set, use primary mac with locally administered
+		 * bit set.
+		 */
+		primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+		memcpy(mac_addr, primary_ndev->dev_addr, ETH_ALEN);
+#ifndef CUSTOMER_HW6
+		/* For customer6 builds, use primary mac address for virtual interface */
+		mac_addr[0] |= 0x02;
+#endif /* CUSTOMER_HW6 */
+		addr = mac_addr;
+	}
+
+	new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
+		name, addr, event->bssidx, event->name);
+	if (!new_ndev) {
+		WL_ERR(("I/F allocation failed! \n"));
+		goto fail;
+	} else {
+		WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
+		 event->ifidx, event->bssidx));
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (!wdev) {
+		WL_ERR(("wireless_dev alloc failed! \n"));
+		goto fail;
+	}
+
+	wdev->wiphy = bcmcfg_to_wiphy(cfg);
+	wdev->iftype = iface_type;
+	new_ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+	/* Check whether mac addr is in sync with fw. If not,
+	 * apply it using cur_etheraddr.
+	 */
+	if (memcmp(addr, event->mac, ETH_ALEN) != 0) {
+		ret = wldev_iovar_setbuf_bsscfg(new_ndev, "cur_etheraddr",
+			addr, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+			event->bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(ret)) {
+			WL_ERR(("set cur_etheraddr Error (%d)\n", ret));
+			goto fail;
+		}
+		memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+		WL_ERR(("Applying updated mac address to firmware\n"));
+	}
+
+	if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd) != BCME_OK) {
+		WL_ERR(("IFACE register failed \n"));
+		goto fail;
+	}
+
+	/* Initialize with the station mode params */
+	ret = wl_alloc_netinfo(cfg, new_ndev, wdev, mode,
+		PM_ENABLE, event->bssidx);
+	if (unlikely(ret)) {
+		WL_ERR(("wl_alloc_netinfo Error (%d)\n", ret));
+		goto fail;
+	}
+
+	/* Apply the mode & infra setting based on iftype */
+	if ((ret = wl_config_ifmode(cfg, new_ndev, iface_type)) < 0) {
+		WL_ERR(("config ifmode failure (%d)\n", ret));
+		goto fail;
+	}
+
+	if (mode == WL_MODE_AP) {
+		wl_set_drv_status(cfg, AP_CREATING, new_ndev);
+	}
+
+	WL_INFORM(("Host Network Interface (%s) for Secondary I/F created."
+		" cfg_iftype:%d wl_role:%d\n", new_ndev->name, iface_type, event->role));
+
+	return new_ndev;
+
+fail:
+	if (wdev)
+		kfree(wdev);
+	if (new_ndev)
+		wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
+
+	return NULL;
+}
+
+void
+wl_cfg80211_cleanup_virtual_ifaces(struct net_device *dev, bool rtnl_lock_reqd)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct net_info *iter, *next;
+	struct net_device *primary_ndev;
+
+	/* Note: This function will clean up only the network interface and host
+	 * data structures. The firmware interface clean up will happen
+	 * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
+	 * context for the module case).
+	 */
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev && (iter->ndev != primary_ndev)) {
+			WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
+			wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd);
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+s32
+wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd)
+{
+	int ifidx = -1;
+	struct bcm_cfg80211 *cfg;
+
+	if (!ndev || !ndev->ieee80211_ptr) {
+		/* No wireless dev done for this interface */
+		return -EINVAL;
+	}
+
+	cfg = wl_get_cfg(ndev);
+	if (!cfg) {
+		WL_ERR(("cfg null\n"));
+		return BCME_ERROR;
+	}
+	ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
+	BCM_REFERENCE(ifidx);
+	if (ifidx <= 0) {
+		WL_ERR(("Invalid IF idx for iface:%s\n", ndev->name));
+		ASSERT(0);
+		return BCME_ERROR;
+	}
+	WL_DBG(("cfg80211_remove for iface:%s \n", ndev->name));
+	wl_cfg80211_remove_if(cfg, ifidx, ndev, rtnl_lock_reqd);
+	cfg->bss_pending_op = FALSE;
+
+	return BCME_OK;
+}
+
+#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF)
+/* Create a Generic Network Interface and initialize it depending on
+ * the interface type
+ */
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy,
+	enum nl80211_iftype iface_type,
+	u8 *mac_addr, const char *name)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *new_ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 ret = BCME_OK;
+	s32 bsscfg_idx = 0;
+	u32 timeout;
+	wl_if_event_info *event = NULL;
+	u8 addr[ETH_ALEN];
+	struct net_info *iter, *next;
+#ifdef WLMESH
+	u16 role = 0, mode = 0;
+#endif
+
+	WL_DBG(("Enter\n"));
+	if (!name) {
+		WL_ERR(("Interface name not provided\n"));
+		return NULL;
+	}
+	else {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		for_each_ndev(cfg, iter, next) {
+			if (iter->ndev) {
+				if (strcmp(iter->ndev->name, name) == 0) {
+					WL_ERR(("Interface name %s exists!\n", iter->ndev->name));
+					return NULL;
+				}
+			}
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	}
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (likely(!mac_addr)) {
+		/* Use primary MAC with the locally administered bit for the
+		 *  Secondary STA I/F
+		 */
+		memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
+		addr[0] |= 0x02;
+	} else {
+		/* Use the application provided mac address (if any) */
+		memcpy(addr, mac_addr, ETH_ALEN);
+	}
+
+	if ((iface_type != NL80211_IFTYPE_STATION) && (iface_type != NL80211_IFTYPE_AP)) {
+		WL_ERR(("IFACE type:%d not supported. Only STA "
+					"or AP IFACE is supported\n", iface_type));
+		return NULL;
+	}
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+	/* De-initialize the p2p discovery interface, if operational */
+	if (p2p_is_on(cfg)) {
+		WL_DBG(("Disabling P2P Discovery Interface \n"));
+#ifdef WL_CFG80211_P2P_DEV_IF
+		ret = wl_cfg80211_scan_stop(cfg, bcmcfg_to_p2p_wdev(cfg));
+#else
+		ret = wl_cfg80211_scan_stop(cfg, cfg->p2p_net);
+#endif
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+		}
+
+		wl_cfgp2p_disable_discovery(cfg);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		p2p_on(cfg) = false;
+	}
+
+	/*
+	 * Initialize the firmware I/F.
+	 */
+	if (wl_customer6_legacy_chip_check(cfg, primary_ndev)) {
+		/* Use bss iovar instead of interface_create iovar */
+		ret = BCME_UNSUPPORTED;
+	} else {
+		ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx,
+			iface_type, 0, addr);
+	}
+	if (ret == BCME_UNSUPPORTED) {
+		/* Use bssidx 1 by default */
+		bsscfg_idx = 1;
+		if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
+			bsscfg_idx, iface_type, 0, addr)) < 0) {
+			goto exit;
+		}
+	} else if (ret < 0) {
+		WL_ERR(("Interface create failed!! ret:%d \n", ret));
+		goto exit;
+	} else {
+		/* Success */
+		bsscfg_idx = ret;
+	}
+
+	WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx));
+
+	/*
+	 * Wait till the firmware send a confirmation event back.
+	 */
+	WL_DBG(("Wait for the FW I/F Event\n"));
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("ADD_IF event did not arrive. Return\n"));
+		goto exit;
+	}
+
+	event = &cfg->if_event_info;
+#ifdef WLMESH
+	cfg80211_to_wl_iftype(iface_type, &role, &mode);
+	event->role = role;
+#endif
+
+	/*
+	 * Since the FW operation is successful, we can go ahead with
+	 * the host interface creation.
+	 */
+	if ((new_ndev = wl_cfg80211_post_ifcreate(primary_ndev,
+		event, mac_addr, name, false))) {
+		/* Iface post ops successful. Return ndev/wdev ptr */
+		return ndev_to_cfgdev(new_ndev);
+	}
+
+exit:
+	cfg->bss_pending_op = FALSE;
+	return NULL;
+}
+
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = NULL;
+	s32 ret = BCME_OK;
+	s32 bsscfg_idx = 1;
+	u32 timeout;
+	enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
+
+	WL_DBG(("Enter\n"));
+
+	/* If any scan is going on, abort it */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		WL_DBG(("Scan in progress. Aborting the scan!\n"));
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+
+	ndev = (struct net_device *)cfgdev_to_ndev(cfgdev);
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+	/* Delete the firmware interface. "interface_remove" command
+	 * should go on the interface to be deleted
+	 */
+	bsscfg_idx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+	if (bsscfg_idx <= 0) {
+		/* validate bsscfgidx */
+		WL_ERR(("Wrong bssidx! \n"));
+		return -EINVAL;
+	}
+	WL_DBG(("del interface. bssidx:%d", bsscfg_idx));
+	ret = wl_cfg80211_interface_ops(cfg, ndev, bsscfg_idx,
+		NL80211_IFTYPE_STATION, 1, NULL);
+	if (ret == BCME_UNSUPPORTED) {
+		if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
+			bsscfg_idx, iface_type, true, NULL)) < 0) {
+			WL_ERR(("DEL bss failed ret:%d \n", ret));
+			goto exit;
+		}
+	} else if (ret < 0) {
+		WL_ERR(("Interface DEL failed ret:%d \n", ret));
+		goto exit;
+	}
+
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("timed out waiting for the IF_DEL event\n"));
+	}
+
+exit:
+	ret = wl_cfg80211_post_ifdel(ndev, false);
+	if (unlikely(ret)) {
+		WL_ERR(("post_ifdel failed\n"));
+	}
+
+	return ret;
+}
+#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */
+
+#ifdef WLMESH
+s32 wl_cfg80211_set_sae_password(struct net_device *dev, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	sscanf(buf, "%s %d", cfg->sae_password, &cfg->sae_password_len);
+	return 0;
+}
+
+static s32 wl_cfg80211_join_mesh(
+	struct wiphy *wiphy, struct net_device *dev,
+	const struct mesh_config *conf,
+	const struct mesh_setup *setup)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	struct ieee80211_channel *chan = setup->chandef.chan;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 6, 0))
+	struct ieee80211_channel *chan = setup->channel;
+#endif
+	u32 param[2] = {0, 0};
+	s32 err = 0;
+	u32 bw_cap = 0;
+	u32 beacon_interval = setup->beacon_interval;
+	u32 dtim_period = setup->dtim_period;
+	size_t join_params_size;
+	struct wl_join_params join_params;
+	chanspec_t chanspec = 0;
+
+	cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+
+	if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+		struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+		u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+		if ((memcmp(setup->mesh_id, lssid->SSID, lssid->SSID_len) == 0) &&
+			(*channel == cfg->channel)) {
+			WL_ERR(("MESH connection already exists to " MACDBG "\n",
+				MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
+			return -EISCONN;
+		}
+		WL_ERR(("Previous connection exists, please disconnect mesh %s (" MACDBG ") first\n",
+			lssid->SSID, MAC2STRDBG(bssid)));
+		return -EISCONN;
+	}
+
+	if (chan) {
+		if (chan->band == IEEE80211_BAND_5GHZ)
+			param[0] = WLC_BAND_5G;
+		else if (chan->band == IEEE80211_BAND_2GHZ)
+			param[0] = WLC_BAND_2G;
+		err = wldev_iovar_getint(dev, "bw_cap", param);
+		if (unlikely(err)) {
+			WL_ERR(("Get bw_cap Failed (%d)\n", err));
+			return err;
+		}
+		bw_cap = param[0];
+		chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap);
+	}
+
+	memset(&join_params, 0, sizeof(join_params));
+	memcpy((void *)join_params.ssid.SSID, (void *)setup->mesh_id,
+		setup->mesh_id_len);
+
+	join_params.ssid.SSID_len = htod32(setup->mesh_id_len);
+	join_params.params.chanspec_list[0] = chanspec;
+	join_params.params.chanspec_num = 1;
+	wldev_iovar_setint(dev, "chanspec", chanspec);
+	join_params_size = sizeof(join_params);
+
+	wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+	wldev_iovar_setint(dev, "wsec", 0);
+
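+	/* If an SAE password was provisioned via wl_cfg80211_set_sae_password,
+	 * enable mesh SAE authentication with AES and mandatory MFP; otherwise
+	 * the mesh runs open with MFP disabled.
+	 */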
+	if (cfg->sae_password_len > 0) {
+		wldev_iovar_setint(dev, "mesh_auth_proto", 1);
+		wldev_iovar_setint(dev, "wpa_auth", WPA2_AUTH_PSK);
+		wldev_iovar_setint(dev, "wsec", AES_ENABLED);
+		wldev_iovar_setint(dev, "mfp", WL_MFP_REQUIRED);
+		printf("%s: password=%s, len=%d\n", __FUNCTION__,
+			cfg->sae_password, cfg->sae_password_len);
+		wldev_iovar_setbuf(dev, "sae_password", cfg->sae_password, cfg->sae_password_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+	} else {
+		wldev_iovar_setint(dev, "mesh_auth_proto", 0);
+		wldev_iovar_setint(dev, "mfp", WL_MFP_NONE);
+	}
+
+	if (beacon_interval) {
+		if ((err = wldev_ioctl_set(dev, WLC_SET_BCNPRD,
+			&beacon_interval, sizeof(s32))) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (dtim_period) {
+		if ((err = wldev_ioctl_set(dev, WLC_SET_DTIMPRD,
+			&dtim_period, sizeof(s32))) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+	wldev_iovar_setint(dev, "mpc", 0);
+
+	WL_ERR(("JOIN %s on channel %d with chanspec 0x%4x\n",
+				join_params.ssid.SSID, cfg->channel, chanspec));
+
+	err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+		join_params_size);
+
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+	return err;
+}
+
+
+static s32 wl_cfg80211_leave_mesh(
+	struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	scb_val_t scbval;
+	u8 *curbssid;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	wl_link_down(cfg);
+
+	WL_ERR(("Leave MESH\n"));
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+	wl_set_drv_status(cfg, DISCONNECTING, dev);
+	scbval.val = 0;
+	memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+	err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+		sizeof(scb_val_t));
+	if (unlikely(err)) {
+		wl_clr_drv_status(cfg, DISCONNECTING, dev);
+		WL_ERR(("error(%d)\n", err));
+		return err;
+	}
+	memset(cfg->sae_password, 0, SAE_MAX_PASSWD_LEN);
+	cfg->sae_password_len = 0;
+
+	return err;
+}
+#endif /* WLMESH */
+
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_bss *bss;
+	struct ieee80211_channel *chan;
+	struct wl_join_params join_params;
+	int scan_suppress;
+	struct cfg80211_ssid ssid;
+	s32 scan_retry = 0;
+	s32 err = 0;
+	size_t join_params_size;
+	chanspec_t chanspec = 0;
+	u32 param[2] = {0, 0};
+	u32 bw_cap = 0;
+
+	WL_TRACE(("In\n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_INFORM(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
+	if (!params->ssid || params->ssid_len <= 0 ||
+			params->ssid_len > DOT11_MAX_SSID_LEN) {
+		WL_ERR(("Invalid parameter\n"));
+		return -EINVAL;
+	}
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	chan = params->chandef.chan;
+#else
+	chan = params->channel;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	if (chan)
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+	if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+		struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+		u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+		if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) &&
+			(memcmp(params->ssid, lssid->SSID, lssid->SSID_len) == 0) &&
+			(*channel == cfg->channel))) {
+			WL_ERR(("Connection already exists to " MACDBG "\n",
+				MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
+			return -EISCONN;
+		}
+		WL_ERR(("Ignore previous connection to %s (" MACDBG ")\n",
+			lssid->SSID, MAC2STRDBG(bssid)));
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+	if (!bss) {
+		if (IBSS_INITIAL_SCAN_ALLOWED == TRUE) {
+			memcpy(ssid.ssid, params->ssid, params->ssid_len);
+			ssid.ssid_len = params->ssid_len;
+			do {
+				if (unlikely
+					(__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+					 -EBUSY)) {
+					wl_delay(150);
+				} else {
+					break;
+				}
+			} while (++scan_retry < WL_SCAN_RETRY_MAX);
+
+			/* rtnl lock code was removed here; it is not clear why the
+			 * rtnl lock would need to be released.
+			 */
+
+			/* wait 4 seconds till the scan is done */
+			schedule_timeout_interruptible(msecs_to_jiffies(4000));
+
+			bss = cfg80211_get_ibss(wiphy, NULL,
+				params->ssid, params->ssid_len);
+		}
+	}
+	if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) ||
+		((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid &&
+		!memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) {
+		cfg->ibss_starter = false;
+		WL_DBG(("Found IBSS\n"));
+	} else {
+		cfg->ibss_starter = true;
+	}
+
+	if (bss) {
+		CFG80211_PUT_BSS(wiphy, bss);
+	}
+
+	if (chan) {
+		if (chan->band == IEEE80211_BAND_5GHZ)
+			param[0] = WLC_BAND_5G;
+		else if (chan->band == IEEE80211_BAND_2GHZ)
+			param[0] = WLC_BAND_2G;
+		err = wldev_iovar_getint(dev, "bw_cap", param);
+		if (unlikely(err)) {
+			WL_ERR(("Get bw_cap Failed (%d)\n", err));
+			return err;
+		}
+		bw_cap = param[0];
+		chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap);
+	}
+	/*
+	 * Join with specific BSSID and cached SSID
+	 * If SSID is zero join based on BSSID only
+	 */
+	memset(&join_params, 0, sizeof(join_params));
+	memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
+		params->ssid_len);
+	join_params.ssid.SSID_len = htod32(params->ssid_len);
+	if (params->bssid) {
+		memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN);
+		err = wldev_ioctl_set(dev, WLC_SET_DESIRED_BSSID, &join_params.params.bssid,
+			ETHER_ADDR_LEN);
+		if (unlikely(err)) {
+			WL_ERR(("Error (%d)\n", err));
+			return err;
+		}
+	} else
+		memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = TRUE;
+		/* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */
+		err = wldev_ioctl_set(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int));
+		if (unlikely(err)) {
+			WL_ERR(("Scan Suppress Setting Failed (%d)\n", err));
+			return err;
+		}
+	}
+
+	join_params.params.chanspec_list[0] = chanspec;
+	join_params.params.chanspec_num = 1;
+	wldev_iovar_setint(dev, "chanspec", chanspec);
+	join_params_size = sizeof(join_params);
+
+	/* Disable Authentication; IBSS will add a key if required */
+	wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+	wldev_iovar_setint(dev, "wsec", 0);
+
+	err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+		join_params_size);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = FALSE;
+		/* Reset the SCAN SUPPRESS Flag */
+		err = wldev_ioctl_set(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int));
+		if (unlikely(err)) {
+			WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err));
+			return err;
+		}
+	}
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+#ifdef WL_RELMCAST
+	cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */
+#endif /* WL_RELMCAST */
+	return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	scb_val_t scbval;
+	u8 *curbssid;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	wl_link_down(cfg);
+
+	WL_ERR(("Leave IBSS\n"));
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+	wl_set_drv_status(cfg, DISCONNECTING, dev);
+	scbval.val = 0;
+	memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+	err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+		sizeof(scb_val_t));
+	if (unlikely(err)) {
+		wl_clr_drv_status(cfg, DISCONNECTING, dev);
+		WL_ERR(("error(%d)\n", err));
+		return err;
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	return err;
+}
+
+#ifdef MFP
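+/* Walk the RSN IE (group cipher, pairwise cipher list, AKM list) and return,
+ * via rsn_cap, a pointer to the 2-byte RSN Capabilities field. Returns
+ * BCME_BADLEN if the IE is truncated.
+ */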
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8** rsn_cap)
+{
+	u16 suite_count;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	u16 len;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+
+	if (!wpa2ie)
+		return -1;
+
+	len = wpa2ie->len;
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		rsn_cap[0] = (u8 *)&mgmt->list[suite_count];
+	} else
+		return BCME_BADLEN;
+
+	return 0;
+}
+#endif /* MFP */
+
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+		val = WPA_AUTH_PSK |
+			WPA_AUTH_UNSPECIFIED;
+	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+		val = WPA2_AUTH_PSK|
+			WPA2_AUTH_UNSPECIFIED;
+	else
+		val = WPA_AUTH_DISABLED;
+
+	if (is_wps_conn(sme))
+		val = WPA_AUTH_DISABLED;
+
+#ifdef BCMWAPI_WPI
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		WL_DBG((" * wl_set_wpa_version, set wpa_auth"
+			" to WPA_AUTH_WAPI 0x400"));
+		val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
+	}
+#endif
+	WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wpa_auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_versions = sme->crypto.wpa_versions;
+	return err;
+}
+
+#ifdef BCMWAPI_WPI
+static s32
+wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	WL_DBG((" %s \n", __FUNCTION__));
+
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", (void *)sme->ie, sme->ie_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+		if (unlikely(err)) {
+			WL_ERR(("===> set_wapi_ie Error (%d)\n", err));
+			return err;
+		}
+	} else
+		WL_DBG((" * skip \n"));
+	return err;
+}
+#endif /* BCMWAPI_WPI */
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	switch (sme->auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+		val = WL_AUTH_OPEN_SYSTEM;
+		WL_DBG(("open system\n"));
+		break;
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		val = WL_AUTH_SHARED_KEY;
+		WL_DBG(("shared key\n"));
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		val = WL_AUTH_OPEN_SHARED;
+		WL_DBG(("automatic\n"));
+		break;
+	default:
+		val = 2;
+		WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+		break;
+	}
+
+	err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->auth_type = sme->auth_type;
+	return err;
+}
+
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct wl_security *sec;
+	s32 pval = 0;
+	s32 gval = 0;
+	s32 err = 0;
+	s32 wsec_val = 0;
+	s32 bssidx;
+#ifdef BCMWAPI_WPI
+	s32 val = 0;
+#endif
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_ciphers_pairwise) {
+		switch (sme->crypto.ciphers_pairwise[0]) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			pval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			pval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			val = SMS4_ENABLED;
+			pval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("invalid cipher pairwise (%d)\n",
+				sme->crypto.ciphers_pairwise[0]));
+			return -EINVAL;
+		}
+	}
+	if (sme->crypto.cipher_group) {
+		switch (sme->crypto.cipher_group) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			gval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			gval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			gval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			val = SMS4_ENABLED;
+			gval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("invalid cipher group (%d)\n",
+				sme->crypto.cipher_group));
+			return -EINVAL;
+		}
+	}
+
+	WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+	if (is_wps_conn(sme)) {
+		if (sme->privacy)
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+		else
+			/* WPS-2.0 allows no security */
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+	} else {
+#ifdef BCMWAPI_WPI
+		if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
+			WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED"));
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", val, bssidx);
+		} else {
+#endif
+			WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
+			wsec_val = pval | gval;
+
+			WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val));
+			err = wldev_iovar_setint_bsscfg(dev, "wsec",
+				wsec_val, bssidx);
+#ifdef BCMWAPI_WPI
+		}
+#endif
+	}
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+	sec->cipher_group = sme->crypto.cipher_group;
+
+	return err;
+}
+
+#ifdef MFP
+static s32
+wl_cfg80211_set_mfp(struct bcm_cfg80211 *cfg,
+	struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	s32 mfp = WL_MFP_NONE;
+	s32 current_mfp = WL_MFP_NONE;
+	bcm_tlv_t *wpa2_ie;
+	u8* rsn_cap = NULL;
+	bool fw_support = false;
+	int err, count = 0;
+	u8 *eptr = NULL, *ptr = NULL;
+	u8* group_mgmt_cs = NULL;
+	wpa_pmkid_list_t* pmkid = NULL;
+
+	if (!sme) {
+		/* No connection params from userspace, Do nothing. */
+		return 0;
+	}
+
+	/* Check fw support and retrieve the current mfp val */
+	err = wldev_iovar_getint(dev, "mfp", &current_mfp);
+	if (!err) {
+		fw_support = true;
+	}
+
+	/* Parse the wpa2ie to decode the MFP capability */
+	if (((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+			DOT11_MNG_RSN_ID)) != NULL) &&
+			(wl_cfg80211_get_rsn_capa(wpa2_ie, &rsn_cap) == 0)) {
+		/* Check for MFP cap in the RSN capability field */
+		if (rsn_cap[0] & RSN_CAP_MFPR) {
+			mfp = WL_MFP_REQUIRED;
+		} else if (rsn_cap[0] & RSN_CAP_MFPC) {
+			mfp = WL_MFP_CAPABLE;
+		}
+
+		/*
+		 * eptr --> end/last byte addr of wpa2_ie
+		 * ptr --> to keep track of current/required byte addr
+		 */
+		eptr = (u8*)wpa2_ie + (wpa2_ie->len + TLV_HDR_LEN);
+		/* point ptr at the next byte after rsn_cap */
+		ptr = (u8*)rsn_cap + RSN_CAP_LEN;
+		if (mfp && (eptr - ptr) >= WPA2_PMKID_COUNT_LEN) {
+			/* pmkid now to point to 1st byte addr of pmkid in wpa2_ie */
+			pmkid = (wpa_pmkid_list_t*)ptr;
+			count = pmkid->count.low | (pmkid->count.high << 8);
+			/* ptr now to point to last byte addr of pmkid */
+			ptr = (u8*)pmkid + (count * WPA2_PMKID_LEN
+				+ WPA2_PMKID_COUNT_LEN);
+			if ((eptr - ptr) >= WPA_SUITE_LEN) {
+				/* group_mgmt_cs now to point to first byte addr of bip */
+				group_mgmt_cs = ptr;
+			}
+		}
+	}
+
+	if (rsn_cap) {
+		WL_DBG((" mfp:%d wpa2_ie ptr:%p rsn_cap 0x%x%x fw mfp support:%d\n",
+			mfp, wpa2_ie, rsn_cap[0], rsn_cap[1], fw_support));
+	}
+
+	if (fw_support == false) {
+		if (mfp == WL_MFP_REQUIRED) {
+			/* if mfp > 0, mfp capability set in wpa ie, but
+			 * FW indicated error for mfp. Propagate the error up.
+			 */
+			WL_ERR(("mfp capability found in wpaie. But fw doesn't "
+				"seem to support MFP\n"));
+			return -EINVAL;
+		} else {
+			/* Firmware doesn't support mfp. But since connection request
+			 * is for non-mfp case, don't bother.
+			 */
+			return 0;
+		}
+	} else if (mfp != current_mfp) {
+		err = wldev_iovar_setint(dev, "mfp", mfp);
+		if (unlikely(err)) {
+			WL_ERR(("mfp (%d) set failed ret:%d \n", mfp, err));
+			return err;
+		}
+		WL_DBG(("mfp set to 0x%x \n", mfp));
+	}
+
+	if (group_mgmt_cs && bcmp((const uint8 *)WPA2_OUI,
+			group_mgmt_cs, (WPA_SUITE_LEN - 1)) == 0) {
+		WL_DBG(("BIP is found\n"));
+		err = wldev_iovar_setbuf(dev, "bip",
+				group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
+				WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		/*
+		 * Dont return failure for unsupported cases
+		 * of bip iovar for backward compatibility
+		 */
+		if (err != BCME_UNSUPPORTED && err < 0) {
+			WL_ERR(("bip set error (%d)\n", err));
+			return err;
+		}
+	}
+
+	return 0;
+}
+#endif /* MFP */
+
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_akm_suites) {
+		err = wldev_iovar_getint(dev, "wpa_auth", &val);
+		if (unlikely(err)) {
+			WL_ERR(("could not get wpa_auth (%d)\n", err));
+			return err;
+		}
+		if (val & (WPA_AUTH_PSK |
+			WPA_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid akm suite (0x%x)\n",
+					sme->crypto.akm_suites[0]));
+				return -EINVAL;
+			}
+		} else if (val & (WPA2_AUTH_PSK |
+			WPA2_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+#ifdef MFP
+#ifdef CUSTOMER_HW6
+			case WL_AKM_SUITE_SHA256_1X:
+				if (wl_customer6_legacy_chip_check(cfg,	dev)) {
+					val = WPA2_AUTH_UNSPECIFIED;
+				} else {
+					val = WPA2_AUTH_1X_SHA256;
+				}
+				break;
+			case WL_AKM_SUITE_SHA256_PSK:
+				if (wl_customer6_legacy_chip_check(cfg,	dev)) {
+					val = WPA2_AUTH_PSK;
+				} else {
+					val = WPA2_AUTH_PSK_SHA256;
+				}
+				break;
+#else
+			case WL_AKM_SUITE_SHA256_1X:
+				val = WPA2_AUTH_1X_SHA256;
+				break;
+			case WL_AKM_SUITE_SHA256_PSK:
+				val = WPA2_AUTH_PSK_SHA256;
+				break;
+#endif /* CUSTOMER_HW6 */
+#endif /* MFP */
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid akm suite (0x%x)\n",
+					sme->crypto.akm_suites[0]));
+				return -EINVAL;
+			}
+		}
+#ifdef BCMWAPI_WPI
+		else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_WAPI_CERT:
+				val = WAPI_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_WAPI_PSK:
+				val = WAPI_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		}
+#endif
+
+#ifdef MFP
+		if ((err = wl_cfg80211_set_mfp(cfg, dev, sme)) < 0) {
+			WL_ERR(("MFP set failed err:%d\n", err));
+			return -EINVAL;
+		}
+#endif /* MFP */
+
+		WL_DBG(("setting wpa_auth to 0x%x\n", val));
+		err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+		if (unlikely(err)) {
+			WL_ERR(("could not set wpa_auth (0x%x)\n", err));
+			return err;
+		}
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_auth = sme->crypto.akm_suites[0];
+
+	return err;
+}
+
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct wl_security *sec;
+	struct wl_wsec_key key;
+	s32 val;
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key len (%d)\n", sme->key_len));
+	if (sme->key_len) {
+		sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+		WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+			sec->wpa_versions, sec->cipher_pairwise));
+		if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+#ifdef BCMWAPI_WPI
+			NL80211_WPA_VERSION_2 | NL80211_WAPI_VERSION_1)) &&
+#else
+			NL80211_WPA_VERSION_2)) &&
+#endif
+			(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+#ifdef BCMWAPI_WPI
+		    WLAN_CIPHER_SUITE_WEP104 | WLAN_CIPHER_SUITE_SMS4)))
+#else
+		    WLAN_CIPHER_SUITE_WEP104)))
+#endif
+		{
+			memset(&key, 0, sizeof(key));
+			key.len = (u32) sme->key_len;
+			key.index = (u32) sme->key_idx;
+			if (unlikely(key.len > sizeof(key.data))) {
+				WL_ERR(("Too long key length (%u)\n", key.len));
+				return -EINVAL;
+			}
+			memcpy(key.data, sme->key, key.len);
+			key.flags = WL_PRIMARY_KEY;
+			switch (sec->cipher_pairwise) {
+			case WLAN_CIPHER_SUITE_WEP40:
+				key.algo = CRYPTO_ALGO_WEP1;
+				break;
+			case WLAN_CIPHER_SUITE_WEP104:
+				key.algo = CRYPTO_ALGO_WEP128;
+				break;
+#ifdef BCMWAPI_WPI
+			case WLAN_CIPHER_SUITE_SMS4:
+				key.algo = CRYPTO_ALGO_SMS4;
+				break;
+#endif
+			default:
+				WL_ERR(("Invalid algorithm (%d)\n",
+					sme->crypto.ciphers_pairwise[0]));
+				return -EINVAL;
+			}
+			/* Set the new key/index */
+			WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+				key.len, key.index, key.algo));
+			WL_DBG(("key \"%s\"\n", key.data));
+			swap_key_from_BE(&key);
+			err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+				return err;
+			}
+			if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+				WL_DBG(("set auth_type to shared key\n"));
+				val = WL_AUTH_SHARED_KEY;	/* shared key */
+				err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+				if (unlikely(err)) {
+					WL_ERR(("set auth failed (%d)\n", err));
+					return err;
+				}
+			}
+		}
+	}
+	return err;
+}
+
+#if defined(ESCAN_RESULT_PATCH)
+static u8 connect_req_bssid[6];
+static u8 broad_bssid[6];
+#endif /* ESCAN_RESULT_PATCH */
+
+
+
+#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX)
+static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd)
+{
+	u32 chanspec = 0;
+	bool isvht80 = 0;
+
+	if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK)
+		chanspec = wl_chspec_driver_to_host(chanspec);
+
+	isvht80 = chanspec & WL_CHANSPEC_BW_80;
+	WL_INFORM(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
+
+	return isvht80;
+}
+#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */
+
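+/*
+ * Resolve a mismatch between the driver's connection state and the
+ * firmware's: optionally issue WLC_DISASSOC, then poll until the
+ * DISCONNECTING flag clears or the wait budget runs out.
+ */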
+int wl_cfg80211_cleanup_mismatch_status(struct net_device *dev, struct bcm_cfg80211 *cfg,
+	bool disassociate)
+{
+	scb_val_t scbval;
+	int err = TRUE;
+	int wait_cnt;
+
+	if (disassociate) {
+		WL_ERR(("Disassociate previous connection!\n"));
+		wl_set_drv_status(cfg, DISCONNECTING, dev);
+		scbval.val = DOT11_RC_DISASSOC_LEAVING;
+		scbval.val = htod32(scbval.val);
+
+		err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+				sizeof(scb_val_t));
+		if (unlikely(err)) {
+			wl_clr_drv_status(cfg, DISCONNECTING, dev);
+			WL_ERR(("error (%d)\n", err));
+			return err;
+		}
+		wait_cnt = 500/10;
+	} else {
+		wait_cnt = 200/10;
+		WL_ERR(("Waiting for previous DISCONNECTING status!\n"));
+		if (wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+			wl_clr_drv_status(cfg, DISCONNECTING, dev);
+		}
+	}
+
+	while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+		WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n",
+			wait_cnt));
+		wait_cnt--;
+		OSL_SLEEP(10);
+	}
+
+	if (wait_cnt == 0) {
+		WL_ERR(("DISCONNECTING cleanup failed!\n"));
+		return BCME_NOTREADY;
+	}
+	return BCME_OK;
+}
+
+#define MAX_SCAN_ABORT_WAIT_CNT 20
+#define WAIT_SCAN_ABORT_OSL_SLEEP_TIME 10
+
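+/*
+ * cfg80211 .connect handler.  Aborts any scan in progress, cleans up a
+ * stale CONNECTING/CONNECTED/DISCONNECTING state, pushes the WPA/RSN IEs
+ * and security settings (WPA version, auth type, ciphers, key management,
+ * optional static WEP key) to the firmware, then issues the "join" iovar
+ * with the target SSID/BSSID and optional channel.  If "join" is not
+ * supported by the firmware it falls back to WLC_SET_SSID.
+ */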
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct ieee80211_channel *chan = sme->channel;
+	wl_extjoin_params_t *ext_join_params;
+	struct wl_join_params join_params;
+	size_t join_params_size;
+	dhd_pub_t *dhdp =  (dhd_pub_t *)(cfg->pub);
+	s32 err = 0;
+	wpa_ie_fixed_t *wpa_ie;
+	bcm_tlv_t *wpa2_ie;
+	u8* wpaie  = 0;
+	u32 wpaie_len = 0;
+	u32 chan_cnt = 0;
+	struct ether_addr bssid;
+	s32 bssidx = -1;
+#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
+	int wait_cnt;
+#endif
+
+	WL_DBG(("In\n"));
+	BCM_REFERENCE(dhdp);
+
+#ifdef WLMESH
+	wl_config_ifmode(cfg, dev, dev->ieee80211_ptr->iftype);
+#endif
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+	wl_cfg80211_set_random_mac(dev, FALSE);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+	if (sme->channel_hint) {
+		chan = sme->channel_hint;
+		WL_DBG(("channel_hint (%d), channel_hint center_freq (%d)\n",
+			ieee80211_frequency_to_channel(sme->channel_hint->center_freq),
+			sme->channel_hint->center_freq));
+	}
+	if (sme->bssid_hint) {
+		sme->bssid = sme->bssid_hint;
+		WL_DBG(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint)));
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+
+	if (unlikely(!sme->ssid)) {
+		WL_ERR(("Invalid ssid\n"));
+		return -EOPNOTSUPP;
+	}
+
+	if (unlikely(sme->ssid_len > DOT11_MAX_SSID_LEN)) {
+		WL_ERR(("Invalid SSID info: SSID=%s, length=%zd\n",
+			sme->ssid, sme->ssid_len));
+		return -EINVAL;
+	}
+
+	WL_DBG(("SME IE : len=%zu\n", sme->ie_len));
+	if (sme->ie != NULL && sme->ie_len > 0 && (wl_dbg_level & WL_DBG_DBG)) {
+		prhex(NULL, (uchar *)sme->ie, sme->ie_len);
+	}
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	/*
+	 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+	 */
+#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
+	if (cfg->scan_request) {
+		WL_TRACE_HW4(("Aborting the scan! \n"));
+		wl_cfg80211_scan_abort(cfg);
+		wait_cnt = MAX_SCAN_ABORT_WAIT_CNT;
+		while (wl_get_drv_status(cfg, SCANNING, dev) && wait_cnt) {
+			WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt));
+			wait_cnt--;
+			OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME);
+		}
+		if (wl_get_drv_status(cfg, SCANNING, dev)) {
+			wl_notify_escan_complete(cfg, dev, true, true);
+		}
+	}
+#endif
+#ifdef WL_SCHED_SCAN
+	/* Locks are taken in wl_cfg80211_sched_scan_stop()
+	 * A scan start occurring during connect is unlikely
+	 */
+	if (cfg->sched_scan_req) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+		wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg), 0);
+#else
+		wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+	}
+#endif
+#if defined(ESCAN_RESULT_PATCH)
+	if (sme->bssid)
+		memcpy(connect_req_bssid, sme->bssid, ETHER_ADDR_LEN);
+	else
+		bzero(connect_req_bssid, ETHER_ADDR_LEN);
+	bzero(broad_bssid, ETHER_ADDR_LEN);
+#endif
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+	maxrxpktglom = 0;
+#endif
+	if (wl_get_drv_status(cfg, CONNECTING, dev) || wl_get_drv_status(cfg, CONNECTED, dev)) {
+		/* set nested connect bit to identify the context */
+		wl_set_drv_status(cfg, NESTED_CONNECT, dev);
+		/* DHD prev status is CONNECTING/CONNECTED */
+		err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, TRUE);
+	} else if (wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+		/* DHD prev status is DISCONNECTING */
+		err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, FALSE);
+	} else if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
+		/* DHD previous status is not connected and FW connected */
+		if (wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN) == 0) {
+			/* set nested connect bit to identify the context */
+			wl_set_drv_status(cfg, NESTED_CONNECT, dev);
+			err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, TRUE);
+		}
+	}
+	wl_cfg80211_check_in4way(cfg, dev, WAIT_DISCONNECTED,
+		WL_EXT_STATUS_CONNECTING, NULL);
+
+	/* 'connect' request received */
+	wl_set_drv_status(cfg, CONNECTING, dev);
+	/* clear nested connect bit on proceeding for connection */
+	wl_clr_drv_status(cfg, NESTED_CONNECT, dev);
+
+	/* Clean BSSID */
+	bzero(&bssid, sizeof(bssid));
+	if (!wl_get_drv_status(cfg, DISCONNECTING, dev))
+		wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+
+	if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) {
+		/* connecting via a virtual interface is only allowed for P2P */
+		if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+			WL_ERR(("Find p2p index from wdev(%p) failed\n",
+				dev->ieee80211_ptr));
+			err = BCME_ERROR;
+			goto exit;
+		}
+		wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+	} else if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+			WL_ERR(("Find wlan index from wdev(%p) failed\n", dev->ieee80211_ptr));
+			err = BCME_ERROR;
+			goto exit;
+		}
+
+		/* find the RSN_IE */
+		if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+			DOT11_MNG_RSN_ID)) != NULL) {
+			WL_DBG((" WPA2 IE is found\n"));
+		}
+		/* find the WPA_IE */
+		if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
+			sme->ie_len)) != NULL) {
+			WL_DBG((" WPA IE is found\n"));
+		}
+		if (wpa_ie != NULL || wpa2_ie != NULL) {
+			wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
+			wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+			wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+			err = wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("wpaie set error (%d)\n", err));
+				goto exit;
+			}
+		} else {
+			err = wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("wpaie set error (%d)\n", err));
+				goto exit;
+			}
+		}
+		err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, (const u8 *)sme->ie, sme->ie_len);
+		if (unlikely(err)) {
+			goto exit;
+		}
+	}
+
+	if (chan) {
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+		chan_cnt = 1;
+		WL_DBG(("channel (%d), center_freq (%d), %d channels\n", cfg->channel,
+			chan->center_freq, chan_cnt));
+	} else {
+		WL_DBG(("No channel info from user space\n"));
+		cfg->channel = 0;
+	}
+#ifdef BCMWAPI_WPI
+	WL_DBG(("1. enable wapi auth\n"));
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		WL_DBG(("2. set wapi ie  \n"));
+		err = wl_set_set_wapi_ie(dev, sme);
+		if (unlikely(err))
+			goto exit;
+	} else
+		WL_DBG(("2. Not wapi ie  \n"));
+#endif
+	WL_DBG(("3. set wpa version \n"));
+
+	err = wl_set_wpa_version(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid wpa_version\n"));
+		goto exit;
+	}
+#ifdef BCMWAPI_WPI
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)
+		WL_DBG(("4. WAPI: skip wl_set_auth_type\n"));
+	else {
+		WL_DBG(("4. wl_set_auth_type\n"));
+#endif
+		err = wl_set_auth_type(dev, sme);
+		if (unlikely(err)) {
+			WL_ERR(("Invalid auth type\n"));
+			goto exit;
+		}
+#ifdef BCMWAPI_WPI
+	}
+#endif
+
+	err = wl_set_set_cipher(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid cipher\n"));
+		goto exit;
+	}
+
+	err = wl_set_key_mgmt(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid key mgmt\n"));
+		goto exit;
+	}
+
+	err = wl_set_set_sharedkey(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid shared key\n"));
+		goto exit;
+	}
+
+	/*
+	 *  Join with specific BSSID and cached SSID
+	 *  If SSID is zero join based on BSSID only
+	 */
+	join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+		chan_cnt * sizeof(chanspec_t);
+	ext_join_params =  (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+	if (ext_join_params == NULL) {
+		err = -ENOMEM;
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		goto exit;
+	}
+	ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+	memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+	ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+	/* Increase dwell time to receive a probe response or detect a beacon
+	 * from the target AP in a noisy environment, only during the connect
+	 * command.
+	 */
+	ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+	ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+	/* Set up join scan parameters */
+	ext_join_params->scan.scan_type = -1;
+	ext_join_params->scan.nprobes = chan_cnt ?
+		(ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+	ext_join_params->scan.home_time = -1;
+
+	if (sme->bssid)
+		memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+	ext_join_params->assoc.chanspec_num = chan_cnt;
+	if (chan_cnt) {
+		if (cfg->channel) {
+			/*
+			 * Use the channel provided by userspace
+			 */
+			u16 channel, band, bw, ctl_sb;
+			chanspec_t chspec;
+			channel = cfg->channel;
+			band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+				: WL_CHANSPEC_BAND_5G;
+
+			/* Get min_bw set for the interface */
+			bw = wl_cfg80211_ulb_get_min_bw_chspec(cfg, dev->ieee80211_ptr, bssidx);
+			if (bw == INVCHANSPEC) {
+				WL_ERR(("Invalid chanspec \n"));
+				kfree(ext_join_params);
+				err = BCME_ERROR;
+				goto exit;
+			}
+
+			ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+			chspec = (channel | band | bw | ctl_sb);
+			ext_join_params->assoc.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+			ext_join_params->assoc.chanspec_list[0] |= chspec;
+			ext_join_params->assoc.chanspec_list[0] =
+				wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+		}
+	}
+	ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+	if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+			ext_join_params->ssid.SSID_len));
+	}
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		kfree(ext_join_params);
+		err = BCME_ERROR;
+		goto exit;
+	}
+#if defined(CUSTOMER_HW2)
+	DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+#endif /* CUSTOMER_HW2 */
+#ifdef WL_EXT_IAPSTA
+	wl_ext_iapsta_update_channel(dev, cfg->channel);
+#endif
+	err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (cfg->rcc_enabled) {
+		printf("Connecting with " MACDBG " ssid \"%s\", len (%d) with rcc channels \n\n",
+			MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+			ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len);
+	} else {
+		printf("Connecting with " MACDBG " ssid \"%s\", len (%d) channel=%d\n\n",
+			MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+			ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel);
+	}
+
+	kfree(ext_join_params);
+	if (err) {
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		if (err == BCME_UNSUPPORTED) {
+			WL_DBG(("join iovar is not supported\n"));
+			goto set_ssid;
+		} else {
+			WL_ERR(("error (%d)\n", err));
+			goto exit;
+		}
+	} else
+		goto exit;
+
+set_ssid:
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	if (sme->bssid)
+		memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+	if (wl_ch_to_chanspec(dev, cfg->channel, &join_params, &join_params_size) < 0) {
+		WL_ERR(("Invalid chanspec\n"));
+		return -EINVAL;
+	}
+
+	WL_DBG(("join_param_size %zu\n", join_params_size));
+
+	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+			join_params.ssid.SSID_len));
+	}
+	err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params, join_params_size);
+exit:
+	if (err) {
+		WL_ERR(("error (%d)\n", err));
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+	}
+	if (!err)
+		wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
+			WL_EXT_STATUS_CONNECTING, NULL);
+
+#ifdef WLTDLS
+	/* disable TDLS if number of connected interfaces is >= 1 */
+	wl_cfg80211_tdls_config(cfg, TDLS_STATE_CONNECT, false);
+#endif /* WLTDLS */
+
+#ifdef DBG_PKT_MON
+	if ((dev == bcmcfg_to_prmry_ndev(cfg)) && !err) {
+		DHD_DBG_PKT_MON_START(dhdp);
+	}
+#endif /* DBG_PKT_MON */
+	return err;
+}
+
+#define WAIT_FOR_DISCONNECT_MAX 10
+static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+	uint8 wait_cnt;
+
+	wait_cnt = WAIT_FOR_DISCONNECT_MAX;
+	while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+		WL_DBG(("Waiting for disconnection, wait_cnt: %d\n", wait_cnt));
+		wait_cnt--;
+		OSL_SLEEP(50);
+	}
+
+	return;
+}
+
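+/*
+ * cfg80211 .disconnect handler.  If the interface is connecting or
+ * connected, sends WLC_DISASSOC with the given reason code and waits for
+ * the DISCONNECTING flag to clear before returning.
+ */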
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scbval;
+	bool act = false;
+	s32 err = 0;
+	u8 *curbssid;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	WL_ERR(("Reason %d\n", reason_code));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+
+	BCM_REFERENCE(dhdp);
+
+#ifdef ESCAN_RESULT_PATCH
+	if (wl_get_drv_status(cfg, CONNECTING, dev) && curbssid &&
+		(memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0)) {
+		WL_ERR(("Disconnecting from connecting device: " MACDBG "\n",
+			MAC2STRDBG(curbssid)));
+		act = true;
+	}
+#endif /* ESCAN_RESULT_PATCH */
+
+	if (act) {
+#ifdef DBG_PKT_MON
+		if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+			DHD_DBG_PKT_MON_STOP(dhdp);
+		}
+#endif /* DBG_PKT_MON */
+		/*
+		* Cancel ongoing scan to sync up with sme state machine of cfg80211.
+		*/
+#if !defined(ESCAN_RESULT_PATCH)
+		/* Let scan aborted by F/W */
+		if (cfg->scan_request) {
+			WL_TRACE_HW4(("Aborting the scan! \n"));
+			wl_notify_escan_complete(cfg, dev, true, true);
+		}
+#endif /* ESCAN_RESULT_PATCH */
+		if (wl_get_drv_status(cfg, CONNECTING, dev) ||
+			wl_get_drv_status(cfg, CONNECTED, dev)) {
+				wl_set_drv_status(cfg, DISCONNECTING, dev);
+				scbval.val = reason_code;
+				memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+				scbval.val = htod32(scbval.val);
+				err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+						sizeof(scb_val_t));
+				if (unlikely(err)) {
+					wl_clr_drv_status(cfg, DISCONNECTING, dev);
+					WL_ERR(("error (%d)\n", err));
+					return err;
+				}
+				wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+					WL_EXT_STATUS_DISCONNECTING, NULL);
+				wl_cfg80211_wait_for_disconnection(cfg, dev);
+		}
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	/* set default cpucore */
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dhdp->chan_isvht80 &= ~DHD_FLAG_STA_MODE;
+		if (!(dhdp->chan_isvht80))
+			dhd_set_cpucore(dhdp, FALSE);
+	}
+#endif /* CUSTOM_SET_CPUCORE */
+
+	return err;
+}
+
+static s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm)
+#else
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	s32 dbm = MBM_TO_DBM(mbm);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
+	defined(WL_COMPAT_WIRELESS) || defined(WL_SUPPORT_BACKPORTED_KPATCHES)
+	dbm = MBM_TO_DBM(dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	switch (type) {
+	case NL80211_TX_POWER_AUTOMATIC:
+		break;
+	case NL80211_TX_POWER_LIMITED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_LIMITED - dbm is negative\n"));
+			return -EINVAL;
+		}
+		break;
+	case NL80211_TX_POWER_FIXED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+			return -EINVAL;
+		}
+		break;
+	}
+
+	err = wl_set_tx_power(ndev, type, dbm);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	cfg->conf->tx_power = dbm;
+
+	return err;
+}
+
+static s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm)
+#else
+wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wl_get_tx_power(ndev, dbm);
+	if (unlikely(err))
+		WL_ERR(("error (%d)\n", err));
+
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u32 index;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	if (wsec == WEP_ENABLED) {
+		/* Just select a new current key */
+		index = (u32) key_idx;
+		index = htod32(index);
+		err = wldev_ioctl_set(dev, WLC_SET_KEY_PRIMARY, &index,
+			sizeof(index));
+		if (unlikely(err)) {
+			WL_ERR(("error (%d)\n", err));
+		}
+	}
+	return err;
+}
+
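+/*
+ * Install or delete a per-peer (pairwise) key identified by @mac_addr.
+ * A zero key length deletes the key; otherwise the key material is copied
+ * into a wl_wsec_key (swapping the TKIP TX/RX MIC halves as the firmware
+ * expects) and pushed with the "wsec_key" iovar after any pending EAPOL
+ * 4/4 frame has been sent.
+ */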
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_wsec_key key;
+	s32 err = 0;
+	s32 bssidx;
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+	memset(&key, 0, sizeof(key));
+	key.index = (u32) key_idx;
+
+	if (!ETHER_ISMULTI(mac_addr))
+		memcpy((char *)&key.ea, (const void *)mac_addr, ETHER_ADDR_LEN);
+	key.len = (u32) params->key_len;
+
+	/* check for key index change */
+	if (key.len == 0) {
+		/* key delete */
+		swap_key_from_BE(&key);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("key delete error (%d)\n", err));
+			return err;
+		}
+	} else {
+		if (key.len > sizeof(key.data)) {
+			WL_ERR(("Invalid key length (%d)\n", key.len));
+			return -EINVAL;
+		}
+		WL_DBG(("Setting the key index %d\n", key.index));
+		memcpy(key.data, params->key, key.len);
+
+		if ((mode == WL_MODE_BSS) &&
+			(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+			u8 keybuf[8];
+			memcpy(keybuf, &key.data[24], sizeof(keybuf));
+			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+			memcpy(&key.data[16], keybuf, sizeof(keybuf));
+		}
+
+		/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+		if (params->seq && params->seq_len == 6) {
+			/* rx iv */
+			u8 *ivptr;
+			ivptr = (u8 *) params->seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = true;
+		}
+
+		switch (params->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+			key.algo = CRYPTO_ALGO_WEP1;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			break;
+		case WLAN_CIPHER_SUITE_WEP104:
+			key.algo = CRYPTO_ALGO_WEP128;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			key.algo = CRYPTO_ALGO_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			key.algo = CRYPTO_ALGO_SMS4;
+			WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+			break;
+#endif
+		default:
+			WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+			return -EINVAL;
+		}
+		swap_key_from_BE(&key);
+		/* need to guarantee EAPOL 4/4 send out before set key */
+		dhd_wait_pend8021x(dev);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+			return err;
+		}
+	}
+	return err;
+}
+
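+/*
+ * Enable or disable firmware roam offload on the primary interface.  When
+ * offload is enabled the supplicant/assoc/roam events listed below are
+ * masked out, since the firmware then handles roaming on its own.
+ */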
+int
+wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable)
+{
+	int err;
+	wl_eventmsg_buf_t ev_buf;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (dev != bcmcfg_to_prmry_ndev(cfg)) {
+		/* roam offload is only for the primary device */
+		return -1;
+	}
+	err = wldev_iovar_setint(dev, "roam_offload", enable);
+	if (err)
+		return err;
+
+	bzero(&ev_buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);
+	err = wl_cfg80211_apply_eventbuffer(dev, cfg, &ev_buf);
+	if (!err) {
+		cfg->roam_offload = enable;
+	}
+	return err;
+}
+
+#if defined(WL_VIRTUAL_APSTA)
+int
+wl_cfg80211_interface_create(struct net_device *dev, char *name)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	bcm_struct_cfgdev *new_cfgdev;
+	char ifname[IFNAMSIZ];
+	char iftype[IFNAMSIZ];
+	enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
+
+	sscanf(name, "%s %s", ifname, iftype);
+
+	if (strnicmp(iftype, "AP", strlen("AP")) == 0) {
+		iface_type = NL80211_IFTYPE_AP;
+	}
+
+	new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+		iface_type, NULL, ifname);
+	if (!new_cfgdev) {
+		return BCME_ERROR;
+	}
+	else {
+		WL_DBG(("Iface %s created successfully\n", name));
+		return BCME_OK;
+	}
+}
+
+int
+wl_cfg80211_interface_delete(struct net_device *dev, char *name)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct net_info *iter, *next;
+	int err = BCME_ERROR;
+
+	if (name == NULL) {
+		return BCME_ERROR;
+	}
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev) {
+			if (strcmp(iter->ndev->name, name) == 0) {
+				err =  wl_cfg80211_del_iface(cfg->wdev->wiphy,
+					ndev_to_cfgdev(iter->ndev));
+				break;
+			}
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	if (!err) {
+		WL_DBG(("Iface %s deleted successfully\n", name));
+	}
+	return err;
+}
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
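+/*
+ * Block or unblock broadcast ARP frames during DHCP in STA+SoftAP
+ * concurrent mode.  Blocking installs the broadcast-ARP packet filter in
+ * blacklist mode; unblocking restores master mode and removes the filter.
+ */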
+void
+wl_cfg80211_block_arp(struct net_device *dev, int enable)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+	if (!dhd_pkt_filter_enable) {
+		WL_INFORM(("Packet filter isn't enabled\n"));
+		return;
+	}
+
+	/* Block/unblock ARP frames only if the STA is connected to
+	 * the upstream AP in STA+SoftAP concurrent mode
+	 */
+	if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
+		WL_INFORM(("STA is not connected to the upstream AP\n"));
+		return;
+	}
+
+	if (enable) {
+		WL_DBG(("Enable ARP Filter\n"));
+		/* Add ARP filter */
+		dhd_packet_filter_add_remove(dhdp, TRUE, DHD_BROADCAST_ARP_FILTER_NUM);
+
+		/* Enable ARP packet filter - blacklist */
+		dhd_master_mode = FALSE;
+		dhd_pktfilter_offload_enable(dhdp, dhdp->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM],
+			TRUE, dhd_master_mode);
+	} else {
+		WL_DBG(("Disable ARP Filter\n"));
+		/* Disable ARP packet filter */
+		dhd_master_mode = TRUE;
+		dhd_pktfilter_offload_enable(dhdp, dhdp->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM],
+			FALSE, dhd_master_mode);
+
+		/* Delete ARP filter */
+		dhd_packet_filter_add_remove(dhdp, FALSE, DHD_BROADCAST_ARP_FILTER_NUM);
+	}
+}
+#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+#endif /* defined (WL_VIRTUAL_APSTA) */
+
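+/*
+ * cfg80211 .add_key handler.  Pairwise keys with a peer MAC (non-WEP) are
+ * delegated to wl_add_keyext(); group/default keys are plumbed here via
+ * "wsec_key" and the matching cipher bit is OR-ed into the interface
+ * "wsec" setting.  WEP keys are also buffered in cfg->wep_key so they can
+ * be re-plumbed after an AP restart.
+ */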
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params)
+{
+	struct wl_wsec_key key;
+	s32 val = 0;
+	s32 wsec = 0;
+	s32 err = 0;
+	u8 keybuf[8];
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (mac_addr &&
+		((params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+		(params->cipher != WLAN_CIPHER_SUITE_WEP104))) {
+			wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+			goto exit;
+	}
+	memset(&key, 0, sizeof(key));
+	/* Clear any buffered wep key */
+	memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+
+	key.len = (u32) params->key_len;
+	key.index = (u32) key_idx;
+
+	if (unlikely(key.len > sizeof(key.data))) {
+		WL_ERR(("Too long key length (%u)\n", key.len));
+		return -EINVAL;
+	}
+	memcpy(key.data, params->key, key.len);
+
+	key.flags = WL_PRIMARY_KEY;
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key.algo = CRYPTO_ALGO_WEP1;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key.algo = CRYPTO_ALGO_WEP128;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key.algo = CRYPTO_ALGO_TKIP;
+		val = TKIP_ENABLED;
+		/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+		if (mode == WL_MODE_BSS) {
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+		break;
+#ifdef BCMWAPI_WPI
+	case WLAN_CIPHER_SUITE_SMS4:
+		key.algo = CRYPTO_ALGO_SMS4;
+		WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+		val = SMS4_ENABLED;
+		break;
+#endif /* BCMWAPI_WPI */
+	default:
+		WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+		return -EINVAL;
+	}
+
+	/* Set the new key/index */
+	if ((mode == WL_MODE_IBSS) && (val & (TKIP_ENABLED | AES_ENABLED))) {
+		WL_ERR(("IBSS key set\n"));
+		wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
+	}
+	swap_key_from_BE(&key);
+	if ((params->cipher == WLAN_CIPHER_SUITE_WEP40) ||
+		(params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+		/*
+		 * For the AP role a "wl down" is issued before bringing up the AP,
+		 * so the plumbed keys are lost and have to be plumbed again once
+		 * the AP is up. Buffer the keys here for that purpose. This is
+		 * essentially a workaround (WAR); if the firmware later supports
+		 * interface upgrade without "wl down" and "wl apsta 0", it will
+		 * no longer be required.
+		 */
+		WL_DBG(("Buffering WEP Keys \n"));
+		memcpy(&cfg->wep_key, &key, sizeof(struct wl_wsec_key));
+	}
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		return err;
+	}
+
+exit:
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("get wsec error (%d)\n", err));
+		return err;
+	}
+
+	wsec |= val;
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wsec error (%d)\n", err));
+		return err;
+	}
+	wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
+		WL_EXT_STATUS_ADD_KEY, NULL);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+	WL_DBG(("Enter\n"));
+
+#ifndef MFP
+	if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
+		return -EINVAL;
+#endif
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+
+	key.flags = WL_PRIMARY_KEY;
+	key.algo = CRYPTO_ALGO_OFF;
+	key.index = (u32) key_idx;
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	/* Set the new key/index */
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		if (err == -EINVAL) {
+			if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+				/* we ignore this key index in this case */
+				WL_DBG(("invalid key index (%d)\n", key_idx));
+			}
+		} else {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		}
+		return err;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+	void (*callback) (void *cookie, struct key_params * params))
+{
+	struct key_params params;
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_security *sec;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+	key.index = key_idx;
+	swap_key_to_BE(&key);
+	memset(&params, 0, sizeof(params));
+	params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+	memcpy((void *)params.key, key.data, params.key_len);
+
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	switch (WSEC_ENABLED(wsec)) {
+		case WEP_ENABLED:
+			sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+			if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP40;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP104;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			}
+			break;
+		case TKIP_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case AES_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+		/* to connect to mixed mode AP */
+		case (AES_ENABLED | TKIP_ENABLED): /* TKIP CCMP */
+			params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+#endif
+#ifdef BCMWAPI_WPI
+		case SMS4_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_SMS4;
+			WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+			break;
+#endif
+		default:
+			WL_ERR(("Invalid algo (0x%x)\n", wsec));
+			return -EINVAL;
+	}
+
+	callback(cookie, &params);
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev, u8 key_idx)
+{
+#ifdef MFP
+	return 0;
+#else
+	WL_INFORM(("Not supported\n"));
+	return -EOPNOTSUPP;
+#endif /* MFP */
+}
+
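+/*
+ * cfg80211 .get_station handler.  In AP mode it reports the per-STA
+ * "sta_info" data; in STA/IBSS mode it reports the current TX rate, RSSI
+ * and packet counters, and forces a disconnect if the firmware stays
+ * unassociated (or RSSI reads keep failing) for too long.
+ */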
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	const u8 *mac, struct station_info *sinfo)
+#else
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	u8 *mac, struct station_info *sinfo)
+#endif
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s32 rssi;
+	s32 rate;
+	s32 err = 0;
+	sta_info_t *sta;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	bool fw_assoc_state = FALSE;
+	u32 dhd_assoc_state = 0;
+	static int err_cnt = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+		err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+			ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err < 0) {
+			WL_ERR(("GET STA INFO failed, %d\n", err));
+			return err;
+		}
+		sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
+		sta = (sta_info_t *)cfg->ioctl_buf;
+		sta->len = dtoh16(sta->len);
+		sta->cap = dtoh16(sta->cap);
+		sta->flags = dtoh32(sta->flags);
+		sta->idle = dtoh32(sta->idle);
+		sta->in = dtoh32(sta->in);
+		sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (sta->flags & WL_STA_ASSOC) {
+			sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
+			sinfo->connected_time = sta->in;
+		}
+		WL_INFORM(("STA %s : idle time : %d ms, connected time : %d sec\n",
+			bcm_ether_ntoa((const struct ether_addr *)mac, eabuf),
+			sinfo->inactive_time, sta->in));
+#endif
+	} else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS ||
+		wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) {
+		get_pktcnt_t pktcnt;
+#ifdef DHD_SUPPORT_IF_CNTS
+		wl_if_stats_t *if_stats = NULL;
+#endif /* DHD_SUPPORT_IF_CNTS */
+		u8 *curmacp;
+
+		if (cfg->roam_offload) {
+			struct ether_addr bssid;
+			memset(&bssid, 0, sizeof(bssid));
+			err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+			if (err) {
+				WL_ERR(("Failed to get current BSSID\n"));
+			} else {
+				if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+					/* roaming is detected */
+					err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+					if (err)
+						WL_ERR(("Failed to handle the delayed roam, "
+							"err=%d", err));
+					mac = (u8 *)bssid.octet;
+				}
+			}
+		}
+		dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
+		DHD_OS_WAKE_LOCK(dhd);
+		fw_assoc_state = dhd_is_associated(dhd, 0, &err);
+		DHD_OS_WAKE_UNLOCK(dhd);
+		if (!dhd_assoc_state || !fw_assoc_state) {
+			WL_ERR(("NOT assoc\n"));
+			if (err == -ENODATA)
+				return err;
+			if (!dhd_assoc_state) {
+				WL_TRACE_HW4(("drv state is not connected \n"));
+			}
+			if (!fw_assoc_state) {
+				WL_TRACE_HW4(("fw state is not associated \n"));
+			}
+			/* Disconnect if the fw stays unassociated for FW_ASSOC_WATCHDOG_TIME ms.
+			 * 'err == 0' from dhd_is_associated() together with '!fw_assoc_state'
+			 * means that the BSSID is null.
+			 */
+			if (dhd_assoc_state && !fw_assoc_state && !err) {
+				if (!fw_assoc_watchdog_started) {
+					fw_assoc_watchdog_ms = OSL_SYSUPTIME();
+					fw_assoc_watchdog_started = TRUE;
+					WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
+				} else {
+					if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
+						FW_ASSOC_WATCHDOG_TIME) {
+						fw_assoc_watchdog_started = FALSE;
+						err = -ENODEV;
+						WL_TRACE_HW4(("fw is not associated for %d ms \n",
+							(OSL_SYSUPTIME() - fw_assoc_watchdog_ms)));
+						goto get_station_err;
+					}
+				}
+			}
+			err = -ENODEV;
+			return err;
+		}
+		fw_assoc_watchdog_started = FALSE;
+		curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+			WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+				MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+		}
+
+		/* Report the current tx rate */
+		rate = 0;
+		err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+		if (err) {
+			WL_ERR(("Could not get rate (%d)\n", err));
+		} else {
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			int rxpktglom;
+#endif
+			rate = dtoh32(rate);
+			sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
+			sinfo->txrate.legacy = rate * 5;
+			WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			rxpktglom = ((rate/2) > 150) ? 20 : 10;
+
+			if (maxrxpktglom != rxpktglom) {
+				maxrxpktglom = rxpktglom;
+				WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
+					maxrxpktglom));
+				err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+					(char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+					WLC_IOCTL_MAXLEN, NULL);
+				if (err < 0) {
+					WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+				}
+			}
+#endif
+		}
+
+		memset(&scb_val, 0, sizeof(scb_val));
+		scb_val.val = 0;
+		err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val,
+			sizeof(scb_val_t));
+		if (err) {
+			WL_ERR(("Could not get rssi (%d)\n", err));
+			goto get_station_err;
+		}
+		rssi = dtoh32(scb_val.val);
+#if defined(RSSIAVG)
+		err = wl_update_connected_rssi_cache(dev, &cfg->g_connected_rssi_cache_ctrl, &rssi);
+		if (err) {
+			WL_ERR(("Could not get rssi (%d)\n", err));
+			goto get_station_err;
+		}
+		wl_delete_dirty_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+		wl_reset_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+#endif
+#if defined(RSSIOFFSET)
+		rssi = wl_update_rssi_offset(dev, rssi);
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+		/* terence 20150419: limit the max rssi to -2 or the BSS will be
+		 * filtered out in Android
+		 */
+		rssi = MIN(rssi, RSSI_MAXVAL);
+#endif
+		sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
+		sinfo->signal = rssi;
+		WL_DBG(("RSSI %d dBm\n", rssi));
+
+#ifdef DHD_SUPPORT_IF_CNTS
+		if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) {
+			WL_ERR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__));
+			goto error;
+		}
+		memset(if_stats, 0, sizeof(*if_stats));
+
+		err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+			(char *)if_stats, sizeof(*if_stats), NULL);
+		if (!err) {
+			sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
+			sinfo->rx_dropped_misc = 0;
+			sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
+			sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
+				(uint32)dtoh64(if_stats->txrunt) +
+				(uint32)dtoh64(if_stats->txfail);
+		} else {
+//			WL_ERR(("%s: if_counters not supported ret=%d\n",
+//				__FUNCTION__, err));
+#endif /* DHD_SUPPORT_IF_CNTS */
+
+			err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
+				sizeof(pktcnt));
+			if (err) {
+				WL_ERR(("Could not get WLC_GET_PKTCNTS (%d)\n", err));
+				goto get_station_err;
+			}
+			sinfo->rx_packets = pktcnt.rx_good_pkt;
+			sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+			sinfo->tx_packets = pktcnt.tx_good_pkt;
+			sinfo->tx_failed  = pktcnt.tx_bad_pkt;
+#ifdef DHD_SUPPORT_IF_CNTS
+		}
+#endif /* DHD_SUPPORT_IF_CNTS */
+
+		sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
+			STA_INFO_BIT(INFO_RX_DROP_MISC) |
+			STA_INFO_BIT(INFO_TX_PACKETS) |
+			STA_INFO_BIT(INFO_TX_FAILED));
+
+get_station_err:
+		if (err)
+			err_cnt++;
+		else
+			err_cnt = 0;
+		if (err_cnt >= 3 && (err != -ENODATA)) {
+			/* Disconnect due to zero BSSID or error to get RSSI */
+			scb_val_t scbval;
+			scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
+			err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+			if (unlikely(err)) {
+				WL_ERR(("disassoc error (%d)\n", err));
+			}
+
+			WL_ERR(("force cfg80211_disconnected: %d\n", err));
+			wl_clr_drv_status(cfg, CONNECTED, dev);
+			CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+			wl_link_down(cfg);
+		}
+#ifdef DHD_SUPPORT_IF_CNTS
+error:
+		if (if_stats) {
+			kfree(if_stats);
+		}
+#endif /* DHD_SUPPORT_IF_CNTS */
+	}
+	else {
+		WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
+	}
+
+	return err;
+}
+
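+/*
+ * cfg80211 .set_power_mgmt handler.  Maps the enable flag to PM_FAST or
+ * PM_OFF, honouring the per-interface pm_block setting and any override
+ * from dhd_conf_get_pm(), and applies it with WLC_SET_PM unless an RTT
+ * session is active.
+ */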
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	bool enabled, s32 timeout)
+{
+	s32 pm;
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev);
+#ifdef RTT_SUPPORT
+	rtt_status_info_t *rtt_status;
+#endif /* RTT_SUPPORT */
+	dhd_pub_t *dhd = cfg->pub;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (cfg->p2p_net == dev || _net_info == NULL ||
+			!wl_get_drv_status(cfg, CONNECTED, dev) ||
+			(wl_get_mode_by_netdev(cfg, dev) != WL_MODE_BSS &&
+			wl_get_mode_by_netdev(cfg, dev) != WL_MODE_IBSS)) {
+		return err;
+	}
+
+	/* Enlarge pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_LONG);
+
+	pm = enabled ? PM_FAST : PM_OFF;
+	if (_net_info->pm_block) {
+		WL_ERR(("%s:Do not enable the power save for pm_block %d\n",
+			dev->name, _net_info->pm_block));
+		pm = PM_OFF;
+	}
+	if (enabled && dhd_conf_get_pm(dhd) >= 0)
+		pm = dhd_conf_get_pm(dhd);
+	pm = htod32(pm);
+	WL_DBG(("%s:power save %s\n", dev->name, (pm ? "enabled" : "disabled")));
+#ifdef RTT_SUPPORT
+	rtt_status = GET_RTTSTATE(dhd);
+	if (rtt_status->status != RTT_ENABLED) {
+#endif /* RTT_SUPPORT */
+		err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
+		if (unlikely(err)) {
+			if (err == -ENODEV)
+				WL_DBG(("net_device is not ready yet\n"));
+			else
+				WL_ERR(("error (%d)\n", err));
+			return err;
+		}
+#ifdef RTT_SUPPORT
+	}
+#endif /* RTT_SUPPORT */
+	wl_cfg80211_update_power_mode(dev);
+	return err;
+}
+
+void wl_cfg80211_update_power_mode(struct net_device *dev)
+{
+	int err, pm = -1;
+
+	err = wldev_ioctl_get(dev, WLC_GET_PM, &pm, sizeof(pm));
+	if (err)
+		WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
+	else if (pm != -1 && dev->ieee80211_ptr)
+		dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
+}
+
+void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (strcmp(command, "SCAN-ACTIVE") == 0) {
+		cfg->active_scan = 1;
+	} else if (strcmp(command, "SCAN-PASSIVE") == 0) {
+		cfg->active_scan = 0;
+	} else
+		WL_ERR(("Unknown command \n"));
+}
+
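+/* Return the 1-based position of the most significant set bit in a u16,
+ * or 0 if no bit is set.
+ */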
+static __used u32 wl_find_msb(u16 bit16)
+{
+	u32 ret = 0;
+
+	if (bit16 & 0xff00) {
+		ret += 8;
+		bit16 >>= 8;
+	}
+
+	if (bit16 & 0xf0) {
+		ret += 4;
+		bit16 >>= 4;
+	}
+
+	if (bit16 & 0xc) {
+		ret += 2;
+		bit16 >>= 2;
+	}
+
+	if (bit16 & 2)
+		ret += bit16 & 2;
+	else if (bit16)
+		ret += bit16;
+
+	return ret;
+}
+
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = BCME_OK;
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFORM(("device is not ready\n"));
+		return err;
+	}
+
+
+	return err;
+}
+
+
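+/*
+ * cfg80211 .suspend handler.  With DHD_CLEAR_ON_SUSPEND the pending scan
+ * request is completed as aborted and any half-finished connection attempt
+ * is reported as failed, keeping cfg80211 state consistent across suspend.
+ */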
+static s32
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif /* KERNEL_VERSION(2, 6, 39) || WL_COMPAT_WIRELESS */
+{
+	s32 err = BCME_OK;
+#ifdef DHD_CLEAR_ON_SUSPEND
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	unsigned long flags;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	struct cfg80211_scan_info info;
+#endif
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFORM(("device is not ready : status (%d)\n",
+			(int)cfg->status));
+		return err;
+	}
+	for_each_ndev(cfg, iter, next) {
+		/* p2p discovery iface doesn't have a ndev associated with it (for kernel > 3.8) */
+		if (iter->ndev)
+			wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	}
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+		info.aborted = true;
+		cfg80211_scan_done(cfg->scan_request, &info);
+#else
+		cfg80211_scan_done(cfg->scan_request, true);
+#endif
+		cfg->scan_request = NULL;
+	}
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev) {
+			wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+			wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+		}
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev) {
+			if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+				wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
+			}
+		}
+	}
+#endif /* DHD_CLEAR_ON_SUSPEND */
+
+
+	return err;
+}
+
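+/*
+ * Push the host-side PMKID cache to the firmware via the "pmkid_info"
+ * iovar.  Only the primary (STA) interface supports a PMK list; calls on
+ * virtual interfaces are ignored.
+ */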
+static s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+	s32 err)
+{
+	int i, j;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (!pmk_list) {
+		printf("pmk_list is NULL\n");
+		return -EINVAL;
+	}
+	/* pmk list is supported only for STA interface i.e. primary interface
+	 * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
+	 */
+	if (primary_dev != dev) {
+		WL_INFORM(("Flushing the PMK list is only supported on the"
+			" primary interface, not on virtual interfaces\n"));
+		return err;
+	}
+
+	WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+		WL_DBG(("PMKID[%d]: %pM =\n", i,
+			&pmk_list->pmkids.pmkid[i].BSSID));
+		for (j = 0; j < WPA2_PMKID_LEN; j++) {
+			WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+		}
+	}
+	if (likely(!err)) {
+		err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+			sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+			ETHER_ADDR_LEN))
+			break;
+	if (i < WL_NUM_PMKIDS_MAX) {
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+			ETHER_ADDR_LEN);
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+			WPA2_PMKID_LEN);
+		if (i == cfg->pmk_list->pmkids.npmkid)
+			cfg->pmk_list->pmkids.npmkid++;
+	} else {
+		err = -EINVAL;
+	}
+	WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+		&cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n",
+			cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].
+			PMKID[i]));
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	struct _pmkid_list pmkid = {.npmkid = 0};
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+	memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+	WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+		&pmkid.pmkid[0].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+	}
+
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp
+		    (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+		     ETHER_ADDR_LEN))
+			break;
+
+	if ((cfg->pmk_list->pmkids.npmkid > 0) &&
+		(i < cfg->pmk_list->pmkids.npmkid)) {
+		memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+		for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) {
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+				ETHER_ADDR_LEN);
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		cfg->pmk_list->pmkids.npmkid--;
+	} else {
+		err = -EINVAL;
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+
+}
+
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+	return err;
+}
+
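+/*
+ * Allocate and pre-fill a minimal wl_scan_params_t describing an active
+ * scan of at most one channel and no SSIDs.  The caller owns the returned
+ * buffer and must kfree() it; *out_params_size is set to its length.
+ */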
+static wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg, int channel, int nprobes,
+	int *out_params_size)
+{
+	wl_scan_params_t *params;
+	int params_size;
+	int num_chans;
+	int bssidx = 0;
+
+	*out_params_size = 0;
+
+	/* Our scan params only need space for 1 channel and 0 ssids */
+	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+		return params;
+	}
+	memset(params, 0, params_size);
+	params->nprobes = nprobes;
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	if (channel == -1)
+		params->channel_list[0] = htodchanspec(channel);
+	else
+		params->channel_list[0] = wl_ch_host_to_driver(cfg, bssidx, channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
+
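+/*
+ * cfg80211 .remain_on_channel handler, used for P2P listen.  A non-zero
+ * cookie is handed back to cfg80211, P2P discovery is enabled if needed
+ * and the firmware is put into LISTEN on the requested channel.  On VSDB
+ * builds an already-running scan is not aborted for short listens; a
+ * timer-backed "fake" listen state is used instead.
+ */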
+static s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
+#else
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel * channel,
+	enum nl80211_channel_type channel_type,
+	unsigned int duration, u64 *cookie)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 target_channel;
+	u32 id;
+	s32 err = BCME_OK;
+	struct ether_addr primary_mac;
+	struct net_device *ndev = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+#ifdef DHD_IFDEBUG
+	PRINT_WDEV_INFO(cfgdev);
+#endif /* DHD_IFDEBUG */
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
+		ieee80211_frequency_to_channel(channel->center_freq),
+		duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
+
+	if (!cfg->p2p) {
+		WL_ERR(("cfg->p2p is not initialized\n"));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+#ifdef P2P_LISTEN_OFFLOADING
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+		return -EAGAIN;
+	}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+	memcpy(&cfg->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+#if defined(WL_ENABLE_P2P_IF)
+	cfg->remain_on_chan_type = channel_type;
+#endif /* WL_ENABLE_P2P_IF */
+	id = ++cfg->last_roc_id;
+	if (id == 0)
+		id = ++cfg->last_roc_id;
+	*cookie = id;
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status(cfg, SCANNING, ndev)) {
+		struct timer_list *_timer;
+		WL_DBG(("scan is running. go to fake listen state\n"));
+
+		if (duration > LONG_LISTEN_TIME) {
+			wl_cfg80211_scan_abort(cfg);
+		} else {
+			wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+
+			if (timer_pending(&cfg->p2p->listen_timer)) {
+				WL_DBG(("cancel current listen timer \n"));
+				del_timer_sync(&cfg->p2p->listen_timer);
+			}
+
+			_timer = &cfg->p2p->listen_timer;
+			wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+			INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0);
+
+			err = BCME_OK;
+			goto exit;
+		}
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_CFG80211_SYNC_GON
+	if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		/* Do not enter listen mode again if we are already in listen mode for the next AF;
+		 * the remain-on-channel completion is delivered once the next AF completes.
+		 */
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#else
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		goto exit;
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	if (cfg->p2p && !cfg->p2p->on) {
+		/* For the p2p_listen command the supplicant sends remain_on_channel
+		 * without turning on P2P
+		 */
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+		p2p_on(cfg) = true;
+	}
+
+	if (p2p_is_on(cfg)) {
+		err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+		if (unlikely(err)) {
+			goto exit;
+		}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		err = wl_cfgp2p_discover_listen(cfg, target_channel, duration);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (err == BCME_OK) {
+			wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+		} else {
+			/* If it failed, the firmware may be in an internal scanning
+			 * state, so another scan request must not abort it.
+			 */
+			wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+		}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		/* WAR: force err = OK to prevent a cookie mismatch in wpa_supplicant;
+		 * the expiry timer will send the completion to the upper layer.
+		 */
+		err = BCME_OK;
+	}
+
+exit:
+	if (err == BCME_OK) {
+		WL_INFORM(("Success\n"));
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			duration, GFP_KERNEL);
+#else
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			channel_type, duration, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	} else {
+		WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie));
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	s32 err = 0;
+
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+#ifdef P2PLISTEN_AP_SAMECHN
+	struct net_device *dev;
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+#ifdef DHD_IFDEBUG
+	PRINT_WDEV_INFO(cfgdev);
+#endif /* DHD_IFDEBUG */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		WL_DBG((" enter ) on P2P dedicated discover interface\n"));
+	}
+#else
+	WL_DBG((" enter ) netdev_ifidx: %d \n", cfgdev->ifindex));
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#ifdef P2PLISTEN_AP_SAMECHN
+	if (cfg && cfg->p2p_resp_apchn_status) {
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		wl_cfg80211_set_p2p_resp_ap_chn(dev, 0);
+		cfg->p2p_resp_apchn_status = false;
+		WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
+	}
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+	if (cfg->last_roc_id == cookie) {
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	} else {
+		WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n",
+			__FUNCTION__, cookie, cfg->last_roc_id));
+	}
+
+	return err;
+}
+
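+/* Work handler for action frame channel sync: either listen on our own listen channel
+ * or actively search the peer's listen channel.
+ */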
+static void
+wl_cfg80211_afx_handler(struct work_struct *work)
+{
+	struct afx_hdl *afx_instance;
+	struct bcm_cfg80211 *cfg;
+	s32 ret = BCME_OK;
+
+	BCM_SET_CONTAINER_OF(afx_instance, work, struct afx_hdl, work);
+	if (!afx_instance) {
+		WL_ERR(("afx_instance is NULL\n"));
+		return;
+	}
+	cfg = wl_get_cfg(afx_instance->dev);
+	if (afx_instance) {
+		if (cfg->afx_hdl->is_active) {
+			if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
+				ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
+					(100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */
+			} else {
+				ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev,
+					cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan,
+					NULL);
+			}
+			if (unlikely(ret != BCME_OK)) {
+				WL_ERR(("ERROR occurred! returned value is (%d)\n", ret));
+				if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL))
+					complete(&cfg->act_frm_scan);
+			}
+		}
+	}
+}
+
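+/* Alternate between searching the peer's listen channel and listening on our own channel
+ * until a common channel is found or the retry limit is reached.
+ */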
+static s32
+wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+	u32 max_retry = WL_CHANNEL_SYNC_RETRY;
+	bool is_p2p_gas = false;
+
+	if (dev == NULL)
+		return -1;
+
+	WL_DBG((" enter ) \n"));
+
+	wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+	cfg->afx_hdl->is_active = TRUE;
+
+	if (cfg->afx_hdl->pending_tx_act_frm) {
+		wl_action_frame_t *action_frame;
+		action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame);
+		if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len))
+			is_p2p_gas = true;
+	}
+
+	/* Loop to wait until we find a peer's channel or the
+	 * pending action frame tx is cancelled.
+	 */
+	while ((cfg->afx_hdl->retry < max_retry) &&
+		(cfg->afx_hdl->peer_chan == WL_INVALID)) {
+		cfg->afx_hdl->is_listen = FALSE;
+		wl_set_drv_status(cfg, SCANNING, dev);
+		WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
+			cfg->afx_hdl->retry));
+		/* search peer on peer's listen channel */
+		schedule_work(&cfg->afx_hdl->work);
+		wait_for_completion_timeout(&cfg->act_frm_scan,
+			msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		if (is_p2p_gas)
+			break;
+
+		if (cfg->afx_hdl->my_listen_chan) {
+			WL_DBG(("Scheduling Listen peer in my listen channel = %d\n",
+				cfg->afx_hdl->my_listen_chan));
+			/* listen on my listen channel */
+			cfg->afx_hdl->is_listen = TRUE;
+			schedule_work(&cfg->afx_hdl->work);
+			wait_for_completion_timeout(&cfg->act_frm_scan,
+				msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+		}
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		cfg->afx_hdl->retry++;
+
+		WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+	}
+
+	cfg->afx_hdl->is_active = FALSE;
+
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+
+	return (cfg->afx_hdl->peer_chan);
+}
+
+struct p2p_config_af_params {
+	s32 max_tx_retry;	/* max tx retry count if tx no ack */
+#ifdef WL_CFG80211_SYNC_GON
+	bool extra_listen;
+#endif
+	bool search_channel;	/* 1: search peer's channel to send af */
+};
+
+static s32
+wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
+	wl_action_frame_t *action_frame, wl_af_params_t *af_params,
+	struct p2p_config_af_params *config_af_params)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wifi_p2p_pub_act_frame_t *act_frm =
+		(wifi_p2p_pub_act_frame_t *) (action_frame->data);
+
+	/* initialize default value */
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params->extra_listen = true;
+#endif
+	config_af_params->search_channel = false;
+	config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+
+	switch (act_frm->subtype) {
+	case P2P_PAF_GON_REQ: {
+		WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
+		wl_set_p2p_status(cfg, GO_NEG_PHASE);
+
+		config_af_params->search_channel = true;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+
+		break;
+	}
+	case P2P_PAF_GON_RSP: {
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for CONF frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME + 100;
+		break;
+	}
+	case P2P_PAF_GON_CONF: {
+		/* If we have reached GO Neg confirmation, reset the filter */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	case P2P_PAF_INVITE_REQ: {
+		config_af_params->search_channel = true;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_INVITE_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_DEVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* maximize dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_LONG_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_DEVDIS_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_PROVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_PROVDIS_RSP: {
+		cfg->next_af_subtype = P2P_PAF_GON_REQ;
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	default:
+		WL_DBG(("Unknown p2p pub act frame subtype: %d\n",
+			act_frm->subtype));
+		err = BCME_BADARG;
+	}
+	return err;
+}
+
+#ifdef WL11U
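+/* On DFS channels (52-148), allow TX only for non-P2P action frames whose target channel
+ * is occupied by a known 5 GHz BSS; frames on other channels are always allowed.
+ */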
+static bool
+wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params,
+	void *frame, u16 frame_len)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;
+	bool result = false;
+	s32 i;
+	chanspec_t chanspec;
+
+	/* If the channel is in the DFS range (52-148), decide whether to block the frame */
+	if (af_params &&
+		(af_params->channel >= 52 && af_params->channel <= 148)) {
+		if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+			bss_list = cfg->bss_list;
+			bi = next_bss(bss_list, bi);
+			for_each_bss(bss_list, bi, i) {
+				chanspec = wl_chspec_driver_to_host(bi->chanspec);
+				if (CHSPEC_IS5G(chanspec) &&
+					((bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(chanspec))
+					== af_params->channel)) {
+					result = true;	/* do not block the action frame */
+					break;
+				}
+			}
+		}
+	}
+	else {
+		result = true;
+	}
+
+	WL_DBG(("result=%s", result?"true":"false"));
+	return result;
+}
+#endif /* WL11U */
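+
+/* Return true when the action frame TX time has exceeded the dwell time encoded in a custom-retry dwell request */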
+static bool
+wl_cfg80211_check_dwell_overflow(int32 requested_dwell, ulong dwell_jiffies)
+{
+	if ((requested_dwell & CUSTOM_RETRY_MASK) &&
+			(jiffies_to_msecs(jiffies - dwell_jiffies) >
+			 (requested_dwell & ~CUSTOM_RETRY_MASK))) {
+		WL_ERR(("Action frame TX retry time over dwell time!\n"));
+		return true;
+	}
+	return false;
+}
+
+static bool
+wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev,
+	bcm_struct_cfgdev *cfgdev, wl_af_params_t *af_params,
+	wl_action_frame_t *action_frame, u16 action_frame_len, s32 bssidx)
+{
+#ifdef WL11U
+	struct net_device *ndev = NULL;
+#endif /* WL11U */
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	bool ack = false;
+	u8 category, action;
+	s32 tx_retry;
+	struct p2p_config_af_params config_af_params;
+	struct net_info *netinfo;
+#ifdef VSDB
+	ulong off_chan_started_jiffies = 0;
+#endif
+	ulong dwell_jiffies = 0;
+	bool dwell_overflow = false;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	bool miss_gon_cfm = false;
+
+	int32 requested_dwell = af_params->dwell_time;
+
+	/* Set the default dwell time: the time to stay off-channel waiting for a
+	 * response action frame after transmitting a GO Negotiation action frame
+	 */
+	af_params->dwell_time = WL_DWELL_TIME;
+
+#ifdef WL11U
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = dev;
+#else
+	ndev = ndev_to_cfgdev(cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+	category = action_frame->data[DOT11_ACTION_CAT_OFF];
+	action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+	/* initialize variables */
+	tx_retry = 0;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+	config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
+	config_af_params.search_channel = false;
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params.extra_listen = false;
+#endif
+
+	/* config parameters */
+	/* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */
+	if (category == DOT11_ACTION_CAT_PUBLIC) {
+		if ((action == P2P_PUB_AF_ACTION) &&
+			(action_frame_len >= sizeof(wifi_p2p_pub_act_frame_t))) {
+			/* p2p public action frame process */
+			if (BCME_OK != wl_cfg80211_config_p2p_pub_af_tx(wiphy,
+				action_frame, af_params, &config_af_params)) {
+				WL_DBG(("Unknown subtype.\n"));
+			}
+
+		} else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) {
+			/* service discovery process */
+			if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+				action == P2PSD_ACTION_ID_GAS_CREQ) {
+				/* configure service discovery query frame */
+
+				config_af_params.search_channel = true;
+
+				/* save the next af subtype to cancel the remaining dwell time */
+				cfg->next_af_subtype = action + 1;
+
+				af_params->dwell_time = WL_MED_DWELL_TIME;
+				if (requested_dwell & CUSTOM_RETRY_MASK) {
+					config_af_params.max_tx_retry =
+						(requested_dwell & CUSTOM_RETRY_MASK) >> 24;
+					af_params->dwell_time =
+						(requested_dwell & ~CUSTOM_RETRY_MASK);
+					WL_DBG(("Custom retry(%d) and dwell time(%d) is set.\n",
+						config_af_params.max_tx_retry,
+						af_params->dwell_time));
+				}
+			} else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+				action == P2PSD_ACTION_ID_GAS_CRESP) {
+				/* configure service discovery response frame */
+				af_params->dwell_time = WL_MIN_DWELL_TIME;
+			} else {
+				WL_DBG(("Unknown action type: %d\n", action));
+			}
+		} else {
+			WL_DBG(("Unknown Frame: category 0x%x, action 0x%x, length %d\n",
+				category, action, action_frame_len));
+		}
+	} else if (category == P2P_AF_CATEGORY) {
+		/* do not configure anything. it will be sent with a default configuration */
+	} else {
+		WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n",
+			category, action));
+		if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+			wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+			return false;
+		}
+	}
+	netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+	/* validate channel and p2p ies */
+	if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) &&
+		netinfo && netinfo->bss.ies.probe_req_ie_len) {
+		config_af_params.search_channel = true;
+	} else {
+		config_af_params.search_channel = false;
+	}
+#ifdef WL11U
+	if (ndev == bcmcfg_to_prmry_ndev(cfg))
+		config_af_params.search_channel = false;
+#endif /* WL11U */
+
+#ifdef VSDB
+	/* if connecting on primary iface, sleep for a while before sending af tx for VSDB */
+	if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
+		OSL_SLEEP(50);
+	}
+#endif
+
+	/* if scan is ongoing, abort current scan. */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+
+	/* Abort P2P listen */
+	if (discover_cfgdev(cfgdev, cfg)) {
+		if (cfg->p2p_supported && cfg->p2p) {
+			wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+				wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+		}
+	}
+
+#ifdef WL11U
+	/* handling DFS channel exceptions */
+	if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) {
+		return false;	/* the action frame was blocked */
+	}
+#endif /* WL11U */
+
+	/* set status and destination address before sending af */
+	if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+		/* set this status to cancel the remained dwell time in rx process */
+		wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+	}
+	wl_set_drv_status(cfg, SENDING_ACT_FRM, dev);
+	memcpy(cfg->afx_hdl->tx_dst_addr.octet,
+		af_params->action_frame.da.octet,
+		sizeof(cfg->afx_hdl->tx_dst_addr.octet));
+
+	/* save af_params for rx process */
+	cfg->afx_hdl->pending_tx_act_frm = af_params;
+
+	if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) {
+		WL_DBG(("Set GAS action frame config.\n"));
+		config_af_params.search_channel = false;
+		config_af_params.max_tx_retry = 1;
+	}
+
+	/* search peer's channel */
+	if (config_af_params.search_channel) {
+		/* initialize afx_hdl */
+		if ((cfg->afx_hdl->bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+			WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+			goto exit;
+		}
+		cfg->afx_hdl->dev = dev;
+		cfg->afx_hdl->retry = 0;
+		cfg->afx_hdl->peer_chan = WL_INVALID;
+
+		if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) {
+			WL_ERR(("couldn't find peer's channel.\n"));
+			wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len,
+				af_params->channel);
+			/* Even if we couldn't find the peer channel, try to send the frame
+			 * out. The P2P cert 5.1.14 testbed device (Realtek) doesn't seem to
+			 * respond to probe requests (ideally it should be in listen and
+			 * respond to probe requests). However, if we send a GO Neg req, the
+			 * peer does send a GO Neg resp. So instead of giving up here, just
+			 * proceed and attempt sending out the action frame.
+			 */
+		}
+
+		wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+		/*
+		 * Abort the scan even for VSDB scenarios. The scan gets aborted in firmware,
+		 * but only after the piggyback algorithm check.
+		 * To take care of the current piggyback algorithm, abort the scan here itself.
+		 */
+		wl_notify_escan_complete(cfg, dev, true, true);
+		/* Suspend P2P discovery's search-listen to prevent it from
+		 * starting a scan or changing the channel.
+		 */
+		if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			goto exit;
+		}
+
+		/* update channel */
+		if (cfg->afx_hdl->peer_chan != WL_INVALID) {
+			af_params->channel = cfg->afx_hdl->peer_chan;
+			WL_ERR(("Attempt tx on peer listen channel:%d ",
+				cfg->afx_hdl->peer_chan));
+		} else {
+			WL_ERR(("Attempt tx with the channel provided by userspace. "
+				"Channel: %d\n", af_params->channel));
+		}
+	}
+
+#ifdef VSDB
+	off_chan_started_jiffies = jiffies;
+#endif /* VSDB */
+
+	wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
+
+	wl_cfgp2p_need_wait_actfrmae(cfg, action_frame->data, action_frame->len, true);
+
+	dwell_jiffies = jiffies;
+	/* Now send a tx action frame */
+	ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
+	dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
+
+	if (ack && (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))) {
+		wifi_p2p_pub_act_frame_t *pact_frm;
+		pact_frm = (wifi_p2p_pub_act_frame_t *)(action_frame->data);
+		if (pact_frm->subtype == P2P_PAF_GON_RSP) {
+			WL_ERR(("Miss GO Nego cfm after P2P_PAF_GON_RSP\n"));
+			miss_gon_cfm = true;
+		}
+	}
+
+	/* If it failed, retry. The tx_retry_max value is configured by .... */
+	while ((miss_gon_cfm || (ack == false)) && (tx_retry++ < config_af_params.max_tx_retry) &&
+			!dwell_overflow) {
+#ifdef VSDB
+		if (af_params->channel) {
+			if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
+				OFF_CHAN_TIME_THRESHOLD_MS) {
+				WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+				off_chan_started_jiffies = jiffies;
+			} else
+				OSL_SLEEP(AF_RETRY_DELAY_TIME);
+		}
+#endif /* VSDB */
+		ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
+			false : true;
+		if (miss_gon_cfm && !wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+			WL_ERR(("Received GO Nego cfm after P2P_PAF_GON_RSP\n"));
+			miss_gon_cfm = false;
+		}
+		dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
+	}
+
+	if (ack == false) {
+		WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry));
+	}
+	WL_DBG(("Complete to send action frame\n"));
+exit:
+	/* Clear SENDING_ACT_FRM after all sending af is done */
+	wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+
+#ifdef WL_CFG80211_SYNC_GON
+	/* WAR: sometimes the dongle does not keep the dwell time of 'actframe'.
+	 * If we couldn't get the next action response frame and the dongle does not keep
+	 * the dwell time, go to listen state again to get the next action response frame.
+	 */
+	if (ack && config_af_params.extra_listen &&
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
+		cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
+		s32 extra_listen_time;
+
+		extra_listen_time = af_params->dwell_time -
+			jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies);
+
+		if (extra_listen_time > 50) {
+			wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+			WL_DBG(("Wait more time! actual af time:%d,"
+				" calculated extra listen:%d\n",
+				af_params->dwell_time, extra_listen_time));
+			if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel,
+				extra_listen_time + 100) == BCME_OK) {
+				wait_for_completion_timeout(&cfg->wait_next_af,
+					msecs_to_jiffies(extra_listen_time + 100 + 300));
+			}
+			}
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+		}
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+	cfg->afx_hdl->pending_tx_act_frm = NULL;
+
+	WL_INFORM(("-- sending Action Frame is %s, listen chan: %d\n",
+		(ack) ? "Succeeded!!":"Failed!!", cfg->afx_hdl->my_listen_chan));
+
+	return ack;
+}
+
+#define MAX_NUM_OF_ASSOCIATED_DEV       64
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+#else
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, bool offchan,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0))
+	enum nl80211_channel_type channel_type,
+	bool channel_type_valid,
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
+	unsigned int wait, const u8* buf, size_t len,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	bool no_cck,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+	bool dont_wait_for_ack,
+#endif
+	u64 *cookie)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+{
+	wl_action_frame_t *action_frame;
+	wl_af_params_t *af_params;
+	scb_val_t scb_val;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	struct ieee80211_channel *channel = params->chan;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
+#endif
+	const struct ieee80211_mgmt *mgmt;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = NULL;
+	s32 err = BCME_OK;
+	s32 bssidx = 0;
+	u32 id;
+	bool ack = false;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+
+	WL_DBG(("Enter \n"));
+
+	if (len > ACTION_FRAME_SIZE) {
+		WL_ERR(("bad length:%zu\n", len));
+		return BCME_BADLEN;
+	}
+#ifdef DHD_IFDEBUG
+	PRINT_WDEV_INFO(cfgdev);
+#endif /* DHD_IFDEBUG */
+
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (!dev) {
+		WL_ERR(("dev is NULL\n"));
+		return -EINVAL;
+	}
+
+	/* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE)	*/
+	if (discover_cfgdev(cfgdev, cfg)) {
+		if (!cfg->p2p_supported || !cfg->p2p) {
+			WL_ERR(("P2P setup is not completed yet\n"));
+			return -EINVAL;
+		}
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	}
+	else {
+		if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) {
+			WL_ERR(("Find p2p index failed\n"));
+			return BCME_ERROR;
+		}
+	}
+
+	WL_DBG(("TX target bssidx=%d\n", bssidx));
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
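+	/* Generate a non-zero cookie so userspace can match the TX status to this request */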
+	*cookie = 0;
+	id = cfg->send_action_id++;
+	if (id == 0)
+		id = cfg->send_action_id++;
+	*cookie = id;
+	mgmt = (const struct ieee80211_mgmt *)buf;
+	if (ieee80211_is_mgmt(mgmt->frame_control)) {
+		if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+			s32 ie_offset =  DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+			s32 ie_len = len - ie_offset;
+			if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) {
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+			}
+			wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+				VNDR_IE_PRBRSP_FLAG, (const u8 *)(buf + ie_offset), ie_len);
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+#if defined(P2P_IE_MISSING_FIX)
+			if (!cfg->p2p_prb_noti) {
+				cfg->p2p_prb_noti = true;
+				WL_DBG(("%s: TX 802_1X Probe Response first time.\n",
+					__FUNCTION__));
+			}
+#endif
+			goto exit;
+		} else if (ieee80211_is_disassoc(mgmt->frame_control) ||
+			ieee80211_is_deauth(mgmt->frame_control)) {
+			char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+				sizeof(struct ether_addr) + sizeof(uint)] = {0};
+			int num_associated = 0;
+			struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+			if (!bcmp((const uint8 *)BSSID_BROADCAST,
+				(const struct ether_addr *)mgmt->da, ETHER_ADDR_LEN)) {
+				assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+				err = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST,
+					assoc_maclist, sizeof(mac_buf));
+				if (err < 0)
+					WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+				else
+					num_associated = assoc_maclist->count;
+			}
+			memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
+			scb_val.val = mgmt->u.disassoc.reason_code;
+			err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+				sizeof(scb_val_t));
+			if (err < 0)
+				WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err));
+			WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+				bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf),
+				scb_val.val));
+
+			if (num_associated > 0 && ETHER_ISBCAST(mgmt->da))
+				wl_delay(400);
+
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+			goto exit;
+
+		} else if (ieee80211_is_action(mgmt->frame_control)) {
+			/* Abort the dwell time of any previous off-channel
+			 * action frame that may still be in effect.  Sending
+			 * off-channel action frames relies on the driver's
+			 * scan engine.  If a previous off-channel action frame
+			 * tx is still in progress (including the dwell time),
+			 * then this new action frame will not be sent out.
+			 */
+			/* Do not abort the scan for VSDB. The scan will be aborted in firmware
+			 * if necessary, and the previous off-channel action frame must be ended
+			 * before the new af tx.
+			 */
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_notify_escan_complete(cfg, dev, true, true);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		}
+
+	} else {
+		WL_ERR(("Driver only allows MGMT packet type\n"));
+		goto exit;
+	}
+
+	af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+	if (af_params == NULL)
+	{
+		WL_ERR(("unable to allocate frame\n"));
+		return -ENOMEM;
+	}
+
+	action_frame = &af_params->action_frame;
+
+	/* Add the packet Id */
+	action_frame->packetId = *cookie;
+	WL_DBG(("action frame %d\n", action_frame->packetId));
+	/* Add BSSID */
+	memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+	memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+	/* Set the frame length, excluding the 802.11 header */
+	action_frame->len = len - DOT11_MGMT_HDR_LEN;
+	WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+	/* Add the channel */
+	af_params->channel =
+		ieee80211_frequency_to_channel(channel->center_freq);
+	/* Save listen_chan for searching common channel */
+	cfg->afx_hdl->peer_listen_chan = af_params->channel;
+	WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	af_params->dwell_time = params->wait;
+#else
+	af_params->dwell_time = wait;
+#endif
+
+	memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+	ack = wl_cfg80211_send_action_frame(wiphy, dev, cfgdev, af_params,
+		action_frame, action_frame->len, bssidx);
+	cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
+
+	kfree(af_params);
+exit:
+	return err;
+}
+
+static void wl_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
+	struct wireless_dev *wdev, struct mgmt_frame_regs *upd)
+{
+	WL_DBG(("%s\n", __func__));
+	return;
+}
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct bss_parameters *params)
+{
+	s32 err = 0;
+	s32 ap_isolate = 0;
+#ifdef PCIE_FULL_DONGLE
+	s32 ifidx = DHD_BAD_IF;
+#endif
+#if defined(PCIE_FULL_DONGLE)
+	dhd_pub_t *dhd;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd = (dhd_pub_t *)(cfg->pub);
+#if defined(WL_ENABLE_P2P_IF)
+	if (cfg->p2p_net == dev)
+		dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
+#endif 
+
+	if (params->use_cts_prot >= 0) {
+	}
+
+	if (params->use_short_preamble >= 0) {
+	}
+
+	if (params->use_short_slot_time >= 0) {
+	}
+
+	if (params->basic_rates) {
+	}
+
+	if (params->ap_isolate >= 0) {
+		ap_isolate = params->ap_isolate;
+#ifdef PCIE_FULL_DONGLE
+		ifidx = dhd_net2idx(dhd->info, dev);
+
+		if (ifidx != DHD_BAD_IF) {
+			err = dhd_set_ap_isolate(dhd, ifidx, ap_isolate);
+		} else {
+			WL_ERR(("Failed to set ap_isolate\n"));
+		}
+#else
+		err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+		if (unlikely(err))
+		{
+			WL_ERR(("set ap_isolate Error (%d)\n", err));
+		}
+#endif /* PCIE_FULL_DONGLE */
+	}
+
+	if (params->ht_opmode >= 0) {
+	}
+
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel *chan,
+	enum nl80211_channel_type channel_type)
+{
+	s32 _chan;
+	chanspec_t chspec = 0;
+	chanspec_t fw_chspec = 0;
+	u32 bw = WL_CHANSPEC_BW_20;
+#ifdef WL11ULB
+	u32 ulb_bw = wl_cfg80211_get_ulb_bw(wl_get_cfg(dev), dev->ieee80211_ptr);
+#endif /* WL11ULB */
+
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(CUSTOM_SET_CPUCORE) || (defined(WL_VIRTUAL_APSTA) && \
+	defined(APSTA_RESTRICTED_CHANNEL))
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE || (WL_VIRTUAL_APSTA && APSTA_RESTRICTED_CHANNEL) */
+
+	dev = ndev_to_wlc_ndev(dev, cfg);
+	_chan = ieee80211_frequency_to_channel(chan->center_freq);
+#ifdef WL_EXT_IAPSTA
+	_chan = wl_ext_iapsta_update_channel(dev, _chan);
+#endif
+	printf("%s: netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+		__FUNCTION__, dev->ifindex, channel_type, _chan);
+
+
+#if defined(WL_VIRTUAL_APSTA) && defined(APSTA_RESTRICTED_CHANNEL)
+#define DEFAULT_2G_SOFTAP_CHANNEL	1
+#define DEFAULT_5G_SOFTAP_CHANNEL	149
+	if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP &&
+		(dhd->op_mode & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) ==
+		DHD_FLAG_CONCURR_STA_HOSTAP_MODE &&
+		wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
+		u32 *sta_chan = (u32 *)wl_read_prof(cfg,
+			bcmcfg_to_prmry_ndev(cfg), WL_PROF_CHAN);
+		u32 sta_band = (*sta_chan > CH_MAX_2G_CHANNEL) ?
+			IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+		if (chan->band == sta_band) {
+			/* Do not try SCC in 5GHz if channel is not CH149 */
+			_chan = (sta_band == IEEE80211_BAND_5GHZ &&
+				*sta_chan != DEFAULT_5G_SOFTAP_CHANNEL) ?
+				DEFAULT_2G_SOFTAP_CHANNEL : *sta_chan;
+			WL_ERR(("target channel will be changed to %d\n", _chan));
+			if (_chan <= CH_MAX_2G_CHANNEL) {
+				bw = WL_CHANSPEC_BW_20;
+				goto set_channel;
+			}
+		}
+	}
+#undef DEFAULT_2G_SOFTAP_CHANNEL
+#undef DEFAULT_5G_SOFTAP_CHANNEL
+#endif /* WL_VIRTUAL_APSTA && APSTA_RESTRICTED_CHANNEL */
+
+#ifdef WL11ULB
+	if (ulb_bw) {
+		WL_DBG(("[ULB] setting AP/GO BW to ulb_bw 0x%x \n", ulb_bw));
+		bw = wl_cfg80211_ulbbw_to_ulbchspec(ulb_bw);
+		goto set_channel;
+	}
+#endif /* WL11ULB */
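+	/* For 5 GHz, query the firmware bandwidth capability (bw_cap, falling back to mimo_bw_cap)
+	 * to select the channel width.
+	 */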
+	if (chan->band == IEEE80211_BAND_5GHZ) {
+		param.band = WLC_BAND_5G;
+		err = wldev_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err) {
+			if (err != BCME_UNSUPPORTED) {
+				WL_ERR(("bw_cap failed, %d\n", err));
+				return err;
+			} else {
+				err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+				if (err) {
+					WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+				}
+				if (bw_cap != WLC_N_BW_20ALL)
+					bw = WL_CHANSPEC_BW_40;
+			}
+		} else {
+			if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_80;
+			else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_40;
+			else
+				bw = WL_CHANSPEC_BW_20;
+
+		}
+
+	} else if (chan->band == IEEE80211_BAND_2GHZ)
+		bw = WL_CHANSPEC_BW_20;
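+	/* Build the chanspec for the chosen channel and width; on BCME_BADCHAN retry with a
+	 * narrower width or fall back to the legacy WLC_SET_CHANNEL ioctl.
+	 */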
+set_channel:
+	chspec = wf_channel2chspec(_chan, bw);
+	if (wf_chspec_valid(chspec)) {
+		fw_chspec = wl_chspec_host_to_driver(chspec);
+		if (fw_chspec != INVCHANSPEC) {
+			if ((err = wldev_iovar_setint(dev, "chanspec",
+				fw_chspec)) == BCME_BADCHAN) {
+				if (bw == WL_CHANSPEC_BW_80)
+					goto change_bw;
+				err = wldev_ioctl_set(dev, WLC_SET_CHANNEL,
+					&_chan, sizeof(_chan));
+				if (err < 0) {
+					WL_ERR(("WLC_SET_CHANNEL error %d"
+					"chip may not be supporting this channel\n", err));
+				}
+			} else if (err) {
+				WL_ERR(("failed to set chanspec error %d\n", err));
+			}
+		} else {
+			WL_ERR(("failed to convert host chanspec to fw chanspec\n"));
+			err = BCME_ERROR;
+		}
+	} else {
+change_bw:
+		if (bw == WL_CHANSPEC_BW_80)
+			bw = WL_CHANSPEC_BW_40;
+		else if (bw == WL_CHANSPEC_BW_40)
+			bw = WL_CHANSPEC_BW_20;
+		else
+			bw = 0;
+		if (bw)
+			goto set_channel;
+		WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+		err = BCME_ERROR;
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
+		WL_DBG(("SoftAP mode does not need to set cpucore\n"));
+	} else if (chspec & WL_CHANSPEC_BW_80) {
+		/* SoftAP-only mode does not need to set cpucore */
+		if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) &&
+			dev != bcmcfg_to_prmry_ndev(cfg)) {
+			/* Soft AP on virtual Iface (AP+STA case) */
+			dhd->chan_isvht80 |= DHD_FLAG_HOSTAP_MODE;
+			dhd_set_cpucore(dhd, TRUE);
+		} else if (is_p2p_group_iface(dev->ieee80211_ptr)) {
+			/* If P2P IF is vht80 */
+			dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
+			dhd_set_cpucore(dhd, TRUE);
+		}
+	}
+#endif /* CUSTOM_SET_CPUCORE */
+	if (!err && (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
+		/* Update AP/GO operating channel */
+		cfg->ap_oper_channel = ieee80211_frequency_to_channel(chan->center_freq);
+	}
+	return err;
+}
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *
+wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
+			return _net_info->ndev;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	return NULL;
+}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+static s32
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx, bool privacy)
+{
+	s32 err = BCME_OK;
+	u32 wpa_val;
+	s32 wsec = 0;
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	if (privacy) {
+		/* If privacy bit is set in open mode, then WEP would be enabled */
+		wsec = WEP_ENABLED;
+		WL_DBG(("Setting wsec to %d for WEP \n", wsec));
+	}
+
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	/* set upper-layer auth */
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC)
+		wpa_val = WPA_AUTH_NONE;
+	else
+		wpa_val = WPA_AUTH_DISABLED;
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_val, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	return 0;
+}
+
+static s32
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+	s32 len = 0;
+	s32 err = BCME_OK;
+	u16 auth = 0; /* d11 open authentication */
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	wpa_pmkid_list_t *pmkid;
+	int cnt = 0;
+#ifdef MFP
+	int mfp = 0;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif /* MFP */
+
+	u16 suite_count;
+	u8 rsn_cap[2];
+	u32 wme_bss_disable;
+
+	if (wpa2ie == NULL)
+		goto exit;
+
+	WL_DBG(("Enter \n"));
+	len =  wpa2ie->len - WPA2_VERSION_LEN;
+	/* check the mcast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	switch (mcast->type) {
+		case WPA_CIPHER_NONE:
+			gval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			gval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			gval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			gval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+			break;
+	}
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+
+	/* check the unicast cipher */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	switch (ucast->list[0].type) {
+		case WPA_CIPHER_NONE:
+			pval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			pval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			pval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			pval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+	}
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	/* For WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* check the AKM */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = cnt = ltoh16_ua(&mgmt->count);
+	while (cnt--) {
+		switch (mgmt->list[cnt].type) {
+		case RSN_AKM_NONE:
+			wpa_auth |= WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			wpa_auth |= WPA2_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			wpa_auth |= WPA2_AUTH_PSK;
+			break;
+#ifdef MFP
+		case RSN_AKM_MFP_PSK:
+			wpa_auth |= WPA2_AUTH_PSK_SHA256;
+			break;
+		case RSN_AKM_MFP_1X:
+			wpa_auth |= WPA2_AUTH_1X_SHA256;
+			break;
+#endif /* MFP */
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+		}
+	}
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+		rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+
+		if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+			wme_bss_disable = 0;
+		} else {
+			wme_bss_disable = 1;
+		}
+
+#ifdef MFP
+	if (rsn_cap[0] & RSN_CAP_MFPR) {
+		WL_DBG(("MFP Required \n"));
+		mfp = WL_MFP_REQUIRED;
+		/* The firmware requires WPA2_AUTH_PSK/WPA2_AUTH_UNSPECIFIED to be set
+		 * if the SHA256 OUI is to be included in the RSN IE.
+		 */
+		if (wpa_auth & WPA2_AUTH_PSK_SHA256) {
+			wpa_auth |= WPA2_AUTH_PSK;
+		} else if (wpa_auth & WPA2_AUTH_1X_SHA256) {
+			wpa_auth |= WPA2_AUTH_UNSPECIFIED;
+		}
+	} else if (rsn_cap[0] & RSN_CAP_MFPC) {
+		WL_DBG(("MFP Capable \n"));
+		mfp = WL_MFP_CAPABLE;
+	}
+#endif /* MFP */
+
+		/* set wme_bss_disable to sync RSN Capabilities */
+		err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+		if (err < 0) {
+			WL_ERR(("wme_bss_disable error %d\n", err));
+			return BCME_ERROR;
+		}
+	} else {
+		WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+	}
+
+	len -= RSN_CAP_LEN;
+	if (len >= WPA2_PMKID_COUNT_LEN) {
+		pmkid = (wpa_pmkid_list_t *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN);
+		cnt = ltoh16_ua(&pmkid->count);
+		if (cnt != 0) {
+			WL_ERR(("AP has non-zero PMKID count. Wrong!\n"));
+			return BCME_ERROR;
+		}
+		/* Since the PMKID count is known to be 0 for an AP,
+		 * don't bother sending this info down to the firmware.
+		 */
+	}
+
+#ifdef MFP
+	len -= WPA2_PMKID_COUNT_LEN;
+	if (len >= WPA_SUITE_LEN) {
+		cfg->bip_pos = (u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN + WPA2_PMKID_COUNT_LEN;
+	} else {
+		cfg->bip_pos = NULL;
+	}
+#endif
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+
+#ifdef MFP
+	cfg->mfp_mode = mfp;
+#endif /* MFP */
+
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
+static s32
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	u16 auth = 0; /* d11 open authentication */
+	u16 count;
+	s32 err = BCME_OK;
+	s32 len = 0;
+	u32 i;
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	u32 tmp = 0;
+
+	if (wpaie == NULL)
+		goto exit;
+	WL_DBG(("Enter \n"));
+	len = wpaie->length;    /* value length */
+	len -= WPA_IE_TAG_FIXED_LEN;
+	/* check for multicast cipher suite */
+	if (len < WPA_SUITE_LEN) {
+		WL_INFORM(("no multicast cipher suite\n"));
+		goto exit;
+	}
+
+	/* pick up multicast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpaie[1];
+	len -= WPA_SUITE_LEN;
+	if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+		if (IS_WPA_CIPHER(mcast->type)) {
+			tmp = 0;
+			switch (mcast->type) {
+				case WPA_CIPHER_NONE:
+					tmp = 0;
+					break;
+				case WPA_CIPHER_WEP_40:
+				case WPA_CIPHER_WEP_104:
+					tmp = WEP_ENABLED;
+					break;
+				case WPA_CIPHER_TKIP:
+					tmp = TKIP_ENABLED;
+					break;
+				case WPA_CIPHER_AES_CCM:
+					tmp = AES_ENABLED;
+					break;
+				default:
+					WL_ERR(("No Security Info\n"));
+			}
+			gval |= tmp;
+		}
+	}
+	/* Check for unicast suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM(("no unicast suite\n"));
+		goto exit;
+	}
+	/* walk thru unicast cipher list and pick up what we recognize */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	count = ltoh16_ua(&ucast->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_CIPHER(ucast->list[i].type)) {
+				tmp = 0;
+				switch (ucast->list[i].type) {
+					case WPA_CIPHER_NONE:
+						tmp = 0;
+						break;
+					case WPA_CIPHER_WEP_40:
+					case WPA_CIPHER_WEP_104:
+						tmp = WEP_ENABLED;
+						break;
+					case WPA_CIPHER_TKIP:
+						tmp = TKIP_ENABLED;
+						break;
+					case WPA_CIPHER_AES_CCM:
+						tmp = AES_ENABLED;
+						break;
+					default:
+						WL_ERR(("No Security Info\n"));
+				}
+				pval |= tmp;
+			}
+		}
+	}
+	len -= (count - i) * WPA_SUITE_LEN;
+	/* Check for auth key management suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM((" no auth key mgmt suite\n"));
+		goto exit;
+	}
+	/* walk thru auth management suite list and pick up what we recognize */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+	count = ltoh16_ua(&mgmt->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_AKM(mgmt->list[i].type)) {
+				tmp = 0;
+				switch (mgmt->list[i].type) {
+					case RSN_AKM_NONE:
+						tmp = WPA_AUTH_NONE;
+						break;
+					case RSN_AKM_UNSPECIFIED:
+						tmp = WPA_AUTH_UNSPECIFIED;
+						break;
+					case RSN_AKM_PSK:
+						tmp = WPA_AUTH_PSK;
+						break;
+					default:
+						WL_ERR(("No Key Mgmt Info\n"));
+				}
+				wpa_auth |= tmp;
+			}
+		}
+
+	}
+	/* For WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+static u32 wl_get_cipher_type(uint8 type)
+{
+	u32 ret = 0;
+	switch (type) {
+		case WPA_CIPHER_NONE:
+			ret = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			ret = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			ret = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			ret = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			ret = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+	}
+	return ret;
+}
+
+static u32 wl_get_suite_auth_key_mgmt_type(uint8 type)
+{
+	u32 ret = 0;
+	switch (type) {
+		case RSN_AKM_NONE:
+			ret = WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			ret = WPA_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			ret = WPA_AUTH_PSK;
+			break;
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+	}
+	return ret;
+}
+
+static u32 wl_get_suite_auth2_key_mgmt_type(uint8 type)
+{
+	u32 ret = 0;
+	switch (type) {
+		case RSN_AKM_NONE:
+			ret = WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			ret = WPA2_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			ret = WPA2_AUTH_PSK;
+			break;
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+	}
+	return ret;
+}
+
+static s32
+wl_validate_wpaie_wpa2ie(struct net_device *dev, wpa_ie_fixed_t *wpaie,
+	bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	u16 auth = 0; /* d11 open authentication */
+	u16 count;
+	s32 err = BCME_OK;
+	u32 wme_bss_disable;
+	u16 suite_count;
+	u8 rsn_cap[2];
+	s32 len = 0;
+	u32 i;
+	u32 wsec1, wsec2, wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	u32 wpa_auth1 = 0;
+	u32 wpa_auth2 = 0;
+	u8* ptmp;
+
+	if (wpaie == NULL || wpa2ie == NULL)
+		goto exit;
+
+	WL_DBG(("Enter \n"));
+	len = wpaie->length;    /* value length */
+	len -= WPA_IE_TAG_FIXED_LEN;
+	/* check for multicast cipher suite */
+	if (len < WPA_SUITE_LEN) {
+		WL_INFORM(("no multicast cipher suite\n"));
+		goto exit;
+	}
+
+	/* pick up multicast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpaie[1];
+	len -= WPA_SUITE_LEN;
+	if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+		if (IS_WPA_CIPHER(mcast->type)) {
+			gval |= wl_get_cipher_type(mcast->type);
+		}
+	}
+	WL_ERR(("\nwpa ie validate\n"));
+	WL_ERR(("wpa ie mcast cipher = 0x%X\n", gval));
+
+	/* Check for unicast suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM(("no unicast suite\n"));
+		goto exit;
+	}
+
+	/* walk thru unicast cipher list and pick up what we recognize */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	count = ltoh16_ua(&ucast->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_CIPHER(ucast->list[i].type)) {
+				pval |= wl_get_cipher_type(ucast->list[i].type);
+			}
+		}
+	}
+	WL_ERR(("wpa ie ucast count =%d, cipher = 0x%X\n", count, pval));
+
+	/* For WPS, set SES_OW_ENABLED */
+	wsec1 = (pval | gval | SES_OW_ENABLED);
+	WL_ERR(("wpa ie wsec = 0x%X\n", wsec1));
+
+	len -= (count - i) * WPA_SUITE_LEN;
+	/* Check for auth key management suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM((" no auth key mgmt suite\n"));
+		goto exit;
+	}
+	/* walk thru auth management suite list and pick up what we recognize */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+	count = ltoh16_ua(&mgmt->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_AKM(mgmt->list[i].type)) {
+
+				wpa_auth1 |= wl_get_suite_auth_key_mgmt_type(mgmt->list[i].type);
+			}
+		}
+
+	}
+	WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth1));
+	WL_ERR(("\nwpa2 ie validate\n"));
+
+	pval = 0;
+	gval = 0;
+	len =  wpa2ie->len;
+	/* check the mcast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	ptmp = mcast->oui;
+	gval = wl_get_cipher_type(ptmp[DOT11_OUI_LEN]);
+
+	WL_ERR(("wpa2 ie mcast cipher = 0x%X\n", gval));
+	if ((len -= WPA_SUITE_LEN) <= 0)
+	{
+		WL_ERR(("P:wpa2 ie len[%d]", len));
+		return BCME_BADLEN;
+	}
+
+	/* check the unicast cipher */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	WL_ERR((" WPA2 ucast cipher count=%d\n", suite_count));
+	pval |= wl_get_cipher_type(ucast->list[0].type);
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	WL_ERR(("wpa2 ie ucast cipher = 0x%X\n", pval));
+
+	/* For WPS, set SES_OW_ENABLED */
+	wsec2 = (pval | gval | SES_OW_ENABLED);
+	WL_ERR(("wpa2 ie wsec = 0x%X\n", wsec2));
+
+	/* check the AKM */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+	ptmp = (u8 *)&mgmt->list[0];
+	wpa_auth2 = wl_get_suite_auth2_key_mgmt_type(ptmp[DOT11_OUI_LEN]);
+	WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth2));
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+		rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+		if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+			wme_bss_disable = 0;
+		} else {
+			wme_bss_disable = 1;
+		}
+		WL_DBG(("P:rsn_cap[0]=[0x%X]:wme_bss_disabled[%d]\n", rsn_cap[0], wme_bss_disable));
+
+		/* set wme_bss_disable to sync RSN Capabilities */
+		err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+		if (err < 0) {
+			WL_ERR(("wme_bss_disable error %d\n", err));
+			return BCME_ERROR;
+		}
+	} else {
+		WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+	}
+
+	wsec = (wsec1 | wsec2);
+	wpa_auth = (wpa_auth1 | wpa_auth2);
+	WL_ERR(("wpa_wpa2 wsec=0x%X wpa_auth=0x%X\n", wsec, wpa_auth));
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+
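+/* Validate the WPA/WPA2/WPS IEs parsed from the beacon and program the corresponding
+ * security settings for the AP or P2P GO role.
+ */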
+static s32
+wl_cfg80211_bcn_validate_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role,
+	s32 bssidx,
+	bool privacy)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr);
+
+	if (!bss) {
+		WL_ERR(("cfgbss is NULL \n"));
+		return BCME_ERROR;
+	}
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
+		/* For P2P GO, the sec type is WPA2-PSK */
+		WL_DBG(("P2P GO: validating wpa2_ie"));
+		if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0)
+			return BCME_ERROR;
+
+	} else if (dev_role == NL80211_IFTYPE_AP) {
+
+		WL_DBG(("SoftAP: validating security"));
+		/* If wpa2_ie or wpa_ie is present validate it */
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+		if ((ies->wpa_ie != NULL && ies->wpa2_ie != NULL)) {
+			if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie, ies->wpa2_ie, bssidx)  < 0) {
+				bss->security_mode = false;
+				return BCME_ERROR;
+			}
+		}
+		else {
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+		if ((ies->wpa2_ie || ies->wpa_ie) &&
+			((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+			wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
+			bss->security_mode = false;
+			return BCME_ERROR;
+		}
+
+		bss->security_mode = true;
+		if (bss->rsn_ie) {
+			kfree(bss->rsn_ie);
+			bss->rsn_ie = NULL;
+		}
+		if (bss->wpa_ie) {
+			kfree(bss->wpa_ie);
+			bss->wpa_ie = NULL;
+		}
+		if (bss->wps_ie) {
+			kfree(bss->wps_ie);
+			bss->wps_ie = NULL;
+		}
+		if (ies->wpa_ie != NULL) {
+			/* WPAIE */
+			bss->rsn_ie = NULL;
+			bss->wpa_ie = kmemdup(ies->wpa_ie,
+				ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		} else if (ies->wpa2_ie != NULL) {
+			/* RSNIE */
+			bss->wpa_ie = NULL;
+			bss->rsn_ie = kmemdup(ies->wpa2_ie,
+				ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		}
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+		}
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
+		if (!ies->wpa2_ie && !ies->wpa_ie) {
+			wl_validate_opensecurity(dev, bssidx, privacy);
+			bss->security_mode = false;
+		}
+
+		if (ies->wps_ie) {
+			bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+	}
+
+	return 0;
+
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32 wl_cfg80211_bcn_set_params(
+	struct cfg80211_ap_settings *info,
+	struct net_device *dev,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	s32 err = BCME_OK;
+
+	WL_DBG(("interval (%d) \ndtim_period (%d) \n",
+		info->beacon_interval, info->dtim_period));
+
+	if (info->beacon_interval) {
+		if ((err = wldev_ioctl_set(dev, WLC_SET_BCNPRD,
+			&info->beacon_interval, sizeof(s32))) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl_set(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32))) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if ((info->ssid) && (info->ssid_len > 0) &&
+		(info->ssid_len <= DOT11_MAX_SSID_LEN)) {
+		WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(cfg->hostapd_ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
+			memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
+			cfg->hostapd_ssid.SSID_len = info->ssid_len;
+		} else {
+				/* P2P GO */
+			memset(cfg->p2p->ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
+			memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
+			cfg->p2p->ssid.SSID_len = info->ssid_len;
+		}
+	}
+
+	if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
+		if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+			WL_ERR(("failed to set hidden : %d\n", err));
+		WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+	}
+
+	return err;
+}
+#endif 
+
+static s32
+wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies)
+{
+	s32 err = BCME_OK;
+
+	memset(ies, 0, sizeof(struct parsed_ies));
+
+	/* find the WPSIE */
+	if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
+		WL_DBG(("WPSIE in beacon \n"));
+		ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+	} else {
+		WL_DBG(("No WPSIE in beacon \n"));
+	}
+
+	/* find the RSN_IE */
+	if ((ies->wpa2_ie = bcm_parse_tlvs(ptr, len,
+		DOT11_MNG_RSN_ID)) != NULL) {
+		WL_DBG((" WPA2 IE found\n"));
+		ies->wpa2_ie_len = ies->wpa2_ie->len;
+	}
+
+	/* find the WPA_IE */
+	if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) {
+		WL_DBG((" WPA found\n"));
+		ies->wpa_ie_len = ies->wpa_ie->length;
+	}
+
+	return err;
+
+}
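+
+/* Put the interface into the AP role: use add_del_bss on RSDB-capable chips, otherwise
+ * set up AP mode explicitly via the apsta/WLC_SET_AP ioctls.
+ */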
+static s32
+wl_cfg80211_set_ap_role(
+	struct bcm_cfg80211 *cfg,
+	struct net_device *dev)
+{
+	s32 err = BCME_OK;
+	s32 infra = 1;
+	s32 ap = 1;
+	s32 pm;
+	s32 is_rsdb_supported = BCME_ERROR;
+	s32 bssidx;
+	s32 apsta = 0;
+
+	is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+	if (is_rsdb_supported < 0)
+		return (-ENODEV);
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return -EINVAL;
+	}
+
+	/* AP on primary Interface */
+	if (bssidx == 0) {
+		if (is_rsdb_supported) {
+			if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx,
+				NL80211_IFTYPE_AP, 0, NULL)) < 0) {
+				WL_ERR(("wl add_del_bss returned error:%d\n", err));
+				return err;
+			}
+		} else if (is_rsdb_supported == 0) {
+			/* AP mode switch not supported. Try setting up AP explicitly */
+			err = wldev_iovar_getint(dev, "apsta", (s32 *)&apsta);
+			if (unlikely(err)) {
+				WL_ERR(("Could not get apsta %d\n", err));
+			}
+			if (1) { // terence: fix me
+				/* If apsta is not set, set it */
+				err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+				if (err < 0) {
+					WL_ERR(("WLC_DOWN error %d\n", err));
+					return err;
+				}
+				err = wldev_iovar_setint(dev, "apsta", 0);
+				if (err < 0) {
+					WL_ERR(("wl apsta 0 error %d\n", err));
+					return err;
+				}
+				if ((err = wldev_ioctl_set(dev,
+					WLC_SET_AP, &ap, sizeof(s32))) < 0) {
+					WL_ERR(("setting AP mode failed %d \n", err));
+					return err;
+				}
+			}
+		}
+
+		pm = 0;
+		if ((err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm))) != 0) {
+			WL_ERR(("wl PM 0 returned error:%d\n", err));
+			/* Ignore error, if any */
+			err = BCME_OK;
+		}
+		err = wldev_ioctl_set(dev, WLC_SET_INFRA, &infra, sizeof(s32));
+		if (err < 0) {
+			WL_ERR(("SET INFRA error %d\n", err));
+			return err;
+		}
+	} else {
+		WL_DBG(("Bringup SoftAP on virtual Interface bssidx:%d \n", bssidx));
+		if ((err = wl_cfg80211_add_del_bss(cfg, dev,
+			bssidx, NL80211_IFTYPE_AP, 0, NULL)) < 0) {
+			WL_ERR(("wl bss ap returned error:%d\n", err));
+			return err;
+		}
+	}
+
+	/* On success, mark AP creation in progress. */
+	wl_set_drv_status(cfg, AP_CREATING, dev);
+	return 0;
+}
+
+
+/* In RSDB downgrade cases, the link up event can get delayed up to 7-8 secs */
+#define MAX_AP_LINK_WAIT_TIME   10000
+static s32
+wl_cfg80211_bcn_bringup_ap(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct wl_join_params join_params;
+	bool is_bssup = false;
+	s32 infra = 1;
+	s32 join_params_size = 0;
+	s32 ap = 1;
+	s32 wsec;
+#ifdef WLMESH
+	bool retried = false;
+#endif
+#ifdef SOFTAP_UAPSD_OFF
+	uint32 wme_apsd = 0;
+#endif /* SOFTAP_UAPSD_OFF */
+	s32 err = BCME_OK;
+	s32 is_rsdb_supported = BCME_ERROR;
+	u32 timeout;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+	is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+	if (is_rsdb_supported < 0)
+		return (-ENODEV);
+
+	WL_DBG(("Enter dev_role:%d bssidx:%d ifname:%s\n", dev_role, bssidx, dev->name));
+
+	/* Common code for SoftAP and P2P GO */
+	wl_clr_drv_status(cfg, AP_CREATED, dev);
+
+	/* Make sure INFRA is set for AP/GO */
+	err = wldev_ioctl_set(dev, WLC_SET_INFRA, &infra, sizeof(s32));
+	if (err < 0) {
+		WL_ERR(("SET INFRA error %d\n", err));
+		goto exit;
+	}
+
+	/* Do abort scan before creating GO */
+	wl_cfg80211_scan_abort(cfg);
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO) {
+		is_bssup = wl_cfg80211_bss_isup(dev, bssidx);
+		if (!is_bssup && (ies->wpa2_ie != NULL)) {
+
+			err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid,
+				sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (err < 0) {
+				WL_ERR(("GO SSID setting error %d\n", err));
+				goto exit;
+			}
+
+			if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 1)) < 0) {
+				WL_ERR(("GO Bring up error %d\n", err));
+				goto exit;
+			}
+		} else
+			WL_DBG(("Bss is already up\n"));
+	} else if (dev_role == NL80211_IFTYPE_AP) {
+
+//		if (!wl_get_drv_status(cfg, AP_CREATING, dev)) {
+			/* Make sure fw is in proper state */
+			err = wl_cfg80211_set_ap_role(cfg, dev);
+			if (unlikely(err)) {
+				WL_ERR(("set ap role failed!\n"));
+				goto exit;
+			}
+//		}
+
+		/* Device role SoftAP */
+		WL_DBG(("Creating AP bssidx:%d dev_role:%d\n", bssidx, dev_role));
+		/* Clear the status bit after use */
+		wl_clr_drv_status(cfg, AP_CREATING, dev);
+
+
+#ifdef SOFTAP_UAPSD_OFF
+		err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd),
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (err < 0) {
+			WL_ERR(("failed to disable uapsd, error=%d\n", err));
+		}
+#endif /* SOFTAP_UAPSD_OFF */
+
+		err = wldev_ioctl_set(dev, WLC_UP, &ap, sizeof(s32));
+		if (unlikely(err)) {
+			WL_ERR(("WLC_UP error (%d)\n", err));
+			goto exit;
+		}
+
+#ifdef MFP
+		if (cfg->bip_pos) {
+			err = wldev_iovar_setbuf_bsscfg(dev, "bip",
+				(void *)(cfg->bip_pos), WPA_SUITE_LEN, cfg->ioctl_buf,
+				WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+			if (err < 0) {
+				WL_ERR(("bip set error %d\n", err));
+				if (wl_customer6_legacy_chip_check(cfg,
+					bcmcfg_to_prmry_ndev(cfg))) {
+					/* Ignore bip error: some older firmware versions don't
+					 * support the bip iovar / return BCME_NOTUP while trying
+					 * to set bip from the AP bring-up context. These firmwares
+					 * include bip in the RSN IE by default, so it's okay to
+					 * ignore the error.
+					 */
+					err = BCME_OK;
+				} else {
+					goto exit;
+				}
+			}
+		}
+#endif /* MFP */
+
+		err = wldev_iovar_getint(dev, "wsec", (s32 *)&wsec);
+		if (unlikely(err)) {
+			WL_ERR(("Could not get wsec %d\n", err));
+			goto exit;
+		}
+		if (dhdp->conf->chip == BCM43430_CHIP_ID && bssidx > 0 && wsec >= 2) {
+			wsec |= 0x8; // terence 20180628: fix me, this is a workaround
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+			if (err < 0) {
+				WL_ERR(("wsec error %d\n", err));
+				goto exit;
+			}
+		}
+		if ((wsec == WEP_ENABLED) && cfg->wep_key.len) {
+			WL_DBG(("Applying buffered WEP KEY \n"));
+			err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &cfg->wep_key,
+				sizeof(struct wl_wsec_key), cfg->ioctl_buf,
+				WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+			/* clear the key after use */
+			memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+			if (unlikely(err)) {
+				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+				goto exit;
+			}
+		}
+
+#ifdef MFP
+		if (cfg->mfp_mode) {
+			/* This needs to go after wsec otherwise the wsec command will
+			 * overwrite the values set by MFP
+			 */
+			err = wldev_iovar_setint_bsscfg(dev, "mfp", cfg->mfp_mode, bssidx);
+			if (err < 0) {
+				WL_ERR(("MFP Setting failed. ret = %d \n", err));
+				/* If fw doesn't support mfp, Ignore the error */
+				if (err != BCME_UNSUPPORTED) {
+					goto exit;
+				}
+			}
+		}
+#endif /* MFP */
+
+#ifdef WLMESH
+ssid_retry:
+#endif
+		memset(&join_params, 0, sizeof(join_params));
+		/* join parameters starts with ssid */
+		join_params_size = sizeof(join_params.ssid);
+		join_params.ssid.SSID_len = MIN(cfg->hostapd_ssid.SSID_len,
+			(uint32)DOT11_MAX_SSID_LEN);
+		memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+			join_params.ssid.SSID_len);
+		join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+
+		/* create softap */
+		if ((err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+			join_params_size)) != 0) {
+			WL_ERR(("SoftAP/GO set ssid failed! %d\n", err));
+			goto exit;
+		} else {
+			WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID));
+		}
+
+		if (bssidx != 0) {
+			/* AP on Virtual Interface */
+			if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 1)) < 0) {
+				WL_ERR(("AP Bring up error %d\n", err));
+				goto exit;
+			}
+		}
+
+	}
+
+	/* Wait for Linkup event to mark successful AP/GO bring up */
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		wl_get_drv_status(cfg, AP_CREATED, dev), msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+	if (timeout <= 0 || !wl_get_drv_status(cfg, AP_CREATED, dev)) {
+#ifdef WLMESH
+		if (!retried) {
+			retried = true;
+			WL_ERR(("Link up didn't come for AP interface. Try to set ssid again to recover it! \n"));
+			goto ssid_retry;
+		}
+#endif
+		WL_ERR(("Link up didn't come for AP interface. AP/GO creation failed! \n"));
+		if (timeout == -ERESTARTSYS) {
+			WL_ERR(("waitqueue was interrupted by a signal, returns -ERESTARTSYS\n"));
+			err = -ERESTARTSYS;
+			goto exit;
+		}
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+		if (dhdp->memdump_enabled) {
+			dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE;
+			dhd_bus_mem_dump(dhdp);
+		}
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+		err = -ENODEV;
+		goto exit;
+	}
+
+exit:
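+	/* The buffered WEP key and MFP parameters are single-use; clear them
+	 * here whether or not the bring-up succeeded.
+	 */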
+	if (cfg->wep_key.len) {
+		memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+	}
+
+#ifdef MFP
+	if (cfg->mfp_mode) {
+		cfg->mfp_mode = 0;
+	}
+
+	if (cfg->bip_pos) {
+		cfg->bip_pos = NULL;
+	}
+#endif /* MFP */
+
+	return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+s32
+wl_cfg80211_parse_ap_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	struct parsed_ies *ies)
+{
+	struct parsed_ies prb_ies;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	const u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Parse Beacon IEs */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	vndr = (const u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		const struct ieee80211_mgmt *mgmt;
+		mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (const u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	/* Parse Probe Response IEs */
+	if (wl_cfg80211_parse_ies((u8 *)vndr, vndr_ie_len, &prb_ies) < 0) {
+		WL_ERR(("PROBE RESP get IEs failed \n"));
+		err = -EINVAL;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+fail:
+
+	return err;
+}
+
+s32
+wl_cfg80211_set_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	const u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Set Beacon IEs to FW */
+	if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+		VNDR_IE_BEACON_FLAG, (const u8 *)info->tail,
+		info->tail_len)) < 0) {
+		WL_ERR(("Set Beacon IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+
+	vndr = (const u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		const struct ieee80211_mgmt *mgmt;
+		mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (const u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+
+	/* Set Probe Response IEs to FW */
+	if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+		VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
+		WL_ERR(("Set Probe Resp IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Probe Resp \n"));
+	}
+
+	return err;
+}
+#endif 
+
+static s32 wl_cfg80211_hostapd_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	s32 bssidx)
+{
+	bool update_bss = 0;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr);
+
+	if (!bss) {
+		WL_ERR(("cfgbss is NULL \n"));
+		return -EINVAL;
+	}
+
+	if (ies->wps_ie) {
+		if (bss->wps_ie &&
+			memcmp(bss->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
+			WL_DBG((" WPS IE is changed\n"));
+			kfree(bss->wps_ie);
+			bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		} else if (bss->wps_ie == NULL) {
+			WL_DBG((" WPS IE is added\n"));
+			bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+
+		if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
+			if (!bss->security_mode) {
+				/* change from open mode to security mode */
+				update_bss = true;
+				if (ies->wpa_ie != NULL) {
+					bss->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else {
+					bss->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				}
+			} else if (bss->wpa_ie) {
+				/* change from WPA2 mode to WPA mode */
+				if (ies->wpa_ie != NULL) {
+					update_bss = true;
+					kfree(bss->rsn_ie);
+					bss->rsn_ie = NULL;
+					bss->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else if (memcmp(bss->rsn_ie,
+					ies->wpa2_ie, ies->wpa2_ie->len
+					+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+					update_bss = true;
+					kfree(bss->rsn_ie);
+					bss->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+					bss->wpa_ie = NULL;
+				}
+			}
+			if (update_bss) {
+				bss->security_mode = true;
+				wl_cfg80211_bss_up(cfg, dev, bssidx, 0);
+				if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+					wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
+					return BCME_ERROR;
+				}
+				wl_cfg80211_bss_up(cfg, dev, bssidx, 1);
+			}
+		}
+	} else {
+		WL_ERR(("No WPSIE in beacon \n"));
+	}
+	return 0;
+}
+
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+wl_cfg80211_del_station(
+		struct wiphy *wiphy, struct net_device *ndev,
+		struct station_del_parameters *params)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_del_station(
+	struct wiphy *wiphy,
+	struct net_device *ndev,
+	const u8* mac_addr)
+#else
+wl_cfg80211_del_station(
+	struct wiphy *wiphy,
+	struct net_device *ndev,
+	u8* mac_addr)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+{
+	struct net_device *dev;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+	int err;
+	char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+	int num_associated = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+	const u8 *mac_addr = params->mac;
+#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
+	u16 rc = params->reason_code;
+#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
+	WL_DBG(("Entry\n"));
+	if (mac_addr == NULL) {
+		WL_DBG(("mac_addr is NULL ignore it\n"));
+		return 0;
+	}
+
+	dev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
+	err = wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
+		WL_EXT_STATUS_DELETE_STA, (void *)mac_addr);
+	if (err) {
+		return 0;
+	}
+
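+	/* Snapshot the current association list so a broadcast deauth below can
+	 * be followed by a short settling delay.
+	 */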
+	assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+	err = wldev_ioctl_get(ndev, WLC_GET_ASSOCLIST,
+		assoc_maclist, sizeof(mac_buf));
+	if (err < 0)
+		WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+	else
+		num_associated = assoc_maclist->count;
+
+	memcpy(scb_val.ea.octet, mac_addr, ETHER_ADDR_LEN);
+#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+	if (rc == DOT11_RC_8021X_AUTH_FAIL) {
+		WL_ERR(("deauth will be sent at F/W\n"));
+		scb_val.val = DOT11_RC_8021X_AUTH_FAIL;
+	} else {
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
+#ifndef BCMDBUS
+	dhd_wait_pend8021x(dev);
+#endif /* !BCMDBUS */
+	scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+	err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+		sizeof(scb_val_t));
+	if (err < 0)
+		WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
+	printf("%s: Disconnect STA : %s scb_val.val %d\n", __FUNCTION__,
+		bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf),
+		scb_val.val);
+
+	if (num_associated > 0 && ETHER_ISBCAST(mac_addr))
+		wl_delay(400);
+
+	return 0;
+}
+
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_change_station(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	const u8 *mac,
+	struct station_parameters *params)
+#else
+wl_cfg80211_change_station(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 *mac,
+	struct station_parameters *params)
+#endif
+{
+	int err;
+#if defined(WL_ENABLE_P2P_IF) || defined(DHD_LOSSLESS_ROAMING)
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#endif
+	struct net_device *ndev = ndev_to_wlc_ndev(dev, cfg);
+
+	WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x "
+				"sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac),
+				params->sta_flags_mask, params->sta_flags_set, ndev->name));
+
+	/* Processing only authorize/de-authorize flag for now */
+	if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+		WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n"));
+		return -ENOTSUPP;
+	}
+
+	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+		err = wldev_ioctl_set(ndev, WLC_SCB_DEAUTHORIZE, (u8 *)mac, ETH_ALEN);
+#else
+		err = wldev_ioctl_set(ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN);
+#endif
+		if (err)
+			WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
+		return err;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+	err = wldev_ioctl_set(ndev, WLC_SCB_AUTHORIZE, (u8 *)mac, ETH_ALEN);
+#else
+	err = wldev_ioctl_set(ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN);
+#endif
+	if (err)
+		WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
+#ifdef DHD_LOSSLESS_ROAMING
+	wl_del_roam_timeout(cfg);
+#endif
+	return err;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+
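+/* Program the per-STA (SCB) keepalive probe timings and the power-save
+ * pretend threshold used while operating as AP/GO.
+ */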
+static s32
+wl_cfg80211_set_scb_timings(
+	struct bcm_cfg80211 *cfg,
+	struct net_device *dev)
+{
+	int err;
+	u32 ps_pretend;
+	wl_scb_probe_t scb_probe;
+
+	bzero(&scb_probe, sizeof(wl_scb_probe_t));
+	scb_probe.scb_timeout = WL_SCB_TIMEOUT;
+	scb_probe.scb_activity_time = WL_SCB_ACTIVITY_TIME;
+	scb_probe.scb_max_probe = WL_SCB_MAX_PROBE;
+	err = wldev_iovar_setbuf(dev, "scb_probe", (void *)&scb_probe,
+		sizeof(wl_scb_probe_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+		&cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("set 'scb_probe' failed, error = %d\n", err));
+		return err;
+	}
+
+	ps_pretend = MAX(WL_SCB_MAX_PROBE / 2, WL_MIN_PSPRETEND_THRESHOLD);
+	err = wldev_iovar_setint(dev, "pspretend_threshold", ps_pretend);
+	if (unlikely(err)) {
+		if (err == BCME_UNSUPPORTED) {
+			/* Ignore error if fw doesn't support the iovar */
+			WL_DBG(("wl pspretend_threshold %d set error %d\n",
+				ps_pretend, err));
+		} else {
+			WL_ERR(("wl pspretend_threshold %d set error %d\n",
+				ps_pretend, err));
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32
+wl_cfg80211_start_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_ap_settings *info)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = BCME_OK;
+	struct parsed_ies ies;
+	s32 bssidx = 0;
+	u32 dev_role = 0;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#ifdef WLMESH
+	struct wl_join_params join_params;
+	s32 join_params_size = 0;
+#endif
+
+	WL_DBG(("Enter \n"));
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+	wl_cfg80211_set_random_mac(dev, FALSE);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (p2p_is_on(cfg) && (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	} else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+		dev_role = NL80211_IFTYPE_AP;
+		dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+		err = dhd_ndo_enable(dhd, FALSE);
+		WL_DBG(("%s: Disabling NDO on Hostapd mode %d\n", __FUNCTION__, err));
+		if (err) {
+			WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err));
+		}
+#ifdef PKT_FILTER_SUPPORT
+		/* Disable packet filter */
+		if (dhd->early_suspended) {
+			WL_ERR(("Disable pkt_filter\n"));
+			dhd_enable_packet_filter(0, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+		/* IF SoftAP is enabled, disable arpoe */
+		if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+			dhd_arp_offload_set(dhd, 0);
+			dhd_arp_offload_enable(dhd, FALSE);
+		}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef SUPPORT_SET_CAC
+		wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+	} else {
+		/* only AP or GO role need to be handled here. */
+		err = -EINVAL;
+		goto fail;
+	}
+
+	/* disable TDLS */
+#ifdef WLTDLS
+	if (bssidx == 0) {
+		/* Disable TDLS for primary Iface. For virtual interface,
+		 * tdls disable will happen from interface create context
+		 */
+		wl_cfg80211_tdls_config(cfg, TDLS_STATE_AP_CREATE, false);
+	}
+#endif /*  WLTDLS */
+
+	if (!check_dev_role_integrity(cfg, dev_role)) {
+		err = -EINVAL;
+		goto fail;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	if ((err = wl_cfg80211_set_channel(wiphy, dev,
+		dev->ieee80211_ptr->preset_chandef.chan,
+		NL80211_CHAN_HT20)) < 0) {
+		WL_ERR(("Set channel failed \n"));
+		goto fail;
+	}
+#endif 
+
+	if ((err = wl_cfg80211_bcn_set_params(info, dev,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon params set failed \n"));
+		goto fail;
+	}
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, &info->beacon, &ies)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies,
+		dev_role, bssidx, info->privacy)) < 0)
+	{
+		WL_ERR(("Beacon set security failed \n"));
+		goto fail;
+	}
+
+	if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+	/* Set GC/STA SCB expiry timings. */
+	if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
+		WL_ERR(("scb setting failed \n"));
+//		goto fail;
+	}
+
+#ifdef WLMESH
+	OSL_SLEEP(1000);
+	if ((dev_role == NL80211_IFTYPE_P2P_GO) || (dev_role == NL80211_IFTYPE_AP)) {
+		memset(&join_params, 0, sizeof(join_params));
+		/* join parameters starts with ssid */
+		join_params_size = sizeof(join_params.ssid);
+		if (dev_role == NL80211_IFTYPE_P2P_GO) {
+			join_params.ssid.SSID_len = min(cfg->p2p->ssid.SSID_len,
+				(uint32)DOT11_MAX_SSID_LEN);
+			memcpy(join_params.ssid.SSID, cfg->p2p->ssid.SSID,
+				join_params.ssid.SSID_len);
+		} else if (dev_role == NL80211_IFTYPE_AP) {
+			join_params.ssid.SSID_len = min(cfg->hostapd_ssid.SSID_len,
+				(uint32)DOT11_MAX_SSID_LEN);
+			memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+				join_params.ssid.SSID_len);
+		}
+		join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+		/* create softap */
+		if ((err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+			join_params_size)) != 0) {
+			WL_ERR(("SoftAP/GO set ssid failed! \n"));
+			goto fail;
+		} else {
+			WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID));
+		}
+	}
+#endif
+
+	WL_DBG(("** AP/GO Created **\n"));
+
+#ifdef WL_CFG80211_ACL
+	/* Enforce Admission Control. */
+	if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) {
+		WL_ERR(("Set ACL failed\n"));
+	}
+#endif /* WL_CFG80211_ACL */
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
+		WL_ERR(("Set IEs failed \n"));
+
+	/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+	if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+		bool pbc = 0;
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc) {
+			WL_DBG(("set WLC_E_PROBREQ_MSG\n"));
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+		}
+	}
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+	if ((dev_role == NL80211_IFTYPE_AP)) {
+		wl_set_ap_rps(dev, FALSE, dev->name);
+		wl_cfg80211_init_ap_rps(cfg);
+	}
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		wl_cfg80211_stop_ap(wiphy, dev);
+		if (dev_role == NL80211_IFTYPE_AP) {
+			dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+#ifdef PKT_FILTER_SUPPORT
+			/* Enable packet filter */
+			if (dhd->early_suspended) {
+				WL_ERR(("Enable pkt_filter\n"));
+				dhd_enable_packet_filter(1, dhd);
+			}
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+			/* IF SoftAP is disabled, enable arpoe back for STA mode. */
+			if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+				dhd_arp_offload_set(dhd, dhd_arp_mode);
+				dhd_arp_offload_enable(dhd, TRUE);
+			}
+#endif /* ARP_OFFLOAD_SUPPORT */
+		}
+#ifdef WLTDLS
+		if (bssidx == 0) {
+			/* Since AP creation failed, re-enable TDLS */
+			wl_cfg80211_tdls_config(cfg, TDLS_STATE_AP_DELETE, false);
+		}
+#endif /*  WLTDLS */
+
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_stop_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev)
+{
+	int err = 0;
+	u32 dev_role = 0;
+	int ap = 0;
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 is_rsdb_supported = BCME_ERROR;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+	WL_DBG(("Enter \n"));
+
+	is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+	if (is_rsdb_supported < 0)
+		return (-ENODEV);
+
+	wl_clr_drv_status(cfg, AP_CREATING, dev);
+	wl_clr_drv_status(cfg, AP_CREATED, dev);
+	cfg->ap_oper_channel = 0;
+
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+		dev_role = NL80211_IFTYPE_AP;
+		WL_DBG(("stopping AP operation\n"));
+	} else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+		WL_DBG(("stopping P2P GO operation\n"));
+	} else {
+		WL_ERR(("no AP/P2P GO interface is operational.\n"));
+		return -EINVAL;
+	}
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role)) {
+		WL_ERR(("role integrity check failed \n"));
+		err = -EINVAL;
+		goto exit;
+	}
+
+	/* Clear AP/GO connected status */
+	wl_clr_drv_status(cfg, CONNECTED, dev);
+	if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 0)) < 0) {
+		WL_ERR(("bss down error %d\n", err));
+	}
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+#ifdef PKT_FILTER_SUPPORT
+		/* Enable packet filter */
+		if (dhd->early_suspended) {
+			WL_ERR(("Enable pkt_filter\n"));
+			dhd_enable_packet_filter(1, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+		/* IF SoftAP is disabled, enable arpoe back for STA mode. */
+		if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+			dhd_arp_offload_set(dhd, dhd_arp_mode);
+			dhd_arp_offload_enable(dhd, TRUE);
+		}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+		if (is_rsdb_supported == 0) {
+			if (dhd_download_fw_on_driverload && bssidx == 0) {
+				wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+				wldev_iovar_setint(dev, "apsta", 1);
+			}
+			/* For non-rsdb chips, we use stand alone AP. Do wl down on stop AP */
+			err = wldev_ioctl_set(dev, WLC_UP, &ap, sizeof(s32));
+			if (unlikely(err)) {
+				WL_ERR(("WLC_UP error (%d)\n", err));
+				err = -EINVAL;
+				goto exit;
+			}
+		}
+
+		wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+		wl_set_ap_rps(dev, FALSE, dev->name);
+		wl_cfg80211_init_ap_rps(cfg);
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+	} else {
+		WL_DBG(("Stopping P2P GO \n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub),
+			DHD_EVENT_TIMEOUT_MS*3);
+		DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub));
+	}
+exit:
+#ifdef WLTDLS
+	if (bssidx == 0) {
+		/* re-enable TDLS if the number of connected interfaces is less than 2 */
+		wl_cfg80211_tdls_config(cfg, TDLS_STATE_AP_DELETE, false);
+	}
+#endif /* WLTDLS */
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		/* clear the AP mode */
+		dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+	}
+#ifdef SUPPORT_SET_CAC
+	wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+	return err;
+}
+
+static s32
+wl_cfg80211_change_beacon(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct parsed_ies ies;
+	u32 dev_role = 0;
+	s32 bssidx = 0;
+	bool pbc = 0;
+
+	WL_DBG(("Enter \n"));
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	} else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+		dev_role = NL80211_IFTYPE_AP;
+	} else {
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role)) {
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P already down status!\n"));
+		err = BCME_ERROR;
+		goto fail;
+	}
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) {
+		WL_ERR(("Parse IEs failed \n"));
+		goto fail;
+	}
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+		/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+		if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+			wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+			WL_DBG((" WPS AP, wps_ie exists pbc=%d\n", pbc));
+			if (pbc)
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+			else
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+		}
+	}
+
+fail:
+	return err;
+}
+#else
+static s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+	struct beacon_parameters *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 ie_offset = 0;
+	s32 bssidx = 0;
+	u32 dev_role = NL80211_IFTYPE_AP;
+	struct parsed_ies ies;
+	bcm_tlv_t *ssid_ie;
+	bool pbc = 0;
+	bool privacy;
+	bool is_bss_up = 0;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+	WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+		info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	} else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+		dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role)) {
+		err = -ENODEV;
+		goto fail;
+	}
+
+	if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P already down status!\n"));
+		err = BCME_ERROR;
+		goto fail;
+	}
+
+	ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+	/* find the SSID */
+	if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+		info->head_len - ie_offset,
+		DOT11_MNG_SSID_ID)) != NULL) {
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(&cfg->hostapd_ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
+			cfg->hostapd_ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
+			memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data,
+				cfg->hostapd_ssid.SSID_len);
+		} else {
+			/* P2P GO */
+			memset(&cfg->p2p->ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
+			cfg->p2p->ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
+			memcpy(cfg->p2p->ssid.SSID, ssid_ie->data,
+				cfg->p2p->ssid.SSID_len);
+		}
+	}
+
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, &ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+		VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+		info->tail_len)) < 0) {
+		WL_ERR(("Beacon set IEs failed \n"));
+		goto fail;
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+		VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies,
+		info->proberesp_ies_len)) < 0) {
+		WL_ERR(("ProbeRsp set IEs failed \n"));
+		goto fail;
+	} else {
+		WL_DBG(("Applied Vndr IEs for ProbeRsp \n"));
+	}
+#endif
+
+	is_bss_up = wl_cfg80211_bss_isup(dev, bssidx);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	privacy = info->privacy;
+#else
+	privacy = 0;
+#endif
+	if (!is_bss_up &&
+		(wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx, privacy) < 0))
+	{
+		WL_ERR(("Beacon set security failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	/* Set BI and DTIM period */
+	if (info->interval) {
+		if ((err = wldev_ioctl_set(dev, WLC_SET_BCNPRD,
+			&info->interval, sizeof(s32))) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl_set(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32))) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	/* If bss is already up, skip bring up */
+	if (!is_bss_up &&
+		(err = wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx)) < 0)
+	{
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+	/* Set GC/STA SCB expiry timings. */
+	if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
+		WL_ERR(("scb setting failed \n"));
+//		goto fail;
+	}
+
+	if (wl_get_drv_status(cfg, AP_CREATED, dev)) {
+		/* Soft AP already running. Update changed params */
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+
+	/* Enable Probe Req filter */
+	if (((dev_role == NL80211_IFTYPE_P2P_GO) ||
+		(dev_role == NL80211_IFTYPE_AP)) && (ies.wps_ie != NULL)) {
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc)
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+	}
+
+	WL_DBG(("** ADD/SET beacon done **\n"));
+
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* clear the AP mode */
+			dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+		}
+	}
+	return err;
+
+}
+
+static s32
+wl_cfg80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
+{
+	int err = 0;
+	s32 bssidx = 0;
+	int infra = 0;
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+	WL_DBG(("Enter. \n"));
+
+	if (!wdev) {
+		WL_ERR(("wdev null \n"));
+		return -EINVAL;
+	}
+
+	if ((wdev->iftype != NL80211_IFTYPE_P2P_GO) && (wdev->iftype != NL80211_IFTYPE_AP)) {
+		WL_ERR(("Unsupported iface type iftype:%d \n", wdev->iftype));
+	}
+
+	wl_clr_drv_status(cfg, AP_CREATING, dev);
+	wl_clr_drv_status(cfg, AP_CREATED, dev);
+
+	/* Clear AP/GO connected status */
+	wl_clr_drv_status(cfg, CONNECTED, dev);
+
+	cfg->ap_oper_channel = 0;
+
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	/* Do bss down */
+	if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 0)) < 0) {
+		WL_ERR(("bss down error %d\n", err));
+	}
+
+	/* fall through is intentional */
+	err = wldev_ioctl_set(dev, WLC_SET_INFRA, &infra, sizeof(s32));
+	if (err < 0) {
+		WL_ERR(("SET INFRA error %d\n", err));
+	}
+	wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
+
+	if (wdev->iftype == NL80211_IFTYPE_AP) {
+		/* clear the AP mode */
+		dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+	}
+
+	return 0;
+}
+#endif 
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME		30
+#define PNO_REPEAT		4
+#define PNO_FREQ_EXPO_MAX	2
+static bool
+is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
+{
+	int i;
+
+	if (!ssid || !ssid_list)
+		return FALSE;
+
+	for (i = 0; i < count; i++) {
+		if (ssid->ssid_len == ssid_list[i].ssid_len) {
+			if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
+				return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+static int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                             struct net_device *dev,
+                             struct cfg80211_sched_scan_request *request)
+{
+	ushort pno_time = PNO_TIME;
+	int pno_repeat = PNO_REPEAT;
+	int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+	wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	struct cfg80211_ssid *ssid = NULL;
+	struct cfg80211_ssid *hidden_ssid_list = NULL;
+	log_conn_event_t *event_data = NULL;
+	tlv_log *tlv_data = NULL;
+	u32 alloc_len, tlv_len;
+	u32 payload_len;
+	int ssid_cnt = 0;
+	int i;
+	int ret = 0;
+	unsigned long flags;
+
+	if (!request) {
+		WL_ERR(("Sched scan request was NULL\n"));
+		return -EINVAL;
+	}
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN START\n"));
+	WL_PNO(("Enter n_match_sets:%d   n_ssids:%d \n",
+		request->n_match_sets, request->n_ssids));
+	WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+		request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+
+	if (!request->n_ssids || !request->n_match_sets) {
+		WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+		return -EINVAL;
+	}
+
+	memset(&ssids_local, 0, sizeof(ssids_local));
+
+	if (request->n_ssids > 0) {
+		hidden_ssid_list = request->ssids;
+	}
+
+	if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+		alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN;
+		event_data = MALLOC(dhdp->osh, alloc_len);
+		if (!event_data) {
+			WL_ERR(("%s: failed to allocate log_conn_event_t with "
+						"length(%d)\n", __func__, alloc_len));
+			return -ENOMEM;
+		}
+		memset(event_data, 0, alloc_len);
+		event_data->tlvs = NULL;
+		tlv_len = sizeof(tlv_log);
+		event_data->tlvs = (tlv_log *)MALLOC(dhdp->osh, tlv_len);
+		if (!event_data->tlvs) {
+			WL_ERR(("%s: failed to allocate log_tlv with "
+					"length(%d)\n", __func__, tlv_len));
+			MFREE(dhdp->osh, event_data, alloc_len);
+			return -ENOMEM;
+		}
+	}
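+	/* Build the PFN SSID list from the match sets; entries that also appear
+	 * in request->ssids are scanned as hidden SSIDs.
+	 */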
+	for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
+		ssid = &request->match_sets[i].ssid;
+		/* No need to include null ssid */
+		if (ssid->ssid_len) {
+			ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
+				(uint32)DOT11_MAX_SSID_LEN);
+			memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid,
+				ssids_local[ssid_cnt].SSID_len);
+			if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
+				ssids_local[ssid_cnt].hidden = TRUE;
+				WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
+			} else {
+				ssids_local[ssid_cnt].hidden = FALSE;
+				WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
+			}
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
+			if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+				ssids_local[ssid_cnt].rssi_thresh =
+				      (int8)request->match_sets[i].rssi_thold;
+			}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
+			ssid_cnt++;
+		}
+	}
+
+	if (ssid_cnt) {
+		if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
+			pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
+			WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+			for (i = 0; i < ssid_cnt; i++) {
+				payload_len = sizeof(log_conn_event_t);
+				event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
+				tlv_data = event_data->tlvs;
+				/* ssid */
+				tlv_data->tag = WIFI_TAG_SSID;
+				tlv_data->len = ssids_local[i].SSID_len;
+				memcpy(tlv_data->value, ssids_local[i].SSID,
+					ssids_local[i].SSID_len);
+				payload_len += TLV_LOG_SIZE(tlv_data);
+
+				dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+					event_data, payload_len);
+			}
+		}
+
+		spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+		cfg->sched_scan_req = request;
+		spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	} else {
+		ret = -EINVAL;
+	}
+exit:
+	if (event_data) {
+		MFREE(dhdp->osh, event_data->tlvs, tlv_len);
+		MFREE(dhdp->osh, event_data, alloc_len);
+	}
+	return ret;
+}
+
+static int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+	, u64 reqid
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN STOP\n"));
+
+	BCM_REFERENCE(dhdp);
+	if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
+		WL_ERR(("PNO Stop for SSID failed"));
+	} else {
+		DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
+	}
+
+	if (cfg->scan_request && cfg->sched_scan_running) {
+		WL_PNO((">>> Sched scan running. Aborting it..\n"));
+		wl_notify_escan_complete(cfg, dev, true, true);
+	}
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	cfg->sched_scan_req = NULL;
+	cfg->sched_scan_running = FALSE;
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * Currently the dump_obss IOVAR is returning string as output so we need to
+ * parse the output buffer in an unoptimized way. Going forward if we get the
+ * IOVAR output in binary format this method can be optimized
+ */
+static int wl_parse_dump_obss(char *buf, struct wl_dump_survey *survey)
+{
+	int i;
+	char *token;
+	char delim[] = " \n";
+
+	token = strsep(&buf, delim);
+	while (token != NULL) {
+		if (!strcmp(token, "OBSS")) {
+			for (i = 0; i < OBSS_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->obss = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "IBSS")) {
+			for (i = 0; i < IBSS_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->ibss = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "TXDur")) {
+			for (i = 0; i < TX_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->tx = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Category")) {
+			for (i = 0; i < CTG_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->no_ctg = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Packet")) {
+			for (i = 0; i < PKT_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->no_pckt = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Opp(time):")) {
+			for (i = 0; i < IDLE_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->idle = simple_strtoul(token, NULL, 10);
+		}
+
+		token = strsep(&buf, delim);
+	}
+
+	return 0;
+}
+
+static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req,
+	struct wl_dump_survey *survey)
+{
+	cca_stats_n_flags *results;
+	char *buf;
+	int retry, err;
+
+	buf = kzalloc(sizeof(char) * WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		WL_ERR(("%s: buf alloc failed\n", __func__));
+		return -ENOMEM;
+	}
+
+	retry = IOCTL_RETRY_COUNT;
+	while (retry--) {
+		err = wldev_iovar_getbuf(ndev, "dump_obss", &req, sizeof(req),
+			buf, WLC_IOCTL_MAXLEN, NULL);
+		if (err >=  0) {
+			break;
+		}
+		WL_DBG(("attempt = %d, err = %d, \n",
+			(IOCTL_RETRY_COUNT - retry), err));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("failure, dump_obss IOVAR failed\n"));
+		err = -EINVAL;
+		goto exit;
+	}
+
+	results = (cca_stats_n_flags *)(buf);
+	wl_parse_dump_obss(results->buf, survey);
+	kfree(buf);
+
+	return 0;
+exit:
+	kfree(buf);
+	return err;
+}
+
+static int wl_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
+	int idx, struct survey_info *info)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_dump_survey *survey;
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *chan;
+	cca_msrmnt_query req;
+	int val, err, noise, retry;
+
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		return -ENOENT;
+	}
+	band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	if (band && idx >= band->n_channels) {
+		idx -= band->n_channels;
+		band = NULL;
+	}
+
+	if (!band || idx >= band->n_channels) {
+		/* Move to 5G band */
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+		if (idx >= band->n_channels) {
+			return -ENOENT;
+		}
+	}
+
+	chan = &band->channels[idx];
+	/* Setting current channel to the requested channel */
+	if ((err = wl_cfg80211_set_channel(wiphy, ndev, chan,
+		NL80211_CHAN_HT20)) < 0) {
+		WL_ERR(("Set channel failed \n"));
+	}
+
+	if (!idx) {
+		/* Set interface up, explicitly. */
+		val = 1;
+		err = wldev_ioctl_set(ndev, WLC_UP, (void *)&val, sizeof(val));
+		if (err < 0) {
+			WL_ERR(("set interface up failed, error = %d\n", err));
+		}
+	}
+
+	/* Get noise value */
+	retry = IOCTL_RETRY_COUNT;
+	while (retry--) {
+		noise = 0;
+		err = wldev_ioctl_get(ndev, WLC_GET_PHY_NOISE, &noise,
+			sizeof(noise));
+		if (err >=  0) {
+			break;
+		}
+		WL_DBG(("attempt = %d, err = %d, \n",
+			(IOCTL_RETRY_COUNT - retry), err));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("Get Phy Noise failed, error = %d\n", err));
+		noise = CHAN_NOISE_DUMMY;
+	}
+
+	survey = (struct wl_dump_survey *) kzalloc(sizeof(struct wl_dump_survey),
+		GFP_KERNEL);
+	if (unlikely(!survey)) {
+		WL_ERR(("%s: alloc failed\n", __func__));
+		return -ENOMEM;
+	}
+
+	/* Start Measurement for obss stats on current channel */
+	req.msrmnt_query = 0;
+	req.time_req = ACS_MSRMNT_DELAY;
+	if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+		goto exit;
+	}
+
+	/*
+	 * Wait for the measurement to complete, adding a buffer value of 10 to take
+	 * into consideration any delay in IOVAR completion
+	 */
+	msleep(ACS_MSRMNT_DELAY + 10);
+
+	/* Issue IOVAR to collect measurement results */
+	req.msrmnt_query = 1;
+	if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+		goto exit;
+	}
+
+	info->channel = chan;
+	info->noise = noise;
+	info->channel_time = ACS_MSRMNT_DELAY;
+	info->channel_time_busy = ACS_MSRMNT_DELAY - survey->idle;
+	info->channel_time_rx = survey->obss + survey->ibss + survey->no_ctg +
+		survey->no_pckt;
+	info->channel_time_tx = survey->tx;
+	info->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_CHANNEL_TIME |
+		SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_RX |
+		SURVEY_INFO_CHANNEL_TIME_TX;
+	kfree(survey);
+
+	return 0;
+exit:
+	kfree(survey);
+	return err;
+}
+#endif /* WL_SUPPORT_ACS */
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
+	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
+	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	.start_p2p_device = wl_cfgp2p_start_p2p_device,
+	.stop_p2p_device = wl_cfgp2p_stop_p2p_device,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	.scan = wl_cfg80211_scan,
+	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
+	.join_ibss = wl_cfg80211_join_ibss,
+	.leave_ibss = wl_cfg80211_leave_ibss,
+	.get_station = wl_cfg80211_get_station,
+	.set_tx_power = wl_cfg80211_set_tx_power,
+	.get_tx_power = wl_cfg80211_get_tx_power,
+	.add_key = wl_cfg80211_add_key,
+	.del_key = wl_cfg80211_del_key,
+	.get_key = wl_cfg80211_get_key,
+	.set_default_key = wl_cfg80211_config_default_key,
+	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
+	.connect = wl_cfg80211_connect,
+	.disconnect = wl_cfg80211_disconnect,
+	.suspend = wl_cfg80211_suspend,
+	.resume = wl_cfg80211_resume,
+	.set_pmksa = wl_cfg80211_set_pmksa,
+	.del_pmksa = wl_cfg80211_del_pmksa,
+	.flush_pmksa = wl_cfg80211_flush_pmksa,
+	.remain_on_channel = wl_cfg80211_remain_on_channel,
+	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+	.mgmt_tx = wl_cfg80211_mgmt_tx,
+	.update_mgmt_frame_registrations = wl_cfg80211_update_mgmt_frame_registrations,
+	.change_bss = wl_cfg80211_change_bss,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	.set_channel = wl_cfg80211_set_channel,
+#endif 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+	.set_beacon = wl_cfg80211_add_set_beacon,
+	.add_beacon = wl_cfg80211_add_set_beacon,
+	.del_beacon = wl_cfg80211_del_beacon,
+#else
+	.change_beacon = wl_cfg80211_change_beacon,
+	.start_ap = wl_cfg80211_start_ap,
+	.stop_ap = wl_cfg80211_stop_ap,
+#endif 
+#ifdef WL_SCHED_SCAN
+	.sched_scan_start = wl_cfg80211_sched_scan_start,
+	.sched_scan_stop = wl_cfg80211_sched_scan_stop,
+#endif /* WL_SCHED_SCAN */
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+	.del_station = wl_cfg80211_del_station,
+	.change_station = wl_cfg80211_change_station,
+	.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
+#ifdef WLMESH
+	.join_mesh = wl_cfg80211_join_mesh,
+	.leave_mesh = wl_cfg80211_leave_mesh,
+#endif /* WLMESH */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+	.tdls_mgmt = wl_cfg80211_tdls_mgmt,
+	.tdls_oper = wl_cfg80211_tdls_oper,
+#endif 
+#ifdef WL_SUPPORT_ACS
+	.dump_survey = wl_cfg80211_dump_survey,
+#endif /* WL_SUPPORT_ACS */
+#ifdef WL_CFG80211_ACL
+	.set_mac_acl = wl_cfg80211_set_mac_acl,
+#endif /* WL_CFG80211_ACL */
+#ifdef GTK_OFFLOAD_SUPPORT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+	.set_rekey_data = wl_cfg80211_set_rekey_data,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
+};
+
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+	s32 err = 0;
+
+	switch (mode) {
+	case WL_MODE_BSS:
+		return NL80211_IFTYPE_STATION;
+	case WL_MODE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	case WL_MODE_AP:
+		return NL80211_IFTYPE_AP;
+#ifdef WLMESH
+	case WL_MODE_MESH:
+		return NL80211_IFTYPE_MESH_POINT;
+#endif
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+
+	return err;
+}
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#define WL_CFG80211_REG_NOTIFIER() static int wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+#else
+#define WL_CFG80211_REG_NOTIFIER() static void wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+#endif /* kernel version < 3.9.0 */
+#endif
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+WL_CFG80211_REG_NOTIFIER()
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	int ret = 0;
+	int revinfo = -1;
+
+	if (!request || !cfg) {
+		WL_ERR(("Invalid arg\n"));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 11))
+		return -EINVAL;
+#else
+		return;
+#endif /* kernel version < 3.10.11 */
+	}
+
+	WL_DBG(("ccode: %c%c Initiator: %d\n",
+		request->alpha2[0], request->alpha2[1], request->initiator));
+
+	/* We support only REGDOM_SET_BY_USER as of now */
+	if ((request->initiator != NL80211_REGDOM_SET_BY_USER) &&
+		(request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+		WL_ERR(("reg_notifier for initiator:%d not supported : set default\n",
+			request->initiator));
+		/* If the country is not supported by the regdb,
+		 * let the driver set up the platform default locale.
+		 */
+	}
+
+	WL_ERR(("Set country code %c%c from %s\n",
+		request->alpha2[0], request->alpha2[1],
+		((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User")));
+
+	if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2,
+		false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false),
+		revinfo)) < 0) {
+		WL_ERR(("set country Failed :%d\n", ret));
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 11))
+	return ret;
+#else
+	return;
+#endif /* kernel version < 3.10.11 */
+}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static const struct wiphy_wowlan_support brcm_wowlan_support = {
+	.flags = WIPHY_WOWLAN_ANY,
+	.n_patterns = WL_WOWLAN_MAX_PATTERNS,
+	.pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN,
+	.pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+	.max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
+#endif /* CONFIG_PM */
+
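+/* Allocate the wiphy, advertise the driver's scan, PNO, AP and WoWLAN
+ * capabilities and register it with cfg80211.
+ */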
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, dhd_pub_t *context)
+{
+	s32 err = 0;
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	struct cfg80211_wowlan *brcm_wowlan_config = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+
+//#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+	dhd_pub_t *dhd = (dhd_pub_t *)context;
+	BCM_REFERENCE(dhd);
+
+	if (!dhd) {
+		WL_ERR(("DHD is NULL!!"));
+		err = -ENODEV;
+		return err;
+	}
+//#endif 
+
+	wdev->wiphy =
+	    wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211));
+	if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Could not allocate wiphy device\n"));
+		err = -ENOMEM;
+		return err;
+	}
+	set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	/* Report how many SSIDs the driver can support per scan request */
+	wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+#ifdef WL_SCHED_SCAN
+	wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+	wdev->wiphy->max_sched_scan_plan_interval = PNO_SCAN_MAX_FW_SEC;
+#else
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+#endif /* WL_SCHED_SCAN */
+#ifdef WLMESH
+	wdev->wiphy->flags |= WIPHY_FLAG_MESH_AUTH;
+#endif
+	wdev->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION)
+		| BIT(NL80211_IFTYPE_ADHOC)
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_MONITOR)
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO)
+#endif /* WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_DEVICE)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef WLMESH
+		| BIT(NL80211_IFTYPE_MESH_POINT)
+#endif /* WLMESH */
+		| BIT(NL80211_IFTYPE_AP);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+	WL_DBG(("Setting interface combinations for common mode\n"));
+	wdev->wiphy->iface_combinations = common_iface_combinations;
+	wdev->wiphy->n_iface_combinations =
+		ARRAY_SIZE(common_iface_combinations);
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wdev->wiphy->cipher_suites = __wl_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wdev->wiphy->max_remain_on_channel_duration = 5000;
+	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif				/* !WL_POWERSAVE_DISABLED */
+	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+		WIPHY_FLAG_4ADDR_AP |
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
+		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+		WIPHY_FLAG_4ADDR_STATION;
+#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \
+	KERNEL_VERSION(3, 2, 0)))
+	/*
+	 * If the FW ROAM flag is advertised, the upper layer won't provide
+	 * the bssid & freq in the connect command. This results in a delay
+	 * in the initial connection time because the firmware has to do a
+	 * full channel scan to figure out the channel & bssid. Kernels
+	 * >= 3.15 provide bssid_hint & freq_hint and hence don't have this
+	 * issue. If this flag needs to be advertised for kernels < 3.15,
+	 * it is suggested to use RCC along with it to avoid the initial
+	 * connection delay.
+	 */
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif 
+#ifdef UNSET_FW_ROAM_WIPHY_FLAG
+	wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif /* UNSET_FW_ROAM_WIPHY_FLAG */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+	wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+		WIPHY_FLAG_OFFCHAN_TX;
+#endif
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	4, 0))
+	/* From kernel 3.4 onwards the AP_SME flag can be advertised
+	 * to remove the patch from the supplicant
+	 */
+	wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
+#ifdef WL_CFG80211_ACL
+	/* Configure ACL capabilities. */
+	wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+	/* The supplicant distinguishes between SoftAP mode and other
+	 * modes (e.g. P2P, WPS, HS2.0) when it builds the probe
+	 * response frame, starting from Supplicant MR1 and kernel 3.4.0
+	 * or later. To add vendor-specific IEs into the probe response
+	 * frame in SoftAP mode, the AP_PROBE_RESP_OFFLOAD flag is set
+	 * in wiphy->flags.
+	 */
+	if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) {
+		wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+		wdev->wiphy->probe_resp_offload = 0;
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+#endif
+
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+	/*
+	 * From the linux-3.10 kernel onwards, a wowlan packet filter is mandated
+	 * to avoid disconnection of the connected network before suspend. So a
+	 * dummy wowlan filter is configured for kernels linux-3.8 and above.
+	 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	wdev->wiphy->wowlan = &brcm_wowlan_support;
+	/* If this is not provided, the cfg80211 stack will get a disconnect
+	 * during suspend.
+	 */
+	brcm_wowlan_config = kmalloc(sizeof(struct cfg80211_wowlan), GFP_KERNEL);
+	if (brcm_wowlan_config) {
+		brcm_wowlan_config->disconnect = true;
+		brcm_wowlan_config->gtk_rekey_failure = true;
+		brcm_wowlan_config->eap_identity_req = true;
+		brcm_wowlan_config->four_way_handshake = true;
+		brcm_wowlan_config->patterns = NULL;
+		brcm_wowlan_config->n_patterns = 0;
+		brcm_wowlan_config->tcp = NULL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+		brcm_wowlan_config->nd_config = NULL;
+#endif
+	} else {
+		WL_ERR(("Cannot allocate memory for brcm_wowlan_config,"
+			" so wiphy->wowlan_config is set to NULL\n"));
+	}
+	wdev->wiphy->wowlan_config = brcm_wowlan_config;
+#else
+	wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+	wdev->wiphy->wowlan.n_patterns = WL_WOWLAN_MAX_PATTERNS;
+	wdev->wiphy->wowlan.pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN;
+	wdev->wiphy->wowlan.pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+	wdev->wiphy->wowlan.max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Registering custom regulatory\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else
+	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif
+	wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+	WL_ERR(("Registering Vendor80211\n"));
+	err = wl_cfgvendor_attach(wdev->wiphy, dhd);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Could not attach vendor commands (%d)\n", err));
+	}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+	/* Now we can register wiphy with cfg80211 module */
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Could not register wiphy device (%d)\n", err));
+		wiphy_free(wdev->wiphy);
+	}
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS;
+#endif
+
+	return err;
+}
+
+static void wl_free_wdev(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = cfg->wdev;
+	struct wiphy *wiphy = NULL;
+	if (!wdev) {
+		WL_ERR(("wdev is invalid\n"));
+		return;
+	}
+	if (wdev->wiphy) {
+		wiphy = wdev->wiphy;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+		wl_cfgvendor_detach(wdev->wiphy);
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+		/* Reset wowlan & wowlan_config before unregister to avoid a kernel panic */
+		WL_DBG(("wl_free_wdev: clearing wowlan config\n"));
+		wdev->wiphy->wowlan = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+		wiphy_unregister(wdev->wiphy);
+		wdev->wiphy->dev.parent = NULL;
+		wdev->wiphy = NULL;
+	}
+
+	wl_delete_all_netinfo(cfg);
+	if (wiphy)
+		wiphy_free(wiphy);
+
+	/* PLEASE do NOT call any function after wiphy_free(): the driver's private
+	 * structure "cfg", which is the private part of the wiphy, has already been
+	 * freed in wiphy_free().
+	 */
+}
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	s32 i;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(RSSIAVG)
+	int rssi;
+#endif
+#if defined(BSSCACHE)
+	wl_bss_cache_t *node;
+#endif
+
+	bss_list = cfg->bss_list;
+
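+	/* When BSSCACHE/RSSIAVG are enabled, prune stale and disconnected
+	 * entries, fold the new scan results into the caches and report from
+	 * the cache; otherwise the raw firmware scan list is reported below.
+	 */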
+	/* Free cache during p2p scanning */
+	if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+#if defined(RSSIAVG)
+		wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+		wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+	}
+
+	/* Delete disconnected cache */
+#if defined(BSSCACHE)
+	wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#if defined(RSSIAVG)
+	wl_delete_disconnected_rssi_cache(&cfg->g_rssi_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#endif
+	if (cfg->p2p_disconnected == 0)
+		memset(&cfg->disconnected_bssid, 0, ETHER_ADDR_LEN);
+#endif
+
+	/* Update cache */
+#if defined(RSSIAVG)
+	wl_update_rssi_cache(&cfg->g_rssi_cache_ctrl, bss_list);
+	if (!in_atomic())
+		wl_update_connected_rssi_cache(ndev, &cfg->g_rssi_cache_ctrl, &rssi);
+#endif
+#if defined(BSSCACHE)
+	wl_update_bss_cache(&cfg->g_bss_cache_ctrl,
+#if defined(RSSIAVG)
+		&cfg->g_rssi_cache_ctrl,
+#endif
+		bss_list);
+#endif
+
+	/* delete dirty cache */
+#if defined(RSSIAVG)
+	wl_delete_dirty_rssi_cache(&cfg->g_rssi_cache_ctrl);
+	wl_reset_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+	wl_delete_dirty_bss_cache(&cfg->g_bss_cache_ctrl);
+	wl_reset_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+
+#if defined(BSSCACHE)
+	if (cfg->p2p_disconnected > 0) {
+		// terence 20130703: Fix for wrong group_capab (timing issue)
+		wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#if defined(RSSIAVG)
+		wl_delete_disconnected_rssi_cache(&cfg->g_rssi_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#endif
+	}
+	WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+	node = cfg->g_bss_cache_ctrl.m_cache_head;
+	for (i=0; node && i<WL_AP_MAX; i++) {
+		bi = node->results.bss_info;
+		err = wl_inform_single_bss(cfg, bi, false);
+		node = node->next;
+	}
+	if (cfg->autochannel)
+		wl_ext_get_best_channel(ndev, &cfg->g_bss_cache_ctrl, ioctl_version,
+			&cfg->best_2g_ch, &cfg->best_5g_ch);
+#else
+	WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+	preempt_disable();
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i) {
+		if (cfg->p2p_disconnected > 0 && !memcmp(&bi->BSSID, &cfg->disconnected_bssid, ETHER_ADDR_LEN))
+			continue;
+		err = wl_inform_single_bss(cfg, bi, false);
+	}
+	preempt_enable();
+	if (cfg->autochannel)
+		wl_ext_get_best_channel(ndev, bss_list, ioctl_version,
+			&cfg->best_2g_ch, &cfg->best_5g_ch);
+#endif
+
+	if (cfg->p2p_disconnected > 0) {
+		// terence 20130703: Fix for wrong group_capab (timing issue)
+		cfg->p2p_disconnected++;
+		if (cfg->p2p_disconnected >= REPEATED_SCAN_RESULT_CNT+1) {
+			cfg->p2p_disconnected = 0;
+			memset(&cfg->disconnected_bssid, 0, ETHER_ADDR_LEN);
+		}
+	}
+
+	return err;
+}
+
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct wl_cfg80211_bss_info *notif_bss_info;
+	struct wl_scan_req *sr = wl_to_sr(cfg);
+	struct beacon_proberesp *beacon_proberesp;
+	struct cfg80211_bss *cbss = NULL;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	log_conn_event_t *event_data = NULL;
+	tlv_log *tlv_data = NULL;
+	u32 alloc_len, tlv_len;
+	u32 payload_len;
+	s32 mgmt_type;
+	s32 signal;
+	u32 freq;
+	s32 err = 0;
+	gfp_t aflags;
+	chanspec_t chanspec;
+
+	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		return err;
+	}
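+	/* Pick allocation flags based on the calling context: GFP_ATOMIC when
+	 * running in atomic context, GFP_KERNEL otherwise.
+	 */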
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+		- sizeof(u8) + WL_BSS_INFO_MAX, aflags);
+	if (unlikely(!notif_bss_info)) {
+		WL_ERR(("notif_bss_info alloc failed\n"));
+		return -ENOMEM;
+	}
+	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
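+	/* Convert the firmware chanspec to host order and take its control
+	 * channel so the correct band (2.4 GHz vs 5 GHz) is selected below.
+	 */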
+	chanspec = wl_chspec_driver_to_host(bi->chanspec);
+	notif_bss_info->channel = wf_chspec_ctlchan(chanspec);
+
+	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
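+	/* Start from the raw RSSI reported by the firmware; depending on the
+	 * build options it is then smoothed (RSSIAVG), offset-corrected
+	 * (RSSIOFFSET) or clamped to RSSI_MAXVAL before being reported.
+	 */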
+	notif_bss_info->rssi = dtoh16(bi->RSSI);
+#if defined(RSSIAVG)
+	notif_bss_info->rssi = wl_get_avg_rssi(&cfg->g_rssi_cache_ctrl, &bi->BSSID);
+	if (notif_bss_info->rssi == RSSI_MINVAL)
+		notif_bss_info->rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#endif
+#if defined(RSSIOFFSET)
+	notif_bss_info->rssi = wl_update_rssi_offset(bcmcfg_to_prmry_ndev(cfg), notif_bss_info->rssi);
+#endif
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+	// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+	notif_bss_info->rssi = MIN(notif_bss_info->rssi, RSSI_MAXVAL);
+#endif
+	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	mgmt_type = cfg->active_scan ?
+		IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+		mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+	}
+	beacon_proberesp = cfg->active_scan ?
+		(struct beacon_proberesp *)&mgmt->u.probe_resp :
+		(struct beacon_proberesp *)&mgmt->u.beacon;
+	beacon_proberesp->timestamp = 0;
+	beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+	beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+	wl_rst_ie(cfg);
+	wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+	wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+	wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+		offsetof(struct wl_cfg80211_bss_info, frame_buf));
+	notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+		u.beacon.variable) + wl_get_ielen(cfg);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+	if (freq == 0) {
+		WL_ERR(("Invalid channel, fail to change channel to freq\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	channel = ieee80211_get_channel(wiphy, freq);
+	WL_SCAN(("BSSID %pM, channel %2d(%2d %sMHz), rssi %3d, capa 0x%04x, mgmt_type %d, "
+		"frame_len %d, SSID \"%s\"\n",
+		&bi->BSSID, notif_bss_info->channel, CHSPEC_CHANNEL(chanspec),
+		CHSPEC_IS20(chanspec)?"20":
+		CHSPEC_IS40(chanspec)?"40":
+		CHSPEC_IS80(chanspec)?"80":"160",
+		notif_bss_info->rssi, mgmt->u.beacon.capab_info, mgmt_type,
+		notif_bss_info->frame_len, bi->SSID));
+	if (unlikely(!channel)) {
+		WL_ERR(("ieee80211_get_channel error, freq=%d, channel=%d\n",
+			freq, notif_bss_info->channel));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+
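+	/* cfg80211 expects the signal in mBm (CFG80211_SIGNAL_TYPE_MBM), so
+	 * scale the dBm RSSI by 100.
+	 */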
+	signal = notif_bss_info->rssi * 100;
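+	/* The timestamp field was zeroed above; fill it from the boottime
+	 * clock (in microseconds) before handing the frame to cfg80211.
+	 */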
+	if (!mgmt->u.probe_resp.timestamp) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+		struct timespec64 ts;
+		ktime_get_boottime_ts64(&ts);
+		mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+				+ ts.tv_nsec / 1000;
+#else
+		struct timeval tv;
+		do_gettimeofday(&tv);
+		mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+				+ tv.tv_usec;
+#endif
+	}
+
+
+	cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+		le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
+	if (unlikely(!cbss)) {
+		WL_ERR(("cfg80211_inform_bss_frame error\n"));
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	CFG80211_PUT_BSS(wiphy, cbss);
+
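+	/* For scheduled-scan (PNO) results, also log the SSID, channel and RSSI
+	 * as TLVs into the DHD event debug ring.
+	 */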
+	if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID) &&
+			(cfg->sched_scan_req && !cfg->scan_request)) {
+		alloc_len = sizeof(log_conn_event_t) + IEEE80211_MAX_SSID_LEN + sizeof(uint16) +
+			sizeof(int16);
+		event_data = MALLOCZ(dhdp->osh, alloc_len);
+		if (!event_data) {
+			WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+				"length(%d)\n", __func__, alloc_len));
+			goto out_err;
+		}
+		tlv_len = 3 * sizeof(tlv_log);
+		event_data->tlvs = MALLOC(dhdp->osh, tlv_len);
+		if (!event_data->tlvs) {
+			WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+						"length(%d)\n", __func__, tlv_len));
+			goto out_err;
+		}
+
+		payload_len = sizeof(log_conn_event_t);
+		event_data->event = WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND;
+		tlv_data = event_data->tlvs;
+
+		/* ssid */
+		tlv_data->tag = WIFI_TAG_SSID;
+		tlv_data->len = bi->SSID_len;
+		memcpy(tlv_data->value, bi->SSID, bi->SSID_len);
+		payload_len += TLV_LOG_SIZE(tlv_data);
+		tlv_data = TLV_LOG_NEXT(tlv_data);
+
+		/* channel */
+		tlv_data->tag = WIFI_TAG_CHANNEL;
+		tlv_data->len = sizeof(uint16);
+		memcpy(tlv_data->value, &notif_bss_info->channel, sizeof(uint16));
+		payload_len += TLV_LOG_SIZE(tlv_data);
+		tlv_data = TLV_LOG_NEXT(tlv_data);
+
+		/* rssi */
+		tlv_data->tag = WIFI_TAG_RSSI;
+		tlv_data->len = sizeof(int16);
+		memcpy(tlv_data->value, &notif_bss_info->rssi, sizeof(int16));
+		payload_len += TLV_LOG_SIZE(tlv_data);
+		tlv_data = TLV_LOG_NEXT(tlv_data);
+
+		dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+				event_data, payload_len);
+		MFREE(dhdp->osh, event_data->tlvs, tlv_len);
+		MFREE(dhdp->osh, event_data, alloc_len);
+	}
+
+out_err:
+	kfree(notif_bss_info);
+	return err;
+}
+
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, struct net_device *ndev)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status =  ntoh32(e->status);
+	u16 flags = ntoh16(e->flags);
+#if defined(CUSTOM_SET_ANTNPM) || defined(CUSTOM_SET_OCLOFF)
+	dhd_pub_t *dhd;
+	dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_ANTNPM || CUSTOM_SET_OCLOFF */
+
+	WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
+	if (event == WLC_E_SET_SSID) {
+		if (status == WLC_E_STATUS_SUCCESS) {
+#ifdef CUSTOM_SET_ANTNPM
+			if (dhd->mimo_ant_set) {
+				int err = 0;
+
+				WL_ERR(("[WIFI_SEC] mimo_ant_set = %d\n", dhd->mimo_ant_set));
+				err = wldev_iovar_setint(ndev, "txchain", dhd->mimo_ant_set);
+				if (err != 0) {
+					WL_ERR(("[WIFI_SEC] Fail set txchain\n"));
+				}
+				err = wldev_iovar_setint(ndev, "rxchain", dhd->mimo_ant_set);
+				if (err != 0) {
+					WL_ERR(("[WIFI_SEC] Fail set rxchain\n"));
+				}
+			}
+#endif /* CUSTOM_SET_ANTNPM */
+#ifdef CUSTOM_SET_OCLOFF
+			if (dhd->ocl_off) {
+				int err = 0;
+				int ocl_enable = 0;
+				err = wldev_iovar_setint(ndev, "ocl_enable", ocl_enable);
+				if (err != 0) {
+					WL_ERR(("[WIFI_SEC] %s: Set ocl_enable %d failed %d\n",
+						__FUNCTION__, ocl_enable, err));
+				} else {
+					WL_ERR(("[WIFI_SEC] %s: Set ocl_enable %d succeeded %d\n",
+						__FUNCTION__, ocl_enable, err));
+				}
+			}
+#endif /* CUSTOM_SET_OCLOFF */
+			if (!wl_is_ibssmode(cfg, ndev))
+				return true;
+		}
+	} else if (event == WLC_E_LINK) {
+		if (flags & WLC_EVENT_MSG_LINK)
+			return true;
+	}
+
+	WL_DBG(("wl_is_linkup false\n"));
+	return false;
+}
+
+#ifdef WL_LASTEVT
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data)
+{
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+	wl_last_event_t *last_event = (wl_last_event_t *)data;
+	u32 len = ntoh32(e->datalen);
+
+	if (event == WLC_E_DEAUTH_IND ||
+	event == WLC_E_DISASSOC_IND ||
+	event == WLC_E_DISASSOC ||
+	event == WLC_E_DEAUTH) {
+		WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
+		return true;
+	} else if (event == WLC_E_LINK) {
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			if (last_event && len > 0) {
+				u32 current_time = last_event->current_time;
+				u32 timestamp = last_event->timestamp;
+				u32 event_type = last_event->event.event_type;
+				u32 status = last_event->event.status;
+				u32 reason = last_event->event.reason;
+
+				WL_ERR(("Last roam event before disconnection : current_time %d,"
+					" time %d, type %d, status %d, reason %d\n",
+					current_time, timestamp, event_type, status, reason));
+			}
+			WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
+			return true;
+		}
+	}
+
+	return false;
+}
+#else
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+
+	if (event == WLC_E_DEAUTH_IND ||
+	event == WLC_E_DISASSOC_IND ||
+	event == WLC_E_DISASSOC ||
+	event == WLC_E_DEAUTH) {
+		WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
+		return true;
+	} else if (event == WLC_E_LINK) {
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
+			return true;
+		}
+	}
+
+	return false;
+}
+#endif /* WL_LASTEVT */
+
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+
+	if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
+		return true;
+	if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
+		return true;
+
+	return false;
+}
+
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to the kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	u32 len = ntoh32(e->datalen);
+	u32 status = ntoh32(e->status);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	bool isfree = false;
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	s32 freq;
+	s32 channel;
+	u8 *body = NULL;
+	u16 fc = 0;
+
+	struct ieee80211_supported_band *band;
+	struct ether_addr da;
+	struct ether_addr bssid;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	channel_info_t ci;
+#else
+	struct station_info sinfo;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */
+
+	WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason));
+	/* if link down, bsscfg is disabled. */
+	if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
+		wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
+		wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+		WL_INFORM(("AP mode link down !! \n"));
+		complete(&cfg->iface_disable);
+		return 0;
+	}
+
+	if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) &&
+		(reason == WLC_E_REASON_INITIAL_ASSOC) &&
+		(wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) {
+		if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+			/* AP/GO brought up successfully in firmware */
+			printf("%s: ** AP/GO Link up event **\n", __FUNCTION__);
+			wl_set_drv_status(cfg, AP_CREATED, ndev);
+			wake_up_interruptible(&cfg->netif_change_event);
+			wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_AP_ENABLED, NULL);
+			return 0;
+		}
+	}
+
+	if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
+		printf("%s: event %s(%d) status %d reason %d\n", __FUNCTION__,
+		bcmevent_get_name(event), event, ntoh32(e->status), reason);
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	WL_DBG(("Enter \n"));
+	if (!len && (event == WLC_E_DEAUTH)) {
+		len = 2; /* reason code field */
+		data = &reason;
+	}
+	if (len) {
+		body = kzalloc(len, GFP_KERNEL);
+
+		if (body == NULL) {
+			WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
+			return WL_INVALID;
+		}
+	}
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+	WL_DBG(("Enter event %d ndev %p\n", event, ndev));
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
+		kfree(body);
+		return WL_INVALID;
+	}
+	if (len)
+		memcpy(body, data, len);
+
+	wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+	memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+	memset(&bssid, 0, sizeof(bssid));
+	err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
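+	/* Map the firmware event to a management frame subtype so a synthetic
+	 * frame can be built and passed up via cfg80211_rx_mgmt() on kernels
+	 * without new/del station event support.
+	 */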
+	switch (event) {
+		case WLC_E_ASSOC_IND:
+			fc = FC_ASSOC_REQ;
+			break;
+		case WLC_E_REASSOC_IND:
+			fc = FC_REASSOC_REQ;
+			break;
+		case WLC_E_DISASSOC_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH:
+			fc = FC_DISASSOC;
+			break;
+		default:
+			fc = 0;
+			goto exit;
+	}
+	memset(&ci, 0, sizeof(ci));
+	if ((err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) {
+		kfree(body);
+		return err;
+	}
+
+	channel = dtoh32(ci.hw_channel);
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band\n"));
+		if (body)
+			kfree(body);
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+	err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+		&mgmt_frame, &len, body);
+	if (err < 0)
+		goto exit;
+	isfree = true;
+
+	if ((event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) ||
+			(event == WLC_E_DISASSOC_IND) ||
+			((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH))) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+		defined(WL_COMPAT_WIRELESS)
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif 
+	}
+
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	if (body)
+		kfree(body);
+#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+	sinfo.filled = 0;
+	if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+		reason == DOT11_SC_SUCCESS) {
+		/* From Linux 4.0 onwards, assoc_req_ies_len is used instead of
+		 * the STATION_INFO_ASSOC_REQ_IES flag
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+		sinfo.filled = STA_INFO_BIT(INFO_ASSOC_REQ_IES);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) */
+		if (!data) {
+			WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
+			return -EINVAL;
+		}
+		sinfo.assoc_req_ies = data;
+		sinfo.assoc_req_ies_len = len;
+		printf("%s: connected device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet));
+		wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
+			WL_EXT_STATUS_STA_CONNECTED, NULL);
+		cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
+	} else if (event == WLC_E_DISASSOC_IND) {
+		printf("%s: disassociated device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet));
+		wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
+			WL_EXT_STATUS_STA_DISCONNECTED, NULL);
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	} else if ((event == WLC_E_DEAUTH_IND) ||
+		((event == WLC_E_DEAUTH) && (reason != DOT11_RC_RESERVED))) {
+		printf("%s: deauthenticated device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet));
+		wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
+			WL_EXT_STATUS_STA_DISCONNECTED, NULL);
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	}
+#endif /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT */
+	return err;
+}
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define MAX_ASSOC_REJECT_ERR_STATUS 5
+int wl_get_connect_failed_status(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 status = ntoh32(e->status);
+
+	cfg->assoc_reject_status = 0;
+
+	if (status == WLC_E_STATUS_FAIL) {
+		WL_ERR(("auth assoc status event=%d e->status %d e->reason %d \n",
+			ntoh32(cfg->event_auth_assoc.event_type),
+			(int)ntoh32(cfg->event_auth_assoc.status),
+			(int)ntoh32(cfg->event_auth_assoc.reason)));
+
+		switch ((int)ntoh32(cfg->event_auth_assoc.status)) {
+			case WLC_E_STATUS_NO_ACK:
+				cfg->assoc_reject_status = 1;
+				break;
+			case WLC_E_STATUS_FAIL:
+				cfg->assoc_reject_status = 2;
+				break;
+			case WLC_E_STATUS_UNSOLICITED:
+				cfg->assoc_reject_status = 3;
+				break;
+			case WLC_E_STATUS_TIMEOUT:
+				cfg->assoc_reject_status = 4;
+				break;
+			case WLC_E_STATUS_ABORT:
+				cfg->assoc_reject_status = 5;
+				break;
+			default:
+				break;
+		}
+		if (cfg->assoc_reject_status) {
+			if (ntoh32(cfg->event_auth_assoc.event_type) == WLC_E_ASSOC) {
+				cfg->assoc_reject_status += MAX_ASSOC_REJECT_ERR_STATUS;
+			}
+		}
+	}
+
+	WL_ERR(("assoc_reject_status %d \n", cfg->assoc_reject_status));
+
+	return 0;
+}
+
+s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	int bytes_written = 0;
+
+	if (cfg == NULL) {
+		return -1;
+	}
+
+	memset(cmd, 0, total_len);
+	bytes_written = snprintf(cmd, 30, "assoc_reject.status %d", cfg->assoc_reject_status);
+
+	WL_ERR(("cmd: %s \n", cmd));
+
+	return bytes_written;
+}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+static s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e)
+{
+	u32 reason = ntoh32(e->reason);
+	u32 event = ntoh32(e->event_type);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	WL_DBG(("event type : %d, reason : %d\n", event, reason));
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+	memcpy(&cfg->event_auth_assoc, e, sizeof(wl_event_msg_t));
+	WL_ERR(("event=%d status %d reason %d \n",
+		ntoh32(cfg->event_auth_assoc.event_type),
+		ntoh32(cfg->event_auth_assoc.status),
+		ntoh32(cfg->event_auth_assoc.reason)));
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+	if (sec) {
+		switch (event) {
+		case WLC_E_ASSOC:
+		case WLC_E_AUTH:
+				sec->auth_assoc_res_status = reason;
+		default:
+			break;
+		}
+	} else
+		WL_ERR(("sec is NULL\n"));
+	return 0;
+}
+
+static s32
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+	u32 status =  ntoh32(e->status);
+	bool active;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+	struct ieee80211_channel *channel = NULL;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	u32 chanspec, chan;
+	u32 freq, band;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+
+	if (event == WLC_E_JOIN) {
+		WL_DBG(("joined in IBSS network\n"));
+	}
+	if (event == WLC_E_START) {
+		WL_DBG(("started IBSS network\n"));
+	}
+	if (event == WLC_E_JOIN || event == WLC_E_START ||
+		(event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
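+		/* Resolve the current control channel from the chanspec so
+		 * cfg80211_ibss_joined() can be given the matching
+		 * ieee80211_channel on kernels >= 3.15.
+		 */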
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+		err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+		if (unlikely(err)) {
+			WL_ERR(("Could not get chanspec %d\n", err));
+			return err;
+		}
+		chan = wf_chspec_ctlchan(wl_chspec_driver_to_host(chanspec));
+		band = (chan <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+		freq = ieee80211_channel_to_frequency(chan, band);
+		channel = ieee80211_get_channel(wiphy, freq);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+		if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+			/* ROAM or Redundant */
+			u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+				WL_DBG(("IBSS connected event from same BSSID("
+					MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
+				return err;
+			}
+			WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+				MAC2STRDBG(cur_bssid), MAC2STRDBG((const u8 *)&e->addr)));
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+			cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
+#else
+			cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
+#endif
+		}
+		else {
+			/* New connection */
+			WL_INFORM(("IBSS connected to " MACDBG "\n",
+				MAC2STRDBG((const u8 *)&e->addr)));
+			wl_link_up(cfg);
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+			cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
+#else
+			cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
+#endif
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+			active = true;
+			wl_update_prof(cfg, ndev, NULL, (const void *)&active, WL_PROF_ACT);
+		}
+	} else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
+		event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+		wl_clr_drv_status(cfg, CONNECTED, ndev);
+		wl_link_down(cfg);
+		wl_init_prof(cfg, ndev);
+	}
+	else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
+		WL_DBG(("no action - join fail (IBSS mode)\n"));
+	}
+	else {
+		WL_DBG(("no action (IBSS mode)\n"));
+	}
+	return err;
+}
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define WiFiALL_OUI         "\x50\x6F\x9A"  /* Wi-FiAll OUI */
+#define WiFiALL_OUI_LEN     3
+#define WiFiALL_OUI_TYPE    16
+
+int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, uint8 *mac)
+{
+	s32 err = 0;
+	struct wl_bss_info *bi;
+	uint8 eabuf[ETHER_ADDR_LEN];
+	u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0;
+	char rate_str[4];
+	u8 *ie = NULL;
+	u32 ie_len;
+	struct wiphy *wiphy;
+	struct cfg80211_bss *bss;
+	bcm_tlv_t *interworking_ie = NULL;
+	bcm_tlv_t *tlv_ie = NULL;
+	bcm_tlv_t *vht_ie = NULL;
+	vndr_ie_t *vndrie;
+	int16 ie_11u_rel_num = -1, ie_mu_mimo_cap = -1;
+	u32 i, remained_len, count = 0;
+	char roam_count_str[4], akm_str[4];
+	s32 val = 0;
+
+	/* get BSS information */
+
+	strncpy(cfg->bss_info, "x x x x x x x x x x x x x", GET_BSS_INFO_LEN);
+
+	memset(cfg->extra_buf, 0, WL_EXTRA_BUF_MAX);
+	*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+
+	err = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX);
+	if (unlikely(err)) {
+		WL_ERR(("Could not get bss info %d\n", err));
+		cfg->roam_count = 0;
+		return -1;
+	}
+
+	if (!mac) {
+		WL_ERR(("mac is null \n"));
+		cfg->roam_count = 0;
+		return -1;
+	}
+
+	memcpy(eabuf, mac, ETHER_ADDR_LEN);
+
+	bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+	channel = wf_chspec_ctlchan(bi->chanspec);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+	freq = ieee80211_channel_to_frequency(channel);
+#else
+	if (channel > 14) {
+		freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+	} else {
+		freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+	}
+#endif
+	rate = 0;
+	err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+	if (err) {
+		WL_ERR(("Could not get rate (%d)\n", err));
+		snprintf(rate_str, sizeof(rate_str), "x"); // Unknown
+
+	} else {
+		rate = dtoh32(rate);
+		snprintf(rate_str, sizeof(rate_str), "%d", (rate/2));
+	}
+
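+	/* Derive the 802.11 mode (0:11b, 1:11g, 2:11n, 3:11a, 4:11ac) from the
+	 * rateset, band and HT/VHT capability, and count the supported spatial
+	 * streams (reported zero-based) from the MCS maps.
+	 */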
+	//supported maximum rate
+	supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2;
+
+	if (supported_rate < 12) {
+		mode_80211 = 0; //11b maximum rate is 11Mbps. 11b mode
+	} else {
+		//Not the HT-capable case.
+		if (channel > 14) {
+			mode_80211 = 3; // 11a mode
+		} else {
+			mode_80211 = 1; // 11g mode
+		}
+	}
+
+	if (bi->n_cap) {
+		/* check Rx MCS Map for HT */
+		nss = 0;
+		mode_80211 = 2;
+		for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+			int8 bitmap = 0xFF;
+			if (i == MAX_STREAMS_SUPPORTED-1) {
+				bitmap = 0x7F;
+			}
+			if (bi->basic_mcs[i] & bitmap) {
+				nss++;
+			}
+		}
+	}
+
+	if (bi->vht_cap) {
+		nss = 0;
+		mode_80211 = 4;
+		for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+			mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+			if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+				nss++;
+			}
+		}
+	}
+
+	if (nss) {
+		nss = nss - 1;
+	}
+
+	wiphy = bcmcfg_to_wiphy(cfg);
+	bss = CFG80211_GET_BSS(wiphy, NULL, eabuf, bi->SSID, bi->SSID_len);
+	if (!bss) {
+		WL_ERR(("Could not find the AP\n"));
+	} else {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		ie = (u8 *)bss->ies->data;
+		ie_len = bss->ies->len;
+#else
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	}
+
+	if (ie) {
+		ie_mu_mimo_cap = 0;
+		ie_11u_rel_num = 0;
+
+		if (bi->vht_cap) {
+			if ((vht_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+					DOT11_MNG_VHT_CAP_ID)) != NULL) {
+				ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3;
+			}
+		}
+
+		if ((interworking_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+				DOT11_MNG_INTERWORKING_ID)) != NULL) {
+			if ((tlv_ie = bcm_parse_tlvs(ie, (u32)ie_len, DOT11_MNG_VS_ID)) != NULL) {
+				remained_len = ie_len;
+
+				while (tlv_ie) {
+					if (count > MAX_VNDR_IE_NUMBER)
+						break;
+
+					if (tlv_ie->id == DOT11_MNG_VS_ID) {
+						vndrie = (vndr_ie_t *) tlv_ie;
+
+						if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+							WL_ERR(("%s: invalid vndr ie, "
+								"length is too small %d\n",
+								__FUNCTION__, vndrie->len));
+							break;
+						}
+
+						if (!bcmp(vndrie->oui,
+							(u8*)WiFiALL_OUI, WiFiALL_OUI_LEN) &&
+							(vndrie->data[0] == WiFiALL_OUI_TYPE))
+						{
+							WL_ERR(("Found Wi-FiAll OUI.\n"));
+							ie_11u_rel_num = vndrie->data[1];
+							ie_11u_rel_num = (ie_11u_rel_num & 0xf0)>>4;
+							ie_11u_rel_num += 1;
+
+							break;
+						}
+					}
+					count++;
+					tlv_ie = bcm_next_tlv(tlv_ie, &remained_len);
+				}
+			}
+		}
+	}
+
+	for (i = 0; i < bi->SSID_len; i++) {
+		if (bi->SSID[i] == ' ') {
+			bi->SSID[i] = '_';
+		}
+	}
+
+	//0 : None, 1 : OKC, 2 : FT, 3 : CCKM
+	err = wldev_iovar_getint(dev, "wpa_auth", &val);
+	if (unlikely(err)) {
+		WL_ERR(("could not get wpa_auth (%d)\n", err));
+		snprintf(akm_str, sizeof(akm_str), "x"); // Unknown
+	} else {
+		WL_ERR(("wpa_auth val %d \n", val));
+#if defined(BCMEXTCCX)
+		if (val & (WPA_AUTH_CCKM | WPA2_AUTH_CCKM)) {
+			snprintf(akm_str, sizeof(akm_str), "3");
+		} else
+#endif /* BCMEXTCCX */
+		if (val & WPA2_AUTH_FT) {
+			snprintf(akm_str, sizeof(akm_str), "2");
+		} else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) {
+			snprintf(akm_str, sizeof(akm_str), "1");
+		} else {
+			snprintf(akm_str, sizeof(akm_str), "0");
+		}
+	}
+
+	if (cfg->roam_offload) {
+		snprintf(roam_count_str, sizeof(roam_count_str), "x"); // Unknown
+	} else {
+		snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count);
+	}
+	cfg->roam_count = 0;
+
+	WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), bi->SSID));
+	WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d,"
+				"MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n"
+				"akm:%s roam:%s \n",
+				freq, wf_chspec_to_bw_str(bi->chanspec),
+				dtoh32(bi->RSSI), (rate / 2), mode_80211, nss,
+				ie_mu_mimo_cap, ie_11u_rel_num, bi->SNR, bi->phy_noise,
+				akm_str, roam_count_str));
+
+	if (ie) {
+		snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+				"%02x:%02x:%02x %d %s %d %s %d %d %d %d %d %d %s %s",
+				eabuf[0], eabuf[1], eabuf[2],
+				freq, wf_chspec_to_bw_str(bi->chanspec),
+				dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+				ie_mu_mimo_cap, ie_11u_rel_num,
+				bi->SNR, bi->phy_noise, akm_str, roam_count_str);
+	} else {
+		//ie_mu_mimo_cap and ie_11u_rel_num are unknown.
+		snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+				"%02x:%02x:%02x %d %s %d %s %d %d x x %d %d %s %s",
+				eabuf[0], eabuf[1], eabuf[2],
+				freq, wf_chspec_to_bw_str(bi->chanspec),
+				dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+				bi->SNR, bi->phy_noise, akm_str, roam_count_str);
+	}
+
+	CFG80211_PUT_BSS(wiphy, bss);
+
+	return 0;
+}
+
+s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (cfg == NULL) {
+		return -1;
+	}
+
+	memset(cmd, 0, total_len);
+	memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN);
+
+	WL_ERR(("cmd: %s \n", cmd));
+
+	return GET_BSS_INFO_LEN;
+}
+
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+static s32
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	struct wiphy *wiphy = NULL;
+	struct cfg80211_bss *bss = NULL;
+	struct wlc_ssid *ssid = NULL;
+	u8 *bssid = 0;
+	dhd_pub_t *dhdp;
+	int vndr_oui_num = 0;
+	char vndr_oui[MAX_VNDR_OUI_STR_LEN] = {0, };
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	dhdp = (dhd_pub_t *)(cfg->pub);
+	BCM_REFERENCE(dhdp);
+
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+		err = wl_notify_connect_status_ap(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) {
+		err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+		WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+			ntoh32(e->event_type), ntoh32(e->status), ndev));
+		if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
+			wl_get_auth_assoc_status(cfg, ndev, e);
+			return 0;
+		}
+		DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+		if (wl_is_linkup(cfg, e, ndev)) {
+			wl_link_up(cfg);
+			act = true;
+			if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+				if (event == WLC_E_LINK &&
+#ifdef DHD_LOSSLESS_ROAMING
+					!cfg->roam_offload &&
+#endif /* DHD_LOSSLESS_ROAMING */
+					wl_get_drv_status(cfg, CONNECTED, ndev)) {
+					wl_bss_roaming_done(cfg, ndev, e, data);
+				}
+
+				if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+					vndr_oui_num = wl_vndr_ies_get_vendor_oui(cfg,
+						ndev, vndr_oui, ARRAYSIZE(vndr_oui));
+#if defined(STAT_REPORT)
+					/* notify STA connection only */
+					wl_stat_report_notify_connected(cfg);
+#endif /* STAT_REPORT */
+				}
+
+				printf("wl_bss_connect_done succeeded with "
+					MACDBG " %s%s\n", MAC2STRDBG((const u8*)(&e->addr)),
+					vndr_oui_num > 0 ? "vndr_oui: " : "",
+					vndr_oui_num > 0 ? vndr_oui : "");
+
+				wl_bss_connect_done(cfg, ndev, e, data, true);
+				WL_DBG(("joined in BSS network \"%s\"\n",
+					((struct wlc_ssid *)
+					 wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+
+#ifdef WBTEXT
+				if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+					dhdp->wbtext_support &&
+					event == WLC_E_SET_SSID) {
+					/* set wnm_keepalives_max_idle after association */
+					wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
+					/* send nbr request or BTM query to update RCC */
+					wl_cfg80211_wbtext_update_rcc(cfg, ndev);
+				}
+#endif /* WBTEXT */
+			}
+			wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+			wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+		} else if (WL_IS_LINKDOWN(cfg, e, data) ||
+				((event == WLC_E_SET_SSID) &&
+				(ntoh32(e->status) != WLC_E_STATUS_SUCCESS) &&
+				(wl_get_drv_status(cfg, CONNECTED, ndev)))) {
+
+			WL_INFORM(("connection state bit status: [%d:%d:%d:%d]\n",
+				wl_get_drv_status(cfg, CONNECTING, ndev),
+				wl_get_drv_status(cfg, CONNECTED, ndev),
+				wl_get_drv_status(cfg, DISCONNECTING, ndev),
+				wl_get_drv_status(cfg, NESTED_CONNECT, ndev)));
+
+			if (wl_get_drv_status(cfg, DISCONNECTING, ndev) &&
+				(wl_get_drv_status(cfg, NESTED_CONNECT, ndev) ||
+				wl_get_drv_status(cfg, CONNECTING, ndev))) {
+				/* wl_cfg80211_connect was called before 'DISCONNECTING' was
+				 * cleared. Deauth/Link down event is caused by WLC_DISASSOC
+				 * command issued from the wl_cfg80211_connect context. Ignore
+				 * the event to avoid pre-empting the current connection
+				 */
+				WL_INFORM(("Nested connection case. Drop event. \n"));
+				wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+					WL_EXT_STATUS_DISCONNECTED, NULL);
+				wl_clr_drv_status(cfg, NESTED_CONNECT, ndev);
+				wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+				/* Not in 'CONNECTED' state, clear it */
+				wl_clr_drv_status(cfg, CONNECTED, ndev);
+				return 0;
+			}
+
+#ifdef DHD_LOSSLESS_ROAMING
+			wl_del_roam_timeout(cfg);
+#endif
+#ifdef P2PLISTEN_AP_SAMECHN
+			if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+				wl_cfg80211_set_p2p_resp_ap_chn(ndev, 0);
+				cfg->p2p_resp_apchn_status = false;
+				WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
+			}
+#endif /* P2PLISTEN_AP_SAMECHN */
+			wl_cfg80211_cancel_scan(cfg);
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+			if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+				wl_get_bss_info(cfg, ndev, (u8*)(&e->addr));
+			}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+			if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+				u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+				if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+					bool fw_assoc_state = TRUE;
+					dhd_pub_t *dhd = (dhd_pub_t *)cfg->pub;
+					fw_assoc_state = dhd_is_associated(dhd, e->ifidx, &err);
+					if (!fw_assoc_state) {
+						WL_ERR(("Event sends up even different BSSID"
+							" cur: " MACDBG " event: " MACDBG"\n",
+							MAC2STRDBG(curbssid),
+							MAC2STRDBG((const u8*)(&e->addr))));
+					} else {
+						WL_ERR(("BSSID of event is not the connected BSSID"
+							"(ignore it) cur: " MACDBG
+							" event: " MACDBG"\n",
+							MAC2STRDBG(curbssid),
+							MAC2STRDBG((const u8*)(&e->addr))));
+						return 0;
+					}
+				}
+			}
+			/* Explicitly calling unlink to remove BSS in CFG */
+			wiphy = bcmcfg_to_wiphy(cfg);
+			ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+			bssid = (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			if (ssid && bssid) {
+				bss = CFG80211_GET_BSS(wiphy, NULL, bssid,
+					ssid->SSID, ssid->SSID_len);
+				if (bss) {
+					cfg80211_unlink_bss(wiphy, bss);
+					CFG80211_PUT_BSS(wiphy, bss);
+				}
+			}
+
+			if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+				scb_val_t scbval;
+				u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+				s32 reason = 0;
+				struct ether_addr bssid_dongle = {{0, 0, 0, 0, 0, 0}};
+				struct ether_addr bssid_null = {{0, 0, 0, 0, 0, 0}};
+
+				if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
+					reason = ntoh32(e->reason);
+				/* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
+				reason = (reason == WLAN_REASON_UNSPECIFIED)? 0 : reason;
+
+				printf("link down if %s may call cfg80211_disconnected. "
+					"event : %d, reason=%d from " MACDBG "\n",
+					ndev->name, event, ntoh32(e->reason),
+					MAC2STRDBG((const u8*)(&e->addr)));
+
+				/* roam offload does not sync BSSID always, get it from dongle */
+				if (cfg->roam_offload) {
+					memset(&bssid_dongle, 0, sizeof(bssid_dongle));
+					if (wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid_dongle,
+							sizeof(bssid_dongle)) == BCME_OK) {
+						/* if not roam case, it would return null bssid */
+						if (memcmp(&bssid_dongle, &bssid_null,
+								ETHER_ADDR_LEN) != 0) {
+							curbssid = (u8 *)&bssid_dongle;
+						}
+					}
+				}
+				if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+					bool fw_assoc_state = TRUE;
+					dhd_pub_t *dhd = (dhd_pub_t *)cfg->pub;
+					fw_assoc_state = dhd_is_associated(dhd, e->ifidx, &err);
+					if (!fw_assoc_state) {
+						WL_ERR(("Event sends up even different BSSID"
+							" cur: " MACDBG " event: " MACDBG"\n",
+							MAC2STRDBG(curbssid),
+							MAC2STRDBG((const u8*)(&e->addr))));
+					} else {
+						WL_ERR(("BSSID of event is not the connected BSSID"
+							"(ignore it) cur: " MACDBG
+							" event: " MACDBG"\n",
+							MAC2STRDBG(curbssid),
+							MAC2STRDBG((const u8*)(&e->addr))));
+						return 0;
+					}
+				}
+
+#ifdef DBG_PKT_MON
+				if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+					DHD_DBG_PKT_MON_STOP(dhdp);
+				}
+#endif /* DBG_PKT_MON */
+
+				/* clear RSSI monitor, framework will set new cfg */
+#ifdef RSSI_MONITOR_SUPPORT
+				dhd_dev_set_rssi_monitor_cfg(bcmcfg_to_prmry_ndev(cfg),
+				    FALSE, 0, 0);
+#endif /* RSSI_MONITOR_SUPPORT */
+				if (!memcmp(ndev->name, WL_P2P_INTERFACE_PREFIX, strlen(WL_P2P_INTERFACE_PREFIX))) {
+					// terence 20130703: Fix for wrong group_capab (timing issue)
+					cfg->p2p_disconnected = 1;
+				}
+				memcpy(&cfg->disconnected_bssid, curbssid, ETHER_ADDR_LEN);
+				wl_clr_drv_status(cfg, CONNECTED, ndev);
+
+				if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+					/* To make sure of the disconnect, explicitly send a
+					 * disassoc for the BSSID 00:00:00:00:00:00 issue
+					 */
+					scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+					memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+					scbval.val = htod32(scbval.val);
+					err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval,
+						sizeof(scb_val_t));
+					if (err < 0) {
+						WL_ERR(("WLC_DISASSOC error %d\n", err));
+						err = 0;
+					}
+				}
+
+				/* Send up deauth and clear states */
+				CFG80211_DISCONNECTED(ndev, reason, NULL, 0,
+						false, GFP_KERNEL);
+				wl_link_down(cfg);
+				wl_init_prof(cfg, ndev);
+#ifdef WBTEXT
+				/* when STA was disconnected, clear join pref and set wbtext */
+				if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) {
+					char smbuf[WLC_IOCTL_SMLEN];
+					char clear[] = { 0x01, 0x02, 0x00, 0x00, 0x03,
+						0x02, 0x00, 0x00, 0x04, 0x02, 0x00, 0x00 };
+					if ((err = wldev_iovar_setbuf(ndev, "join_pref",
+							clear, sizeof(clear), smbuf,
+							sizeof(smbuf), NULL))
+							== BCME_OK) {
+						if ((err = wldev_iovar_setint(ndev,
+								"wnm_bsstrans_resp",
+								WL_BSSTRANS_POLICY_PRODUCT_WBTEXT))
+								== BCME_OK) {
+							wl_cfg80211_wbtext_set_default(ndev);
+						} else {
+							WL_ERR(("%s: Failed to set wbtext = %d\n",
+								__FUNCTION__, err));
+						}
+					} else {
+						WL_ERR(("%s: Failed to clear join pref = %d\n",
+							__FUNCTION__, err));
+					}
+					wl_cfg80211_wbtext_clear_bssid_list(cfg);
+				}
+#endif /* WBTEXT */
+				wl_vndr_ies_clear_vendor_oui_list(cfg);
+			}
+			else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+				printf("link down, during connecting\n");
+				/* Issue WLC_DISASSOC to prevent FW roam attempts */
+				err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+				if (err < 0) {
+					WL_ERR(("CONNECTING state, WLC_DISASSOC error %d\n", err));
+					err = 0;
+				}
+				WL_INFORM(("Clear drv CONNECTING status\n"));
+				wl_clr_drv_status(cfg, CONNECTING, ndev);
+#ifdef ESCAN_RESULT_PATCH
+				if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
+					/* In case this event comes while associating another AP */
+#endif /* ESCAN_RESULT_PATCH */
+					wl_bss_connect_done(cfg, ndev, e, data, false);
+			}
+			wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+			wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+				WL_EXT_STATUS_DISCONNECTED, NULL);
+
+			/* if link down, bsscfg is disabled */
+			if (ndev != bcmcfg_to_prmry_ndev(cfg))
+				complete(&cfg->iface_disable);
+#ifdef WLTDLS
+			/* re-enable TDLS if the number of connected interfaces
+			 * is less than 2.
+			 */
+			wl_cfg80211_tdls_config(cfg, TDLS_STATE_DISCONNECT, false);
+#endif /* WLTDLS */
+		} else if (wl_is_nonetwork(cfg, e)) {
+			printf("connect failed event=%d e->status %d e->reason %d \n",
+				event, (int)ntoh32(e->status), (int)ntoh32(e->reason));
+			wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+				WL_EXT_STATUS_DISCONNECTED, NULL);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+			if (event == WLC_E_SET_SSID) {
+				wl_get_connect_failed_status(cfg, e);
+			}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+			if (wl_get_drv_status(cfg, DISCONNECTING, ndev) &&
+				wl_get_drv_status(cfg, CONNECTING, ndev)) {
+				wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+				wl_clr_drv_status(cfg, CONNECTING, ndev);
+				wl_cfg80211_scan_abort(cfg);
+				DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+				return err;
+			}
+			/* Clean up any pending scan request */
+			wl_cfg80211_cancel_scan(cfg);
+
+			if (wl_get_drv_status(cfg, CONNECTING, ndev))
+				wl_bss_connect_done(cfg, ndev, e, data, false);
+		} else {
+			WL_DBG(("%s nothing\n", __FUNCTION__));
+		}
+		DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+	}
+	else {
+		printf("wl_notify_connect_status : Invalid %s mode %d event %d status %d\n",
+			ndev->name, wl_get_mode_by_netdev(cfg, ndev), ntoh32(e->event_type),
+			ntoh32(e->status));
+	}
+	return err;
+}
+
+#ifdef WL_RELMCAST
+void wl_cfg80211_set_rmc_pid(struct net_device *dev, int pid)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	if (pid > 0)
+		cfg->rmc_event_pid = pid;
+	WL_DBG(("set pid for rmc event : pid=%d\n", pid));
+}
+#endif /* WL_RELMCAST */
+
+#ifdef WL_RELMCAST
+static s32
+wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	u32 evt = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	int ret = -1;
+
+	switch (reason) {
+		case WLC_E_REASON_RMC_AR_LOST:
+		case WLC_E_REASON_RMC_AR_NO_ACK:
+			if (cfg->rmc_event_pid != 0) {
+				ret = wl_netlink_send_msg(cfg->rmc_event_pid,
+					RMC_EVENT_LEADER_CHECK_FAIL,
+					cfg->rmc_event_seq++, NULL, 0);
+			}
+			break;
+		default:
+			break;
+	}
+	WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret));
+	return ret;
+}
+#endif /* WL_RELMCAST */
+
+#ifdef GSCAN_SUPPORT
+static s32
+wl_handle_roam_exp_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u32 datalen = be32_to_cpu(e->datalen);
+
+	if (datalen) {
+		wl_roam_exp_event_t *evt_data = (wl_roam_exp_event_t *)data;
+		if (evt_data->version == ROAM_EXP_EVENT_VERSION) {
+			wlc_ssid_t *ssid = &evt_data->cur_ssid;
+			struct wireless_dev *wdev;
+			ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+			if (ndev) {
+				wdev = ndev->ieee80211_ptr;
+				wdev->ssid_len = min(ssid->SSID_len, (uint32)DOT11_MAX_SSID_LEN);
+				memcpy(wdev->ssid, ssid->SSID, wdev->ssid_len);
+				WL_ERR(("SSID is %s\n", ssid->SSID));
+				wl_update_prof(cfg, ndev, NULL, ssid, WL_PROF_SSID);
+			} else {
+				WL_ERR(("NULL ndev!\n"));
+			}
+		} else {
+			WL_ERR(("Version mismatch %d, expected %d", evt_data->version,
+			       ROAM_EXP_EVENT_VERSION));
+		}
+	}
+	return BCME_OK;
+}
+#endif /* GSCAN_SUPPORT */
+
+#ifdef RSSI_MONITOR_SUPPORT
+static s32 wl_handle_rssi_monitor_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+
+#if defined(WL_VENDOR_EXT_SUPPORT) || defined(CONFIG_BCMDHD_VENDOR_EXT)
+	u32 datalen = be32_to_cpu(e->datalen);
+	struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+
+	if (datalen) {
+		wl_rssi_monitor_evt_t *evt_data = (wl_rssi_monitor_evt_t *)data;
+		if (evt_data->version == RSSI_MONITOR_VERSION) {
+			dhd_rssi_monitor_evt_t monitor_data;
+			monitor_data.version = DHD_RSSI_MONITOR_EVT_VERSION;
+			monitor_data.cur_rssi = evt_data->cur_rssi;
+			memcpy(&monitor_data.BSSID, &e->addr, ETHER_ADDR_LEN);
+			wl_cfgvendor_send_async_event(wiphy, ndev,
+				GOOGLE_RSSI_MONITOR_EVENT,
+				&monitor_data, sizeof(monitor_data));
+		} else {
+			WL_ERR(("Version mismatch %d, expected %d", evt_data->version,
+			       RSSI_MONITOR_VERSION));
+		}
+	}
+#endif /* WL_VENDOR_EXT_SUPPORT || CONFIG_BCMDHD_VENDOR_EXT */
+	return BCME_OK;
+}
+#endif /* RSSI_MONITOR_SUPPORT */
+
+static s32
+wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	u32 status = be32_to_cpu(e->status);
+#ifdef DHD_LOSSLESS_ROAMING
+	struct wl_security *sec;
+#endif
+	WL_DBG(("Enter \n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
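+	/* Once a WLC_E_BSSID event is seen, unsubscribe from WLC_E_ROAM and
+	 * ignore any further WLC_E_ROAM events so roams are handled only via
+	 * WLC_E_BSSID.
+	 */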
+	if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
+		wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false);
+		cfg->disable_roam_event = TRUE;
+	}
+
+	if ((cfg->disable_roam_event) && (event == WLC_E_ROAM))
+		return err;
+
+	if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
+		if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+#ifdef DHD_LOSSLESS_ROAMING
+			sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+			/* In order to reduce roaming delay, wl_bss_roaming_done is
+			 * called early with the WLC_E_LINK event. It is called from
+			 * here only if the WLC_E_LINK event is blocked for a specific
+			 * security type.
+			 */
+			if (IS_AKM_SUITE_FT(sec)) {
+				wl_bss_roaming_done(cfg, ndev, e, data);
+			}
+			/* The roam timer is mostly deleted from wl_cfg80211_change_station
+			 * after roaming finishes successfully. We need to delete
+			 * the timer from here only for some security types that aren't
+			 * using wl_cfg80211_change_station to authorize the SCB.
+			 */
+			if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) {
+				wl_del_roam_timeout(cfg);
+			}
+#else
+			wl_bss_roaming_done(cfg, ndev, e, data);
+#endif /* DHD_LOSSLESS_ROAMING */
+		} else {
+			wl_bss_connect_done(cfg, ndev, e, data, true);
+		}
+		act = true;
+		wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+		wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+
+		if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+			wl_vndr_ies_get_vendor_oui(cfg, ndev, NULL, 0);
+		}
+	}
+#ifdef DHD_LOSSLESS_ROAMING
+	else if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status != WLC_E_STATUS_SUCCESS) {
+		wl_del_roam_timeout(cfg);
+	}
+#endif
+	return err;
+}
+
+#ifdef CUSTOM_EVENT_PM_WAKE
+uint32 last_dpm_upd_time = 0;	/* ms */
+#define DPM_UPD_LMT_TIME	25000	/* ms */
+#define DPM_UPD_LMT_RSSI	-85	/* dbm */
+
+static s32
+wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	struct net_device *ndev = NULL;
+	u8 *pbuf = NULL;
+	uint32 cur_dpm_upd_time = 0;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	s32 rssi;
+	scb_val_t scb_val;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	pbuf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+	if (pbuf == NULL) {
+		WL_ERR(("failed to allocate local pbuf\n"));
+		return -ENOMEM;
+	}
+
+	err = wldev_iovar_getbuf_bsscfg(ndev, "dump",
+		"pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+
+	if (err) {
+		WL_ERR(("dump ioctl err = %d", err));
+	} else {
+		WL_ERR(("PM status : %s\n", pbuf));
+	}
+
+	if (pbuf) {
+		kfree(pbuf);
+	}
+
+	if (dhd->early_suspended) {
+		/* LCD off */
+		memset(&scb_val, 0, sizeof(scb_val_t));
+		scb_val.val = 0;
+		err = wldev_ioctl_get(ndev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
+		if (err) {
+			WL_ERR(("Could not get rssi (%d)\n", err));
+		}
+		rssi = wl_rssi_offset(dtoh32(scb_val.val));
+		WL_ERR(("[%s] RSSI %d dBm\n", ndev->name, rssi));
+		if (rssi > DPM_UPD_LMT_RSSI) {
+			return err;
+		}
+	} else {
+		/* LCD on */
+		return err;
+	}
+
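+	/* Rate-limit DPM update events: if another event arrives within
+	 * DPM_UPD_LMT_TIME while the screen is off and the RSSI is weak,
+	 * force a disassociation.
+	 */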
+	if (last_dpm_upd_time == 0) {
+		last_dpm_upd_time = OSL_SYSUPTIME();
+	} else {
+		cur_dpm_upd_time = OSL_SYSUPTIME();
+		if (cur_dpm_upd_time - last_dpm_upd_time < DPM_UPD_LMT_TIME) {
+			scb_val_t scbval;
+			bzero(&scbval, sizeof(scb_val_t));
+
+			err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+			if (err < 0) {
+				WL_ERR(("%s: Disassoc error %d\n", __FUNCTION__, err));
+				return err;
+			}
+			WL_ERR(("%s: Force Disassoc due to updated DPM event.\n", __FUNCTION__));
+
+			last_dpm_upd_time = 0;
+		} else {
+			last_dpm_upd_time = cur_dpm_upd_time;
+		}
+	}
+
+	return err;
+}
+#endif	/* CUSTOM_EVENT_PM_WAKE */
+
+#ifdef QOS_MAP_SET
+/* get user priority table */
+uint8 *
+wl_get_up_table(dhd_pub_t * dhdp, int idx)
+{
+	struct net_device *ndev;
+	struct bcm_cfg80211 *cfg;
+
+	ndev = dhd_idx2net(dhdp, idx);
+	if (ndev) {
+		cfg = wl_get_cfg(ndev);
+		if (cfg)
+			return (uint8 *)(cfg->up_table);
+	}
+
+	return NULL;
+}
+#endif /* QOS_MAP_SET */
+
+#if defined(DHD_LOSSLESS_ROAMING) || defined(DBG_PKT_MON)
+static s32
+wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct wl_security *sec;
+	struct net_device *ndev;
+#if defined(DHD_LOSSLESS_ROAMING) || defined(DBG_PKT_MON)
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
+	u32 status = ntoh32(e->status);
+	u32 reason = ntoh32(e->reason);
+
+	BCM_REFERENCE(sec);
+
+	if (status == WLC_E_STATUS_SUCCESS && reason != WLC_E_REASON_INITIAL_ASSOC) {
+		WL_ERR(("Attempting roam with reason code : %d\n", reason));
+	}
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifdef DBG_PKT_MON
+	if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+		DHD_DBG_PKT_MON_STOP(dhdp);
+		DHD_DBG_PKT_MON_START(dhdp);
+	}
+#endif /* DBG_PKT_MON */
+
+#ifdef DHD_LOSSLESS_ROAMING
+	sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	/* Disable lossless roaming for specific AKM suites.
+	 * Any other AKM suite can be added below if lossless roaming
+	 * delays the transition time and causes a certification failure.
+	 */
+	if (IS_AKM_SUITE_FT(sec)) {
+		return BCME_OK;
+	}
+	dhdp->dequeue_prec_map = 1 << PRIO_8021D_NC;
+	/* Restore flow control  */
+	dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);
+
+	mod_timer(&cfg->roam_timeout, jiffies + msecs_to_jiffies(WL_ROAM_TIMEOUT_MS));
+#endif /* DHD_LOSSLESS_ROAMING */
+	return BCME_OK;
+
+}
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
+#ifdef ENABLE_TEMP_THROTTLING
+static s32
+wl_check_rx_throttle_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 status = ntoh32(e->status);
+	u32 reason = ntoh32(e->reason);
+
+	WL_ERR_EX(("RX THROTTLE : status=%d, reason=0x%x\n", status, reason));
+
+	return err;
+}
+#endif /* ENABLE_TEMP_THROTTLING */
+
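+/* Fetch the association request/response IEs from the firmware and cache them
+ * in the driver's connect info, stripping the fixed-length frame fields (and
+ * the current-AP address for reassociation requests); QOS_MAP_SET builds also
+ * refresh the QoS map table from a QoS Map Set IE in the response.
+ */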
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	wl_assoc_info_t assoc_info;
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+#ifdef QOS_MAP_SET
+	bcm_tlv_t * qos_map_ie = NULL;
+#endif /* QOS_MAP_SET */
+
+	WL_DBG(("Enter \n"));
+	err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
+		WL_ASSOC_INFO_MAX, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("could not get assoc info (%d)\n", err));
+		return err;
+	}
+	memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t));
+	assoc_info.req_len = htod32(assoc_info.req_len);
+	assoc_info.resp_len = htod32(assoc_info.resp_len);
+	assoc_info.flags = htod32(assoc_info.flags);
+	if (conn_info->req_ie_len) {
+		conn_info->req_ie_len = 0;
+		bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+	}
+	if (conn_info->resp_ie_len) {
+		conn_info->resp_ie_len = 0;
+		bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+	}
+	if (assoc_info.req_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc req (%d)\n", err));
+			return err;
+		}
+		conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+		if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+			conn_info->req_ie_len -= ETHER_ADDR_LEN;
+		}
+		if (conn_info->req_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+		else {
+			WL_ERR(("IE size %d above max %d size \n",
+				conn_info->req_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->req_ie_len = 0;
+	}
+	if (assoc_info.resp_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc resp (%d)\n", err));
+			return err;
+		}
+		conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+		if (conn_info->resp_ie_len <= MAX_REQ_LINE) {
+			memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+		} else {
+			WL_ERR(("IE size %d above max %d size \n",
+				conn_info->resp_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+
+#ifdef QOS_MAP_SET
+		/* find qos map set ie */
+		if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+				DOT11_MNG_QOS_MAP_ID)) != NULL) {
+			WL_DBG((" QoS map set IE found in assoc response\n"));
+			if (!cfg->up_table) {
+				cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
+			}
+			wl_set_up_table(cfg->up_table, qos_map_ie);
+		} else {
+			kfree(cfg->up_table);
+			cfg->up_table = NULL;
+		}
+#endif /* QOS_MAP_SET */
+	} else {
+		conn_info->resp_ie_len = 0;
+	}
+	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+		conn_info->resp_ie_len));
+
+	return err;
+}
+
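+/* Convert a plain channel number into a chanspec (band, control sideband and
+ * the interface's minimum bandwidth) and store it as the single entry of the
+ * join parameters' channel list, growing *join_params_size accordingly.
+ */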
+static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, struct wl_join_params *join_params,
+        size_t *join_params_size)
+{
+	s32 bssidx = -1;
+	chanspec_t chanspec = 0, chspec;
+
+	if (ch != 0) {
+		struct bcm_cfg80211 *cfg =
+			(struct bcm_cfg80211 *)wiphy_priv(dev->ieee80211_ptr->wiphy);
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		/* Get the min_bw set for the interface */
+		chspec = wl_cfg80211_ulb_get_min_bw_chspec(cfg, dev->ieee80211_ptr, bssidx);
+		if (chspec == INVCHANSPEC) {
+			WL_ERR(("Invalid chanspec \n"));
+			return -EINVAL;
+		}
+		chanspec |= chspec;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+			wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num =
+			htod32(join_params->params.chanspec_num);
+
+		WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+			join_params->params.chanspec_list[0],
+			join_params->params.chanspec_num));
+	}
+	return 0;
+}
+
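+/* Refresh the cached profile (channel, beacon interval, DTIM period) from a
+ * fresh WLC_GET_BSS_INFO query; if cfg80211 has no cached BSS entry for the
+ * current BSSID, report it first via wl_inform_single_bss().
+ */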
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
+{
+	struct cfg80211_bss *bss;
+	struct wl_bss_info *bi;
+	struct wlc_ssid *ssid;
+	struct bcm_tlv *tim;
+	s32 beacon_interval;
+	s32 dtim_period;
+	size_t ie_len;
+	u8 *ie;
+	u8 *curbssid;
+	s32 err = 0;
+	struct wiphy *wiphy;
+	u32 channel;
+	char *buf;
+	u32 freq, band;
+
+	wiphy = bcmcfg_to_wiphy(cfg);
+
+	ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+		ssid->SSID, ssid->SSID_len);
+	buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_ATOMIC);
+	if (!buf) {
+		WL_ERR(("buffer alloc failed.\n"));
+		return BCME_NOMEM;
+	}
+	mutex_lock(&cfg->usr_sync);
+	*(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
+	err = wldev_ioctl_get(ndev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX);
+	if (unlikely(err)) {
+		WL_ERR(("Could not get bss info %d\n", err));
+		goto update_bss_info_out;
+	}
+	bi = (struct wl_bss_info *)(buf + 4);
+	channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+	wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
+
+	if (!bss) {
+		WL_DBG(("Could not find the AP\n"));
+		if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+			WL_ERR(("Bssid doesn't match\n"));
+			err = -EIO;
+			goto update_bss_info_out;
+		}
+		err = wl_inform_single_bss(cfg, bi, roam);
+		if (unlikely(err))
+			goto update_bss_info_out;
+
+		ie = ((u8 *)bi) + bi->ie_offset;
+		ie_len = bi->ie_length;
+		beacon_interval = cpu_to_le16(bi->beacon_period);
+	} else {
+		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+		freq = ieee80211_channel_to_frequency(channel);
+#else
+		band = (channel <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+		freq = ieee80211_channel_to_frequency(channel, band);
+#endif
+		bss->channel = ieee80211_get_channel(wiphy, freq);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		ie = (u8 *)bss->ies->data;
+		ie_len = bss->ies->len;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#else
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		beacon_interval = bss->beacon_interval;
+
+		CFG80211_PUT_BSS(wiphy, bss);
+	}
+
+	tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+	if (tim) {
+		dtim_period = tim->data[1];
+	} else {
+		/*
+		 * An active scan was done, so the DTIM period could not be
+		 * obtained from the probe response; query the DTIM period
+		 * from the firmware explicitly instead.
+		 */
+		dtim_period = 0;
+		err = wldev_ioctl_get(ndev, WLC_GET_DTIMPRD,
+			&dtim_period, sizeof(dtim_period));
+		if (unlikely(err)) {
+			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+			goto update_bss_info_out;
+		}
+	}
+
+	wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+	wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+	if (unlikely(err)) {
+		WL_ERR(("Failed with error %d\n", err));
+	}
+
+	kfree(buf);
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
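+/* Handle a completed roam: refresh the cached BSSID, channel and IEs and
+ * report the result to cfg80211 via cfg80211_roamed(); on BCM4359 the report
+ * is skipped when we roamed back onto the BSS we were already on.
+ */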
+static s32
+wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+	u8 *curbssid;
+	u32 *channel;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *notify_channel = NULL;
+	u32 freq;
+#ifdef BCM4359_CHIP
+	struct channel_info ci;
+	u32 cur_channel;
+#endif /* BCM4359_CHIP */
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) */
+#if defined(WLADPS_SEAK_AP_WAR) || defined(WBTEXT)
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* WLADPS_SEAK_AP_WAR || WBTEXT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+	struct cfg80211_roam_info roam_info = {};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+
+#ifdef WLADPS_SEAK_AP_WAR
+	BCM_REFERENCE(dhdp);
+#endif /* WLADPS_SEAK_AP_WAR */
+
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+#ifdef BCM4359_CHIP
+	/* Skip calling cfg80211_roamed if the channel is unchanged and
+	 * the new BSSID matches the current BSSID or the last roamed BSSID;
+	 * also clear the roam_timeout timer.
+	 * Only used on BCM4359 devices.
+	 */
+	memset(&ci, 0, sizeof(ci));
+	if ((wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci,
+			sizeof(ci))) < 0) {
+		WL_ERR(("Failed to get current channel !"));
+		return BCME_ERROR;
+	}
+	cur_channel = dtoh32(ci.hw_channel);
+	if ((*channel == cur_channel) && ((memcmp(curbssid, &e->addr,
+		ETHER_ADDR_LEN) == 0) || (memcmp(&cfg->last_roamed_addr,
+		&e->addr, ETHER_ADDR_LEN) == 0))) {
+		WL_ERR(("BSS already present, Skipping roamed event to"
+		" upper layer\n"));
+#ifdef DHD_LOSSLESS_ROAMING
+		wl_del_roam_timeout(cfg);
+#endif  /* DHD_LOSSLESS_ROAMING */
+		return  err;
+	}
+#endif /* BCM4359_CHIP */
+
+	wl_get_assoc_ies(cfg, ndev);
+	wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), WL_PROF_BSSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	wl_update_bss_info(cfg, ndev, true);
+	wl_update_pmklist(ndev, cfg->pmk_list, err);
+
+	channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+	/* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
+	if (*channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	freq = ieee80211_channel_to_frequency(*channel, band->band);
+	notify_channel = ieee80211_get_channel(wiphy, freq);
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) */
+#ifdef WLADPS_SEAK_AP_WAR
+	if ((dhdp->op_mode & DHD_FLAG_STA_MODE) &&
+			(!dhdp->disabled_adps)) {
+		bool find = FALSE;
+		uint8 enable_mode;
+		if (!memcmp(curbssid, (u8*)CAMEO_MAC_PREFIX, MAC_PREFIX_LEN)) {
+			find = wl_find_vndr_ies_specific_vender(cfg, ndev, ATHEROS_OUI);
+		}
+		enable_mode = (find == TRUE) ? OFF : ON;
+		wl_set_adps_mode(cfg, ndev, enable_mode);
+	}
+#endif /* WLADPS_SEAK_AP_WAR */
+	printf("%s succeeded to " MACDBG " (ch:%d)\n", __FUNCTION__,
+		MAC2STRDBG((const u8*)(&e->addr)), *channel);
+	wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+	roam_info.channel = notify_channel;
+	roam_info.bssid = curbssid;
+	roam_info.req_ie = conn_info->req_ie;
+	roam_info.req_ie_len = conn_info->req_ie_len;
+	roam_info.resp_ie = conn_info->resp_ie;
+	roam_info.resp_ie_len = conn_info->resp_ie_len;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+	cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
+#else
+	cfg80211_roamed(ndev,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+		notify_channel,
+#endif
+		curbssid,
+		conn_info->req_ie, conn_info->req_ie_len,
+		conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+	WL_DBG(("Report roaming result\n"));
+
+	memcpy(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN);
+	wl_set_drv_status(cfg, CONNECTED, ndev);
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+	cfg->roam_count++;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+#ifdef WBTEXT
+	if (dhdp->wbtext_support) {
+		/* set wnm_keepalives_max_idle after association */
+		wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
+		/* send nbr request or BTM query to update RCC */
+		wl_cfg80211_wbtext_update_rcc(cfg, ndev);
+	}
+#endif /* WBTEXT */
+
+	return err;
+}
+
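+/* Check that cfg80211 has a BSS cache entry for the profile's SSID/BSSID,
+ * retrying roughly every 100 ms for a short while before giving up.
+ */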
+static bool
+wl_cfg80211_verify_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct cfg80211_bss *bss;
+	struct wiphy *wiphy;
+	struct wlc_ssid *ssid;
+	uint8 *curbssid;
+	int count = 0;
+	int ret = false;
+
+	wiphy = bcmcfg_to_wiphy(cfg);
+	ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	if (!ssid) {
+		WL_ERR(("No SSID found in the saved profile \n"));
+		return false;
+	}
+
+	do {
+		bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+			ssid->SSID, ssid->SSID_len);
+		if (bss || (count > 5)) {
+			break;
+		}
+
+		count++;
+		msleep(100);
+	} while (bss == NULL);
+
+	WL_DBG(("cfg80211 bss_ptr:%p loop_cnt:%d\n", bss, count));
+	if (bss) {
+		/* Update the reference count after use */
+		CFG80211_PUT_BSS(wiphy, bss);
+		ret = true;
+	}
+
+	return ret;
+}
+
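+/* Finish a pending connect attempt: on success cache the association state
+ * and IEs, then report the outcome through cfg80211_connect_result(); if the
+ * BSS is missing from the cfg80211 cache the result is downgraded to failure.
+ */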
+static s32
+wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	s32 err = 0;
+	u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	dhd_pub_t *dhdp;
+
+	dhdp = (dhd_pub_t *)(cfg->pub);
+	BCM_REFERENCE(dhdp);
+
+	if (!sec) {
+		WL_ERR(("sec is NULL\n"));
+		return -ENODEV;
+	}
+	WL_DBG((" enter\n"));
+#ifdef ESCAN_RESULT_PATCH
+	if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+		if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
+			WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n",
+				ntoh32(e->event_type), ntoh32(e->status)));
+			return err;
+		}
+	}
+	if (memcmp(curbssid, broad_bssid, ETHER_ADDR_LEN) == 0 &&
+		memcmp(broad_bssid, connect_req_bssid, ETHER_ADDR_LEN) != 0) {
+		WL_DBG(("copy bssid\n"));
+		memcpy(curbssid, connect_req_bssid, ETHER_ADDR_LEN);
+	}
+
+#else
+	if (cfg->scan_request) {
+		wl_notify_escan_complete(cfg, ndev, true, true);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+		wl_cfg80211_scan_abort(cfg);
+		wl_clr_drv_status(cfg, CONNECTING, ndev);
+		if (completed) {
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet),
+				WL_PROF_BSSID);
+			curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			wl_update_pmklist(ndev, cfg->pmk_list, err);
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+#ifdef WLADPS_SEAK_AP_WAR
+			if ((dhdp->op_mode & DHD_FLAG_STA_MODE) &&
+					(!dhdp->disabled_adps)) {
+				bool find = FALSE;
+				uint8 enable_mode;
+				if (!memcmp(curbssid, (u8*)CAMEO_MAC_PREFIX, MAC_PREFIX_LEN)) {
+					find = wl_find_vndr_ies_specific_vender(cfg,
+						ndev, ATHEROS_OUI);
+				}
+				enable_mode = (find == TRUE) ? OFF : ON;
+				wl_set_adps_mode(cfg, ndev, enable_mode);
+			}
+#endif /* WLADPS_SEAK_AP_WAR */
+			if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+				init_completion(&cfg->iface_disable);
+#else
+				/* reinitialize completion to clear previous count */
+				INIT_COMPLETION(cfg->iface_disable);
+#endif
+			}
+#ifdef CUSTOM_SET_CPUCORE
+			if (wl_get_chan_isvht80(ndev, dhdp)) {
+				if (ndev == bcmcfg_to_prmry_ndev(cfg))
+					dhdp->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
+				else if (is_p2p_group_iface(ndev->ieee80211_ptr))
+					dhdp->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
+				dhd_set_cpucore(dhdp, TRUE);
+			}
+#endif /* CUSTOM_SET_CPUCORE */
+			memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN);
+		}
+
+		if (completed && (wl_cfg80211_verify_bss(cfg, ndev) != true)) {
+			/* If bss entry is not available in the cfg80211 bss cache
+			 * the wireless stack will complain and won't populate
+			 * wdev->current_bss ptr
+			 */
+			WL_ERR(("BSS entry not found. Indicate assoc event failure\n"));
+			completed = false;
+			sec->auth_assoc_res_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+		}
+		if (completed) {
+			wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
+		}
+		cfg80211_connect_result(ndev,
+			curbssid,
+			conn_info->req_ie,
+			conn_info->req_ie_len,
+			conn_info->resp_ie,
+			conn_info->resp_ie_len,
+			completed ? WLAN_STATUS_SUCCESS :
+			(sec->auth_assoc_res_status) ?
+			sec->auth_assoc_res_status :
+			WLAN_STATUS_UNSPECIFIED_FAILURE,
+			GFP_KERNEL);
+		if (completed) {
+			WL_INFORM(("Report connect result - connection succeeded\n"));
+		} else {
+			WL_ERR(("Report connect result - connection failed\n"));
+			wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+				WL_EXT_STATUS_DISCONNECTED, NULL);
+		}
+	}
+#ifdef CONFIG_TCPACK_FASTTX
+	if (wl_get_chan_isvht80(ndev, dhdp))
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 0);
+	else
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 1);
+#endif /* CONFIG_TCPACK_FASTTX */
+
+	return err;
+}
+
+static s32
+wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u16 flags = ntoh16(e->flags);
+	enum nl80211_key_type key_type;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	if (flags & WLC_EVENT_MSG_GROUP)
+		key_type = NL80211_KEYTYPE_GROUP;
+	else
+		key_type = NL80211_KEYTYPE_PAIRWISE;
+
+	cfg80211_michael_mic_failure(ndev, (const u8 *)&e->addr, key_type, -1,
+		NULL, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+
+	return 0;
+}
+
+#ifdef BT_WIFI_HANDOVER
+static s32
+wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u32 event = ntoh32(e->event_type);
+	u32 datalen = ntoh32(e->datalen);
+	s32 err;
+
+	WL_ERR(("wl_notify_bt_wifi_handover_req: event_type : %d, datalen : %d\n", event, datalen));
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	err = wl_genl_send_msg(ndev, event, data, (u16)datalen, 0, 0);
+
+	return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+#ifdef PNO_SUPPORT
+static s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+#ifdef GSCAN_SUPPORT
+	void *ptr;
+	int send_evt_bytes = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* GSCAN_SUPPORT */
+
+	WL_ERR((">>> PNO Event\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef GSCAN_SUPPORT
+	ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+	if (ptr) {
+		wl_cfgvendor_send_async_event(wiphy, ndev,
+			GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+		kfree(ptr);
+	}
+	if (!dhd_dev_is_legacy_pno_enabled(ndev))
+		return 0;
+#endif /* GSCAN_SUPPORT */
+
+
+#ifndef WL_SCHED_SCAN
+	mutex_lock(&cfg->usr_sync);
+	/* TODO: Use cfg80211_sched_scan_results(wiphy); */
+	CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+#else
+	/* If cfg80211 scheduled scan is supported, report the pno results via sched
+	 * scan results
+	 */
+	wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+	return 0;
+}
+#endif /* PNO_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
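+/* GSCAN event demux: translate each PFN/GSCAN firmware event into the
+ * matching Google vendor event (batch results, scan complete, hotlist
+ * found/lost, full scan results, ePNO results).
+ */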
+static s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	void *ptr;
+	int send_evt_bytes = 0;
+	int event_type;
+	struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	u32 len = ntoh32(e->datalen);
+
+	switch (event) {
+		case WLC_E_PFN_BEST_BATCHING:
+			err = dhd_dev_retrieve_batch_scan(ndev);
+			if (err < 0) {
+				WL_ERR(("Batch retrieval already in progress %d\n", err));
+			} else {
+				event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
+				if (data && len) {
+					event_type = *((int *)data);
+				}
+				wl_cfgvendor_send_async_event(wiphy, ndev,
+				    GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+				     &event_type, sizeof(int));
+			}
+			break;
+		case WLC_E_PFN_SCAN_COMPLETE:
+			event_type = WIFI_SCAN_COMPLETE;
+			wl_cfgvendor_send_async_event(wiphy, ndev,
+				GOOGLE_SCAN_COMPLETE_EVENT,
+				&event_type, sizeof(int));
+			break;
+		case WLC_E_PFN_BSSID_NET_FOUND:
+			ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+			      HOTLIST_FOUND);
+			if (ptr) {
+				wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+				 ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+				dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+			} else {
+				err = -ENOMEM;
+			}
+			break;
+		case WLC_E_PFN_BSSID_NET_LOST:
+			/* WLC_E_PFN_BSSID_NET_LOST shares its event code with
+			 * WLC_E_PFN_SCAN_ALLGONE. We currently do not use
+			 * WLC_E_PFN_SCAN_ALLGONE, so ignore it if it arrives.
+			 */
+			if (len) {
+				ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+				            HOTLIST_LOST);
+				if (ptr) {
+					wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+					 ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+					dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+				} else {
+					err = -ENOMEM;
+				}
+			} else {
+				err = -EINVAL;
+			}
+			break;
+		case WLC_E_PFN_GSCAN_FULL_RESULT:
+			ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
+			if (ptr) {
+				wl_cfgvendor_send_async_event(wiphy, ndev,
+				    GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+				kfree(ptr);
+			} else {
+				err = -ENOMEM;
+			}
+			break;
+		case WLC_E_PFN_SSID_EXT:
+			ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+			if (ptr) {
+				wl_cfgvendor_send_async_event(wiphy, ndev,
+				    GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+				kfree(ptr);
+			} else {
+				err = -ENOMEM;
+			}
+			break;
+		default:
+			WL_ERR(("Unknown event %d\n", event));
+			break;
+	}
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
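+/* Legacy (non-escan) scan completion: pull the full result list with
+ * WLC_SCAN_RESULTS, hand it to cfg80211 through wl_inform_bss() and complete
+ * the pending scan request.
+ */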
+static s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct channel_info channel_inform;
+	struct wl_scan_results *bss_list;
+	struct net_device *ndev = NULL;
+	u32 len = WL_SCAN_BUF_MAX;
+	s32 err = 0;
+	unsigned long flags;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	struct cfg80211_scan_info info;
+#endif
+
+	WL_DBG(("Enter \n"));
+	if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+		WL_ERR(("scan is not ready \n"));
+		return err;
+	}
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	memset(&channel_inform, 0, sizeof(channel_inform));
+	err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
+		sizeof(channel_inform));
+	if (unlikely(err)) {
+		WL_ERR(("scan busy (%d)\n", err));
+		goto scan_done_out;
+	}
+	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+	cfg->bss_list = cfg->scan_results;
+	bss_list = cfg->bss_list;
+	memset(bss_list, 0, len);
+	bss_list->buflen = htod32(len);
+	err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
+	if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		err = -EINVAL;
+		goto scan_done_out;
+	}
+	bss_list->buflen = dtoh32(bss_list->buflen);
+	bss_list->version = dtoh32(bss_list->version);
+	bss_list->count = dtoh32(bss_list->count);
+
+	err = wl_inform_bss(cfg);
+
+scan_done_out:
+	del_timer_sync(&cfg->scan_timeout);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+		info.aborted = false;
+		cfg80211_scan_done(cfg->scan_request, &info);
+#else
+		cfg80211_scan_done(cfg->scan_request, false);
+#endif
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	WL_DBG(("cfg80211_scan_done\n"));
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
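+/* Build an 802.11 management frame around the supplied body: allocate a
+ * buffer, fill in the frame control field and DA/SA/BSSID addresses, reserve
+ * room for any fixed fields implied by the frame type and copy the body in.
+ * The caller owns the buffer returned in *pheader; *body_len returns the
+ * total frame length.
+ */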
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody)
+{
+	struct dot11_management_header *hdr;
+	u32 totlen = 0;
+	s32 err = 0;
+	u8 *offset;
+	u32 prebody_len = *body_len;
+	switch (fc) {
+		case FC_ASSOC_REQ:
+			/* capability , listen interval */
+			totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+			break;
+
+		case FC_REASSOC_REQ:
+			/* capability, listen interval, AP address */
+			totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+			break;
+	}
+	totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+	*pheader = kzalloc(totlen, GFP_KERNEL);
+	if (*pheader == NULL) {
+		WL_ERR(("memory alloc failed \n"));
+		return -ENOMEM;
+	}
+	hdr = (struct dot11_management_header *) (*pheader);
+	hdr->fc = htol16(fc);
+	hdr->durid = 0;
+	hdr->seq = 0;
+	offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+	bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+	bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+	bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+	if ((pbody != NULL) && prebody_len)
+		bcopy((const char*)pbody, offset, prebody_len);
+	*body_len = totlen;
+	return err;
+}
+
+
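+/* Wake up whichever action-frame wait is in progress: the common-channel
+ * (afx) search, an in-flight action frame TX, or a listen for the next action
+ * frame, aborting the associated off-channel scan or listen where needed.
+ */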
+void
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 bsscfgidx)
+{
+	s32 err = 0;
+
+	if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+		if (cfg->afx_hdl != NULL) {
+			if (cfg->afx_hdl->dev != NULL) {
+				wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+				wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev);
+			}
+			cfg->afx_hdl->peer_chan = WL_INVALID;
+		}
+		complete(&cfg->act_frm_scan);
+		WL_DBG(("*** Wake UP ** Working afx searching is cleared\n"));
+	} else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) ||
+			wl_get_p2p_status(cfg, ACTION_TX_NOACK)))
+			wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+
+		WL_DBG(("*** Wake UP ** abort actframe iovar on bsscfxidx %d\n", bsscfgidx));
+		/* If the channel is not zero, "actframe" uses an off-channel
+		 * scan, so abort the scan to complete the off-channel work.
+		 */
+		if (cfg->af_sent_channel) {
+			/* Scan engine is not used for sending action frames in the latest driver
+			 * branches. So, actframe_abort is used in the latest driver branches
+			 * instead of scan abort.
+			 * New driver branches:
+			 *     Issue actframe_abort and it succeeded. So, don't execute scan abort.
+			 * Old driver branches:
+			 *     Issue actframe_abort and it fails. So, execute scan abort.
+			 */
+			err = wldev_iovar_setint_bsscfg(ndev, "actframe_abort", 1, bsscfgidx);
+			if (err < 0) {
+				wl_cfg80211_scan_abort(cfg);
+			}
+		}
+	}
+#ifdef WL_CFG80211_SYNC_GON
+	else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		WL_DBG(("*** Wake UP ** abort listen for next af frame\n"));
+		/* So abort scan to cancel listen */
+		wl_cfg80211_scan_abort(cfg);
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+}
+
+#if defined(WLTDLS)
+bool wl_cfg80211_is_tdls_tunneled_frame(void *frame, u32 frame_len)
+{
+	unsigned char *data;
+
+	if (frame == NULL) {
+		WL_ERR(("Invalid frame \n"));
+		return false;
+	}
+
+	if (frame_len < 5) {
+		WL_ERR(("Invalid frame length [%d] \n", frame_len));
+		return false;
+	}
+
+	data = frame;
+
+	if (!memcmp(data, TDLS_TUNNELED_PRB_REQ, 5) ||
+		!memcmp(data, TDLS_TUNNELED_PRB_RESP, 5)) {
+		WL_DBG(("TDLS Vendor Specific Received type\n"));
+		return true;
+	}
+
+	return false;
+}
+#endif /* WLTDLS */
+
+
+int wl_cfg80211_get_ioctl_version(void)
+{
+	return ioctl_version;
+}
+
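+/* Dispatch received management frames: rebuild the 802.11 header for action
+ * frames, run the P2P/TDLS/QoS-map/RRM specific handling, filter probe
+ * requests where required and forward the frame via cfg80211_rx_mgmt().
+ */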
+static s32
+wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct ieee80211_supported_band *band;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ether_addr da;
+	struct ether_addr bssid;
+	bool isfree = false;
+	s32 err = 0;
+	s32 freq;
+#if defined(TDLS_MSG_ONLY_WFD)
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* TDLS_MSG_ONLY_WFD */
+	struct net_device *ndev = NULL;
+	wifi_p2p_pub_act_frame_t *act_frm = NULL;
+	wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+	wl_event_rx_frame_data_t *rxframe;
+	u32 event;
+	u8 *mgmt_frame;
+	u8 bsscfgidx;
+	u32 mgmt_frame_len;
+	u16 channel;
+	if (ntoh32(e->datalen) < sizeof(wl_event_rx_frame_data_t)) {
+		WL_ERR(("wrong datalen:%d\n", ntoh32(e->datalen)));
+		return -EINVAL;
+	}
+	mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+	event = ntoh32(e->event_type);
+	bsscfgidx = e->bsscfgidx;
+	rxframe = (wl_event_rx_frame_data_t *)data;
+	if (!rxframe) {
+		WL_ERR(("rxframe: NULL\n"));
+		return -EINVAL;
+	}
+	channel = (ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK);
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	if ((ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) &&
+		(event == WLC_E_PROBREQ_MSG)) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+		struct net_info *iter, *next;
+		for_each_ndev(cfg, iter, next) {
+			if (iter->ndev && iter->wdev &&
+					iter->wdev->iftype == NL80211_IFTYPE_AP) {
+					ndev = iter->ndev;
+					cfgdev =  ndev_to_cfgdev(ndev);
+					break;
+			}
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	}
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band\n"));
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+	if (event == WLC_E_ACTION_FRAME_RX) {
+		if ((err = wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+			NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx,
+			&cfg->ioctl_buf_sync)) != BCME_OK) {
+			WL_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
+			goto exit;
+		}
+
+		err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+		if (err < 0)
+			 WL_ERR(("WLC_GET_BSSID error %d\n", err));
+		memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+		err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+			&mgmt_frame, &mgmt_frame_len,
+			(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+		if (err < 0) {
+			WL_ERR(("Error in receiving action frame len %d channel %d freq %d\n",
+				mgmt_frame_len, channel, freq));
+			goto exit;
+		}
+		isfree = true;
+		if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			act_frm = (wifi_p2p_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+		} else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			p2p_act_frm = (wifi_p2p_action_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			(void) p2p_act_frm;
+		} else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+
+			sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == sd_act_frm->action) {
+					WL_DBG(("We got a right next frame of SD!(%d)\n",
+						sd_act_frm->action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+				}
+			}
+			(void) sd_act_frm;
+#ifdef WLTDLS
+		} else if ((mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) ||
+				(wl_cfg80211_is_tdls_tunneled_frame(
+				    &mgmt_frame[DOT11_MGMT_HDR_LEN],
+				    mgmt_frame_len - DOT11_MGMT_HDR_LEN))) {
+			if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) {
+				WL_ERR((" TDLS Action Frame Received type = %d \n",
+					mgmt_frame[DOT11_MGMT_HDR_LEN + 1]));
+			}
+#ifdef TDLS_MSG_ONLY_WFD
+			if (!dhdp->tdls_mode) {
+				WL_DBG((" TDLS Frame filtered \n"));
+				return 0;
+			}
+#else
+			if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
+				cfg->tdls_mgmt_frame = mgmt_frame;
+				cfg->tdls_mgmt_frame_len = mgmt_frame_len;
+				cfg->tdls_mgmt_freq = freq;
+				return 0;
+			}
+#endif /* TDLS_MSG_ONLY_WFD */
+#endif /* WLTDLS */
+#ifdef QOS_MAP_SET
+		} else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_QOS) {
+			/* update QoS map set table */
+			bcm_tlv_t * qos_map_ie = NULL;
+			if ((qos_map_ie = bcm_parse_tlvs(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN,
+					DOT11_MNG_QOS_MAP_ID)) != NULL) {
+				WL_DBG((" QoS map set IE found in QoS action frame\n"));
+				if (!cfg->up_table) {
+					cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
+				}
+				wl_set_up_table(cfg->up_table, qos_map_ie);
+			} else {
+				kfree(cfg->up_table);
+				cfg->up_table = NULL;
+			}
+#endif /* QOS_MAP_SET */
+#ifdef WBTEXT
+		} else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_RRM) {
+			/* radio measurement category */
+			switch (mgmt_frame[DOT11_MGMT_HDR_LEN+1]) {
+				case DOT11_RM_ACTION_NR_REP:
+					if (wl_cfg80211_recv_nbr_resp(ndev,
+							&mgmt_frame[DOT11_MGMT_HDR_LEN],
+							mgmt_frame_len - DOT11_MGMT_HDR_LEN)
+							== BCME_OK) {
+						WL_DBG(("RCC updated by nbr response\n"));
+					}
+					break;
+				default:
+					break;
+			}
+#endif /* WBTEXT */
+		} else {
+			/*
+			 * If we got a normal action frame and ndev is p2p0,
+			 * we have to change ndev from p2p0 to wlan0.
+			 */
+
+
+			if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+				u8 action = 0;
+				if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) {
+					WL_DBG(("Received action is not a public action frame\n"));
+				} else if (cfg->next_af_subtype == action) {
+					WL_DBG(("Received action is the awaited action(%d)\n",
+						action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+				}
+			}
+		}
+
+		if (act_frm) {
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == act_frm->subtype) {
+					WL_DBG(("We got a right next frame!(%d)\n",
+						act_frm->subtype));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
+						OSL_SLEEP(20);
+					}
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+				}
+			}
+		}
+
+		wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN, channel);
+		if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) {
+			WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+			wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		}
+	} else if (event == WLC_E_PROBREQ_MSG) {
+
+		/* Handle probe request frames
+		 * (WPS-AP certification 4.2.13)
+		 */
+		struct parsed_ies prbreq_ies;
+		u32 prbreq_ie_len = 0;
+		bool pbc = 0;
+
+		WL_DBG((" Event WLC_E_PROBREQ_MSG received\n"));
+		mgmt_frame = (u8 *)(data);
+		mgmt_frame_len = ntoh32(e->datalen);
+		if (mgmt_frame_len < DOT11_MGMT_HDR_LEN) {
+			WL_ERR(("wrong datalen:%d\n", mgmt_frame_len));
+			return -EINVAL;
+		}
+		prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN;
+
+		/* Parse prob_req IEs */
+		if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			prbreq_ie_len, &prbreq_ies) < 0) {
+			WL_ERR(("Prob req get IEs failed\n"));
+			return 0;
+		}
+		if (prbreq_ies.wps_ie != NULL) {
+			wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
+			WL_DBG((" wps_ie exist pbc = %d\n", pbc));
+			/* if pbc method, send prob_req mgmt frame to upper layer */
+			if (!pbc)
+				return 0;
+		} else
+			return 0;
+	} else {
+		mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+
+		/* wpa_supplicant uses the probe request event to restart another
+		 * GO negotiation request, which causes GON request repetition.
+		 * So if the source address of the probe request matches our target
+		 * device, do not deliver the probe request event while an action
+		 * frame is being sent.
+		 */
+		if (event == WLC_E_P2P_PROBREQ_MSG) {
+			WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ?
+				"WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG"));
+
+
+			/* Filter any P2P probe reqs arriving during the
+			 * GO-NEG Phase
+			 */
+			if (cfg->p2p &&
+#if defined(P2P_IE_MISSING_FIX)
+				cfg->p2p_prb_noti &&
+#endif
+				wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
+				WL_DBG(("Filtering P2P probe_req while "
+					"being in GO-Neg state\n"));
+				return 0;
+			}
+		}
+	}
+
+	if (discover_cfgdev(cfgdev, cfg))
+		WL_DBG(("Rx Management frame For P2P Discovery Interface \n"));
+	else
+		WL_DBG(("Rx Management frame For Iface (%s) \n", ndev->name));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+	cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+	cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+	defined(WL_COMPAT_WIRELESS)
+	cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#else
+	cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 18, 0) */
+
+	WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
+		mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	return err;
+}
+
+#ifdef WL_SCHED_SCAN
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+#define FULL_ESCAN_ON_PFN_NET_FOUND		0
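+/* Turn a PFN "network found" event into a targeted escan request so the
+ * matched networks are re-scanned and reported with full IEs; each match is
+ * also logged to the DHD event ring when that ring is active.
+ */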
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	wl_pfn_net_info_v2_t *netinfo, *pnetinfo;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	int err = 0;
+	struct cfg80211_scan_request *request = NULL;
+	struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+	struct ieee80211_channel *channel = NULL;
+	int channel_req = 0;
+	int band = 0;
+	wl_pfn_scanresults_t *pfn_result = (wl_pfn_scanresults_t *)data;
+	int n_pfn_results = pfn_result->count;
+	log_conn_event_t *event_data = NULL;
+	tlv_log *tlv_data = NULL;
+	u32 alloc_len, tlv_len;
+	u32 payload_len;
+
+	WL_DBG(("Enter\n"));
+
+	if (pfn_result->version != PFN_SCANRESULT_VERSION) {
+		WL_ERR(("Incorrect version %d, expected %d\n", pfn_result->version,
+		       PFN_SCANRESULT_VERSION));
+		return BCME_VERSION;
+	}
+
+	if (e->event_type == WLC_E_PFN_NET_LOST) {
+		WL_PNO(("PFN NET LOST event. Do Nothing\n"));
+		return 0;
+	}
+
+	WL_PNO((">>> PFN NET FOUND event. count:%d \n", n_pfn_results));
+	if (n_pfn_results > 0) {
+		int i;
+
+		if (n_pfn_results > MAX_PFN_LIST_COUNT)
+			n_pfn_results = MAX_PFN_LIST_COUNT;
+		pnetinfo = (wl_pfn_net_info_v2_t *)((char *)data + sizeof(wl_pfn_scanresults_v2_t)
+				- sizeof(wl_pfn_net_info_v2_t));
+
+		memset(&ssid, 0x00, sizeof(ssid));
+
+		request = kzalloc(sizeof(*request)
+			+ sizeof(*request->channels) * n_pfn_results,
+			GFP_KERNEL);
+		channel = (struct ieee80211_channel *)kzalloc(
+			(sizeof(struct ieee80211_channel) * n_pfn_results),
+			GFP_KERNEL);
+		if (!request || !channel) {
+			WL_ERR(("No memory"));
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		request->wiphy = wiphy;
+
+		if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+			alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN + sizeof(uint16) +
+				sizeof(int16);
+			event_data = MALLOC(dhdp->osh, alloc_len);
+			if (!event_data) {
+				WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+					"length(%d)\n", __func__, alloc_len));
+				goto out_err;
+			}
+			tlv_len = 3 * sizeof(tlv_log);
+			event_data->tlvs = MALLOC(dhdp->osh, tlv_len);
+			if (!event_data->tlvs) {
+				WL_ERR(("%s: failed to allocate the tlv_log with "
+					"length(%d)\n", __func__, tlv_len));
+				goto out_err;
+			}
+		}
+
+		for (i = 0; i < n_pfn_results; i++) {
+			netinfo = &pnetinfo[i];
+			if (!netinfo) {
+				WL_ERR(("Invalid netinfo ptr. index:%d", i));
+				err = -EINVAL;
+				goto out_err;
+			}
+			WL_PNO((">>> SSID:%s Channel:%d \n",
+				netinfo->pfnsubnet.u.SSID, netinfo->pfnsubnet.channel));
+			/* The PFN result does not carry all the information the
+			 * supplicant requires (e.g. IEs). Do a targeted escan so that
+			 * sched-scan results are reported via wl_inform_single_bss in
+			 * the required format. Escan needs the request in the form of
+			 * a cfg80211_scan_request, so for the time being build one
+			 * from the received PNO event.
+			 */
+			ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN, netinfo->pfnsubnet.SSID_len);
+			memcpy(ssid[i].ssid, netinfo->pfnsubnet.u.SSID,
+				ssid[i].ssid_len);
+			request->n_ssids++;
+
+			channel_req = netinfo->pfnsubnet.channel;
+			band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+				: NL80211_BAND_5GHZ;
+			channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band);
+			channel[i].band = band;
+			channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+			request->channels[i] = &channel[i];
+			request->n_channels++;
+
+			if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+				payload_len = sizeof(log_conn_event_t);
+				event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+				tlv_data = event_data->tlvs;
+
+				/* ssid */
+				tlv_data->tag = WIFI_TAG_SSID;
+				tlv_data->len = netinfo->pfnsubnet.SSID_len;
+				memcpy(tlv_data->value, ssid[i].ssid, ssid[i].ssid_len);
+				payload_len += TLV_LOG_SIZE(tlv_data);
+				tlv_data = TLV_LOG_NEXT(tlv_data);
+
+				/* channel */
+				tlv_data->tag = WIFI_TAG_CHANNEL;
+				tlv_data->len = sizeof(uint16);
+				memcpy(tlv_data->value, &channel_req, sizeof(uint16));
+				payload_len += TLV_LOG_SIZE(tlv_data);
+				tlv_data = TLV_LOG_NEXT(tlv_data);
+
+				/* rssi */
+				tlv_data->tag = WIFI_TAG_RSSI;
+				tlv_data->len = sizeof(int16);
+				memcpy(tlv_data->value, &netinfo->RSSI, sizeof(int16));
+				payload_len += TLV_LOG_SIZE(tlv_data);
+				tlv_data = TLV_LOG_NEXT(tlv_data);
+
+				dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+					&event_data->event, payload_len);
+			}
+		}
+
+		/* assign parsed ssid array */
+		if (request->n_ssids)
+			request->ssids = &ssid[0];
+
+		if (wl_get_drv_status_all(cfg, SCANNING)) {
+			/* Abort any on-going scan */
+			wl_notify_escan_complete(cfg, ndev, true, true);
+		}
+
+		if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+			WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+			err = wl_cfgp2p_discover_enable_search(cfg, false);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, SCANNING, ndev);
+				goto out_err;
+			}
+			p2p_scan(cfg) = false;
+		}
+
+		wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+		WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+		WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif
+		if (err) {
+			wl_clr_drv_status(cfg, SCANNING, ndev);
+			goto out_err;
+		}
+		DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
+		cfg->sched_scan_running = TRUE;
+	}
+	else {
+		WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+	}
+out_err:
+	if (request)
+		kfree(request);
+	if (channel)
+		kfree(channel);
+
+	if (event_data) {
+		MFREE(dhdp->osh, event_data->tlvs, tlv_len);
+		MFREE(dhdp->osh, event_data, alloc_len);
+	}
+	return err;
+}
+#endif /* WL_SCHED_SCAN */
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+	WL_DBG(("Enter \n"));
+	conf->frag_threshold = (u32)-1;
+	conf->rts_threshold = (u32)-1;
+	conf->retry_short = (u32)-1;
+	conf->retry_long = (u32)-1;
+	conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (profile) {
+		spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+		memset(profile, 0, sizeof(struct wl_profile));
+		spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	} else {
+		WL_ERR(("%s: No profile\n", __FUNCTION__));
+	}
+	return;
+}
+
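+/* Populate the firmware event dispatch table: connect/auth/link events go to
+ * wl_notify_connect_status, roam events to wl_notify_roaming_status, and the
+ * remaining entries map optional features (PNO, GSCAN, TDLS, ...) to their
+ * dedicated handlers.
+ */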
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
+{
+	memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
+
+	cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+	cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+	cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+	cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
+#ifdef PNO_SUPPORT
+	cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+	cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_SSID_EXT] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_GAS_FRAGMENT_RX] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_ROAM_EXP_EVENT] = wl_handle_roam_exp_event;
+#endif /* GSCAN_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+	cfg->evt_handler[WLC_E_RSSI_LQM] = wl_handle_rssi_monitor_event;
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef WLTDLS
+	cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
+#endif /* WLTDLS */
+	cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status;
+#ifdef	WL_RELMCAST
+	cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status;
+#endif /* WL_RELMCAST */
+#ifdef BT_WIFI_HANDOVER
+	cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req;
+#endif
+	cfg->evt_handler[WLC_E_CSA_COMPLETE_IND] = wl_csa_complete_ind;
+	cfg->evt_handler[WLC_E_AP_STARTED] = wl_ap_start_ind;
+#ifdef CUSTOM_EVENT_PM_WAKE
+	cfg->evt_handler[WLC_E_EXCESS_PM_WAKE_EVENT] = wl_check_pmstatus;
+#endif	/* CUSTOM_EVENT_PM_WAKE */
+#if defined(DHD_LOSSLESS_ROAMING) || defined(DBG_PKT_MON)
+	cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status;
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON  */
+#ifdef ENABLE_TEMP_THROTTLING
+	cfg->evt_handler[WLC_E_TEMP_THROTTLE] = wl_check_rx_throttle_status;
+#endif /* ENABLE_TEMP_THROTTLING */
+
+}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+static int
+wl_init_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub,
+		DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
+	if (cfg->escan_info.escan_buf == NULL) {
+		WL_ERR(("Failed to alloc ESCAN_BUF\n"));
+		return -ENOMEM;
+	}
+	bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE);
+
+	return 0;
+}
+
+static void
+wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->escan_info.escan_buf != NULL) {
+		cfg->escan_info.escan_buf = NULL;
+	}
+}
+#endif /* STATIC_WL_PRIV_STRUCT */
+
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	WL_DBG(("Enter \n"));
+
+	cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->scan_results)) {
+		WL_ERR(("Scan results alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+	if (unlikely(!cfg->conf)) {
+		WL_ERR(("wl_conf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->scan_req_int =
+	    (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL);
+	if (unlikely(!cfg->scan_req_int)) {
+		WL_ERR(("Scan req alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->escan_ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->extra_buf)) {
+		WL_ERR(("Extra buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+	if (unlikely(!cfg->pmk_list)) {
+		WL_ERR(("pmk list alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+#if defined(STATIC_WL_PRIV_STRUCT)
+	cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL);
+	if (unlikely(!cfg->conn_info)) {
+		WL_ERR(("cfg->conn_info  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ie = (void *)kzalloc(sizeof(*cfg->ie), GFP_KERNEL);
+	if (unlikely(!cfg->ie)) {
+		WL_ERR(("cfg->ie  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	if (unlikely(wl_init_escan_result_buf(cfg))) {
+		WL_ERR(("Failed to init escan result buf\n"));
+		goto init_priv_mem_out;
+	}
+#endif /* STATIC_WL_PRIV_STRUCT */
+	cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL);
+	if (unlikely(!cfg->afx_hdl)) {
+		WL_ERR(("afx hdl  alloc failed\n"));
+		goto init_priv_mem_out;
+	} else {
+		init_completion(&cfg->act_frm_scan);
+		init_completion(&cfg->wait_next_af);
+
+		INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler);
+	}
+#ifdef WLTDLS
+	if (cfg->tdls_mgmt_frame) {
+		kfree(cfg->tdls_mgmt_frame);
+		cfg->tdls_mgmt_frame = NULL;
+	}
+#endif /* WLTDLS */
+	return 0;
+
+init_priv_mem_out:
+	wl_deinit_priv_mem(cfg);
+
+	return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	kfree(cfg->scan_results);
+	cfg->scan_results = NULL;
+	kfree(cfg->conf);
+	cfg->conf = NULL;
+	kfree(cfg->scan_req_int);
+	cfg->scan_req_int = NULL;
+	kfree(cfg->ioctl_buf);
+	cfg->ioctl_buf = NULL;
+	kfree(cfg->escan_ioctl_buf);
+	cfg->escan_ioctl_buf = NULL;
+	kfree(cfg->extra_buf);
+	cfg->extra_buf = NULL;
+	kfree(cfg->pmk_list);
+	cfg->pmk_list = NULL;
+#if defined(STATIC_WL_PRIV_STRUCT)
+	kfree(cfg->conn_info);
+	cfg->conn_info = NULL;
+	kfree(cfg->ie);
+	cfg->ie = NULL;
+	wl_deinit_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+	if (cfg->afx_hdl) {
+		cancel_work_sync(&cfg->afx_hdl->work);
+		kfree(cfg->afx_hdl);
+		cfg->afx_hdl = NULL;
+	}
+
+}
+
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	WL_DBG(("Enter \n"));
+
+	/* Allocate workqueue for event */
+	if (!cfg->event_workq) {
+		cfg->event_workq = alloc_workqueue("dhd_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+	}
+
+	if (!cfg->event_workq) {
+		WL_ERR(("event_workq alloc_workqueue failed\n"));
+		ret = -ENOMEM;
+	} else {
+		INIT_WORK(&cfg->event_work, wl_event_handler);
+	}
+	return ret;
+}
+
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg)
+{
+	if (cfg && cfg->event_workq) {
+		cancel_work_sync(&cfg->event_work);
+		destroy_workqueue(cfg->event_workq);
+		cfg->event_workq = NULL;
+	}
+}
+
+void wl_terminate_event_handler(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (cfg) {
+		wl_destroy_event_handler(cfg);
+		wl_flush_eq(cfg);
+	}
+}
+
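+/* Scan watchdog: if escan results never complete, log whatever partial
+ * results were received and inject a synthetic WLC_E_ESCAN_RESULT event with
+ * WLC_E_STATUS_TIMEOUT so the pending cfg80211 scan request is finished.
+ */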
+static void wl_scan_timeout(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	unsigned long data
+#endif
+)
+{
+	wl_event_msg_t msg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct bcm_cfg80211 *cfg = from_timer(cfg, t, scan_timeout);
+#else
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+#endif
+	struct wireless_dev *wdev = NULL;
+	struct net_device *ndev = NULL;
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;
+	s32 i;
+	u32 channel;
+#if 0
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	uint32 prev_memdump_mode = dhdp->memdump_enabled;
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+
+	if (!(cfg->scan_request)) {
+		WL_ERR(("timer expired but no scan request\n"));
+		return;
+	}
+
+	bss_list = wl_escan_get_buf(cfg, FALSE);
+	if (!bss_list) {
+		WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
+	} else {
+		WL_ERR(("scanned AP count (%d)\n", bss_list->count));
+
+		bi = next_bss(bss_list, bi);
+		for_each_bss(bss_list, bi, i) {
+			channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+			WL_ERR(("SSID :%s  Channel :%d\n", bi->SSID, channel));
+		}
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	if (cfg->scan_request->dev)
+		wdev = cfg->scan_request->dev->ieee80211_ptr;
+#else
+	wdev = cfg->scan_request->wdev;
+#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
+	if (!wdev) {
+		WL_ERR(("No wireless_dev present\n"));
+		return;
+	}
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+
+	bzero(&msg, sizeof(wl_event_msg_t));
+	WL_ERR(("timer expired\n"));
+#if 0
+	if (dhdp->memdump_enabled) {
+		dhdp->memdump_enabled = DUMP_MEMFILE;
+		dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
+		dhd_bus_mem_dump(dhdp);
+		dhdp->memdump_enabled = prev_memdump_mode;
+	}
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+	msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+	msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+	msg.reason = 0xFFFFFFFF;
+	wl_cfg80211_event(ndev, &msg, NULL);
+#ifdef CUSTOMER_HW4_DEBUG
+	if (!wl_scan_timeout_dbg_enabled)
+		wl_scan_timeout_dbg_set();
+#endif /* CUSTOMER_HW4_DEBUG */
+
+	// terence 20130729: workaround to fix out of memory in firmware
+//	if (dhd_conf_get_chip(dhd_get_pub(ndev)) == BCM43362_CHIP_ID) {
+//		WL_ERR(("Send hang event\n"));
+//		net_os_send_hang_message(ndev);
+//	}
+}
+
+#ifdef DHD_LOSSLESS_ROAMING
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+	/* restore prec_map to ALLPRIO */
+	dhdp->dequeue_prec_map = ALLPRIO;
+	if (timer_pending(&cfg->roam_timeout)) {
+		del_timer_sync(&cfg->roam_timeout);
+	}
+
+}
+
+static void wl_roam_timeout(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	unsigned long data
+#endif
+)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct bcm_cfg80211 *cfg = from_timer(cfg, t, roam_timeout);
+#else
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+#endif
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+	WL_ERR(("roam timer expired\n"));
+
+	/* restore prec_map to ALLPRIO */
+	dhdp->dequeue_prec_map = ALLPRIO;
+}
+
+#endif /* DHD_LOSSLESS_ROAMING */
+
+static s32
+wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
+	unsigned long state, void *ptr)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+	struct net_device *dev = ptr;
+#else
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
+	struct wireless_dev *wdev = ndev_to_wdev(dev);
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	WL_DBG(("Enter \n"));
+
+	if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg))
+		return NOTIFY_DONE;
+
+	switch (state) {
+		case NETDEV_DOWN:
+		{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+			int max_wait_timeout = 2;
+			int max_wait_count = 100;
+			int refcnt = 0;
+			unsigned long limit = jiffies + max_wait_timeout * HZ;
+			while (work_pending(&wdev->cleanup_work)) {
+				if (refcnt%5 == 0) {
+					WL_ERR(("[NETDEV_DOWN] waiting for"
+						" cleanup_work to complete"
+						" (%d th)\n", refcnt));
+				}
+				if (!time_before(jiffies, limit)) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d sec\n",
+						max_wait_timeout));
+					break;
+				}
+				if (refcnt >= max_wait_count) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d loop\n",
+						max_wait_count));
+					break;
+				}
+				set_current_state(TASK_INTERRUPTIBLE);
+				(void)schedule_timeout(100);
+				set_current_state(TASK_RUNNING);
+				refcnt++;
+			}
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
+			break;
+		}
+		case NETDEV_UNREGISTER:
+			/* after calling list_del_rcu(&wdev->list) */
+			wl_cfg80211_clear_per_bss_ies(cfg,
+				wl_get_bssidx_by_wdev(cfg, wdev));
+			wl_dealloc_netinfo_by_wdev(cfg, wdev);
+			break;
+		case NETDEV_GOING_DOWN:
+			/*
+			 * At NETDEV_DOWN, wdev_cleanup_work will run. Before it does,
+			 * check whether a scan is still in progress: if so,
+			 * wdev_cleanup_work would hit WARN_ON and force the scan to
+			 * finish, so abort and complete the scan here first.
+			 */
+			if (wl_get_drv_status(cfg, SCANNING, dev))
+				wl_notify_escan_complete(cfg, dev, true, true);
+			break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+	.notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+
+/*
+ * to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool wl_cfg80211_netdev_notifier_registered = FALSE;
+
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = NULL;
+	struct net_device *ndev = NULL;
+
+	if (!cfg->scan_request)
+		return;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	if (cfg->scan_request->dev)
+		wdev = cfg->scan_request->dev->ieee80211_ptr;
+#else
+	wdev = cfg->scan_request->wdev;
+#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
+
+	if (!wdev) {
+		WL_ERR(("No wireless_dev present\n"));
+		return;
+	}
+
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+	wl_notify_escan_complete(cfg, ndev, true, true);
+	WL_ERR(("Scan aborted! \n"));
+}
+
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+{
+	wl_scan_params_t *params = NULL;
+	s32 params_size = 0;
+	s32 err = BCME_OK;
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	if (!in_atomic()) {
+		/* Our scan params only need space for 1 channel and 0 ssids */
+		params = wl_cfg80211_scan_alloc_params(cfg, -1, 0, &params_size);
+		if (params == NULL) {
+			WL_ERR(("scan params allocation failed \n"));
+			err = -ENOMEM;
+		} else {
+			/* Do a scan abort to stop the driver's scan engine */
+			err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
+			if (err < 0) {
+				WL_ERR(("scan abort failed\n"));
+			}
+			kfree(params);
+		}
+	}
+#ifdef WLTDLS
+	if (cfg->tdls_mgmt_frame) {
+		kfree(cfg->tdls_mgmt_frame);
+		cfg->tdls_mgmt_frame = NULL;
+	}
+#endif /* WLTDLS */
+}
+
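+/*
+ * Common scan-completion path, serialized by cfg->scan_complete. Optionally
+ * aborts the scan still running in firmware, stops the scan watchdog timer,
+ * pushes cached BSS entries to cfg80211, reports pending scheduled-scan
+ * results, and finally calls cfg80211_scan_done() with the aborted flag
+ * before clearing the SCANNING state.
+ */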
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev,
+	bool aborted, bool fw_abort)
+{
+	s32 err = BCME_OK;
+	unsigned long flags;
+	struct net_device *dev;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	struct cfg80211_scan_info info;
+	info.aborted = aborted;
+#endif
+
+	WL_DBG(("Enter \n"));
+	BCM_REFERENCE(dhdp);
+
+	mutex_lock(&cfg->scan_complete);
+
+	if (!ndev) {
+		WL_ERR(("ndev is null\n"));
+		err = BCME_ERROR;
+		goto out;
+	}
+
+	if (cfg->escan_info.ndev != ndev) {
+		WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev));
+		err = BCME_ERROR;
+		goto out;
+	}
+
+	if (cfg->scan_request) {
+		dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+		if (cfg->scan_request->dev != cfg->p2p_net)
+			dev = cfg->scan_request->dev;
+#elif defined(WL_CFG80211_P2P_DEV_IF)
+		if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+			dev = cfg->scan_request->wdev->netdev;
+#endif /* WL_ENABLE_P2P_IF */
+	}
+	else {
+		WL_DBG(("cfg->scan_request is NULL, may be an internal scan;"
+			" doing scan_abort for ndev %p primary %p",
+				ndev, bcmcfg_to_prmry_ndev(cfg)));
+		dev = ndev;
+	}
+	if (fw_abort && !in_atomic())
+		wl_cfg80211_scan_abort(cfg);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+#if defined(ESCAN_RESULT_PATCH)
+	if (likely(cfg->scan_request)) {
+		cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+		wl_inform_bss(cfg);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req && !cfg->scan_request) {
+		WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n"));
+		if (!aborted) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+			cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy, 0);
+#else
+			cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+		}
+
+		DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
+		cfg->sched_scan_running = FALSE;
+	}
+#endif /* WL_SCHED_SCAN */
+	if (likely(cfg->scan_request)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+		cfg80211_scan_done(cfg->scan_request, &info);
+#else
+		cfg80211_scan_done(cfg->scan_request, aborted);
+#endif
+		cfg->scan_request = NULL;
+		DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+		DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+	}
+	if (p2p_is_on(cfg))
+		wl_clr_p2p_status(cfg, SCANNING);
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+out:
+	mutex_unlock(&cfg->scan_complete);
+	return err;
+}
+
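+/*
+ * Escan buffer overflow management: track the weakest entries currently in
+ * the scan cache as removal candidates, then evict matching low-RSSI BSS
+ * entries to make room when a new, stronger result does not fit.
+ */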
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#ifndef WL_DRV_AVOID_SCANCACHE
+static void
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+{
+	int idx;
+	for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+		int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+		if (bss->RSSI < candidate[idx].RSSI) {
+			if (len)
+				memcpy(&candidate[idx + 1], &candidate[idx],
+					sizeof(removal_element_t) * len);
+			candidate[idx].RSSI = bss->RSSI;
+			candidate[idx].length = bss->length;
+			memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN);
+			return;
+		}
+	}
+}
+
+static void
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+	wl_bss_info_t *bi)
+{
+	int idx1, idx2;
+	int total_delete_len = 0;
+	for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+		int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+		wl_bss_info_t *bss = NULL;
+		if (candidate[idx1].RSSI >= bi->RSSI)
+			continue;
+		for (idx2 = 0; idx2 < list->count; idx2++) {
+			bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+				list->bss_info;
+			if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+				candidate[idx1].RSSI == bss->RSSI &&
+				candidate[idx1].length == dtoh32(bss->length)) {
+				u32 delete_len = dtoh32(bss->length);
+				WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
+					MAC2STRDBG(bss->BSSID.octet)));
+				if (idx2 < list->count -1) {
+					memmove((u8 *)bss, (u8 *)bss + delete_len,
+						list->buflen - cur_len - delete_len);
+				}
+				list->buflen -= delete_len;
+				list->count--;
+				total_delete_len += delete_len;
+				/* if delete_len is greater than or equal to result length */
+				if (total_delete_len >= bi->length) {
+					return;
+				}
+				break;
+			}
+			cur_len += dtoh32(bss->length);
+		}
+	}
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+#ifdef WL_DRV_AVOID_SCANCACHE
+static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
+		u32 bi_length)
+{
+	u32 ret;
+	u8 *p2p_dev_addr = NULL;
+
+	ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
+	if (!ret) {
+		return ret;
+	}
+	if (status == WLC_E_STATUS_PARTIAL) {
+		p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+		if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+			cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+			s32 channel = wf_chspec_ctlchan(
+				wl_chspec_driver_to_host(bi->chanspec));
+
+			if ((channel > MAXCHANNEL) || (channel <= 0))
+				channel = WL_INVALID;
+			else
+				WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+					" channel : %d\n",
+					MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+					channel));
+
+			wl_clr_p2p_status(cfg, SCANNING);
+			cfg->afx_hdl->peer_chan = channel;
+			complete(&cfg->act_frm_scan);
+		}
+	} else {
+		WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+		wl_clr_p2p_status(cfg, SCANNING);
+		wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+		if (cfg->afx_hdl->peer_chan == WL_INVALID)
+			complete(&cfg->act_frm_scan);
+	}
+
+	return ret;
+}
+
+static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
+	struct net_device *ndev, const wl_event_msg_t *e, s32 status)
+{
+	s32 err = BCME_OK;
+	wl_bss_info_t *bi;
+	u32 bi_length;
+	bool aborted = false;
+	bool fw_abort = false;
+	bool notify_escan_complete = false;
+
+	if (wl_escan_check_sync_id(status, escan_result->sync_id,
+		cfg->escan_info.cur_sync_id) < 0) {
+		goto exit;
+	}
+
+	wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+
+	if ((status != WLC_E_STATUS_TIMEOUT) && (status != WLC_E_STATUS_PARTIAL)) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	}
+
+	if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+		notify_escan_complete = true;
+	}
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_INFORM(("WLC_E_STATUS_PARTIAL \n"));
+		DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+		if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
+			WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
+			goto exit;
+		}
+
+		bi = escan_result->bss_info;
+		bi_length = dtoh32(bi->length);
+		if ((!bi) ||
+		(bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE))) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)"
+				" or invalid bss_info length\n"));
+			goto exit;
+		}
+
+		if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_DBG(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
+			goto exit;
+		} else {
+			if (scan_req_match(cfg)) {
+				/* p2p scan && allow only probe response */
+				if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+					(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+					goto exit;
+			}
+			err = wl_inform_single_bss(cfg, bi, false);
+
+			/*
+			 * !Broadcast && number of ssid = 1 && number of channels =1
+			 * means specific scan to association
+			 */
+			if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+				WL_ERR(("P2P assoc scan fast aborted.\n"));
+				aborted = false;
+				fw_abort = true;
+			}
+			/* Directly exit from function here and
+			* avoid sending notify completion to cfg80211
+			*/
+			goto exit;
+		}
+	} else if (status == WLC_E_STATUS_SUCCESS) {
+		if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+			goto exit;
+		}
+		WL_INFORM(("ESCAN COMPLETED\n"));
+		DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+
+		/* Update escan complete status */
+		aborted = false;
+		fw_abort = false;
+
+#ifdef CUSTOMER_HW4_DEBUG
+		if (wl_scan_timeout_dbg_enabled)
+			wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+	} else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+		(status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+		(status == WLC_E_STATUS_NEWASSOC)) {
+		/* Handle all cases of scan abort */
+
+		WL_DBG(("ESCAN ABORT reason: %d\n", status));
+		if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+			goto exit;
+		}
+		WL_INFORM(("ESCAN ABORTED\n"));
+
+		/* Update escan complete status */
+		aborted = true;
+		fw_abort = false;
+
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("reason[0x%x]\n", e->reason));
+		if (e->reason == 0xFFFFFFFF) {
+			/* Update escan complete status */
+			aborted = true;
+			fw_abort = true;
+		}
+	} else {
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+
+		if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+			goto exit;
+		}
+		/* Update escan complete status */
+		aborted = true;
+		fw_abort = false;
+	}
+
+	/* Notify escan complete status */
+	if (notify_escan_complete) {
+		wl_notify_escan_complete(cfg, ndev, aborted, fw_abort);
+	}
+
+exit:
+	return err;
+
+}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+
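+/*
+ * WLC_E_ESCAN_RESULT event handler, serialized by cfg->usr_sync. Partial
+ * results are validated and merged into the local escan buffer (duplicate
+ * BSS entries are collapsed, keeping the better RSSI sample), while the
+ * success/abort/timeout statuses push the accumulated results to cfg80211
+ * via wl_notify_escan_complete(). With WL_DRV_AVOID_SCANCACHE defined the
+ * work is delegated to wl_escan_without_scan_cache() instead.
+ */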
+static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_escan_result_t *escan_result;
+	struct net_device *ndev = NULL;
+#ifndef WL_DRV_AVOID_SCANCACHE
+	wl_bss_info_t *bi;
+	u32 bi_length;
+	wifi_p2p_ie_t * p2p_ie;
+	u8 *p2p_dev_addr = NULL;
+	wl_scan_results_t *list;
+	wl_bss_info_t *bss = NULL;
+	u32 i;
+#endif /* WL_DRV_AVOID_SCANCACHE */
+	u16 channel;
+	struct ieee80211_supported_band *band;
+
+	WL_DBG((" enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	/* P2P SCAN is coming from primary interface */
+	if (wl_get_p2p_status(cfg, SCANNING)) {
+		if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+			ndev = cfg->afx_hdl->dev;
+		else
+			ndev = cfg->escan_info.ndev;
+
+	}
+	if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+		WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_states %d\n",
+			ndev, wl_get_drv_status(cfg, SCANNING, ndev),
+			ntoh32(e->event_type), ntoh32(e->status)));
+		goto exit;
+	}
+	escan_result = (wl_escan_result_t *)data;
+
+#ifndef WL_DRV_AVOID_SCANCACHE
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_INFORM(("WLC_E_STATUS_PARTIAL \n"));
+		DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+		if (!escan_result) {
+			WL_ERR(("Invalid escan result (NULL pointer)\n"));
+			goto exit;
+		}
+		if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
+		    (dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
+			WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
+			goto exit;
+		}
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+
+		/* +++++ terence 20130524: skip invalid bss */
+		channel =
+			bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+		if (channel <= CH_MAX_2G_CHANNEL)
+			band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+		else
+			band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_5GHZ];
+		if (!band) {
+			WL_ERR(("No valid band\n"));
+			goto exit;
+		}
+		if (!dhd_conf_match_channel(cfg->pub, channel))
+			goto exit;
+		/* ----- terence 20130524: skip invalid bss */
+
+		if (wl_escan_check_sync_id(status, escan_result->sync_id,
+				cfg->escan_info.cur_sync_id) < 0)
+			goto exit;
+
+		if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_DBG(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+			if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+				cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+				s32 channel = wf_chspec_ctlchan(
+					wl_chspec_driver_to_host(bi->chanspec));
+
+				if ((channel > MAXCHANNEL) || (channel <= 0))
+					channel = WL_INVALID;
+				else
+					WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+						" channel : %d\n",
+						MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+						channel));
+
+				wl_clr_p2p_status(cfg, SCANNING);
+				cfg->afx_hdl->peer_chan = channel;
+				complete(&cfg->act_frm_scan);
+				goto exit;
+			}
+
+		} else {
+			int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+			removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+			int remove_lower_rssi = FALSE;
+
+			bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+			list = wl_escan_get_buf(cfg, FALSE);
+			if (scan_req_match(cfg)) {
+				/* p2p scan && allow only probe response */
+				if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+					(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+					goto exit;
+				if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+					bi->ie_length)) == NULL) {
+						WL_ERR(("Couldn't find P2PIE in probe"
+							" response/beacon\n"));
+						goto exit;
+				}
+			}
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+				remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+			WL_SCAN(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID,
+				MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length));
+			for (i = 0; i < list->count; i++) {
+				bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+					: list->bss_info;
+				if (!bss) {
+					WL_ERR(("bss is NULL\n"));
+					goto exit;
+				}
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+				WL_SCAN(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+					bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+					i, bss->RSSI, list->count));
+
+				if (remove_lower_rssi)
+					wl_cfg80211_find_removal_candidate(bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+					(CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+					== CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+					bi->SSID_len == bss->SSID_len &&
+					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+					/* do not allow beacon data to update
+					 * the data received from a probe response
+					 */
+					if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+						(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+						goto exit;
+
+					WL_SCAN(("%s("MACDBG"), i=%d prev: RSSI %d"
+						" flags 0x%x, new: RSSI %d flags 0x%x\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+						bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+					if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+						/* preserve max RSSI if the measurements are
+						* both on-channel or both off-channel
+						*/
+						WL_SCAN(("%s("MACDBG"), same onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+					} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+						/* preserve the on-channel rssi measurement
+						* if the new measurement is off channel
+						*/
+						WL_SCAN(("%s("MACDBG"), prev onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = bss->RSSI;
+						bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+					}
+					if (dtoh32(bss->length) != bi_length) {
+						u32 prev_len = dtoh32(bss->length);
+
+						WL_SCAN(("bss info replacement"
+							" occurred (bcast:%d -> probresp:%d)\n",
+							bss->ie_length, bi->ie_length));
+						WL_SCAN(("%s("MACDBG"), replacement!(%d -> %d)\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						prev_len, bi_length));
+
+						if (list->buflen - prev_len + bi_length
+							> ESCAN_BUF_SIZE) {
+							WL_ERR(("Buffer is too small: keep the"
+								" previous result of this AP\n"));
+							/* Only update RSSI */
+							bss->RSSI = bi->RSSI;
+							bss->flags |= (bi->flags
+								& WL_BSS_FLAGS_RSSI_ONCHANNEL);
+							goto exit;
+						}
+
+						if (i < list->count - 1) {
+							/* memory copy required by this case only */
+							memmove((u8 *)bss + bi_length,
+								(u8 *)bss + prev_len,
+								list->buflen - cur_len - prev_len);
+						}
+						list->buflen -= prev_len;
+						list->buflen += bi_length;
+					}
+					list->version = dtoh32(bi->version);
+					memcpy((u8 *)bss, (u8 *)bi, bi_length);
+					goto exit;
+				}
+				cur_len += dtoh32(bss->length);
+			}
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+				wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+				if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+					WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
+						MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+					goto exit;
+				}
+#else
+				WL_ERR(("Buffer is too small: ignoring\n"));
+				goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+			}
+
+			memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
+			list->version = dtoh32(bi->version);
+			list->buflen += bi_length;
+			list->count++;
+
+			/*
+			 * !Broadcast && number of ssid = 1 && number of channels =1
+			 * means specific scan to association
+			 */
+			if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+				WL_ERR(("P2P assoc scan fast aborted.\n"));
+				wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
+				goto exit;
+			}
+		}
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
+			escan_result->sync_id);
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM(("ESCAN COMPLETED\n"));
+			DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+			cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+			if (!scan_req_match(cfg)) {
+				WL_SCAN(("SCAN COMPLETED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, false, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+#ifdef CUSTOMER_HW4_DEBUG
+		if (wl_scan_timeout_dbg_enabled)
+			wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+	} else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+		(status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+		(status == WLC_E_STATUS_NEWASSOC)) {
+		/* Handle all cases of scan abort */
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		WL_DBG(("ESCAN ABORT reason: %d\n", status));
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			wl_clr_p2p_status(cfg, SCANNING);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM(("ESCAN ABORTED\n"));
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		} else {
+			/* If there is no pending host initiated scan, do nothing */
+			WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("reason[0x%x]\n", e->reason));
+		if (e->reason == 0xFFFFFFFF) {
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+		}
+	} else {
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+					"scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, 2);
+	}
+#else /* WL_DRV_AVOID_SCANCACHE */
+	err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
+#endif /* WL_DRV_AVOID_SCANCACHE */
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
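+/*
+ * With WL_ROAM_OFF_ON_CONCURRENT set, disable firmware roaming ("roam_off")
+ * on all STATION interfaces while a P2P connection runs concurrently with
+ * another connection, and restore it once the concurrency ends.
+ */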
+static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
+{
+	u32 connected_cnt  = wl_get_drv_status_all(cfg, CONNECTED);
+	bool p2p_connected  = wl_cfgp2p_vif_created(cfg);
+	struct net_info *iter, *next;
+
+	if (!(cfg->roam_flags & WL_ROAM_OFF_ON_CONCURRENT))
+		return;
+
+	WL_DBG(("roam off:%d p2p_connected:%d connected_cnt:%d \n",
+		enable, p2p_connected, connected_cnt));
+	/* Disable FW roam when we have a concurrent P2P connection */
+	if (enable && p2p_connected && connected_cnt > 1) {
+
+		/* Mark it as to be reverted */
+		cfg->roam_flags |= WL_ROAM_REVERT_STATUS;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+		for_each_ndev(cfg, iter, next) {
+			if (iter->ndev && iter->wdev &&
+					iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+				if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
+						== BCME_OK) {
+					iter->roam_off = TRUE;
+				}
+				else {
+					WL_ERR(("error to enable roam_off\n"));
+				}
+			}
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	}
+	else if (!enable && (cfg->roam_flags & WL_ROAM_REVERT_STATUS)) {
+		cfg->roam_flags &= ~WL_ROAM_REVERT_STATUS;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+		for_each_ndev(cfg, iter, next) {
+			if (iter->ndev && iter->wdev &&
+					iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+				if (iter->roam_off != WL_INVALID) {
+					if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE)
+							== BCME_OK) {
+						iter->roam_off = FALSE;
+					}
+					else {
+						WL_ERR(("error to disable roam_off\n"));
+					}
+				}
+			}
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	}
+
+	return;
+}
+
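+/*
+ * Walk all connected interfaces, read their control channel from the
+ * "chanspec" iovar and flag VSDB (multi-channel concurrency) when two
+ * connected interfaces operate on different control channels.
+ */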
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *iter, *next;
+	u32 ctl_chan = 0;
+	u32 chanspec = 0;
+	u32 pre_ctl_chan = 0;
+	u32 connected_cnt  = wl_get_drv_status_all(cfg, CONNECTED);
+	cfg->vsdb_mode = false;
+
+	if (connected_cnt <= 1)  {
+		return;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	for_each_ndev(cfg, iter, next) {
+		/* p2p discovery iface ndev could be null */
+		if (iter->ndev) {
+			chanspec = 0;
+			ctl_chan = 0;
+			if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+				if (wldev_iovar_getint(iter->ndev, "chanspec",
+					(s32 *)&chanspec) == BCME_OK) {
+					chanspec = wl_chspec_driver_to_host(chanspec);
+					ctl_chan = wf_chspec_ctlchan(chanspec);
+					wl_update_prof(cfg, iter->ndev, NULL,
+						&ctl_chan, WL_PROF_CHAN);
+				}
+				if (!cfg->vsdb_mode) {
+					if (!pre_ctl_chan && ctl_chan)
+						pre_ctl_chan = ctl_chan;
+					else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
+						cfg->vsdb_mode = true;
+					}
+				}
+			}
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	printf("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
+	return;
+}
+
+#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
+extern int g_frameburst;
+#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
+
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set)
+{
+	s32 pm = PM_FAST;
+	s32 err = BCME_OK;
+	u32 mode;
+	u32 chan = 0;
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+	dhd_pub_t *dhd = cfg->pub;
+#ifdef RTT_SUPPORT
+	rtt_status_info_t *rtt_status;
+#endif /* RTT_SUPPORT */
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__));
+		return 0;
+	}
+	WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
+		state, set, _net_info->pm_restore, _net_info->ndev->name));
+
+	if (state != WL_STATUS_CONNECTED)
+		return 0;
+	mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
+	if (set) {
+		wl_cfg80211_concurrent_roam(cfg, 1);
+		wl_cfg80211_determine_vsdb_mode(cfg);
+		if (mode == WL_MODE_AP) {
+			if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
+				WL_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+		}
+		pm = PM_OFF;
+		if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+				sizeof(pm))) != 0) {
+			if (err == -ENODEV)
+				WL_DBG(("%s:netdev not ready\n",
+					_net_info->ndev->name));
+			else
+				WL_ERR(("%s:error (%d)\n",
+					_net_info->ndev->name, err));
+
+			wl_cfg80211_update_power_mode(_net_info->ndev);
+		}
+		wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT);
+
+	} else { /* clear */
+		chan = 0;
+		/* clear chan information when the net device is disconnected */
+		wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
+		wl_cfg80211_determine_vsdb_mode(cfg);
+		if (primary_dev == _net_info->ndev) {
+			pm = PM_FAST;
+#ifdef RTT_SUPPORT
+			rtt_status = GET_RTTSTATE(dhd);
+			if (rtt_status->status != RTT_ENABLED) {
+#endif /* RTT_SUPPORT */
+				if (dhd_conf_get_pm(dhd) >= 0)
+					pm = dhd_conf_get_pm(dhd);
+				if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+						sizeof(pm))) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n",
+							_net_info->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n",
+							_net_info->ndev->name, err));
+
+					wl_cfg80211_update_power_mode(_net_info->ndev);
+				}
+#ifdef RTT_SUPPORT
+			}
+#endif /* RTT_SUPPORT */
+		}
+		wl_cfg80211_concurrent_roam(cfg, 0);
+
+	}
+	return err;
+}
+
+static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
+{
+	int err = 0;
+
+	cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	wl_escan_init_sync_id(cfg);
+
+	/* Init scan_timeout timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	timer_setup(&cfg->scan_timeout, wl_scan_timeout, 0);
+#else
+	init_timer(&cfg->scan_timeout);
+	cfg->scan_timeout.data = (unsigned long) cfg;
+	cfg->scan_timeout.function = wl_scan_timeout;
+#endif
+
+	return err;
+}
+
+#ifdef DHD_LOSSLESS_ROAMING
+static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
+{
+	int err = 0;
+
+	/* Init roam timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	timer_setup(&cfg->roam_timeout, wl_roam_timeout, 0);
+#else
+	init_timer(&cfg->roam_timeout);
+	cfg->roam_timeout.data = (unsigned long) cfg;
+	cfg->roam_timeout.function = wl_roam_timeout;
+#endif
+
+	return err;
+}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	cfg->scan_request = NULL;
+	cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+#ifdef DISABLE_BUILTIN_ROAM
+	cfg->roam_on = false;
+#else
+	cfg->roam_on = true;
+#endif /* DISABLE_BUILTIN_ROAM */
+	cfg->active_scan = true;
+	cfg->rf_blocked = false;
+	cfg->vsdb_mode = false;
+#if defined(BCMSDIO) || defined(BCMDBUS)
+	cfg->wlfc_on = false;
+#endif /* BCMSDIO || BCMDBUS */
+	cfg->roam_flags |= WL_ROAM_OFF_ON_CONCURRENT;
+	cfg->disable_roam_event = false;
+	/* register interested state */
+	set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+	spin_lock_init(&cfg->cfgdrv_lock);
+	mutex_init(&cfg->ioctl_buf_sync);
+	init_waitqueue_head(&cfg->netif_change_event);
+	init_waitqueue_head(&cfg->wps_done_event);
+	init_completion(&cfg->send_af_done);
+	init_completion(&cfg->iface_disable);
+	mutex_init(&cfg->usr_sync);
+	mutex_init(&cfg->event_sync);
+	mutex_init(&cfg->scan_complete);
+	mutex_init(&cfg->if_sync);
+	mutex_init(&cfg->pm_sync);
+	mutex_init(&cfg->in4way_sync);
+#ifdef WLTDLS
+	mutex_init(&cfg->tdls_sync);
+#endif /* WLTDLS */
+	wl_init_eq(cfg);
+	err = wl_init_priv_mem(cfg);
+	if (err)
+		return err;
+	if (wl_create_event_handler(cfg))
+		return -ENOMEM;
+	wl_init_event_handler(cfg);
+	err = wl_init_scan(cfg);
+	if (err)
+		return err;
+#ifdef DHD_LOSSLESS_ROAMING
+	err = wl_init_roam_timeout(cfg);
+	if (err) {
+		return err;
+	}
+#endif /* DHD_LOSSLESS_ROAMING */
+	wl_init_conf(cfg->conf);
+	wl_init_prof(cfg, ndev);
+	wl_link_down(cfg);
+	DNGL_FUNC(dhd_cfg80211_init, (cfg));
+#ifdef NAN_DP
+	cfg->nan_dp_state = NAN_DP_STATE_DISABLED;
+	init_waitqueue_head(&cfg->ndp_if_change_event);
+#endif /* NAN_DP */
+	return err;
+}
+
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+	wl_destroy_event_handler(cfg);
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	if (cfg->scan_timeout.function)
+		del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+	if (cfg->roam_timeout.function)
+		del_timer_sync(&cfg->roam_timeout);
+#endif
+	wl_deinit_priv_mem(cfg);
+	if (wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = FALSE;
+		unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+	}
+}
+
+#if defined(WL_ENABLE_P2P_IF)
+static s32 wl_cfg80211_attach_p2p(struct bcm_cfg80211 *cfg)
+{
+	WL_TRACE(("Enter \n"));
+
+	if (wl_cfgp2p_register_ndev(cfg) < 0) {
+		WL_ERR(("P2P attach failed. \n"));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static s32  wl_cfg80211_detach_p2p(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+	else {
+		wdev = cfg->p2p_wdev;
+		if (!wdev) {
+			WL_ERR(("Invalid Ptr\n"));
+			return -EINVAL;
+		}
+	}
+
+	wl_cfgp2p_unregister_ndev(cfg);
+
+	cfg->p2p_wdev = NULL;
+	cfg->p2p_net = NULL;
+	WL_DBG(("Freeing 0x%p \n", wdev));
+	kfree(wdev);
+
+	return 0;
+}
+#endif 
+
+static s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+	struct bcm_cfg80211 * cfg;
+	s32 err = 0;
+	s32 ret = 0;
+	WL_TRACE(("In\n"));
+	if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	cfg = wl_get_cfg(ndev);
+	if (unlikely(!cfg)) {
+		WL_ERR(("cfg is invalid\n"));
+		return -EINVAL;
+	}
+	if (!wl_get_drv_status(cfg, READY, ndev)) {
+		if (cfg->wdev) {
+			ret = wl_cfgp2p_supported(cfg, ndev);
+			if (ret > 0) {
+#if !defined(WL_ENABLE_P2P_IF)
+				cfg->wdev->wiphy->interface_modes |=
+					(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_ENABLE_P2P_IF */
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+
+#if defined(WL_ENABLE_P2P_IF)
+				if (cfg->p2p_net) {
+					/* Update MAC addr for p2p0 interface here. */
+					memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+					cfg->p2p_net->dev_addr[0] |= 0x02;
+					printf("%s: p2p_dev_addr="MACDBG "\n",
+						cfg->p2p_net->name,
+						MAC2STRDBG(cfg->p2p_net->dev_addr));
+				} else {
+					WL_ERR(("p2p_net not yet populated."
+					" Couldn't update the MAC Address for p2p0 \n"));
+					return -ENODEV;
+				}
+#endif /* WL_ENABLE_P2P_IF */
+				cfg->p2p_supported = true;
+			} else if (ret == 0) {
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+			} else {
+				/* SDIO bus timeout */
+				err = -ENODEV;
+				goto fail;
+			}
+		}
+	}
+	wl_set_drv_status(cfg, READY, ndev);
+fail:
+	return err;
+}
+
+struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev)
+{
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+	if (!wdev || !wdev->wiphy)
+		return NULL;
+
+	return wiphy_priv(wdev->wiphy);
+}
+
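+/*
+ * Main cfg80211 attach path: allocate the wireless_dev, set up the wiphy,
+ * initialize the driver private state (event queue, scan and roam timers,
+ * work items), allocate the primary net_info, and register rfkill, debugfs,
+ * the netdev notifier, btcoex and (optionally) the P2P interface. Any
+ * failure unwinds through wl_cfg80211_detach().
+ */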
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
+{
+	struct wireless_dev *wdev;
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	struct device *dev;
+
+	WL_TRACE(("In\n"));
+	if (!ndev) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+	dev = wl_cfg80211_get_parent_dev();
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return -ENOMEM;
+	}
+	err = wl_setup_wiphy(wdev, dev, context);
+	if (unlikely(err)) {
+		kfree(wdev);
+		return -ENOMEM;
+	}
+#ifdef WLMESH
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_MESH);
+#else
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+#endif
+	cfg = wiphy_priv(wdev->wiphy);
+	cfg->wdev = wdev;
+	cfg->pub = context;
+	INIT_LIST_HEAD(&cfg->net_list);
+#ifdef WBTEXT
+	INIT_LIST_HEAD(&cfg->wbtext_bssid_list);
+#endif /* WBTEXT */
+	INIT_LIST_HEAD(&cfg->vndr_oui_list);
+	spin_lock_init(&cfg->net_list_sync);
+	ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+	wdev->netdev = ndev;
+	cfg->state_notifier = wl_notifier_change_state;
+	err = wl_init_priv(cfg);
+	if (err) {
+		WL_ERR(("Failed to init iwm_priv (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+	err = wl_alloc_netinfo(cfg, ndev, wdev, wdev->iftype, PM_ENABLE, 0);
+	if (err) {
+		WL_ERR(("Failed to alloc net_info (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+
+	err = wl_setup_rfkill(cfg, TRUE);
+	if (err) {
+		WL_ERR(("Failed to setup rfkill %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#ifdef DEBUGFS_CFG80211
+	err = wl_setup_debugfs(cfg);
+	if (err) {
+		WL_ERR(("Failed to setup debugfs %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#endif
+	if (!wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = TRUE;
+		err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+		if (err) {
+			wl_cfg80211_netdev_notifier_registered = FALSE;
+			WL_ERR(("Failed to register notifier %d\n", err));
+			goto cfg80211_attach_out;
+		}
+	}
+#if defined(COEX_DHCP)
+	cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+	if (!cfg->btcoex_info)
+		goto cfg80211_attach_out;
+#endif 
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+	cfg->random_mac_enabled = FALSE;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+	wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if defined(WL_ENABLE_P2P_IF)
+	err = wl_cfg80211_attach_p2p(cfg);
+	if (err)
+		goto cfg80211_attach_out;
+#endif 
+
+	INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+
+#if defined(STAT_REPORT)
+	err = wl_attach_stat_report(cfg);
+	if (err) {
+		goto cfg80211_attach_out;
+	}
+#endif /* STAT_REPORT */
+	return err;
+
+cfg80211_attach_out:
+	wl_cfg80211_detach(cfg);
+	return err;
+}
+
+void wl_cfg80211_detach(struct bcm_cfg80211 *cfg)
+{
+
+	WL_TRACE(("In\n"));
+	if (!cfg)
+		return;
+
+	wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+#if defined(COEX_DHCP)
+	wl_cfg80211_btcoex_deinit();
+	cfg->btcoex_info = NULL;
+#endif 
+
+	wl_setup_rfkill(cfg, FALSE);
+#ifdef DEBUGFS_CFG80211
+	wl_free_debugfs(cfg);
+#endif
+	if (cfg->p2p_supported) {
+		if (timer_pending(&cfg->p2p->listen_timer))
+			del_timer_sync(&cfg->p2p->listen_timer);
+		wl_cfgp2p_deinit_priv(cfg);
+	}
+
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+	if (timer_pending(&cfg->roam_timeout)) {
+		del_timer_sync(&cfg->roam_timeout);
+	}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfg->p2p_wdev)
+		wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF  */
+#if defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_detach_p2p(cfg);
+#endif 
+#if defined(STAT_REPORT)
+	wl_detach_stat_report(cfg);
+#endif /* STAT_REPORT */
+
+	wl_cfg80211_ibss_vsie_free(cfg);
+	wl_cfg80211_clear_mgmt_vndr_ies(cfg);
+	wl_deinit_priv(cfg);
+	wl_cfg80211_clear_parent_dev();
+#if defined(RSSIAVG)
+	wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+	wl_free_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+	wl_release_bss_cache_ctrl(&cfg->g_bss_cache_ctrl);
+#endif
+	wl_free_wdev(cfg);
+	/* PLEASE do NOT call any function after wl_free_wdev, the driver's private
+	 * structure "cfg", which is the private part of wiphy, has been freed in
+	 * wl_free_wdev !!!!!!!!!!!
+	 */
+}
+
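+/*
+ * Event worker: drains the event queue while holding the event wake lock,
+ * validates the interface index, resolves the wireless_dev by bsscfgidx and,
+ * unless the bus is down, dispatches to the handler registered in
+ * cfg->evt_handler[] under cfg->if_sync.
+ */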
+static void wl_event_handler(struct work_struct *work_data)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct wl_event_q *e;
+	struct wireless_dev *wdev = NULL;
+
+	BCM_SET_CONTAINER_OF(cfg, work_data, struct bcm_cfg80211, event_work);
+	DHD_EVENT_WAKE_LOCK(cfg->pub);
+	while ((e = wl_deq_event(cfg))) {
+		WL_DBG(("event type (%d), ifidx: %d bssidx: %d \n",
+			e->etype, e->emsg.ifidx, e->emsg.bsscfgidx));
+
+		if (e->emsg.ifidx > WL_MAX_IFS) {
+			WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+			goto fail;
+		}
+
+		/* Make sure iface operations don't create race conditions */
+		mutex_lock(&cfg->if_sync);
+		if (!(wdev = wl_get_wdev_by_bssidx(cfg, e->emsg.bsscfgidx))) {
+			/* WLC_E_IF events are handled by wl_host_event */
+			if (e->etype != WLC_E_IF)
+				WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
+					" Ignoring event.\n", e->emsg.bsscfgidx));
+		} else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+			dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+			if (dhd->busstate == DHD_BUS_DOWN) {
+				WL_ERR((": BUS is DOWN.\n"));
+			} else
+				cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
+					&e->emsg, e->edata);
+		} else {
+			WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+		}
+		mutex_unlock(&cfg->if_sync);
+fail:
+		wl_put_event(e);
+	}
+	DHD_EVENT_WAKE_UNLOCK(cfg->pub);
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+	u32 event_type = ntoh32(e->event_type);
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	struct net_info *netinfo;
+
+	WL_DBG(("event_type (%d): %s\n", event_type, bcmevent_get_name(event_type)));
+
+	if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) {
+		WL_ERR(("Stale event ignored\n"));
+		return;
+	}
+
+	if (cfg->event_workq == NULL) {
+		WL_ERR(("Event handler is not created\n"));
+		return;
+	}
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
+		WL_ERR(("during IF change, ignore event %d\n", event_type));
+		return;
+	}
+
+	netinfo = wl_get_netinfo_by_bssidx(cfg, e->bsscfgidx);
+	if (!netinfo) {
+		/* Since the netinfo entry is not there, the netdev entry is not
+		 * created via cfg80211 interface. so the event is not of interest
+		 * to the cfg80211 layer.
+		 */
+		WL_ERR(("ignore event %d, not interested\n", event_type));
+		return;
+	}
+
+	if (event_type == WLC_E_PFN_NET_FOUND) {
+		WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+	}
+	else if (event_type == WLC_E_PFN_NET_LOST) {
+		WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+	}
+
+	if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) {
+		queue_work(cfg->event_workq, &cfg->event_work);
+	}
+}
+
+static void wl_init_eq(struct bcm_cfg80211 *cfg)
+{
+	wl_init_eq_lock(cfg);
+	INIT_LIST_HEAD(&cfg->eq_list);
+}
+
+static void wl_flush_eq(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	while (!list_empty_careful(&cfg->eq_list)) {
+		BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+		kfree(e);
+	}
+	wl_unlock_eq(cfg, flags);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e = NULL;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	if (likely(!list_empty(&cfg->eq_list))) {
+		BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+	}
+	wl_unlock_eq(cfg, flags);
+
+	return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event,
+	const wl_event_msg_t *msg, void *data)
+{
+	struct wl_event_q *e;
+	s32 err = 0;
+	uint32 evtq_size;
+	uint32 data_len;
+	unsigned long flags;
+	gfp_t aflags;
+
+	data_len = 0;
+	if (data)
+		data_len = ntoh32(msg->datalen);
+	evtq_size = sizeof(struct wl_event_q) + data_len;
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	e = kzalloc(evtq_size, aflags);
+	if (unlikely(!e)) {
+		WL_ERR(("event alloc failed\n"));
+		return -ENOMEM;
+	}
+	e->etype = event;
+	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+	if (data)
+		memcpy(e->edata, data, data_len);
+	flags = wl_lock_eq(cfg);
+	list_add_tail(&e->eq_list, &cfg->eq_list);
+	wl_unlock_eq(cfg, flags);
+
+	return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+	kfree(e);
+}
+
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype)
+{
+	s32 infra = 0;
+	s32 err = 0;
+	s32 mode = 0;
+	switch (iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+		WL_ERR(("type (%d) : currently we do not support this mode\n",
+			iftype));
+		err = -EINVAL;
+		return err;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+#ifdef WLMESH
+	case NL80211_IFTYPE_MESH_POINT:
+		mode = WL_MODE_MESH;
+		infra = WL_BSSTYPE_MESH;
+		break;
+#endif /* WLMESH */
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		infra = 1;
+		break;
+	default:
+		err = -EINVAL;
+		WL_ERR(("invalid type (%d)\n", iftype));
+		return err;
+	}
+	infra = htod32(infra);
+	err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		return err;
+	}
+
+	wl_set_mode_by_netdev(cfg, ndev, mode);
+
+	return 0;
+}
+
+void wl_cfg80211_add_to_eventbuffer(struct wl_eventmsg_buf *ev, u16 event, bool set)
+{
+	if (!ev || (event > WLC_E_LAST))
+		return;
+
+	if (ev->num < MAX_EVENT_BUF_NUM) {
+		ev->event[ev->num].type = event;
+		ev->event[ev->num].set = set;
+		ev->num++;
+	} else {
+		WL_ERR(("eventbuffer doesn't support > %u events. Update"
+			" the define MAX_EVENT_BUF_NUM\n", MAX_EVENT_BUF_NUM));
+		ASSERT(0);
+	}
+}
+
+s32 wl_cfg80211_apply_eventbuffer(
+	struct net_device *ndev,
+	struct bcm_cfg80211 *cfg,
+	wl_eventmsg_buf_t *ev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int i, ret = 0;
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	if (!ev || (!ev->num))
+		return -EINVAL;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Read event_msgs mask */
+	ret = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
+	if (unlikely(ret)) {
+		WL_ERR(("Get event_msgs error (%d)\n", ret));
+		goto exit;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+
+	/* apply the set bits */
+	for (i = 0; i < ev->num; i++) {
+		if (ev->event[i].set)
+			setbit(eventmask, ev->event[i].type);
+		else
+			clrbit(eventmask, ev->event[i].type);
+	}
+
+	/* Write updated Event mask */
+	ret = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, sizeof(eventmask), iovbuf,
+			sizeof(iovbuf), NULL);
+	if (unlikely(ret)) {
+		WL_ERR(("Set event_msgs error (%d)\n", ret));
+	}
+
+exit:
+	mutex_unlock(&cfg->event_sync);
+	return ret;
+}
+
+s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg;
+
+	if (!ndev)
+		return -ENODEV;
+
+	cfg = wl_get_cfg(ndev);
+	if (!cfg)
+		return -ENODEV;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Setup event_msgs */
+	err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
+	if (unlikely(err)) {
+		WL_ERR(("Get event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+	if (add) {
+		setbit(eventmask, event);
+	} else {
+		clrbit(eventmask, event);
+	}
+	err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+			sizeof(iovbuf), NULL);
+	if (unlikely(err)) {
+		WL_ERR(("Set event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+
+eventmsg_out:
+	mutex_unlock(&cfg->event_sync);
+	return err;
+}
+
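+/*
+ * Query the firmware "chanspecs" iovar and rebuild the driver channel
+ * tables (__wl_2ghz_channels / __wl_5ghz_a_channels): center frequency,
+ * hardware channel number, HT40 sideband flags and, via "per_chan_info",
+ * radar/passive restrictions.
+ */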
+static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap)
+{
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ieee80211_channel *band_chan_arr = NULL;
+	wl_uint32_list_t *list;
+	u32 i, j, index, n_2g, n_5g, band, channel, array_size;
+	u32 *n_cnt = NULL;
+	chanspec_t c = 0;
+	s32 err = BCME_OK;
+	bool update;
+	bool ht40_allowed;
+	u8 *pbuf = NULL;
+	bool dfs_radar_disabled = FALSE;
+
+#define LOCAL_BUF_LEN 1024
+	pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL);
+
+	if (pbuf == NULL) {
+		WL_ERR(("failed to allocate local buf\n"));
+		return -ENOMEM;
+	}
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
+	if (err != 0) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		kfree(pbuf);
+		return err;
+	}
+#undef LOCAL_BUF_LEN
+
+	list = (wl_uint32_list_t *)(void *)pbuf;
+	band = array_size = n_2g = n_5g = 0;
+	for (i = 0; i < dtoh32(list->count); i++) {
+		index = 0;
+		update = false;
+		ht40_allowed = false;
+		c = (chanspec_t)dtoh32(list->element[i]);
+		c = wl_chspec_driver_to_host(c);
+		channel = wf_chspec_ctlchan(c);
+
+		if (!CHSPEC_IS40(c) && !CHSPEC_IS20(c)) {
+			WL_DBG(("HT80/160/80p80 center channel : %d\n", channel));
+			continue;
+		}
+		if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
+			(channel <= CH_MAX_2G_CHANNEL)) {
+			band_chan_arr = __wl_2ghz_channels;
+			array_size = ARRAYSIZE(__wl_2ghz_channels);
+			n_cnt = &n_2g;
+			band = IEEE80211_BAND_2GHZ;
+			ht40_allowed = (bw_cap  == WLC_N_BW_40ALL)? true : false;
+		} else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
+			band_chan_arr = __wl_5ghz_a_channels;
+			array_size = ARRAYSIZE(__wl_5ghz_a_channels);
+			n_cnt = &n_5g;
+			band = IEEE80211_BAND_5GHZ;
+			ht40_allowed = (bw_cap  == WLC_N_BW_20ALL)? false : true;
+		} else {
+			WL_ERR(("Invalid channel spec. 0x%x.\n", c));
+			continue;
+		}
+		if (!ht40_allowed && CHSPEC_IS40(c))
+			continue;
+		for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
+			if (band_chan_arr[j].hw_value == channel) {
+				update = true;
+				break;
+			}
+		}
+		if (update)
+			index = j;
+		else
+			index = *n_cnt;
+		if (!dhd_conf_match_channel(cfg->pub, channel))
+			continue;
+		if (index <  array_size) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel);
+#else
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel, band);
+#endif
+			band_chan_arr[index].hw_value = channel;
+			band_chan_arr[index].beacon_found = false;
+
+			if (CHSPEC_IS40(c) && ht40_allowed) {
+				/* assuming the order is HT20, HT40 Upper,
+				 *  HT40 lower from chanspecs
+				 */
+				u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40;
+				if (CHSPEC_SB_UPPER(c)) {
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags &=
+							~IEEE80211_CHAN_NO_HT40;
+					band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS;
+				} else {
+					/* It should be one of
+					 * IEEE80211_CHAN_NO_HT40 or IEEE80211_CHAN_NO_HT40PLUS
+					 */
+					band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40;
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags |=
+							IEEE80211_CHAN_NO_HT40MINUS;
+				}
+			} else {
+				band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40;
+				if (!dfs_radar_disabled) {
+					if (band == IEEE80211_BAND_2GHZ)
+						channel |= WL_CHANSPEC_BAND_2G;
+					else
+						channel |= WL_CHANSPEC_BAND_5G;
+					channel |= WL_CHANSPEC_BW_20;
+					channel = wl_chspec_host_to_driver(channel);
+					err = wldev_iovar_getint(dev, "per_chan_info", &channel);
+					if (!err) {
+						if (channel & WL_CHAN_RADAR) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								(IEEE80211_CHAN_RADAR
+								| IEEE80211_CHAN_NO_IBSS);
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_RADAR;
+#endif
+						}
+
+						if (channel & WL_CHAN_PASSIVE)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_PASSIVE_SCAN;
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_NO_IR;
+#endif
+					} else if (err == BCME_UNSUPPORTED) {
+						dfs_radar_disabled = TRUE;
+						WL_ERR(("does not support per_chan_info\n"));
+					}
+				}
+			}
+			if (!update)
+				(*n_cnt)++;
+		}
+
+	}
+	__wl_band_2ghz.n_channels = n_2g;
+	__wl_band_5ghz_a.n_channels = n_5g;
+	kfree(pbuf);
+	return err;
+}
+
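+/*
+ * Read the band list and PHY capabilities (nmode, vhtmode, stream counts,
+ * LDPC/STBC/beamforming) from firmware, rebuild the regulatory channel
+ * tables and populate the wiphy band structures with the corresponding
+ * HT/VHT capabilities.
+ */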
+static s32 __wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+	struct wiphy *wiphy;
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	u32 bandlist[3];
+	u32 nband = 0;
+	u32 i = 0;
+	s32 err = 0;
+	s32 index = 0;
+	s32 nmode = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	u32 j = 0;
+	s32 vhtmode = 0;
+	s32 txstreams = 0;
+	s32 rxstreams = 0;
+	s32 ldpc_cap = 0;
+	s32 stbc_rx = 0;
+	s32 stbc_tx = 0;
+	s32 txbf_bfe_cap = 0;
+	s32 txbf_bfr_cap = 0;
+#endif 
+	s32 bw_cap = 0;
+	s32 cur_band = -1;
+	struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
+
+	memset(bandlist, 0, sizeof(bandlist));
+	err = wldev_ioctl_get(dev, WLC_GET_BANDLIST, bandlist,
+		sizeof(bandlist));
+	if (unlikely(err)) {
+		WL_ERR(("error read bandlist (%d)\n", err));
+		return err;
+	}
+	err = wldev_ioctl_get(dev, WLC_GET_BAND, &cur_band,
+		sizeof(s32));
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	err = wldev_iovar_getint(dev, "nmode", &nmode);
+	if (unlikely(err)) {
+		WL_ERR(("error reading nmode (%d)\n", err));
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	err = wldev_iovar_getint(dev, "vhtmode", &vhtmode);
+	if (unlikely(err)) {
+		WL_ERR(("error reading vhtmode (%d)\n", err));
+	}
+
+	if (vhtmode) {
+		err = wldev_iovar_getint(dev, "txstreams", &txstreams);
+		if (unlikely(err)) {
+			WL_ERR(("error reading txstreams (%d)\n", err));
+		}
+
+		err = wldev_iovar_getint(dev, "rxstreams", &rxstreams);
+		if (unlikely(err)) {
+			WL_ERR(("error reading rxstreams (%d)\n", err));
+		}
+
+		err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap);
+		if (unlikely(err)) {
+			WL_ERR(("error reading ldpc_cap (%d)\n", err));
+		}
+
+		err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx);
+		if (unlikely(err)) {
+			WL_ERR(("error reading stbc_rx (%d)\n", err));
+		}
+
+		err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx);
+		if (unlikely(err)) {
+			WL_ERR(("error reading stbc_tx (%d)\n", err));
+		}
+
+		err = wldev_iovar_getint(dev, "txbf_bfe_cap", &txbf_bfe_cap);
+		if (unlikely(err)) {
+			WL_ERR(("error reading txbf_bfe_cap (%d)\n", err));
+		}
+
+		err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap);
+		if (unlikely(err)) {
+			WL_ERR(("error reading txbf_bfr_cap (%d)\n", err));
+		}
+	}
+#endif 
+
+	/* For nmode and vhtmode, check bw cap */
+	if (nmode ||
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+		vhtmode ||
+#endif 
+		0) {
+		err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+		if (unlikely(err)) {
+			WL_ERR(("error getting mimo_bw_cap (%d)\n", err));
+		}
+	}
+
+	err = wl_construct_reginfo(cfg, bw_cap);
+	if (err) {
+		WL_ERR(("wl_construct_reginfo() fails err=%d\n", err));
+		if (err != BCME_UNSUPPORTED)
+			return err;
+	}
+
+	wiphy = bcmcfg_to_wiphy(cfg);
+	nband = bandlist[0];
+
+	for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) {
+		index = -1;
+		if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) {
+			bands[IEEE80211_BAND_5GHZ] =
+				&__wl_band_5ghz_a;
+			index = IEEE80211_BAND_5GHZ;
+			if (nmode && (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G))
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+			/* VHT capabilities. */
+			if (vhtmode) {
+				/* Supported */
+				bands[index]->vht_cap.vht_supported = TRUE;
+
+				for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) {
+					/* TX stream rates. */
+					if (j <= txstreams) {
+						VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+							bands[index]->vht_cap.vht_mcs.tx_mcs_map);
+					} else {
+						VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+							bands[index]->vht_cap.vht_mcs.tx_mcs_map);
+					}
+
+					/* RX stream rates. */
+					if (j <= rxstreams) {
+						VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+							bands[index]->vht_cap.vht_mcs.rx_mcs_map);
+					} else {
+						VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+							bands[index]->vht_cap.vht_mcs.rx_mcs_map);
+					}
+				}
+
+
+				/* Capabilities */
+				/* 80 MHz is mandatory */
+				bands[index]->vht_cap.cap |=
+					IEEE80211_VHT_CAP_SHORT_GI_80;
+
+				if (WL_BW_CAP_160MHZ(bw_cap)) {
+					bands[index]->vht_cap.cap |=
+						IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+					bands[index]->vht_cap.cap |=
+						IEEE80211_VHT_CAP_SHORT_GI_160;
+				}
+
+				bands[index]->vht_cap.cap |=
+					IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+
+				if (ldpc_cap)
+					bands[index]->vht_cap.cap |=
+						IEEE80211_VHT_CAP_RXLDPC;
+
+				if (stbc_tx)
+					bands[index]->vht_cap.cap |=
+						IEEE80211_VHT_CAP_TXSTBC;
+
+				if (stbc_rx)
+					bands[index]->vht_cap.cap |=
+						(stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT);
+
+				if (txbf_bfe_cap)
+					bands[index]->vht_cap.cap |=
+						IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+
+				if (txbf_bfr_cap) {
+					bands[index]->vht_cap.cap |=
+						IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+				}
+
+				if (txbf_bfe_cap || txbf_bfr_cap) {
+					bands[index]->vht_cap.cap |=
+						(2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT);
+					bands[index]->vht_cap.cap |=
+						((txstreams - 1) <<
+							VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT);
+					bands[index]->vht_cap.cap |=
+						IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB;
+				}
+
+				/* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
+				bands[index]->vht_cap.cap |=
+					(7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
+				WL_INFORM(("%s band[%d] vht_enab=%d vht_cap=%08x "
+					"vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n",
+					__FUNCTION__, index,
+					bands[index]->vht_cap.vht_supported,
+					bands[index]->vht_cap.cap,
+					bands[index]->vht_cap.vht_mcs.rx_mcs_map,
+					bands[index]->vht_cap.vht_mcs.tx_mcs_map));
+			}
+#endif 
+		}
+		else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
+			bands[IEEE80211_BAND_2GHZ] =
+				&__wl_band_2ghz;
+			index = IEEE80211_BAND_2GHZ;
+			if (bw_cap == WLC_N_BW_40ALL)
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		}
+
+		if ((index >= 0) && nmode) {
+			bands[index]->ht_cap.cap |=
+				(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
+			bands[index]->ht_cap.ht_supported = TRUE;
+			bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+			bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+			/* An HT shall support all EQM rates for one spatial stream */
+			bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+		}
+
+	}
+
+	wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
+	wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
+
+	/* Check if any bands were populated; otherwise make 2GHz the default */
+	if (wiphy->bands[IEEE80211_BAND_2GHZ] == NULL &&
+		wiphy->bands[IEEE80211_BAND_5GHZ] == NULL) {
+		/* Set up the 2GHz band as the default */
+		wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+	}
+
+	if (notify)
+		wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+
+	return 0;
+}
+
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+	s32 err;
+
+	mutex_lock(&cfg->usr_sync);
+	err = __wl_update_wiphybands(cfg, notify);
+	mutex_unlock(&cfg->usr_sync);
+
+	return err;
+}
+
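+/* Bring the cfg80211 layer up: set up event handling, configure the dongle,
+ * apply the interface mode, initialize scanning and update the wiphy bands.
+ */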
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+#ifdef WBTEXT
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif /* WBTEXT */
+#ifdef WLTDLS
+	u32 tdls;
+#endif /* WLTDLS */
+
+	WL_DBG(("In\n"));
+
+	if (!dhd_download_fw_on_driverload) {
+		err = wl_create_event_handler(cfg);
+		if (err) {
+			WL_ERR(("wl_create_event_handler failed\n"));
+			return err;
+		}
+		wl_init_event_handler(cfg);
+	}
+
+	err = dhd_config_dongle(cfg);
+	if (unlikely(err))
+		return err;
+
+	err = wl_config_ifmode(cfg, ndev, wdev->iftype);
+	if (unlikely(err && err != -EINPROGRESS)) {
+		WL_ERR(("wl_config_ifmode failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+
+	err = wl_init_scan(cfg);
+	if (err) {
+		WL_ERR(("wl_init_scan failed\n"));
+		return err;
+	}
+	err = __wl_update_wiphybands(cfg, true);
+	if (unlikely(err)) {
+		WL_ERR(("wl_update_wiphybands failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+
+#ifdef DHD_LOSSLESS_ROAMING
+	if (timer_pending(&cfg->roam_timeout)) {
+		del_timer_sync(&cfg->roam_timeout);
+	}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+	err = dhd_monitor_init(cfg->pub);
+
+#ifdef WBTEXT
+	/* when wifi up, set roam_prof to default value */
+	if (dhd->wbtext_support) {
+		if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+			wl_cfg80211_wbtext_set_default(ndev);
+			wl_cfg80211_wbtext_clear_bssid_list(cfg);
+		}
+	}
+#endif /* WBTEXT */
+#ifdef WLTDLS
+	if (wldev_iovar_getint(ndev, "tdls_enable", &tdls) == 0) {
+		WL_DBG(("TDLS supported in fw\n"));
+		cfg->tdls_supported = true;
+	}
+#endif /* WLTDLS */
+	INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+	wl_set_drv_status(cfg, READY, ndev);
+	return err;
+}
+
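+/* Bring the cfg80211 layer down: abort pending scans, clear driver status on
+ * all interfaces, tear down P2P/virtual interfaces and reset the primary
+ * interface back to station mode.
+ */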
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	unsigned long flags;
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
+	defined(WL_NEW_CFG_PRIVCMD_SUPPORT)) && !defined(PLATFORM_SLP)
+	struct net_device *p2p_net = cfg->p2p_net;
+#endif 
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	struct cfg80211_scan_info info;
+#endif
+
+	WL_DBG(("In\n"));
+
+	/* Check if cfg80211 interface is already down */
+	if (!wl_get_drv_status(cfg, READY, ndev)) {
+		WL_DBG(("cfg80211 interface is already down\n"));
+		return err;	/* it is even not ready */
+	}
+
+#ifdef WLTDLS
+	cfg->tdls_supported = false;
+#endif /* WLTDLS */
+
+	/* Delete pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+	/* clear all the security setting on primary Interface */
+	wl_cfg80211_clear_security(cfg);
+
+
+	if (cfg->p2p_supported) {
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+		if (wl_cfgp2p_vif_created(cfg)) {
+			bool enabled = false;
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_deinit(dhd);
+				cfg->wlfc_on = false;
+			}
+		}
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+
+	/* clean up any left over interfaces */
+	wl_cfg80211_cleanup_virtual_ifaces(ndev, false);
+
+	/* If the primary BSS is operational (e.g. SoftAP), bring it down */
+	if (wl_cfg80211_bss_isup(ndev, 0)) {
+		if (wl_cfg80211_bss_up(cfg, ndev, 0, 0) < 0)
+			WL_ERR(("BSS down failed \n"));
+	}
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev) /* p2p discovery iface is null */
+			wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+
+#ifdef P2P_LISTEN_OFFLOADING
+	wl_cfg80211_p2plo_deinit(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+		info.aborted = true;
+		cfg80211_scan_done(cfg->scan_request, &info);
+#else
+		cfg80211_scan_done(cfg->scan_request, true);
+#endif
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	for_each_ndev(cfg, iter, next) {
+		/* p2p discovery iface ndev ptr could be null */
+		if (iter->ndev == NULL)
+			continue;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+		if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+			CFG80211_DISCONNECTED(iter->ndev, 0, NULL, 0, false, GFP_KERNEL);
+		}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+		wl_clr_drv_status(cfg, READY, iter->ndev);
+		wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+		wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
+		wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
+		NL80211_IFTYPE_STATION;
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
+	defined(WL_NEW_CFG_PRIVCMD_SUPPORT)) && !defined(PLATFORM_SLP)
+		if (p2p_net)
+			dev_close(p2p_net);
+#endif 
+
+	/* Avoid deadlock from wl_cfg80211_down */
+	if (!dhd_download_fw_on_driverload) {
+		mutex_unlock(&cfg->usr_sync);
+		wl_destroy_event_handler(cfg);
+		mutex_lock(&cfg->usr_sync);
+	}
+
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	if (cfg->p2p_supported) {
+		if (timer_pending(&cfg->p2p->listen_timer))
+			del_timer_sync(&cfg->p2p->listen_timer);
+		wl_cfgp2p_down(cfg);
+	}
+
+	if (timer_pending(&cfg->scan_timeout)) {
+		del_timer_sync(&cfg->scan_timeout);
+	}
+
+	DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+
+	dhd_monitor_uninit();
+#ifdef WLAIBSS_MCHAN
+	bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev);
+#endif /* WLAIBSS_MCHAN */
+
+
+#ifdef WL11U
+	/* Clear interworking element. */
+	if (cfg->wl11u) {
+		cfg->wl11u = FALSE;
+	}
+#endif /* WL11U */
+
+#ifdef CUSTOMER_HW4_DEBUG
+	if (wl_scan_timeout_dbg_enabled) {
+		wl_scan_timeout_dbg_clear();
+	}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+	cfg->disable_roam_event = false;
+
+	DNGL_FUNC(dhd_cfg80211_down, (cfg));
+
+#ifdef DHD_IFDEBUG
+	/* Printout all netinfo entries */
+	wl_probe_wdev_all(cfg);
+#endif /* DHD_IFDEBUG */
+
+	return err;
+}
+
+s32 wl_cfg80211_up(struct net_device *net)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	int val = 1;
+	dhd_pub_t *dhd;
+#ifdef DISABLE_PM_BCNRX
+	s32 interr = 0;
+	uint param = 0;
+	s8 iovbuf[WLC_IOCTL_SMLEN];
+#endif /* DISABLE_PM_BCNRX */
+
+	WL_DBG(("In\n"));
+	cfg = wl_get_cfg(net);
+
+	if ((err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
+		sizeof(int))) < 0) {
+		WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+		return err;
+	}
+	val = dtoh32(val);
+	if (val != WLC_IOCTL_VERSION && val != 1) {
+		WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+			val, WLC_IOCTL_VERSION));
+		return BCME_VERSION;
+	}
+	ioctl_version = val;
+	WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
+	wl_cfg80211_check_in4way(cfg, net, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+		WL_EXT_STATUS_DISCONNECTED, NULL);
+
+	mutex_lock(&cfg->usr_sync);
+	dhd = (dhd_pub_t *)(cfg->pub);
+	if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
+		if (unlikely(err)) {
+			mutex_unlock(&cfg->usr_sync);
+			return err;
+		}
+	}
+#ifdef WLMESH
+	cfg->wdev->wiphy->features |= NL80211_FEATURE_USERSPACE_MPM;
+#endif /* WLMESH */
+
+	err = __wl_cfg80211_up(cfg);
+	if (unlikely(err))
+		WL_ERR(("__wl_cfg80211_up failed\n"));
+
+
+
+	/* IOVAR configurations with 'up' condition */
+#ifdef DISABLE_PM_BCNRX
+	interr = wldev_iovar_setbuf(bcmcfg_to_prmry_ndev(cfg), "pm_bcnrx", (char *)&param,
+			sizeof(param), iovbuf, sizeof(iovbuf), NULL);
+	if (unlikely(interr)) {
+		WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
+	}
+#endif /* DISABLE_PM_BCNRX */
+
+	mutex_unlock(&cfg->usr_sync);
+
+#ifdef WLAIBSS_MCHAN
+	bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef DUAL_STA_STATIC_IF
+#ifdef WL_VIRTUAL_APSTA
+#error "Both DUAL STA and DUAL_STA_STATIC_IF can't be enabled together"
+#endif
+	/* Static interface support is currently available only for STA-only builds (without P2P) */
+	wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d");
+#endif /* DUAL_STA_STATIC_IF */
+
+	return err;
+}
+
+/* Private event to the supplicant indicating that the chip has hung */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhd;
+#if defined(SOFTAP_SEND_HANGEVT)
+	/* specific MAC address used for the hang event */
+	uint8 hang_mac[ETHER_ADDR_LEN] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11};
+#endif /* SOFTAP_SEND_HANGEVT */
+	if (!cfg) {
+		return BCME_ERROR;
+	}
+
+	dhd = (dhd_pub_t *)(cfg->pub);
+#if defined(DHD_HANG_SEND_UP_TEST)
+	if (dhd->req_hang_type) {
+		WL_ERR(("%s, Clear HANG test request 0x%x\n",
+			__FUNCTION__, dhd->req_hang_type));
+		dhd->req_hang_type = 0;
+	}
+#endif /* DHD_HANG_SEND_UP_TEST */
+	if ((dhd->hang_reason <= HANG_REASON_MASK) || (dhd->hang_reason >= HANG_REASON_MAX)) {
+		WL_ERR(("%s, Invalid hang reason 0x%x\n",
+			__FUNCTION__, dhd->hang_reason));
+		dhd->hang_reason = HANG_REASON_UNKNOWN;
+	}
+#ifdef DHD_USE_EXTENDED_HANG_REASON
+	if (dhd->hang_reason != 0) {
+		reason = dhd->hang_reason;
+	}
+#endif /* DHD_USE_EXTENDED_HANG_REASON */
+	WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason)));
+
+	wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+#ifdef SOFTAP_SEND_HANGEVT
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		cfg80211_del_sta(dev, hang_mac, GFP_ATOMIC);
+	} else
+#endif /* SOFTAP_SEND_HANGEVT */
+	{
+		CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL);
+	}
+#if defined(RSSIAVG)
+	wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+	wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+	if (cfg != NULL) {
+		wl_link_down(cfg);
+	}
+	return 0;
+}
+
+s32 wl_cfg80211_down(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	s32 err = 0;
+
+	WL_DBG(("In\n"));
+	if (cfg == NULL)
+		return err;
+	mutex_lock(&cfg->usr_sync);
+#if defined(RSSIAVG)
+	wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+	wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+	err = __wl_cfg80211_down(cfg);
+	mutex_unlock(&cfg->usr_sync);
+
+	return err;
+}
+
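+/* Return a pointer to the requested item of the per-netdev profile,
+ * taken under cfgdrv_lock.
+ */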
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+{
+	unsigned long flags;
+	void *rptr = NULL;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return NULL;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SEC:
+		rptr = &profile->sec;
+		break;
+	case WL_PROF_ACT:
+		rptr = &profile->active;
+		break;
+	case WL_PROF_BSSID:
+		rptr = profile->bssid;
+		break;
+	case WL_PROF_SSID:
+		rptr = &profile->ssid;
+		break;
+	case WL_PROF_CHAN:
+		rptr = &profile->channel;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	if (!rptr)
+		WL_ERR(("invalid item (%d)\n", item));
+	return rptr;
+}
+
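+/* Update a single item of the per-netdev profile under cfgdrv_lock. */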
+static s32
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, const void *data, s32 item)
+{
+	s32 err = 0;
+	const struct wlc_ssid *ssid;
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return WL_INVALID;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SSID:
+		ssid = (const wlc_ssid_t *) data;
+		memset(profile->ssid.SSID, 0,
+			sizeof(profile->ssid.SSID));
+		profile->ssid.SSID_len = MIN(ssid->SSID_len, DOT11_MAX_SSID_LEN);
+		memcpy(profile->ssid.SSID, ssid->SSID, profile->ssid.SSID_len);
+		break;
+	case WL_PROF_BSSID:
+		if (data)
+			memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+		else
+			memset(profile->bssid, 0, ETHER_ADDR_LEN);
+		break;
+	case WL_PROF_SEC:
+		memcpy(&profile->sec, data, sizeof(profile->sec));
+		break;
+	case WL_PROF_ACT:
+		profile->active = *(const bool *)data;
+		break;
+	case WL_PROF_BEACONINT:
+		profile->beacon_interval = *(const u16 *)data;
+		break;
+	case WL_PROF_DTIMPERIOD:
+		profile->dtim_period = *(const u8 *)data;
+		break;
+	case WL_PROF_CHAN:
+		profile->channel = *(const u32*)data;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (err == -EOPNOTSUPP)
+		WL_ERR(("unsupported item (%d)\n", item));
+
+	return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+	/*
+	* Changing the debug level via an insmod parameter is prohibited;
+	* eventually the debug level will be configured at compile time
+	* using a CONFIG_XXX option.
+	*/
+	/* wl_dbg_level = level; */
+}
+
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS;
+}
+
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
+{
+	return cfg->ibss_starter;
+}
+
+static void wl_rst_ie(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	ie->offset = 0;
+}
+
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ie crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	ie->buf[ie->offset] = t;
+	ie->buf[ie->offset + 1] = l;
+	memcpy(&ie->buf[ie->offset + 2], v, l);
+	ie->offset += l + 2;
+
+	return err;
+}
+
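+/* Fix up a hidden-AP SSID IE in the IE stream: if the SSID IE is empty, or its
+ * length differs from the firmware-reported SSID in the roam case, rewrite it
+ * with the SSID from the bss info.
+ */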
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size,
+	bool roam)
+{
+	u8 *ssidie;
+	int32 ssid_len = MIN(bi->SSID_len, DOT11_MAX_SSID_LEN);
+	int32 remaining_ie_buf_len, available_buffer_len;
+	/* cfg80211_find_ie defined in kernel returning const u8 */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	/* Error out if
+	 * 1. no SSID IE is found, or
+	 * 2. the new SSID length is greater than what was allocated for the
+	 * existing SSID (we do not want to overwrite the rest of the IEs), or
+	 * 3. the buffer input is erroneous and the SSID length does not match
+	 * the space allocated to it.
+	 */
+	if (!ssidie) {
+		return;
+	}
+	available_buffer_len = ((int)(*ie_size)) - (ssidie + 2 - ie_stream);
+	remaining_ie_buf_len = available_buffer_len - (int)ssidie[1];
+	if ((ssid_len > ssidie[1]) ||
+		(ssidie[1] > available_buffer_len)) {
+		return;
+	}
+
+
+	if (ssidie[1] != ssid_len) {
+		if (ssidie[1]) {
+			WL_ERR(("%s: Wrong SSID len: %d != %d\n",
+				__FUNCTION__, ssidie[1], bi->SSID_len));
+		}
+		if (roam) {
+			WL_ERR(("Changing the SSID Info.\n"));
+			memmove(ssidie + ssid_len + 2,
+				(ssidie + 2) + ssidie[1],
+				remaining_ie_buf_len);
+			memcpy(ssidie + 2, bi->SSID, ssid_len);
+			*ie_size = *ie_size + ssid_len - ssidie[1];
+			ssidie[1] = ssid_len;
+		}
+		return;
+	}
+	if (*(ssidie + 2) == '\0')
+		 memcpy(ssidie + 2, bi->SSID, ssid_len);
+	return;
+}
+
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ie_stream crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+	ie->offset += ie_size;
+
+	return err;
+}
+
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset > dst_size)) {
+		WL_ERR(("dst_size is not enough\n"));
+		return -ENOSPC;
+	}
+	memcpy(dst, &ie->buf[0], ie->offset);
+
+	return err;
+}
+
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	return ie->offset;
+}
+
+static void wl_link_up(struct bcm_cfg80211 *cfg)
+{
+	cfg->link_up = true;
+}
+
+static void wl_link_down(struct bcm_cfg80211 *cfg)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+	WL_DBG(("In\n"));
+	cfg->link_up = false;
+	if (conn_info) {
+		conn_info->req_ie_len = 0;
+		conn_info->resp_ie_len = 0;
+	}
+}
+
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cfg->eq_lock, flags);
+	return flags;
+}
+
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
+{
+	spin_unlock_irqrestore(&cfg->eq_lock, flags);
+}
+
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
+{
+	spin_lock_init(&cfg->eq_lock);
+}
+
+static void wl_delay(u32 ms)
+{
+	if (in_atomic() || (ms < jiffies_to_msecs(1))) {
+		OSL_DELAY(ms*1000);
+	} else {
+		OSL_SLEEP(ms);
+	}
+}
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+	struct ether_addr primary_mac;
+	if (!cfg->p2p)
+		return -1;
+	if (!p2p_is_on(cfg)) {
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+	} else {
+		memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet,
+			ETHER_ADDR_LEN);
+	}
+
+	return 0;
+}
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+	return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+	return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+	return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+	return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+	return wl_cfgp2p_increase_p2p_bw(cfg, net, buf, len);
+}
+
+#ifdef P2PLISTEN_AP_SAMECHN
+s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable)
+{
+	s32 ret = wldev_iovar_setint(net, "p2p_resp_ap_chn", enable);
+
+	if ((ret == 0) && enable) {
+		/* disable PM for p2p responding on infra AP channel */
+		s32 pm = PM_OFF;
+
+		ret = wldev_ioctl_set(net, WLC_SET_PM, &pm, sizeof(pm));
+	}
+
+	return ret;
+}
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+s32 wl_cfg80211_channel_to_freq(u32 channel)
+{
+	int freq = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+	freq = ieee80211_channel_to_frequency(channel);
+#else
+	{
+		u16 band = 0;
+		if (channel <= CH_MAX_2G_CHANNEL)
+			band = IEEE80211_BAND_2GHZ;
+		else
+			band = IEEE80211_BAND_5GHZ;
+		freq = ieee80211_channel_to_frequency(channel, band);
+	}
+#endif
+	return freq;
+}
+
+
+#ifdef WLTDLS
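+/* Handle TDLS peer discovered/connected/disconnected events from firmware and,
+ * on peer-connected, deliver any cached TDLS setup frame to cfg80211.
+ */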
+static s32
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data) {
+
+	struct net_device *ndev = NULL;
+	u32 reason = ntoh32(e->reason);
+	s8 *msg = NULL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	switch (reason) {
+	case WLC_E_TDLS_PEER_DISCOVERED :
+		msg = " TDLS PEER DISCOVERED ";
+		break;
+	case WLC_E_TDLS_PEER_CONNECTED :
+		if (cfg->tdls_mgmt_frame) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+			cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+					cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+			cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+					cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,	0,
+					GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+			defined(WL_COMPAT_WIRELESS)
+			cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+					cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+					GFP_ATOMIC);
+#else
+			cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
+					cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, GFP_ATOMIC);
+#endif 
+		}
+		msg = " TDLS PEER CONNECTED ";
+#ifdef SUPPORT_SET_CAC
+		/* TDLS connect reset CAC */
+		wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+		break;
+	case WLC_E_TDLS_PEER_DISCONNECTED :
+		if (cfg->tdls_mgmt_frame) {
+			kfree(cfg->tdls_mgmt_frame);
+			cfg->tdls_mgmt_frame = NULL;
+			cfg->tdls_mgmt_freq = 0;
+		}
+		msg = "TDLS PEER DISCONNECTED ";
+#ifdef SUPPORT_SET_CAC
+		/* TDLS disconnected, set CAC */
+		wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+		break;
+	}
+	if (msg) {
+		WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((const u8*)(&e->addr)),
+			(bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+	}
+	return 0;
+
+}
+#endif /* WLTDLS */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+static s32
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
+	KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+	u32 peer_capability, const u8 *buf, size_t len)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+		(LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+	u32 peer_capability, const u8 *buf, size_t len)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+       const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+       u32 peer_capability, bool initiator, const u8 *buf, size_t len)
+#else /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+	const u8 *buf, size_t len)
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+{
+	s32 ret = 0;
+#ifdef WLTDLS
+	struct bcm_cfg80211 *cfg;
+	tdls_wfd_ie_iovar_t info;
+	memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
+	cfg = wl_get_cfg(dev);
+
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+	/* Some customer platforms backported this feature from kernel 3.15 to kernel 3.10,
+	 * which causes a build error
+	 */
+	 */
+	BCM_REFERENCE(peer_capability);
+#endif  /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+
+	switch (action_code) {
+		/* We need to set the TDLS Wi-Fi Display IE in firmware
+		 * using the tdls_wfd_ie iovar
+		 */
+		 */
+		case WLAN_TDLS_SET_PROBE_WFD_IE:
+			WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__));
+			info.mode = TDLS_WFD_PROBE_IE_TX;
+			memcpy(&info.data, buf, len);
+			info.length = len;
+			break;
+		case WLAN_TDLS_SET_SETUP_WFD_IE:
+			WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__));
+			info.mode = TDLS_WFD_IE_TX;
+			memcpy(&info.data, buf, len);
+			info.length = len;
+			break;
+		case WLAN_TDLS_SET_WFD_ENABLED:
+			WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__));
+			dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
+			goto out;
+		case WLAN_TDLS_SET_WFD_DISABLED:
+			WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__));
+			dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
+			goto out;
+		default:
+			WL_ERR(("Unsupported action code : %d\n", action_code));
+			goto out;
+	}
+	ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (ret) {
+		WL_ERR(("tdls_wfd_ie error %d\n", ret));
+	}
+
+out:
+#endif /* WLTDLS */
+	return ret;
+}
+
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	const u8 *peer, enum nl80211_tdls_operation oper)
+#else
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper)
+#endif
+{
+	s32 ret = 0;
+#ifdef WLTDLS
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	tdls_iovar_t info;
+	dhd_pub_t *dhdp;
+	bool tdls_auto_mode = false;
+	dhdp = (dhd_pub_t *)(cfg->pub);
+	memset(&info, 0, sizeof(tdls_iovar_t));
+	if (peer) {
+		memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+	} else {
+		return -1;
+	}
+	switch (oper) {
+	case NL80211_TDLS_DISCOVERY_REQ:
+		/* If the discovery request is broadcast then we need to set
+		 * info.mode to Tunneled Probe Request
+		 */
+		if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
+			info.mode = TDLS_MANUAL_EP_WFD_TPQ;
+			WL_ERR(("%s TDLS TUNNELED PROBE REQUEST\n", __FUNCTION__));
+		} else {
+			info.mode = TDLS_MANUAL_EP_DISCOVERY;
+		}
+		break;
+	case NL80211_TDLS_SETUP:
+		if (dhdp->tdls_mode == true) {
+			info.mode = TDLS_MANUAL_EP_CREATE;
+			tdls_auto_mode = false;
+			/* Do tear down and create a fresh one */
+			ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_TEARDOWN, tdls_auto_mode);
+			if (ret < 0) {
+				return ret;
+			}
+		} else {
+			tdls_auto_mode = true;
+		}
+		break;
+	case NL80211_TDLS_TEARDOWN:
+		info.mode = TDLS_MANUAL_EP_DELETE;
+		break;
+	default:
+		WL_ERR(("Unsupported operation : %d\n", oper));
+		goto out;
+	}
+	/* turn on TDLS */
+	ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_SETUP, tdls_auto_mode);
+	if (ret < 0) {
+		return ret;
+	}
+	if (info.mode) {
+		ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret) {
+			WL_ERR(("tdls_endpoint error %d\n", ret));
+		}
+	}
+out:
+	if (ret) {
+		return -ENOTSUPP;
+	}
+#endif /* WLTDLS */
+	return ret;
+}
+#endif 
+
+/*
+ * This function returns the number of bytes written.
+ * In case of failure it returns zero, not a BCME error code.
+ */
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len,
+	enum wl_management_type type)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 ret = 0;
+	struct ether_addr primary_mac;
+	s32 bssidx = 0;
+	s32 pktflag = 0;
+	cfg = wl_get_cfg(ndev);
+
+	if (wl_get_drv_status(cfg, AP_CREATING, ndev)) {
+		/* Vendor IEs should be set to FW
+		 * after SoftAP interface is brought up
+		 */
+		WL_DBG(("Skipping set IE since AP is not up \n"));
+		goto exit;
+	} else  if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+		/* Either stand alone AP case or P2P discovery */
+		if (wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+			/* Stand alone AP case on primary interface */
+			WL_DBG(("Apply IEs for Primary AP Interface \n"));
+			bssidx = 0;
+		} else {
+			if (!cfg->p2p) {
+				/* If p2p not initialized, return failure */
+				WL_ERR(("P2P not initialized \n"));
+				goto exit;
+			}
+			/* P2P Discovery case (p2p listen) */
+			if (!cfg->p2p->on) {
+				/* Turn on Discovery interface */
+				get_primary_mac(cfg, &primary_mac);
+				wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+				p2p_on(cfg) = true;
+				ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+				if (unlikely(ret)) {
+					WL_ERR(("Enable discovery failed \n"));
+					goto exit;
+				}
+			}
+			WL_DBG(("Apply IEs for P2P Discovery Iface \n"));
+			ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+			bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		}
+	} else {
+		/* Virtual AP/ P2P Group Interface */
+		WL_DBG(("Apply IEs for iface:%s\n", ndev->name));
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+	}
+
+	if (ndev != NULL) {
+		switch (type) {
+			case WL_BEACON:
+				pktflag = VNDR_IE_BEACON_FLAG;
+				break;
+			case WL_PROBE_RESP:
+				pktflag = VNDR_IE_PRBRSP_FLAG;
+				break;
+			case WL_ASSOC_RESP:
+				pktflag = VNDR_IE_ASSOCRSP_FLAG;
+				break;
+		}
+		if (pktflag) {
+			ret = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+				ndev_to_cfgdev(ndev), bssidx, pktflag, buf, len);
+		}
+	}
+exit:
+	return ret;
+}
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+static s32
+wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
+{
+	u32 val = 0;
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	/* Set interface up, explicitly. */
+	val = 1;
+
+	ret = wldev_ioctl_set(ndev, WLC_UP, (void *)&val, sizeof(val));
+	if (ret < 0) {
+		WL_ERR(("set interface up failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Stop all scan explicitly, till auto channel selection complete. */
+	wl_set_drv_status(cfg, SCANNING, ndev);
+	if (cfg->escan_info.ndev == NULL) {
+		ret = BCME_OK;
+		goto done;
+	}
+	ret = wl_notify_escan_complete(cfg, ndev, true, true);
+	if (ret < 0) {
+		WL_ERR(("set scan abort failed, error = %d\n", ret));
+		ret = BCME_OK; // terence 20140115: fix escan_complete error
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+static bool
+wl_cfg80211_valid_channel_p2p(int channel)
+{
+	bool valid = false;
+
+	/* channel 1 to 14 */
+	if ((channel >= 1) && (channel <= 14)) {
+		valid = true;
+	}
+	/* channel 36 to 48 */
+	else if ((channel >= 36) && (channel <= 48)) {
+		valid = true;
+	}
+	/* channel 149 to 161 */
+	else if ((channel >= 149) && (channel <= 161)) {
+		valid = true;
+	}
+	else {
+		valid = false;
+		WL_INFORM(("invalid P2P chanspec, channel = %d\n", channel));
+	}
+
+	return valid;
+}
+
+s32
+wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = NULL;
+	chanspec_t chanspec = 0;
+
+	cfg = wl_get_cfg(ndev);
+
+	/* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+	}
+
+	return ret;
+}
+
+s32
+wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	u32 channel = 0;
+	s32 ret = BCME_ERROR;
+	s32 i = 0;
+	s32 j = 0;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	wl_uint32_list_t *list = NULL;
+	chanspec_t chanspec = 0;
+
+	/* Restrict channels to 5GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+		goto done;
+	}
+
+	list = (wl_uint32_list_t *)buf;
+	/* Skip DFS and invalid P2P channels. */
+	for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+		chanspec = (chanspec_t) dtoh32(list->element[i]);
+		channel = CHSPEC_CHANNEL(chanspec);
+
+		ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+		if (ret < 0) {
+			WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
+			goto done;
+		}
+
+		if (CHANNEL_IS_RADAR(channel) ||
+			!(wl_cfg80211_valid_channel_p2p(CHSPEC_CHANNEL(chanspec)))) {
+			continue;
+		} else {
+			list->element[j] = list->element[i];
+		}
+
+		j++;
+	}
+
+	list->count = j;
+
+done:
+	return ret;
+}
+
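+/* Start firmware auto channel selection on the supplied chanspec list and poll
+ * WLC_GET_CHANNEL_SEL until a channel is chosen or the retry count expires.
+ */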
+static s32
+wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+	int *channel)
+{
+	s32 ret = BCME_ERROR;
+	int chosen = 0;
+	int retry = 0;
+	uint chip;
+
+	/* Start auto channel selection scan. */
+	ret = wldev_ioctl_set(ndev, WLC_START_CHANNEL_SEL, buf, buflen);
+	if (ret < 0) {
+		WL_ERR(("can't start auto channel scan, error = %d\n", ret));
+		*channel = 0;
+		goto done;
+	}
+
+	/* Wait for auto channel selection, worst case possible delay is 5250ms. */
+	retry = CHAN_SEL_RETRY_COUNT;
+
+	while (retry--) {
+		OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
+		chosen = 0;
+		ret = wldev_ioctl_get(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
+		if ((ret == 0) && (dtoh32(chosen) != 0)) {
+			chip = dhd_conf_get_chip(dhd_get_pub(ndev));
+			if (chip != BCM43362_CHIP_ID &&	chip != BCM4330_CHIP_ID &&
+					chip != BCM43143_CHIP_ID) {
+				u32 chanspec = 0;
+				int ctl_chan;
+				chanspec = wl_chspec_driver_to_host(chosen);
+				WL_INFORM(("selected chanspec = 0x%x\n", chanspec));
+				ctl_chan = wf_chspec_ctlchan(chanspec);
+				WL_INFORM(("selected ctl_chan = %d\n", ctl_chan));
+				*channel = (u16)(ctl_chan & 0x00FF);
+			} else
+				*channel = (u16)(chosen & 0x00FF);
+			WL_INFORM(("selected channel = %d\n", *channel));
+			break;
+		}
+		WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
+			(CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("failure, auto channel selection timed out\n"));
+		*channel = 0;
+		ret = BCME_ERROR;
+	}
+	WL_INFORM(("selected channel = %d\n", *channel));
+
+done:
+	return ret;
+}
+
+static s32
+wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	/* Clear scan stop driver status. */
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	return BCME_OK;
+}
+
+s32
+wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
+{
+	int channel = 0, band, band_cur;
+	s32 ret = BCME_ERROR;
+	u8 *buf = NULL;
+	char *pos = cmd;
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_device *ndev = NULL;
+
+	memset(cmd, 0, total_len);
+
+	buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		WL_ERR(("failed to allocate chanspec buffer\n"));
+		return -ENOMEM;
+	}
+
+	/*
+	 * Always use the primary interface, irrespective of the interface on
+	 * which the command arrived.
+	 */
+	cfg = wl_get_cfg(dev);
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/*
+	 * Make sure that FW and driver are in right state to do auto channel
+	 * selection scan.
+	 */
+	ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
+		goto done;
+	}
+
+	ret = wldev_ioctl(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur), false);
+	if (band_cur != WLC_BAND_5G) {
+		/* Best channel selection in 2.4GHz band. */
+		ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+		if (ret < 0) {
+			WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
+			goto done;
+		}
+
+		ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+			&channel);
+		if (ret < 0) {
+			WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
+			goto done;
+		}
+
+		if (CHANNEL_IS_2G(channel)) {
+#if 0
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+			channel = ieee80211_channel_to_frequency(channel);
+#else
+			channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+#endif
+#endif
+		} else {
+			WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
+			channel = 0;
+		}
+		pos += snprintf(pos, total_len, "2g=%d ", channel);
+	}
+
+	if (band_cur != WLC_BAND_2G) {
+		// terence 20140120: fix for some chipsets that only return 2.4GHz channels (4330b2/43341b0/4339a0)
+		band = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G;
+		ret = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
+		if (ret < 0) {
+			WL_ERR(("WLC_SET_BAND error %d\n", ret));
+			goto done;
+		}
+
+		/* Best channel selection in 5GHz band. */
+		ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+		if (ret < 0) {
+			WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
+			goto done;
+		}
+
+		ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+			&channel);
+		if (ret < 0) {
+			WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
+			goto done;
+		}
+
+		if (CHANNEL_IS_5G(channel)) {
+#if 0
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+			channel = ieee80211_channel_to_frequency(channel);
+#else
+			channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+#endif
+#endif
+		} else {
+			WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
+			channel = 0;
+		}
+
+		ret = wldev_ioctl(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur), true);
+		if (ret < 0)
+			WL_ERR(("WLC_SET_BAND error %d\n", ret));
+		pos += snprintf(pos, total_len, "5g=%d ", channel);
+	}
+
+done:
+	if (NULL != buf) {
+		kfree(buf);
+	}
+
+	/* Restore FW and driver back to normal state. */
+	ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+	}
+
+	printf("%s: %s\n", __FUNCTION__, cmd);
+
+	return (pos - cmd);
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+static const struct rfkill_ops wl_rfkill_ops = {
+	.set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	WL_DBG(("Enter \n"));
+	WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+	if (!cfg)
+		return -EINVAL;
+
+	cfg->rf_blocked = blocked;
+
+	return 0;
+}
+
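+/* Allocate and register the rfkill switch when 'setup' is true; otherwise
+ * unregister and destroy it.
+ */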
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
+{
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg)
+		return -EINVAL;
+	if (setup) {
+		cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
+			wl_cfg80211_get_parent_dev(),
+			RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
+
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		err = rfkill_register(cfg->rfkill);
+
+		if (err)
+			rfkill_destroy(cfg->rfkill);
+	} else {
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		rfkill_unregister(cfg->rfkill);
+		rfkill_destroy(cfg->rfkill);
+	}
+
+err_out:
+	return err;
+}
+
+#ifdef DEBUGFS_CFG80211
+/**
+* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
+* to turn on the SCAN and DBG logs.
+* To turn off SCAN only, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
+* To see the current debug level setting,
+* cat /sys/kernel/debug/dhd/debug_level
+*/
+static ssize_t
+wl_debuglevel_write(struct file *file, const char __user *userbuf,
+	size_t count, loff_t *ppos)
+{
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
+	char *params, *token, *colon;
+	uint i, tokens, log_on = 0;
+	size_t minsize = min_t(size_t, (sizeof(tbuf) - 1), count);
+
+	memset(tbuf, 0, sizeof(tbuf));
+	memset(sublog, 0, sizeof(sublog));
+	if (copy_from_user(&tbuf, userbuf, minsize)) {
+		return -EFAULT;
+	}
+
+	tbuf[minsize] = '\0';
+	params = &tbuf[0];
+	colon = strchr(params, '\n');
+	if (colon != NULL)
+		*colon = '\0';
+	while ((token = strsep(&params, " ")) != NULL) {
+		memset(sublog, 0, sizeof(sublog));
+		if (token == NULL || !*token)
+			break;
+		if (*token == '\0')
+			continue;
+		colon = strchr(token, ':');
+		if (colon != NULL) {
+			*colon = ' ';
+		}
+		tokens = sscanf(token, "%s %u", sublog, &log_on);
+		if (colon != NULL)
+			*colon = ':';
+
+		if (tokens == 2) {
+				for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+					if (!strncmp(sublog, sublogname_map[i].sublogname,
+						strlen(sublogname_map[i].sublogname))) {
+						if (log_on)
+							wl_dbg_level |=
+							(sublogname_map[i].log_level);
+						else
+							wl_dbg_level &=
+							~(sublogname_map[i].log_level);
+					}
+				}
+		} else
+			WL_ERR(("%s: can't parse '%s' as a "
+			       "SUBMODULE:LEVEL (%d tokens)\n",
+			       tbuf, token, tokens));
+
+
+	}
+	return count;
+}
+
+static ssize_t
+wl_debuglevel_read(struct file *file, char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	char *param;
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
+	uint i;
+	memset(tbuf, 0, sizeof(tbuf));
+	param = &tbuf[0];
+	for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+		param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
+			sublogname_map[i].sublogname,
+			(wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+	}
+	*param = '\n';
+	return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
+
+}
+static const struct file_operations fops_debuglevel = {
+	.open = NULL,
+	.write = wl_debuglevel_write,
+	.read = wl_debuglevel_read,
+	.owner = THIS_MODULE,
+	.llseek = NULL,
+};
+
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	struct dentry *_dentry;
+	if (!cfg)
+		return -EINVAL;
+	cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
+		if (cfg->debugfs == ERR_PTR(-ENODEV))
+			WL_ERR(("Debugfs is not enabled on this kernel\n"));
+		else
+			WL_ERR(("Can not create debugfs directory\n"));
+		cfg->debugfs = NULL;
+		goto exit;
+
+	}
+	_dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
+		cfg->debugfs, cfg, &fops_debuglevel);
+	if (!_dentry || IS_ERR(_dentry)) {
+		WL_ERR(("failed to create debug_level debug file\n"));
+		wl_free_debugfs(cfg);
+	}
+exit:
+	return err;
+}
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
+{
+	if (!cfg)
+		return -EINVAL;
+	if (cfg->debugfs)
+		debugfs_remove_recursive(cfg->debugfs);
+	cfg->debugfs = NULL;
+	return 0;
+}
+#endif /* DEBUGFS_CFG80211 */
+
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+	return cfg80211_parent_dev;
+}
+
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+	cfg80211_parent_dev = dev;
+}
+
+static void wl_cfg80211_clear_parent_dev(void)
+{
+	cfg80211_parent_dev = NULL;
+}
+
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	if (wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
+		0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync) == BCME_OK) {
+		memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+	} else {
+		memset(mac->octet, 0, ETHER_ADDR_LEN);
+	}
+}
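+/* Verify that the requested cfg80211 device role matches the current firmware
+ * operating mode (HostAP mode for AP, P2P GO mode for P2P GO).
+ */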
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (((dev_role == NL80211_IFTYPE_AP) &&
+		!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+		((dev_role == NL80211_IFTYPE_P2P_GO) &&
+		!(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+	{
+		WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode));
+		return false;
+	}
+	return true;
+}
+
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+
+	if (!cfg || !cfg->wdev)
+		return -EINVAL;
+
+	if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
+		return -1;
+
+	return 0;
+}
+
+void wl_cfg80211_enable_trace(u32 level)
+{
+	wl_dbg_level = level;
+	printf("%s: wl_dbg_level = 0x%x\n", __FUNCTION__, wl_dbg_level);
+}
+
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	/* CFG80211 checks for the tx_cancel_wait callback when ATTR_DURATION
+	 * is passed with CMD_FRAME. This callback is supposed to cancel
+	 * the off-channel wait. Since we are already taking care of that
+	 * with the tx_mgmt logic, do nothing here.
+	 */
+
+	return 0;
+}
+#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
+
+#ifdef WL11U
+static bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+/* unfortunately it's too much work to get rid of the const cast - bcm_parse_tlvs
+ * is used everywhere and changing its prototype to take a const qualifier would
+ * require a massive change to all its callers...
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	if ((ie = bcm_parse_tlvs((void *)parse, (int)len, DOT11_MNG_INTERWORKING_ID))) {
+		return ie;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	return NULL;
+}
+
+static s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
+{
+	ie_setbuf_t ie_setbuf;
+
+	WL_DBG(("clear interworking IE\n"));
+
+	memset(&ie_setbuf, 0, sizeof(ie_setbuf_t));
+
+	ie_setbuf.ie_buffer.iecount = htod32(1);
+	ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+	ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
+
+	return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+}
+
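+/* Program the 802.11u Interworking IE into the firmware through the "ie" iovar,
+ * clearing any previously set Interworking IE first.
+ */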
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len)
+{
+	s32 err = BCME_OK;
+	s32 buf_len;
+	ie_setbuf_t *ie_setbuf;
+	ie_getbuf_t ie_getbufp;
+	char getbuf[WLC_IOCTL_SMLEN];
+
+	if (ie_id != DOT11_MNG_INTERWORKING_ID) {
+		WL_ERR(("unsupported (id=%d)\n", ie_id));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* the Access Network Options field (1 octet) is mandatory */
+	if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
+		WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
+		return BCME_BADARG;
+	}
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+			VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+			VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+			VNDR_IE_CUSTOM_FLAG))) {
+		WL_ERR(("invalid packet flag 0x%x\n", pktflag));
+		return BCME_BADARG;
+	}
+
+	buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+
+	ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
+	if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
+			sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
+			== BCME_OK) {
+		if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
+			WL_DBG(("skip to set interworking IE\n"));
+			return BCME_OK;
+		}
+	}
+
+	/* if already set with previous values, delete it first */
+	if (cfg->wl11u) {
+		if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
+			return err;
+		}
+	}
+
+	ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+	if (!ie_setbuf) {
+		WL_ERR(("Error allocating buffer for IE\n"));
+		return -ENOMEM;
+	}
+	strncpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
+	ie_setbuf->cmd[sizeof(ie_setbuf->cmd) - 1] = '\0';
+
+	/* Buffer contains only 1 IE */
+	ie_setbuf->ie_buffer.iecount = htod32(1);
+	/* use VNDR_IE_CUSTOM_FLAG for non-vendor IEs; currently a fixed value */
+	ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
+
+	/* Now, add the IE to the buffer */
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+	memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
+
+	if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
+			== BCME_OK) {
+		WL_DBG(("set interworking IE\n"));
+		cfg->wl11u = TRUE;
+		err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+	}
+
+	kfree(ie_setbuf);
+	return err;
+}
+#endif /* WL11U */
+
+
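+/* Set the radio band for the interface: disconnect first if associated, then
+ * issue the "if_band" iovar, falling back to WLC_SET_BAND if unsupported.
+ */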
+s32
+wl_cfg80211_set_if_band(struct net_device *ndev, int band)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	int ret = 0, wait_cnt;
+	char ioctl_buf[32];
+
+	if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
+		WL_ERR(("Invalid band\n"));
+		return -EINVAL;
+	}
+	if (wl_get_drv_status(cfg, CONNECTED, ndev) ||
+			wl_get_drv_status(cfg, CONNECTING, ndev)) {
+		/* If the driver is in connected or connecting state, try to disconnect first;
+		* if the dongle is associated, the 'if_band' iovar would be rejected.
+		*/
+		wl_set_drv_status(cfg, DISCONNECTING, ndev);
+		ret = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+		if (ret < 0) {
+			WL_ERR(("WLC_DISASSOC error %d\n", ret));
+			/* continue to set 'if_band' */
+		}
+		else {
+			/* This ensures that the 'if_band' iovar is issued only after
+			* disconnection has completed
+			*/
+			wait_cnt = WAIT_FOR_DISCONNECT_MAX;
+			while (wl_get_drv_status(cfg, DISCONNECTING, ndev) && wait_cnt) {
+				WL_DBG(("Wait until disconnected. wait_cnt: %d\n", wait_cnt));
+				wait_cnt--;
+				OSL_SLEEP(10);
+			}
+		}
+	}
+	if ((ret = wldev_iovar_setbuf(ndev, "if_band", &band,
+			sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+		WL_ERR(("setting if_band failed ret=%d\n", ret));
+		/* issue 'WLC_SET_BAND' if if_band is not supported */
+		if (ret == BCME_UNSUPPORTED) {
+			ret = wldev_set_band(ndev, band);
+			if (ret < 0) {
+				WL_ERR(("setting band failed ret=%d\n", ret));
+			}
+		}
+	}
+	return ret;
+}
+
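+/* Handle the "dfs_ap_move" private command: with no argument report the current
+ * AP move status, with -1 abort the move, otherwise move the AP to the given
+ * chanspec.
+ */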
+s32
+wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	char ioctl_buf[50];
+	int err = 0;
+	uint32 val = 0;
+	chanspec_t chanspec = 0;
+	int abort;
+	int bytes_written = 0;
+	wl_dfs_ap_move_status_t *status;
+	char chanbuf[CHANSPEC_STR_LEN];
+	const char *dfs_state_str[DFS_SCAN_S_MAX] = {
+		"Radar Free On Channel",
+		"Radar Found On Channel",
+		"Radar Scan In Progress",
+		"Radar Scan Aborted",
+		"RSDB Mode switch in Progress For Scan"
+	};
+	if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+		bytes_written = snprintf(command, total_len, "AP is not up\n");
+		return bytes_written;
+	}
+	if (!*data) {
+		if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN,  &cfg->ioctl_buf_sync))) {
+			WL_ERR(("getting dfs_ap_move status failed with err=%d\n", err));
+			return err;
+		}
+		status = (wl_dfs_ap_move_status_t *)cfg->ioctl_buf;
+
+		if (status->version != WL_DFS_AP_MOVE_VERSION) {
+			err = BCME_UNSUPPORTED;
+			WL_ERR(("err=%d version=%d\n", err, status->version));
+			return err;
+		}
+
+		if (status->move_status != (int8) DFS_SCAN_S_IDLE) {
+			chanspec = wl_chspec_driver_to_host(status->chanspec);
+			if (chanspec != 0 && chanspec != INVCHANSPEC) {
+				wf_chspec_ntoa(chanspec, chanbuf);
+				bytes_written = snprintf(command, total_len,
+					"AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec);
+
+			}
+			bytes_written += snprintf(command + bytes_written, total_len,
+					 "%s\n", dfs_state_str[status->move_status]);
+			return bytes_written;
+		} else {
+			bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n");
+			return bytes_written;
+		}
+
+	}
+
+	abort = bcm_atoi(data);
+	if (abort == -1) {
+		if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort,
+				sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+			WL_ERR(("setting dfs_ap_move failed with err %d\n", err));
+			return err;
+		}
+	} else {
+		chanspec = wf_chspec_aton(data);
+		if (chanspec != 0) {
+			val = wl_chspec_host_to_driver(chanspec);
+			if (val != INVCHANSPEC) {
+				if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val,
+					sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+					WL_ERR(("setting dfs_ap_move failed with err %d\n", err));
+					return err;
+				}
+				WL_DBG(("set dfs_ap_move successful\n"));
+			} else {
+				err = BCME_USAGE_ERROR;
+			}
+		}
+	}
+	return err;
+}
+
+#ifdef WBTEXT
+s32
+wl_cfg80211_wbtext_set_default(struct net_device *ndev)
+{
+	char commandp[WLC_IOCTL_SMLEN];
+	s32 ret = BCME_OK;
+	char *data;
+
+	WL_DBG(("set wbtext to default\n"));
+
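+	/* Apply the defaults in sequence: roam profiles, RSSI/CU weights and
+	 * RSSI/CU factor tables, issuing both the _A and _B default strings,
+	 * and bail out on the first failure.
+	 */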
+	/* set roam profile */
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_PROFILE_CONFIG, DEFAULT_WBTEXT_PROFILE_A);
+	data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set roam_prof %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_PROFILE_CONFIG, DEFAULT_WBTEXT_PROFILE_B);
+	data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set roam_prof %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	/* set RSSI weight */
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_A);
+	data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_B);
+	data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	/* set CU weight */
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_A);
+	data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_B);
+	data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	/* set RSSI table */
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_A);
+	data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_B);
+	data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	/* set CU table */
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_A);
+	data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set CU table %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	memset(commandp, 0, sizeof(commandp));
+	snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+		CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_B);
+	data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+	ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+	if (ret != BCME_OK) {
+		WL_ERR(("%s: Failed to set CU table %s error = %d\n",
+			__FUNCTION__, data, ret));
+		return ret;
+	}
+
+	return ret;
+}
+
+s32
+wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len)
+{
+	uint i = 0;
+	long int rssi_lower, roam_trigger;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	wl_roam_prof_band_t *rp;
+	int err = -EINVAL, bytes_written = 0;
+	size_t len = strlen(data);
+	int rp_len = 0;
+	data[len] = '\0';
+	rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp)
+			* WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL);
+	if (unlikely(!rp)) {
+		WL_ERR(("%s: failed to allocate memory\n", __func__));
+		err =  -ENOMEM;
+		goto exit;
+	}
+	rp->ver = WL_MAX_ROAM_PROF_VER;
+	if (*data && (!strncmp(data, "b", 1))) {
+		rp->band = WLC_BAND_2G;
+	} else if (*data && (!strncmp(data, "a", 1))) {
+		rp->band = WLC_BAND_5G;
+	} else {
+		err = snprintf(command, total_len, "Missing band\n");
+		goto exit;
+	}
+	data++;
+	rp->len = 0;
+	/* Getting roam profile  from fw */
+	if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN,  &cfg->ioctl_buf_sync))) {
+		WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+		goto exit;
+	}
+	memcpy(rp, cfg->ioctl_buf, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS);
+	/* roam_prof version get */
+	if (rp->ver != WL_MAX_ROAM_PROF_VER) {
+		WL_ERR(("bad version (=%d) in return data\n", rp->ver));
+		err = -EINVAL;
+		goto exit;
+	}
+	if ((rp->len % sizeof(wl_roam_prof_t)) != 0) {
+		WL_ERR(("bad length (=%d) in return data\n", rp->len));
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (!*data) {
+		for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+			/* Print the roam profile data read from fw and exit the loop
+			 * when either of the conditions below is hit: the remaining
+			 * buffer length is less than one roam profile entry, or
+			 * there is no valid entry left.
+			 */
+			if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+				(rp->roam_prof[i].fullscan_period == 0)) {
+				break;
+			}
+			bytes_written += snprintf(command+bytes_written,
+					total_len, "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n",
+					rp->roam_prof[i].roam_trigger, rp->roam_prof[i].rssi_lower,
+					rp->roam_prof[i].channel_usage,
+					rp->roam_prof[i].cu_avg_calc_dur);
+		}
+		err = bytes_written;
+		goto exit;
+	} else {
+		for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+			/* Read the roam profile data from fw and exit the loop
+			 * when either of the conditions below is hit: the remaining
+			 * buffer length is less than one roam profile entry, or there
+			 * is no valid entry left.
+			 */
+			if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+				(rp->roam_prof[i].fullscan_period == 0)) {
+				break;
+			}
+		}
+		/* Do not set roam_prof from upper layer if fw doesn't have 2 rows */
+		if (i != 2) {
+			WL_ERR(("FW must have 2 rows to fill roam_prof\n"));
+			err = -EINVAL;
+			goto exit;
+		}
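+		/* The remainder of the argument string is expected to carry, per
+		 * bracket, a "<roam_trigger> <rssi_lower> <channel_usage>
+		 * <cu_avg_calc_dur>" tuple; roam_trigger and rssi_lower must be
+		 * negative values.
+		 */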
+		/* setting roam profile to fw */
+		data++;
+		for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+			roam_trigger = simple_strtol(data, &data, 10);
+			if (roam_trigger >= 0) {
+				WL_ERR(("roam trigger[%d] value must be negative\n", i));
+				err = -EINVAL;
+				goto exit;
+			}
+			rp->roam_prof[i].roam_trigger = roam_trigger;
+			data++;
+			rssi_lower = simple_strtol(data, &data, 10);
+			if (rssi_lower >= 0) {
+				WL_ERR(("rssi lower[%d] value must be negative\n", i));
+				err = -EINVAL;
+				goto exit;
+			}
+			rp->roam_prof[i].rssi_lower = rssi_lower;
+			data++;
+			rp->roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+			data++;
+			rp->roam_prof[i].cu_avg_calc_dur = simple_strtol(data, &data, 10);
+
+			rp_len += sizeof(wl_roam_prof_t);
+
+			if (*data == '\0') {
+				break;
+			}
+			data++;
+		}
+		if (i != 1) {
+			WL_ERR(("Only two roam_prof rows supported.\n"));
+			err = -EINVAL;
+			goto exit;
+		}
+		rp->len = rp_len;
+		if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+				sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
+				&cfg->ioctl_buf_sync)) < 0) {
+			WL_ERR(("setting roam_profile failed with err %d\n", err));
+		}
+	}
+exit:
+	if (rp) {
+		kfree(rp);
+	}
+	return err;
+}
+
+#define BUFSZ 5
+
+#define _S(x) #x
+#define S(x) _S(x)
+
+int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+		char *command, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	int bytes_written = 0, err = -EINVAL, argc = 0;
+	char rssi[BUFSZ], band[BUFSZ], weight[BUFSZ];
+	char *endptr = NULL;
+	wnm_bss_select_weight_cfg_t *bwcfg;
+
+	bwcfg = kzalloc(sizeof(*bwcfg), GFP_KERNEL);
+	if (unlikely(!bwcfg)) {
+		WL_ERR(("%s: failed to allocate memory\n", __func__));
+		err = -ENOMEM;
+		goto exit;
+	}
+	bwcfg->version =  WNM_BSSLOAD_MONITOR_VERSION;
+	bwcfg->type = 0;
+	bwcfg->weight = 0;
+
+	argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band, weight);
+
+	if (!strcasecmp(rssi, "rssi"))
+		bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+	else if (!strcasecmp(rssi, "cu"))
+		bwcfg->type = WNM_BSS_SELECT_TYPE_CU;
+	else {
+		/* Usage DRIVER WBTEXT_WEIGHT_CONFIG <rssi/cu> <band> <weight> */
+		WL_ERR(("%s: Command usage error\n", __func__));
+		goto exit;
+	}
+
+	if (!strcasecmp(band, "a"))
+		bwcfg->band = WLC_BAND_5G;
+	else if (!strcasecmp(band, "b"))
+		bwcfg->band = WLC_BAND_2G;
+	else if (!strcasecmp(band, "all"))
+		bwcfg->band = WLC_BAND_ALL;
+	else {
+		WL_ERR(("%s: Command usage error\n", __func__));
+		goto exit;
+	}
+
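+	/* Two parsed tokens mean a query: read wnm_bss_select_weight back from
+	 * the firmware; a third token is the weight to program.
+	 */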
+	if (argc == 2) {
+		/* If there is no data after band, getting wnm_bss_select_weight from fw */
+		if (bwcfg->band == WLC_BAND_ALL) {
+			WL_ERR(("band option \"all\" is for set only, not get\n"));
+			goto exit;
+		}
+		if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg,
+				sizeof(*bwcfg),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN,  &cfg->ioctl_buf_sync))) {
+			WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err));
+			goto exit;
+		}
+		memcpy(bwcfg, cfg->ioctl_buf, sizeof(*bwcfg));
+		bytes_written = snprintf(command, total_len, "%s %s weight = %d\n",
+			(bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU",
+			(bwcfg->band == WLC_BAND_2G) ? "2G" : "5G", bwcfg->weight);
+		err = bytes_written;
+		goto exit;
+	} else {
+		/* if weight is not an integer, return a command usage error */
+		bwcfg->weight = simple_strtol(weight, &endptr, 0);
+		if (*endptr != '\0') {
+			WL_ERR(("%s: Command usage error", __func__));
+			goto exit;
+		}
+		/* setting weight for iovar wnm_bss_select_weight to fw */
+		if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg,
+				sizeof(*bwcfg),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN,  &cfg->ioctl_buf_sync))) {
+			WL_ERR(("Setting wnm_bss_select_weight failed with err=%d\n", err));
+		}
+	}
+exit:
+	if (bwcfg) {
+		kfree(bwcfg);
+	}
+	return err;
+}
+
+/* WBTEXT_TUPLE_MIN_LEN_CHECK: strlen(low) + " " + strlen(high) + " " + strlen(factor) */
+#define WBTEXT_TUPLE_MIN_LEN_CHECK 5
+
+int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+	char *command, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	int bytes_written = 0, err = -EINVAL;
+	char rssi[BUFSZ], band[BUFSZ];
+	int btcfg_len = 0, i = 0, parsed_len = 0;
+	wnm_bss_select_factor_cfg_t *btcfg;
+	size_t slen = strlen(data);
+	char *start_addr = NULL;
+	data[slen] = '\0';
+
+	btcfg = kzalloc((sizeof(*btcfg) + sizeof(*btcfg) *
+			WL_FACTOR_TABLE_MAX_LIMIT), GFP_KERNEL);
+	if (unlikely(!btcfg)) {
+		WL_ERR(("%s: failed to allocate memory\n", __func__));
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION;
+	btcfg->band = WLC_BAND_AUTO;
+	btcfg->type = 0;
+	btcfg->count = 0;
+
+	sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band);
+
+	if (!strcasecmp(rssi, "rssi")) {
+		btcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+	}
+	else if (!strcasecmp(rssi, "cu")) {
+		btcfg->type = WNM_BSS_SELECT_TYPE_CU;
+	}
+	else {
+		WL_ERR(("%s: Command usage error\n", __func__));
+		goto exit;
+	}
+
+	if (!strcasecmp(band, "a")) {
+		btcfg->band = WLC_BAND_5G;
+	}
+	else if (!strcasecmp(band, "b")) {
+		btcfg->band = WLC_BAND_2G;
+	}
+	else if (!strcasecmp(band, "all")) {
+		btcfg->band = WLC_BAND_ALL;
+	}
+	else {
+		WL_ERR(("%s: Command usage error, wrong band\n", __func__));
+		goto exit;
+	}
+
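+	/* If only "<rssi|cu> <band>" was supplied, dump the current factor
+	 * table; otherwise parse "low high factor" tuples (up to
+	 * WL_FACTOR_TABLE_MAX_LIMIT of them) and push them to the firmware.
+	 */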
+	if ((slen - 1) == (strlen(rssi) + strlen(band))) {
+		/* Getting factor table using iovar 'wnm_bss_select_table' from fw */
+		if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg,
+				sizeof(*btcfg),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN,  &cfg->ioctl_buf_sync))) {
+			WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err));
+			goto exit;
+		}
+		memcpy(btcfg, cfg->ioctl_buf, sizeof(*btcfg));
+		memcpy(btcfg, cfg->ioctl_buf, (btcfg->count+1) * sizeof(*btcfg));
+
+		bytes_written += snprintf(command + bytes_written, total_len,
+					"No of entries in table: %d\n", btcfg->count);
+		bytes_written += snprintf(command + bytes_written, total_len, "%s factor table\n",
+				(btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU");
+		bytes_written += snprintf(command + bytes_written, total_len,
+					"low\thigh\tfactor\n");
+		for (i = 0; i <= btcfg->count-1; i++) {
+			bytes_written += snprintf(command + bytes_written, total_len,
+				"%d\t%d\t%d\n", btcfg->params[i].low, btcfg->params[i].high,
+				btcfg->params[i].factor);
+		}
+		err = bytes_written;
+		goto exit;
+	} else {
+		memset(btcfg->params, 0, sizeof(wnm_bss_select_factor_params_t)
+				* WL_FACTOR_TABLE_MAX_LIMIT);
+		data += (strlen(rssi) + strlen(band) + 2);
+		start_addr = data;
+		slen = slen - (strlen(rssi) + strlen(band) + 2);
+		for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) {
+			if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) {
+				btcfg->params[i].low = simple_strtol(data, &data, 10);
+				data++;
+				btcfg->params[i].high = simple_strtol(data, &data, 10);
+				data++;
+				btcfg->params[i].factor = simple_strtol(data, &data, 10);
+				btcfg->count++;
+				if (*data == '\0') {
+					break;
+				}
+				data++;
+				parsed_len = data - start_addr;
+			} else {
+				WL_ERR(("%s: Command usage error: too few arguments\n", __func__));
+				goto exit;
+			}
+		}
+		btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg));
+		if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len,
+				cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync)) < 0) {
+			WL_ERR(("setting wnm_bss_select_table failed with err %d\n", err));
+			goto exit;
+		}
+	}
+exit:
+	if (btcfg) {
+		kfree(btcfg);
+	}
+	return err;
+}
+
+s32
+wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len)
+{
+	uint i = 0;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+	int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0;
+	char delta[BUFSZ], band[BUFSZ], *endptr = NULL;
+	wl_roam_prof_band_t *rp;
+
+	rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp)
+			* WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL);
+	if (unlikely(!rp)) {
+		WL_ERR(("%s: failed to allocate memory\n", __func__));
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", band, delta);
+	if (!strcasecmp(band, "a"))
+		rp->band = WLC_BAND_5G;
+	else if (!strcasecmp(band, "b"))
+		rp->band = WLC_BAND_2G;
+	else {
+		WL_ERR(("%s: Missing band\n", __func__));
+		goto exit;
+	}
+	/* Getting roam profile  from fw */
+	if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN,  &cfg->ioctl_buf_sync))) {
+		WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+		goto exit;
+	}
+	memcpy(rp, cfg->ioctl_buf, sizeof(wl_roam_prof_band_t));
+	if (rp->ver != WL_MAX_ROAM_PROF_VER) {
+		WL_ERR(("bad version (=%d) in return data\n", rp->ver));
+		err = -EINVAL;
+		goto exit;
+	}
+	if ((rp->len % sizeof(wl_roam_prof_t)) != 0) {
+		WL_ERR(("bad length (=%d) in return data\n", rp->len));
+		err = -EINVAL;
+		goto exit;
+	}
+
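+	/* With a delta argument, program roam_delta into every valid profile
+	 * bracket and write the profile back; without one, report the current
+	 * delta for the requested band.
+	 */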
+	if (argc == 2) {
+		/* if delta is not an integer, return a command usage error */
+		val = simple_strtol(delta, &endptr, 0);
+		if (*endptr != '\0') {
+			WL_ERR(("%s: Command usage error", __func__));
+			goto exit;
+		}
+		for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+		/*
+		 * Walk the roam profile data read from fw and exit the loop when
+		 * either condition below is hit: the remaining buffer length is
+		 * less than one roam profile entry, or there is no valid entry left.
+		 */
+			if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+				(rp->roam_prof[i].fullscan_period == 0)) {
+				break;
+			}
+			if (rp->roam_prof[i].channel_usage != 0) {
+				rp->roam_prof[i].roam_delta = val;
+			}
+			len += sizeof(wl_roam_prof_t);
+		}
+	}
+	else {
+		if (rp->roam_prof[i].channel_usage != 0) {
+			bytes_written = snprintf(command, total_len,
+				"%s Delta %d\n", (rp->band == WLC_BAND_2G) ? "2G" : "5G",
+				rp->roam_prof[0].roam_delta);
+		}
+		err = bytes_written;
+		goto exit;
+	}
+	rp->len = len;
+	if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+			sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("setting roam_profile failed with err %d\n", err));
+	}
+exit:
+	if (rp) {
+		kfree(rp);
+	}
+	return err;
+}
+#endif /* WBTEXT */
+
+
+int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
+{
+	struct net_device *ndev = NULL;
+	unsigned long flags;
+	int clear_flag = 0;
+	int ret = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+	struct cfg80211_scan_info info;
+#endif
+
+	WL_TRACE(("Enter\n"));
+
+	if (!cfg)
+		return -EINVAL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_CFG80211_P2P_DEV_IF
+	if (cfg->scan_request && cfg->scan_request->wdev == cfgdev)
+#else
+	if (cfg->scan_request && cfg->scan_request->dev == cfgdev)
+#endif
+	{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+		info.aborted = true;
+		cfg80211_scan_done(cfg->scan_request, &info);
+#else
+		cfg80211_scan_done(cfg->scan_request, true);
+#endif
+		cfg->scan_request = NULL;
+		clear_flag = 1;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (clear_flag)
+		wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	return ret;
+}
+
+bool wl_cfg80211_is_concurrent_mode(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	if ((cfg) && (wl_get_drv_status_all(cfg, CONNECTED) > 1)) {
+		return true;
+	} else {
+		return false;
+	}
+}
+
+void* wl_cfg80211_get_dhdp(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	return cfg->pub;
+}
+
+bool wl_cfg80211_is_p2p_active(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	return (cfg && cfg->p2p);
+}
+
+bool wl_cfg80211_is_roam_offload(struct net_device * dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	return (cfg && cfg->roam_offload);
+}
+
+bool wl_cfg80211_is_event_from_connected_bssid(struct net_device * dev, const wl_event_msg_t *e,
+		int ifidx)
+{
+	u8 *curbssid = NULL;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (!cfg) {
+		/* When the interface is created using wl,
+		 * ndev->ieee80211_ptr will be NULL.
+		 */
+		return false;
+	}
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+
+	if (!curbssid) {
+		return false;
+	}
+
+	if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+		return true;
+	}
+	return false;
+}
+
+static void wl_cfg80211_work_handler(struct work_struct * work)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_info *iter, *next;
+	s32 err = BCME_OK;
+	s32 pm = PM_FAST;
+	dhd_pub_t *dhd;
+	BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work);
+	WL_DBG(("Enter \n"));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	for_each_ndev(cfg, iter, next) {
+		/* p2p discovery iface ndev could be null */
+		if (iter->ndev) {
+			if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+				(wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS &&
+				wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS))
+				continue;
+			if (iter->ndev) {
+				dhd = (dhd_pub_t *)(cfg->pub);
+				if (dhd_conf_get_pm(dhd) >= 0)
+					pm = dhd_conf_get_pm(dhd);
+				if ((err = wldev_ioctl_set(iter->ndev, WLC_SET_PM,
+						&pm, sizeof(pm))) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n",
+							iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n",
+							iter->ndev->name, err));
+				} else
+					wl_cfg80211_update_power_mode(iter->ndev);
+			}
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	DHD_PM_WAKE_UNLOCK(cfg->pub);
+}
+
+u8
+wl_get_action_category(void *frame, u32 frame_len)
+{
+	u8 category;
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	category = ptr[DOT11_ACTION_CAT_OFF];
+	WL_INFORM(("Action Category: %d\n", category));
+	return category;
+}
+
+int
+wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
+{
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL || ret_action == NULL)
+		return BCME_ERROR;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return BCME_ERROR;
+	if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
+		return BCME_ERROR;
+	*ret_action = ptr[DOT11_ACTION_ACT_OFF];
+	WL_INFORM(("Public Action : %d\n", *ret_action));
+	return BCME_OK;
+}
+
+
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid)
+{
+	s32 err;
+	wl_event_msg_t e;
+
+	bzero(&e, sizeof(e));
+	e.event_type = cpu_to_be32(WLC_E_ROAM);
+	memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+	/* trigger the roam event handler */
+	err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_parse_vndr_ies(u8 *parse, u32 len,
+    struct parsed_vndr_ies *vndr_ies)
+{
+	s32 err = BCME_OK;
+	vndr_ie_t *vndrie;
+	bcm_tlv_t *ie;
+	struct parsed_vndr_ie_info *parsed_info;
+	u32 count = 0;
+	s32 remained_len;
+
+	remained_len = (s32)len;
+	memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+	WL_DBG(("---> len %d\n", len));
+	ie = (bcm_tlv_t *) parse;
+	if (!bcm_valid_tlv(ie, remained_len))
+		ie = NULL;
+	while (ie) {
+		if (count >= MAX_VNDR_IE_NUMBER)
+			break;
+		if (ie->id == DOT11_MNG_VS_ID) {
+			vndrie = (vndr_ie_t *) ie;
+			/* len should be bigger than OUI length + one data length at least */
+			if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+				WL_ERR(("%s: invalid vndr ie. length is too small %d\n",
+					__FUNCTION__, vndrie->len));
+				goto end;
+			}
+			/* if wpa or wme ie, do not add ie */
+			if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+				((vndrie->data[0] == WPA_OUI_TYPE) ||
+				(vndrie->data[0] == WME_OUI_TYPE))) {
+				WL_DBG(("Found WPA/WME oui. Do not add it\n"));
+				goto end;
+			}
+
+			parsed_info = &vndr_ies->ie_info[count++];
+
+			/* save vndr ie information */
+			parsed_info->ie_ptr = (char *)vndrie;
+			parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
+			memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
+			vndr_ies->count = count;
+
+			WL_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x len:%d\n",
+			parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
+			parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0],
+			parsed_info->ie_len));
+		}
+end:
+		ie = bcm_next_tlv(ie, &remained_len);
+	}
+	return err;
+}
+
+#ifdef WLADPS_SEAK_AP_WAR
+static bool
+wl_find_vndr_ies_specific_vender(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, const u8 *vndr_oui)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	struct parsed_vndr_ie_info *vndr_info_list;
+	struct parsed_vndr_ies vndr_ies;
+	bool ret = FALSE;
+	int i;
+
+	if (conn_info->resp_ie_len) {
+		if ((wl_cfg80211_parse_vndr_ies((u8 *)conn_info->resp_ie,
+				conn_info->resp_ie_len, &vndr_ies)) == BCME_OK) {
+			for (i = 0; i < vndr_ies.count; i++) {
+				vndr_info_list = &vndr_ies.ie_info[i];
+				if (!bcmp(vndr_info_list->vndrie.oui,
+						(u8*)vndr_oui, DOT11_OUI_LEN)) {
+					WL_ERR(("Find OUI %02x %02x %02x\n",
+						vndr_info_list->vndrie.oui[0],
+						vndr_info_list->vndrie.oui[1],
+						vndr_info_list->vndrie.oui[2]));
+					ret = TRUE;
+					break;
+				}
+			}
+		}
+	}
+	return ret;
+}
+
+static s32
+wl_set_adps_mode(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint8 enable_mode)
+{
+	int i;
+	int len;
+	int ret = BCME_OK;
+
+	bcm_iov_buf_t *iov_buf = NULL;
+	wl_adps_params_v1_t *data = NULL;
+
+	len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+	iov_buf = kmalloc(len, GFP_KERNEL);
+	if (iov_buf == NULL) {
+		WL_ERR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
+		ret = BCME_NOMEM;
+		goto exit;
+	}
+
+	iov_buf->version = WL_ADPS_IOV_VER;
+	iov_buf->len = sizeof(*data);
+	iov_buf->id = WL_ADPS_IOV_MODE;
+
+	data = (wl_adps_params_v1_t *)iov_buf->data;
+	data->version = ADPS_SUB_IOV_VERSION_1;
+	data->length = sizeof(*data);
+	data->mode = enable_mode;
+
+	for (i = 1; i <= MAX_BANDS; i++) {
+		data->band = i;
+		ret = wldev_iovar_setbuf(ndev, "adps", iov_buf, len,
+				cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
+	}
+
+exit:
+	if (iov_buf) {
+		kfree(iov_buf);
+	}
+	return ret;
+
+}
+#endif /* WLADPS_SEAK_AP_WAR */
+
+static bool
+wl_vndr_ies_exclude_vndr_oui(struct parsed_vndr_ie_info *vndr_info)
+{
+	int i = 0;
+
+	while (exclude_vndr_oui_list[i]) {
+		if (!memcmp(vndr_info->vndrie.oui,
+			exclude_vndr_oui_list[i],
+			DOT11_OUI_LEN)) {
+			return TRUE;
+		}
+		i++;
+	}
+
+	return FALSE;
+}
+
+static bool
+wl_vndr_ies_check_duplicate_vndr_oui(struct bcm_cfg80211 *cfg,
+	struct parsed_vndr_ie_info *vndr_info)
+{
+	wl_vndr_oui_entry_t *oui_entry = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+		if (!memcmp(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN)) {
+			return TRUE;
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	return FALSE;
+}
+
+static bool
+wl_vndr_ies_add_vendor_oui_list(struct bcm_cfg80211 *cfg,
+	struct parsed_vndr_ie_info *vndr_info)
+{
+	wl_vndr_oui_entry_t *oui_entry = NULL;
+
+	oui_entry = kmalloc(sizeof(*oui_entry), GFP_KERNEL);
+	if (oui_entry == NULL) {
+		WL_ERR(("alloc failed\n"));
+		return FALSE;
+	}
+
+	memcpy(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN);
+
+	INIT_LIST_HEAD(&oui_entry->list);
+	list_add_tail(&oui_entry->list, &cfg->vndr_oui_list);
+
+	return TRUE;
+}
+
+static void
+wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg)
+{
+	wl_vndr_oui_entry_t *oui_entry = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	while (!list_empty(&cfg->vndr_oui_list)) {
+		oui_entry = list_entry(cfg->vndr_oui_list.next, wl_vndr_oui_entry_t, list);
+		if (oui_entry) {
+			list_del(&oui_entry->list);
+			kfree(oui_entry);
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+static int
+wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	char *vndr_oui, u32 vndr_oui_len)
+{
+	int i;
+	int vndr_oui_num = 0;
+
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	wl_vndr_oui_entry_t *oui_entry = NULL;
+	struct parsed_vndr_ie_info *vndr_info;
+	struct parsed_vndr_ies vndr_ies;
+
+	char *pos = vndr_oui;
+	u32 remained_buf_len = vndr_oui_len;
+
+	if (!conn_info->resp_ie_len) {
+		return BCME_ERROR;
+	}
+
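+	/* Rebuild the vendor OUI list from the association response IEs,
+	 * skipping excluded and duplicate OUIs, then (if a buffer was given)
+	 * format the entries as "XX-XX-XX " strings.
+	 */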
+	wl_vndr_ies_clear_vendor_oui_list(cfg);
+
+	if ((wl_cfg80211_parse_vndr_ies((u8 *)conn_info->resp_ie,
+		conn_info->resp_ie_len, &vndr_ies)) == BCME_OK) {
+		for (i = 0; i < vndr_ies.count; i++) {
+			vndr_info = &vndr_ies.ie_info[i];
+			if (wl_vndr_ies_exclude_vndr_oui(vndr_info)) {
+				continue;
+			}
+
+			if (wl_vndr_ies_check_duplicate_vndr_oui(cfg, vndr_info)) {
+				continue;
+			}
+
+			wl_vndr_ies_add_vendor_oui_list(cfg, vndr_info);
+			vndr_oui_num++;
+		}
+	}
+
+	if (vndr_oui) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+			if (remained_buf_len < VNDR_OUI_STR_LEN) {
+				return BCME_ERROR;
+			}
+			pos += snprintf(pos, VNDR_OUI_STR_LEN, "%02X-%02X-%02X ",
+				oui_entry->oui[0], oui_entry->oui[1], oui_entry->oui[2]);
+			remained_buf_len -= VNDR_OUI_STR_LEN;
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	}
+
+	return vndr_oui_num;
+}
+
+int
+wl_cfg80211_get_vndr_ouilist(struct bcm_cfg80211 *cfg, uint8 *buf, int max_cnt)
+{
+	wl_vndr_oui_entry_t *oui_entry = NULL;
+	int cnt = 0;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+		memcpy(buf, oui_entry->oui, DOT11_OUI_LEN);
+		cnt++;
+		if (cnt >= max_cnt) {
+			return cnt;
+		}
+		buf += DOT11_OUI_LEN;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	return cnt;
+}
+
+s32
+wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+	s32 index;
+	struct net_info *netinfo;
+	s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG,
+		VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
+
+	netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+	if (!netinfo || !netinfo->wdev) {
+		WL_ERR(("netinfo or netinfo->wdev is NULL\n"));
+		return -1;
+	}
+
+	WL_DBG(("clear management vendor IEs for bssidx:%d \n", bssidx));
+	/* Clear the IEs set in the firmware so that host is in sync with firmware */
+	for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+		if (wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev),
+			bssidx, vndrie_flag[index], NULL, 0) < 0)
+			WL_ERR(("vndr_ies clear failed. Ignoring.. \n"));
+	}
+
+	return 0;
+}
+
+s32
+wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *iter, *next;
+
+	WL_DBG(("clear management vendor IEs \n"));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+	for_each_ndev(cfg, iter, next) {
+		wl_cfg80211_clear_per_bss_ies(cfg, iter->bssidx);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+	return 0;
+}
+
+#define WL_VNDR_IE_MAXLEN 2048
+static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN];
+int
+wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+	struct net_device *ndev = NULL;
+	s32 ret = BCME_OK;
+	u8  *curr_ie_buf = NULL;
+	u8  *mgmt_ie_buf = NULL;
+	u32 mgmt_ie_buf_len = 0;
+	u32 *mgmt_ie_len = 0;
+	u32 del_add_ie_buf_len = 0;
+	u32 total_ie_buf_len = 0;
+	u32 parsed_ie_buf_len = 0;
+	struct parsed_vndr_ies old_vndr_ies;
+	struct parsed_vndr_ies new_vndr_ies;
+	s32 i;
+	u8 *ptr;
+	s32 remained_buf_len;
+	wl_bss_vndr_ies_t *ies = NULL;
+	struct net_info *netinfo;
+
+	WL_DBG(("Enter. pktflag:0x%x bssidx:%x vnd_ie_len:%d \n",
+		pktflag, bssidx, vndr_ie_len));
+
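+	/* Overall flow: pick the per-pktflag cached IE buffer, parse the new
+	 * vendor IEs, build "del" commands for the previously cached IEs and
+	 * "add" commands for the new ones, then push the combined command
+	 * buffer to the firmware via the "vndr_ie" iovar.
+	 */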
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (bssidx > WL_MAX_IFS) {
+		WL_ERR(("bssidx > supported concurrent Ifaces \n"));
+		return -EINVAL;
+	}
+
+	netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+	if (!netinfo) {
+		WL_ERR(("net_info ptr is NULL \n"));
+		return -EINVAL;
+	}
+
+	/* Clear the global buffer */
+	memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+	curr_ie_buf = g_mgmt_ie_buf;
+	ies = &netinfo->bss.ies;
+
+	switch (pktflag) {
+		case VNDR_IE_PRBRSP_FLAG :
+			mgmt_ie_buf = ies->probe_res_ie;
+			mgmt_ie_len = &ies->probe_res_ie_len;
+			mgmt_ie_buf_len = sizeof(ies->probe_res_ie);
+			break;
+		case VNDR_IE_ASSOCRSP_FLAG :
+			mgmt_ie_buf = ies->assoc_res_ie;
+			mgmt_ie_len = &ies->assoc_res_ie_len;
+			mgmt_ie_buf_len = sizeof(ies->assoc_res_ie);
+			break;
+		case VNDR_IE_BEACON_FLAG :
+			mgmt_ie_buf = ies->beacon_ie;
+			mgmt_ie_len = &ies->beacon_ie_len;
+			mgmt_ie_buf_len = sizeof(ies->beacon_ie);
+			break;
+		case VNDR_IE_PRBREQ_FLAG :
+			mgmt_ie_buf = ies->probe_req_ie;
+			mgmt_ie_len = &ies->probe_req_ie_len;
+			mgmt_ie_buf_len = sizeof(ies->probe_req_ie);
+			break;
+		case VNDR_IE_ASSOCREQ_FLAG :
+			mgmt_ie_buf = ies->assoc_req_ie;
+			mgmt_ie_len = &ies->assoc_req_ie_len;
+			mgmt_ie_buf_len = sizeof(ies->assoc_req_ie);
+			break;
+		default:
+			mgmt_ie_buf = NULL;
+			mgmt_ie_len = NULL;
+			WL_ERR(("not suitable packet type (%d)\n", pktflag));
+			return BCME_ERROR;
+	}
+
+	if (vndr_ie_len > mgmt_ie_buf_len) {
+		WL_ERR(("extra IE size too big\n"));
+		ret = -ENOMEM;
+	} else {
+		/* parse and save new vndr_ie in curr_ie_buff before comparing it */
+		if (vndr_ie && vndr_ie_len && curr_ie_buf) {
+			ptr = curr_ie_buf;
+/* must discard vndr_ie constness, attempt to change vndr_ie arg to non-const
+ * causes cascade of errors in other places, fix involves const casts there
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+			if ((ret = wl_cfg80211_parse_vndr_ies((u8 *)vndr_ie,
+				vndr_ie_len, &new_vndr_ies)) < 0) {
+				WL_ERR(("parse vndr ie failed \n"));
+				goto exit;
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) {
+					WL_ERR(("IE size is too big (%d > %d)\n",
+						parsed_ie_buf_len, WL_VNDR_IE_MAXLEN));
+					ret = -EINVAL;
+					goto exit;
+				}
+
+				memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				parsed_ie_buf_len += vndrie_info->ie_len;
+			}
+		}
+
+		if (mgmt_ie_buf != NULL) {
+			if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+				(memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
+				WL_INFORM(("Previous mgmt IE is equal to current IE"));
+				goto exit;
+			}
+
+			/* parse old vndr_ie */
+			if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
+				&old_vndr_ies)) < 0) {
+				WL_ERR(("parse vndr ie failed \n"));
+				goto exit;
+			}
+			/* make a command to delete old ie */
+			for (i = 0; i < old_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+				&old_vndr_ies.ie_info[i];
+
+				WL_INFORM(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"del");
+
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+
+		*mgmt_ie_len = 0;
+		/* Add if there is any extra IE */
+		if (mgmt_ie_buf && parsed_ie_buf_len) {
+			ptr = mgmt_ie_buf;
+
+			remained_buf_len = mgmt_ie_buf_len;
+
+			/* make a command to add new ie */
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				WL_INFORM(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->ie_len - 2,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"add");
+
+				/* verify remained buf size before copy data */
+				if (remained_buf_len >= vndrie_info->ie_len) {
+					remained_buf_len -= vndrie_info->ie_len;
+				} else {
+					WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
+					"found vndr ies # = %d(cur %d), remained len %d, "
+					"cur mgmt_ie_len %d, new ie len = %d\n",
+					pktflag, new_vndr_ies.count, i, remained_buf_len,
+					*mgmt_ie_len, vndrie_info->ie_len));
+					break;
+				}
+
+				/* save the parsed IE in cfg struct */
+				memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				*mgmt_ie_len += vndrie_info->ie_len;
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+
+		if (total_ie_buf_len && cfg->ioctl_buf != NULL) {
+			ret  = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
+				total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (ret)
+				WL_ERR(("vndr ie set error : %d\n", ret));
+		}
+	}
+exit:
+
+	return ret;
+}
+
+#ifdef WL_CFG80211_ACL
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+	const struct cfg80211_acl_data *acl)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+
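+	/* Map the cfg80211 ACL policy onto the driver's MAC list modes:
+	 * DENY_UNLESS_LISTED becomes an allow-list, ACCEPT_UNLESS_LISTED with
+	 * entries becomes a deny-list, anything else leaves filtering disabled.
+	 */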
+	/* get the MAC filter mode */
+	if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+		macmode = MACLIST_MODE_ALLOW;
+	} else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+	acl->n_acl_entries) {
+		macmode = MACLIST_MODE_DENY;
+	}
+
+	/* if acl == NULL, macmode is still disabled.. */
+	if (macmode == MACLIST_MODE_DISABLED) {
+		if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+			WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+		return ret;
+	}
+
+	macnum = acl->n_acl_entries;
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		WL_ERR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	for (i = 0; i < macnum; i++) {
+		memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+		WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return ret;
+}
+#endif /* WL_CFG80211_ACL */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+int wl_chspec_chandef(chanspec_t chanspec,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	struct cfg80211_chan_def *chandef,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	struct chan_info *chaninfo,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) */
+struct wiphy *wiphy)
+
+{
+	uint16 freq = 0;
+	int chan_type = 0;
+	int channel = 0;
+	struct ieee80211_channel *chan;
+
+	if (!chandef) {
+		return -1;
+	}
+	channel = CHSPEC_CHANNEL(chanspec);
+
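+	/* Derive the control channel and an NL80211 channel type from the
+	 * chanspec bandwidth and sideband bits, then translate that into a
+	 * center frequency for the cfg80211 channel lookup below.
+	 */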
+	switch (CHSPEC_BW(chanspec)) {
+		case WL_CHANSPEC_BW_20:
+			chan_type = NL80211_CHAN_HT20;
+			break;
+		case WL_CHANSPEC_BW_40:
+		{
+			if (CHSPEC_SB_UPPER(chanspec)) {
+				channel += CH_10MHZ_APART;
+			} else {
+				channel -= CH_10MHZ_APART;
+			}
+		}
+			chan_type = NL80211_CHAN_HT40PLUS;
+			break;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+		case WL_CHANSPEC_BW_80:
+		case WL_CHANSPEC_BW_8080:
+		{
+			uint16 sb = CHSPEC_CTL_SB(chanspec);
+
+			if (sb == WL_CHANSPEC_CTL_SB_LL) {
+				channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+			} else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+				channel -= CH_10MHZ_APART;
+			} else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+				channel += CH_10MHZ_APART;
+			} else {
+				/* WL_CHANSPEC_CTL_SB_UU */
+				channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+			}
+
+			if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU)
+				chan_type = NL80211_CHAN_HT40MINUS;
+			else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU)
+				chan_type = NL80211_CHAN_HT40PLUS;
+		}
+			break;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+		default:
+			chan_type = NL80211_CHAN_HT20;
+			break;
+
+	}
+
+	if (CHSPEC_IS5G(chanspec))
+		freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
+	else
+		freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
+
+	chan = ieee80211_get_channel(wiphy, freq);
+	WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n",
+		channel, freq, chan_type, chan));
+
+	if (unlikely(!chan)) {
+		/* fw and cfg80211 channel lists are not in sync */
+		WL_ERR(("Couldn't find matching channel in wiphy channel list \n"));
+		ASSERT(0);
+		return -EINVAL;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+	cfg80211_chandef_create(chandef, chan, chan_type);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	chaninfo->freq = freq;
+	chaninfo->chan_type = chan_type;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+	return 0;
+}
+
+void
+wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy)
+{
+	u32 freq;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+	struct cfg80211_chan_def chandef;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	struct chan_info chaninfo;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+
+	if (!wiphy) {
+		printf("wiphy is null\n");
+		return;
+	}
+#ifndef ALLOW_CHSW_EVT
+	/* Channel switch support is only for AP/GO/ADHOC/MESH */
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION ||
+		dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+		WL_ERR(("No channel switch notify support for STA/GC\n"));
+		return;
+	}
+#endif /* !ALLOW_CHSW_EVT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+	if (wl_chspec_chandef(chanspec, &chandef, wiphy))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	if (wl_chspec_chandef(chanspec, &chaninfo, wiphy))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+	{
+		WL_ERR(("chspec_chandef failed\n"));
+		return;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+	freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1;
+	cfg80211_ch_switch_notify(dev, &chandef);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	freq = chaninfo.freq;
+	cfg80211_ch_switch_notify(dev, freq, chaninfo.chan_type);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+
+	WL_ERR(("Channel switch notification for freq: %d chanspec: 0x%x\n", freq, chanspec));
+	return;
+}
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+
+#ifdef WL11ULB
+s32
+wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode)
+{
+	int ret;
+	int cur_mode;
+
+	ret = wldev_iovar_getint(dev, "ulb_mode", &cur_mode);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] ulb_mode get failed. ret:%d \n", ret));
+		return ret;
+	}
+
+	if (cur_mode == mode) {
+		/* If request mode is same as that of the current mode, then
+		 * do nothing (Avoid unnecessary wl down and up).
+		 */
+		WL_INFORM(("[ULB] No change in ulb_mode. Do nothing.\n"));
+		return 0;
+	}
+
+	/* setting of ulb_mode requires wl to be down */
+	ret = wldev_ioctl_set(dev, WLC_DOWN, NULL, 0);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret));
+		return ret;
+	}
+
+	if (mode >= MAX_SUPP_ULB_MODES) {
+		WL_ERR(("[ULB] unsupported ulb_mode :[%d]\n", mode));
+		return -EINVAL;
+	}
+
+	ret = wldev_iovar_setint(dev, "ulb_mode", mode);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] ulb_mode set failed. ret:%d \n", ret));
+		return ret;
+	}
+
+	ret = wldev_ioctl_set(dev, WLC_UP, NULL, 0);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] WLC_UP command failed:[%d]\n", ret));
+		return ret;
+	}
+
+	WL_DBG(("[ULB] ulb_mode set to %d successfully \n", mode));
+
+	return ret;
+}
+
+static s32
+wl_cfg80211_ulbbw_to_ulbchspec(u32 bw)
+{
+	if (bw == ULB_BW_DISABLED) {
+		return WL_CHANSPEC_BW_20;
+	} else if (bw == ULB_BW_10MHZ) {
+		return WL_CHANSPEC_BW_10;
+	} else if (bw == ULB_BW_5MHZ) {
+		return WL_CHANSPEC_BW_5;
+	} else if (bw == ULB_BW_2P5MHZ) {
+		return WL_CHANSPEC_BW_2P5;
+	} else {
+		WL_ERR(("[ULB] unsupported value for ulb_bw \n"));
+		return -EINVAL;
+	}
+}
+
+static chanspec_t
+wl_cfg80211_ulb_get_min_bw_chspec(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, s32 bssidx)
+{
+	struct net_info *_netinfo;
+
+	/*
+	 *  Return the chspec value corresponding to the
+	 *  BW setting for a particular interface
+	 */
+	if (wdev) {
+		/* if wdev is provided, use it */
+		_netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+	} else if (bssidx >= 0) {
+		/* if wdev is not provided, look up the net_info by bssidx */
+		_netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+	} else {
+		WL_ERR(("[ULB] wdev/bssidx not provided\n"));
+		return INVCHANSPEC;
+	}
+
+	if (unlikely(!_netinfo)) {
+		WL_ERR(("[ULB] net_info is null \n"));
+		return INVCHANSPEC;
+	}
+
+	if (_netinfo->ulb_bw) {
+		WL_DBG(("[ULB] wdev_ptr:%p ulb_bw:0x%x \n", _netinfo->wdev, _netinfo->ulb_bw));
+		return wl_cfg80211_ulbbw_to_ulbchspec(_netinfo->ulb_bw);
+	} else {
+		return WL_CHANSPEC_BW_20;
+	}
+}
+
+static s32
+wl_cfg80211_get_ulb_bw(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+	struct net_info *_netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+
+	/*
+	 *  Return the ulb_bw setting for a
+	 *  particular interface
+	 */
+	if (unlikely(!_netinfo)) {
+		WL_ERR(("[ULB] net_info is null \n"));
+		return -1;
+	}
+
+	return _netinfo->ulb_bw;
+}
+
+s32
+wl_cfg80211_set_ulb_bw(struct net_device *dev,
+	u32 ulb_bw,  char *ifname)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	int ret;
+	int mode;
+	struct net_info *_netinfo = NULL, *iter, *next;
+	u32 bssidx;
+
+	if (!ifname)
+		return -EINVAL;
+
+	WL_DBG(("[ULB] Enter. bw_type:%d \n", ulb_bw));
+
+	ret = wldev_iovar_getint(dev, "ulb_mode", &mode);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] ulb_mode not supported \n"));
+		return ret;
+	}
+
+	if (mode != ULB_MODE_STD_ALONE_MODE) {
+		WL_ERR(("[ULB] ulb bw modification allowed only in stand-alone mode\n"));
+		return -EINVAL;
+	}
+
+	if (ulb_bw >= MAX_SUPP_ULB_BW) {
+		WL_ERR(("[ULB] unsupported value (%d) for ulb_bw \n", ulb_bw));
+		return -EINVAL;
+	}
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+	if (strcmp(ifname, "p2p-dev-wlan0") == 0) {
+		/* Use wdev corresponding to the dedicated p2p discovery interface */
+		if (likely(cfg->p2p_wdev)) {
+			_netinfo = wl_get_netinfo_by_wdev(cfg, cfg->p2p_wdev);
+		} else {
+			return -ENODEV;
+		}
+	}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	if (!_netinfo) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		for_each_ndev(cfg, iter, next) {
+			if (iter->ndev) {
+				if (strncmp(iter->ndev->name, ifname, strlen(ifname)) == 0) {
+					_netinfo = wl_get_netinfo_by_netdev(cfg, iter->ndev);
+				}
+			}
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	}
+
+	if (!_netinfo)
+		return -ENODEV;
+	bssidx = _netinfo->bssidx;
+	_netinfo->ulb_bw = ulb_bw;
+
+
+	WL_DBG(("[ULB] Applying ulb_bw:%d for bssidx:%d \n", ulb_bw, bssidx));
+	ret = wldev_iovar_setbuf_bsscfg(dev, "ulb_bw", (void *)&ulb_bw, 4,
+		cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx,
+		&cfg->ioctl_buf_sync);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] ulb_bw set failed. ret:%d \n", ret));
+		return ret;
+	}
+
+	return ret;
+}
+#endif /* WL11ULB */
+
+static void
+wl_ap_channel_ind(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev,
+	chanspec_t chanspec)
+{
+	u32 channel = LCHSPEC_CHANNEL(chanspec);
+
+	WL_DBG(("(%s) AP channel:%d chspec:0x%x \n",
+		ndev->name, channel, chanspec));
+	if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) {
+		/*
+		 * If cached channel is different from the channel indicated
+		 * by the event, notify user space about the channel switch.
+		 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+		wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+		cfg->ap_oper_channel = channel;
+	}
+}
+
+static s32
+wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	chanspec_t chanspec;
+
+	WL_DBG(("Enter\n"));
+	if (unlikely(e->status)) {
+		WL_ERR(("status:0x%x \n", e->status));
+		return -1;
+	}
+
+	if (!data) {
+		return -EINVAL;
+	}
+
+	if (likely(cfgdev)) {
+		ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+		chanspec = *((chanspec_t *)data);
+
+		if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+			/* For AP/GO role */
+			wl_ap_channel_ind(cfg, ndev, chanspec);
+		}
+	}
+
+	return 0;
+}
+
+static s32
+wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
+{
+	int error = 0;
+	u32 chanspec = 0;
+	struct net_device *ndev = NULL;
+
+	WL_DBG(("Enter\n"));
+	if (unlikely(e->status)) {
+		WL_ERR(("status:0x%x \n", e->status));
+		return -1;
+	}
+
+	if (likely(cfgdev)) {
+		ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+		error = wldev_iovar_getint(ndev, "chanspec", &chanspec);
+		if (unlikely(error)) {
+			WL_ERR(("Get chanspec error: %d \n", error));
+			return -1;
+		}
+
+		if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+			/* For AP/GO role */
+			wl_ap_channel_ind(cfg, ndev, chanspec);
+		} else {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+			wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+		}
+
+	}
+
+	return 0;
+}
+
+
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	int err;
+
+	/* Clear the security settings on the primary Interface */
+	err = wldev_iovar_setint(dev, "wsec", 0);
+	if (unlikely(err)) {
+		WL_ERR(("wsec clear failed \n"));
+	}
+	err = wldev_iovar_setint(dev, "auth", 0);
+	if (unlikely(err)) {
+		WL_ERR(("auth clear failed \n"));
+	}
+	err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+	if (unlikely(err)) {
+		WL_ERR(("wpa_auth clear failed \n"));
+	}
+}
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+void wl_cfg80211_del_p2p_wdev(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct wireless_dev *wdev = NULL;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg) {
+		WL_ERR(("Invalid Ptr\n"));
+		return;
+	} else {
+		wdev = cfg->p2p_wdev;
+	}
+
+	if (wdev && cfg->down_disc_if) {
+		wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+		cfg->down_disc_if = FALSE;
+	}
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#ifdef GTK_OFFLOAD_SUPPORT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+static s32
+wl_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+		struct cfg80211_gtk_rekey_data *data)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	gtk_keyinfo_t keyinfo;
+	bcol_gtk_para_t bcol_keyinfo;
+
+	WL_DBG(("Enter\n"));
+	if (data == NULL || cfg->p2p_net == dev) {
+		WL_ERR(("data is NULL or wrong net device\n"));
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
+	prhex("kck", (uchar *) (data->kck), RSN_KCK_LENGTH);
+	prhex("kek", (uchar *) (data->kek), RSN_KEK_LENGTH);
+	prhex("replay_ctr", (uchar *) (data->replay_ctr), RSN_REPLAY_LEN);
+#else
+	prhex("kck", (uchar *)data->kck, RSN_KCK_LENGTH);
+	prhex("kek", (uchar *)data->kek, RSN_KEK_LENGTH);
+	prhex("replay_ctr", (uchar *)data->replay_ctr, RSN_REPLAY_LEN);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
+	bcopy(data->kck, keyinfo.KCK, RSN_KCK_LENGTH);
+	bcopy(data->kek, keyinfo.KEK, RSN_KEK_LENGTH);
+	bcopy(data->replay_ctr, keyinfo.ReplayCounter, RSN_REPLAY_LEN);
+
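+	/* Try the vendor "bcol_gtk_rekey_ptk" iovar first; if that path fails
+	 * (e.g. not supported by this firmware), fall back to the generic
+	 * "gtk_key_info" iovar below.
+	 */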
+	memset(&bcol_keyinfo, 0, sizeof(bcol_keyinfo));
+	bcol_keyinfo.enable = 1;
+	bcol_keyinfo.ptk_len = 64;
+	memcpy(&bcol_keyinfo.ptk[0], data->kck, RSN_KCK_LENGTH);
+	memcpy(&bcol_keyinfo.ptk[RSN_KCK_LENGTH], data->kek, RSN_KEK_LENGTH);
+	err = wldev_iovar_setbuf(dev, "bcol_gtk_rekey_ptk", &bcol_keyinfo,
+		sizeof(bcol_keyinfo), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+	if (!err) {
+		return err;
+	}
+
+	if ((err = wldev_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
+		cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("setting gtk_key_info failed, code=%d\n", err));
+		return err;
+	}
+
+	WL_DBG(("Exit\n"));
+	return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
+
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+int
+wl_cfg80211_set_spect(struct net_device *dev, int spect)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	int wlc_down = 1;
+	int wlc_up = 1;
+	int err = BCME_OK;
+
+	if (!wl_get_drv_status_all(cfg, CONNECTED)) {
+		err = wldev_ioctl_set(dev, WLC_DOWN, &wlc_down, sizeof(wlc_down));
+		if (err) {
+			WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err));
+			return err;
+		}
+
+		err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect));
+		if (err) {
+			WL_ERR(("%s: error setting spect: code: %d\n", __func__, err));
+			return err;
+		}
+
+		err = wldev_ioctl_set(dev, WLC_UP, &wlc_up, sizeof(wlc_up));
+		if (err) {
+			WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err));
+			return err;
+		}
+	}
+	return err;
+}
+
+int
+wl_cfg80211_get_sta_channel(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	int channel = 0;
+
+	if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+		channel = cfg->channel;
+	}
+	return channel;
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef P2P_LISTEN_OFFLOADING
+s32
+wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg)
+{
+	s32 bssidx;
+	int ret = 0;
+	int p2plo_pause = 0;
+	if (!cfg || !cfg->p2p) {
+		WL_ERR(("Wl %p or cfg->p2p %p is null\n",
+			cfg, cfg ? cfg->p2p : 0));
+		return 0;
+	}
+
+	bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
+			"p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause),
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("p2po_stop Failed :%d\n", ret));
+	}
+
+	return  ret;
+}
+s32
+wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	wl_p2plo_listen_t p2plo_listen;
+	int ret = -EAGAIN;
+	int channel = 0;
+	int period = 0;
+	int interval = 0;
+	int count = 0;
+	if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+		WL_ERR(("Sending Action Frames. Try it again.\n"));
+		goto exit;
+	}
+
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		WL_ERR(("Scanning already\n"));
+		goto exit;
+	}
+
+	if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) {
+		WL_ERR(("Scanning being aborted\n"));
+		goto exit;
+	}
+
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		WL_ERR(("p2p listen offloading already running\n"));
+		goto exit;
+	}
+
+	/* Just in case if it is not enabled */
+	if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+		WL_ERR(("cfgp2p_enable discovery failed"));
+		goto exit;
+	}
+
+	bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t));
+
+	if (len) {
+		sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count);
+		if ((channel == 0) || (period == 0) ||
+				(interval == 0) || (count == 0)) {
+			WL_ERR(("Wrong argument %d/%d/%d/%d \n",
+				channel, period, interval, count));
+			ret = -EAGAIN;
+			goto exit;
+		}
+		p2plo_listen.period = period;
+		p2plo_listen.interval = interval;
+		p2plo_listen.count = count;
+
+		WL_ERR(("channel:%d period:%d, interval:%d count:%d\n",
+			channel, period, interval, count));
+	} else {
+		WL_ERR(("Argument len is wrong.\n"));
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+			sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+			bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
+		goto exit;
+	}
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen,
+			sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+			bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("p2po_listen Failed :%d\n", ret));
+		goto exit;
+	}
+
+	wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+exit:
+	return ret;
+}
+s32
+wl_cfg80211_p2plo_listen_stop(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	int ret = -EAGAIN;
+
+	if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL,
+			0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+			bssidx, &cfg->ioctl_buf_sync)) < 0) {
+		WL_ERR(("p2po_stop Failed :%d\n", ret));
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+u64
+wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg)
+{
+	u64 id = 0;
+	id = ++cfg->last_roc_id;
+#ifdef  P2P_LISTEN_OFFLOADING
+	if (id == P2PO_COOKIE) {
+		id = ++cfg->last_roc_id;
+	}
+#endif /* P2P_LISTEN_OFFLOADING */
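+	/* 0 is treated as an invalid remain-on-channel cookie, so skip it */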
+	if (id == 0)
+		id = ++cfg->last_roc_id;
+	return id;
+}
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int
+wl_cfg80211_set_random_mac(struct net_device *dev, bool enable)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	int ret;
+
+	if (cfg->random_mac_enabled == enable) {
+		WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled"));
+		return BCME_OK;
+	}
+
+	if (enable) {
+		ret = wl_cfg80211_random_mac_enable(dev);
+	} else {
+		ret = wl_cfg80211_random_mac_disable(dev);
+	}
+
+	if (!ret) {
+		cfg->random_mac_enabled = enable;
+	}
+
+	return ret;
+}
+
+int
+wl_cfg80211_random_mac_enable(struct net_device *dev)
+{
+	u8 random_mac[ETH_ALEN] = {0, };
+	u8 rand_bytes[3] = {0, };
+	s32 err = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
+	    wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
+		WL_ERR(("failed to set random MAC, current state does not allow it\n"));
+		return err;
+	}
+
+	memcpy(random_mac, bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN);
+	get_random_bytes(&rand_bytes, sizeof(rand_bytes));
+
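+	/* Keep the primary interface's OUI and randomize only the lower three
+	 * bytes; 0x00/0xff in the last octet is remapped below.
+	 */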
+	if (rand_bytes[2] == 0x0 || rand_bytes[2] == 0xff) {
+		rand_bytes[2] = 0xf0;
+	}
+
+	memcpy(&random_mac[3], rand_bytes, sizeof(rand_bytes));
+
+	err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+		random_mac, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+	if (err != BCME_OK) {
+		WL_ERR(("failed to set randomly generated MAC address\n"));
+	} else {
+		WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
+			MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
+			MAC2STRDBG((const u8 *)&random_mac)));
+		WL_ERR(("random MAC enable done\n"));
+	}
+
+	return err;
+}
+
+int
+wl_cfg80211_random_mac_disable(struct net_device *dev)
+{
+	s32 err = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+	WL_ERR(("set original mac " MACDBG "\n",
+		MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr)));
+
+	err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+		bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN,
+		cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+	if (err != BCME_OK) {
+		WL_ERR(("failed to set original MAC address\n"));
+	} else {
+		WL_ERR(("random MAC disable done\n"));
+	}
+
+	return err;
+}
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef WLTDLS
+static s32
+wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg, enum wl_tdls_config state, bool auto_mode)
+{
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	int err = 0;
+	struct net_info *iter, *next;
+	int update_reqd = 0;
+	int enable = 0;
+	dhd_pub_t *dhdp;
+	dhdp = (dhd_pub_t *)(cfg->pub);
+
+	/*
+	 * TDLS need to be enabled only if we have a single STA/GC
+	 * connection.
+	 */
+
+	WL_DBG(("Enter state:%d\n", state));
+
+	if (!cfg->tdls_supported) {
+		/* FW doesn't support tdls. Do nothing */
+		return -ENODEV;
+	}
+
+	/* Protect tdls config session */
+	mutex_lock(&cfg->tdls_sync);
+
+	if (state == TDLS_STATE_TEARDOWN) {
+		/* Host initiated TDLS tear down */
+		err = dhd_tdls_enable(ndev, false, auto_mode, NULL);
+		goto exit;
+	} else if (state == TDLS_STATE_AP_CREATE) {
+		/* We don't support tdls while AP/GO is operational */
+		update_reqd = true;
+		enable = false;
+	} else if ((state == TDLS_STATE_CONNECT) || (state == TDLS_STATE_IF_CREATE)) {
+		if (wl_get_drv_status_all(cfg,
+			CONNECTED) >= TDLS_MAX_IFACE_FOR_ENABLE) {
+			/* For STA/GC connect command request, disable
+			 * tdls if we have any concurrent interfaces
+			 * operational.
+			 */
+			WL_DBG(("Interface limit restriction. disable tdls.\n"));
+			update_reqd = true;
+			enable = false;
+		}
+	} else if ((state == TDLS_STATE_DISCONNECT) ||
+		(state == TDLS_STATE_AP_DELETE) ||
+		(state == TDLS_STATE_SETUP) ||
+		(state == TDLS_STATE_IF_DELETE)) {
+		/* Enable back the tdls connection only if we have less than
+		 * or equal to a single STA/GC connection.
+		 */
+		if (wl_get_drv_status_all(cfg,
+			CONNECTED) == 0) {
+			/* If there are no interfaces connected, enable tdls */
+			update_reqd = true;
+			enable = true;
+		} else if (wl_get_drv_status_all(cfg,
+			CONNECTED) == TDLS_MAX_IFACE_FOR_ENABLE) {
+			/* We have one interface in CONNECTED state.
+			 * Verify whether it's a non-AP interface before
+			 * we enable back tdls.
+			 */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+			for_each_ndev(cfg, iter, next) {
+				if ((iter->ndev) && (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) &&
+					wl_get_mode_by_netdev(cfg, iter->ndev) == WL_MODE_AP) {
+					WL_DBG(("AP/GO operational. Can't enable tdls. \n"));
+					err = -ENOTSUPP;
+					goto exit;
+				}
+			}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+			/* No AP/GO found. Enable back tdls */
+			update_reqd = true;
+			enable = true;
+		} else {
+			WL_DBG(("Concurrent connection mode. Can't enable tdls. \n"));
+			err = -ENOTSUPP;
+			goto exit;
+		}
+	} else {
+		WL_ERR(("Unknown tdls state:%d \n", state));
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (update_reqd == true) {
+		if (dhdp->tdls_enable == enable) {
+			WL_ERR(("No change in tdls state. Do nothing."
+				" tdls_enable:%d\n", enable));
+			goto exit;
+		}
+		err = wldev_iovar_setint(ndev, "tdls_enable", enable);
+		if (unlikely(err)) {
+			WL_ERR(("tdls_enable setting failed. err:%d\n", err));
+			goto exit;
+		} else {
+			WL_DBG(("set tdls_enable: %d done\n", enable));
+			/* Update the dhd state variable to be in sync */
+			dhdp->tdls_enable = enable;
+			if (state == TDLS_STATE_SETUP) {
+				/* For host initiated setup, apply TDLS params
+				 * Don't propagate errors up for param config
+				 * failures
+				 */
+				dhd_tdls_enable(ndev, true, auto_mode, NULL);
+
+			}
+		}
+	} else {
+		WL_DBG(("Skip tdls config. state:%d update_reqd:%d "
+			"current_status:%d \n",
+			state, update_reqd, dhdp->tdls_enable));
+	}
+
+exit:
+	mutex_unlock(&cfg->tdls_sync);
+
+	return err;
+}
+#endif /* WLTDLS */
+
+struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname)
+{
+	struct net_info *iter, *next;
+	struct net_device *ndev = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev) {
+			if (strncmp(iter->ndev->name, ifname, strlen(iter->ndev->name)) == 0) {
+				if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+					ndev = iter->ndev;
+					break;
+				}
+			}
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	return ndev;
+}
+
+struct net_device*
+wl_get_netdev_by_name(struct bcm_cfg80211 *cfg, char *ifname)
+{
+	struct net_info *iter, *next;
+	struct net_device *ndev = NULL;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev) {
+			if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
+				ndev = iter->ndev;
+				break;
+			}
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	return ndev;
+}
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+#define WLC_RATE_FLAG	0x80
+#define RATE_MASK		0x7f
+
+int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhdp;
+	wl_rateset_args_t rs;
+	int error = BCME_ERROR, i;
+	struct net_device *ndev = NULL;
+
+	dhdp = (dhd_pub_t *)(cfg->pub);
+
+	if (dhdp && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		WL_ERR(("Not Hostapd mode\n"));
+		return BCME_NOTAP;
+	}
+
+	ndev = wl_get_ap_netdev(cfg, ifname);
+
+	if (ndev == NULL) {
+		WL_ERR(("No softAP interface named %s\n", ifname));
+		return BCME_NOTAP;
+	}
+
+	bzero(&rs, sizeof(wl_rateset_args_t));
+	error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+		&rs, sizeof(wl_rateset_args_t), NULL);
+	if (error < 0) {
+		WL_ERR(("get rateset failed = %d\n", error));
+		return error;
+	}
+
+	if (rs.count < 1) {
+		WL_ERR(("Failed to get rate count\n"));
+		return BCME_ERROR;
+	}
+
+	/* Host delivers target rate in the unit of 500kbps */
+	/* Converting to a 1 Mbps unit would need fractional handling for the 5.5 Mbps basic rate */
+	for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+		if (rs.rates[i] & WLC_RATE_FLAG)
+			if ((rs.rates[i] & RATE_MASK) == val)
+				break;
+
+	/* Valid rate has been delivered as an argument */
+	if (i < rs.count && i < WL_NUMRATES) {
+		error = wldev_iovar_setint(ndev, "force_bcn_rspec", val);
+		if (error < 0) {
+			WL_ERR(("set beacon rate failed = %d\n", error));
+			return BCME_ERROR;
+		}
+	} else {
+		WL_ERR(("Rate is invalid"));
+		return BCME_BADARG;
+	}
+
+	return BCME_OK;
+}
+
+int
+wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhdp;
+	wl_rateset_args_t rs;
+	int error = BCME_ERROR;
+	int i, bytes_written = 0;
+	struct net_device *ndev = NULL;
+
+	dhdp = (dhd_pub_t *)(cfg->pub);
+
+	if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		WL_ERR(("Not Hostapd mode\n"));
+		return BCME_NOTAP;
+	}
+
+	ndev = wl_get_ap_netdev(cfg, ifname);
+
+	if (ndev == NULL) {
+		WL_ERR(("No softAP interface named %s\n", ifname));
+		return BCME_NOTAP;
+	}
+
+	bzero(&rs, sizeof(wl_rateset_args_t));
+	error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+		&rs, sizeof(wl_rateset_args_t), NULL);
+	if (error < 0) {
+		WL_ERR(("get rateset failed = %d\n", error));
+		return error;
+	}
+
+	if (rs.count < 1) {
+		WL_ERR(("Failed to get rate count\n"));
+		return BCME_ERROR;
+	}
+
+	/* Delivers basic rate in the unit of 500kbps to host */
+	for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+		if (rs.rates[i] & WLC_RATE_FLAG)
+			bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+							"%d ", rs.rates[i] & RATE_MASK);
+
+	/* Remove last space in the command buffer */
+	if (bytes_written) {
+		command[bytes_written - 1] = '\0';
+		bytes_written--;
+	}
+
+	return bytes_written;
+
+}
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+static int
+_wl_update_ap_rps_params(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	rpsnoa_iovar_params_t iovar;
+	u8 smbuf[WLC_IOCTL_SMLEN];
+
+	if (!dev)
+		return BCME_BADARG;
+
+	memset(&iovar, 0, sizeof(iovar));
+	memset(smbuf, 0, sizeof(smbuf));
+
+	iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+	iovar.hdr.subcmd = WL_RPSNOA_CMD_PARAMS;
+	iovar.hdr.len = sizeof(iovar);
+	iovar.param->band = WLC_BAND_ALL;
+	iovar.param->level = cfg->ap_rps_info.level;
+	iovar.param->stas_assoc_check = cfg->ap_rps_info.sta_assoc_check;
+	iovar.param->pps = cfg->ap_rps_info.pps;
+	iovar.param->quiet_time = cfg->ap_rps_info.quiet_time;
+
+	if (wldev_iovar_setbuf(dev, "rpsnoa", &iovar, sizeof(iovar),
+		smbuf, sizeof(smbuf), NULL)) {
+		WL_ERR(("Failed to set rpsnoa params"));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+int
+wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhdp;
+	int error = BCME_ERROR;
+	int bytes_written = 0;
+	struct net_device *ndev = NULL;
+	rpsnoa_iovar_t iovar;
+	u8 smbuf[WLC_IOCTL_SMLEN];
+	u32 chanspec = 0;
+	u8 idx = 0;
+	u8 val;
+
+	dhdp = (dhd_pub_t *)(cfg->pub);
+
+	if (!dhdp) {
+		error = BCME_NOTUP;
+		goto fail;
+	}
+
+	if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		WL_ERR(("Not Hostapd mode\n"));
+		error = BCME_NOTAP;
+		goto fail;
+	}
+
+	ndev = wl_get_ap_netdev(cfg, ifname);
+
+	if (ndev == NULL) {
+		WL_ERR(("No softAP interface named %s\n", ifname));
+		error = BCME_NOTAP;
+		goto fail;
+	}
+
+	memset(&iovar, 0, sizeof(iovar));
+	memset(smbuf, 0, sizeof(smbuf));
+
+	iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+	iovar.hdr.subcmd = WL_RPSNOA_CMD_STATUS;
+	iovar.hdr.len = sizeof(iovar);
+	iovar.data->band = WLC_BAND_ALL;
+
+	error = wldev_iovar_getbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+		smbuf, sizeof(smbuf), NULL);
+	if (error < 0) {
+		WL_ERR(("get ap radio pwrsave failed = %d\n", error));
+		goto fail;
+	}
+
+	/* RSDB event doesn't seem to be handled correctly.
+	 * So check chanspec of AP directly from the firmware
+	 */
+	error = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+	if (error < 0) {
+		WL_ERR(("get chanspec from AP failed = %d\n", error));
+		goto fail;
+	}
+
+	chanspec = wl_chspec_driver_to_host(chanspec);
+	if (CHSPEC_IS2G(chanspec))
+		idx = 0;
+	else if (CHSPEC_IS5G(chanspec))
+		idx = 1;
+	else {
+		error = BCME_BADCHAN;
+		goto fail;
+	}
+
+	val = ((rpsnoa_iovar_t *)smbuf)->data[idx].value;
+	bytes_written += snprintf(command + bytes_written, total_len, "%d", val);
+	error = bytes_written;
+
+fail:
+	return error;
+}
+
+int
+wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhdp;
+	struct net_device *ndev = NULL;
+	rpsnoa_iovar_t iovar;
+	u8 smbuf[WLC_IOCTL_SMLEN];
+	int ret = BCME_OK;
+
+	dhdp = (dhd_pub_t *)(cfg->pub);
+
+	if (!dhdp) {
+		ret = BCME_NOTUP;
+		goto exit;
+	}
+
+	if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		WL_ERR(("Not Hostapd mode\n"));
+		ret = BCME_NOTAP;
+		goto exit;
+	}
+
+	ndev = wl_get_ap_netdev(cfg, ifname);
+
+	if (ndev == NULL) {
+		WL_ERR(("No softAP interface named %s\n", ifname));
+		ret = BCME_NOTAP;
+		goto exit;
+	}
+
+	if (cfg->ap_rps_info.enable != enable) {
+		cfg->ap_rps_info.enable = enable;
+		if (enable) {
+			ret = _wl_update_ap_rps_params(ndev);
+			if (ret) {
+				WL_ERR(("Failed to update rpsnoa params\n"));
+				goto exit;
+			}
+		}
+		memset(&iovar, 0, sizeof(iovar));
+		memset(smbuf, 0, sizeof(smbuf));
+
+		iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+		iovar.hdr.subcmd = WL_RPSNOA_CMD_ENABLE;
+		iovar.hdr.len = sizeof(iovar);
+		iovar.data->band = WLC_BAND_ALL;
+		iovar.data->value = (int16)enable;
+
+		ret = wldev_iovar_setbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+			smbuf, sizeof(smbuf), NULL);
+		if (ret) {
+			WL_ERR(("Failed to enable AP radio power save"));
+			goto exit;
+		}
+		cfg->ap_rps_info.enable = enable;
+	}
+exit:
+	return ret;
+}
+
+int
+wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	dhd_pub_t *dhdp;
+	struct net_device *ndev = NULL;
+
+	dhdp = (dhd_pub_t *)(cfg->pub);
+
+	if (!dhdp)
+		return BCME_NOTUP;
+
+	if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		WL_ERR(("Not Hostapd mode\n"));
+		return BCME_NOTAP;
+	}
+
+	ndev = wl_get_ap_netdev(cfg, ifname);
+
+	if (ndev == NULL) {
+		WL_ERR(("No softAP interface named %s\n", ifname));
+		return BCME_NOTAP;
+	}
+
+	if (!rps)
+		return BCME_BADARG;
+
+	if (rps->pps < RADIO_PWRSAVE_PPS_MIN)
+		return BCME_BADARG;
+
+	if (rps->level < RADIO_PWRSAVE_LEVEL_MIN ||
+		rps->level > RADIO_PWRSAVE_LEVEL_MAX)
+		return BCME_BADARG;
+
+	if (rps->quiet_time < RADIO_PWRSAVE_QUIETTIME_MIN)
+		return BCME_BADARG;
+
+	if (rps->sta_assoc_check > RADIO_PWRSAVE_ASSOCCHECK_MAX ||
+		rps->sta_assoc_check < RADIO_PWRSAVE_ASSOCCHECK_MIN)
+		return BCME_BADARG;
+
+	cfg->ap_rps_info.pps = rps->pps;
+	cfg->ap_rps_info.level = rps->level;
+	cfg->ap_rps_info.quiet_time = rps->quiet_time;
+	cfg->ap_rps_info.sta_assoc_check = rps->sta_assoc_check;
+
+	if (cfg->ap_rps_info.enable) {
+		if (_wl_update_ap_rps_params(ndev)) {
+			WL_ERR(("Failed to update rpsnoa params"));
+			return BCME_ERROR;
+		}
+	}
+
+	return BCME_OK;
+}
+
+void
+wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg)
+{
+	cfg->ap_rps_info.enable = FALSE;
+	cfg->ap_rps_info.sta_assoc_check = RADIO_PWRSAVE_STAS_ASSOC_CHECK;
+	cfg->ap_rps_info.pps = RADIO_PWRSAVE_PPS;
+	cfg->ap_rps_info.quiet_time = RADIO_PWRSAVE_QUIET_TIME;
+	cfg->ap_rps_info.level = RADIO_PWRSAVE_LEVEL;
+}
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+int
+wl_cfg80211_iface_count(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	struct net_info *iter, *next;
+	int iface_count = 0;
+
+	/* Return the count of network interfaces (skip netless p2p discovery
+	 * interface)
+	 */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	for_each_ndev(cfg, iter, next) {
+		if (iter->ndev) {
+			iface_count++;
+		}
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+	return iface_count;
+}
+
+#ifdef WBTEXT
+static bool wl_cfg80211_wbtext_check_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea)
+{
+	wl_wbtext_bssid_t *bssid = NULL;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+	/* check duplicate */
+	list_for_each_entry(bssid, &cfg->wbtext_bssid_list, list) {
+		if (!memcmp(bssid->ea.octet, ea, ETHER_ADDR_LEN)) {
+			return FALSE;
+		}
+	}
+
+	return TRUE;
+}
+
+static bool wl_cfg80211_wbtext_add_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea)
+{
+	wl_wbtext_bssid_t *bssid = NULL;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	bssid = kmalloc(sizeof(wl_wbtext_bssid_t), GFP_KERNEL);
+	if (bssid == NULL) {
+		WL_ERR(("alloc failed\n"));
+		return FALSE;
+	}
+
+	memcpy(bssid->ea.octet, ea, ETHER_ADDR_LEN);
+
+	INIT_LIST_HEAD(&bssid->list);
+	list_add_tail(&bssid->list, &cfg->wbtext_bssid_list);
+
+	WL_DBG(("add wbtext bssid : %s\n", bcm_ether_ntoa(ea, eabuf)));
+
+	return TRUE;
+}
+
+static void wl_cfg80211_wbtext_clear_bssid_list(struct bcm_cfg80211 *cfg)
+{
+	wl_wbtext_bssid_t *bssid = NULL;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	while (!list_empty(&cfg->wbtext_bssid_list)) {
+		bssid = list_entry(cfg->wbtext_bssid_list.next, wl_wbtext_bssid_t, list);
+		if (bssid) {
+			WL_DBG(("clear wbtext bssid : %s\n", bcm_ether_ntoa(&bssid->ea, eabuf)));
+			list_del(&bssid->list);
+			kfree(bssid);
+		}
+	}
+}
+
+static void wl_cfg80211_wbtext_update_rcc(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	bcm_tlv_t * cap_ie = NULL;
+	bool req_sent = FALSE;
+	struct wl_profile *profile;
+
+	WL_DBG(("Enter\n"));
+
+	profile = wl_get_profile_by_netdev(cfg, dev);
+	if (!profile) {
+		WL_ERR(("no profile exists\n"));
+		return;
+	}
+
+	if (wl_cfg80211_wbtext_check_bssid_list(cfg,
+			(struct ether_addr *)&profile->bssid) == FALSE) {
+		WL_DBG(("already updated\n"));
+		return;
+	}
+
+	/* first, check NBR bit in RRM IE */
+	if ((cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+			DOT11_MNG_RRM_CAP_ID)) != NULL) {
+		if (isset(cap_ie->data, DOT11_RRM_CAP_NEIGHBOR_REPORT)) {
+			req_sent = wl_cfg80211_wbtext_send_nbr_req(cfg, dev, profile);
+		}
+	}
+
+	/* if RRM nbr was not supported, check BTM bit in extend cap. IE */
+	if (!req_sent) {
+		if ((cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+				DOT11_MNG_EXT_CAP_ID)) != NULL) {
+			if (cap_ie->len >= DOT11_EXTCAP_LEN_BSSTRANS &&
+					isset(cap_ie->data, DOT11_EXT_CAP_BSSTRANS_MGMT)) {
+				wl_cfg80211_wbtext_send_btm_query(cfg, dev, profile);
+			}
+		}
+	}
+}
+
+static bool wl_cfg80211_wbtext_send_nbr_req(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	struct wl_profile *profile)
+{
+	int error = -1;
+	char *smbuf = NULL;
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	bcm_tlv_t * rrm_cap_ie = NULL;
+	wlc_ssid_t *ssid = NULL;
+	bool ret = FALSE;
+
+	WL_DBG(("Enter\n"));
+
+	/* check RRM nbr bit in extend cap. IE of assoc response */
+	if ((rrm_cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+			DOT11_MNG_RRM_CAP_ID)) != NULL) {
+		if (!isset(rrm_cap_ie->data, DOT11_RRM_CAP_NEIGHBOR_REPORT)) {
+			WL_DBG(("AP doesn't support neighbor report\n"));
+			return FALSE;
+		}
+	}
+
+	smbuf = (char *) kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (smbuf == NULL) {
+		WL_ERR(("failed to allocate memory\n"));
+		goto nbr_req_out;
+	}
+
+	ssid = (wlc_ssid_t *) kzalloc(sizeof(wlc_ssid_t), GFP_KERNEL);
+	if (ssid == NULL) {
+		WL_ERR(("failed to allocate memory\n"));
+		goto nbr_req_out;
+	}
+
+	ssid->SSID_len = MIN(profile->ssid.SSID_len, DOT11_MAX_SSID_LEN);
+	memcpy(ssid->SSID, profile->ssid.SSID, ssid->SSID_len);
+
+	error = wldev_iovar_setbuf(dev, "rrm_nbr_req", ssid,
+		sizeof(wlc_ssid_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+	if (error == BCME_OK) {
+		ret = wl_cfg80211_wbtext_add_bssid_list(cfg,
+			(struct ether_addr *)&profile->bssid);
+	} else {
+		WL_ERR(("failed to send neighbor report request, error=%d\n", error));
+	}
+
+nbr_req_out:
+	if (ssid) {
+		kfree(ssid);
+	}
+
+	if (smbuf) {
+		kfree(smbuf);
+	}
+	return ret;
+}
+
+static bool wl_cfg80211_wbtext_send_btm_query(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	struct wl_profile *profile)
+
+{
+	int error = -1;
+	bool ret = FALSE;
+
+	WL_DBG(("Enter\n"));
+
+	error = wldev_iovar_setbuf(dev, "wnm_bsstrans_query", NULL,
+		0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+	if (error == BCME_OK) {
+		ret = wl_cfg80211_wbtext_add_bssid_list(cfg,
+			(struct ether_addr *)&profile->bssid);
+	} else {
+		WL_ERR(("%s: failed to set BTM query, error=%d\n", __FUNCTION__, error));
+	}
+	return ret;
+}
+
+static void wl_cfg80211_wbtext_set_wnm_maxidle(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+	keepalives_max_idle_t keepalive = {0, 0, 0, 0};
+	s32 bssidx, error;
+	int wnm_maxidle = 0;
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+	/* Does the AP support WNM max idle? */
+	if (bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+			DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID) != NULL) {
+		error = wldev_iovar_getint(dev, "wnm_maxidle", &wnm_maxidle);
+		if (error < 0) {
+			WL_ERR(("failed to get wnm max idle period : %d\n", error));
+		}
+	}
+
+	WL_DBG(("wnm max idle period : %d\n", wnm_maxidle));
+
+	/* if wnm maxidle has valid period, set it as keep alive */
+	if (wnm_maxidle > 0) {
+		keepalive.keepalive_count = 1;
+	}
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) >= 0) {
+		error = wldev_iovar_setbuf_bsscfg(dev, "wnm_keepalives_max_idle", &keepalive,
+			sizeof(keepalives_max_idle_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+			bssidx, &cfg->ioctl_buf_sync);
+		if (error < 0) {
+			WL_ERR(("set wnm_keepalives_max_idle failed : %d\n", error));
+		}
+	}
+}
+
+static int
+wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, int body_len)
+{
+	dot11_rm_action_t *rm_rep;
+	bcm_tlv_t *tlvs;
+	int tlv_len, i, error;
+	dot11_neighbor_rep_ie_t *nbr_rep_ie;
+	chanspec_t ch;
+	wl_roam_channel_list_t channel_list;
+	char iobuf[WLC_IOCTL_SMLEN];
+
+	if (body_len < DOT11_RM_ACTION_LEN) {
+		WL_ERR(("Received Neighbor Report frame with incorrect length %d\n",
+			body_len));
+		return BCME_ERROR;
+	}
+
+	rm_rep = (dot11_rm_action_t *)body;
+	WL_DBG(("received neighbor report (token = %d)\n", rm_rep->token));
+
+	tlvs = (bcm_tlv_t *)&rm_rep->data[0];
+
+	tlv_len = body_len - DOT11_RM_ACTION_LEN;
+
+	while (tlvs && tlvs->id == DOT11_MNG_NEIGHBOR_REP_ID) {
+		nbr_rep_ie = (dot11_neighbor_rep_ie_t *)tlvs;
+
+		if (nbr_rep_ie->len < DOT11_NEIGHBOR_REP_IE_FIXED_LEN) {
+			WL_ERR(("malformed Neighbor Report element with length %d\n",
+				nbr_rep_ie->len));
+			tlvs = bcm_next_tlv(tlvs, &tlv_len);
+			continue;
+		}
+
+		ch = CH20MHZ_CHSPEC(nbr_rep_ie->channel);
+		WL_DBG(("ch:%d, bssid:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			ch, nbr_rep_ie->bssid.octet[0], nbr_rep_ie->bssid.octet[1],
+			nbr_rep_ie->bssid.octet[2],	nbr_rep_ie->bssid.octet[3],
+			nbr_rep_ie->bssid.octet[4],	nbr_rep_ie->bssid.octet[5]));
+
+		/* get RCC list */
+		error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+			(void *)&channel_list, sizeof(channel_list), NULL);
+		if (error) {
+			WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
+			return BCME_ERROR;
+		}
+
+		/* update RCC */
+		if (channel_list.n < MAX_ROAM_CHANNEL) {
+			for (i = 0; i < channel_list.n; i++) {
+				if (channel_list.channels[i] == ch) {
+					break;
+				}
+			}
+			if (i == channel_list.n) {
+				channel_list.channels[channel_list.n] = ch;
+				channel_list.n++;
+			}
+		}
+
+		/* set RCC list */
+		error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
+			sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
+		if (error) {
+			WL_DBG(("Failed to set roamscan channels, error = %d\n", error));
+		}
+
+		tlvs = bcm_next_tlv(tlvs, &tlv_len);
+	}
+
+	return BCME_OK;
+}
+#endif /* WBTEXT */
+
+#ifdef SUPPORT_SET_CAC
+static void
+wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable)
+{
+	int ret = 0;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+	if (!dhd) {
+		WL_ERR(("dhd is NULL\n"));
+		return;
+	}
+	WL_DBG(("cac enable %d, op_mode 0x%04x\n", enable, dhd->op_mode));
+	if (enable && ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) ||
+			(dhd->op_mode & DHD_FLAG_P2P_GC_MODE) ||
+			(dhd->op_mode & DHD_FLAG_P2P_GO_MODE))) {
+		WL_ERR(("op_mode 0x%04x\n", dhd->op_mode));
+		enable = 0;
+	}
+	if ((ret = dhd_wl_ioctl_set_intiovar(dhd, "cac", enable,
+			WLC_SET_VAR, TRUE, 0)) < 0) {
+		WL_ERR(("Failed set CAC, ret=%d\n", ret));
+	} else {
+		WL_DBG(("CAC set successfully\n"));
+	}
+	return;
+}
+#endif /* SUPPORT_SET_CAC */
+
+#ifdef SUPPORT_RSSI_LOGGING
+int
+wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	wl_rssi_ant_mimo_t *get_param = (wl_rssi_ant_mimo_t *)param;
+	rssi_ant_param_t *set_param = NULL;
+	struct net_device *ifdev = NULL;
+	char iobuf[WLC_IOCTL_SMLEN];
+	int err = BCME_OK;
+	int iftype = 0;
+
+	memset(iobuf, 0, WLC_IOCTL_SMLEN);
+
+	/* Check the interface type */
+	ifdev = wl_get_netdev_by_name(cfg, ifname);
+	if (ifdev == NULL) {
+		WL_ERR(("Could not find net_device for ifname:%s\n", ifname));
+		err = BCME_BADARG;
+		goto fail;
+	}
+
+	iftype = ifdev->ieee80211_ptr->iftype;
+	if (iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO) {
+		if (peer_mac) {
+			set_param = (rssi_ant_param_t *)kzalloc(sizeof(rssi_ant_param_t),
+				GFP_KERNEL);
+			if (set_param == NULL) {
+				WL_ERR(("Failed to allocate rssi_ant_param_t\n"));
+				err = BCME_NOMEM;
+				goto fail;
+			}
+			err = wl_cfg80211_ether_atoe(peer_mac, &set_param->ea);
+			if (!err) {
+				WL_ERR(("Invalid Peer MAC format\n"));
+				err = BCME_BADARG;
+				goto fail;
+			}
+		} else {
+			WL_ERR(("Peer MAC is not provided for iftype %d\n", iftype));
+			err = BCME_BADARG;
+			goto fail;
+		}
+	}
+
+	err = wldev_iovar_getbuf(ifdev, "phy_rssi_ant", peer_mac ?
+		(void *)&(set_param->ea) : NULL, peer_mac ? ETHER_ADDR_LEN : 0,
+		(void *)iobuf, sizeof(iobuf), NULL);
+	if (unlikely(err)) {
+		WL_ERR(("Failed to get rssi info, err=%d\n", err));
+	} else {
+		memcpy(get_param, iobuf, sizeof(wl_rssi_ant_mimo_t));
+		if (get_param->count == 0) {
+			WL_ERR(("Not supported on this chip\n"));
+			err = BCME_UNSUPPORTED;
+		}
+	}
+
+fail:
+	if (set_param) {
+		kfree(set_param);
+	}
+
+	return err;
+}
+
+int
+wl_get_rssi_logging(struct net_device *dev, void *param)
+{
+	rssilog_get_param_t *get_param = (rssilog_get_param_t *)param;
+	char iobuf[WLC_IOCTL_SMLEN];
+	int err = BCME_OK;
+
+	memset(iobuf, 0, WLC_IOCTL_SMLEN);
+	memset(get_param, 0, sizeof(*get_param));
+	err = wldev_iovar_getbuf(dev, "rssilog", NULL, 0, (void *)iobuf,
+		sizeof(iobuf), NULL);
+	if (err) {
+		WL_ERR(("Failed to get rssi logging info, err=%d\n", err));
+	} else {
+		memcpy(get_param, iobuf, sizeof(*get_param));
+	}
+
+	return err;
+}
+
+int
+wl_set_rssi_logging(struct net_device *dev, void *param)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	rssilog_set_param_t *set_param = (rssilog_set_param_t *)param;
+	int err;
+
+	err = wldev_iovar_setbuf(dev, "rssilog", set_param,
+		sizeof(*set_param), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+		&cfg->ioctl_buf_sync);
+	if (err) {
+		WL_ERR(("Failed to set rssi logging param, err=%d\n", err));
+	}
+
+	return err;
+}
+#endif /* SUPPORT_RSSI_LOGGING */
+
+s32
+wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len)
+{
+	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+	int ret = 0;
+	int bytes_written = -1;
+
+	sscanf(command, "%*s %d", &cfg->autochannel);
+
+	if (cfg->autochannel == 0) {
+		cfg->best_2g_ch = 0;
+		cfg->best_5g_ch = 0;
+	} else if (cfg->autochannel == 2) {
+		bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
+			cfg->best_2g_ch, cfg->best_5g_ch);
+		ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+		ret = bytes_written;
+	}
+
+	return ret;
+}
+
+static int
+wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
+	struct net_device *dev, uint action, enum wl_ext_status status, void *context)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+	struct wl_security *sec;
+	s32 bssidx = -1;
+	int ret = 0, cur_eapol_status;
+	int max_wait_time, max_wait_cnt;
+
+	mutex_lock(&cfg->in4way_sync);
+	WL_DBG(("status=%d, action=0x%x\n", status, action));
+
+	cur_eapol_status = dhdp->conf->eapol_status;
+	switch (status) {
+		case WL_EXT_STATUS_SCAN:
+			if (action & NO_SCAN_IN4WAY) {
+				if (cfg->handshaking > 0 && cfg->handshaking <= 3) {
+					WL_ERR(("return -EBUSY cnt %d\n", cfg->handshaking));
+					cfg->handshaking++;
+					ret = -EBUSY;
+					break;
+				}
+			}
+			break;
+		case WL_EXT_STATUS_DISCONNECTING:
+			if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
+					cur_eapol_status < EAPOL_STATUS_WPA_END) {
+				WL_ERR(("WPA failed at %d\n", cur_eapol_status));
+				dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
+			} else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
+					cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
+				WL_ERR(("WPS failed at %d\n", cur_eapol_status));
+				dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
+			}
+			if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
+				if (cfg->handshaking) {
+					if (action & NO_BTC_IN4WAY) {
+						WL_TRACE(("status=%d, enable btc_mode\n", status));
+						wldev_iovar_setint(dev, "btc_mode", 1);
+					}
+					cfg->handshaking = 0;
+				}
+			}
+			if (action & WAIT_DISCONNECTED) {
+				max_wait_time = 200;
+				max_wait_cnt = 20;
+				cfg->disconnected_jiffies = jiffies;
+				while (!time_after(jiffies,
+						cfg->disconnected_jiffies + msecs_to_jiffies(max_wait_time)) &&
+						max_wait_cnt) {
+					WL_TRACE(("status=%d, max_wait_cnt=%d waiting...\n",
+						status, max_wait_cnt));
+					mutex_unlock(&cfg->in4way_sync);
+					OSL_SLEEP(50);
+					mutex_lock(&cfg->in4way_sync);
+					max_wait_cnt--;
+				}
+			}
+			break;
+		case WL_EXT_STATUS_CONNECTING:
+			if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
+				bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr);
+				sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+				if ((sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) &&
+						bssidx == 0) {
+					dhdp->conf->eapol_status = EAPOL_STATUS_WPA_START;
+					cfg->handshaking = 1;
+					if (action & NO_BTC_IN4WAY) {
+						WL_TRACE(("status=%d, disable btc_mode\n", status));
+						wldev_iovar_setint(dev, "btc_mode", 0);
+					}
+				}
+			}
+			if (action & WAIT_DISCONNECTED) {
+				max_wait_time = 200;
+				max_wait_cnt = 10;
+				while (!time_after(jiffies,
+						cfg->disconnected_jiffies + msecs_to_jiffies(max_wait_time)) &&
+						max_wait_cnt) {
+					WL_TRACE(("status=%d, max_wait_cnt=%d waiting...\n",
+						status, max_wait_cnt));
+					mutex_unlock(&cfg->in4way_sync);
+					OSL_SLEEP(50);
+					mutex_lock(&cfg->in4way_sync);
+					max_wait_cnt--;
+				}
+			}
+			break;
+		case WL_EXT_STATUS_CONNECTED:
+			if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) {
+				dhd_conf_set_wme(cfg->pub, 0);
+				dhd_conf_set_intiovar(cfg->pub, WLC_SET_VAR, "phy_oclscdenable",
+					cfg->pub->conf->phy_oclscdenable, 0, FALSE);
+			}
+			else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+				dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_CLIENT, -1);
+			}
+			break;
+		case WL_EXT_STATUS_DISCONNECTED:
+			if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
+					cur_eapol_status < EAPOL_STATUS_WPA_END) {
+				WL_ERR(("WPA failed at %d\n", cur_eapol_status));
+				dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
+			} else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
+					cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
+				WL_ERR(("WPS failed at %d\n", cur_eapol_status));
+				dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
+			}
+			if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
+				if (cfg->handshaking) {
+					if (action & NO_BTC_IN4WAY) {
+						WL_TRACE(("status=%d, enable btc_mode\n", status));
+						wldev_iovar_setint(dev, "btc_mode", 1);
+					}
+					cfg->handshaking = 0;
+				}
+			}
+			if (action & WAIT_DISCONNECTED) {
+				cfg->disconnected_jiffies = jiffies;
+			}
+			break;
+		case WL_EXT_STATUS_ADD_KEY:
+			dhdp->conf->eapol_status = EAPOL_STATUS_WPA_END;
+			if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
+				if (cfg->handshaking) {
+					if (action & NO_BTC_IN4WAY) {
+						WL_TRACE(("status=%d, enable btc_mode\n", status));
+						wldev_iovar_setint(dev, "btc_mode", 1);
+					}
+					cfg->handshaking = 0;
+				}
+			}
+			break;
+		case WL_EXT_STATUS_AP_ENABLED:
+			if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+				dhd_conf_set_wme(cfg->pub, 1);
+			}
+			else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+				dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_GO, -1);
+			}
+			break;
+		case WL_EXT_STATUS_DELETE_STA:
+			if ((action & DONT_DELETE_GC_AFTER_WPS) &&
+					(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) {
+				u8* mac_addr = context;
+				if (mac_addr && memcmp(&ether_bcast, mac_addr, ETHER_ADDR_LEN) &&
+						cur_eapol_status == EAPOL_STATUS_WPS_DONE) {
+					u32 timeout;
+					max_wait_time = dhdp->conf->max_wait_gc_time;
+					WL_TRACE(("status=%d, wps_done=%d, waiting %dms ...\n",
+						status, cfg->wps_done, max_wait_time));
+					mutex_unlock(&cfg->in4way_sync);
+					timeout = wait_event_interruptible_timeout(cfg->wps_done_event,
+						cfg->wps_done, msecs_to_jiffies(max_wait_time));
+					mutex_lock(&cfg->in4way_sync);
+					WL_TRACE(("status=%d, wps_done=%d, timeout=%d\n",
+						status, cfg->wps_done, timeout));
+					if (timeout > 0) {
+						ret = -1;
+						break;
+					}
+				} else {
+					WL_TRACE(("status=%d, wps_done=%d => 0\n", status, cfg->wps_done));
+					cfg->wps_done = FALSE;
+					dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
+				}
+			}
+			break;
+		case WL_EXT_STATUS_STA_DISCONNECTED:
+			if ((action & DONT_DELETE_GC_AFTER_WPS) &&
+					(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) &&
+					cur_eapol_status == EAPOL_STATUS_WPS_DONE) {
+				WL_TRACE(("status=%d, wps_done=%d => 0\n", status, cfg->wps_done));
+				cfg->wps_done = FALSE;
+			}
+			break;
+		case WL_EXT_STATUS_STA_CONNECTED:
+			if ((action & DONT_DELETE_GC_AFTER_WPS) &&
+					(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) &&
+					cur_eapol_status == EAPOL_STATUS_WPS_DONE) {
+				WL_TRACE(("status=%d, wps_done=%d => 1\n", status, cfg->wps_done));
+				cfg->wps_done = TRUE;
+				wake_up_interruptible(&cfg->wps_done_event);
+			}
+			break;
+		default:
+			WL_ERR(("Unknown action=0x%x, status=%d\n", action, status));
+	}
+
+	mutex_unlock(&cfg->in4way_sync);
+
+	return ret;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
new file mode 100644
index 0000000..62d4c84
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -0,0 +1,1687 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfg80211.h 710862 2017-07-14 07:43:59Z $
+ */
+
+/**
+ * Older Linux versions support the 'iw' interface, more recent ones the 'cfg80211' interface.
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+struct wl_conf;
+struct wl_iface;
+struct bcm_cfg80211;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh64(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#define WL_DBG_NONE	0
+#define WL_DBG_P2P_ACTION	(1 << 5)
+#define WL_DBG_TRACE	(1 << 4)
+#define WL_DBG_SCAN	(1 << 3)
+#define WL_DBG_DBG	(1 << 2)
+#define WL_DBG_INFO	(1 << 1)
+#define WL_DBG_ERR	(1 << 0)
+
+#ifdef DHD_LOG_DUMP
+extern void dhd_log_dump_write(int type, const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+#ifndef _DHD_LOG_DUMP_DEFINITIONS_
+#define DLD_BUF_TYPE_GENERAL    0
+#define DLD_BUF_TYPE_SPECIAL    1
+#define DHD_LOG_DUMP_WRITE(fmt, ...)	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_EX(fmt, ...)	dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, fmt, ##__VA_ARGS__)
+#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
+#endif /* DHD_LOG_DUMP */
+
+/* 0 invalidates all debug messages.  default is 1 */
+#define WL_DBG_LEVEL 0xFF
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFG80211_ERROR_TEXT		"CFG80211-INFO2) "
+#else
+#define CFG80211_ERROR_TEXT		"CFG80211-ERROR) "
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#if defined(DHD_DEBUG)
+#ifdef DHD_LOG_DUMP
+#define	WL_ERR(args)	\
+do {	\
+	if (wl_dbg_level & WL_DBG_ERR) {	\
+		printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+		printk args;	\
+		DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__);	\
+		DHD_LOG_DUMP_WRITE args;	\
+	}	\
+} while (0)
+#define	WL_ERR_MEM(args)	\
+do {	\
+	if (wl_dbg_level & WL_DBG_ERR) {	\
+		DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__);	\
+		DHD_LOG_DUMP_WRITE args;	\
+	}	\
+} while (0)
+#define	WL_ERR_EX(args)	\
+do {	\
+	if (wl_dbg_level & WL_DBG_ERR) {	\
+		printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+		printk args;	\
+		DHD_LOG_DUMP_WRITE_EX("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__);	\
+		DHD_LOG_DUMP_WRITE_EX args;	\
+	}	\
+} while (0)
+#else
+#define	WL_ERR(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#define WL_ERR_MEM(args) WL_ERR(args)
+#define WL_ERR_EX(args) WL_ERR(args)
+#endif /* DHD_LOG_DUMP */
+#else /* defined(DHD_DEBUG) */
+#define	WL_ERR(args)									\
+do {										\
+	if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) {				\
+			printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#define WL_ERR_MEM(args) WL_ERR(args)
+#define WL_ERR_EX(args) WL_ERR(args)
+#endif /* defined(DHD_DEBUG) */
+
+#ifdef WL_INFORM
+#undef WL_INFORM
+#endif
+
+#define	WL_INFORM(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_INFO "CFG80211-INFO) %s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+
+
+#ifdef WL_SCAN
+#undef WL_SCAN
+#endif
+#define	WL_SCAN(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_SCAN) {			\
+		printk(KERN_INFO "CFG80211-SCAN) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE
+#undef WL_TRACE
+#endif
+#define	WL_TRACE(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_TRACE) {			\
+		printk(KERN_INFO "CFG80211-TRACE) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE_HW4
+#undef WL_TRACE_HW4
+#endif
+#ifdef CUSTOMER_HW4_DEBUG
+#define	WL_TRACE_HW4(args)					\
+do {										\
+	if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO "CFG80211-TRACE) %s : ", __func__);	\
+			printk args;						\
+		} 								\
+} while (0)
+#else
+#define	WL_TRACE_HW4			WL_TRACE
+#endif /* CUSTOMER_HW4_DEBUG */
+#if (WL_DBG_LEVEL > 0)
+#define	WL_DBG(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_DBG) {			\
+		printk(KERN_INFO "CFG80211-DEBUG) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#else				/* !(WL_DBG_LEVEL > 0) */
+#define	WL_DBG(args)
+#endif				/* (WL_DBG_LEVEL > 0) */
+#define WL_PNO(x)
+#define WL_SD(x)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+#define ieee80211_band nl80211_band
+#define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ
+#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
+#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#ifdef WLMESH
+#undef WLMESH
+#endif
+#endif
+
+#define WL_SCAN_RETRY_MAX	3
+#define WL_NUM_PMKIDS_MAX	MAXPMKID
+#define WL_SCAN_BUF_MAX 	(1024 * 8)
+#define WL_TLV_INFO_MAX 	1500
+#define WL_SCAN_IE_LEN_MAX      2048
+#define WL_BSS_INFO_MAX		2048
+#define WL_ASSOC_INFO_MAX	512
+#define WL_IOCTL_LEN_MAX	2048
+#define WL_EXTRA_BUF_MAX	2048
+#define WL_SCAN_ERSULTS_LAST 	(WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX			256
+#define WL_FILE_NAME_MAX	256
+#define WL_DWELL_TIME		200
+#define WL_MED_DWELL_TIME	400
+#define WL_MIN_DWELL_TIME	100
+#define WL_LONG_DWELL_TIME	1000
+#define IFACE_MAX_CNT	4
+#define WL_SCAN_CONNECT_DWELL_TIME_MS		200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS		20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS	320
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS	400
+#define WL_AF_TX_MAX_RETRY	5
+
+#define WL_AF_SEARCH_TIME_MAX		450
+#define WL_AF_TX_EXTRA_TIME_MAX		200
+
+#define WL_SCAN_TIMER_INTERVAL_MS	10000 /* Scan timeout */
+#define WL_CHANNEL_SYNC_RETRY 	5
+#define WL_INVALID 		-1
+
+#ifdef DHD_LOSSLESS_ROAMING
+#define WL_ROAM_TIMEOUT_MS	1000 /* Roam timeout */
+#endif
+/* Bring down SCB Timeout to 20secs from 60secs default */
+#ifndef WL_SCB_TIMEOUT
+#define WL_SCB_TIMEOUT	20
+#endif
+
+#ifndef WL_SCB_ACTIVITY_TIME
+#define WL_SCB_ACTIVITY_TIME	5
+#endif
+
+#ifndef WL_SCB_MAX_PROBE
+#define WL_SCB_MAX_PROBE	3
+#endif
+
+#ifndef WL_MIN_PSPRETEND_THRESHOLD
+#define WL_MIN_PSPRETEND_THRESHOLD	2
+#endif
+
+/* Cipher suites */
+#define WLAN_CIPHER_SUITE_PMK		0x00904C00
+
+#ifndef WLAN_AKM_SUITE_FT_8021X
+#define WLAN_AKM_SUITE_FT_8021X		0x000FAC03
+#endif /* WLAN_AKM_SUITE_FT_8021X */
+
+#ifndef WLAN_AKM_SUITE_FT_PSK
+#define WLAN_AKM_SUITE_FT_PSK		0x000FAC04
+#endif /* WLAN_AKM_SUITE_FT_PSK */
+
+/*
+ * BRCM local.
+ * Use a high number that's unlikely to clash with linux upstream for a while until we can
+ * submit these changes to the community.
+*/
+#define NL80211_FEATURE_FW_4WAY_HANDSHAKE (1<<31)
+
+/* SCAN_SUPPRESS timer values in ms */
+#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framework DHCP timeout is 30 sec */
+#define WL_SCAN_SUPPRESS_RETRY 3000
+
+#define WL_PM_ENABLE_TIMEOUT 10000
+
+/* cfg80211 wowlan definitions */
+#define WL_WOWLAN_MAX_PATTERNS			8
+#define WL_WOWLAN_MIN_PATTERN_LEN		1
+#define WL_WOWLAN_MAX_PATTERN_LEN		255
+#define WL_WOWLAN_PKT_FILTER_ID_FIRST	201
+#define WL_WOWLAN_PKT_FILTER_ID_LAST	(WL_WOWLAN_PKT_FILTER_ID_FIRST + \
+									WL_WOWLAN_MAX_PATTERNS - 1)
+
+#ifdef WLTDLS
+#define TDLS_TUNNELED_PRB_REQ	"\x7f\x50\x6f\x9a\04"
+#define TDLS_TUNNELED_PRB_RESP	"\x7f\x50\x6f\x9a\05"
+#define TDLS_MAX_IFACE_FOR_ENABLE 1
+#endif /* WLTDLS */
+
+
+/* driver status */
+enum wl_status {
+	WL_STATUS_READY = 0,
+	WL_STATUS_SCANNING,
+	WL_STATUS_SCAN_ABORTING,
+	WL_STATUS_CONNECTING,
+	WL_STATUS_CONNECTED,
+	WL_STATUS_DISCONNECTING,
+	WL_STATUS_AP_CREATING,
+	WL_STATUS_AP_CREATED,
+	/* whole action frame tx procedure:
+	 * includes a) finding a common channel for the public action request frame
+	 * and b) sending the af via the 'actframe' iovar
+	 */
+	WL_STATUS_SENDING_ACT_FRM,
+	/* find a peer to go to a common channel before sending public action req frame */
+	WL_STATUS_FINDING_COMMON_CHANNEL,
+	/* waiting for the next af to sync timing with the supplicant.
+	 * it includes SENDING_ACT_FRM and WAITING_NEXT_ACT_FRM_LISTEN
+	 */
+	WL_STATUS_WAITING_NEXT_ACT_FRM,
+#ifdef WL_CFG80211_SYNC_GON
+	/* go to listen state to wait for next af after SENDING_ACT_FRM */
+	WL_STATUS_WAITING_NEXT_ACT_FRM_LISTEN,
+#endif /* WL_CFG80211_SYNC_GON */
+	/* set when the upper layer requests listen and the driver succeeds in entering listen mode.
+	 * if set, another scan request can abort the current listen state
+	 */
+	WL_STATUS_REMAINING_ON_CHANNEL,
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	/* a fake listen state used to keep the current scan state.
+	 * it is set when the upper layer requests listen while a scan is running; in that case
+	 * only an expiry timer runs, without an actual listen state.
+	 * if set, another scan request does not need to abort the scan.
+	 */
+	WL_STATUS_FAKE_REMAINING_ON_CHANNEL,
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+	WL_STATUS_NESTED_CONNECT
+};
+
+/* wi-fi mode */
+enum wl_mode {
+	WL_MODE_BSS,
+	WL_MODE_IBSS,
+	WL_MODE_AP,
+#ifdef WLMESH
+	WL_MODE_MESH
+#endif
+};
+
+/* driver profile list */
+enum wl_prof_list {
+	WL_PROF_MODE,
+	WL_PROF_SSID,
+	WL_PROF_SEC,
+	WL_PROF_IBSS,
+	WL_PROF_BAND,
+	WL_PROF_CHAN,
+	WL_PROF_BSSID,
+	WL_PROF_ACT,
+	WL_PROF_BEACONINT,
+	WL_PROF_DTIMPERIOD
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+	WL_ESCAN_STATE_IDLE,
+	WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+	WL_FW_LOADING_DONE,
+	WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+	WL_BEACON = 0x1,
+	WL_PROBE_RESP = 0x2,
+	WL_ASSOC_RESP = 0x4
+};
+
+enum wl_pm_workq_act_type {
+	WL_PM_WORKQ_SHORT,
+	WL_PM_WORKQ_LONG,
+	WL_PM_WORKQ_DEL
+};
+
+enum wl_tdls_config {
+    TDLS_STATE_AP_CREATE,
+    TDLS_STATE_AP_DELETE,
+    TDLS_STATE_CONNECT,
+    TDLS_STATE_DISCONNECT,
+    TDLS_STATE_SETUP,
+    TDLS_STATE_TEARDOWN,
+    TDLS_STATE_IF_CREATE,
+    TDLS_STATE_IF_DELETE
+};
+
+/* beacon / probe_response */
+struct beacon_proberesp {
+	__le64 timestamp;
+	__le16 beacon_int;
+	__le16 capab_info;
+	u8 variable[0];
+} __attribute__ ((packed));
+
+/* driver configuration */
+struct wl_conf {
+	u32 frag_threshold;
+	u32 rts_threshold;
+	u32 retry_short;
+	u32 retry_long;
+	s32 tx_power;
+	struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+                            const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+	u16 band;
+	u16 channel;
+	s16 rssi;
+	u16 frame_len;
+	u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+	struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+	u16 offset;
+	u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+	struct list_head eq_list;
+	u32 etype;
+	wl_event_msg_t emsg;
+	s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+	u32 wpa_versions;
+	u32 auth_type;
+	u32 cipher_pairwise;
+	u32 cipher_group;
+	u32 wpa_auth;
+	u32 auth_assoc_res_status;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+	u8 beacon_interval;	/* in milliseconds */
+	u8 atim;		/* in milliseconds */
+	s8 join_only;
+	u8 band;
+	u8 channel;
+};
+
+typedef struct wl_bss_vndr_ies {
+	u8  probe_req_ie[VNDR_IES_BUF_LEN];
+	u8  probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u8  assoc_req_ie[VNDR_IES_BUF_LEN];
+	u8  assoc_res_ie[VNDR_IES_BUF_LEN];
+	u8  beacon_ie[VNDR_IES_MAX_BUF_LEN];
+	u32 probe_req_ie_len;
+	u32 probe_res_ie_len;
+	u32 assoc_req_ie_len;
+	u32 assoc_res_ie_len;
+	u32 beacon_ie_len;
+} wl_bss_vndr_ies_t;
+
+typedef struct wl_cfgbss {
+	u8 *wpa_ie;
+	u8 *rsn_ie;
+	u8 *wps_ie;
+	bool security_mode;
+	struct wl_bss_vndr_ies ies;	/* Common for STA, P2P GC, GO, AP, P2P Disc Interface */
+} wl_cfgbss_t;
+
+/* cfg driver profile */
+struct wl_profile {
+	u32 mode;
+	s32 band;
+	u32 channel;
+	struct wlc_ssid ssid;
+	struct wl_security sec;
+	struct wl_ibss ibss;
+	u8 bssid[ETHER_ADDR_LEN];
+	u16 beacon_interval;
+	u8 dtim_period;
+	bool active;
+};
+
+struct net_info {
+	struct net_device *ndev;
+	struct wireless_dev *wdev;
+	struct wl_profile profile;
+	s32 mode;
+	s32 roam_off;
+	unsigned long sme_state;
+	bool pm_restore;
+	bool pm_block;
+	s32 pm;
+	s32 bssidx;
+	wl_cfgbss_t bss;
+	u32 ulb_bw;
+	struct list_head list; /* list of all net_info structure */
+};
+
+/* association inform */
+#define MAX_REQ_LINE 1024
+struct wl_connect_info {
+	u8 req_ie[MAX_REQ_LINE];
+	s32 req_ie_len;
+	u8 resp_ie[MAX_REQ_LINE];
+	s32 resp_ie_len;
+};
+
+/* firmware/nvram downloading controller */
+struct wl_fw_ctrl {
+	const struct firmware *fw_entry;
+	unsigned long status;
+	u32 ptr;
+	s8 fw_name[WL_FILE_NAME_MAX];
+	s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+	u32 req_len;
+	u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID - 1];
+};
+
+#ifdef DHD_MAX_IFS
+#define WL_MAX_IFS DHD_MAX_IFS
+#else
+#define WL_MAX_IFS 16
+#endif
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+	u32 escan_state;
+#ifdef STATIC_WL_PRIV_STRUCT
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	u8 *escan_buf;
+#else
+	u8 escan_buf[ESCAN_BUF_SIZE];
+#endif /* STATIC_WL_PRIV_STRUCT */
+	struct wiphy *wiphy;
+	struct net_device *ndev;
+};
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#define BUF_OVERFLOW_MGMT_COUNT 3
+typedef struct {
+	int RSSI;
+	int length;
+	struct ether_addr BSSID;
+} removal_element_t;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+struct afx_hdl {
+	wl_af_params_t *pending_tx_act_frm;
+	struct ether_addr	tx_dst_addr;
+	struct net_device *dev;
+	struct work_struct work;
+	s32 bssidx;
+	u32 retry;
+	s32 peer_chan;
+	s32 peer_listen_chan; /* search channel: configured by upper layer */
+	s32 my_listen_chan;	/* listen channel: extracted from prb req or gon req */
+	bool is_listen;
+	bool ack_recv;
+	bool is_active;
+};
+
+struct parsed_ies {
+	wpa_ie_fixed_t *wps_ie;
+	u32 wps_ie_len;
+	wpa_ie_fixed_t *wpa_ie;
+	u32 wpa_ie_len;
+	bcm_tlv_t *wpa2_ie;
+	u32 wpa2_ie_len;
+};
+
+
+#ifdef P2P_LISTEN_OFFLOADING
+typedef struct {
+	uint16	period;                 /* listen offload period */
+	uint16	interval;               /* listen offload interval */
+	uint16	count;			/* listen offload count */
+	uint16	pad;                    /* pad for 32bit align */
+} wl_p2plo_listen_t;
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifdef WL11U
+/* Max length of Interworking element */
+#define IW_IES_MAX_BUF_LEN 		9
+#endif
+#define MAX_EVENT_BUF_NUM 16
+typedef struct wl_eventmsg_buf {
+	u16 num;
+	struct {
+		u16 type;
+		bool set;
+	} event [MAX_EVENT_BUF_NUM];
+} wl_eventmsg_buf_t;
+
+typedef struct wl_if_event_info {
+	bool valid;
+	int ifidx;
+	int bssidx;
+	uint8 mac[ETHER_ADDR_LEN];
+	char name[IFNAMSIZ+1];
+	uint8 role;
+} wl_if_event_info;
+
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+typedef struct ap_rps_info {
+	bool enable;
+	bool sta_assoc_check;
+	int pps;
+	int quiet_time;
+	int level;
+} ap_rps_info_t;
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+
+#ifdef SUPPORT_RSSI_LOGGING
+#define RSSILOG_FLAG_FEATURE_SW		0x1
+#define RSSILOG_FLAG_REPORT_READY	0x2
+typedef struct rssilog_set_param {
+	uint8 enable;
+	uint8 rssi_threshold;
+	uint8 time_threshold;
+	uint8 pad;
+} rssilog_set_param_t;
+
+typedef struct rssilog_get_param {
+	uint8 report_count;
+	uint8 enable;
+	uint8 rssi_threshold;
+	uint8 time_threshold;
+} rssilog_get_param_t;
+
+typedef struct rssi_ant_param {
+	struct ether_addr ea;
+	chanspec_t chanspec;
+} rssi_ant_param_t;
+
+typedef struct wl_rssi_ant_mimo {
+	uint32 version;
+	uint32 count;
+	int8 rssi_ant[WL_RSSI_ANT_MAX];
+	int8 rssi_sum;
+	int8 PAD[3];
+} wl_rssi_ant_mimo_t;
+#endif /* SUPPORT_RSSI_LOGGING */
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define GET_BSS_INFO_LEN 90
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+
+/* private data of cfg80211 interface */
+struct bcm_cfg80211 {
+	struct wireless_dev *wdev;	/* representing cfg cfg80211 device */
+
+	struct wireless_dev *p2p_wdev;	/* representing cfg cfg80211 device for P2P */
+	struct net_device *p2p_net;    /* reference to p2p0 interface */
+
+	struct wl_conf *conf;
+	struct cfg80211_scan_request *scan_request;	/* scan request object */
+	EVENT_HANDLER evt_handler[WLC_E_LAST];
+	struct list_head eq_list;	/* used for event queue */
+	struct list_head net_list;     /* used for struct net_info */
+	spinlock_t net_list_sync;	/* to protect scan status (and others if needed) */
+	spinlock_t eq_lock;	/* for event queue synchronization */
+	spinlock_t cfgdrv_lock;	/* to protect scan status (and others if needed) */
+	struct completion act_frm_scan;
+	struct completion iface_disable;
+	struct completion wait_next_af;
+	struct mutex usr_sync;	/* mainly for up/down synchronization */
+	struct mutex if_sync;	/* mainly for iface op synchronization */
+	struct mutex scan_complete;	/* serialize scan_complete call */
+	struct wl_scan_results *bss_list;
+	struct wl_scan_results *scan_results;
+
+	/* scan request object for internal purpose */
+	struct wl_scan_req *scan_req_int;
+	/* information element object for internal purpose */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_ie *ie;
+#else
+	struct wl_ie ie;
+#endif
+
+	/* association information container */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_connect_info *conn_info;
+#else
+	struct wl_connect_info conn_info;
+#endif
+#ifdef DEBUGFS_CFG80211
+	struct dentry		*debugfs;
+#endif /* DEBUGFS_CFG80211 */
+	struct wl_pmk_list *pmk_list;	/* wpa2 pmk list */
+	tsk_ctl_t event_tsk;  		/* task of main event handler thread */
+	dhd_pub_t *pub;
+	u32 iface_cnt;
+	u32 channel;		/* current channel */
+	u32 af_sent_channel;	/* channel action frame is sent */
+	/* next af subtype to cancel the remaining dwell time in rx process */
+	u8 next_af_subtype;
+#ifdef WL_CFG80211_SYNC_GON
+	ulong af_tx_sent_jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+	struct escan_info escan_info;   /* escan information */
+	bool active_scan;	/* current scan mode */
+	bool ibss_starter;	/* indicates this sta is ibss starter */
+	bool link_up;		/* link/connection up flag */
+
+	/* indicates whether the chip supports power save mode */
+	bool pwr_save;
+	bool roam_on;		/* on/off switch for self-roaming */
+	bool scan_tried;	/* indicates if first scan attempted */
+#if defined(BCMSDIO) || defined(BCMDBUS)
+	bool wlfc_on;
+#endif 
+	bool vsdb_mode;
+#define WL_ROAM_OFF_ON_CONCURRENT 	0x0001
+#define WL_ROAM_REVERT_STATUS		0x0002
+	u32 roam_flags;
+	u8 *ioctl_buf;		/* ioctl buffer */
+	struct mutex ioctl_buf_sync;
+	u8 *escan_ioctl_buf;
+	u8 *extra_buf;	/* mainly to grab assoc information */
+	struct dentry *debugfsdir;
+	struct rfkill *rfkill;
+	bool rf_blocked;
+	struct ieee80211_channel remain_on_chan;
+	enum nl80211_channel_type remain_on_chan_type;
+	u64 send_action_id;
+	u64 last_roc_id;
+	wait_queue_head_t netif_change_event;
+	wl_if_event_info if_event_info;
+	struct completion send_af_done;
+	struct afx_hdl *afx_hdl;
+	struct p2p_info *p2p;
+	bool p2p_supported;
+	void *btcoex_info;
+	struct timer_list scan_timeout;   /* Timer to catch scan event timeout */
+#if defined(P2P_IE_MISSING_FIX)
+	bool p2p_prb_noti;
+#endif
+	s32(*state_notifier) (struct bcm_cfg80211 *cfg,
+		struct net_info *_net_info, enum wl_status state, bool set);
+	unsigned long interrested_state;
+	wlc_ssid_t hostapd_ssid;
+#ifdef WL11U
+	bool wl11u;
+#endif /* WL11U */
+	bool sched_scan_running;	/* scheduled scan req status */
+	struct cfg80211_sched_scan_request *sched_scan_req;	/* scheduled scan req */
+	bool scan_suppressed;
+	struct timer_list scan_supp_timer;
+	struct work_struct wlan_work;
+	struct mutex event_sync;	/* mainly for up/down synchronization */
+	bool disable_roam_event;
+	struct delayed_work pm_enable_work;
+	struct workqueue_struct *event_workq;   /* workqueue for event */
+	struct work_struct event_work;		/* work item for event */
+	struct mutex pm_sync;	/* mainly for pm work synchronization */
+
+	vndr_ie_setbuf_t *ibss_vsie;	/* keep the VSIE for IBSS */
+	int ibss_vsie_len;
+#ifdef WL_RELMCAST
+	u32 rmc_event_pid;
+	u32 rmc_event_seq;
+#endif /* WL_RELMCAST */
+#ifdef WLAIBSS_MCHAN
+	struct ether_addr ibss_if_addr;
+	bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */
+#endif /* WLAIBSS_MCHAN */
+	bool bss_pending_op;		/* indicates whether there is a pending IF operation */
+	int roam_offload;
+#ifdef WL_CFG80211_P2P_DEV_IF
+	bool down_disc_if;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef P2PLISTEN_AP_SAMECHN
+	bool p2p_resp_apchn_status;
+#endif /* P2PLISTEN_AP_SAMECHN */
+	struct wl_wsec_key wep_key;
+#ifdef WLTDLS
+	u8 *tdls_mgmt_frame;
+	u32 tdls_mgmt_frame_len;
+	s32 tdls_mgmt_freq;
+#endif /* WLTDLS */
+	bool need_wait_afrx;
+#ifdef QOS_MAP_SET
+	uint8	 *up_table;	/* user priority table, size is UP_TABLE_MAX */
+#endif /* QOS_MAP_SET */
+	struct ether_addr last_roamed_addr;
+	bool rcc_enabled;	/* flag for Roam channel cache feature */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+	char bss_info[GET_BSS_INFO_LEN];
+	wl_event_msg_t event_auth_assoc;
+	u32 assoc_reject_status;
+	u32 roam_count;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+	u16 ap_oper_channel;
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+	bool random_mac_enabled;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef DHD_LOSSLESS_ROAMING
+	struct timer_list roam_timeout;   /* Timer to catch roam timeout */
+#endif
+	uint16 escan_sync_id_cntr;
+#ifdef WLTDLS
+	uint8 tdls_supported;
+	struct mutex tdls_sync;	/* protect tdls config operations */
+#endif /* WLTDLS */
+#ifdef MFP
+	uint8 *bip_pos;
+	int mfp_mode;
+#endif /* MFP */
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+	ap_rps_info_t ap_rps_info;
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+#ifdef WBTEXT
+	struct list_head wbtext_bssid_list;
+#endif /* WBTEXT */
+	struct list_head vndr_oui_list;
+
+#ifdef STAT_REPORT
+	void *stat_report_info;
+#endif
+#ifdef WLMESH
+	char sae_password[SAE_MAX_PASSWD_LEN];
+	uint sae_password_len;
+#endif /* WLMESH */
+#if defined(RSSIAVG)
+	wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
+	wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
+#endif
+#if defined(BSSCACHE)
+	wl_bss_cache_ctrl_t g_bss_cache_ctrl;
+#endif
+	int p2p_disconnected; // terence 20130703: Fix for wrong group_capab (timing issue)
+	struct ether_addr disconnected_bssid;
+	int autochannel;
+	int best_2g_ch;
+	int best_5g_ch;
+	uint handshaking;
+	bool wps_done;
+	wait_queue_head_t wps_done_event;
+	struct mutex in4way_sync;
+	ulong disconnected_jiffies;
+};
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+#define GCC_DIAGNOSTIC_PUSH() \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#define GCC_DIAGNOSTIC_POP() \
+_Pragma("GCC diagnostic pop")
+#else
+#define GCC_DIAGNOSTIC_PUSH()
+#define GCC_DIAGNOSTIC_POP()
+#endif /* STRICT_GCC_WARNINGS */
+
+#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \
+	list_for_each_entry_safe((pos), (next), (head), member)
+extern int ioctl_version;
+
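+/*
+ * next_bss() walks the variable-length wl_bss_info records of a scan result
+ * list: when bss is NULL it returns the first record, otherwise it advances
+ * by dtoh32(bss->length) bytes to the following record.
+ */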
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+	return bss = bss ?
+		(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+
+static inline void
+wl_probe_wdev_all(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+	unsigned long int flags;
+	int idx = 0;
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next,
+		&cfg->net_list, list) {
+		WL_ERR(("%s: net_list[%d] bssidx: %d, "
+			"ndev: %p, wdev: %p \n", __FUNCTION__,
+			idx++, _net_info->bssidx,
+			_net_info->ndev, _net_info->wdev));
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return;
+}
+
+static inline struct net_info *
+wl_get_netinfo_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+	struct net_info *_net_info, *next, *info = NULL;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if ((bssidx >= 0) && (_net_info->bssidx == bssidx)) {
+			info = _net_info;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return info;
+}
+
+static inline void
+wl_dealloc_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+	struct net_info *_net_info, *next;
+	unsigned long int flags;
+
+#ifdef DHD_IFDEBUG
+	WL_ERR(("dealloc_netinfo enter wdev=%p \n", wdev));
+#endif
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (wdev && (_net_info->wdev == wdev)) {
+			wl_cfgbss_t *bss = &_net_info->bss;
+
+			kfree(bss->wpa_ie);
+			bss->wpa_ie = NULL;
+			kfree(bss->rsn_ie);
+			bss->rsn_ie = NULL;
+			kfree(bss->wps_ie);
+			bss->wps_ie = NULL;
+			list_del(&_net_info->list);
+			cfg->iface_cnt--;
+			kfree(_net_info);
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+#ifdef DHD_IFDEBUG
+	WL_ERR(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
+}
+
+static inline s32
+wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev * wdev, s32 mode, bool pm_block, u8 bssidx)
+{
+	struct net_info *_net_info;
+	s32 err = 0;
+	unsigned long int flags;
+#ifdef DHD_IFDEBUG
+	WL_ERR(("alloc_netinfo enter bssidx=%d wdev=%p ndev=%p\n", bssidx, wdev, ndev));
+#endif
+	/* Check whether there is any duplicate entry for the
+	 * same bssidx
+	 */
+	if ((_net_info = wl_get_netinfo_by_bssidx(cfg, bssidx))) {
+		/* We have a duplicate entry for the same bssidx
+		 * already present which shouldn't have been the case.
+		 * Attempt recovery.
+		 */
+		WL_ERR(("Duplicate entry for bssidx=%d present\n", bssidx));
+		wl_probe_wdev_all(cfg);
+#ifdef DHD_DEBUG
+		ASSERT(0);
+#endif /* DHD_DEBUG */
+		WL_ERR(("Removing the Dup entry for bssidx=%d \n", bssidx));
+		wl_dealloc_netinfo_by_wdev(cfg, _net_info->wdev);
+	}
+	if (cfg->iface_cnt == IFACE_MAX_CNT)
+		return -ENOMEM;
+	_net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
+	if (!_net_info)
+		err = -ENOMEM;
+	else {
+		_net_info->mode = mode;
+		_net_info->ndev = ndev;
+		_net_info->wdev = wdev;
+		_net_info->pm_restore = 0;
+		_net_info->pm = 0;
+		_net_info->pm_block = pm_block;
+		_net_info->roam_off = WL_INVALID;
+		_net_info->bssidx = bssidx;
+		spin_lock_irqsave(&cfg->net_list_sync, flags);
+		cfg->iface_cnt++;
+		list_add(&_net_info->list, &cfg->net_list);
+		spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	}
+#ifdef DHD_IFDEBUG
+	WL_ERR(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
+	return err;
+}
+
+static inline void
+wl_delete_all_netinfo(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		wl_cfgbss_t *bss = &_net_info->bss;
+
+		kfree(bss->wpa_ie);
+		bss->wpa_ie = NULL;
+		kfree(bss->rsn_ie);
+		bss->rsn_ie = NULL;
+		kfree(bss->wps_ie);
+		bss->wps_ie = NULL;
+		list_del(&_net_info->list);
+		if (_net_info->wdev)
+			kfree(_net_info->wdev);
+		kfree(_net_info);
+	}
+	cfg->iface_cnt = 0;
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+static inline u32
+wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
+{
+	struct net_info *_net_info, *next;
+	u32 cnt = 0;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(status, &_net_info->sme_state))
+			cnt++;
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return cnt;
+}
+static inline void
+wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op)
+{
+	struct net_info *_net_info, *next;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		switch (op) {
+			case 1:
+				break; /* set all status is not allowed */
+			case 2:
+				/*
+				 * Release the spinlock before calling notifier. Else there
+				 * will be nested calls
+				 */
+				spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+				clear_bit(status, &_net_info->sme_state);
+				if (cfg->state_notifier &&
+					test_bit(status, &(cfg->interrested_state)))
+					cfg->state_notifier(cfg, _net_info, status, false);
+				return;
+			case 4:
+				break; /* change all status is not allowed */
+			default:
+				break; /* unknown operation */
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+static inline void
+wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev, u32 op)
+{
+
+	struct net_info *_net_info, *next;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			switch (op) {
+				case 1:
+					/*
+					 * Release the spinlock before calling notifier. Else there
+					 * will be nested calls
+					 */
+					spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+					set_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, true);
+					return;
+				case 2:
+					/*
+					 * Release the spinlock before calling notifier. Else there
+					 * will be nested calls
+					 */
+					spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+					clear_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, false);
+					return;
+				case 4:
+					change_bit(status, &_net_info->sme_state);
+					break;
+			}
+		}
+
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+
+}
+
+static inline wl_cfgbss_t *
+wl_get_cfgbss_by_wdev(struct bcm_cfg80211 *cfg,
+	struct wireless_dev *wdev)
+{
+	struct net_info *_net_info, *next;
+	wl_cfgbss_t *bss = NULL;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (wdev && (_net_info->wdev == wdev)) {
+			bss = &_net_info->bss;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return bss;
+}
+
+static inline u32
+wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+	u32 stat = 0;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			stat = test_bit(status, &_net_info->sme_state);
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return stat;
+}
+
+static inline s32
+wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+	s32 mode = -1;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			mode = _net_info->mode;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return mode;
+}
+
+static inline void
+wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 mode)
+{
+	struct net_info *_net_info, *next;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			_net_info->mode = mode;
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
+static inline s32
+wl_get_bssidx_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+	struct net_info *_net_info, *next;
+	s32 bssidx = -1;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->wdev && (_net_info->wdev == wdev)) {
+			bssidx = _net_info->bssidx;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return bssidx;
+}
+
+static inline struct wireless_dev *
+wl_get_wdev_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+	struct net_info *_net_info, *next;
+	struct wireless_dev *wdev = NULL;
+	unsigned long int flags;
+
+	if (bssidx < 0)
+		return NULL;
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->bssidx == bssidx) {
+			wdev = _net_info->wdev;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return wdev;
+}
+
+static inline struct wl_profile *
+wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+	struct wl_profile *prof = NULL;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			prof = &_net_info->profile;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return prof;
+}
+static inline struct net_info *
+wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next, *info = NULL;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			info = _net_info;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return info;
+}
+
+static inline struct net_info *
+wl_get_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+	struct net_info *_net_info, *next, *info = NULL;
+	unsigned long int flags;
+
+	spin_lock_irqsave(&cfg->net_list_sync, flags);
+	GCC_DIAGNOSTIC_PUSH();
+	BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+		if (wdev && (_net_info->wdev == wdev)) {
+			info = _net_info;
+			break;
+		}
+	}
+	GCC_DIAGNOSTIC_POP();
+	spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+	return info;
+}
+
+#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
+		(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0)
+#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
+#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev)
+#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev)
+#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
+#define wdev_to_ndev(wdev) (wdev->netdev)
+
+#if defined(WL_ENABLE_P2P_IF)
+#define ndev_to_wlc_ndev(ndev, cfg)	((ndev == cfg->p2p_net) ? \
+	bcmcfg_to_prmry_ndev(cfg) : ndev)
+#else
+#define ndev_to_wlc_ndev(ndev, cfg)	(ndev)
+#endif /* WL_ENABLE_P2P_IF */
+
+#define wdev_to_wlc_ndev(wdev, cfg)	\
+	(wdev_to_ndev(wdev) ? \
+	wdev_to_ndev(wdev) : bcmcfg_to_prmry_ndev(cfg))
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	wdev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg)
+#elif defined(WL_ENABLE_P2P_IF)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	ndev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg)
+#else
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	(cfgdev)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define cfgdev_to_wdev(cfgdev)	(cfgdev)
+#define ndev_to_cfgdev(ndev)	ndev_to_wdev(ndev)
+#define cfgdev_to_ndev(cfgdev)	(cfgdev ? (cfgdev->netdev) : NULL)
+#define wdev_to_cfgdev(cfgdev)	(cfgdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE)
+#else
+#define cfgdev_to_wdev(cfgdev)	(cfgdev->ieee80211_ptr)
+#define wdev_to_cfgdev(cfgdev)	cfgdev ? (cfgdev->netdev) : NULL
+#define ndev_to_cfgdev(ndev)	(ndev)
+#define cfgdev_to_ndev(cfgdev)	(cfgdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false)
+#elif defined(WL_ENABLE_P2P_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->dev == cfg->p2p_net)) ? true : false)
+#else
+#define scan_req_match(cfg)	(((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \
+	true : false)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#define	PRINT_WDEV_INFO(cfgdev)	\
+	{ \
+		struct wireless_dev *wdev = cfgdev_to_wdev(cfgdev); \
+		struct net_device *netdev = wdev ? wdev->netdev : NULL; \
+		WL_DBG(("wdev_ptr:%p ndev_ptr:%p ifname:%s iftype:%d\n", wdev, netdev,	\
+			netdev ? netdev->name : "NULL (non-ndev device)",	\
+			wdev ? wdev->iftype : 0xff)); \
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define scan_req_iftype(req) (req->dev->ieee80211_ptr->iftype)
+#else
+#define scan_req_iftype(req) (req->wdev->iftype)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */
+
+#define wl_to_sr(w) (w->scan_req_int)
+#if defined(STATIC_WL_PRIV_STRUCT)
+#define wl_to_ie(w) (w->ie)
+#define wl_to_conn(w) (w->conn_info)
+#else
+#define wl_to_ie(w) (&w->ie)
+#define wl_to_conn(w) (&w->conn_info)
+#endif
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status_all(cfg, stat) \
+	(wl_get_status_all(cfg, WL_STATUS_ ## stat))
+#define wl_get_drv_status(cfg, stat, ndev)  \
+	(wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2))
+#define wl_clr_drv_status_all(cfg, stat)  \
+	(wl_set_status_all(cfg, WL_STATUS_ ## stat, 2))
+#define wl_chg_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4))
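+
+/*
+ * Illustrative usage sketch (not part of the original source); READY is one
+ * of the WL_STATUS_* bits these wrappers operate on:
+ *
+ *	wl_set_drv_status(cfg, READY, ndev);
+ *	if (wl_get_drv_status(cfg, READY, ndev))
+ *		wl_clr_drv_status(cfg, READY, ndev);
+ */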
+
+#define for_each_bss(list, bss, __i)	\
+	for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
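+
+/*
+ * Illustrative sketch (hypothetical variable names, not from the original
+ * source): iterating a scan result list with for_each_bss().
+ *
+ *	struct wl_bss_info *bss = next_bss(cfg->bss_list, NULL);
+ *	s32 i;
+ *	for_each_bss(cfg->bss_list, bss, i) {
+ *		WL_DBG(("bss[%d] record length %u\n", i, dtoh32(bss->length)));
+ *	}
+ */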
+
+#define for_each_ndev(cfg, iter, next) \
+	list_for_each_entry_safe(iter, next, &cfg->net_list, list)
+
+/* In case of WPS from wpa_supplicant, pairwise suite and group suite are 0.
+ * In addition to that, wpa_version is WPA_VERSION_1
+ */
+#define is_wps_conn(_sme) \
+	((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
+	 (!_sme->crypto.n_ciphers_pairwise) && \
+	 (!_sme->crypto.cipher_group))
+
+#define IS_AKM_SUITE_FT(sec) ({BCM_REFERENCE(sec); FALSE;})
+
+#define IS_AKM_SUITE_CCKM(sec) ({BCM_REFERENCE(sec); FALSE;})
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info)
+#ifdef strnicmp
+#undef strnicmp
+#endif /* strnicmp */
+#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len))
+#else
+#define STA_INFO_BIT(info) (STATION_ ## info)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */
+
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context);
+extern void wl_cfg80211_detach(struct bcm_cfg80211 *cfg);
+
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+            void *data);
+void wl_cfg80211_set_parent_dev(void *dev);
+struct device *wl_cfg80211_get_parent_dev(void);
+
+/* clear IEs */
+extern s32 wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx);
+
+extern s32 wl_cfg80211_up(struct net_device *net);
+extern s32 wl_cfg80211_down(struct net_device *net);
+extern s32 wl_cfg80211_notify_ifadd(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+	uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifdel(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+	uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifchange(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+	uint8 bssidx);
+extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx,
+	const char *name, uint8 *mac, uint8 bssidx, const char *dngl_name);
+extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg,
+	int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
+extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
+	int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+extern void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+extern bool wl_cfg80211_is_concurrent_mode(struct net_device * dev);
+extern void* wl_cfg80211_get_dhdp(struct net_device * dev);
+extern bool wl_cfg80211_is_p2p_active(struct net_device * dev);
+extern bool wl_cfg80211_is_roam_offload(struct net_device * dev);
+extern bool wl_cfg80211_is_event_from_connected_bssid(struct net_device * dev,
+		const wl_event_msg_t *e, int ifidx);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len);
+#ifdef WLMESH
+extern s32 wl_cfg80211_set_sae_password(struct net_device *net, char* buf, int len);
+#endif
+#ifdef WL11ULB
+extern s32 wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode);
+extern s32 wl_cfg80211_set_ulb_bw(struct net_device *dev,
+	u32 ulb_bw,  char *ifname);
+#endif /* WL11ULB */
+#ifdef P2PLISTEN_AP_SAMECHN
+extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable);
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+/* btcoex functions */
+void* wl_cfg80211_btcoex_init(struct net_device *ndev);
+void wl_cfg80211_btcoex_deinit(void);
+
+extern chanspec_t wl_chspec_from_legacy(chanspec_t legacy_chspec);
+extern chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CHANSPEC_BUF_SIZE	1024
+#define CHAN_SEL_IOCTL_DELAY	300
+#define CHAN_SEL_RETRY_COUNT	15
+#define CHANNEL_IS_RADAR(channel)	(((channel & WL_CHAN_RADAR) || \
+	(channel & WL_CHAN_PASSIVE)) ? true : false)
+#define CHANNEL_IS_2G(channel)	(((channel >= 1) && (channel <= 14)) ? \
+	true : false)
+#define CHANNEL_IS_5G(channel)	(((channel >= 36) && (channel <= 165)) ? \
+	true : false)
+extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command,
+	int total_len);
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n);
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+extern s32 wl_mode_to_nl80211_iftype(s32 mode);
+int wl_cfg80211_do_driver_init(struct net_device *net);
+void wl_cfg80211_enable_trace(u32 level);
+extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
+extern s32 wl_cfg80211_if_is_group_owner(void);
+extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+extern chanspec_t wl_ch_host_to_driver(struct bcm_cfg80211 *cfg, s32 bssidx, u16 channel);
+extern s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm);
+extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
+extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	u8 bsscfgidx);
+extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
+extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
+extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern void wl_cfg80211_update_power_mode(struct net_device *dev);
+extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
+extern void wl_terminate_event_handler(struct net_device *dev);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len);
+extern s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len);
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+extern struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev);
+extern s32 wl_cfg80211_set_if_band(struct net_device *ndev, int band);
+
+#define SCAN_BUF_CNT	2
+#define SCAN_BUF_NEXT	1
+#define WL_SCANTYPE_LEGACY	0x1
+#define WL_SCANTYPE_P2P		0x2
+#define wl_escan_set_sync_id(a, b) ((a) = htod16((b)->escan_sync_id_cntr++))
+#define wl_escan_set_type(a, b)
+#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf)
+#define wl_escan_check_sync_id(a, b, c) 0
+#define wl_escan_print_sync_id(a, b, c)
+#define wl_escan_increment_sync_id(a, b)
+#define wl_escan_init_sync_id(a)
+extern void wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
+	int ibss_vsie_len);
+extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
+#ifdef WL_RELMCAST
+extern void wl_cfg80211_set_rmc_pid(struct net_device *dev, int pid);
+#endif /* WL_RELMCAST */
+extern int wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag,
+	const u8 *vndr_ie, u32 vndr_ie_len);
+
+
+/* Action frame specific functions */
+extern u8 wl_get_action_category(void *frame, u32 frame_len);
+extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_SUPPORT_ACS
+#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */
+#define IOCTL_RETRY_COUNT 5
+#define CHAN_NOISE_DUMMY -80
+#define OBSS_TOKEN_IDX 15
+#define IBSS_TOKEN_IDX 15
+#define TX_TOKEN_IDX 14
+#define CTG_TOKEN_IDX 13
+#define PKT_TOKEN_IDX 15
+#define IDLE_TOKEN_IDX 12
+#endif /* WL_SUPPORT_ACS */
+
+
+extern int wl_cfg80211_get_ioctl_version(void);
+extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable);
+extern s32 wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data,
+		char *command, int total_len);
+#ifdef WBTEXT
+extern s32 wl_cfg80211_wbtext_set_default(struct net_device *ndev);
+extern s32 wl_cfg80211_wbtext_config(struct net_device *ndev, char *data,
+		char *command, int total_len);
+extern int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+		char *command, int total_len);
+extern int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+		char *command, int total_len);
+extern s32 wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data,
+		char *command, int total_len);
+#endif /* WBTEXT */
+extern s32 wl_cfg80211_get_chanspecs_2g(struct net_device *ndev,
+		void *buf, s32 buflen);
+extern s32 wl_cfg80211_get_chanspecs_5g(struct net_device *ndev,
+		void *buf, s32 buflen);
+
+extern s32 wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx, s32 up);
+extern bool wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+struct net_device *wl_cfg80211_post_ifcreate(struct net_device *ndev,
+	wl_if_event_info *event, u8 *addr, const char *name, bool rtnl_lock_reqd);
+extern s32 wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd);
+#if defined(WL_VIRTUAL_APSTA)
+extern int wl_cfg80211_interface_create(struct net_device *dev, char *name);
+extern int wl_cfg80211_interface_delete(struct net_device *dev, char *name);
+#if defined(PKT_FILTER_SUPPORT) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
+extern void wl_cfg80211_block_arp(struct net_device *dev, int enable);
+#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+#endif /* defined (WL_VIRTUAL_APSTA) */
+
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+extern void wl_cfg80211_del_p2p_wdev(struct net_device *dev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+extern int wl_cfg80211_set_spect(struct net_device *dev, int spect);
+extern int wl_cfg80211_get_sta_channel(struct net_device *dev);
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#ifdef P2P_LISTEN_OFFLOADING
+extern s32 wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len);
+extern s32 wl_cfg80211_p2plo_listen_stop(struct net_device *dev);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#define RETURN_EIO_IF_NOT_UP(wlpriv)                        \
+do {                                    \
+	struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv);           \
+	if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) {  \
+		WL_INFORM(("device is not ready\n"));           \
+		return -EIO;                        \
+	}                               \
+} while (0)
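+
+/*
+ * Typical use (illustrative sketch, not from the original source): bail out
+ * of a cfg80211 op with -EIO when the primary interface is not READY.
+ *
+ *	RETURN_EIO_IF_NOT_UP(cfg);
+ */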
+
+#ifdef QOS_MAP_SET
+extern uint8 *wl_get_up_table(void);
+#endif /* QOS_MAP_SET */
+
+#define P2PO_COOKIE     65535
+u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg);
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable);
+int wl_cfg80211_random_mac_enable(struct net_device *dev);
+int wl_cfg80211_random_mac_disable(struct net_device *dev);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname);
+int wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len);
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+int wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len);
+int wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname);
+int wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname);
+void wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg);
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+#ifdef SUPPORT_RSSI_LOGGING
+int wl_get_rssi_logging(struct net_device *dev, void *param);
+int wl_set_rssi_logging(struct net_device *dev, void *param);
+int wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param);
+#endif /* SUPPORT_RSSI_LOGGING */
+int wl_cfg80211_iface_count(struct net_device *dev);
+struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname);
+struct net_device* wl_get_netdev_by_name(struct bcm_cfg80211 *cfg, char *ifname);
+int wl_cfg80211_get_vndr_ouilist(struct bcm_cfg80211 *cfg, uint8 *buf, int max_cnt);
+s32 wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len);
+#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
new file mode 100644
index 0000000..6d62b3a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
@@ -0,0 +1,585 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfg_btcoex.c 699163 2017-05-12 05:18:23Z $
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+struct btcoex_info {
+	struct timer_list timer;
+	u32 timer_ms;
+	u32 timer_on;
+	u32 ts_dhcp_start;	/* ms timestamp to record time stats */
+	u32 ts_dhcp_ok;		/* ms timestamp to record time stats */
+	bool dhcp_done;	/* flag, indicates that the host finished
+					 * DHCP before T1/T2 expiration
+					 */
+	s32 bt_state;
+	struct work_struct work;
+	struct net_device *dev;
+};
+
+static struct btcoex_info *btcoex_info_loc = NULL;
+
+/* TODO: clean up the BT-Coex code, it still has some legacy ioctl/iovar functions */
+
+/* use New SCO/eSCO smart YG suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boosts wifi pkt priority to max; caution: not fair to SCO */
+#define BT_DHCP_USE_FLAGS
+/* T1: start SCO/eSCO priority suppression */
+#define BT_DHCP_OPPR_WIN_TIME	2500
+/* T2: turn off SCO/eSCO suppression (timeout) */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+enum wl_cfg80211_btcoex_status {
+	BT_DHCP_IDLE,
+	BT_DHCP_START,
+	BT_DHCP_OPPR_WIN,
+	BT_DHCP_FLAG_FORCE_TIMEOUT
+};
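+
+/*
+ * DHCP coex state machine (summarized from the handler below):
+ * BT_DHCP_START -> BT_DHCP_OPPR_WIN (give DHCP an opportunity window of
+ * BT_DHCP_OPPR_WIN_TIME ms) -> BT_DHCP_FLAG_FORCE_TIMEOUT (suppress BT for up
+ * to BT_DHCP_FLAG_FORCE_TIME ms) -> BT_DHCP_IDLE once DHCP completes or the
+ * timers expire.
+ */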
+
+/*
+ * Get a named driver iovar's uint register value and return an error indication.
+ * Calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+	uint reg, int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+		(char *)(&var), sizeof(var.buf));
+	error = wldev_ioctl_get(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf));
+
+	*retval = dtoh32(var.val);
+	return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+	char ioctlbuf_local[WLC_IOCTL_SMLEN];
+
+	bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+
+	return (wldev_ioctl_set(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local)));
+}
+/*
+ * Set a named driver iovar's uint register value and return an error indication.
+ * Calling example: dev_wlc_intvar_set_reg(dev, "btc_params", (char *)&regaddr, (char *)&value)
+ */
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+	char reg_addr[8];
+
+	memset(reg_addr, 0, sizeof(reg_addr));
+	memcpy((char *)&reg_addr[0], (char *)addr, 4);
+	memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+	return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
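+
+/*
+ * Note: btc_params writes use an 8-byte buffer laid out as
+ * [4-byte register index][4-byte value]; the buf_reg*va_* arrays below follow
+ * the same layout.
+ */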
+
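+/*
+ * Heuristic SCO/eSCO detection: sample btc_params register 27 up to 12 times,
+ * 5 ms apart, and report an active SCO/eSCO link once more than two samples
+ * match the SCO/eSCO packet pattern.
+ */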
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+	int ioc_res = 0;
+	bool res = FALSE;
+	int sco_id_cnt = 0;
+	int param27;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+
+		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+		WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));
+
+		if (ioc_res < 0) {
+			WL_ERR(("ioc read btc params error\n"));
+			break;
+		}
+
+		if ((param27 & 0x6) == 2) { /* count both sco & esco  */
+			sco_id_cnt++;
+		}
+
+		if (sco_id_cnt > 2) {
+			WL_TRACE(("sco/esco detected, pkt id_cnt:%d  samples:%d\n",
+				sco_id_cnt, i));
+			res = TRUE;
+			break;
+		}
+
+		OSL_SLEEP(5);
+	}
+
+	return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+	static bool saved_status = FALSE;
+
+	char buf_reg50va_dhcp_on[8] =
+		{ 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+	char buf_reg51va_dhcp_on[8] =
+		{ 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg64va_dhcp_on[8] =
+		{ 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg65va_dhcp_on[8] =
+		{ 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg71va_dhcp_on[8] =
+		{ 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	uint32 regaddr;
+	static uint32 saved_reg50;
+	static uint32 saved_reg51;
+	static uint32 saved_reg64;
+	static uint32 saved_reg65;
+	static uint32 saved_reg71;
+
+	if (trump_sco) {
+		/* this should reduce eSCO aggressive retransmit
+		 * w/o breaking it
+		 */
+
+		/* 1st save current */
+		WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+			  "override}\n"));
+		if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+			saved_status = TRUE;
+			WL_TRACE(("saved bt_params[50,51,64,65,71]:"
+				  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				  saved_reg50, saved_reg51,
+				  saved_reg64, saved_reg65, saved_reg71));
+		} else {
+			WL_ERR((":%s: save btc_params failed\n",
+				__FUNCTION__));
+			saved_status = FALSE;
+			return -1;
+		}
+
+		WL_TRACE(("override with [50,51,64,65,71]:"
+			  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			  *(u32 *)(buf_reg50va_dhcp_on+4),
+			  *(u32 *)(buf_reg51va_dhcp_on+4),
+			  *(u32 *)(buf_reg64va_dhcp_on+4),
+			  *(u32 *)(buf_reg65va_dhcp_on+4),
+			  *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg50va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg51va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg64va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg65va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg71va_dhcp_on[0], 8);
+
+		saved_status = TRUE;
+	} else if (saved_status) {
+		/* restore previously saved bt params */
+		WL_TRACE(("Do new SCO/eSCO coex algo {restore}\n"));
+
+		regaddr = 50;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg50);
+		regaddr = 51;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg51);
+		regaddr = 64;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg64);
+		regaddr = 65;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg65);
+		regaddr = 71;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg71);
+
+		WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+			"0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			saved_reg50, saved_reg51, saved_reg64,
+			saved_reg65, saved_reg71));
+
+		saved_status = FALSE;
+	} else {
+		WL_ERR((":%s attempt to restore BTCOEX params that were not saved\n",
+			__FUNCTION__));
+		return -1;
+	}
+	return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+	char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+	char buf_flag7_default[8]   = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+	/* set = 1: save & turn on; set = 0: turn off & restore prev settings */
+	set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+	WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+	if (set == TRUE)
+		/* Forcing bt_flag7  */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_dhcp_on[0],
+			sizeof(buf_flag7_dhcp_on));
+	else
+		/* Restoring default bt flag7 */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_default[0],
+			sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	unsigned long data
+#endif
+)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct btcoex_info *bt_local = from_timer(bt_local, t, timer);
+#else
+	struct btcoex_info *bt_local = (struct btcoex_info *)data;
+#endif
+	WL_TRACE(("Enter\n"));
+	bt_local->timer_on = 0;
+	schedule_work(&bt_local->work);
+}
+
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+	struct btcoex_info *btcx_inf;
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	btcx_inf = container_of(work, struct btcoex_info, work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+	if (btcx_inf->timer_on) {
+		btcx_inf->timer_on = 0;
+		del_timer_sync(&btcx_inf->timer);
+	}
+
+	switch (btcx_inf->bt_state) {
+		case BT_DHCP_START:
+			/* DHCP started
+			 * provide OPPORTUNITY window to get DHCP address
+			 */
+			WL_TRACE(("bt_dhcp stm: started \n"));
+
+			btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_OPPR_WIN:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T1 expiration\n"));
+				goto btc_coex_idle;
+			}
+
+			/* DHCP is not over yet, start lowering BT priority
+			 * enforce btc_params + flags if necessary
+			 */
+			WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+			btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_FLAG_FORCE_TIMEOUT:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T2 expiration\n"));
+			} else {
+				/* No DHCP during T1+T2, restore BT priority */
+				WL_TRACE(("DHCP wait interval T2:%d msec expired\n",
+					BT_DHCP_FLAG_FORCE_TIME));
+			}
+
+			/* Restoring default bt priority */
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+
+		default:
+			WL_ERR(("error g_status=%d !!!\n",	btcx_inf->bt_state));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+	}
+
+	net_os_wake_unlock(btcx_inf->dev);
+}
+
+void* wl_cfg80211_btcoex_init(struct net_device *ndev)
+{
+	struct btcoex_info *btco_inf = NULL;
+
+	btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+	if (!btco_inf)
+		return NULL;
+
+	btco_inf->bt_state = BT_DHCP_IDLE;
+	btco_inf->ts_dhcp_start = 0;
+	btco_inf->ts_dhcp_ok = 0;
+	/* Set up timer for BT  */
+	btco_inf->timer_ms = 10;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	timer_setup(&btco_inf->timer, wl_cfg80211_bt_timerfunc, 0);
+#else
+	init_timer(&btco_inf->timer);
+	btco_inf->timer.data = (ulong)btco_inf;
+	btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+#endif
+
+	btco_inf->dev = ndev;
+
+	INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+	btcoex_info_loc = btco_inf;
+	return btco_inf;
+}
+
+void wl_cfg80211_btcoex_deinit()
+{
+	if (!btcoex_info_loc)
+		return;
+
+	if (btcoex_info_loc->timer_on) {
+		btcoex_info_loc->timer_on = 0;
+		del_timer_sync(&btcoex_info_loc->timer);
+	}
+
+	cancel_work_sync(&btcoex_info_loc->work);
+
+	kfree(btcoex_info_loc);
+}
+
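+/*
+ * Handles the private "BTCOEXMODE" command: a value of "1" marks the start of
+ * a DHCP session (save btc_params regs 66/41/68 and, if SCO/eSCO is active,
+ * start the BT coex timer), "2" marks DHCP completion (stop the timer and
+ * restore the saved parameters). Other values are ignored.
+ */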
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command)
+{
+
+	struct btcoex_info *btco_inf = btcoex_info_loc;
+	char powermode_val = 0;
+	char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+	char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+	char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg66;
+	static uint32 saved_reg41;
+	static uint32 saved_reg68;
+	static bool saved_status = FALSE;
+
+	char buf_flag7_default[8] =   { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+	/* Figure out the powermode command value */
+	strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+		WL_TRACE_HW4(("DHCP session starts\n"));
+
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 1;
+
+#if defined(WL_VIRTUAL_APSTA) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
+		if ((dhd->op_mode & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) ==
+			DHD_FLAG_CONCURR_STA_HOSTAP_MODE) {
+			/* Block ARP frames while DHCP of STA interface is in
+			 * progress in case of STA/SoftAP concurrent mode
+			 */
+			wl_cfg80211_block_arp(dev, TRUE);
+		} else
+#endif /* WL_VIRTUAL_APSTA && APSTA_BLOCK_ARP_DURING_DHCP */
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP is in progress, disable packet filter!!!\n"));
+			dhd_enable_packet_filter(0, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+
+		/* Retrieve and save original reg values */
+		if ((saved_status == FALSE) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 66,  &saved_reg66)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 41,  &saved_reg41)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 68,  &saved_reg68)))   {
+				saved_status = TRUE;
+				WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+					saved_reg66, saved_reg41, saved_reg68));
+
+				/* Disable PM mode during DHCP session */
+				/* Start BT timer only for SCO connection */
+				if (btcoex_is_sco_active(dev)) {
+					/* btc_params 66 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg66va_dhcp_on[0],
+						sizeof(buf_reg66va_dhcp_on));
+					/* btc_params 41 0x33 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg41va_dhcp_on[0],
+						sizeof(buf_reg41va_dhcp_on));
+					/* btc_params 68 0x190 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg68va_dhcp_on[0],
+						sizeof(buf_reg68va_dhcp_on));
+					saved_status = TRUE;
+
+					btco_inf->bt_state = BT_DHCP_START;
+					btco_inf->timer_on = 1;
+					mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+					WL_TRACE(("enable BT DHCP Timer\n"));
+				}
+		}
+		else if (saved_status == TRUE) {
+			WL_ERR(("was called w/o DHCP OFF. Continue\n"));
+		}
+	}
+	else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 0;
+		WL_TRACE_HW4(("DHCP is complete \n"));
+
+#if defined(WL_VIRTUAL_APSTA) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
+		if ((dhd->op_mode & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) ==
+			DHD_FLAG_CONCURR_STA_HOSTAP_MODE) {
+			/* Unblock ARP frames */
+			wl_cfg80211_block_arp(dev, FALSE);
+		} else
+#endif /* WL_VIRTUAL_APSTA && APSTA_BLOCK_ARP_DURING_DHCP */
+		if (dhd->early_suspended) {
+			/* Enable packet filtering */
+			WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
+			dhd_enable_packet_filter(1, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+
+		/* Restoring PM mode */
+
+		/* Stop any bt timer because DHCP session is done */
+		WL_TRACE(("disable BT DHCP Timer\n"));
+		if (btco_inf->timer_on) {
+			btco_inf->timer_on = 0;
+			del_timer_sync(&btco_inf->timer);
+
+			if (btco_inf->bt_state != BT_DHCP_IDLE) {
+				/* need to restore original btc flags & extra btc params */
+				WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
+				/* wake up btcoex thread to restore bt flags + params */
+				schedule_work(&btco_inf->work);
+			}
+		}
+
+		/* Restoring btc_flags parameter anyway */
+		if (saved_status == TRUE)
+			dev_wlc_bufvar_set(dev, "btc_flags",
+				(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+		/* Restore original values */
+		if (saved_status == TRUE) {
+			regaddr = 66;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg66);
+			regaddr = 41;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg41);
+			regaddr = 68;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg68);
+
+			WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+				saved_reg66, saved_reg41, saved_reg68));
+		}
+		saved_status = FALSE;
+
+	}
+	else {
+		WL_ERR(("Unknown power setting, ignored\n"));
+	}
+
+	snprintf(command, 3, "OK");
+
+	return (strlen("OK"));
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgnan.c b/drivers/net/wireless/bcmdhd/wl_cfgnan.c
new file mode 100644
index 0000000..39682b5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgnan.c
@@ -0,0 +1,27 @@
+/*
+ * Neighbor Awareness Networking
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgnan.c 676811 2016-12-24 20:48:46Z $
+ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgnan.h b/drivers/net/wireless/bcmdhd/wl_cfgnan.h
new file mode 100644
index 0000000..a609af3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgnan.h
@@ -0,0 +1,364 @@
+/*
+ * Neighbor Awareness Networking
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgnan.h 650782 2016-07-22 11:51:53Z $
+ */
+
+#ifndef _wl_cfgnan_h_
+#define _wl_cfgnan_h_
+
+#define NAN_IOCTL_BUF_SIZE			512
+#define NAN_EVENT_NAME_MAX_LEN		40
+#define NAN_CONFIG_ATTR_MAX_LEN		24
+#define NAN_RTT_IOVAR_BUF_SIZE		1024
+#define WL_NAN_EVENT_CLEAR_BIT		32
+#define NAN_EVENT_MASK_ALL			0x7fffffff
+
+#define NAN_MAXIMUM_ID_NUMBER 255
+#define NAN_MAXIMUM_MASTER_PREFERENCE 255
+#ifdef NAN_DP
+#define MAX_IF_ADD_WAIT_TIME	1000
+#endif /* NAN_DP */
+#define NAN_INVALID_ID(id)	(id > NAN_MAXIMUM_ID_NUMBER)
+#define NAN_INVALID_ROLE(role)	(role > WL_NAN_ROLE_ANCHOR_MASTER)
+#define NAN_INVALID_CHANSPEC(chanspec)	((chanspec == INVCHANSPEC) || \
+	(chanspec == 0))
+#define NAN_INVALID_EVENT(num)	((num < WL_NAN_EVENT_START) || \
+	(num >= WL_NAN_EVENT_INVALID))
+#define NAN_INVALID_PROXD_EVENT(num)	(num != WLC_E_PROXD_NAN_EVENT)
+#define NAN_EVENT_BIT(event) (1U << (event - WL_NAN_EVENT_START))
+#define NAME_TO_STR(name) #name
+#define NAN_ID_CTRL_SIZE ((NAN_MAXIMUM_ID_NUMBER/8) + 1)
+
+#define SUPP_EVENT_PREFIX		"CTRL-EVENT-"
+#define EVENT_RTT_STATUS_STR	"NAN-RTT-STATUS"
+
+#define TIMESTAMP_PREFIX	"TSF="			/* timestamp */
+#define AMR_PREFIX			"AMR="			/* anchor master rank */
+#define DISTANCE_PREFIX		"DIST="			/* distance */
+#define ATTR_PREFIX			"ATTR="			/* attribute */
+#define ROLE_PREFIX			"ROLE="			/* role */
+#define CHAN_PREFIX			"CHAN="			/* channel */
+#define BITMAP_PREFIX		"BMAP="			/* bitmap */
+#define DEBUG_PREFIX		"DEBUG="		/* debug enable/disable flag */
+#define DW_LEN_PREFIX		"DW_LEN="		/* discovery window length */
+#define DW_INT_PREFIX		"DW_INT="		/* discovery window interval */
+#define STATUS_PREFIX		"STATUS="		/* status */
+#define PUB_ID_PREFIX		"PUB_ID="		/* publisher id */
+#define SUB_ID_PREFIX		"SUB_ID="		/* subscriber id */
+#define INSTANCE_ID_PREFIX		"LOCAL_ID="		/* Instance id */
+#define REMOTE_INSTANCE_ID_PREFIX		"PEER_ID="		/* Peer id */
+
+#ifdef NAN_P2P_CONFIG
+#define P2P_IE_PREFIX		"P2P_IE="		/* p2p ie  id */
+#define IE_EN_PREFIX		"ENBLE_IE="		/* enable p2p ie  */
+#endif
+#define PUB_PR_PREFIX		"PUB_PR="		/* publish period */
+#define PUB_INT_PREFIX		"PUB_INT="		/* publish interval (ttl) */
+#define CLUS_ID_PREFIX		"CLUS_ID="		/* cluster id */
+#define IF_ADDR_PREFIX		"IF_ADDR="		/* IF address */
+#define MAC_ADDR_PREFIX		"MAC_ADDR="		/* mac address */
+#define SVC_HASH_PREFIX		"SVC_HASH="		/* service hash */
+#define SVC_INFO_PREFIX		"SVC_INFO="		/* service information */
+#define HOP_COUNT_PREFIX	"HOP_COUNT="	/* hop count */
+#define MASTER_PREF_PREFIX	"MASTER_PREF="	/* master preference */
+#define ACTIVE_OPTION		"ACTIVE"		/* Active Subscribe. */
+#define SOLICITED_OPTION	"SOLICITED"		/* Solicited Publish. */
+#define UNSOLICITED_OPTION	"UNSOLICITED"	/* Unsolicited Publish. */
+/* anchor master beacon transmission time */
+#define AMBTT_PREFIX		"AMBTT="
+/* passive scan period for cluster merge */
+#define SCAN_PERIOD_PREFIX	"SCAN_PERIOD="
+/* passive scan interval for cluster merge */
+#define SCAN_INTERVAL_PREFIX	"SCAN_INTERVAL="
+#define BCN_INTERVAL_PREFIX		"BCN_INTERVAL="
+
+#define NAN_EVENT_STR_STARTED               "NAN-STARTED"
+#define NAN_EVENT_STR_JOINED                "NAN-JOINED"
+#define NAN_EVENT_STR_ROLE_CHANGE           "NAN-ROLE-CHANGE"
+#define NAN_EVENT_STR_SCAN_COMPLETE         "NAN-SCAN-COMPLETE"
+#define NAN_EVENT_STR_SDF_RX                "NAN-SDF-RX"
+#define NAN_EVENT_STR_REPLIED               "NAN-REPLIED"
+#define NAN_EVENT_STR_TERMINATED            "NAN-TERMINATED"
+#define NAN_EVENT_STR_FOLLOWUP_RX           "NAN-FOLLOWUP-RX"
+#define NAN_EVENT_STR_STATUS_CHANGE         "NAN-STATUS-CHANGE"
+#define NAN_EVENT_STR_MERGED                "NAN-MERGED"
+#define NAN_EVENT_STR_STOPPED               "NAN-STOPPED"
+#define NAN_EVENT_STR_P2P_RX                "NAN-P2P-RX"
+#define NAN_EVENT_STR_WINDOW_BEGUN_P2P      "NAN-WINDOW-BEGUN-P2P"
+#define NAN_EVENT_STR_WINDOW_BEGUN_MESH     "NAN-WINDOW-BEGUN-MESH"
+#define NAN_EVENT_STR_WINDOW_BEGUN_IBSS     "NAN-WINDOW-BEGUN-IBSS"
+#define NAN_EVENT_STR_WINDOW_BEGUN_RANGING  "NAN-WINDOW-BEGUN-RANGING"
+#define NAN_EVENT_STR_INVALID               "NAN-INVALID"
+
+#ifdef NAN_DP
+enum nan_dp_states {
+	NAN_DP_STATE_DISABLED = 0,
+	NAN_DP_STATE_ENABLED = 1
+};
+#endif /* NAN_DP */
+
+enum nan_de_event_type {
+	NAN_EVENT_START = 0,
+	NAN_EVENT_JOIN = 1
+};
+
+typedef struct nan_str_data {
+	u8 *data;
+	u32 dlen;
+} nan_str_data_t;
+
+typedef struct nan_mac_list {
+	u8 *list;
+	u32 num_mac_addr;
+} nan_mac_list_t;
+
+typedef struct nan_config_attr {
+	char name[NAN_CONFIG_ATTR_MAX_LEN];	/* attribute name */
+	u16 type;							/* attribute xtlv type */
+} nan_config_attr_t;
+
+typedef struct wl_nan_sid_beacon_tune {
+	u8 sid_enable;	/* flag for sending service id in beacon */
+	u8 sid_count;	/* Limit for number of SIDs to be included in Beacons */
+} wl_nan_sid_beacon_ctrl_t;
+
+typedef struct nan_cmd_data {
+	nan_config_attr_t attr;			/* set config attributes */
+	nan_str_data_t svc_hash;		/* service hash */
+	nan_str_data_t svc_info;		/* service information */
+	nan_str_data_t p2p_info;		/* p2p information */
+	struct ether_addr mac_addr;		/* mac address */
+	struct ether_addr clus_id;		/* cluster id */
+	struct ether_addr if_addr;		/* if addr */
+	u32 beacon_int;					/* beacon interval */
+	u32 ttl;                      /* time to live */
+	u32 period;                   /* publish period */
+	u32 bmap;						/* bitmap */
+	u32 role;						/* role */
+	wl_nan_instance_id_t pub_id;	/* publisher id */
+	wl_nan_instance_id_t sub_id;	/* subscriber id */
+	wl_nan_instance_id_t local_id;	/* Local id */
+	wl_nan_instance_id_t remote_id;	/* Remote id */
+	uint32 flags;					/* Flag bits */
+	u16 dw_len;						/* discovery window length */
+	u16 master_pref;				/* master preference */
+	chanspec_t chanspec;			/* channel */
+	u8 debug_flag;					/* debug enable/disable flag */
+	u8 life_count;             /* life count of the instance */
+	u8 srf_type;               /* SRF type */
+	u8 srf_include;            /* SRF include */
+	u8 use_srf;                /* use SRF */
+	nan_str_data_t rx_match;	/* matching filter rx */
+	nan_str_data_t tx_match;	/* matching filter tx */
+	nan_mac_list_t mac_list;   /* mac list */
+	uint8 hop_count_limit;     /* hop count limit */
+	uint8 nan_band;            /* nan band <A/B/AUTO> */
+	uint8 support_5g;          /* To decide dual band support */
+	uint32 nan_oui;            /* configured nan oui */
+#ifdef NAN_DP
+	struct ether_addr data_cluster_id;		/* data cluster id */
+#endif /* NAN_DP */
+	wl_nan_sid_beacon_ctrl_t sid_beacon;          /* sending service id in beacon */
+} nan_cmd_data_t;
+
+typedef int (nan_func_t)(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+	char *cmd, int size, nan_cmd_data_t *cmd_data);
+
+typedef struct nan_cmd {
+	const char *name;					/* command name */
+	nan_func_t *func;					/* command handler */
+} nan_cmd_t;
+
+typedef struct nan_event_hdr {
+	u16 event_subtype;
+	u32 flags;							/* future use */
+} nan_event_hdr_t;
+
+typedef struct wl_nan_tlv_data {
+	wl_nan_status_t nstatus;			/* status data */
+	wl_nan_disc_params_t params;		/* discovery parameters */
+	struct ether_addr mac_addr;			/* peer mac address */
+	struct ether_addr clus_id;			/* cluster id */
+	nan_str_data_t svc_info;			/* service info */
+	nan_str_data_t vend_info;			/* vendor info */
+	/* anchor master beacon transmission time */
+	u32 ambtt;
+	u32 dev_role;						/* device role */
+	u16 inst_id;						/* instance id */
+	u16 peer_inst_id;					/* Peer instance id */
+	u16 pub_id;							/* publisher id */
+	u16 sub_id;							/* subscriber id */
+	u16 master_pref;					/* master preference */
+	chanspec_t chanspec;				/* channel */
+	u8 amr[NAN_MASTER_RANK_LEN];		/* anchor master rank */
+	u8 svc_name[WL_NAN_SVC_HASH_LEN];	/* service name */
+	u8 hop_count;						/* hop count */
+	u8 enabled;							/* nan status flag */
+	nan_scan_params_t scan_params;		/* scan_param */
+	int reason_code;              /* reason code */
+} wl_nan_tlv_data_t;
+
+typedef struct _nan_de_event_data {
+	wl_nan_cfg_status_t *nstatus;
+	u8 nan_de_evt_type;
+} nan_de_event_data_t;
+
+typedef struct _nan_hal_resp {
+	unsigned short instance_id;
+	unsigned short subcmd;
+	int status;
+	int value;
+} nan_hal_resp_t;
+
+#ifdef NAN_DP
+typedef struct nan_data_path_peer {
+	struct ether_addr addr;        /* peer mac address */
+	chanspec_t chanspec;           /* Channel Specification */
+} nan_data_path_peer_t;
+#endif /* NAN_DP */
+
+extern int wl_cfgnan_set_vars_cbfn(void *ctx, uint8 *tlv_buf,
+	uint16 type, uint16 len);
+extern int wl_cfgnan_enable_events(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg);
+extern int wl_cfgnan_start_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_stop_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_support_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_status_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_pub_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_p2p_ie_add_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_p2p_ie_enable_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_p2p_ie_del_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+
+extern int wl_cfgnan_sub_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_transmit_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_set_config_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_rtt_config_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_rtt_find_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+#ifdef WL_NAN_DEBUG
+extern int wl_cfgnan_debug_handler(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+#endif /* WL_NAN_DEBUG */
+extern int wl_cfgnan_cmd_handler(struct net_device *dev,
+	struct bcm_cfg80211 *cfg, char *cmd, int cmd_len);
+extern s32 wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+extern s32 wl_cfgnan_notify_proxd_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+extern int wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg,
+	uint8 inst_type, uint8 *p_inst_id);
+extern int wl_cfgnan_validate_inst_id(struct bcm_cfg80211 *cfg,
+	uint8 inst_id);
+extern int wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg,
+	uint8 inst_id);
+extern int wl_cfgnan_get_inst_type(struct bcm_cfg80211 *cfg,
+	uint8 inst_id, uint8 *inst_type);
+
+typedef enum {
+	NAN_ATTRIBUTE_HEADER = 100,
+	NAN_ATTRIBUTE_HANDLE,
+	NAN_ATTRIBUTE_TRANSAC_ID,
+	/* NAN Enable request attributes */
+	NAN_ATTRIBUTE_5G_SUPPORT,
+	NAN_ATTRIBUTE_CLUSTER_LOW,
+	NAN_ATTRIBUTE_CLUSTER_HIGH,
+	NAN_ATTRIBUTE_SID_BEACON,
+	NAN_ATTRIBUTE_SYNC_DISC_5G,
+	NAN_ATTRIBUTE_RSSI_CLOSE,
+	NAN_ATTRIBUTE_RSSI_MIDDLE,
+	NAN_ATTRIBUTE_RSSI_PROXIMITY,
+	NAN_ATTRIBUTE_HOP_COUNT_LIMIT,
+	NAN_ATTRIBUTE_RANDOM_TIME,
+	NAN_ATTRIBUTE_MASTER_PREF,
+	NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL,
+	/* Nan Publish/Subscribe request Attributes */
+	NAN_ATTRIBUTE_PUBLISH_ID,
+	NAN_ATTRIBUTE_TTL,
+	NAN_ATTRIBUTE_PERIOD,
+	NAN_ATTRIBUTE_REPLIED_EVENT_FLAG,
+	NAN_ATTRIBUTE_PUBLISH_TYPE,
+	NAN_ATTRIBUTE_TX_TYPE,
+	NAN_ATTRIBUTE_PUBLISH_COUNT,
+	NAN_ATTRIBUTE_SERVICE_NAME_LEN,
+	NAN_ATTRIBUTE_SERVICE_NAME,
+	NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
+	NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
+	NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN,
+	NAN_ATTRIBUTE_RX_MATCH_FILTER,
+	NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN,
+	NAN_ATTRIBUTE_TX_MATCH_FILTER,
+	NAN_ATTRIBUTE_SUBSCRIBE_ID,
+	NAN_ATTRIBUTE_SUBSCRIBE_TYPE,
+	NAN_ATTRIBUTE_SERVICERESPONSEFILTER,
+	NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE,
+	NAN_ATTRIBUTE_USESERVICERESPONSEFILTER,
+	NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION,
+	NAN_ATTRIBUTE_SUBSCRIBE_MATCH,
+	NAN_ATTRIBUTE_SUBSCRIBE_COUNT,
+	NAN_ATTRIBUTE_MAC_ADDR,
+	NAN_ATTRIBUTE_MAC_ADDR_LIST,
+	NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES,
+	NAN_ATTRIBUTE_PUBLISH_MATCH,
+	/* Nan Event attributes */
+	NAN_ATTRIBUTE_ENABLE_STATUS,
+	NAN_ATTRIBUTE_JOIN_STATUS,
+	NAN_ATTRIBUTE_ROLE,
+	NAN_ATTRIBUTE_CHANNEL,	/* channel */
+	NAN_ATTRIBUTE_PEER_ID,
+	NAN_ATTRIBUTE_INST_ID,
+	NAN_ATTRIBUTE_OUI,
+	NAN_ATTRIBUTE_DATA_IF_ADD,       /* NAN DP Interface Address */
+	NAN_ATTRIBUTE_STATUS,
+	NAN_ATTRIBUTE_DE_EVENT_TYPE
+} NAN_ATTRIBUTE;
+
+#define NAN_BLOOM_LENGTH_DEFAULT        240
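+/* Maximum number of MAC addresses that fit in the default SRF/bloom filter space (240 / 6) */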
+#define NAN_SRF_MAX_MAC (NAN_BLOOM_LENGTH_DEFAULT / ETHER_ADDR_LEN)
+
+#ifdef NAN_DP
+int wl_cfgnan_data_path_open_handler(struct net_device *ndev,
+		struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+int wl_cfgnan_data_path_close_handler(struct net_device *ndev,
+		struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+#endif /* NAN_DP */
+#endif	/* _wl_cfgnan_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 0000000..69f7a05
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,2614 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgp2p.c 699163 2017-05-12 05:18:23Z $
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <net/rtnetlink.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+#include <wl_android.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_config.h>
+
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+
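+/* Static scratch buffer used to build the "p2p_scan" iovar parameters in wl_cfgp2p_escan() */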
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev *wdev, bool notify);
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+	.ndo_open       = wl_cfgp2p_if_open,
+	.ndo_stop       = wl_cfgp2p_if_stop,
+	.ndo_do_ioctl   = wl_cfgp2p_do_ioctl,
+	.ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_ENABLE_P2P_IF */
+
+
+bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+
+	if (frame == NULL)
+		return false;
+	pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1)
+		return false;
+
+	if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+		pact_frm->action == P2P_PUB_AF_ACTION &&
+		pact_frm->oui_type == P2P_VER &&
+		memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_action_frame_t *act_frm;
+
+	if (frame == NULL)
+		return false;
+	act_frm = (wifi_p2p_action_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_action_frame_t) -1)
+		return false;
+
+	if (act_frm->category == P2P_AF_CATEGORY &&
+		act_frm->type  == P2P_VER &&
+		memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+#define GAS_RESP_LEN		2
+#define DOUBLE_TLV_BODY_OFF	4
+#define GAS_RESP_OFFSET		4
+#define GAS_CRESP_OFFSET	5
+
+bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len)
+{
+	bcm_tlv_t *ie = (bcm_tlv_t *)data;
+	u8 *frame = NULL;
+	u16 id, flen;
+
+	/* Skip the first ANQP element, if the frame has an ANQP element */
+	ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID);
+
+	if (ie == NULL)
+		return false;
+
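+	/* Step past the Advertisement Protocol TLV and the 2-byte GAS response length;
+	 * the following info ID and length are little-endian 16-bit fields.
+	 */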
+	frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;
+	id = ((u16) (((frame)[1] << 8) | (frame)[0]));
+	flen = ((u16) (((frame)[3] << 8) | (frame)[2]));
+
+	/* If the contents match the OUI and the type */
+	if (flen >= WFA_OUI_LEN + 1 &&
+		id ==  P2PSD_GAS_NQP_INFOID &&
+		!bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) &&
+		subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
+{
+
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+	if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+		return false;
+	if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+#ifdef WL11U
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP)
+		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+			(u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET,
+			frame_len);
+
+	else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+			(u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET,
+			frame_len);
+	else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ)
+		return true;
+	else
+		return false;
+#else
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return true;
+	else
+		return false;
+#endif /* WL11U */
+}
+
+bool wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len)
+{
+
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+	if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+		return false;
+	if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ)
+		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+			(u8 *)sd_act_frm->query_data,
+			frame_len);
+	else
+		return false;
+}
+
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+	wifi_p2p_action_frame_t *act_frm;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+	if (!frame || frame_len <= 2)
+		return;
+
+	if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+		pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+		switch (pact_frm->subtype) {
+			case P2P_PAF_GON_REQ:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_RSP:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_CONF:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_REQ:
+				CFGP2P_ACTION(("%s P2P Invitation Request  Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_RSP:
+				CFGP2P_ACTION(("%s P2P Invitation Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown Public Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+
+		}
+
+	} else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+		act_frm = (wifi_p2p_action_frame_t *)frame;
+		switch (act_frm->subtype) {
+			case P2P_AF_NOTICE_OF_ABSENCE:
+				CFGP2P_ACTION(("%s P2P Notice of Absence Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_REQ:
+				CFGP2P_ACTION(("%s P2P Presence Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_RSP:
+				CFGP2P_ACTION(("%s P2P Presence Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_GO_DISC_REQ:
+				CFGP2P_ACTION(("%s P2P Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+		}
+
+	} else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+		sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+		switch (sd_act_frm->action) {
+			case P2PSD_ACTION_ID_GAS_IREQ:
+				CFGP2P_ACTION(("%s GAS Initial Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_IRESP:
+				CFGP2P_ACTION(("%s GAS Initial Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CREQ:
+				CFGP2P_ACTION(("%s GAS Comeback Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CRESP:
+				CFGP2P_ACTION(("%s GAS Comeback Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown GAS Frame,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+		}
+
+
+	}
+}
+
+/*
+ *  Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
+{
+	if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+		CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+		return -ENOMEM;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	cfg->p2p->cfg = cfg;
+#endif
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) = -1;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) = -1;
+	return BCME_OK;
+
+}
+/*
+ *  Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	CFGP2P_ERR(("In\n"));
+	if (cfg->p2p) {
+		kfree(cfg->p2p);
+		cfg->p2p = NULL;
+	}
+	cfg->p2p_supported = 0;
+}
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+	s32 ret = BCME_OK;
+	s32 val = 0;
+	/* Do we have to check whether APSTA is enabled or not ? */
+	ret = wldev_iovar_getint(ndev, "apsta", &val);
+	if (ret < 0) {
+		CFGP2P_ERR(("get apsta error %d\n", ret));
+		return ret;
+	}
+	if (val == 0) {
+		val = 1;
+		ret = wldev_ioctl_set(ndev, WLC_DOWN, &val, sizeof(s32));
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
+			return ret;
+		}
+
+		ret = wldev_iovar_setint(ndev, "apsta", val);
+		if (ret < 0) {
+			/* return error and fail the initialization */
+			CFGP2P_ERR(("wl apsta %d set error. ret: %d\n", val, ret));
+			return ret;
+		}
+
+		ret = wldev_ioctl_set(ndev, WLC_UP, &val, sizeof(s32));
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_UP error %d\n", ret));
+			return ret;
+		}
+	}
+
+	/* In case of COB type, the firmware has a default MAC address.
+	 * After initializing the firmware, we have to set the current MAC
+	 * address in the firmware as the P2P device address.
+	 */
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+		sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
+	if (ret && ret != BCME_UNSUPPORTED) {
+		CFGP2P_ERR(("failed to update device address ret %d\n", ret));
+	}
+	return ret;
+}
+
+int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg)
+{
+	if (!cfg->p2p) {
+		CFGP2P_DBG(("p2p not enabled! \n"));
+		return false;
+	}
+
+	if ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) &&
+	    (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1))
+		return true;
+	else
+		return false;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to create
+ * @if_type  : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec   : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_ERR(("---cfg p2p_ifadd "MACDBG" %s %u\n",
+		MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+	        (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(err < 0)) {
+		printk("'cfg p2p_ifadd' error %d\n", err);
+		return err;
+	}
+
+	return err;
+}
+
+/* Disable a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to disable
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+		netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printk("'cfg p2p_ifdis' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_ERR(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n",
+	    netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printk("'cfg p2p_ifdel' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac      : MAC address of the BSS to change a role
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec, s32 conn_idx)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+
+	struct net_device *netdev =  wl_to_p2p_bss_ndev(cfg, conn_idx);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u"
+		" chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+		(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT,
+		ifreq.chspec));
+
+	err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(err < 0)) {
+		printk("'cfg p2p_ifupd' error %d\n", err);
+	} else if (if_type == WL_P2P_IF_GO) {
+		cfg->p2p->p2p_go_count++;
+	}
+	return err;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the created BSS
+ * @index    : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index)
+{
+	s32 ret;
+	u8 getbuf[64];
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
+
+	ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
+		sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL);
+
+	if (ret == 0) {
+		memcpy(index, getbuf, sizeof(s32));
+		CFGP2P_INFO(("---cfg p2p_if   ==> %d\n", *index));
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+	}
+
+	return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode      : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
+ * @channel   : the channel to listen on
+ * @listen_ms : the time (in milliseconds) to wait
+ * @bssidx    : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+	wl_p2p_disc_st_t discovery_mode;
+	s32 ret;
+	struct net_device *dev;
+	CFGP2P_DBG(("enter\n"));
+
+	if (unlikely(bssidx == WL_INVALID)) {
+		CFGP2P_ERR((" %d index out of range\n", bssidx));
+		return -1;
+	}
+
+	dev = wl_cfgp2p_find_ndev(cfg, bssidx);
+	if (unlikely(dev == NULL)) {
+		CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+		return BCME_NOTFOUND;
+	}
+
+#ifdef P2PLISTEN_AP_SAMECHN
+	CFGP2P_DBG(("p2p0 listen channel %d  AP connection chan %d \n",
+		channel, cfg->channel));
+	if ((mode == WL_P2P_DISC_ST_LISTEN) && (cfg->channel == channel)) {
+		struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+		if (cfg->p2p_resp_apchn_status) {
+			CFGP2P_DBG(("p2p_resp_apchn_status already ON \n"));
+			return BCME_OK;
+		}
+
+		if (wl_get_drv_status(cfg, CONNECTED, primary_ndev)) {
+			ret = wl_cfg80211_set_p2p_resp_ap_chn(primary_ndev, 1);
+			cfg->p2p_resp_apchn_status = true;
+			CFGP2P_DBG(("p2p_resp_apchn_status ON \n"));
+			return ret;
+		}
+	}
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+	/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+	discovery_mode.state = mode;
+	discovery_mode.chspec = wl_ch_host_to_driver(cfg, bssidx, channel);
+	discovery_mode.dwell = listen_ms;
+	ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+		sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync);
+
+	return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+static s32
+wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index)
+{
+	s32 ret;
+	struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+
+	ret = wldev_iovar_getint(dev, "p2p_dev", index);
+	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+	if (unlikely(ret <  0)) {
+	    CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+		return ret;
+	}
+	return ret;
+}
+
+int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg)
+{
+	int i;
+	s32 connected_cnt;
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	if (!dhd)
+		return (-ENODEV);
+	for (i = P2PAPI_BSSCFG_CONNECTION1; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (wl_to_p2p_bss_bssidx(cfg, i) == -1) {
+			if (i == P2PAPI_BSSCFG_CONNECTION2) {
+				if (!(dhd->op_mode & DHD_FLAG_MP2P_MODE)) {
+					CFGP2P_ERR(("Multi p2p not supported"));
+					return BCME_ERROR;
+				}
+				if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 1) {
+					CFGP2P_ERR(("Failed to create second p2p interface. "
+						"Already one connection exists"));
+					return BCME_ERROR;
+				}
+			}
+			return i;
+		}
+	}
+	return BCME_ERROR;
+}
+
+s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
+{
+
+	s32 bssidx = 0;
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
+		CFGP2P_ERR(("do nothing, already initialized\n"));
+		return ret;
+	}
+
+	ret = wl_cfgp2p_set_discovery(cfg, 1);
+	if (ret < 0) {
+		CFGP2P_ERR(("set discover error\n"));
+		return ret;
+	}
+	/* Enable P2P Discovery in the WL Driver */
+	ret = wl_cfgp2p_get_disc_idx(cfg, &bssidx);
+
+	if (ret < 0) {
+		return ret;
+	}
+	/* In the CFG80211 case, check if the p2p_discovery interface has an allocated p2p_wdev */
+	if (!cfg->p2p_wdev) {
+		CFGP2P_ERR(("p2p_wdev is NULL.\n"));
+		return BCME_NODEVICE;
+	}
+	/* Make an entry in the netinfo */
+	ret = wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_MODE_BSS, 0, bssidx);
+	if (unlikely(ret)) {
+		return ret;
+	}
+
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
+	    wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = bssidx;
+
+	/* Set the initial discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret != 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+		wl_cfgp2p_set_discovery(cfg, 0);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+		return 0;
+	}
+	return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @cfg        : wl_private data
+ * Returns 0 if success.
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	CFGP2P_DBG(("enter\n"));
+	bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (bssidx <= 0) {
+		CFGP2P_ERR(("do nothing, not initialized\n"));
+		return -1;
+	}
+
+	/* Clear our saved WPS and P2P IEs for the discovery BSS */
+	wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
+
+	/* Set the discovery state to SCAN */
+	wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            bssidx);
+	/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+	ret = wl_cfgp2p_set_discovery(cfg, 0);
+
+	/* Remove the p2p disc entry in the netinfo */
+	wl_dealloc_netinfo_by_wdev(cfg, cfg->p2p_wdev);
+
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+	return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @cfg	: wl_private data
+ * @ie  : probe request ie (WPS IE + P2P IE)
+ * @ie_len   : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	const u8 *ie, u32 ie_len)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	CFGP2P_DBG(("enter\n"));
+	if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+		CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+		goto set_ie;
+	}
+
+	ret = wl_cfgp2p_init_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" init discovery error %d\n", ret));
+		goto exit;
+	}
+
+	wl_set_p2p_status(cfg, DISCOVERY_ON);
+	/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+	 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+	 * Some peer devices may not initiate WPS with us if this bit is not set.
+	 */
+	ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE),
+			"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" wsec error %d\n", ret));
+	}
+set_ie:
+	if (ie_len) {
+
+		if (bcmcfg_to_prmry_ndev(cfg) == dev) {
+			bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		} else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) {
+			WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev));
+			return BCME_ERROR;
+		}
+
+		ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev),
+			bssidx,
+			VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("set probreq ie error %d\n", ret));
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @cfg       : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	CFGP2P_DBG((" enter\n"));
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+
+	if (!cfg->p2p) { // terence 20130113: Fix for p2p NULL pointer
+		ret = BCME_ERROR;
+		CFGP2P_ERR(("wl->p2p is NULL\n"));
+		goto exit;
+	}
+
+#ifdef DHD_IFDEBUG
+	WL_ERR(("%s: (cfg)->p2p->bss[type].bssidx: %d\n",
+		__FUNCTION__, (cfg)->p2p->bss[P2PAPI_BSSCFG_DEVICE].bssidx));
+#endif
+	bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (bssidx <= 0) {
+		CFGP2P_ERR((" do nothing, not initialized\n"));
+		return 0;
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+	}
+	/* Do a scan abort to stop the driver's scan engine in case it is still
+	 * waiting out an action frame tx dwell time.
+	 */
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+	ret = wl_cfgp2p_deinit_discovery(cfg);
+
+exit:
+	return ret;
+}
+
+s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
+	u32 num_chans, u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose)
+{
+	s32 ret = BCME_OK;
+	s32 memsize;
+	s32 eparams_size;
+	u32 i;
+	s8 *memblk;
+	wl_p2p_scan_t *p2p_params;
+	wl_escan_params_t *eparams;
+	wlc_ssid_t ssid;
+	/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+
+	struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	/* Allocate scan params with space for num_chans channels and 0 SSIDs */
+	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+	    OFFSETOF(wl_escan_params_t, params)) +
+		num_chans * sizeof(eparams->params.channel_list[0]);
+
+	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+	memblk = scanparambuf;
+	if (memsize > sizeof(scanparambuf)) {
+		CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n",
+		    memsize, sizeof(scanparambuf)));
+		return -1;
+	}
+	memset(memblk, 0, memsize);
+	memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+	if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/*
+		 * If we are in SEARCH STATE, we don't need to set the SSID explicitly,
+		 * because the dongle uses the P2P WILDCARD SSID internally by default.
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+		/* use null ssid */
+		ssid.SSID_len = 0;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN STATE 802.11 SCAN
+		 * The WFD supplicant issues p2p_find with type=progressive or type=full.
+		 * For p2p_find with type=progressive, we have to set the SSID to the
+		 * P2P WILDCARD SSID, because otherwise we would just do a broadcast scan.
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+		/* use wild card ssid */
+		ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+		memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
+	} else {
+		CFGP2P_ERR((" invalid search state %d\n", search_state));
+		return -1;
+	}
+
+
+	/* Fill in the P2P scan structure at the start of the iovar param block */
+	p2p_params = (wl_p2p_scan_t*) memblk;
+	p2p_params->type = 'E';
+	/* Fill in the Scan structure that follows the P2P scan structure */
+	eparams = (wl_escan_params_t*) (p2p_params + 1);
+	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+	if (active)
+		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	else
+		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+	if (tx_dst_addr == NULL)
+		memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	else
+		memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
+
+	if (ssid.SSID_len)
+		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+
+	switch (p2p_scan_purpose) {
+		case P2P_SCAN_SOCIAL_CHANNEL:
+			eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_AFX_PEER_NORMAL:
+		case P2P_SCAN_AFX_PEER_REDUCED:
+			eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_CONNECT_TRY:
+			eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+			break;
+		default:
+			if (wl_get_drv_status_all(cfg, CONNECTED))
+				eparams->params.active_time = -1;
+			else
+				eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+			break;
+	}
+
+	if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
+		eparams->params.nprobes = htod32(eparams->params.active_time /
+			WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	else
+		eparams->params.nprobes = htod32((eparams->params.active_time /
+			P2PAPI_SCAN_NPROBS_TIME_MS));
+
+
+	if (eparams->params.nprobes <= 0)
+		eparams->params.nprobes = 1;
+	CFGP2P_DBG(("nprobes # %d, active_time %d\n",
+		eparams->params.nprobes, eparams->params.active_time));
+	eparams->params.passive_time = htod32(-1);
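+	/* channel_num packs the SSID count (0 here) into the upper bits and the
+	 * channel count into the lower bits of a single 32-bit field.
+	 */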
+	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	    (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	for (i = 0; i < num_chans; i++) {
+		eparams->params.channel_list[i] = wl_ch_host_to_driver(cfg, bssidx, channels[i]);
+	}
+	eparams->version = htod32(ESCAN_REQ_VERSION);
+	eparams->action =  htod16(action);
+	wl_escan_set_sync_id(eparams->sync_id, cfg);
+	wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
+	CFGP2P_INFO(("SCAN CHANNELS : "));
+
+	for (i = 0; i < num_chans; i++) {
+		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+		else CFGP2P_INFO((",%d", channels[i]));
+	}
+
+	CFGP2P_INFO(("\n"));
+
+	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+		memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	printf("%s: P2P_SEARCH sync ID: %d, bssidx: %d\n", __FUNCTION__, eparams->sync_id, bssidx);
+	if (ret == BCME_OK)
+		wl_set_p2p_status(cfg, SCANNING);
+	return ret;
+}
+
+/* search function to reach at common channel to send action frame
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr)
+{
+	s32 ret = 0;
+	u32 chan_cnt = 0;
+	u16 *default_chan_list = NULL;
+	p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL;
+	if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID)
+		return -EINVAL;
+	WL_TRACE_HW4((" Enter\n"));
+	if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY))
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (channel)
+		chan_cnt = AF_PEER_SEARCH_CNT;
+	else
+		chan_cnt = SOCIAL_CHAN_CNT;
+
+	if (cfg->afx_hdl->pending_tx_act_frm && cfg->afx_hdl->is_active) {
+		wl_action_frame_t *action_frame;
+		action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame);
+		if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len))	{
+			chan_cnt = 1;
+			p2p_scan_purpose = P2P_SCAN_AFX_PEER_REDUCED;
+		}
+	}
+
+	default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
+	if (default_chan_list == NULL) {
+		CFGP2P_ERR(("channel list allocation failed \n"));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (channel) {
+		u32 i;
+		/* insert same channel to the chan_list */
+		for (i = 0; i < chan_cnt; i++) {
+			default_chan_list[i] = channel;
+		}
+	} else {
+		default_chan_list[0] = SOCIAL_CHAN_1;
+		default_chan_list[1] = SOCIAL_CHAN_2;
+		default_chan_list[2] = SOCIAL_CHAN_3;
+	}
+	ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt,
+		default_chan_list, WL_P2P_DISC_ST_SEARCH,
+		WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose);
+	kfree(default_chan_list);
+exit:
+	return ret;
+}
+
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Check whether the given IE looks like WFA WFDisplay IE. */
+#ifndef WFA_OUI_TYPE_WFD
+#define WFA_OUI_TYPE_WFD	0x0a			/* WiFi Display OUI TYPE */
+#endif
+#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+
+
+/* Is any of the TLVs the expected entry? If
+ * not, update the TLVs buffer pointer/length.
+ */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+		!bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+		type == ie[TLV_BODY_OFF + oui_len]) {
+		return TRUE;
+	}
+
+	if (tlvs == NULL)
+		return FALSE;
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return FALSE;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_p2p_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_wfd_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
+{
+	vndr_ie_setbuf_t hdr;	/* aligned temporary vndr_ie buffer header */
+	s32 iecount;
+	u32 data_offset;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+		CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+	hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Copy packet flags that indicate which packets will contain this IE */
+	pktflag = htod32(pktflag);
+	memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+
+	/* Add the IE ID to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+
+	/* Add the IE length to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len =
+		(uint8) VNDR_IE_MIN_LEN + datalen;
+
+	/* Add the IE OUI to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2];
+
+	/* Copy the aligned temporary vndr_ie buffer header to the IE buffer */
+	memcpy(iebuf, &hdr, sizeof(hdr) - 1);
+
+	/* Copy the IE data to the IE buffer */
+	data_offset =
+		(u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] -
+		(u8*)&hdr;
+	memcpy(iebuf + data_offset, data, datalen);
+	return data_offset + datalen;
+
+}
+
+struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+	u32 i;
+	struct net_device *ndev = NULL;
+	if (bssidx < 0) {
+		CFGP2P_ERR((" bsscfg idx is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			ndev = wl_to_p2p_bss_ndev(cfg, i);
+			break;
+		}
+	}
+
+exit:
+	return ndev;
+}
+/*
+ * Search the driver's p2p->bss array idx based on the bssidx argument.
+ * Note that this idx is applicable only for the primary and P2P
+ * interfaces; virtual AP/STA interfaces are not covered here.
+ * Parameters:
+ * @cfg    : wl_private data
+ * @bssidx : bssidx which indicates bsscfg->idx of firmware.
+ * @type   : output arg to store array idx of p2p->bss.
+ * Returns BCME_OK on success, BCME_BADARG otherwise.
+ */
+
+s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type)
+{
+	u32 i;
+	if (bssidx < 0 || type == NULL) {
+		CFGP2P_ERR((" argument is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			*type = i;
+			return BCME_OK;
+		}
+	}
+
+exit:
+	return BCME_BADARG;
+}
+
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = NULL;
+
+	if (!cfg || !cfg->p2p || !cfgdev)
+		return BCME_ERROR;
+
+	CFGP2P_DBG((" Enter\n"));
+#ifdef DHD_IFDEBUG
+	PRINT_WDEV_INFO(cfgdev);
+#endif /* DHD_IFDEBUG */
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifdef P2P_LISTEN_OFFLOADING
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+		CFGP2P_ERR(("DISC_IN_PROGRESS cleared\n"));
+		if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) {
+				cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+						&cfg->remain_on_chan, GFP_KERNEL);
+			} else {
+				CFGP2P_ERR(("Invalid cfgdev. Dropping the "
+						"remain_on_channel_expired event.\n"));
+			}
+#else
+			cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		}
+	}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+	if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
+		wl_set_p2p_status(cfg, LISTEN_EXPIRED);
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		if (cfg->afx_hdl->is_listen == TRUE &&
+			wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("Listen DONE for action frame\n"));
+			complete(&cfg->act_frm_scan);
+		}
+#ifdef WL_CFG80211_SYNC_GON
+		else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
+			WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
+				jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies)));
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))
+				wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+			complete(&cfg->wait_next_af);
+		}
+#endif /* WL_CFG80211_SYNC_GON */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL))
+#else
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) ||
+			wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL))
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		{
+			WL_DBG(("Listen DONE for remain on channel expired\n"));
+			wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy &&
+				    bcmcfg_to_p2p_wdev(cfg)) {
+					/*
+					 * To prevent a kernel panic in case cfgdev->wiphy
+					 * is invalid, add an explicit check here.
+					 */
+					cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
+						cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
+				} else
+					CFGP2P_ERR(("Invalid cfgdev. Dropping the "
+						"remain_on_channel_expired event.\n"));
+#else
+				if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy)
+					cfg80211_remain_on_channel_expired(cfgdev,
+						cfg->last_roc_id, &cfg->remain_on_chan,
+						cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+		if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg),
+			WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
+			CFGP2P_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+		}
+	} else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+	return ret;
+
+}
+
+/*
+ *  Timer expire callback function for LISTEN
+ *  We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
+ *  so let's do it from thread context.
+ */
+void
+wl_cfgp2p_listen_expired(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	ulong data
+#endif
+)
+{
+	wl_event_msg_t msg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct p2p_info *p2p = from_timer(p2p, t, listen_timer);
+	struct bcm_cfg80211 *cfg = p2p->cfg;
+#else
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
+#endif
+	struct net_device *ndev;
+	CFGP2P_DBG((" Enter\n"));
+
+	if (!cfg) {
+		CFGP2P_ERR((" No cfg\n"));
+		return;
+	}
+	bzero(&msg, sizeof(wl_event_msg_t));
+	msg.event_type =  hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	msg.bsscfgidx  =  wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+#if defined(WL_ENABLE_P2P_IF)
+	ndev = cfg->p2p_net ? cfg->p2p_net :
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE);
+#else
+	ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE);
+#endif /* WL_ENABLE_P2P_IF */
+	if (!ndev) {
+		CFGP2P_ERR((" No ndev\n"));
+		return;
+	}
+	wl_cfg80211_event(ndev, &msg, NULL);
+}
+/*
+ *  Routine for cancelling the P2P LISTEN
+ */
+static s32
+wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+                         struct wireless_dev *wdev, bool notify)
+{
+	WL_DBG(("Enter \n"));
+	/* Irrespective of whether timer is running or not, reset
+	 * the LISTEN state.
+	 */
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		del_timer_sync(&cfg->p2p->listen_timer);
+		if (notify) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if (bcmcfg_to_p2p_wdev(cfg))
+				cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+			if (ndev && ndev->ieee80211_ptr)
+				cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		}
+	}
+	return 0;
+}
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters   :
+ * @cfg          : wl_private data
+ * @channel     : channel to listen on
+ * @duration_ms : the time (in milliseconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms)
+{
+#define EXTRA_DELAY_TIME	100
+	s32 ret = BCME_OK;
+	struct timer_list *_timer;
+	s32 extra_delay;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
+	if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) {
+
+		CFGP2P_ERR((" Discovery is not set, so we have nothing to do\n"));
+
+		ret = BCME_NOTREADY;
+		goto exit;
+	}
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+		goto exit;
+
+	}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+	if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
+		CFGP2P_ERR((" failed to set WLC_E_P2P_PROBREQ_MSG\n"));
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	_timer = &cfg->p2p->listen_timer;
+
+	/*  We wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from the dongle;
+	 *  as a fallback, the timer fires after duration_ms + 100 ms + duration_ms / 10.
+	 */
+	if (ret == BCME_OK) {
+		extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10);
+	} else {
+		/* if failed to set listen, it doesn't need to wait whole duration. */
+		duration_ms = 100 + duration_ms / 20;
+		extra_delay = 0;
+	}
+
+	INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#undef EXTRA_DELAY_TIME
+exit:
+	return ret;
+}
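+
+/*
+ * Illustrative caller sketch (not part of the driver call flow): assuming
+ * discovery has been brought up via wl_cfgp2p_enable_discovery(), a listen
+ * on a social channel would look roughly like the following. The 500 ms
+ * duration is an arbitrary example value.
+ *
+ *	s32 err;
+ *
+ *	err = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
+ *	if (err == BCME_OK)
+ *		err = wl_cfgp2p_discover_listen(cfg, SOCIAL_CHAN_1, 500);
+ *	if (err != BCME_OK)
+ *		CFGP2P_ERR(("listen setup failed %d\n", err));
+ */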
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" Enter\n"));
+	if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+
+		CFGP2P_DBG((" do nothing, discovery is off\n"));
+		return ret;
+	}
+	if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) {
+		CFGP2P_DBG(("already : %d\n", enable));
+		return ret;
+	}
+
+	wl_chg_p2p_status(cfg, SEARCH_ENABLED);
+	/* When disabling Search, reset the WL driver's p2p discovery state to
+	 * WL_P2P_DISC_ST_SCAN.
+	 */
+	if (!enable) {
+		wl_clr_p2p_status(cfg, SCANNING);
+		ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	}
+
+	return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+            const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	u32 event_type = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+	struct net_device *ndev = NULL;
+	u8 bsscfgidx = e->bsscfgidx;
+
+	CFGP2P_DBG((" Enter\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+			if (status == WLC_E_STATUS_SUCCESS) {
+				wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+				CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
+				if (!cfg->need_wait_afrx && cfg->af_sent_channel) {
+					CFGP2P_DBG(("no need to wait next AF.\n"));
+					wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+				}
+			}
+			else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+				wl_set_p2p_status(cfg, ACTION_TX_NOACK);
+				CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+				wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
+			}
+		} else {
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE is received, "
+						"status : %d\n", status));
+
+			if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+				complete(&cfg->send_af_done);
+		}
+	}
+	return ret;
+}
+/* Send an action frame immediately without doing channel synchronization.
+ *
+ * This function does not wait for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event is received once the action frame has
+ * been transmitted and indicates whether an 802.11 ack was received for it.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event is received when the
+ * off-channel dwell time for the sent action frame has completed.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 evt_ret = BCME_OK;
+	s32 timeout = 0;
+	wl_eventmsg_buf_t buf;
+
+
+	CFGP2P_INFO(("\n"));
+	CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+	    af_params->channel, af_params->dwell_time));
+
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0)
+		return evt_ret;
+
+	cfg->af_sent_channel  = af_params->channel;
+#ifdef WL_CFG80211_SYNC_GON
+	cfg->af_tx_sent_jiffies = jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (ret < 0) {
+		CFGP2P_ERR((" sending action frame is failed\n"));
+		goto exit;
+	}
+
+	timeout = wait_for_completion_timeout(&cfg->send_af_done,
+		msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
+
+	if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+		CFGP2P_INFO(("tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+		CFGP2P_INFO(("bcast tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else {
+		ret = BCME_ERROR;
+		CFGP2P_INFO(("tx action frame operation is failed\n"));
+	}
+	/* clear status bit for action tx */
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+exit:
+	CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) {
+		WL_ERR(("TX frame events revert back failed \n"));
+		return evt_ret;
+	}
+
+	return ret;
+}
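+
+/*
+ * Illustrative caller sketch (simplified; the exact wl_af_params_t allocation
+ * size and the action-frame body fields are driver/caller specific and are
+ * omitted here):
+ *
+ *	wl_af_params_t *af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+ *
+ *	if (af_params) {
+ *		af_params->channel = SOCIAL_CHAN_1;	// example channel
+ *		af_params->dwell_time = 200;		// example dwell time, in ms
+ *		// ... fill in the action frame body here ...
+ *		(void)wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx);
+ *		kfree(af_params);
+ *	}
+ */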
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr)
+{
+	struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+	struct ether_addr *int_addr;
+
+	memcpy(mac_addr, primary_addr, sizeof(struct ether_addr));
+	mac_addr->octet[0] |= 0x02;
+	WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet)));
+
+	int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1);
+	memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+	int_addr->octet[4] ^= 0x80;
+	WL_DBG(("Primary P2P Interface address:"MACDBG "\n", MAC2STRDBG(int_addr->octet)));
+
+	int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION2);
+	memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+	int_addr->octet[4] ^= 0x90;
+}
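+
+/*
+ * Worked example of the derivation above, assuming a primary MAC of
+ * 00:90:4c:11:22:33:
+ *   P2P Device address:           02:90:4c:11:22:33  (locally administered bit set)
+ *   Connection 1 interface addr:  02:90:4c:11:a2:33  (octet[4] ^= 0x80)
+ *   Connection 2 interface addr:  02:90:4c:11:b2:33  (octet[4] ^= 0x90)
+ */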
+
+/* P2P IF Address change to Virtual Interface MAC Address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+	u16 len = ie->len;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+	CFGP2P_DBG((" Enter\n"));
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+	/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			if (subelt_id == P2P_SEID_INTINTADDR) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_INFO) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_GROUP_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+			}
+			return;
+		} else {
+			CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+		}
+		subel += subelt_len;
+	}
+}
+
+s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	s32 ret = BCME_OK;
+	s32 p2p_supported = 0;
+	ret = wldev_iovar_getint(ndev, "p2p",
+	               &p2p_supported);
+	if (ret < 0) {
+		if (ret == BCME_UNSUPPORTED) {
+			CFGP2P_INFO(("p2p is unsupported\n"));
+			return 0;
+		} else {
+			CFGP2P_ERR(("cfg p2p error %d\n", ret));
+			return ret;
+		}
+	}
+	if (cfg->pub->conf->fw_type == FW_TYPE_MESH)
+		p2p_supported = 0;
+	if (p2p_supported == 1) {
+		CFGP2P_INFO(("p2p is supported\n"));
+	} else {
+		CFGP2P_INFO(("p2p is unsupported\n"));
+		p2p_supported = 0;
+	}
+	return p2p_supported;
+}
+
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = NULL;
+	struct wireless_dev *wdev = NULL;
+	s32 i = 0, index = -1;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wdev = bcmcfg_to_p2p_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+	ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
+	wdev = ndev_to_wdev(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE);
+	wl_cfgp2p_disable_discovery(cfg);
+
+#if defined(WL_CFG80211_P2P_DEV_IF) && !defined(KEEP_WIFION_OPTION)
+/*
+ * In CUSTOMER_HW4 implementation "ifconfig wlan0 down" can get
+ * called during phone suspend and customer requires the p2p
+ * discovery interface to be left untouched so that the user
+ * space can resume without any problem.
+ */
+	if (cfg->p2p_wdev) {
+		/* If p2p wdev is left out, clean it up */
+		WL_ERR(("Clean up the p2p discovery IF\n"));
+		wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+	}
+#endif /* WL_CFG80211_P2P_DEV_IF && !KEEP_WIFION_OPTION */
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		index = wl_to_p2p_bss_bssidx(cfg, i);
+		if (index != WL_INVALID)
+			wl_cfg80211_clear_per_bss_ies(cfg, index);
+	}
+	wl_cfgp2p_deinit_priv(cfg);
+	return 0;
+}
+
+int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->p2p && ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) ||
+		(wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1)))
+		return true;
+	else
+		return false;
+
+}
+
+s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	s32 ret = -1;
+	int count, start, duration;
+	wl_p2p_sched_t dongle_noa;
+	s32 bssidx, type;
+	int iovar_len = sizeof(dongle_noa);
+	CFGP2P_DBG((" Enter\n"));
+
+	memset(&dongle_noa, 0, sizeof(dongle_noa));
+
+	if (wl_cfgp2p_vif_created(cfg)) {
+		cfg->p2p->noa.desc[0].start = 0;
+
+		sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
+		CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+			count, start, duration));
+		if (count != -1)
+			cfg->p2p->noa.desc[0].count = count;
+
+		/* supplicant gives interval as start */
+		if (start != -1)
+			cfg->p2p->noa.desc[0].interval = start;
+
+		if (duration != -1)
+			cfg->p2p->noa.desc[0].duration = duration;
+
+		if (cfg->p2p->noa.desc[0].count != 255 && cfg->p2p->noa.desc[0].count != 0) {
+			cfg->p2p->noa.desc[0].start = 200;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+		}
+		else if (cfg->p2p->noa.desc[0].count == 0) {
+			cfg->p2p->noa.desc[0].start = 0;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_RESET;
+		}
+		else {
+			/* Continuous NoA interval. */
+			dongle_noa.action = WL_P2P_SCHED_ACTION_DOZE;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			if ((cfg->p2p->noa.desc[0].interval == 102) ||
+				(cfg->p2p->noa.desc[0].interval == 100)) {
+				cfg->p2p->noa.desc[0].start = 100 -
+					cfg->p2p->noa.desc[0].duration;
+				dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+			}
+			else {
+				dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+			}
+		}
+		/* Put the noa descriptor in dongle format for dongle */
+		dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count);
+		if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration);
+		}
+		else {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000);
+		}
+		dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000);
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK)
+			return BCME_ERROR;
+
+		if (dongle_noa.action == WL_P2P_SCHED_ACTION_RESET) {
+			iovar_len -= sizeof(wl_p2p_sched_desc_t);
+		}
+
+		ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, type),
+			"p2p_noa", &dongle_noa, iovar_len, cfg->ioctl_buf,
+			WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+		if (ret < 0) {
+			CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+	}
+	return ret;
+}
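+
+/*
+ * Example of the input expected by wl_cfgp2p_set_p2p_noa(): the buffer is
+ * parsed as "count start duration" in decimal, with -1 meaning "leave the
+ * field unchanged". The values below are purely illustrative.
+ *
+ *	char noa_buf[] = "1 102 30";
+ *	(void)wl_cfgp2p_set_p2p_noa(cfg, ndev, noa_buf, sizeof(noa_buf));
+ */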
+s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len)
+{
+
+	wifi_p2p_noa_desc_t *noa_desc;
+	int len = 0, i;
+	char _buf[200];
+
+	CFGP2P_DBG((" Enter\n"));
+	buf[0] = '\0';
+	if (wl_cfgp2p_vif_created(cfg)) {
+		if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) {
+			_buf[0] = 1; /* noa index */
+			_buf[1] = (cfg->p2p->ops.ops ? 0x80: 0) |
+				(cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */
+			len += 2;
+			if (cfg->p2p->noa.desc[0].count) {
+				noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+				noa_desc->cnt_type = cfg->p2p->noa.desc[0].count;
+				noa_desc->duration = cfg->p2p->noa.desc[0].duration;
+				noa_desc->interval = cfg->p2p->noa.desc[0].interval;
+				noa_desc->start = cfg->p2p->noa.desc[0].start;
+				len += sizeof(wifi_p2p_noa_desc_t);
+			}
+			if (buf_len <= len * 2) {
+				CFGP2P_ERR(("ERROR: buf_len %d is not enough for "
+					"returning noa in string format\n", buf_len));
+				return -1;
+			}
+			/* We have to convert the buffer data into ASCII strings */
+			for (i = 0; i < len; i++) {
+				snprintf(buf, 3, "%02x", _buf[i]);
+				buf += 2;
+			}
+			*buf = '\0';
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+		return -1;
+	}
+	return len * 2;
+}
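+
+/*
+ * The value returned through buf by wl_cfgp2p_get_p2p_noa() is a hex-ASCII
+ * dump: one byte for the NoA index, one byte combining the opportunistic
+ * power-save flag and CTWindow, then optionally one encoded
+ * wifi_p2p_noa_desc_t. For example, "0187..." would mean index 1, OPS
+ * enabled with CTWindow 7, followed by the descriptor bytes (illustrative
+ * values only).
+ */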
+s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int ps, ctw;
+	int ret = -1;
+	s32 legacy_ps;
+	s32 conn_idx;
+	s32 bssidx;
+	struct net_device *dev;
+
+	CFGP2P_DBG((" Enter\n"));
+	if (wl_cfgp2p_vif_created(cfg)) {
+		sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
+		CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK)
+			return BCME_ERROR;
+		dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+		if (ctw != -1) {
+			cfg->p2p->ops.ctw = ctw;
+			ret = 0;
+		}
+		if (ps != -1) {
+			cfg->p2p->ops.ops = ps;
+			ret = wldev_iovar_setbuf(dev,
+				"p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (ret < 0) {
+				CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+			}
+		}
+
+		if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+			ret = wldev_ioctl_set(dev,
+				WLC_SET_PM, &legacy_ps, sizeof(legacy_ps));
+			if (unlikely(ret))
+				CFGP2P_ERR(("error (%d)\n", ret));
+			wl_cfg80211_update_power_mode(dev);
+		}
+		else
+			CFGP2P_ERR(("illegal setting\n"));
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+		ret = -1;
+	}
+	return ret;
+}
+
+s32
+wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int ch, bw;
+	s32 conn_idx;
+	s32 bssidx;
+	struct net_device *dev;
+	char smbuf[WLC_IOCTL_SMLEN];
+	wl_chan_switch_t csa_arg;
+	u32 chnsp = 0;
+	int err = 0;
+
+	CFGP2P_DBG((" Enter\n"));
+	if (wl_cfgp2p_vif_created(cfg)) {
+		sscanf(buf, "%10d %10d", &ch, &bw);
+		CFGP2P_DBG(("Enter ch %d bw %d\n", ch, bw));
+
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) {
+			return BCME_ERROR;
+		}
+		dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+		if (ch <= 0 || bw <= 0) {
+			CFGP2P_ERR(("Channel and bandwidth must be positive!\n"));
+			return BCME_ERROR;
+		}
+
+		csa_arg.mode = DOT11_CSA_MODE_ADVISORY;
+		csa_arg.count = P2P_ECSA_CNT;
+		csa_arg.reg = 0;
+
+		snprintf(buf, len, "%d/%d", ch, bw);
+		chnsp = wf_chspec_aton(buf);
+		if (chnsp == 0) {
+			CFGP2P_ERR(("%s:chsp is not correct\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		chnsp = wl_chspec_host_to_driver(chnsp);
+		csa_arg.chspec = chnsp;
+
+		err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
+			smbuf, sizeof(smbuf), NULL);
+		if (err) {
+			CFGP2P_ERR(("%s:set p2p_ecsa failed:%d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+	} else {
+		CFGP2P_ERR(("ERROR: set_p2p_ecsa in non-p2p mode\n"));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
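+
+/*
+ * Example input for wl_cfgp2p_set_p2p_ecsa(): the buffer carries "channel
+ * bandwidth" as decimal text; it is re-written as "<ch>/<bw>" and parsed with
+ * wf_chspec_aton(), so it must be writable. Values are illustrative only.
+ *
+ *	char ecsa_buf[32] = "36 80";
+ *	(void)wl_cfgp2p_set_p2p_ecsa(cfg, ndev, ecsa_buf, sizeof(ecsa_buf));
+ */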
+
+s32
+wl_cfgp2p_increase_p2p_bw(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int algo;
+	int bw;
+	int ret = BCME_OK;
+
+
+	sscanf(buf, "%3d", &bw);
+	if (bw == 0) {
+		algo = 0;
+		ret = wldev_iovar_setbuf(ndev, "mchan_algo", &algo, sizeof(algo), cfg->ioctl_buf,
+				WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret < 0) {
+			CFGP2P_ERR(("fw set mchan_algo failed %d\n", ret));
+			return BCME_ERROR;
+		}
+	} else {
+		algo = 1;
+		ret = wldev_iovar_setbuf(ndev, "mchan_algo", &algo, sizeof(algo), cfg->ioctl_buf,
+				WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret < 0) {
+			CFGP2P_ERR(("fw set mchan_algo failed %d\n", ret));
+			return BCME_ERROR;
+		}
+		ret = wldev_iovar_setbuf(ndev, "mchan_bw", &bw, sizeof(bw), cfg->ioctl_buf,
+				WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret < 0) {
+			CFGP2P_ERR(("fw set mchan_bw failed %d\n", ret));
+			return BCME_ERROR;
+		}
+	}
+	return BCME_OK;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = NULL;
+	u16 len = 0;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+
+	if (!buf) {
+		WL_ERR(("P2P IE not present"));
+		return 0;
+	}
+
+	ie = (wifi_p2p_ie_t*) buf;
+	len = ie->len;
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			/* This will point to start of subelement attrib after
+			 * attribute id & len
+			 */
+			return subel;
+		}
+
+		/* Go to next subelement */
+		subel += subelt_len;
+	}
+
+	/* Not Found */
+	return NULL;
+}
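+
+/*
+ * P2P attributes inside the IE are encoded as a 1-byte attribute ID followed
+ * by a 2-byte little-endian length and the attribute body. For example
+ * (illustrative bytes only), a P2P Capability attribute could look like:
+ *
+ *	bytes: 02 02 00 25 09
+ *	       => id = 0x02 (P2P Capability), len = 0x0002 (little-endian),
+ *	          body = 0x25 0x09 (device capab, group capab)
+ *
+ * wl_cfgp2p_retreive_p2pattrib() returns a pointer to the body (the 0x25
+ * byte here) when the requested element_id matches.
+ */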
+
+#define P2P_GROUP_CAPAB_GO_BIT	0x01
+
+u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib)
+{
+	bcm_tlv_t *ie;
+	u8* pAttrib;
+
+	CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) {
+			/* Have the P2p ie. Now check for attribute */
+			if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) {
+				CFGP2P_INFO(("P2P attribute %d was found at parse %p",
+					attrib, parse));
+				return pAttrib;
+			}
+			else {
+				parse += (ie->len + TLV_HDR_LEN);
+				len -= (ie->len + TLV_HDR_LEN);
+				CFGP2P_INFO(("P2P attribute %d not found. Moving parse"
+					" to %p and len to %d", attrib, parse, len));
+			}
+		}
+		else {
+			/* It was not p2p IE. parse will get updated automatically to next TLV */
+			CFGP2P_INFO(("It was not a P2P IE. parse %p len %d", parse, len));
+		}
+	}
+	CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
+	return NULL;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+	u8 *capability = NULL;
+	bool p2p_go	= 0;
+	u8 *ptr = NULL;
+
+	if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) {
+		WL_ERR(("P2P Capability attribute not found"));
+		return NULL;
+	}
+
+	/* Check Group capability for Group Owner bit */
+	p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+	if (!p2p_go) {
+		return bi->BSSID.octet;
+	}
+
+	/* In probe responses, DEVICE INFO attribute will be present */
+	if (!(ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length,  P2P_SEID_DEV_INFO))) {
+		/* If DEVICE_INFO is not found, this might be a beacon frame.
+		 * check for DEVICE_ID in the beacon frame.
+		 */
+		ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+		bi->ie_length,  P2P_SEID_DEV_ID);
+	}
+
+	if (!ptr)
+		WL_ERR((" Neither DEVICE_ID nor DEVICE_INFO attribute present in P2P IE "));
+
+	return ptr;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	snprintf(info->driver, sizeof(info->driver), "p2p");
+	snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0));
+}
+
+struct ethtool_ops cfgp2p_ethtool_ops = {
+	.get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_ENABLE_P2P_IF)
+s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	struct net_device* net = NULL;
+	struct wireless_dev *wdev = NULL;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+	if (cfg->p2p_net) {
+		CFGP2P_ERR(("p2p_net defined already.\n"));
+		return -EINVAL;
+	}
+
+	/* Allocate etherdev, including space for private structure */
+	if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) {
+		CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		free_netdev(net);
+		return -ENOMEM;
+	}
+
+	strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
+	net->name[IFNAMSIZ - 1] = '\0';
+
+	/* Copy the reference to bcm_cfg80211 */
+	memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->do_ioctl = wl_cfgp2p_do_ioctl;
+	net->hard_start_xmit = wl_cfgp2p_start_xmit;
+	net->open = wl_cfgp2p_if_open;
+	net->stop = wl_cfgp2p_if_stop;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+	/* Register with a dummy MAC addr */
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+	net->ieee80211_ptr = wdev;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &cfgp2p_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+	SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+	/* Associate p2p0 network interface with new wdev */
+	wdev->netdev = net;
+
+	ret = register_netdev(net);
+	if (ret) {
+		CFGP2P_ERR((" register_netdevice failed (%d)\n", ret));
+		free_netdev(net);
+		kfree(wdev);
+		return -ENODEV;
+	}
+
+	/* store p2p net ptr for further reference. Note that iflist won't have this
+	 * entry as the corresponding firmware interface is a "hidden" interface.
+	 */
+	cfg->p2p_wdev = wdev;
+	cfg->p2p_net = net;
+
+	printk("%s: P2P Interface Registered\n", net->name);
+
+	return ret;
+}
+
+s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
+{
+
+	if (!cfg || !cfg->p2p_net) {
+		CFGP2P_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+
+	unregister_netdev(cfg->p2p_net);
+	free_netdev(cfg->p2p_net);
+
+	return 0;
+}
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+
+	if (skb)
+	{
+		CFGP2P_DBG(("(%s) is not used for data operations. Dropping the packet.\n",
+			ndev->name));
+		dev_kfree_skb_any(skb);
+	}
+
+	return 0;
+}
+
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/* There is no ifidx corresponding to p2p0 in our firmware, so we should
+	 * not handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs.
+	 * For Android PRIV CMD handling, map it to the primary I/F.
+	 */
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(ndev, ifr, cmd);
+
+	} else {
+		CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
+		__FUNCTION__, cmd));
+		return -1;
+	}
+
+	return ret;
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_if_open(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+
+	if (!wdev || !wl_cfg80211_is_p2p_active(net))
+		return -EINVAL;
+	WL_TRACE(("Enter\n"));
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	/* If the F/W download (ifconfig wlan0 up) hasn't been done by now,
+	 * do it here. This makes sure that in concurrent mode the supplicant
+	 * is not dependent on a particular order of interface initialization,
+	 * i.e. you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N
+	 * -iwlan0.
+	 */
+	wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	wl_cfg80211_do_driver_init(net);
+
+	return 0;
+}
+
+static int wl_cfgp2p_if_stop(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+	if (!wdev)
+		return -EINVAL;
+
+	wl_cfg80211_scan_stop(cfg, net);
+
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
+					& (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO)));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	return 0;
+}
+
+bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
+{
+	return (if_ops == &wl_cfgp2p_if_ops);
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = NULL;
+	struct ether_addr primary_mac;
+
+	if (!cfg || !cfg->p2p_supported)
+		return ERR_PTR(-EINVAL);
+
+	WL_TRACE(("Enter\n"));
+
+	if (cfg->p2p_wdev) {
+#ifndef EXPLICIT_DISCIF_CLEANUP
+		dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* EXPLICIT_DISCIF_CLEANUP */
+		/*
+		 * This is not expected. It can happen due to a
+		 * supplicant crash/unclean de-initialization which
+		 * didn't free the p2p discovery interface. Indicate
+		 * driver hang to user space so that the framework
+		 * can re-init the Wi-Fi.
+		 */
+		CFGP2P_ERR(("p2p_wdev defined already.\n"));
+		wl_probe_wdev_all(cfg);
+#ifdef EXPLICIT_DISCIF_CLEANUP
+		/*
+		 * CUSTOMER_HW4 design doesn't delete the p2p discovery
+		 * interface in the "ifconfig wlan0 down" context, which comes
+		 * without a preceding NL80211_CMD_DEL_INTERFACE for p2p
+		 * discovery. But during a supplicant crash the DEL_IFACE
+		 * command will not happen and will leave an interface behind
+		 * even after ifconfig wlan0 down. So delete the iface
+		 * first and then indicate the HANG event.
+		 */
+		wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#else
+		dhd->hang_reason = HANG_REASON_IFACE_OP_FAILURE;
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+		if (dhd->memdump_enabled) {
+			/* Load the dongle side dump to host
+			 * memory and then BUG_ON()
+			 */
+			dhd->memdump_type = DUMP_TYPE_HANG_ON_IFACE_OP_FAIL;
+			dhd_bus_mem_dump(dhd);
+		}
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+		net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+		return ERR_PTR(-ENODEV);
+#endif /* EXPLICIT_DISCIF_CLEANUP */
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(&primary_mac, 0, sizeof(primary_mac));
+	get_primary_mac(cfg, &primary_mac);
+	wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+	wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
+	memcpy(wdev->address, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN);
+
+
+	/* store p2p wdev ptr for further reference. */
+	cfg->p2p_wdev = wdev;
+
+	printf("P2P interface registered\n");
+	printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev);
+
+	return wdev;
+}
+
+int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return -EINVAL;
+
+	WL_TRACE(("Enter\n"));
+
+	ret = wl_cfgp2p_set_firm_p2p(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	p2p_on(cfg) = true;
+#if defined(P2P_IE_MISSING_FIX)
+	cfg->p2p_prb_noti = false;
+#endif
+
+	printf("P2P interface started\n");
+
+exit:
+	return ret;
+}
+
+void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return;
+
+	CFGP2P_DBG(("Enter\n"));
+
+	ret = wl_cfg80211_scan_stop(cfg, wdev);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+	}
+
+	if (!cfg->p2p)
+		return;
+
+	/* Cancel any on-going listen */
+	wl_cfgp2p_cancel_listen(cfg, bcmcfg_to_prmry_ndev(cfg), wdev, TRUE);
+
+	ret = wl_cfgp2p_disable_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
+	}
+
+	p2p_on(cfg) = false;
+
+	printf("Exit. P2P interface stopped\n");
+
+	return;
+}
+
+int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg)
+{
+	bool rollback_lock = false;
+
+	if (!wdev)
+		return -EINVAL;
+
+	WL_TRACE(("Enter\n"));
+	printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev);
+
+	if (!rtnl_is_locked()) {
+		rtnl_lock();
+		rollback_lock = true;
+	}
+
+	cfg80211_unregister_wdev(wdev);
+
+	if (rollback_lock)
+		rtnl_unlock();
+
+	synchronize_rcu();
+
+	kfree(wdev);
+
+	if (cfg)
+		cfg->p2p_wdev = NULL;
+
+	printf("P2P interface unregistered\n");
+
+	return 0;
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+void
+wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+	int status = 0;
+
+	if (!frame || (frame_len < (sizeof(*pact_frm) + WL_P2P_AF_STATUS_OFFSET - 1))) {
+		return;
+	}
+
+	if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+		pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+		if (pact_frm->subtype == P2P_PAF_GON_RSP && tx) {
+			CFGP2P_ACTION(("Check TX P2P Group Owner Negotiation Rsp Frame status\n"));
+			status = pact_frm->elts[WL_P2P_AF_STATUS_OFFSET];
+			if (status) {
+				cfg->need_wait_afrx = false;
+				return;
+			}
+		}
+	}
+
+	cfg->need_wait_afrx = true;
+	return;
+}
+
+int
+wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request)
+{
+	if (request && (request->n_ssids == 1) &&
+		(request->n_channels == 1) &&
+		IS_P2P_SSID(request->ssids[0].ssid, WL_P2P_WILDCARD_SSID_LEN) &&
+		(request->ssids[0].ssid_len > WL_P2P_WILDCARD_SSID_LEN)) {
+		return true;
+	}
+	return false;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
new file mode 100644
index 0000000..ca930ac
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -0,0 +1,471 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgp2p.h 676811 2016-12-24 20:48:46Z $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <802.11.h>
+#include <p2p.h>
+
+struct bcm_cfg80211;
+extern u32 wl_dbg_level;
+
+typedef struct wifi_p2p_ie wifi_wfd_ie_t;
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library.  Do not
+ * confuse this with a bsscfg index.  This value is an index into the
+ * saved_ie[] array of structures which in turn contains a bsscfg index field.
+ */
+typedef enum {
+	P2PAPI_BSSCFG_PRIMARY, /**< maps to driver's primary bsscfg */
+	P2PAPI_BSSCFG_DEVICE, /**< maps to driver's P2P device discovery bsscfg */
+	P2PAPI_BSSCFG_CONNECTION1, /**< maps to driver's P2P connection bsscfg */
+	P2PAPI_BSSCFG_CONNECTION2,
+	P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
+
+typedef enum {
+	P2P_SCAN_PURPOSE_MIN,
+	P2P_SCAN_SOCIAL_CHANNEL, /**< scan for social channel */
+	P2P_SCAN_AFX_PEER_NORMAL, /**< scan for action frame search */
+	P2P_SCAN_AFX_PEER_REDUCED, /**< scan for action frame search with short time */
+	P2P_SCAN_DURING_CONNECTED, /**< scan during connected status */
+	P2P_SCAN_CONNECT_TRY, /**< scan for connecting */
+	P2P_SCAN_NORMAL, /**< scan during not-connected status */
+	P2P_SCAN_PURPOSE_MAX
+} p2p_scan_purpose_t;
+
+/** vendor ies max buffer length for probe response or beacon */
+#define VNDR_IES_MAX_BUF_LEN	1400
+/** normal vendor ies buffer length */
+#define VNDR_IES_BUF_LEN 		512
+
+struct p2p_bss {
+	s32 bssidx;
+	struct net_device *dev;
+	void *private_data;
+	struct ether_addr mac_addr;
+};
+
+struct p2p_info {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct bcm_cfg80211 *cfg;
+#endif
+	bool on;    /**< p2p on/off switch */
+	bool scan;
+	int16 search_state;
+	s8 vir_ifname[IFNAMSIZ];
+	unsigned long status;
+	struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
+	struct timer_list listen_timer;
+	wl_p2p_sched_t noa;
+	wl_p2p_ops_t ops;
+	wlc_ssid_t ssid;
+	s8 p2p_go_count;
+};
+
+#define MAX_VNDR_IE_NUMBER	10
+
+struct parsed_vndr_ie_info {
+	char *ie_ptr;
+	u32 ie_len;	/**< total length including id & length field */
+	vndr_ie_t vndrie;
+};
+
+struct parsed_vndr_ies {
+	u32 count;
+	struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+	WLP2P_STATUS_DISCOVERY_ON = 0,
+	WLP2P_STATUS_SEARCH_ENABLED,
+	WLP2P_STATUS_IF_ADDING,
+	WLP2P_STATUS_IF_DELETING,
+	WLP2P_STATUS_IF_CHANGING,
+	WLP2P_STATUS_IF_CHANGED,
+	WLP2P_STATUS_LISTEN_EXPIRED,
+	WLP2P_STATUS_ACTION_TX_COMPLETED,
+	WLP2P_STATUS_ACTION_TX_NOACK,
+	WLP2P_STATUS_SCANNING,
+	WLP2P_STATUS_GO_NEG_PHASE,
+	WLP2P_STATUS_DISC_IN_PROGRESS
+};
+
+
+#define wl_to_p2p_bss_ndev(cfg, type)		((cfg)->p2p->bss[type].dev)
+#define wl_to_p2p_bss_bssidx(cfg, type)		((cfg)->p2p->bss[type].bssidx)
+#define wl_to_p2p_bss_macaddr(cfg, type)     &((cfg)->p2p->bss[type].mac_addr)
+#define wl_to_p2p_bss_saved_ie(cfg, type)	((cfg)->p2p->bss[type].saved_ie)
+#define wl_to_p2p_bss_private(cfg, type)		((cfg)->p2p->bss[type].private_data)
+#define wl_to_p2p_bss(cfg, type)			((cfg)->p2p->bss[type])
+#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+	change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define p2p_on(cfg) ((cfg)->p2p->on)
+#define p2p_scan(cfg) ((cfg)->p2p->scan)
+#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on)
+
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFGP2P_ERROR_TEXT		"CFGP2P-INFO2) "
+#else
+#define CFGP2P_ERROR_TEXT		"CFGP2P-ERROR) "
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#ifdef DHD_LOG_DUMP
+#define CFGP2P_ERR(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+			DHD_LOG_DUMP_WRITE("[%s] %s: ",	\
+			dhd_log_dump_get_timestamp(), __func__);	\
+			DHD_LOG_DUMP_WRITE args;	\
+		}									\
+	} while (0)
+#else
+#define CFGP2P_ERR(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#endif /* DHD_LOG_DUMP */
+#define	CFGP2P_INFO(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_INFO "CFGP2P-INFO) %s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_DBG(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_DBG) {			\
+			printk(KERN_INFO "CFGP2P-DEBUG) %s :", __func__);	\
+			printk args;							\
+		}									\
+	} while (0)
+
+#define	CFGP2P_ACTION(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_P2P_ACTION) {			\
+			printk(KERN_INFO "CFGP2P-ACTION) %s :", __func__);	\
+			printk args;							\
+		}									\
+	} while (0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+#define INIT_TIMER(timer, func, duration, extra_delay)	\
+	do {				   \
+		timer_setup(timer, func, 0); \
+		timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+		add_timer(timer); \
+	} while (0);
+#else
+#define INIT_TIMER(timer, func, duration, extra_delay)	\
+	do {				   \
+		init_timer(timer); \
+		timer->function = func; \
+		timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+		timer->data = (unsigned long) cfg; \
+		add_timer(timer); \
+	} while (0);
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#ifdef WL_CFG80211_STA_EVENT
+#undef WL_CFG80211_STA_EVENT
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF)
+#define WL_CFG80211_P2P_DEV_IF
+
+#ifdef WL_ENABLE_P2P_IF
+#undef WL_ENABLE_P2P_IF
+#endif
+
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#else
+#ifdef WLP2P
+#ifndef WL_ENABLE_P2P_IF
+/* Enable P2P network Interface if P2P support is enabled */
+#define WL_ENABLE_P2P_IF
+#endif /* WL_ENABLE_P2P_IF */
+#endif /* WLP2P */
+#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
+
+#ifndef WL_CFG80211_P2P_DEV_IF
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)))
+#error Disable 'WL_ENABLE_P2P_IF', if 'WL_CFG80211_P2P_DEV_IF' is enabled \
+	or kernel version is 3.8.0 or above
+#endif /* WL_ENABLE_P2P_IF && (WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 8, 0))) */
+
+#if !defined(WLP2P) && (defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF))
+#error WLP2P not defined
+#endif /* !WLP2P && (WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF) */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define bcm_struct_cfgdev	struct wireless_dev
+#else
+#define bcm_struct_cfgdev	struct net_device
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#define P2P_ECSA_CNT 50
+
+extern void
+wl_cfgp2p_listen_expired(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	ulong data
+#endif
+);
+extern bool
+wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len);
+extern bool
+wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len);
+extern void
+wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel);
+extern s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg);
+extern void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode,
+            u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+	chanspec_t chspec, s32 conn_idx);
+
+extern s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie,
+	u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans,
+	u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose);
+
+extern s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
+extern s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+            s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx);
+
+extern struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx);
+extern s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+
+extern s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+
+extern s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_increase_p2p_bw(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
+
+extern u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib);
+
+extern u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
+
+extern s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg);
+
+extern bool
+wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops);
+
+extern u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+	s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
+
+extern int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg);
+
+extern
+int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg);
+
+extern
+int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg);
+
+extern int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg);
+
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+extern void
+wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx);
+
+extern int
+wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request);
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+					(channel == SOCIAL_CHAN_2) || \
+					(channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN 11
+#define WL_P2P_TEMP_CHAN_5G 36
+#define WL_P2P_AF_STATUS_OFFSET 9
+
+/* If the provision discovery is for JOIN operations,
+ * or the device discoverability frame is destined to the GO,
+ * then we need not do an internal scan to find the GO.
+ */
+#define IS_ACTPUB_WITHOUT_GROUP_ID(p2p_ie, len) \
+	(wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL)
+
+#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \
+					((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \
+					(frame->action == P2PSD_ACTION_ID_GAS_CREQ)))
+
+#define IS_P2P_PUB_ACT_RSP_SUBTYPE(subtype) ((subtype == P2P_PAF_GON_RSP) || \
+							((subtype == P2P_PAF_GON_CONF) || \
+							(subtype == P2P_PAF_INVITE_RSP) || \
+							(subtype == P2P_PAF_PROVDIS_RSP)))
+#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3))
+#define IS_P2P_SSID(ssid, len) (!memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) && \
+					(len == WL_P2P_WILDCARD_SSID_LEN))
+#endif				/* _wl_cfgp2p_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.c b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
new file mode 100644
index 0000000..500e638
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
@@ -0,0 +1,3730 @@
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgvendor.c 710862 2017-07-14 07:43:59Z $
+ */
+
+/*
+ * New vendor interface addition to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+#include <802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_debug.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+#include <dhd_cfg80211.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+
+#include <ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+#include <wl_cfgvendor.h>
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#include <brcm_nl80211.h>
+
+#ifdef STAT_REPORT
+#include <wl_statreport.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+
+/*
+ * This API is to be used for asynchronous vendor events. It
+ * shouldn't be used in response to a vendor command from its
+ * doit handler context (wl_cfgvendor_send_cmd_reply should
+ * be used there instead).
+ */
+int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+	struct net_device *dev, int event_id, const void  *data, int len)
+{
+	gfp_t kflags;
+	struct sk_buff *skb;
+
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	/* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+	skb = cfg80211_vendor_event_alloc(wiphy, NULL, len, event_id, kflags);
+#else
+	skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+		/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+	if (!skb) {
+		WL_ERR(("skb alloc failed"));
+		return -ENOMEM;
+	}
+
+	/* Push the data to the skb */
+	nla_put_nohdr(skb, len, data);
+
+	cfg80211_vendor_event(skb, kflags);
+
+	return 0;
+}
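+
+/*
+ * Illustrative usage sketch: the event id and payload below are placeholders;
+ * real callers pass one of the driver's registered vendor event ids and a
+ * driver-defined payload structure.
+ *
+ *	struct example_payload { u32 reason; } p = { .reason = 1 };
+ *
+ *	(void)wl_cfgvendor_send_async_event(wiphy, ndev,
+ *		some_vendor_event_id, &p, sizeof(p));
+ */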
+
+static int
+wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy,
+	struct net_device *dev, const void  *data, int len)
+{
+	struct sk_buff *skb;
+
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		return -ENOMEM;
+	}
+
+	/* Push the data to the skb */
+	nla_put_nohdr(skb, len, data);
+
+	return cfg80211_vendor_cmd_reply(skb);
+}
+
+static int
+wl_cfgvendor_get_feature_set(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int reply;
+
+	reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg));
+
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        &reply, sizeof(int));
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+	return err;
+}
+
+static int
+wl_cfgvendor_get_feature_set_matrix(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct sk_buff *skb;
+	int reply;
+	int mem_needed, i;
+
+	mem_needed = VENDOR_REPLY_OVERHEAD +
+		(ATTRIBUTE_U32_LEN * MAX_FEATURE_SET_CONCURRRENT_GROUPS) + ATTRIBUTE_U32_LEN;
+
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, MAX_FEATURE_SET_CONCURRRENT_GROUPS);
+	for (i = 0; i < MAX_FEATURE_SET_CONCURRRENT_GROUPS; i++) {
+		reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), i);
+		if (reply != WIFI_FEATURE_INVALID) {
+			nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply);
+		}
+	}
+
+	err =  cfg80211_vendor_cmd_reply(skb);
+
+	if (unlikely(err)) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+	}
+exit:
+	return err;
+}
+
+static int
+wl_cfgvendor_set_rand_mac_oui(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type;
+	uint8 random_mac_oui[DOT11_OUI_LEN];
+
+	type = nla_type(data);
+
+	if (type == ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI) {
+		memcpy(random_mac_oui, nla_data(data), DOT11_OUI_LEN);
+
+		err = dhd_dev_cfg_rand_mac_oui(bcmcfg_to_prmry_ndev(cfg), random_mac_oui);
+
+		if (unlikely(err))
+			WL_ERR(("Bad OUI, could not set:%d \n", err));
+
+	} else {
+		err = -1;
+	}
+
+	return err;
+}
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+static int
+wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type;
+	u32 nodfs;
+
+	type = nla_type(data);
+	if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) {
+		nodfs = nla_get_u32(data);
+		err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs);
+	} else {
+		err = -1;
+	}
+	return err;
+}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+
+static int
+wl_cfgvendor_set_country(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void *data, int len)
+{
+	int err = BCME_ERROR, rem, type;
+	char country_code[WLC_CNTRY_BUF_SZ] = {0};
+	const struct nlattr *iter;
+
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case ANDR_WIFI_ATTRIBUTE_COUNTRY:
+				memcpy(country_code, nla_data(iter),
+					MIN(nla_len(iter), WLC_CNTRY_BUF_SZ));
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				return err;
+		}
+	}
+
+	err = wldev_set_country(wdev->netdev, country_code, true, true, -1);
+	if (err < 0) {
+		WL_ERR(("Set country failed ret:%d\n", err));
+	}
+
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+int
+wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+	struct net_device *dev, void  *data, int len, wl_vendor_event_t event)
+{
+	u16 kflags;
+	const void *ptr;
+	struct sk_buff *skb;
+	int malloc_len, total, iter_cnt_to_send, cnt;
+	gscan_results_cache_t *cache = (gscan_results_cache_t *)data;
+
+	total = len/sizeof(wifi_gscan_result_t);
+	while (total > 0) {
+		malloc_len = (total * sizeof(wifi_gscan_result_t)) + VENDOR_DATA_OVERHEAD;
+		if (malloc_len > NLMSG_DEFAULT_SIZE) {
+			malloc_len = NLMSG_DEFAULT_SIZE;
+		}
+		iter_cnt_to_send =
+		   (malloc_len - VENDOR_DATA_OVERHEAD)/sizeof(wifi_gscan_result_t);
+		total = total - iter_cnt_to_send;
+
+		kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+		/* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+		skb = cfg80211_vendor_event_alloc(wiphy, NULL, malloc_len, event, kflags);
+#else
+		skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+		/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+		if (!skb) {
+			WL_ERR(("skb alloc failed"));
+			return -ENOMEM;
+		}
+
+		while (cache && iter_cnt_to_send) {
+			ptr = (const void *) &cache->results[cache->tot_consumed];
+
+			if (iter_cnt_to_send < (cache->tot_count - cache->tot_consumed)) {
+				cnt = iter_cnt_to_send;
+			} else {
+				cnt = (cache->tot_count - cache->tot_consumed);
+			}
+
+			iter_cnt_to_send -= cnt;
+			cache->tot_consumed += cnt;
+			/* Push the data to the skb */
+			nla_append(skb, cnt * sizeof(wifi_gscan_result_t), ptr);
+			if (cache->tot_consumed == cache->tot_count) {
+				cache = cache->next;
+			}
+
+		}
+
+		cfg80211_vendor_event(skb, kflags);
+	}
+
+	return 0;
+}
+
+
+static int
+wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pno_gscan_capabilities_t *reply = NULL;
+	uint32 reply_len = 0;
+
+
+	reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+	   DHD_PNO_GET_CAPABILITIES, NULL, &reply_len);
+	if (!reply) {
+		WL_ERR(("Could not get capabilities\n"));
+		err = -EINVAL;
+		return err;
+	}
+
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        reply, reply_len);
+
+	if (unlikely(err)) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+	}
+
+	kfree(reply);
+	return err;
+}
+
+static int
+wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_results_cache_t *results, *iter;
+	uint32 reply_len, is_done = 1;
+	int32 mem_needed, num_results_iter;
+	wifi_gscan_result_t *ptr;
+	uint16 num_scan_ids, num_results;
+	struct sk_buff *skb;
+	struct nlattr *scan_hdr, *complete_flag;
+
+	err = dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg));
+	if (err != BCME_OK)
+		return -EBUSY;
+
+	err = dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+	if (err != BCME_OK) {
+		WL_ERR(("Can't obtain lock to access batch results %d\n", err));
+		return -EBUSY;
+	}
+	results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+	             DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);
+
+	if (!results) {
+		WL_ERR(("No results to send %d\n", err));
+		err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+		        results, 0);
+
+		if (unlikely(err))
+			WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+		dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+		return err;
+	}
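+	/* reply_len packs the number of scan ids in the low 16 bits and the
+	 * total number of results in the high 16 bits.
+	 */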
+	num_scan_ids = reply_len & 0xFFFF;
+	num_results = (reply_len & 0xFFFF0000) >> 16;
+	mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
+	             (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
+	             VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;
+
+	if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
+		mem_needed = (int32)NLMSG_DEFAULT_SIZE;
+	}
+
+	WL_TRACE(("is_done %d mem_needed %d max_mem %d\n", is_done, mem_needed,
+		(int)NLMSG_DEFAULT_SIZE));
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+		return -ENOMEM;
+	}
+	iter = results;
+	complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
+	                    sizeof(is_done));
+	mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);
+
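+	/* Emit one nested GSCAN_ATTRIBUTE_SCAN_RESULTS block per scan id while
+	 * the remaining skb budget can still hold the unconsumed part of a
+	 * cache entry.
+	 */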
+	while (iter) {
+		num_results_iter = (mem_needed - (int32)GSCAN_BATCH_RESULT_HDR_LEN);
+		num_results_iter /= (int32)sizeof(wifi_gscan_result_t);
+		if (num_results_iter <= 0 ||
+		    ((iter->tot_count - iter->tot_consumed) > num_results_iter)) {
+			break;
+		}
+		scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
+		/* no more room? we are done then (for now) */
+		if (scan_hdr == NULL) {
+			is_done = 0;
+			break;
+		}
+		nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
+		nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
+		nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
+
+		num_results_iter = iter->tot_count - iter->tot_consumed;
+
+		nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+		if (num_results_iter) {
+			ptr = &iter->results[iter->tot_consumed];
+			iter->tot_consumed += num_results_iter;
+			nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
+			 num_results_iter * sizeof(wifi_gscan_result_t), ptr);
+		}
+		nla_nest_end(skb, scan_hdr);
+		mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
+		    (num_results_iter * sizeof(wifi_gscan_result_t));
+		iter = iter->next;
+	}
+	/* Returns TRUE if all result consumed */
+	is_done = dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
+	memcpy(nla_data(complete_flag), &is_done, sizeof(is_done));
+	dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+	return cfg80211_vendor_cmd_reply(skb);
+}
+
+static int
+wl_cfgvendor_initiate_gscan(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type, tmp = len;
+	int run = 0xFF;
+	int flush = 0;
+	const struct nlattr *iter;
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE)
+			run = nla_get_u32(iter);
+		else if (type == GSCAN_ATTRIBUTE_FLUSH_FEATURE)
+			flush = nla_get_u32(iter);
+	}
+
+	if (run != 0xFF) {
+		err = dhd_dev_pno_run_gscan(bcmcfg_to_prmry_ndev(cfg), run, flush);
+
+		if (unlikely(err)) {
+			WL_ERR(("Could not run gscan:%d \n", err));
+		}
+		return err;
+	} else {
+		return -EINVAL;
+	}
+
+
+}
+
+static int
+wl_cfgvendor_enable_full_scan_result(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type;
+	bool real_time = FALSE;
+
+	type = nla_type(data);
+
+	if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) {
+		real_time = nla_get_u32(data);
+
+		err = dhd_dev_pno_enable_full_scan_result(bcmcfg_to_prmry_ndev(cfg), real_time);
+
+		if (unlikely(err)) {
+			WL_ERR(("Could not run gscan:%d \n", err));
+		}
+
+	} else {
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int
+wl_cfgvendor_set_scan_cfg_bucket(const struct nlattr *prev,
+	gscan_scan_params_t *scan_param, int num)
+{
+	struct dhd_pno_gscan_channel_bucket  *ch_bucket;
+	int k = 0;
+	int type, err = 0, rem;
+	const struct nlattr *cur, *next;
+
+	nla_for_each_nested(cur, prev, rem) {
+		type = nla_type(cur);
+		ch_bucket = scan_param->channel_bucket;
+		switch (type) {
+		case GSCAN_ATTRIBUTE_BUCKET_ID:
+			break;
+		case GSCAN_ATTRIBUTE_BUCKET_PERIOD:
+			if (nla_len(cur) != sizeof(uint32)) {
+				err = -EINVAL;
+				goto exit;
+			}
+
+			ch_bucket[num].bucket_freq_multiple =
+				nla_get_u32(cur) / MSEC_PER_SEC;
+			break;
+		case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS:
+			if (nla_len(cur) != sizeof(uint32)) {
+				err = -EINVAL;
+				goto exit;
+			}
+			ch_bucket[num].num_channels = nla_get_u32(cur);
+			if (ch_bucket[num].num_channels >
+				GSCAN_MAX_CHANNELS_IN_BUCKET) {
+				WL_ERR(("channel range:%d,bucket:%d\n",
+					ch_bucket[num].num_channels,
+					num));
+				err = -EINVAL;
+				goto exit;
+			}
+			break;
+		case GSCAN_ATTRIBUTE_BUCKET_CHANNELS:
+			nla_for_each_nested(next, cur, rem) {
+				if (k >= GSCAN_MAX_CHANNELS_IN_BUCKET)
+					break;
+				if (nla_len(next) != sizeof(uint32)) {
+					err = -EINVAL;
+					goto exit;
+				}
+				ch_bucket[num].chan_list[k] = nla_get_u32(next);
+				k++;
+			}
+			break;
+		case GSCAN_ATTRIBUTE_BUCKETS_BAND:
+			if (nla_len(cur) != sizeof(uint32)) {
+				err = -EINVAL;
+				goto exit;
+			}
+			ch_bucket[num].band = (uint16)nla_get_u32(cur);
+			break;
+		case GSCAN_ATTRIBUTE_REPORT_EVENTS:
+			if (nla_len(cur) != sizeof(uint32)) {
+				err = -EINVAL;
+				goto exit;
+			}
+			ch_bucket[num].report_flag = (uint8)nla_get_u32(cur);
+			break;
+		case GSCAN_ATTRIBUTE_BUCKET_STEP_COUNT:
+			if (nla_len(cur) != sizeof(uint32)) {
+				err = -EINVAL;
+				goto exit;
+			}
+			ch_bucket[num].repeat = (uint16)nla_get_u32(cur);
+			break;
+		case GSCAN_ATTRIBUTE_BUCKET_MAX_PERIOD:
+			if (nla_len(cur) != sizeof(uint32)) {
+				err = -EINVAL;
+				goto exit;
+			}
+			ch_bucket[num].bucket_max_multiple =
+				nla_get_u32(cur) / MSEC_PER_SEC;
+			break;
+		default:
+			WL_ERR(("unknown attr type:%d\n", type));
+			err = -EINVAL;
+			goto exit;
+		}
+	}
+
+exit:
+	return err;
+}
+
+static int
+wl_cfgvendor_set_scan_cfg(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_scan_params_t *scan_param;
+	int j = 0;
+	int type, tmp;
+	const struct nlattr *iter;
+
+	scan_param = kzalloc(sizeof(gscan_scan_params_t), GFP_KERNEL);
+	if (!scan_param) {
+		WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n"));
+		err = -ENOMEM;
+		return err;
+
+	}
+
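+	/* Default scan base period; overridden below if GSCAN_ATTRIBUTE_BASE_PERIOD
+	 * (given in milliseconds) is present.
+	 */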
+	scan_param->scan_fr = PNO_SCAN_MIN_FW_SEC;
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+
+		if (j >= GSCAN_MAX_CH_BUCKETS) {
+			break;
+		}
+
+		switch (type) {
+			case GSCAN_ATTRIBUTE_BASE_PERIOD:
+				if (nla_len(iter) != sizeof(uint32)) {
+					err = -EINVAL;
+					goto exit;
+				}
+				scan_param->scan_fr = nla_get_u32(iter) / MSEC_PER_SEC;
+				break;
+			case GSCAN_ATTRIBUTE_NUM_BUCKETS:
+				if (nla_len(iter) != sizeof(uint32)) {
+					err = -EINVAL;
+					goto exit;
+				}
+				scan_param->nchannel_buckets = nla_get_u32(iter);
+				if (scan_param->nchannel_buckets >=
+				    GSCAN_MAX_CH_BUCKETS) {
+					WL_ERR(("ncha_buck out of range %d\n",
+					scan_param->nchannel_buckets));
+					err = -EINVAL;
+					goto exit;
+				}
+				break;
+			case GSCAN_ATTRIBUTE_CH_BUCKET_1:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_2:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_3:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_4:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_5:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_6:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_7:
+				err = wl_cfgvendor_set_scan_cfg_bucket(iter, scan_param, j);
+				if (err < 0) {
+					WL_ERR(("set_scan_cfg_buck error:%d\n", err));
+					goto exit;
+				}
+				j++;
+				break;
+			default:
+				WL_ERR(("Unknown type %d\n", type));
+				err = -EINVAL;
+				goto exit;
+		}
+	}
+
+	err = dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+	     DHD_PNO_SCAN_CFG_ID, scan_param, FALSE);
+
+	if (err < 0) {
+		WL_ERR(("Could not set GSCAN scan cfg\n"));
+		err = -EINVAL;
+	}
+
+exit:
+	kfree(scan_param);
+	return err;
+
+}
+
+static int
+wl_cfgvendor_hotlist_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_hotlist_scan_params_t *hotlist_params;
+	int tmp, tmp1, tmp2, type, j = 0, dummy;
+	const struct nlattr *outer, *inner = NULL, *iter;
+	bool flush = FALSE;
+	struct bssid_t *pbssid;
+
+	BCM_REFERENCE(dummy);
+
+	if (len < sizeof(*hotlist_params) || len >= WLC_IOCTL_MAXLEN) {
+		WL_ERR(("buffer length :%d wrong - bail out.\n", len));
+		return -EINVAL;
+	}
+
+	hotlist_params = kzalloc(sizeof(*hotlist_params)
+		+ (sizeof(struct bssid_t) * (PFN_SWC_MAX_NUM_APS - 1)),
+		GFP_KERNEL);
+
+	if (!hotlist_params) {
+		WL_ERR(("Cannot Malloc memory.\n"));
+		return -ENOMEM;
+	}
+
+	hotlist_params->lost_ap_window = GSCAN_LOST_AP_WINDOW_DEFAULT;
+
+	nla_for_each_attr(iter, data, len, tmp2) {
+		type = nla_type(iter);
+		switch (type) {
+		case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS:
+			pbssid = hotlist_params->bssid;
+			nla_for_each_nested(outer, iter, tmp) {
+				nla_for_each_nested(inner, outer, tmp1) {
+					type = nla_type(inner);
+
+					switch (type) {
+					case GSCAN_ATTRIBUTE_BSSID:
+						if (nla_len(inner) != sizeof(pbssid[j].macaddr)) {
+							WL_ERR(("type:%d length:%d not matching.\n",
+								type, nla_len(inner)));
+							err = -EINVAL;
+							goto exit;
+						}
+						memcpy(
+							&(pbssid[j].macaddr),
+							nla_data(inner),
+							sizeof(pbssid[j].macaddr));
+						break;
+					case GSCAN_ATTRIBUTE_RSSI_LOW:
+						if (nla_len(inner) != sizeof(uint8)) {
+							WL_ERR(("type:%d length:%d not matching.\n",
+								type, nla_len(inner)));
+							err = -EINVAL;
+							goto exit;
+						}
+						pbssid[j].rssi_reporting_threshold =
+							(int8)nla_get_u8(inner);
+						break;
+					case GSCAN_ATTRIBUTE_RSSI_HIGH:
+						if (nla_len(inner) != sizeof(uint8)) {
+							WL_ERR(("type:%d length:%d not matching.\n",
+								type, nla_len(inner)));
+							err = -EINVAL;
+							goto exit;
+						}
+						dummy = (int8)nla_get_u8(inner);
+						break;
+					default:
+						WL_ERR(("ATTR unknown %d\n", type));
+						err = -EINVAL;
+						goto exit;
+					}
+				}
+				if (++j >= PFN_SWC_MAX_NUM_APS) {
+					WL_ERR(("cap hotlist max:%d\n", j));
+					break;
+				}
+			}
+			hotlist_params->nbssid = j;
+			break;
+		case GSCAN_ATTRIBUTE_HOTLIST_FLUSH:
+			if (nla_len(iter) != sizeof(uint8)) {
+				WL_ERR(("type:%d length:%d not matching.\n",
+					type, nla_len(iter)));
+				err = -EINVAL;
+				goto exit;
+			}
+			flush = nla_get_u8(iter);
+			break;
+		case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
+			if (nla_len(iter) != sizeof(uint32)) {
+				WL_ERR(("type:%d length:%d not matching.\n",
+					type, nla_len(iter)));
+				err = -EINVAL;
+				goto exit;
+			}
+			hotlist_params->lost_ap_window = (uint16)nla_get_u32(iter);
+			break;
+		default:
+			WL_ERR(("Unknown type %d\n", type));
+			err = -EINVAL;
+			goto exit;
+		}
+
+	}
+
+	if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+	      DHD_PNO_GEOFENCE_SCAN_CFG_ID, hotlist_params, flush) < 0) {
+		WL_ERR(("Could not set GSCAN HOTLIST cfg error: %d\n", err));
+		err = -EINVAL;
+		goto exit;
+	}
+exit:
+	kfree(hotlist_params);
+	return err;
+}
+
+static int wl_cfgvendor_epno_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pno_ssid_t *ssid_elem;
+	int tmp, tmp1, tmp2, type = 0, num = 0;
+	const struct nlattr *outer, *inner, *iter;
+	uint8 flush = FALSE, i = 0;
+	wl_pfn_ssid_params_t params;
+
+	nla_for_each_attr(iter, data, len, tmp2) {
+		type = nla_type(iter);
+		switch (type) {
+			case GSCAN_ATTRIBUTE_EPNO_SSID_LIST:
+				nla_for_each_nested(outer, iter, tmp) {
+					ssid_elem = (dhd_pno_ssid_t *)
+						dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+						DHD_PNO_GET_NEW_EPNO_SSID_ELEM,
+						NULL, &num);
+					if (!ssid_elem) {
+						WL_ERR(("Failed to get SSID LIST buffer\n"));
+						err = -ENOMEM;
+						goto exit;
+					}
+					i++;
+					nla_for_each_nested(inner, outer, tmp1) {
+						type = nla_type(inner);
+
+						switch (type) {
+							case GSCAN_ATTRIBUTE_EPNO_SSID:
+								memcpy(ssid_elem->SSID,
+								  nla_data(inner),
+								  DOT11_MAX_SSID_LEN);
+								break;
+							case GSCAN_ATTRIBUTE_EPNO_SSID_LEN:
+								ssid_elem->SSID_len =
+									nla_get_u32(inner);
+								if (ssid_elem->SSID_len >
+									DOT11_MAX_SSID_LEN) {
+									WL_ERR(("SSID too"
+									" long %d\n",
+									ssid_elem->SSID_len));
+									err = -EINVAL;
+									goto exit;
+								}
+								break;
+							case GSCAN_ATTRIBUTE_EPNO_FLAGS:
+								ssid_elem->flags =
+									nla_get_u32(inner);
+								ssid_elem->hidden =
+									((ssid_elem->flags &
+									DHD_EPNO_HIDDEN_SSID) != 0);
+								break;
+							case GSCAN_ATTRIBUTE_EPNO_AUTH:
+								ssid_elem->wpa_auth =
+								        nla_get_u32(inner);
+								break;
+						}
+					}
+					if (!ssid_elem->SSID_len) {
+						WL_ERR(("Broadcast SSID is illegal for ePNO\n"));
+						err = -EINVAL;
+						goto exit;
+					}
+					dhd_pno_translate_epno_fw_flags(&ssid_elem->flags);
+					dhd_pno_set_epno_auth_flag(&ssid_elem->wpa_auth);
+				}
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_SSID_NUM:
+				num = nla_get_u8(iter);
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_FLUSH:
+				flush = (bool)nla_get_u32(iter);
+				/* Flush attribute is expected before any ssid attribute */
+				if (i && flush) {
+					WL_ERR(("Bad attributes\n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				/* Need to flush driver and FW cfg */
+				dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+				DHD_PNO_EPNO_CFG_ID, NULL, flush);
+				dhd_dev_flush_fw_epno(bcmcfg_to_prmry_ndev(cfg));
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_5G_RSSI_THR:
+				params.min5G_rssi = nla_get_s8(iter);
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_2G_RSSI_THR:
+				params.min2G_rssi = nla_get_s8(iter);
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_INIT_SCORE_MAX:
+				params.init_score_max = nla_get_s16(iter);
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_CUR_CONN_BONUS:
+				params.cur_bssid_bonus = nla_get_s16(iter);
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_SAME_NETWORK_BONUS:
+				params.same_ssid_bonus = nla_get_s16(iter);
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_SECURE_BONUS:
+				params.secure_bonus = nla_get_s16(iter);
+				break;
+			case GSCAN_ATTRIBUTE_EPNO_5G_BONUS:
+				params.band_5g_bonus = nla_get_s16(iter);
+				break;
+			default:
+				WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+				err = -EINVAL;
+				goto exit;
+			}
+	}
+	if (i != num) {
+		WL_ERR(("%s: num_ssid %d does not match ssids sent %d\n", __FUNCTION__,
+		     num, i));
+		err = -EINVAL;
+	}
+exit:
+	/* Flush all configs if error condition */
+	if (err < 0) {
+		dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+		   DHD_PNO_EPNO_CFG_ID, NULL, TRUE);
+		dhd_dev_flush_fw_epno(bcmcfg_to_prmry_ndev(cfg));
+	} else if (type != GSCAN_ATTRIBUTE_EPNO_FLUSH) {
+		/* If the last attribute was FLUSH, nothing else to do */
+		dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+		DHD_PNO_EPNO_PARAMS_ID, &params, FALSE);
+		err = dhd_dev_set_epno(bcmcfg_to_prmry_ndev(cfg));
+	}
+	return err;
+}
+
+static int
+wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0, tmp, type;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_batch_params_t batch_param;
+	const struct nlattr *iter;
+
+	batch_param.mscan = batch_param.bestn = 0;
+	batch_param.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+
+		switch (type) {
+			case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN:
+				batch_param.bestn = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE:
+				batch_param.mscan = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_REPORT_THRESHOLD:
+				batch_param.buffer_threshold = nla_get_u32(iter);
+				break;
+			default:
+				WL_ERR(("Unknown type %d\n", type));
+				break;
+		}
+	}
+
+	if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+	       DHD_PNO_BATCH_SCAN_CFG_ID, &batch_param, FALSE) < 0) {
+		WL_ERR(("Could not set batch cfg\n"));
+		err = -EINVAL;
+		return err;
+	}
+
+	return err;
+}
+
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+static int
+wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0, type, band;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	uint16 *reply = NULL;
+	uint32 reply_len = 0, num_channels, mem_needed;
+	struct sk_buff *skb;
+
+	type = nla_type(data);
+
+	if (type == GSCAN_ATTRIBUTE_BAND) {
+		band = nla_get_u32(data);
+	} else {
+		return -EINVAL;
+	}
+
+	reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+	   DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len);
+
+	if (!reply) {
+		WL_ERR(("Could not get channel list\n"));
+		err = -EINVAL;
+		return err;
+	}
+	num_channels = reply_len / sizeof(uint32);
+	mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2);
+
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels);
+	nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply);
+
+	err =  cfg80211_vendor_cmd_reply(skb);
+
+	if (unlikely(err)) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+	}
+exit:
+	kfree(reply);
+	return err;
+}
+#endif	/* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+
+#ifdef RSSI_MONITOR_SUPPORT
+static int wl_cfgvendor_set_rssi_monitor(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0, tmp, type, start = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int8 max_rssi = 0, min_rssi = 0;
+	const struct nlattr *iter;
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		switch (type) {
+			case RSSI_MONITOR_ATTRIBUTE_MAX_RSSI:
+				max_rssi = (int8) nla_get_u32(iter);
+				break;
+			case RSSI_MONITOR_ATTRIBUTE_MIN_RSSI:
+				min_rssi = (int8) nla_get_u32(iter);
+				break;
+			case RSSI_MONITOR_ATTRIBUTE_START:
+				start = nla_get_u32(iter);
+		}
+	}
+
+	if (dhd_dev_set_rssi_monitor_cfg(bcmcfg_to_prmry_ndev(cfg),
+	       start, max_rssi, min_rssi) < 0) {
+		WL_ERR(("Could not set rssi monitor cfg\n"));
+		err = -EINVAL;
+	}
+	return err;
+}
+#endif /* RSSI_MONITOR_SUPPORT */
+
+#ifdef DHDTCPACK_SUPPRESS
+static int wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0, tmp, type;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+	uint8 enable = 0;
+	const struct nlattr *iter;
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
+			enable = (int8)nla_get_u32(iter);
+		}
+	}
+
+	if (dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable) < 0) {
+		WL_ERR(("Could not set TCP Ack Suppress mode cfg\n"));
+		err = -EINVAL;
+	}
+	return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef DHD_WAKE_STATUS
+static int
+wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void *data, int len)
+{
+	struct net_device *ndev = wdev_to_ndev(wdev);
+	wake_counts_t *pwake_count_info;
+	int ret, mem_needed;
+#if defined(DHD_WAKE_EVENT_STATUS) && defined(DHD_DEBUG)
+	int flowid;
+#endif	/* DHD_WAKE_EVENT_STATUS && DHD_DEBUG */
+	struct sk_buff *skb;
+	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
+
+	WL_DBG(("Recv get wake status info cmd.\n"));
+
+	pwake_count_info = dhd_get_wakecount(dhdp);
+	mem_needed =  VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 20) +
+		(WLC_E_LAST * sizeof(uint));
+
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("%s: can't allocate %d bytes\n", __FUNCTION__, mem_needed));
+		return -ENOMEM;
+	}
+#ifdef DHD_WAKE_EVENT_STATUS
+	WL_ERR(("pwake_count_info->rcwake %d\n", pwake_count_info->rcwake));
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
+	nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
+		pwake_count_info->rc_event);
+#ifdef DHD_DEBUG
+	for (flowid = 0; flowid < WLC_E_LAST; flowid++) {
+		if (pwake_count_info->rc_event[flowid] != 0) {
+			WL_ERR((" %s = %u\n", bcmevent_get_name(flowid),
+				pwake_count_info->rc_event[flowid]));
+		}
+	}
+#endif /* DHD_DEBUG */
+#endif /* DHD_WAKE_EVENT_STATUS */
+#ifdef DHD_WAKE_RX_STATUS
+	WL_ERR(("pwake_count_info->rxwake %d\n", pwake_count_info->rxwake));
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
+		pwake_count_info->rx_multi_ipv4);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
+		pwake_count_info->rx_multi_ipv6);
+	nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
+		pwake_count_info->rx_multi_other);
+#endif /* #ifdef DHD_WAKE_RX_STATUS */
+	ret = cfg80211_vendor_cmd_reply(skb);
+	if (unlikely(ret)) {
+		WL_ERR(("Vendor cmd reply for -get wake status failed:%d \n", ret));
+	}
+exit:
+	return ret;
+}
+#endif /* DHD_WAKE_STATUS */
+
+#ifdef RTT_SUPPORT
+void
+wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
+{
+	struct wireless_dev *wdev = (struct wireless_dev *)ctx;
+	struct wiphy *wiphy;
+	struct sk_buff *skb;
+	uint32 evt_complete = 0;
+	gfp_t kflags;
+	rtt_result_t *rtt_result;
+	rtt_results_header_t *rtt_header;
+	struct list_head *rtt_cache_list;
+	struct nlattr *rtt_nl_hdr;
+	wiphy = wdev->wiphy;
+
+	WL_DBG(("In\n"));
+	/* Push the data to the skb */
+	if (!rtt_data) {
+		WL_ERR(("rtt_data is NULL\n"));
+		return;
+	}
+	rtt_cache_list = (struct list_head *)rtt_data;
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
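+	/* An empty cache list means there are no per-target results to report,
+	 * so only a completion event is sent.
+	 */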
+	if (list_empty(rtt_cache_list)) {
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+		skb = cfg80211_vendor_event_alloc(wiphy, NULL, 100,
+			GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#else
+		skb = cfg80211_vendor_event_alloc(wiphy, 100, GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+		/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+		if (!skb) {
+			WL_ERR(("skb alloc failed"));
+			return;
+		}
+		evt_complete = 1;
+		nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+		cfg80211_vendor_event(skb, kflags);
+		return;
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	list_for_each_entry(rtt_header, rtt_cache_list, list) {
+		/* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+		skb = cfg80211_vendor_event_alloc(wiphy, NULL, rtt_header->result_tot_len + 100,
+			GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#else
+		skb = cfg80211_vendor_event_alloc(wiphy, rtt_header->result_tot_len + 100,
+			GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+		/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+		if (!skb) {
+			WL_ERR(("skb alloc failed"));
+			return;
+		}
+		if (list_is_last(&rtt_header->list, rtt_cache_list)) {
+			evt_complete = 1;
+		}
+		nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
+		rtt_nl_hdr = nla_nest_start(skb, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
+		if (!rtt_nl_hdr) {
+			WL_ERR(("rtt_nl_hdr is NULL\n"));
+			break;
+		}
+		nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &rtt_header->peer_mac);
+		nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
+		list_for_each_entry(rtt_result, &rtt_header->result_list, list) {
+			nla_put(skb, RTT_ATTRIBUTE_RESULT,
+				rtt_result->report_len, &rtt_result->report);
+		}
+		nla_nest_end(skb, rtt_nl_hdr);
+		cfg80211_vendor_event(skb, kflags);
+	}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+static int
+wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len) {
+	int err = 0, rem, rem1, rem2, type;
+	int target_cnt;
+	rtt_config_params_t rtt_param;
+	rtt_target_info_t* rtt_target = NULL;
+	const struct nlattr *iter, *iter1, *iter2;
+	int8 eabuf[ETHER_ADDR_STR_LEN];
+	int8 chanbuf[CHANSPEC_STR_LEN];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	rtt_capabilities_t capability;
+
+	memset(&rtt_param, 0, sizeof(rtt_param));
+
+	WL_DBG(("In\n"));
+	err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt);
+	if (err < 0) {
+		WL_ERR(("failed to register rtt_noti_callback\n"));
+		goto exit;
+	}
+	err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+	if (err < 0) {
+		WL_ERR(("failed to get the capability\n"));
+		goto exit;
+	}
+
+	if (len <= 0) {
+		WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
+		err = BCME_ERROR;
+		goto exit;
+	}
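+	/* RTT_ATTRIBUTE_TARGET_CNT is expected before RTT_ATTRIBUTE_TARGET_INFO
+	 * so that target_info is allocated before it is populated.
+	 */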
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+		case RTT_ATTRIBUTE_TARGET_CNT:
+			target_cnt = nla_get_u8(iter);
+			if ((target_cnt <= 0) || (target_cnt > RTT_MAX_TARGET_CNT)) {
+				WL_ERR(("target_cnt is not valid : %d\n",
+					target_cnt));
+				err = BCME_RANGE;
+				goto exit;
+			}
+			rtt_param.rtt_target_cnt = target_cnt;
+
+			rtt_param.target_info = kzalloc(TARGET_INFO_SIZE(target_cnt), GFP_KERNEL);
+			if (rtt_param.target_info == NULL) {
+				WL_ERR(("failed to allocate target info for (%d)\n", target_cnt));
+				err = BCME_NOMEM;
+				goto exit;
+			}
+			break;
+		case RTT_ATTRIBUTE_TARGET_INFO:
+			/* Added this variable for safe check to avoid crash
+			 * incase the caller did not respect the order
+			 */
+			if (rtt_param.target_info == NULL) {
+				WL_ERR(("rtt_target_info is NULL\n"));
+				err = BCME_NOMEM;
+				goto exit;
+			}
+			rtt_target = rtt_param.target_info;
+			nla_for_each_nested(iter1, iter, rem1) {
+				nla_for_each_nested(iter2, iter1, rem2) {
+					type = nla_type(iter2);
+					switch (type) {
+					case RTT_ATTRIBUTE_TARGET_MAC:
+						memcpy(&rtt_target->addr, nla_data(iter2),
+							ETHER_ADDR_LEN);
+						break;
+					case RTT_ATTRIBUTE_TARGET_TYPE:
+						rtt_target->type = nla_get_u8(iter2);
+						if (rtt_target->type == RTT_INVALID ||
+							(rtt_target->type == RTT_ONE_WAY &&
+							!capability.rtt_one_sided_supported)) {
+							WL_ERR(("doesn't support RTT type"
+								" : %d\n",
+								rtt_target->type));
+							err = -EINVAL;
+							goto exit;
+						}
+						break;
+					case RTT_ATTRIBUTE_TARGET_PEER:
+						rtt_target->peer = nla_get_u8(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_CHAN:
+						memcpy(&rtt_target->channel, nla_data(iter2),
+							sizeof(rtt_target->channel));
+						break;
+					case RTT_ATTRIBUTE_TARGET_PERIOD:
+						rtt_target->burst_period = nla_get_u32(iter2);
+						if (rtt_target->burst_period < 32) {
+							/* 100ms unit */
+							rtt_target->burst_period *= 100;
+						} else {
+							WL_ERR(("%d value must be in (0-31)\n",
+								rtt_target->burst_period));
+							err = -EINVAL;
+							goto exit;
+						}
+						break;
+					case RTT_ATTRIBUTE_TARGET_NUM_BURST:
+						rtt_target->num_burst = nla_get_u32(iter2);
+						if (rtt_target->num_burst > 16) {
+							WL_ERR(("%d value must be in (0-15)\n",
+								rtt_target->num_burst));
+							err = -EINVAL;
+							goto exit;
+						}
+						rtt_target->num_burst = BIT(rtt_target->num_burst);
+						break;
+					case RTT_ATTRIBUTE_TARGET_NUM_FTM_BURST:
+						rtt_target->num_frames_per_burst =
+						nla_get_u32(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTM:
+						rtt_target->num_retries_per_ftm =
+						nla_get_u32(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTMR:
+						rtt_target->num_retries_per_ftmr =
+						nla_get_u32(iter2);
+						if (rtt_target->num_retries_per_ftmr > 3) {
+							WL_ERR(("%d value must be in (0-3)\n",
+								rtt_target->num_retries_per_ftmr));
+							err = -EINVAL;
+							goto exit;
+						}
+						break;
+					case RTT_ATTRIBUTE_TARGET_LCI:
+						rtt_target->LCI_request = nla_get_u8(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_LCR:
+						rtt_target->LCR_request = nla_get_u8(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_BURST_DURATION:
+						if ((nla_get_u32(iter2) > 1 &&
+							nla_get_u32(iter2) < 12)) {
+							rtt_target->burst_duration =
+							dhd_rtt_idx_to_burst_duration(
+								nla_get_u32(iter2));
+						} else if (nla_get_u32(iter2) == 15) {
+							/* use default value */
+							rtt_target->burst_duration = 0;
+						} else {
+							WL_ERR(("%d value must be in (2-11) or 15\n",
+								nla_get_u32(iter2)));
+							err = -EINVAL;
+							goto exit;
+						}
+						break;
+					case RTT_ATTRIBUTE_TARGET_BW:
+						rtt_target->bw = nla_get_u8(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_PREAMBLE:
+						rtt_target->preamble = nla_get_u8(iter2);
+						break;
+					}
+				}
+				/* convert to chanspec value */
+				rtt_target->chanspec =
+					dhd_rtt_convert_to_chspec(rtt_target->channel);
+				if (rtt_target->chanspec == 0) {
+					WL_ERR(("Channel is not valid \n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				WL_INFORM(("Target addr %s, Channel : %s for RTT \n",
+					bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr,
+					eabuf),
+					wf_chspec_ntoa(rtt_target->chanspec, chanbuf)));
+				rtt_target++;
+			}
+			break;
+		}
+	}
+	WL_DBG(("leave :target_cnt : %d\n", rtt_param.rtt_target_cnt));
+	if (dhd_dev_rtt_set_cfg(bcmcfg_to_prmry_ndev(cfg), &rtt_param) < 0) {
+		WL_ERR(("Could not set RTT configuration\n"));
+		err = -EINVAL;
+	}
+exit:
+	/* free the target info list */
+	if (rtt_param.target_info) {
+		kfree(rtt_param.target_info);
+		rtt_param.target_info = NULL;
+	}
+	return err;
+}
+
+static int
+wl_cfgvendor_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+	int err = 0, rem, type, target_cnt = 0;
+	int target_cnt_chk = 0;
+	const struct nlattr *iter;
+	struct ether_addr *mac_list = NULL, *mac_addr = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (len <= 0) {
+		WL_ERR(("Length of nlattr is not valid len : %d\n", len));
+		err = -EINVAL;
+		goto exit;
+	}
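+	/* A target count of zero or out of range cancels the whole ongoing
+	 * RTT session (mac_list stays NULL in that case).
+	 */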
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+		case RTT_ATTRIBUTE_TARGET_CNT:
+			if (mac_list != NULL) {
+				WL_ERR(("mac_list is not NULL\n"));
+				err = -EINVAL;
+				goto exit;
+			}
+			target_cnt = nla_get_u8(iter);
+			if ((target_cnt > 0) && (target_cnt < RTT_MAX_TARGET_CNT)) {
+				mac_list = (struct ether_addr *)kzalloc(target_cnt * ETHER_ADDR_LEN,
+					GFP_KERNEL);
+				if (mac_list == NULL) {
+					WL_ERR(("failed to allocate mem for mac list\n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				mac_addr = &mac_list[0];
+			} else {
+				/* cancel the current whole RTT process */
+				goto cancel;
+			}
+			break;
+		case RTT_ATTRIBUTE_TARGET_MAC:
+			if (mac_addr) {
+				memcpy(mac_addr++, nla_data(iter), ETHER_ADDR_LEN);
+				target_cnt_chk++;
+				if (target_cnt_chk > target_cnt) {
+					WL_ERR(("over target count\n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				break;
+			} else {
+				WL_ERR(("mac_list is NULL\n"));
+				err = -EINVAL;
+				goto exit;
+			}
+		}
+	}
+cancel:
+	if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
+		WL_ERR(("Could not cancel RTT configuration\n"));
+		err = -EINVAL;
+	}
+
+exit:
+	if (mac_list) {
+		kfree(mac_list);
+	}
+	return err;
+}
+
+static int
+wl_cfgvendor_rtt_get_capability(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	rtt_capabilities_t capability;
+
+	err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+	if (unlikely(err)) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+		goto exit;
+	}
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        &capability, sizeof(capability));
+
+	if (unlikely(err)) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+	}
+exit:
+	return err;
+}
+static int
+get_responder_info(struct bcm_cfg80211 *cfg,
+	struct wifi_rtt_responder *responder_info)
+{
+	int err = 0;
+	rtt_capabilities_t capability;
+	err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+	if (unlikely(err)) {
+		WL_ERR(("Could not get responder capability:%d \n", err));
+		return err;
+	}
+	if (capability.preamble_support & RTT_PREAMBLE_VHT) {
+		responder_info->preamble |= RTT_PREAMBLE_VHT;
+	}
+	if (capability.preamble_support & RTT_PREAMBLE_HT) {
+		responder_info->preamble |= RTT_PREAMBLE_HT;
+	}
+	err = dhd_dev_rtt_avail_channel(bcmcfg_to_prmry_ndev(cfg), &(responder_info->channel));
+	if (unlikely(err)) {
+		WL_ERR(("Could not get available channel:%d \n", err));
+		return err;
+	}
+	return err;
+}
+static int
+wl_cfgvendor_rtt_get_responder_info(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wifi_rtt_responder_t responder_info;
+
+	WL_DBG(("Recv -get_avail_ch command \n"));
+
+	memset(&responder_info, 0, sizeof(responder_info));
+	err = get_responder_info(cfg, &responder_info);
+	if (unlikely(err)) {
+		WL_ERR(("Failed to get responder info:%d \n", err));
+		return err;
+	}
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        &responder_info, sizeof(responder_info));
+
+	if (unlikely(err)) {
+		WL_ERR(("Vendor cmd reply for -get_avail_ch failed ret:%d \n", err));
+	}
+	return err;
+}
+
+static int
+wl_cfgvendor_rtt_set_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+	wifi_rtt_responder_t responder_info;
+
+	WL_DBG(("Recv rtt -enable_resp cmd.\n"));
+
+	memset(&responder_info, 0, sizeof(responder_info));
+
+	/*
+	 * Passing channel as NULL until implementation
+	 * to get chan info from upper layers is done
+	 */
+	err = dhd_dev_rtt_enable_responder(ndev, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("Could not enable responder ret:%d \n", err));
+		goto done;
+	}
+	err = get_responder_info(cfg, &responder_info);
+	if (unlikely(err)) {
+		WL_ERR(("Failed to get responder info:%d \n", err));
+		dhd_dev_rtt_cancel_responder(ndev);
+		goto done;
+	}
+done:
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, ndev,
+			&responder_info, sizeof(responder_info));
+
+	if (unlikely(err)) {
+		WL_ERR(("Vendor cmd reply for -enable_resp failed ret:%d \n", err));
+	}
+	return err;
+}
+
+static int
+wl_cfgvendor_rtt_cancel_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	WL_DBG(("Recv rtt -cancel_resp cmd \n"));
+
+	err = dhd_dev_rtt_cancel_responder(bcmcfg_to_prmry_ndev(cfg));
+	if (unlikely(err)) {
+		WL_ERR(("Vendor cmd -cancel_resp failed ret:%d \n", err));
+	}
+	return err;
+}
+#endif /* RTT_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+static int wl_cfgvendor_enable_lazy_roam(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = -EINVAL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type;
+	uint32 lazy_roam_enable_flag;
+
+	type = nla_type(data);
+
+	if (type == GSCAN_ATTRIBUTE_LAZY_ROAM_ENABLE) {
+		lazy_roam_enable_flag = nla_get_u32(data);
+
+		err = dhd_dev_lazy_roam_enable(bcmcfg_to_prmry_ndev(cfg),
+		           lazy_roam_enable_flag);
+
+		if (unlikely(err))
+			WL_ERR(("Could not enable lazy roam:%d \n", err));
+
+	}
+	return err;
+}
+
+static int wl_cfgvendor_set_lazy_roam_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0, tmp, type;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wlc_roam_exp_params_t roam_param;
+	const struct nlattr *iter;
+
+	memset(&roam_param, 0, sizeof(roam_param));
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+
+		switch (type) {
+			case GSCAN_ATTRIBUTE_A_BAND_BOOST_THRESHOLD:
+				roam_param.a_band_boost_threshold = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_A_BAND_PENALTY_THRESHOLD:
+				roam_param.a_band_penalty_threshold = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_A_BAND_BOOST_FACTOR:
+				roam_param.a_band_boost_factor = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_A_BAND_PENALTY_FACTOR:
+				roam_param.a_band_penalty_factor = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_A_BAND_MAX_BOOST:
+				roam_param.a_band_max_boost = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_LAZY_ROAM_HYSTERESIS:
+				roam_param.cur_bssid_boost = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_ALERT_ROAM_RSSI_TRIGGER:
+				roam_param.alert_roam_trigger_threshold = nla_get_u32(iter);
+				break;
+		}
+	}
+
+	if (dhd_dev_set_lazy_roam_cfg(bcmcfg_to_prmry_ndev(cfg), &roam_param) < 0) {
+		WL_ERR(("Could not set batch cfg\n"));
+		err = -EINVAL;
+	}
+	return err;
+}
+
+/* small helper function */
+static wl_bssid_pref_cfg_t *
+create_bssid_pref_cfg(uint32 num)
+{
+	uint32 mem_needed;
+	wl_bssid_pref_cfg_t *bssid_pref;
+
+	mem_needed = sizeof(wl_bssid_pref_cfg_t);
+	if (num)
+		mem_needed += (num - 1) * sizeof(wl_bssid_pref_list_t);
+	bssid_pref = (wl_bssid_pref_cfg_t *) kmalloc(mem_needed, GFP_KERNEL);
+	return bssid_pref;
+}
+
+static int
+wl_cfgvendor_set_bssid_pref(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wl_bssid_pref_cfg_t *bssid_pref = NULL;
+	wl_bssid_pref_list_t *bssids;
+	int tmp, tmp1, tmp2, type;
+	const struct nlattr *outer, *inner, *iter;
+	uint32 flush = 0, i = 0, num = 0;
+
+	/* Assumption: NUM attribute must come first */
+	nla_for_each_attr(iter, data, len, tmp2) {
+		type = nla_type(iter);
+		switch (type) {
+			case GSCAN_ATTRIBUTE_NUM_BSSID:
+				num = nla_get_u32(iter);
+				if (num > MAX_BSSID_PREF_LIST_NUM) {
+					WL_ERR(("Too many Preferred BSSIDs!\n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				break;
+			case GSCAN_ATTRIBUTE_BSSID_PREF_FLUSH:
+				flush = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_BSSID_PREF_LIST:
+				if (!num)
+					return -EINVAL;
+				if ((bssid_pref = create_bssid_pref_cfg(num)) == NULL) {
+					WL_ERR(("%s: Can't malloc memory\n", __FUNCTION__));
+					err = -ENOMEM;
+					goto exit;
+				}
+				bssid_pref->count = num;
+				bssids = bssid_pref->bssids;
+				nla_for_each_nested(outer, iter, tmp) {
+					if (i >= num) {
+						WL_ERR(("CFGs don't seem right!\n"));
+						err = -EINVAL;
+						goto exit;
+					}
+					nla_for_each_nested(inner, outer, tmp1) {
+						type = nla_type(inner);
+						switch (type) {
+							case GSCAN_ATTRIBUTE_BSSID_PREF:
+								memcpy(&(bssids[i].bssid),
+								  nla_data(inner), ETHER_ADDR_LEN);
+								/* not used for now */
+								bssids[i].flags = 0;
+								break;
+							case GSCAN_ATTRIBUTE_RSSI_MODIFIER:
+								bssids[i].rssi_factor =
+								       (int8) nla_get_u32(inner);
+								break;
+						}
+					}
+					i++;
+				}
+				break;
+			default:
+				WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+				break;
+			}
+	}
+
+	if (!bssid_pref) {
+		/* What if only flush is desired? */
+		if (flush) {
+			if ((bssid_pref = create_bssid_pref_cfg(0)) == NULL) {
+				WL_ERR(("%s: Can't malloc memory\n", __FUNCTION__));
+				err = -ENOMEM;
+				goto exit;
+			}
+			bssid_pref->count = 0;
+		} else {
+			err = -EINVAL;
+			goto exit;
+		}
+	}
+	err = dhd_dev_set_lazy_roam_bssid_pref(bcmcfg_to_prmry_ndev(cfg),
+	          bssid_pref, flush);
+exit:
+	kfree(bssid_pref);
+	return err;
+}
+
+static int
+wl_cfgvendor_set_bssid_blacklist(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	maclist_t *blacklist = NULL;
+	int err = 0;
+	int type, tmp;
+	const struct nlattr *iter;
+	uint32 mem_needed = 0, flush = 0, i = 0, num = 0;
+
+	/* Assumption: NUM attribute must come first */
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		switch (type) {
+			case GSCAN_ATTRIBUTE_NUM_BSSID:
+				num = nla_get_u32(iter);
+				if (num > MAX_BSSID_BLACKLIST_NUM) {
+					WL_ERR(("Too many Blacklist BSSIDs!\n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				break;
+			case GSCAN_ATTRIBUTE_BSSID_BLACKLIST_FLUSH:
+				flush = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_BLACKLIST_BSSID:
+				if (num) {
+					if (!blacklist) {
+						mem_needed = sizeof(maclist_t) +
+						     sizeof(struct ether_addr) * (num - 1);
+						blacklist = (maclist_t *)
+						      kmalloc(mem_needed, GFP_KERNEL);
+						if (!blacklist) {
+							WL_ERR(("%s: Can't malloc %d bytes\n",
+							     __FUNCTION__, mem_needed));
+							err = -ENOMEM;
+							goto exit;
+						}
+						blacklist->count = num;
+					}
+					if (i >= num) {
+						WL_ERR(("CFGs don't seem right!\n"));
+						err = -EINVAL;
+						goto exit;
+					}
+					memcpy(&(blacklist->ea[i]),
+					  nla_data(iter), ETHER_ADDR_LEN);
+					i++;
+				}
+				break;
+			default:
+				WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+				break;
+			}
+	}
+	err = dhd_dev_set_blacklist_bssid(bcmcfg_to_prmry_ndev(cfg),
+	          blacklist, mem_needed, flush);
+exit:
+	kfree(blacklist);
+	return err;
+}
+
+static int
+wl_cfgvendor_set_ssid_whitelist(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wl_ssid_whitelist_t *ssid_whitelist = NULL;
+	wlc_ssid_t *ssid_elem;
+	int tmp, tmp2, mem_needed = 0, type;
+	const struct nlattr *inner, *iter;
+	uint32 flush = 0, i = 0, num = 0;
+
+	/* Assumption: NUM attribute must come first */
+	nla_for_each_attr(iter, data, len, tmp2) {
+		type = nla_type(iter);
+		switch (type) {
+			case GSCAN_ATTRIBUTE_NUM_WL_SSID:
+				num = nla_get_u32(iter);
+				if (num > MAX_SSID_WHITELIST_NUM) {
+					WL_ERR(("Too many WL SSIDs!\n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				mem_needed = sizeof(wl_ssid_whitelist_t);
+				if (num)
+					mem_needed += (num - 1) * sizeof(ssid_info_t);
+				ssid_whitelist = (wl_ssid_whitelist_t *)
+				        kzalloc(mem_needed, GFP_KERNEL);
+				if (ssid_whitelist == NULL) {
+					WL_ERR(("%s: Can't malloc %d bytes\n",
+					      __FUNCTION__, mem_needed));
+					err = -ENOMEM;
+					goto exit;
+				}
+				ssid_whitelist->ssid_count = num;
+				break;
+			case GSCAN_ATTRIBUTE_WL_SSID_FLUSH:
+				flush = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_WHITELIST_SSID_ELEM:
+				if (!num || !ssid_whitelist) {
+					WL_ERR(("num ssid is not set!\n"));
+					return -EINVAL;
+				}
+				if (i >= num) {
+					WL_ERR(("CFGs don't seem right!\n"));
+					err = -EINVAL;
+					goto exit;
+				}
+				ssid_elem = &ssid_whitelist->ssids[i];
+				nla_for_each_nested(inner, iter, tmp) {
+					type = nla_type(inner);
+					switch (type) {
+						case GSCAN_ATTRIBUTE_WHITELIST_SSID:
+							memcpy(ssid_elem->SSID,
+							  nla_data(inner),
+							  DOT11_MAX_SSID_LEN);
+							break;
+						case GSCAN_ATTRIBUTE_WL_SSID_LEN:
+							ssid_elem->SSID_len = (uint8)
+							        nla_get_u32(inner);
+							break;
+					}
+				}
+				i++;
+				break;
+			default:
+				WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+				break;
+		}
+	}
+
+	err = dhd_dev_set_whitelist_ssid(bcmcfg_to_prmry_ndev(cfg),
+	          ssid_whitelist, mem_needed, flush);
+exit:
+	kfree(ssid_whitelist);
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+static int
+wl_cfgvendor_priv_string_handler(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int ret = 0;
+	int ret_len = 0, payload = 0, msglen;
+	const struct bcm_nlmsg_hdr *nlioc = data;
+	void *buf = NULL, *cur;
+	int maxmsglen = PAGE_SIZE - 0x100;
+	struct sk_buff *reply;
+
+	WL_ERR(("entry: cmd = %d\n", nlioc->cmd));
+
+	len -= sizeof(struct bcm_nlmsg_hdr);
+	ret_len = nlioc->len;
+	if (ret_len > 0 || len > 0) {
+		if (len > DHD_IOCTL_MAXLEN) {
+			WL_ERR(("oversize input buffer %d\n", len));
+			len = DHD_IOCTL_MAXLEN;
+		}
+		if (ret_len > DHD_IOCTL_MAXLEN) {
+			WL_ERR(("oversize return buffer %d\n", ret_len));
+			ret_len = DHD_IOCTL_MAXLEN;
+		}
+		payload = max(ret_len, len) + 1;
+		buf = vzalloc(payload);
+		if (!buf) {
+			return -ENOMEM;
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		memcpy(buf, (void *)((char *)nlioc + nlioc->offset), len);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		*((char *)buf + len) = '\0';
+	}
+
+	ret = dhd_cfgvendor_priv_string_handler(cfg, wdev, nlioc, buf);
+	if (ret) {
+		WL_ERR(("dhd_cfgvendor returned error %d", ret));
+		vfree(buf);
+		return ret;
+	}
+	cur = buf;
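+	/* Send the ioctl return buffer back in chunks of at most maxmsglen
+	 * bytes, one vendor command reply per chunk.
+	 */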
+	while (ret_len > 0) {
+		msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
+		ret_len -= msglen;
+		payload = msglen + sizeof(msglen);
+		reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
+		if (!reply) {
+			WL_ERR(("Failed to allocate reply msg\n"));
+			ret = -ENOMEM;
+			break;
+		}
+
+		if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) ||
+			nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) {
+			kfree_skb(reply);
+			ret = -ENOBUFS;
+			break;
+		}
+
+		ret = cfg80211_vendor_cmd_reply(reply);
+		if (ret) {
+			WL_ERR(("testmode reply failed:%d\n", ret));
+			break;
+		}
+		cur = (void *)((char *)cur + msglen);
+	}
+
+	return ret;
+}
+
+struct net_device *
+wl_cfgvendor_get_ndev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+	const void *data, unsigned long int *out_addr)
+{
+	char *pos, *pos1;
+	char ifname[IFNAMSIZ + 1] = {0};
+	struct net_info *iter, *next;
+	struct net_device *ndev = NULL;
+	*out_addr = (unsigned long int) data; /* point to command str by default */
+
+	/* check whether ifname=<ifname> is provided in the command */
+	pos = strstr(data, "ifname=");
+	if (pos) {
+		pos += strlen("ifname=");
+		pos1 = strstr(pos, " ");
+		if (!pos1) {
+			WL_ERR(("command format error \n"));
+			return NULL;
+		}
+		memcpy(ifname, pos, (pos1 - pos));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+		for_each_ndev(cfg, iter, next) {
+			if (iter->ndev) {
+				if (strncmp(iter->ndev->name, ifname,
+					strlen(iter->ndev->name)) == 0) {
+					/* matching ifname found */
+					WL_DBG(("matching interface (%s) found ndev:%p \n",
+						iter->ndev->name, iter->ndev));
+					*out_addr = (unsigned long int)(pos1 + 1);
+					/* Returns the command portion after ifname=<name> */
+					return iter->ndev;
+				}
+			}
+		}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+		WL_ERR(("Couldn't find ifname:%s in the netinfo list \n",
+			ifname));
+		return NULL;
+	}
+
+	/* If ifname=<name> arg is not provided, use default ndev */
+	ndev = wdev->netdev ? wdev->netdev : bcmcfg_to_prmry_ndev(cfg);
+	WL_DBG(("Using default ndev (%s) \n", ndev->name));
+	return ndev;
+}
+
+/* Max length for the reply buffer. For BRCM_ATTR_DRIVER_CMD, the reply
+ * would be a formatted string and reply buf would be the size of the
+ * string.
+ */
+#define WL_DRIVER_PRIV_CMD_LEN 512
+static int
+wl_cfgvendor_priv_bcm_handler(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	const struct nlattr *iter;
+	int err = 0;
+	int data_len = 0, cmd_len = 0, tmp = 0, type = 0;
+	struct net_device *ndev = wdev->netdev;
+	char *reply_buf = NULL;
+	char *cmd = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int bytes_written;
+	struct net_device *net = NULL;
+	unsigned long int cmd_out = 0;
+	u32 reply_len = WL_DRIVER_PRIV_CMD_LEN;
+
+
+	WL_DBG(("%s: Enter \n", __func__));
+
+	/* hold wake lock */
+	net_os_wake_lock(ndev);
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		cmd = nla_data(iter);
+		cmd_len = nla_len(iter);
+
+		WL_DBG(("%s: type: %d cmd_len:%d cmd_ptr:%p \n", __func__, type, cmd_len, cmd));
+		if (!cmd || !cmd_len) {
+			WL_ERR(("Invalid cmd data \n"));
+			err = -EINVAL;
+			goto exit;
+		}
+
+		if (type == BRCM_ATTR_DRIVER_CMD) {
+			if (cmd_len >= WL_DRIVER_PRIV_CMD_LEN) {
+				WL_ERR(("Unexpected command length. Ignore the command\n"));
+				err = -EINVAL;
+				goto exit;
+			}
+			net = wl_cfgvendor_get_ndev(cfg, wdev, cmd, &cmd_out);
+			if (!cmd_out || !net) {
+				err = -ENODEV;
+				goto exit;
+			}
+			cmd = (char *)cmd_out;
+			reply_buf = kzalloc(reply_len, GFP_KERNEL);
+			if (!reply_buf) {
+				WL_ERR(("memory alloc failed for %u \n", cmd_len));
+				err = -ENOMEM;
+				goto exit;
+			}
+			memcpy(reply_buf, cmd, cmd_len);
+			WL_DBG(("vendor_command: %s len: %u \n", cmd, cmd_len));
+			bytes_written = wl_handle_private_cmd(net, reply_buf, reply_len);
+			WL_DBG(("bytes_written: %d \n", bytes_written));
+			if (bytes_written == 0) {
+				snprintf(reply_buf, reply_len, "%s", "OK");
+				data_len = strlen("OK");
+			} else if (bytes_written > 0) {
+				data_len = bytes_written > reply_len ?
+					reply_len : bytes_written;
+			} else {
+				/* -ve return value. Propagate the error back */
+				err = bytes_written;
+				goto exit;
+			}
+			break;
+		}
+	}
+
+	if ((data_len > 0) && reply_buf) {
+		err =  wl_cfgvendor_send_cmd_reply(wiphy, wdev->netdev,
+			reply_buf, data_len+1);
+		if (unlikely(err))
+			WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+		else
+			WL_DBG(("Vendor Command reply sent successfully!\n"));
+	} else {
+		/* No data to be sent back as reply */
+		WL_ERR(("Vendor_cmd: No reply expected. data_len:%u reply_buf %p \n",
+			data_len, reply_buf));
+	}
+
+exit:
+	if (reply_buf)
+		kfree(reply_buf);
+	net_os_wake_unlock(ndev);
+	return err;
+}
+
+
+#ifdef LINKSTAT_SUPPORT
+#define NUM_RATE 32
+#define NUM_PEER 1
+#define NUM_CHAN 11
+#define HEADER_SIZE sizeof(ver_len)
+static int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	static char iovar_buf[WLC_IOCTL_MAXLEN];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int err = 0, i;
+	wifi_radio_stat *radio;
+	wifi_radio_stat_h radio_h;
+	wl_wme_cnt_t *wl_wme_cnt;
+	wl_cnt_ge40mcst_v1_t *macstat_cnt;
+	wl_cnt_wlc_t *wlc_cnt;
+	scb_val_t scbval;
+	char *output = NULL;
+	char *outdata = NULL;
+	wifi_rate_stat_v1 *p_wifi_rate_stat_v1 = NULL;
+	wifi_rate_stat *p_wifi_rate_stat = NULL;
+	uint total_len = 0;
+	wifi_iface_stat iface;
+	wlc_rev_info_t revinfo;
+#ifdef CONFIG_COMPAT
+	compat_wifi_iface_stat compat_iface;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+	int compat_task_state = in_compat_syscall();
+#else
+	int compat_task_state = is_compat_task();
+#endif
+#endif /* CONFIG_COMPAT */
+
+	WL_INFORM(("%s: Enter \n", __func__));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_REVINFO, &revinfo,
+			sizeof(revinfo));
+	if (err != BCME_OK) {
+		goto exit;
+	}
+
+	outdata = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (outdata == NULL) {
+		WL_ERR(("%s: alloc failed\n", __func__));
+		return -ENOMEM;
+	}
+
+	memset(&scbval, 0, sizeof(scb_val_t));
+	memset(outdata, 0, WLC_IOCTL_MAXLEN);
+	output = outdata;
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+		WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat)));
+		goto exit;
+	}
+	radio = (wifi_radio_stat *)iovar_buf;
+
+	memset(&radio_h, 0, sizeof(wifi_radio_stat_h));
+	radio_h.on_time = radio->on_time;
+	radio_h.tx_time = radio->tx_time;
+	radio_h.rx_time = radio->rx_time;
+	radio_h.on_time_scan = radio->on_time_scan;
+	radio_h.on_time_nbd = radio->on_time_nbd;
+	radio_h.on_time_gscan = radio->on_time_gscan;
+	radio_h.on_time_roam_scan = radio->on_time_roam_scan;
+	radio_h.on_time_pno_scan = radio->on_time_pno_scan;
+	radio_h.on_time_hs20 = radio->on_time_hs20;
+	radio_h.num_channels = NUM_CHAN;
+
+	memcpy(output, &radio_h, sizeof(wifi_radio_stat_h));
+
+	output += sizeof(wifi_radio_stat_h);
+	output += (NUM_CHAN * sizeof(wifi_channel_stat));
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto exit;
+	}
+	wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf;
+
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].ac, WIFI_AC_VO);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].tx_mpdu, wl_wme_cnt->tx[AC_VO].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].rx_mpdu, wl_wme_cnt->rx[AC_VO].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].mpdu_lost,
+		wl_wme_cnt->tx_failed[WIFI_AC_VO].packets);
+
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].ac, WIFI_AC_VI);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].tx_mpdu, wl_wme_cnt->tx[AC_VI].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].rx_mpdu, wl_wme_cnt->rx[AC_VI].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].mpdu_lost,
+		wl_wme_cnt->tx_failed[WIFI_AC_VI].packets);
+
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].ac, WIFI_AC_BE);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].tx_mpdu, wl_wme_cnt->tx[AC_BE].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].rx_mpdu, wl_wme_cnt->rx[AC_BE].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].mpdu_lost,
+		wl_wme_cnt->tx_failed[WIFI_AC_BE].packets);
+
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].ac, WIFI_AC_BK);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].tx_mpdu, wl_wme_cnt->tx[AC_BK].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].rx_mpdu, wl_wme_cnt->rx[AC_BK].packets);
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].mpdu_lost,
+		wl_wme_cnt->tx_failed[WIFI_AC_BK].packets);
+
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_cnt_wlc_t)));
+		goto exit;
+	}
+
+	CHK_CNTBUF_DATALEN(iovar_buf, WLC_IOCTL_MAXLEN);
+
+#ifdef STAT_REPORT
+	wl_stat_report_gather(cfg, iovar_buf);
+#endif
+
+	/* Translate traditional (ver <= 10) counters struct to new xtlv type struct */
+	err = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WLC_IOCTL_MAXLEN, revinfo.corerev);
+	if (err != BCME_OK) {
+		WL_ERR(("%s wl_cntbuf_to_xtlv_format ERR %d\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+
+	if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
+		WL_ERR(("%s wlc_cnt NULL!\n", __FUNCTION__));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+	COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].retries, wlc_cnt->txretry);
+
+	if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+			((wl_cnt_info_t *)iovar_buf)->datalen,
+			WL_CNT_XTLV_CNTV_LE10_UCODE, NULL,
+			BCM_XTLV_OPTION_ALIGN32)) == NULL) {
+		if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+				((wl_cnt_info_t *)iovar_buf)->datalen,
+				WL_CNT_XTLV_GE40_UCODE_V1, NULL,
+				BCM_XTLV_OPTION_ALIGN32)) == NULL) {
+			macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+				((wl_cnt_info_t *)iovar_buf)->datalen,
+				WL_CNT_XTLV_LT40_UCODE_V1, NULL,
+				BCM_XTLV_OPTION_ALIGN32);
+		}
+	}
+
+	if (macstat_cnt == NULL) {
+		WL_ERR(("%s: macstat_cnt NULL!\n", __FUNCTION__));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+	err = wldev_get_rssi(bcmcfg_to_prmry_ndev(cfg), &scbval);
+	if (unlikely(err)) {
+		WL_ERR(("get_rssi error (%d)\n", err));
+		goto exit;
+	}
+
+	COMPAT_ASSIGN_VALUE(iface, beacon_rx, macstat_cnt->rxbeaconmbss);
+	COMPAT_ASSIGN_VALUE(iface, rssi_mgmt, scbval.val);
+	COMPAT_ASSIGN_VALUE(iface, num_peers, NUM_PEER);
+	COMPAT_ASSIGN_VALUE(iface, peer_info->num_rate, NUM_RATE);
+
+#ifdef CONFIG_COMPAT
+	if (compat_task_state) {
+		memcpy(output, &compat_iface, sizeof(compat_iface));
+		output += (sizeof(compat_iface) - sizeof(wifi_rate_stat));
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		memcpy(output, &iface, sizeof(iface));
+		output += (sizeof(iface) - sizeof(wifi_rate_stat));
+	}
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "ratestat", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+		WL_ERR(("error (%d) - size = %zu\n", err, NUM_RATE*sizeof(wifi_rate_stat)));
+		goto exit;
+	}
+	for (i = 0; i < NUM_RATE; i++) {
+		p_wifi_rate_stat =
+			(wifi_rate_stat *)(iovar_buf + i*sizeof(wifi_rate_stat));
+		p_wifi_rate_stat_v1 = (wifi_rate_stat_v1 *)output;
+		p_wifi_rate_stat_v1->rate.preamble = p_wifi_rate_stat->rate.preamble;
+		p_wifi_rate_stat_v1->rate.nss = p_wifi_rate_stat->rate.nss;
+		p_wifi_rate_stat_v1->rate.bw = p_wifi_rate_stat->rate.bw;
+		p_wifi_rate_stat_v1->rate.rateMcsIdx = p_wifi_rate_stat->rate.rateMcsIdx;
+		p_wifi_rate_stat_v1->rate.reserved = p_wifi_rate_stat->rate.reserved;
+		p_wifi_rate_stat_v1->rate.bitrate = p_wifi_rate_stat->rate.bitrate;
+		p_wifi_rate_stat_v1->tx_mpdu = p_wifi_rate_stat->tx_mpdu;
+		p_wifi_rate_stat_v1->rx_mpdu = p_wifi_rate_stat->rx_mpdu;
+		p_wifi_rate_stat_v1->mpdu_lost = p_wifi_rate_stat->mpdu_lost;
+		p_wifi_rate_stat_v1->retries = p_wifi_rate_stat->retries;
+		p_wifi_rate_stat_v1->retries_short = p_wifi_rate_stat->retries_short;
+		p_wifi_rate_stat_v1->retries_long = p_wifi_rate_stat->retries_long;
+		output = (char *) &(p_wifi_rate_stat_v1->retries_long);
+		output += sizeof(p_wifi_rate_stat_v1->retries_long);
+	}
+
+	total_len = sizeof(wifi_radio_stat_h) +
+		NUM_CHAN * sizeof(wifi_channel_stat);
+
+#ifdef CONFIG_COMPAT
+	if (compat_task_state) {
+		total_len += sizeof(compat_wifi_iface_stat);
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		total_len += sizeof(wifi_iface_stat);
+	}
+
+	total_len = total_len - sizeof(wifi_peer_info) +
+		NUM_PEER * (sizeof(wifi_peer_info) - sizeof(wifi_rate_stat_v1) +
+			NUM_RATE * sizeof(wifi_rate_stat_v1));
+
+	if (total_len > WLC_IOCTL_MAXLEN) {
+		WL_ERR(("Error! total_len:%d is unexpected value\n", total_len));
+		err = BCME_BADLEN;
+		goto exit;
+	}
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+		outdata,
+		total_len);
+
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+exit:
+	if (outdata) {
+		kfree(outdata);
+	}
+	return err;
+}
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef DEBUGABILITY
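+/*
+ * Debugability vendor handlers: ring logging start/reset, memory dump
+ * trigger and retrieval, driver/firmware version and ring status queries,
+ * plus the ring/urgent event callbacks registered in wl_cfgvendor_attach().
+ */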
+static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret = BCME_OK, rem, type;
+	char ring_name[DBGRING_NAME_MAX] = {0};
+	int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
+	const struct nlattr *iter;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case DEBUG_ATTRIBUTE_RING_NAME:
+				strncpy(ring_name, nla_data(iter),
+					MIN(sizeof(ring_name) -1, nla_len(iter)));
+				break;
+			case DEBUG_ATTRIBUTE_LOG_LEVEL:
+				log_level = nla_get_u32(iter);
+				break;
+			case DEBUG_ATTRIBUTE_RING_FLAGS:
+				flags = nla_get_u32(iter);
+				break;
+			case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
+				time_intval = nla_get_u32(iter);
+				break;
+			case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
+				threshold = nla_get_u32(iter);
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				ret = BCME_BADADDR;
+				goto exit;
+		}
+	}
+
+	ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
+	if (ret < 0) {
+		WL_ERR(("start_logging is failed ret: %d\n", ret));
+	}
+exit:
+	return ret;
+}
+
+static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+
+	ret = dhd_os_reset_logging(dhd_pub);
+	if (ret < 0) {
+		WL_ERR(("reset logging is failed ret: %d\n", ret));
+	}
+
+	return ret;
+}
+
+static int
+wl_cfgvendor_dbg_trigger_mem_dump(struct wiphy *wiphy,
+		struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret = BCME_OK;
+	uint32 alloc_len;
+	struct sk_buff *skb;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+	dhdp->memdump_type = DUMP_TYPE_CFG_VENDOR_TRIGGERED;
+
+	ret = dhd_os_socram_dump(bcmcfg_to_prmry_ndev(cfg), &alloc_len);
+	if (ret) {
+		WL_ERR(("failed to call dhd_os_socram_dump : %d\n", ret));
+		goto exit;
+	}
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+	if (!skb) {
+		WL_ERR(("skb allocation is failed\n"));
+		ret = BCME_NOMEM;
+		goto exit;
+	}
+	nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
+
+	ret = cfg80211_vendor_cmd_reply(skb);
+
+	if (ret) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+	}
+
+exit:
+	return ret;
+}
+
+static int
+wl_cfgvendor_dbg_get_mem_dump(struct wiphy *wiphy,
+		struct wireless_dev *wdev, const void *data, int len)
+{
+	int ret = BCME_OK, rem, type;
+	int buf_len = 0;
+	uintptr_t user_buf = (uintptr_t)NULL;
+	const struct nlattr *iter;
+	char *mem_buf = NULL;
+	struct sk_buff *skb;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case DEBUG_ATTRIBUTE_FW_DUMP_LEN:
+				/* Check if the iter is valid and
+				 * buffer length is not already initialized.
+				 */
+				if ((nla_len(iter) == sizeof(uint32)) &&
+					!buf_len) {
+					buf_len = nla_get_u32(iter);
+				} else {
+					ret = BCME_ERROR;
+					goto exit;
+				}
+				break;
+			case DEBUG_ATTRIBUTE_FW_DUMP_DATA:
+				if (nla_len(iter) != sizeof(uint64)) {
+					WL_ERR(("Invalid len\n"));
+					ret = BCME_ERROR;
+					goto exit;
+				}
+				user_buf = (uintptr_t)nla_get_u64(iter);
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				ret = BCME_ERROR;
+				goto exit;
+		}
+	}
+	if (buf_len > 0 && user_buf) {
+		mem_buf = vmalloc(buf_len);
+		if (!mem_buf) {
+			WL_ERR(("failed to allocate mem_buf with size : %d\n", buf_len));
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+		ret = dhd_os_get_socram_dump(bcmcfg_to_prmry_ndev(cfg), &mem_buf, &buf_len);
+		if (ret) {
+			WL_ERR(("failed to get_socram_dump : %d\n", ret));
+			goto free_mem;
+		}
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+		if (in_compat_syscall())
+#else
+		if (is_compat_task())
+#endif
+		{
+			void * usr_ptr =  compat_ptr((uintptr_t) user_buf);
+			ret = copy_to_user(usr_ptr, mem_buf, buf_len);
+			if (ret) {
+				WL_ERR(("failed to copy memdump into user buffer : %d\n", ret));
+				goto free_mem;
+			}
+		}
+		else
+#endif /* CONFIG_COMPAT */
+		{
+			ret = copy_to_user((void*)user_buf, mem_buf, buf_len);
+			if (ret) {
+				WL_ERR(("failed to copy memdump into user buffer : %d\n", ret));
+				goto free_mem;
+			}
+		}
+		/* Alloc the SKB for vendor_event */
+		skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+		if (!skb) {
+			WL_ERR(("skb allocation is failed\n"));
+			ret = BCME_NOMEM;
+			goto free_mem;
+		}
+		/* Indicate the memdump is successfully copied */
+		nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
+
+		ret = cfg80211_vendor_cmd_reply(skb);
+
+		if (ret) {
+			WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+		}
+	}
+
+free_mem:
+	vfree(mem_buf);
+exit:
+	return ret;
+}
+
+static int wl_cfgvendor_dbg_get_version(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void *data, int len)
+{
+	int ret = BCME_OK, rem, type;
+	int buf_len = 1024;
+	bool dhd_ver = FALSE;
+	char *buf_ptr;
+	const struct nlattr *iter;
+	gfp_t kflags;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	buf_ptr = kzalloc(buf_len, kflags);
+	if (!buf_ptr) {
+		WL_ERR(("failed to allocate the buffer for version\n"));
+		ret = BCME_NOMEM;
+		goto exit;
+	}
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case DEBUG_ATTRIBUTE_GET_DRIVER:
+				dhd_ver = TRUE;
+				break;
+			case DEBUG_ATTRIBUTE_GET_FW:
+				dhd_ver = FALSE;
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				ret = BCME_ERROR;
+				goto exit;
+		}
+	}
+	ret = dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg), dhd_ver, &buf_ptr, buf_len);
+	if (ret < 0) {
+		WL_ERR(("failed to get the version %d\n", ret));
+		goto exit;
+	}
+	ret = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        buf_ptr, strlen(buf_ptr));
+exit:
+	kfree(buf_ptr);
+	return ret;
+}
+
+static int wl_cfgvendor_dbg_get_ring_status(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret = BCME_OK;
+	int ring_id, i;
+	int ring_cnt;
+	struct sk_buff *skb;
+	dhd_dbg_ring_status_t dbg_ring_status[DEBUG_RING_ID_MAX];
+	dhd_dbg_ring_status_t ring_status;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+	memset(dbg_ring_status, 0, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
+	ring_cnt = 0;
+	for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
+		ret = dhd_os_get_ring_status(dhd_pub, ring_id, &ring_status);
+		if (ret == BCME_NOTFOUND) {
+			WL_DBG(("The ring (%d) is not found \n", ring_id));
+		} else if (ret == BCME_OK) {
+			dbg_ring_status[ring_cnt++] = ring_status;
+		}
+	}
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
+		(DBG_RING_STATUS_SIZE * ring_cnt) + 100);
+	if (!skb) {
+		WL_ERR(("skb allocation is failed\n"));
+		ret = BCME_NOMEM;
+		goto exit;
+	}
+
+	nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
+	for (i = 0; i < ring_cnt; i++) {
+		nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
+				&dbg_ring_status[i]);
+	}
+	ret = cfg80211_vendor_cmd_reply(skb);
+
+	if (ret) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+	}
+exit:
+	return ret;
+}
+
+static int wl_cfgvendor_dbg_get_ring_data(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret = BCME_OK, rem, type;
+	char ring_name[DBGRING_NAME_MAX] = {0};
+	const struct nlattr *iter;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case DEBUG_ATTRIBUTE_RING_NAME:
+				strncpy(ring_name, nla_data(iter),
+					MIN(sizeof(ring_name) -1, nla_len(iter)));
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				return ret;
+		}
+	}
+
+	ret = dhd_os_trigger_get_ring_data(dhd_pub, ring_name);
+	if (ret < 0) {
+		WL_ERR(("trigger_get_data failed ret:%d\n", ret));
+	}
+
+	return ret;
+}
+
+static int wl_cfgvendor_dbg_get_feature(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret = BCME_OK;
+	u32 supported_features = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+
+	ret = dhd_os_dbg_get_feature(dhd_pub, &supported_features);
+	if (ret < 0) {
+		WL_ERR(("dbg_get_feature failed ret:%d\n", ret));
+		goto exit;
+	}
+	ret = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        &supported_features, sizeof(supported_features));
+	if (ret < 0) {
+		WL_ERR(("wl_cfgvendor_send_cmd_reply failed ret:%d\n", ret));
+		goto exit;
+	}
+exit:
+	return ret;
+}
+
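+/*
+ * Debug-ring callback registered with dhd_os_dbg_register_callback() in
+ * wl_cfgvendor_attach(): wraps the ring status and payload into a
+ * GOOGLE_DEBUG_RING_EVENT vendor event on the primary net_device's wiphy.
+ */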
+static void wl_cfgvendor_dbg_ring_send_evt(void *ctx,
+	const int ring_id, const void *data, const uint32 len,
+	const dhd_dbg_ring_status_t ring_status)
+{
+	struct net_device *ndev = ctx;
+	struct wiphy *wiphy;
+	gfp_t kflags;
+	struct sk_buff *skb;
+	if (!ndev) {
+		WL_ERR(("ndev is NULL\n"));
+		return;
+	}
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	wiphy = ndev->ieee80211_ptr->wiphy;
+	/* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+	skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + 100,
+			GOOGLE_DEBUG_RING_EVENT, kflags);
+#else
+	skb = cfg80211_vendor_event_alloc(wiphy, len + 100,
+			GOOGLE_DEBUG_RING_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+		/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+	if (!skb) {
+		WL_ERR(("skb alloc failed"));
+		return;
+	}
+	nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, sizeof(ring_status), &ring_status);
+	nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
+	cfg80211_vendor_event(skb, kflags);
+}
+
+static void wl_cfgvendor_dbg_send_urgent_evt(void *ctx, const void *data,
+	const uint32 len, const uint32 fw_len)
+{
+	struct net_device *ndev = ctx;
+	struct wiphy *wiphy;
+	gfp_t kflags;
+	struct sk_buff *skb;
+	if (!ndev) {
+		WL_ERR(("ndev is NULL\n"));
+		return;
+	}
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	wiphy = ndev->ieee80211_ptr->wiphy;
+	/* Alloc the SKB for vendor_event */
+#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+	skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + 100,
+			GOOGLE_FW_DUMP_EVENT, kflags);
+#else
+	skb = cfg80211_vendor_event_alloc(wiphy, len + 100,
+			GOOGLE_FW_DUMP_EVENT, kflags);
+#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
+		/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
+	if (!skb) {
+		WL_ERR(("skb alloc failed"));
+		return;
+	}
+	nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, fw_len);
+	nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
+	cfg80211_vendor_event(skb, kflags);
+}
+#endif /* DEBUGABILITY */
+
+#ifdef DBG_PKT_MON
+static int wl_cfgvendor_dbg_start_pkt_fate_monitoring(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void *data, int len)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+	int ret;
+
+	ret = dhd_os_dbg_attach_pkt_monitor(dhd_pub);
+	if (unlikely(ret)) {
+		WL_ERR(("failed to start pkt fate monitoring, ret=%d", ret));
+	}
+
+	return ret;
+}
+
+typedef int (*dbg_mon_get_pkts_t) (dhd_pub_t *dhdp, void __user *user_buf,
+	uint16 req_count, uint16 *resp_count);
+
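+/*
+ * Common helper for DEBUG_GET_TX_PKT_FATES/DEBUG_GET_RX_PKT_FATES: parses the
+ * requested record count and user buffer pointer from the netlink attributes,
+ * lets the supplied dbg_mon_get_pkts callback copy the fate records to user
+ * space, and replies with the number of records actually returned.
+ */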
+static int __wl_cfgvendor_dbg_get_pkt_fates(struct wiphy *wiphy,
+	const void *data, int len, dbg_mon_get_pkts_t dbg_mon_get_pkts)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+	struct sk_buff *skb = NULL;
+	const struct nlattr *iter;
+	void __user *user_buf = NULL;
+	uint16 req_count = 0, resp_count = 0;
+	int ret, tmp, type, mem_needed;
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		switch (type) {
+			case DEBUG_ATTRIBUTE_PKT_FATE_NUM:
+				req_count = nla_get_u32(iter);
+				break;
+			case DEBUG_ATTRIBUTE_PKT_FATE_DATA:
+				user_buf = (void __user *)(unsigned long) nla_get_u64(iter);
+				break;
+			default:
+				WL_ERR(("%s: no such attribute %d\n", __FUNCTION__, type));
+				ret = -EINVAL;
+				goto exit;
+		}
+	}
+
+	if (!req_count || !user_buf) {
+		WL_ERR(("%s: invalid request, user_buf=%p, req_count=%u\n",
+			__FUNCTION__, user_buf, req_count));
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = dbg_mon_get_pkts(dhd_pub, user_buf, req_count, &resp_count);
+	if (unlikely(ret)) {
+		WL_ERR(("failed to get packets, ret:%d \n", ret));
+		goto exit;
+	}
+
+	mem_needed = VENDOR_REPLY_OVERHEAD + ATTRIBUTE_U32_LEN;
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
+
+	ret = cfg80211_vendor_cmd_reply(skb);
+	if (unlikely(ret)) {
+		WL_ERR(("vendor Command reply failed ret:%d \n", ret));
+	}
+
+exit:
+	return ret;
+}
+
+static int wl_cfgvendor_dbg_get_tx_pkt_fates(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret;
+
+	ret = __wl_cfgvendor_dbg_get_pkt_fates(wiphy, data, len,
+			dhd_os_dbg_monitor_get_tx_pkts);
+	if (unlikely(ret)) {
+		WL_ERR(("failed to get tx packets, ret:%d \n", ret));
+	}
+
+	return ret;
+}
+
+static int wl_cfgvendor_dbg_get_rx_pkt_fates(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int ret;
+
+	ret = __wl_cfgvendor_dbg_get_pkt_fates(wiphy, data, len,
+			dhd_os_dbg_monitor_get_rx_pkts);
+	if (unlikely(ret)) {
+		WL_ERR(("failed to get rx packets, ret:%d \n", ret));
+	}
+
+	return ret;
+}
+#endif /* DBG_PKT_MON */
+
+#ifdef KEEP_ALIVE
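+/*
+ * WIFI_OFFLOAD_SUBCMD_START/STOP_MKEEP_ALIVE handlers: parse the
+ * MKEEP_ALIVE_ATTRIBUTE_* attributes (id, IP packet, MAC addresses, period)
+ * and program or cancel the firmware keep-alive offload via
+ * dhd_dev_start_mkeep_alive()/dhd_dev_stop_mkeep_alive().
+ */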
+static int wl_cfgvendor_start_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+	/* max size of IP packet for keep alive */
+	const int MKEEP_ALIVE_IP_PKT_MAX = 256;
+
+	int ret = BCME_OK, rem, type;
+	uint8 mkeep_alive_id = 0;
+	uint8 *ip_pkt = NULL;
+	uint16 ip_pkt_len = 0;
+	uint8 src_mac[ETHER_ADDR_LEN];
+	uint8 dst_mac[ETHER_ADDR_LEN];
+	uint32 period_msec = 0;
+	const struct nlattr *iter;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case MKEEP_ALIVE_ATTRIBUTE_ID:
+				mkeep_alive_id = nla_get_u8(iter);
+				break;
+			case MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN:
+				ip_pkt_len = nla_get_u16(iter);
+				if (ip_pkt_len > MKEEP_ALIVE_IP_PKT_MAX) {
+					ret = BCME_BADARG;
+					goto exit;
+				}
+				break;
+			case MKEEP_ALIVE_ATTRIBUTE_IP_PKT:
+				if (!ip_pkt_len) {
+					ret = BCME_BADARG;
+					WL_ERR(("ip packet length is 0\n"));
+					goto exit;
+				}
+				ip_pkt = (u8 *)kzalloc(ip_pkt_len, kflags);
+				if (ip_pkt == NULL) {
+					ret = BCME_NOMEM;
+					WL_ERR(("Failed to allocate mem for ip packet\n"));
+					goto exit;
+				}
+				memcpy(ip_pkt, (u8*)nla_data(iter), ip_pkt_len);
+				break;
+			case MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR:
+				memcpy(src_mac, nla_data(iter), ETHER_ADDR_LEN);
+				break;
+			case MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR:
+				memcpy(dst_mac, nla_data(iter), ETHER_ADDR_LEN);
+				break;
+			case MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC:
+				period_msec = nla_get_u32(iter);
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				ret = BCME_BADARG;
+				goto exit;
+		}
+	}
+
+	if (ip_pkt == NULL) {
+		ret = BCME_BADARG;
+		WL_ERR(("ip packet is NULL\n"));
+		goto exit;
+	}
+
+	ret = dhd_dev_start_mkeep_alive(dhd_pub, mkeep_alive_id, ip_pkt, ip_pkt_len, src_mac,
+		dst_mac, period_msec);
+	if (ret < 0) {
+		WL_ERR(("start_mkeep_alive is failed ret: %d\n", ret));
+	}
+
+exit:
+	if (ip_pkt) {
+		kfree(ip_pkt);
+	}
+
+	return ret;
+}
+
+static int wl_cfgvendor_stop_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+	int ret = BCME_OK, rem, type;
+	uint8 mkeep_alive_id = 0;
+	const struct nlattr *iter;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd_pub = cfg->pub;
+
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case MKEEP_ALIVE_ATTRIBUTE_ID:
+				mkeep_alive_id = nla_get_u8(iter);
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				ret = BCME_BADARG;
+				break;
+		}
+	}
+
+	ret = dhd_dev_stop_mkeep_alive(dhd_pub, mkeep_alive_id);
+	if (ret < 0) {
+		WL_ERR(("stop_mkeep_alive is failed ret: %d\n", ret));
+	}
+
+	return ret;
+}
+#endif /* KEEP_ALIVE */
+
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
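+/*
+ * APF (Android Packet Filter) handlers: report the interpreter version and
+ * maximum program size, and install a user-supplied filter program (bounded
+ * by WL_APF_PROGRAM_MAX_SIZE) through dhd_dev_apf_add_filter().
+ */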
+static int
+wl_cfgvendor_apf_get_capabilities(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void *data, int len)
+{
+	struct net_device *ndev = wdev_to_ndev(wdev);
+	struct sk_buff *skb;
+	int ret, ver, max_len, mem_needed;
+
+	/* APF version */
+	ver = 0;
+	ret = dhd_dev_apf_get_version(ndev, &ver);
+	if (unlikely(ret)) {
+		WL_ERR(("APF get version failed, ret=%d\n", ret));
+		return ret;
+	}
+
+	/* APF memory size limit */
+	max_len = 0;
+	ret = dhd_dev_apf_get_max_len(ndev, &max_len);
+	if (unlikely(ret)) {
+		WL_ERR(("APF get maximum length failed, ret=%d\n", ret));
+		return ret;
+	}
+
+	mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2);
+
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("%s: can't allocate %d bytes\n", __FUNCTION__, mem_needed));
+		return -ENOMEM;
+	}
+
+	nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
+	nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
+
+	ret = cfg80211_vendor_cmd_reply(skb);
+	if (unlikely(ret)) {
+		WL_ERR(("vendor command reply failed, ret=%d\n", ret));
+	}
+
+	return ret;
+}
+
+static int
+wl_cfgvendor_apf_set_filter(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	struct net_device *ndev = wdev_to_ndev(wdev);
+	const struct nlattr *iter;
+	u8 *program = NULL;
+	u32 program_len = 0;
+	int ret, tmp, type;
+	gfp_t kflags;
+
+	if (len <= 0) {
+		WL_ERR(("Invalid len: %d\n", len));
+		ret = -EINVAL;
+		goto exit;
+	}
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		switch (type) {
+			case APF_ATTRIBUTE_PROGRAM_LEN:
+				/* check if the iter value is valid and program_len
+				 * is not already initialized.
+				 */
+				if (nla_len(iter) == sizeof(uint32) && !program_len) {
+					program_len = nla_get_u32(iter);
+				} else {
+					ret = -EINVAL;
+					goto exit;
+				}
+
+				if (program_len > WL_APF_PROGRAM_MAX_SIZE) {
+					WL_ERR(("program len is more than expected len\n"));
+					ret = -EINVAL;
+					goto exit;
+				}
+
+				if (unlikely(!program_len)) {
+					WL_ERR(("zero program length\n"));
+					ret = -EINVAL;
+					goto exit;
+				}
+				break;
+			case APF_ATTRIBUTE_PROGRAM:
+				if (unlikely(!program_len)) {
+					WL_ERR(("program len is not set\n"));
+					ret = -EINVAL;
+					goto exit;
+				}
+				kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+				program = kzalloc(program_len, kflags);
+				if (unlikely(!program)) {
+					WL_ERR(("%s: can't allocate %d bytes\n",
+					      __FUNCTION__, program_len));
+					ret = -ENOMEM;
+					goto exit;
+				}
+				memcpy(program, (u8*)nla_data(iter), program_len);
+				break;
+			default:
+				WL_ERR(("%s: no such attribute %d\n", __FUNCTION__, type));
+				ret = -EINVAL;
+				goto exit;
+		}
+	}
+
+	ret = dhd_dev_apf_add_filter(ndev, program, program_len);
+
+exit:
+	if (program) {
+		kfree(program);
+	}
+	return ret;
+}
+#endif /* PKT_FILTER_SUPPORT && APF */
+
+#ifdef NDO_CONFIG_SUPPORT
+static int wl_cfgvendor_configure_nd_offload(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	const struct nlattr *iter;
+	int ret = BCME_OK, rem, type;
+	u8 enable = 0;
+
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+			case ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE:
+				enable = nla_get_u8(iter);
+				break;
+			default:
+				WL_ERR(("Unknown type: %d\n", type));
+				ret = BCME_BADARG;
+				goto exit;
+		}
+	}
+
+	ret = dhd_dev_ndo_cfg(bcmcfg_to_prmry_ndev(cfg), enable);
+	if (ret < 0) {
+		WL_ERR(("dhd_dev_ndo_cfg() failed: %d\n", ret));
+	}
+
+exit:
+	return ret;
+}
+#endif /* NDO_CONFIG_SUPPORT */
+
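+/*
+ * Vendor command table registered with cfg80211 in wl_cfgvendor_attach().
+ * Each entry maps an (OUI, subcommand) pair to its .doit handler; all
+ * commands require a wdev/netdev and take raw, driver-validated data.
+ */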
+static const struct wiphy_vendor_command wl_vendor_cmds [] = {
+	{
+		{
+			.vendor_id = OUI_BRCM,
+			.subcmd = BRCM_VENDOR_SCMD_PRIV_STR
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_priv_string_handler
+	},
+	{
+		{
+			.vendor_id = OUI_BRCM,
+			.subcmd = BRCM_VENDOR_SCMD_BCM_STR
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_priv_bcm_handler
+	},
+#ifdef GSCAN_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_GET_CAPABILITIES
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_gscan_get_capabilities
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_scan_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_batch_scan_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_ENABLE_GSCAN
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_initiate_gscan
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_enable_full_scan_result
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_HOTLIST
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_hotlist_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_gscan_get_batch_results
+	},
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_gscan_get_channel_list
+	},
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef RTT_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_SET_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_rtt_set_config
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_CANCEL_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_rtt_cancel_config
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_GETCAPABILITY
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_rtt_get_capability
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_GETAVAILCHANNEL
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_rtt_get_responder_info
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_SET_RESPONDER
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_rtt_set_responder
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_CANCEL_RESPONDER
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_rtt_cancel_responder
+	},
+#endif /* RTT_SUPPORT */
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_get_feature_set
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_get_feature_set_matrix
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_RANDOM_MAC_OUI
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_rand_mac_oui
+	},
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_NODFS_CHANNELS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_nodfs_flag
+	},
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_SET_COUNTRY
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_country
+	},
+#ifdef LINKSTAT_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = LSTATS_SUBCMD_GET_INFO
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_lstats_get_info
+	},
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_EPNO_SSID
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_epno_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_SET_SSID_WHITELIST
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_ssid_whitelist
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_SET_LAZY_ROAM_PARAMS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_lazy_roam_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_ENABLE_LAZY_ROAM
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_enable_lazy_roam
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_SET_BSSID_PREF
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_bssid_pref
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_SET_BSSID_BLACKLIST
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_bssid_blacklist
+	},
+#endif /* GSCAN_SUPPORT */
+#ifdef DEBUGABILITY
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_START_LOGGING
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_start_logging
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_RESET_LOGGING
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_reset_logging
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_TRIGGER_MEM_DUMP
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_trigger_mem_dump
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_MEM_DUMP
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_get_mem_dump
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_VER
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_get_version
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_RING_STATUS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_get_ring_status
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_RING_DATA
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_get_ring_data
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_FEATURE
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_get_feature
+	},
+#endif /* DEBUGABILITY */
+#ifdef DBG_PKT_MON
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_START_PKT_FATE_MONITORING
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_start_pkt_fate_monitoring
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_TX_PKT_FATES
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_get_tx_pkt_fates
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_RX_PKT_FATES
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_dbg_get_rx_pkt_fates
+	},
+#endif /* DBG_PKT_MON */
+#ifdef KEEP_ALIVE
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_start_mkeep_alive
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_stop_mkeep_alive
+	},
+#endif /* KEEP_ALIVE */
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = APF_SUBCMD_GET_CAPABILITIES
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_apf_get_capabilities
+	},
+
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = APF_SUBCMD_SET_FILTER
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_apf_set_filter
+	},
+#endif /* PKT_FILTER_SUPPORT && APF */
+#ifdef NDO_CONFIG_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_CONFIG_ND_OFFLOAD
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_configure_nd_offload
+	},
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_SET_RSSI_MONITOR
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_rssi_monitor
+	},
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef DHDTCPACK_SUPPRESS
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = WIFI_SUBCMD_CONFIG_TCPACK_SUP
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_set_tcpack_sup_mode
+	},
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WAKE_STATUS
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = DEBUG_GET_WAKE_REASON_STATS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.policy = VENDOR_CMD_RAW_DATA,
+		.doit = wl_cfgvendor_get_wake_reason_stats
+	}
+#endif /* DHD_WAKE_STATUS */
+};
+
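+/*
+ * Vendor event table registered with cfg80211; the cfg80211_vendor_event_alloc()
+ * callers above index into this array when allocating event skbs.
+ */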
+static const struct  nl80211_vendor_cmd_info wl_vendor_events [] = {
+		{ OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC },
+		{ OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR },
+#ifdef GSCAN_SUPPORT
+		{ OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT },
+		{ OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT },
+		{ OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT },
+		{ OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT },
+#endif /* GSCAN_SUPPORT */
+#ifdef RTT_SUPPORT
+		{ OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT },
+#endif /* RTT_SUPPORT */
+#ifdef GSCAN_SUPPORT
+		{ OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT },
+		{ OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT },
+		{ OUI_GOOGLE, GOOGLE_SCAN_EPNO_EVENT },
+#endif /* GSCAN_SUPPORT */
+		{ OUI_GOOGLE, GOOGLE_DEBUG_RING_EVENT },
+		{ OUI_GOOGLE, GOOGLE_FW_DUMP_EVENT },
+#ifdef GSCAN_SUPPORT
+		{ OUI_GOOGLE, GOOGLE_PNO_HOTSPOT_FOUND_EVENT },
+#endif /* GSCAN_SUPPORT */
+		{ OUI_GOOGLE, GOOGLE_RSSI_MONITOR_EVENT },
+		{ OUI_GOOGLE, GOOGLE_MKEEP_ALIVE_EVENT },
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_ENABLED},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_DISABLED},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_PUBLISH_TERMINATED},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_DE_EVENT},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_FOLLOWUP},
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_TCA},
+#ifdef NAN_DP
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_PATH_OPEN},
+#endif /* NAN_DP */
+		{ OUI_GOOGLE, GOOGLE_NAN_EVENT_UNKNOWN}
+};
+
+int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd)
+{
+
+	WL_INFORM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n",
+		NL80211_CMD_VENDOR));
+
+	wiphy->vendor_commands	= wl_vendor_cmds;
+	wiphy->n_vendor_commands = ARRAY_SIZE(wl_vendor_cmds);
+	wiphy->vendor_events	= wl_vendor_events;
+	wiphy->n_vendor_events	= ARRAY_SIZE(wl_vendor_events);
+
+#ifdef DEBUGABILITY
+	dhd_os_dbg_register_callback(FW_VERBOSE_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+	dhd_os_dbg_register_callback(FW_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+	dhd_os_dbg_register_callback(DHD_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+	dhd_os_dbg_register_callback(NAN_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+	dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_urgent_evt);
+#endif /* DEBUGABILITY */
+
+	return 0;
+}
+
+int wl_cfgvendor_detach(struct wiphy *wiphy)
+{
+	WL_INFORM(("Vendor: Unregister BRCM cfg80211 vendor interface \n"));
+
+	wiphy->vendor_commands  = NULL;
+	wiphy->vendor_events    = NULL;
+	wiphy->n_vendor_commands = 0;
+	wiphy->n_vendor_events  = 0;
+
+	return 0;
+}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.h b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
new file mode 100644
index 0000000..8933548
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
@@ -0,0 +1,562 @@
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgvendor.h 698895 2017-05-11 02:55:17Z $
+ */
+
+
+#ifndef _wl_cfgvendor_h_
+#define _wl_cfgvendor_h_
+#if ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || \
+	defined(CONFIG_BCMDHD_VENDOR_EXT)) && !defined(WL_VENDOR_EXT_SUPPORT)
+/* defined CONFIG_BCMDHD_VENDOR_EXT in brix kernel to enable GSCAN testing */
+#define WL_VENDOR_EXT_SUPPORT
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) || CONFIG_BCMDHD_VENDOR_EXT) && !WL_VENDOR_EXT_SUPPORT */
+
+#define OUI_BRCM    0x001018
+#define OUI_GOOGLE  0x001A11
+#define BRCM_VENDOR_SUBCMD_PRIV_STR	1
+#define ATTRIBUTE_U32_LEN                  (NLA_HDRLEN  + 4)
+#define VENDOR_ID_OVERHEAD                 ATTRIBUTE_U32_LEN
+#define VENDOR_SUBCMD_OVERHEAD             ATTRIBUTE_U32_LEN
+#define VENDOR_DATA_OVERHEAD               (NLA_HDRLEN)
+
+enum brcm_vendor_attr {
+	BRCM_ATTR_DRIVER_CMD,
+	BRCM_ATTR_DRIVER_MAX
+};
+
+#define SCAN_RESULTS_COMPLETE_FLAG_LEN       ATTRIBUTE_U32_LEN
+#define SCAN_INDEX_HDR_LEN                   (NLA_HDRLEN)
+#define SCAN_ID_HDR_LEN                      ATTRIBUTE_U32_LEN
+#define SCAN_FLAGS_HDR_LEN                   ATTRIBUTE_U32_LEN
+#define GSCAN_NUM_RESULTS_HDR_LEN            ATTRIBUTE_U32_LEN
+#define GSCAN_CH_BUCKET_MASK_HDR_LEN         ATTRIBUTE_U32_LEN
+#define GSCAN_RESULTS_HDR_LEN                (NLA_HDRLEN)
+#define GSCAN_BATCH_RESULT_HDR_LEN  (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \
+					SCAN_FLAGS_HDR_LEN + \
+					GSCAN_NUM_RESULTS_HDR_LEN + \
+					GSCAN_CH_BUCKET_MASK_HDR_LEN + \
+					GSCAN_RESULTS_HDR_LEN)
+
+#define VENDOR_REPLY_OVERHEAD       (VENDOR_ID_OVERHEAD + \
+					VENDOR_SUBCMD_OVERHEAD + \
+					VENDOR_DATA_OVERHEAD)
+
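+/*
+ * Each GSCAN_ATTR_SETn constant starts a new attribute group in
+ * enum gscan_attributes below; groups are spaced 10 apart to leave room
+ * for additional attributes without renumbering existing ones.
+ */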
+#define GSCAN_ATTR_SET1				10
+#define GSCAN_ATTR_SET2				20
+#define GSCAN_ATTR_SET3				30
+#define GSCAN_ATTR_SET4				40
+#define GSCAN_ATTR_SET5				50
+#define GSCAN_ATTR_SET6				60
+#define GSCAN_ATTR_SET7				70
+#define GSCAN_ATTR_SET8				80
+#define GSCAN_ATTR_SET9				90
+#define GSCAN_ATTR_SET10			100
+#define GSCAN_ATTR_SET11			110
+#define GSCAN_ATTR_SET12			120
+#define GSCAN_ATTR_SET13			130
+
+
+typedef enum {
+	/* don't use 0 as a valid subcommand */
+	VENDOR_NL80211_SUBCMD_UNSPECIFIED,
+
+	/* define all vendor startup commands between 0x0 and 0x0FFF */
+	VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001,
+	VENDOR_NL80211_SUBCMD_RANGE_END   = 0x0FFF,
+
+	/* define all GScan related commands between 0x1000 and 0x10FF */
+	ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000,
+	ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END   = 0x10FF,
+
+	/* define all RTT related commands between 0x1100 and 0x11FF */
+	ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100,
+	ANDROID_NL80211_SUBCMD_RTT_RANGE_END   = 0x11FF,
+
+	ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200,
+	ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END   = 0x12FF,
+
+	ANDROID_NL80211_SUBCMD_TDLS_RANGE_START = 0x1300,
+	ANDROID_NL80211_SUBCMD_TDLS_RANGE_END	= 0x13FF,
+
+	ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START = 0x1400,
+	ANDROID_NL80211_SUBCMD_DEBUG_RANGE_END	= 0x14FF,
+
+	/* define all NearbyDiscovery related commands between 0x1500 and 0x15FF */
+	ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1500,
+	ANDROID_NL80211_SUBCMD_NBD_RANGE_END   = 0x15FF,
+
+	/* define all wifi calling related commands between 0x1600 and 0x16FF */
+	ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START = 0x1600,
+	ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_END   = 0x16FF,
+
+	/* define all NAN related commands between 0x1700 and 0x17FF */
+	ANDROID_NL80211_SUBCMD_NAN_RANGE_START = 0x1700,
+	ANDROID_NL80211_SUBCMD_NAN_RANGE_END   = 0x17FF,
+
+	/* define all packet filter related commands between 0x1800 and 0x18FF */
+	ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START = 0x1800,
+	ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_END   = 0x18FF,
+
+	/* This is reserved for future usage */
+
+} ANDROID_VENDOR_SUB_COMMAND;
+
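+/*
+ * Vendor subcommand IDs. Each group starts at the matching
+ * ANDROID_NL80211_SUBCMD_*_RANGE_START value defined above; new subcommands
+ * should be appended within their range.
+ */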
+enum andr_vendor_subcmd {
+	GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START,
+	GSCAN_SUBCMD_SET_CONFIG,
+	GSCAN_SUBCMD_SET_SCAN_CONFIG,
+	GSCAN_SUBCMD_ENABLE_GSCAN,
+	GSCAN_SUBCMD_GET_SCAN_RESULTS,
+	GSCAN_SUBCMD_SCAN_RESULTS,
+	GSCAN_SUBCMD_SET_HOTLIST,
+	GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG,
+	GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS,
+	GSCAN_SUBCMD_GET_CHANNEL_LIST,
+	/* ANDR_WIFI_XXX although not related to gscan are defined here */
+	ANDR_WIFI_SUBCMD_GET_FEATURE_SET,
+	ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX,
+	ANDR_WIFI_RANDOM_MAC_OUI,
+	ANDR_WIFI_NODFS_CHANNELS,
+	ANDR_WIFI_SET_COUNTRY,
+	GSCAN_SUBCMD_SET_EPNO_SSID,
+	WIFI_SUBCMD_SET_SSID_WHITELIST,
+	WIFI_SUBCMD_SET_LAZY_ROAM_PARAMS,
+	WIFI_SUBCMD_ENABLE_LAZY_ROAM,
+	WIFI_SUBCMD_SET_BSSID_PREF,
+	WIFI_SUBCMD_SET_BSSID_BLACKLIST,
+	GSCAN_SUBCMD_ANQPO_CONFIG,
+	WIFI_SUBCMD_SET_RSSI_MONITOR,
+	WIFI_SUBCMD_CONFIG_ND_OFFLOAD,
+	WIFI_SUBCMD_CONFIG_TCPACK_SUP,
+	RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
+	RTT_SUBCMD_CANCEL_CONFIG,
+	RTT_SUBCMD_GETCAPABILITY,
+	RTT_SUBCMD_GETAVAILCHANNEL,
+	RTT_SUBCMD_SET_RESPONDER,
+	RTT_SUBCMD_CANCEL_RESPONDER,
+	LSTATS_SUBCMD_GET_INFO = ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START,
+
+	DEBUG_START_LOGGING = ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START,
+	DEBUG_TRIGGER_MEM_DUMP,
+	DEBUG_GET_MEM_DUMP,
+	DEBUG_GET_VER,
+	DEBUG_GET_RING_STATUS,
+	DEBUG_GET_RING_DATA,
+	DEBUG_GET_FEATURE,
+	DEBUG_RESET_LOGGING,
+
+	DEBUG_TRIGGER_DRIVER_MEM_DUMP,
+	DEBUG_GET_DRIVER_MEM_DUMP,
+	DEBUG_START_PKT_FATE_MONITORING,
+	DEBUG_GET_TX_PKT_FATES,
+	DEBUG_GET_RX_PKT_FATES,
+	DEBUG_GET_WAKE_REASON_STATS,
+
+	WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE = ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START,
+	WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE,
+
+	NAN_WIFI_SUBCMD_ENABLE = ANDROID_NL80211_SUBCMD_NAN_RANGE_START,
+	NAN_WIFI_SUBCMD_DISABLE,
+	NAN_WIFI_SUBCMD_REQUEST_PUBLISH,
+	NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE,
+	NAN_WIFI_SUBCMD_CANCEL_PUBLISH,
+	NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE,
+	NAN_WIFI_SUBCMD_TRANSMIT,
+#ifdef NAN_DP
+	NAN_WIFI_SUBCMD_DATA_PATH_OPEN,
+	NAN_WIFI_SUBCMD_DATA_PATH_CLOSE,
+#endif /* NAN_DP */
+	APF_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START,
+	APF_SUBCMD_SET_FILTER,
+	/* Add more sub commands here */
+	VENDOR_SUBCMD_MAX
+};
+
+enum gscan_attributes {
+    GSCAN_ATTRIBUTE_NUM_BUCKETS = GSCAN_ATTR_SET1,
+    GSCAN_ATTRIBUTE_BASE_PERIOD,
+    GSCAN_ATTRIBUTE_BUCKETS_BAND,
+    GSCAN_ATTRIBUTE_BUCKET_ID,
+    GSCAN_ATTRIBUTE_BUCKET_PERIOD,
+    GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS,
+    GSCAN_ATTRIBUTE_BUCKET_CHANNELS,
+    GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN,
+    GSCAN_ATTRIBUTE_REPORT_THRESHOLD,
+    GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE,
+    GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND,
+
+    GSCAN_ATTRIBUTE_ENABLE_FEATURE = GSCAN_ATTR_SET2,
+    GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
+    GSCAN_ATTRIBUTE_FLUSH_FEATURE,
+    GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS,
+    GSCAN_ATTRIBUTE_REPORT_EVENTS,
+    /* remaining reserved for additional attributes */
+    GSCAN_ATTRIBUTE_NUM_OF_RESULTS = GSCAN_ATTR_SET3,
+    GSCAN_ATTRIBUTE_FLUSH_RESULTS,
+    GSCAN_ATTRIBUTE_SCAN_RESULTS,                       /* flat array of wifi_scan_result */
+    GSCAN_ATTRIBUTE_SCAN_ID,                            /* indicates scan number */
+    GSCAN_ATTRIBUTE_SCAN_FLAGS,                         /* indicates if scan was aborted */
+    GSCAN_ATTRIBUTE_AP_FLAGS,                           /* flags on significant change event */
+    GSCAN_ATTRIBUTE_NUM_CHANNELS,
+    GSCAN_ATTRIBUTE_CHANNEL_LIST,
+    GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK,
+
+	/* remaining reserved for additional attributes */
+
+    GSCAN_ATTRIBUTE_SSID = GSCAN_ATTR_SET4,
+    GSCAN_ATTRIBUTE_BSSID,
+    GSCAN_ATTRIBUTE_CHANNEL,
+    GSCAN_ATTRIBUTE_RSSI,
+    GSCAN_ATTRIBUTE_TIMESTAMP,
+    GSCAN_ATTRIBUTE_RTT,
+    GSCAN_ATTRIBUTE_RTTSD,
+
+    /* remaining reserved for additional attributes */
+
+    GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = GSCAN_ATTR_SET5,
+    GSCAN_ATTRIBUTE_RSSI_LOW,
+    GSCAN_ATTRIBUTE_RSSI_HIGH,
+    GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM,
+    GSCAN_ATTRIBUTE_HOTLIST_FLUSH,
+
+    /* remaining reserved for additional attributes */
+    GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = GSCAN_ATTR_SET6,
+    GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE,
+    GSCAN_ATTRIBUTE_MIN_BREACHING,
+    GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS,
+    GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH,
+
+    /* EPNO */
+    GSCAN_ATTRIBUTE_EPNO_SSID_LIST = GSCAN_ATTR_SET7,
+    GSCAN_ATTRIBUTE_EPNO_SSID,
+    GSCAN_ATTRIBUTE_EPNO_SSID_LEN,
+    GSCAN_ATTRIBUTE_EPNO_RSSI,
+    GSCAN_ATTRIBUTE_EPNO_FLAGS,
+    GSCAN_ATTRIBUTE_EPNO_AUTH,
+    GSCAN_ATTRIBUTE_EPNO_SSID_NUM,
+    GSCAN_ATTRIBUTE_EPNO_FLUSH,
+
+    /* Roam SSID Whitelist and BSSID pref */
+    GSCAN_ATTRIBUTE_WHITELIST_SSID = GSCAN_ATTR_SET8,
+    GSCAN_ATTRIBUTE_NUM_WL_SSID,
+    GSCAN_ATTRIBUTE_WL_SSID_LEN,
+    GSCAN_ATTRIBUTE_WL_SSID_FLUSH,
+    GSCAN_ATTRIBUTE_WHITELIST_SSID_ELEM,
+    GSCAN_ATTRIBUTE_NUM_BSSID,
+    GSCAN_ATTRIBUTE_BSSID_PREF_LIST,
+    GSCAN_ATTRIBUTE_BSSID_PREF_FLUSH,
+    GSCAN_ATTRIBUTE_BSSID_PREF,
+    GSCAN_ATTRIBUTE_RSSI_MODIFIER,
+
+
+    /* Roam cfg */
+    GSCAN_ATTRIBUTE_A_BAND_BOOST_THRESHOLD = GSCAN_ATTR_SET9,
+    GSCAN_ATTRIBUTE_A_BAND_PENALTY_THRESHOLD,
+    GSCAN_ATTRIBUTE_A_BAND_BOOST_FACTOR,
+    GSCAN_ATTRIBUTE_A_BAND_PENALTY_FACTOR,
+    GSCAN_ATTRIBUTE_A_BAND_MAX_BOOST,
+    GSCAN_ATTRIBUTE_LAZY_ROAM_HYSTERESIS,
+    GSCAN_ATTRIBUTE_ALERT_ROAM_RSSI_TRIGGER,
+    GSCAN_ATTRIBUTE_LAZY_ROAM_ENABLE,
+
+    /* BSSID blacklist */
+    GSCAN_ATTRIBUTE_BSSID_BLACKLIST_FLUSH = GSCAN_ATTR_SET10,
+    GSCAN_ATTRIBUTE_BLACKLIST_BSSID,
+
+    GSCAN_ATTRIBUTE_ANQPO_HS_LIST = GSCAN_ATTR_SET11,
+    GSCAN_ATTRIBUTE_ANQPO_HS_LIST_SIZE,
+    GSCAN_ATTRIBUTE_ANQPO_HS_NETWORK_ID,
+    GSCAN_ATTRIBUTE_ANQPO_HS_NAI_REALM,
+    GSCAN_ATTRIBUTE_ANQPO_HS_ROAM_CONSORTIUM_ID,
+    GSCAN_ATTRIBUTE_ANQPO_HS_PLMN,
+
+    /* Adaptive scan attributes */
+    GSCAN_ATTRIBUTE_BUCKET_STEP_COUNT = GSCAN_ATTR_SET12,
+    GSCAN_ATTRIBUTE_BUCKET_MAX_PERIOD,
+
+    /* ePNO cfg */
+    GSCAN_ATTRIBUTE_EPNO_5G_RSSI_THR = GSCAN_ATTR_SET13,
+    GSCAN_ATTRIBUTE_EPNO_2G_RSSI_THR,
+    GSCAN_ATTRIBUTE_EPNO_INIT_SCORE_MAX,
+    GSCAN_ATTRIBUTE_EPNO_CUR_CONN_BONUS,
+    GSCAN_ATTRIBUTE_EPNO_SAME_NETWORK_BONUS,
+    GSCAN_ATTRIBUTE_EPNO_SECURE_BONUS,
+    GSCAN_ATTRIBUTE_EPNO_5G_BONUS,
+    GSCAN_ATTRIBUTE_MAX
+};
+
+enum gscan_bucket_attributes {
+	GSCAN_ATTRIBUTE_CH_BUCKET_1,
+	GSCAN_ATTRIBUTE_CH_BUCKET_2,
+	GSCAN_ATTRIBUTE_CH_BUCKET_3,
+	GSCAN_ATTRIBUTE_CH_BUCKET_4,
+	GSCAN_ATTRIBUTE_CH_BUCKET_5,
+	GSCAN_ATTRIBUTE_CH_BUCKET_6,
+	GSCAN_ATTRIBUTE_CH_BUCKET_7
+};
+
+enum gscan_ch_attributes {
+	GSCAN_ATTRIBUTE_CH_ID_1,
+	GSCAN_ATTRIBUTE_CH_ID_2,
+	GSCAN_ATTRIBUTE_CH_ID_3,
+	GSCAN_ATTRIBUTE_CH_ID_4,
+	GSCAN_ATTRIBUTE_CH_ID_5,
+	GSCAN_ATTRIBUTE_CH_ID_6,
+	GSCAN_ATTRIBUTE_CH_ID_7
+};
+
+enum rtt_attributes {
+	RTT_ATTRIBUTE_TARGET_CNT,
+	RTT_ATTRIBUTE_TARGET_INFO,
+	RTT_ATTRIBUTE_TARGET_MAC,
+	RTT_ATTRIBUTE_TARGET_TYPE,
+	RTT_ATTRIBUTE_TARGET_PEER,
+	RTT_ATTRIBUTE_TARGET_CHAN,
+	RTT_ATTRIBUTE_TARGET_PERIOD,
+	RTT_ATTRIBUTE_TARGET_NUM_BURST,
+	RTT_ATTRIBUTE_TARGET_NUM_FTM_BURST,
+	RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTM,
+	RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTMR,
+	RTT_ATTRIBUTE_TARGET_LCI,
+	RTT_ATTRIBUTE_TARGET_LCR,
+	RTT_ATTRIBUTE_TARGET_BURST_DURATION,
+	RTT_ATTRIBUTE_TARGET_PREAMBLE,
+	RTT_ATTRIBUTE_TARGET_BW,
+	RTT_ATTRIBUTE_RESULTS_COMPLETE = 30,
+	RTT_ATTRIBUTE_RESULTS_PER_TARGET,
+	RTT_ATTRIBUTE_RESULT_CNT,
+	RTT_ATTRIBUTE_RESULT
+};
+
+enum wifi_rssi_monitor_attr {
+	RSSI_MONITOR_ATTRIBUTE_MAX_RSSI,
+	RSSI_MONITOR_ATTRIBUTE_MIN_RSSI,
+	RSSI_MONITOR_ATTRIBUTE_START
+};
+
+enum debug_attributes {
+	DEBUG_ATTRIBUTE_GET_DRIVER,
+	DEBUG_ATTRIBUTE_GET_FW,
+	DEBUG_ATTRIBUTE_RING_ID,
+	DEBUG_ATTRIBUTE_RING_NAME,
+	DEBUG_ATTRIBUTE_RING_FLAGS,
+	DEBUG_ATTRIBUTE_LOG_LEVEL,
+	DEBUG_ATTRIBUTE_LOG_TIME_INTVAL,
+	DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE,
+	DEBUG_ATTRIBUTE_FW_DUMP_LEN,
+	DEBUG_ATTRIBUTE_FW_DUMP_DATA,
+	DEBUG_ATTRIBUTE_RING_DATA,
+	DEBUG_ATTRIBUTE_RING_STATUS,
+	DEBUG_ATTRIBUTE_RING_NUM,
+	DEBUG_ATTRIBUTE_DRIVER_DUMP_LEN,
+	DEBUG_ATTRIBUTE_DRIVER_DUMP_DATA,
+	DEBUG_ATTRIBUTE_PKT_FATE_NUM,
+	DEBUG_ATTRIBUTE_PKT_FATE_DATA
+};
+
+enum mkeep_alive_attributes {
+	MKEEP_ALIVE_ATTRIBUTE_ID,
+	MKEEP_ALIVE_ATTRIBUTE_IP_PKT,
+	MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN,
+	MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR,
+	MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR,
+	MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC
+};
+enum apf_attributes {
+	APF_ATTRIBUTE_VERSION,
+	APF_ATTRIBUTE_MAX_LEN,
+	APF_ATTRIBUTE_PROGRAM,
+	APF_ATTRIBUTE_PROGRAM_LEN
+};
+
+typedef enum wl_vendor_event {
+	BRCM_VENDOR_EVENT_UNSPEC,
+	BRCM_VENDOR_EVENT_PRIV_STR,
+	GOOGLE_GSCAN_SIGNIFICANT_EVENT,
+	GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT,
+	GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+	GOOGLE_SCAN_FULL_RESULTS_EVENT,
+	GOOGLE_RTT_COMPLETE_EVENT,
+	GOOGLE_SCAN_COMPLETE_EVENT,
+	GOOGLE_GSCAN_GEOFENCE_LOST_EVENT,
+	GOOGLE_SCAN_EPNO_EVENT,
+	GOOGLE_DEBUG_RING_EVENT,
+	GOOGLE_FW_DUMP_EVENT,
+	GOOGLE_PNO_HOTSPOT_FOUND_EVENT,
+	GOOGLE_RSSI_MONITOR_EVENT,
+	GOOGLE_MKEEP_ALIVE_EVENT,
+	/*
+	 * BRCM specific events should be placed after
+	 * the Generic events so that enums don't mismatch
+	 * between the DHD and HAL
+	 */
+	GOOGLE_NAN_EVENT_ENABLED = 150,
+	GOOGLE_NAN_EVENT_DISABLED,
+	GOOGLE_NAN_EVENT_PUBLISH_TERMINATED,
+	GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH,
+	GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH,
+	GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED,
+	GOOGLE_NAN_EVENT_DE_EVENT,
+	GOOGLE_NAN_EVENT_FOLLOWUP,
+	GOOGLE_NAN_EVENT_TCA,
+	GOOGLE_NAN_EVENT_DATA_IF_ADD,
+	GOOGLE_NAN_EVENT_DATA_PATH_OPEN,
+	GOOGLE_NAN_EVENT_UNKNOWN
+} wl_vendor_event_t;
+
+enum andr_wifi_attr {
+	ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
+	ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
+	ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI,
+	ANDR_WIFI_ATTRIBUTE_NODFS_SET,
+	ANDR_WIFI_ATTRIBUTE_COUNTRY,
+	ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE,
+	ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE
+};
+
+typedef enum wl_vendor_gscan_attribute {
+	ATTR_START_GSCAN,
+	ATTR_STOP_GSCAN,
+	ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */
+	ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */
+	ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */
+	ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */
+	ATTR_GET_GSCAN_CAPABILITIES_ID,
+    /* Add more sub commands here */
+	ATTR_GSCAN_MAX
+} wl_vendor_gscan_attribute_t;
+
+typedef enum gscan_batch_attribute {
+	ATTR_GSCAN_BATCH_BESTN,
+	ATTR_GSCAN_BATCH_MSCAN,
+	ATTR_GSCAN_BATCH_BUFFER_THRESHOLD
+} gscan_batch_attribute_t;
+
+typedef enum gscan_geofence_attribute {
+	ATTR_GSCAN_NUM_HOTLIST_BSSID,
+	ATTR_GSCAN_HOTLIST_BSSID
+} gscan_geofence_attribute_t;
+
+typedef enum gscan_complete_event {
+	WIFI_SCAN_COMPLETE,
+	WIFI_SCAN_THRESHOLD_NUM_SCANS,
+	WIFI_SCAN_BUFFER_THR_BREACHED
+} gscan_complete_event_t;
+
+#ifdef DHD_WAKE_STATUS
+enum wake_stat_attributes {
+	WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT,
+	WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE,
+	WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT,
+	WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED,
+	WAKE_STAT_ATTRIBUTE_TOTAL_DRIVER_FW,
+	WAKE_STAT_ATTRIBUTE_DRIVER_FW_WAKE,
+	WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT,
+	WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT_USED,
+	WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE,
+	WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT,
+	WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT,
+	WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT,
+	WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT,
+	WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT,
+	WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA,
+	WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA,
+	WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS,
+	WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
+	WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
+	WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT
+};
+
+typedef struct rx_data_cnt_details_t {
+	int rx_unicast_cnt;		/* Total rx unicast packets which woke up the host */
+	int rx_multicast_cnt;   /* Total rx multicast packets which woke up the host */
+	int rx_broadcast_cnt;   /* Total rx broadcast packets which woke up the host */
+} RX_DATA_WAKE_CNT_DETAILS;
+
+typedef struct rx_wake_pkt_type_classification_t {
+	int icmp_pkt;   /* wake icmp packet count */
+	int icmp6_pkt;  /* wake icmp6 packet count */
+	int icmp6_ra;   /* wake icmp6 RA packet count */
+	int icmp6_na;   /* wake icmp6 NA packet count */
+	int icmp6_ns;   /* wake icmp6 NS packet count */
+} RX_WAKE_PKT_TYPE_CLASSFICATION;
+
+typedef struct rx_multicast_cnt_t {
+	int ipv4_rx_multicast_addr_cnt; /* Rx wake packet was ipv4 multicast */
+	int ipv6_rx_multicast_addr_cnt; /* Rx wake packet was ipv6 multicast */
+	int other_rx_multicast_addr_cnt; /* Rx wake packet was non-ipv4 and non-ipv6 */
+} RX_MULTICAST_WAKE_DATA_CNT;
+
+typedef struct wlan_driver_wake_reason_cnt_t {
+	int total_cmd_event_wake;    /* Total count of cmd event wakes */
+	int *cmd_event_wake_cnt;     /* Individual wake count array, each index a reason */
+	int cmd_event_wake_cnt_sz;   /* Max number of cmd event wake reasons */
+	int cmd_event_wake_cnt_used; /* Number of cmd event wake reasons specific to the driver */
+	int total_driver_fw_local_wake;    /* Total count of driver/fw wakes, for local reasons */
+	int *driver_fw_local_wake_cnt;     /* Individual wake count array, each index a reason */
+	int driver_fw_local_wake_cnt_sz;   /* Max number of local driver/fw wake reasons */
+	/* Number of local driver/fw wake reasons specific to the driver */
+	int driver_fw_local_wake_cnt_used;
+	int total_rx_data_wake;     /* Total rx data packets that woke up the host */
+	RX_DATA_WAKE_CNT_DETAILS rx_wake_details;
+	RX_WAKE_PKT_TYPE_CLASSFICATION rx_wake_pkt_classification_info;
+	RX_MULTICAST_WAKE_DATA_CNT rx_multicast_wake_pkt_info;
+} WLAN_DRIVER_WAKE_REASON_CNT;
+#endif /* DHD_WAKE_STATUS */
+
+/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
+#define BRCM_VENDOR_SCMD_CAPA	"cap"
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+extern int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd);
+extern int wl_cfgvendor_detach(struct wiphy *wiphy);
+extern int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+                  struct net_device *dev, int event_id, const void  *data, int len);
+extern int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+                struct net_device *dev, void  *data, int len, wl_vendor_event_t event);
+#else
+static INLINE int wl_cfgvendor_attach(struct wiphy *wiphy,
+		dhd_pub_t *dhd) { UNUSED_PARAMETER(wiphy); UNUSED_PARAMETER(dhd); return 0; }
+static INLINE int wl_cfgvendor_detach(struct wiphy *wiphy) { UNUSED_PARAMETER(wiphy); return 0; }
+#endif /*  (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_ASSIGN_VALUE(normal_structure, member, value)	\
+	do { \
+		if (compat_task_state) {	\
+			compat_ ## normal_structure.member = value; \
+		} else { \
+			normal_structure.member = value; \
+		} \
+	} while (0)
+#else
+#define COMPAT_ASSIGN_VALUE(normal_structure, member, value) \
+	normal_structure.member = value;
+#endif /* CONFIG_COMPAT */
+
+#endif /* _wl_cfgvendor_h_ */
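A minimal usage sketch of the COMPAT_ASSIGN_VALUE() helper defined above (illustration only, not part of the patch): the reply structure, its compat_ mirror and the compat_task_state flag are assumed names; the macro itself only selects which of the two structures receives the assignment.

/* Illustrative sketch -- the identifiers below are assumptions, not patch code. */
#include <linux/types.h>
#include <linux/compat.h>

struct wake_cnt_reply { int total_cmd_event_wake; };

static void example_compat_assign(void)
{
	struct wake_cnt_reply wake_cnt_reply, compat_wake_cnt_reply;
	bool compat_task_state = in_compat_syscall();	/* 32-bit userspace caller? */

	/* With CONFIG_COMPAT this stores into compat_wake_cnt_reply when
	 * compat_task_state is true and into wake_cnt_reply otherwise;
	 * without CONFIG_COMPAT it is a plain assignment to wake_cnt_reply. */
	COMPAT_ASSIGN_VALUE(wake_cnt_reply, total_cmd_event_wake, 1);

	(void)compat_wake_cnt_reply;
}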
diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h
new file mode 100644
index 0000000..dc05b07
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -0,0 +1,246 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_dbg.h 664795 2016-10-13 20:13:32Z $
+ */
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+#if defined(EVENT_LOG_COMPILE)
+#include <event_log.h>
+#endif
+
+/* wl_msg_level is a bit vector with defs in wlioctl.h */
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_TIMESTAMP()
+
+#ifdef ENABLE_CORECAPTURE
+#define MAX_BACKTRACE_DEPTH 32
+extern int wl_print_backtrace(const char * prefix, void * i_backtrace, int i_backtrace_depth);
+#else
+#define wl_print_backtrace(a, b, c)
+#endif /* ENABLE_CORECAPTURE */
+
+
+#define WIFICC_CAPTURE(_reason)
+#define WIFICC_LOGDEBUGIF(_flags, _args)
+#define WIFICC_LOGDEBUG(_args)
+
+#define WL_PRINT(args)		do { WL_TIMESTAMP(); printf args; } while (0)
+
+#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN)
+#define _WL_SRSCAN(fmt, ...)	EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__)
+#define WL_SRSCAN(args)		_WL_SRSCAN args
+#else
+#define WL_SRSCAN(args)
+#endif
+
+#if defined(BCMCONDITIONAL_LOGGING)
+
+/* Ideally this should be some include file that vendors can include to conditionalize logging */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x) x
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+#define WL_ERROR(args)		do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
+					else WIFICC_LOGDEBUG(args); } while (0)
+#define WL_TRACE(args)
+#define WL_PRHDRS_MSG(args)
+#define WL_PRHDRS(i, p, f, t, r, l)
+#define WL_PRPKT(m, b, n)
+#define WL_INFORM(args)
+#define WL_TMP(args)
+#define WL_OID(args)
+#define WL_RATE(args)		do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0)
+#define WL_ASSOC(args)		do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args); \
+					else WIFICC_LOGDEBUG(args);} while (0)
+#define WL_PRUSR(m, b, n)
+#define WL_PS(args)		do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0)
+
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args)	do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args); \
+					else WIFICC_LOGDEBUG(args);} while (0)
+
+#define WL_MPC(args)
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_PROTO(args)
+
+#define	WL_CAC(args)		do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+
+#define WL_DFS(args)
+#define WL_WOWL(args)
+#define WL_DPT(args)
+#define WL_ASSOC_OR_DPT(args)
+#define WL_SCAN(args)		do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+#define WL_MCNX(args)
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_WFDS(m, b, n)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_TXBF(args)
+#define WL_MUMIMO(args)
+#define WL_P2PO(args)
+#define WL_ROAM(args)
+#define WL_WNM(args)
+
+#ifdef WLMSG_MESH
+#define WL_MESH(args)       WL_PRINT(args)
+#define WL_MESH_AMPE(args)  WL_PRINT(args)
+#define WL_MESH_ROUTE(args) WL_PRINT(args)
+#define WL_MESH_BCN(args)
+#else
+#define WL_MESH(args)
+#define WL_MESH_AMPE(args)
+#define WL_MESH_ROUTE(args)
+#define WL_MESH_BCN(args)
+#endif
+
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_ERR(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON()       0
+#define WL_AMPDU_HW_ON()        0
+#define WL_AMPDU_HWTXS_ON()     0
+
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#define WL_PCIE(args)
+#define WL_TSLOG(w, s, i, j)
+#define WL_FBT(args)
+
+#define WL_ERROR_ON()		(wl_msg_level & WL_ERROR_VAL)
+#define WL_TRACE_ON()		0
+#define WL_PRHDRS_ON()		0
+#define WL_PRPKT_ON()		0
+#define WL_INFORM_ON()		0
+#define WL_TMP_ON()		0
+#define WL_OID_ON()		0
+#define WL_RATE_ON()		(wl_msg_level & WL_RATE_VAL)
+#define WL_ASSOC_ON()		(wl_msg_level & WL_ASSOC_VAL)
+#define WL_PRUSR_ON()		0
+#define WL_PS_ON()		(wl_msg_level & WL_PS_VAL)
+#define WL_PORT_ON()		0
+#define WL_WSEC_ON()		0
+#define WL_WSEC_DUMP_ON()	0
+#define WL_MPC_ON()		0
+#define WL_REGULATORY_ON()	(wl_msg_level & WL_REGULATORY_VAL)
+#define WL_APSTA_ON()		0
+#define WL_DFS_ON()		0
+#define WL_MBSS_ON()		0
+#define WL_CAC_ON()		(wl_msg_level & WL_CAC_VAL)
+#define WL_AMPDU_ON()		0
+#define WL_DPT_ON()		0
+#define WL_WOWL_ON()		0
+#define WL_SCAN_ON()		(wl_msg_level2 & WL_SCAN_VAL)
+#define WL_P2P_ON()		0
+#define WL_ITFR_ON()		0
+#define WL_MCHAN_ON()		0
+#define WL_TDLS_ON()		0
+#define WL_MCNX_ON()		0
+#define WL_PROT_ON()		0
+#define WL_PSTA_ON()		0
+#define WL_TRF_MGMT_ON()	0
+#define WL_LPC_ON()		0
+#define WL_L2FILTER_ON()	0
+#define WL_TXBF_ON()		0
+#define WL_P2PO_ON()		0
+#define WL_TSLOG_ON()		0
+#define WL_WNM_ON()		0
+#define WL_PCIE_ON()		0
+#define WL_MUMIMO_ON()		0
+#define WL_MESH_ON()		0
+
+#else /* !BCMCONDITIONAL_LOGGING */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+
+#define	WL_ERROR(args)
+
+#define	WL_TRACE(args)
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#ifdef WLMSG_WSEC
+#define WL_WSEC(args)		WL_PRINT(args)
+#define WL_WSEC_DUMP(args)	WL_PRINT(args)
+#else
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#endif
+#define WL_PCIE(args)		do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
+#define WL_PCIE_ON()		(wl_msg_level2 & WL_PCIE_VAL)
+#define WL_PFN(args)      do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0)
+#define WL_PFN_ON()		(wl_msg_level & WL_PFN_VAL)
+#endif /* BCMCONDITIONAL_LOGGING */
+
+#define DBGERRONLY(x)
+
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+#endif /* _wl_dbg_h_ */
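A short sketch (not part of the patch) of how the logging macros above are called: the argument list is doubly parenthesised because the inner parentheses become the printf argument list inside WL_PRINT(), and whether anything is printed depends on the WL_*_VAL bits in wl_msg_level / wl_msg_level2 (defined in wlioctl.h, as noted above).

/* Illustrative sketch only. */
static void example_wl_dbg_usage(void)
{
	int status = -1;	/* assumed value for the example */

	wl_msg_level |= WL_ERROR_VAL;	/* enable error messages */

	/* Under BCMCONDITIONAL_LOGGING this expands roughly to:
	 *   if (wl_msg_level & WL_ERROR_VAL)
	 *           printf("assoc failed, status %d\n", status);
	 * In the non-conditional branch above, WL_ERROR() compiles to nothing. */
	WL_ERROR(("assoc failed, status %d\n", status));
}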
diff --git a/drivers/net/wireless/bcmdhd/wl_escan.c b/drivers/net/wireless/bcmdhd/wl_escan.c
new file mode 100644
index 0000000..a612957
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_escan.c
@@ -0,0 +1,1592 @@
+
+#if defined(WL_ESCAN)
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <wlioctl.h>
+#include <wl_android.h>
+#include <wl_iw.h>
+#include <wl_escan.h>
+#include <dhd_config.h>
+
+/* message levels */
+#define ESCAN_ERROR_LEVEL	0x0001
+#define ESCAN_SCAN_LEVEL	0x0002
+#define ESCAN_TRACE_LEVEL	0x0004
+
+#define ESCAN_ERROR(x) \
+	do { \
+		if (iw_msg_level & ESCAN_ERROR_LEVEL) { \
+			printf(KERN_ERR "ESCAN-ERROR) %s : ", __func__);	\
+			printf x; \
+		} \
+	} while (0)
+#define ESCAN_SCAN(x) \
+	do { \
+		if (iw_msg_level & ESCAN_SCAN_LEVEL) { \
+			printf(KERN_ERR "ESCAN-SCAN) %s : ", __func__);	\
+			printf x; \
+		} \
+	} while (0)
+#define ESCAN_TRACE(x) \
+	do { \
+		if (iw_msg_level & ESCAN_TRACE_LEVEL) { \
+			printf(KERN_ERR "ESCAN-TRACE) %s : ", __func__);	\
+			printf x; \
+		} \
+	} while (0)
+
+/* IOCTL swapping mode for Big Endian host with Little Endian dongle.  Default to off */
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#define WL_EXTRA_BUF_MAX 2048
+
+#define wl_escan_get_buf(a) ((wl_scan_results_t *) (a)->escan_buf)
+
+#define for_each_bss(list, bss, __i)	\
+	for (__i = 0; __i < list->count && __i < IW_MAX_AP; __i++, bss = next_bss(list, bss))
+
+#define wl_escan_set_sync_id(a) ((a) = htod16(0x1234))
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#define BUF_OVERFLOW_MGMT_COUNT 3
+typedef struct {
+	int RSSI;
+	int length;
+	struct ether_addr BSSID;
+} removal_element_t;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+	chanspec_t chspec;
+
+	/* get the channel number */
+	chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+	/* convert the band */
+	if (LCHSPEC_IS2G(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BAND_2G;
+	} else {
+		chspec |= WL_CHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (LCHSPEC_IS20(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BW_20;
+	} else {
+		chspec |= WL_CHANSPEC_BW_40;
+		if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+			chspec |= WL_CHANSPEC_CTL_SB_L;
+		} else {
+			chspec |= WL_CHANSPEC_CTL_SB_U;
+		}
+	}
+
+	if (wf_chspec_malformed(chspec)) {
+		ESCAN_ERROR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	return chspec;
+}
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+	chanspec_t lchspec;
+
+	if (wf_chspec_malformed(chspec)) {
+		ESCAN_ERROR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	/* get the channel number */
+	lchspec = CHSPEC_CHANNEL(chspec);
+
+	/* convert the band */
+	if (CHSPEC_IS2G(chspec)) {
+		lchspec |= WL_LCHANSPEC_BAND_2G;
+	} else {
+		lchspec |= WL_LCHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (CHSPEC_IS20(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_20;
+		lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+	} else if (CHSPEC_IS40(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_40;
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+			lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+		} else {
+			lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+		}
+	} else {
+		/* cannot express the bandwidth */
+		char chanbuf[CHANSPEC_STR_LEN];
+		ESCAN_ERROR((
+		        "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+		        "to pre-11ac format\n",
+		        wf_chspec_ntoa(chspec, chanbuf), chspec));
+		return INVCHANSPEC;
+	}
+
+	return lchspec;
+}
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_driver_to_host(int ioctl_ver, chanspec_t chanspec)
+{
+	chanspec = dtohchanspec(chanspec);
+	if (ioctl_ver == 1) {
+		chanspec = wl_chspec_from_legacy(chanspec);
+	}
+
+	return chanspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec)
+{
+	if (ioctl_ver == 1) {
+		chanspec = wl_chspec_to_legacy(chanspec);
+		if (chanspec == INVCHANSPEC) {
+			return chanspec;
+		}
+	}
+	chanspec = htodchanspec(chanspec);
+
+	return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_ch_host_to_driver(int ioctl_ver, s32 bssidx, u16 channel)
+{
+	chanspec_t chanspec;
+
+	chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		chanspec |= WL_CHANSPEC_BAND_2G;
+	else
+		chanspec |= WL_CHANSPEC_BAND_5G;
+
+	chanspec |= WL_CHANSPEC_BW_20;
+
+	chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+	return wl_chspec_host_to_driver(ioctl_ver, chanspec);
+}
+
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+	return bss = bss ?
+		(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		return 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		return 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		return 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		return 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		return 4;
+	else
+		return 5;
+}
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
+static unsigned long wl_lock_eq(struct wl_escan_info *escan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&escan->eq_lock, flags);
+	return flags;
+}
+
+static void wl_unlock_eq(struct wl_escan_info *escan, unsigned long flags)
+{
+	spin_unlock_irqrestore(&escan->eq_lock, flags);
+}
+
+static void wl_init_eq(struct wl_escan_info *escan)
+{
+	spin_lock_init(&escan->eq_lock);
+	INIT_LIST_HEAD(&escan->eq_list);
+}
+
+static void wl_flush_eq(struct wl_escan_info *escan)
+{
+	struct escan_event_q *e;
+	unsigned long flags;
+
+	flags = wl_lock_eq(escan);
+	while (!list_empty_careful(&escan->eq_list)) {
+		BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list);
+		list_del(&e->eq_list);
+		kfree(e);
+	}
+	wl_unlock_eq(escan, flags);
+}
+
+static struct escan_event_q *wl_deq_event(struct wl_escan_info *escan)
+{
+	struct escan_event_q *e = NULL;
+	unsigned long flags;
+
+	flags = wl_lock_eq(escan);
+	if (likely(!list_empty(&escan->eq_list))) {
+		BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list);
+		list_del(&e->eq_list);
+	}
+	wl_unlock_eq(escan, flags);
+
+	return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct wl_escan_info *escan, struct net_device *ndev, u32 event,
+	const wl_event_msg_t *msg, void *data)
+{
+	struct escan_event_q *e;
+	s32 err = 0;
+	uint32 evtq_size;
+	uint32 data_len;
+	unsigned long flags;
+	gfp_t aflags;
+
+	data_len = 0;
+	if (data)
+		data_len = ntoh32(msg->datalen);
+	evtq_size = sizeof(struct escan_event_q) + data_len;
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	e = kzalloc(evtq_size, aflags);
+	if (unlikely(!e)) {
+		ESCAN_ERROR(("event alloc failed\n"));
+		return -ENOMEM;
+	}
+	e->etype = event;
+	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+	if (data)
+		memcpy(e->edata, data, data_len);
+	flags = wl_lock_eq(escan);
+	list_add_tail(&e->eq_list, &escan->eq_list);
+	wl_unlock_eq(escan, flags);
+
+	return err;
+}
+
+static void wl_put_event(struct escan_event_q *e)
+{
+	kfree(e);
+}
+
+static void wl_wakeup_event(struct wl_escan_info *escan)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)(escan->pub);
+
+	if (dhd->up && (escan->event_tsk.thr_pid >= 0)) {
+		up(&escan->event_tsk.sema);
+	}
+}
+
+static s32 wl_escan_event_handler(void *data)
+{
+	struct wl_escan_info *escan = NULL;
+	struct escan_event_q *e;
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+
+	escan = (struct wl_escan_info *)tsk->parent;
+
+	printf("tsk Enter, tsk = 0x%p\n", tsk);
+
+	while (down_interruptible (&tsk->sema) == 0) {
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk->terminated) {
+			break;
+		}
+		while (escan && (e = wl_deq_event(escan))) {
+			ESCAN_TRACE(("dev=%p, event type (%d), ifidx: %d bssidx: %d \n",
+				escan->dev, e->etype, e->emsg.ifidx, e->emsg.bsscfgidx));
+
+			if (e->emsg.ifidx > WL_MAX_IFS) {
+				ESCAN_ERROR(("Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+				goto fail;
+			}
+
+			if (escan->dev && escan->evt_handler[e->etype]) {
+				dhd_pub_t *dhd = (struct dhd_pub *)(escan->pub);
+				if (dhd->busstate == DHD_BUS_DOWN) {
+					ESCAN_ERROR((": BUS is DOWN.\n"));
+				} else {
+					escan->evt_handler[e->etype](escan, &e->emsg, e->edata);
+				}
+			} else {
+				ESCAN_TRACE(("Unknown Event (%d): ignoring\n", e->etype));
+			}
+fail:
+			wl_put_event(e);
+			DHD_EVENT_WAKE_UNLOCK(escan->pub);
+		}
+	}
+	printf("%s: was terminated\n", __FUNCTION__);
+	complete_and_exit(&tsk->completed, 0);
+	return 0;
+}
+
+void
+wl_escan_event(struct net_device *dev, const wl_event_msg_t * e, void *data)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_escan_info *escan = dhd->escan;
+	u32 event_type = ntoh32(e->event_type);
+
+	if (!escan || !escan->dev) {
+		return;
+	}
+
+	if (escan->event_tsk.thr_pid == -1) {
+		ESCAN_ERROR(("Event handler is not created\n"));
+		return;
+	}
+
+	if (escan == NULL) {
+		ESCAN_ERROR(("Stale event ignored\n"));
+		return;
+	}
+
+	if (event_type == WLC_E_PFN_NET_FOUND) {
+		ESCAN_TRACE(("PNOEVENT: PNO_NET_FOUND\n"));
+	}
+	else if (event_type == WLC_E_PFN_NET_LOST) {
+		ESCAN_TRACE(("PNOEVENT: PNO_NET_LOST\n"));
+	}
+
+	DHD_EVENT_WAKE_LOCK(escan->pub);
+	if (likely(!wl_enq_event(escan, dev, event_type, e, data))) {
+		wl_wakeup_event(escan);
+	} else {
+		DHD_EVENT_WAKE_UNLOCK(escan->pub);
+	}
+}
+
+static s32 wl_escan_inform_bss(struct wl_escan_info *escan)
+{
+	struct wl_scan_results *bss_list;
+	s32 err = 0;
+#if defined(RSSIAVG)
+	int rssi;
+#endif
+
+	bss_list = escan->bss_list;
+
+	/* Delete disconnected cache */
+#if defined(BSSCACHE)
+	wl_delete_disconnected_bss_cache(&escan->g_bss_cache_ctrl, (u8*)&escan->disconnected_bssid);
+#if defined(RSSIAVG)
+	wl_delete_disconnected_rssi_cache(&escan->g_rssi_cache_ctrl, (u8*)&escan->disconnected_bssid);
+#endif
+#endif
+
+	/* Update cache */
+#if defined(RSSIAVG)
+	wl_update_rssi_cache(&escan->g_rssi_cache_ctrl, bss_list);
+	if (!in_atomic())
+		wl_update_connected_rssi_cache(escan->dev, &escan->g_rssi_cache_ctrl, &rssi);
+#endif
+#if defined(BSSCACHE)
+	wl_update_bss_cache(&escan->g_bss_cache_ctrl,
+#if defined(RSSIAVG)
+		&escan->g_rssi_cache_ctrl,
+#endif
+		bss_list);
+#endif
+
+	/* delete dirty cache */
+#if defined(RSSIAVG)
+	wl_delete_dirty_rssi_cache(&escan->g_rssi_cache_ctrl);
+	wl_reset_rssi_cache(&escan->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+	wl_delete_dirty_bss_cache(&escan->g_bss_cache_ctrl);
+	wl_reset_bss_cache(&escan->g_bss_cache_ctrl);
+	if (escan->autochannel)
+		wl_ext_get_best_channel(escan->dev, &escan->g_bss_cache_ctrl,
+			escan->ioctl_ver, &escan->best_2g_ch, &escan->best_5g_ch);
+#else
+	if (escan->autochannel)
+		wl_ext_get_best_channel(escan->dev, bss_list, escan->ioctl_ver,
+			&escan->best_2g_ch, &escan->best_5g_ch);
+#endif
+
+	ESCAN_TRACE(("scanned AP count (%d)\n", bss_list->count));
+
+	return err;
+}
+
+static wl_scan_params_t *
+wl_escan_alloc_params(struct wl_escan_info *escan, int channel,
+	int nprobes, int *out_params_size)
+{
+	wl_scan_params_t *params;
+	int params_size;
+	int num_chans;
+	int bssidx = 0;
+
+	*out_params_size = 0;
+
+	/* Our scan params only need space for 1 channel and 0 ssids */
+	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		ESCAN_ERROR(("mem alloc failed (%d bytes)\n", params_size));
+		return params;
+	}
+	memset(params, 0, params_size);
+	params->nprobes = nprobes;
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	if (channel == -1)
+		params->channel_list[0] = htodchanspec(channel);
+	else
+		params->channel_list[0] = wl_ch_host_to_driver(escan->ioctl_ver, bssidx, channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
+
+static void wl_escan_abort(struct wl_escan_info *escan)
+{
+	wl_scan_params_t *params = NULL;
+	s32 params_size = 0;
+	s32 err = BCME_OK;
+	if (!in_atomic()) {
+		/* Our scan params only need space for 1 channel and 0 ssids */
+		params = wl_escan_alloc_params(escan, -1, 0, &params_size);
+		if (params == NULL) {
+			ESCAN_ERROR(("scan params allocation failed \n"));
+			err = -ENOMEM;
+		} else {
+			/* Do a scan abort to stop the driver's scan engine */
+			err = wldev_ioctl(escan->dev, WLC_SCAN, params, params_size, true);
+			if (err < 0) {
+				ESCAN_ERROR(("scan abort failed\n"));
+			}
+			kfree(params);
+		}
+	}
+}
+
+static s32 wl_notify_escan_complete(struct wl_escan_info *escan, bool fw_abort)
+{
+	s32 err = BCME_OK;
+	int cmd = 0;
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+
+	memset(extra, 0, sizeof(extra));
+#endif
+
+	ESCAN_TRACE(("Enter\n"));
+
+	if (!escan || !escan->dev) {
+		ESCAN_ERROR(("escan or dev is null\n"));
+		err = BCME_ERROR;
+		goto out;
+	}
+	if (fw_abort && !in_atomic())
+		wl_escan_abort(escan);
+
+	if (timer_pending(&escan->scan_timeout))
+		del_timer_sync(&escan->scan_timeout);
+#if defined(ESCAN_RESULT_PATCH)
+	escan->bss_list = wl_escan_get_buf(escan);
+	wl_escan_inform_bss(escan);
+#endif /* ESCAN_RESULT_PATCH */
+
+#if WIRELESS_EXT > 13
+#if WIRELESS_EXT > 14
+	cmd = SIOCGIWSCAN;
+#endif
+	ESCAN_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+	// terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)"
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (cmd) {
+		if (cmd == SIOCGIWSCAN) {
+			wireless_send_event(escan->dev, cmd, &wrqu, NULL);
+		} else
+			wireless_send_event(escan->dev, cmd, &wrqu, extra);
+	}
+#endif
+
+out:
+	return err;
+}
+
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+static void
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+{
+	int idx;
+	for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+		int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+		if (bss->RSSI < candidate[idx].RSSI) {
+			if (len)
+				memcpy(&candidate[idx + 1], &candidate[idx],
+					sizeof(removal_element_t) * len);
+			candidate[idx].RSSI = bss->RSSI;
+			candidate[idx].length = bss->length;
+			memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN);
+			return;
+		}
+	}
+}
+
+static void
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+	wl_bss_info_t *bi)
+{
+	int idx1, idx2;
+	int total_delete_len = 0;
+	for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+		int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+		wl_bss_info_t *bss = NULL;
+		if (candidate[idx1].RSSI >= bi->RSSI)
+			continue;
+		for (idx2 = 0; idx2 < list->count; idx2++) {
+			bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+				list->bss_info;
+			if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+				candidate[idx1].RSSI == bss->RSSI &&
+				candidate[idx1].length == dtoh32(bss->length)) {
+				u32 delete_len = dtoh32(bss->length);
+				ESCAN_TRACE(("delete scan info of " MACDBG " to add new AP\n",
+					MAC2STRDBG(bss->BSSID.octet)));
+				if (idx2 < list->count -1) {
+					memmove((u8 *)bss, (u8 *)bss + delete_len,
+						list->buflen - cur_len - delete_len);
+				}
+				list->buflen -= delete_len;
+				list->count--;
+				total_delete_len += delete_len;
+				/* if delete_len is greater than or equal to result length */
+				if (total_delete_len >= bi->length) {
+					return;
+				}
+				break;
+			}
+			cur_len += dtoh32(bss->length);
+		}
+	}
+}
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+static s32 wl_escan_handler(struct wl_escan_info *escan,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_bss_info_t *bi;
+	wl_escan_result_t *escan_result;
+	wl_bss_info_t *bss = NULL;
+	wl_scan_results_t *list;
+	u32 bi_length;
+	u32 i;
+	u16 channel;
+
+	ESCAN_TRACE(("enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+
+	mutex_lock(&escan->usr_sync);
+	escan_result = (wl_escan_result_t *)data;
+
+	if (escan->escan_state != ESCAN_STATE_SCANING) {
+		ESCAN_TRACE(("Not my scan\n"));
+		goto exit;
+	}
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		ESCAN_TRACE(("WLC_E_STATUS_PARTIAL \n"));
+		if (!escan_result) {
+			ESCAN_ERROR(("Invalid escan result (NULL pointer)\n"));
+			goto exit;
+		}
+		if (dtoh16(escan_result->bss_count) != 1) {
+			ESCAN_ERROR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		if (!bi) {
+			ESCAN_ERROR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			ESCAN_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+
+		/* +++++ terence 20130524: skip invalid bss */
+		channel =
+			bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+		if (!dhd_conf_match_channel(escan->pub, channel))
+			goto exit;
+		/* ----- terence 20130524: skip invalid bss */
+
+		{
+			int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+			removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+			int remove_lower_rssi = FALSE;
+
+			bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+			list = wl_escan_get_buf(escan);
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+				remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+			ESCAN_TRACE(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID,
+				MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length));
+			for (i = 0; i < list->count; i++) {
+				bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+					: list->bss_info;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+				ESCAN_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+					bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+					i, bss->RSSI, list->count));
+
+				if (remove_lower_rssi)
+					wl_cfg80211_find_removal_candidate(bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+					(CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec))
+					== CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bss->chanspec))) &&
+					bi->SSID_len == bss->SSID_len &&
+					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+					/* do not allow beacon data to update
+					 * the data received from a probe response
+					 */
+					if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+						(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+						goto exit;
+
+					ESCAN_TRACE(("%s("MACDBG"), i=%d prev: RSSI %d"
+						" flags 0x%x, new: RSSI %d flags 0x%x\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+						bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+					if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+						/* preserve max RSSI if the measurements are
+						* both on-channel or both off-channel
+						*/
+						ESCAN_TRACE(("%s("MACDBG"), same onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+					} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+						/* preserve the on-channel rssi measurement
+						* if the new measurement is off channel
+						*/
+						ESCAN_TRACE(("%s("MACDBG"), prev onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = bss->RSSI;
+						bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+					}
+					if (dtoh32(bss->length) != bi_length) {
+						u32 prev_len = dtoh32(bss->length);
+
+						ESCAN_TRACE(("bss info replacement"
+							" occurred (bcast:%d -> probresp:%d)\n",
+							bss->ie_length, bi->ie_length));
+						ESCAN_TRACE(("%s("MACDBG"), replacement!(%d -> %d)\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						prev_len, bi_length));
+
+						if (list->buflen - prev_len + bi_length
+							> ESCAN_BUF_SIZE) {
+							ESCAN_ERROR(("Buffer is too small: keep the"
+								" previous result of this AP\n"));
+							/* Only update RSSI */
+							bss->RSSI = bi->RSSI;
+							bss->flags |= (bi->flags
+								& WL_BSS_FLAGS_RSSI_ONCHANNEL);
+							goto exit;
+						}
+
+						if (i < list->count - 1) {
+							/* memory copy required by this case only */
+							memmove((u8 *)bss + bi_length,
+								(u8 *)bss + prev_len,
+								list->buflen - cur_len - prev_len);
+						}
+						list->buflen -= prev_len;
+						list->buflen += bi_length;
+					}
+					list->version = dtoh32(bi->version);
+					memcpy((u8 *)bss, (u8 *)bi, bi_length);
+					goto exit;
+				}
+				cur_len += dtoh32(bss->length);
+			}
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+				wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+				if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+					ESCAN_TRACE(("RSSI(" MACDBG ") is too low (%d) to add to the buffer\n",
+						MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+					goto exit;
+				}
+#else
+				ESCAN_ERROR(("Buffer is too small: ignoring\n"));
+				goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+			}
+
+			if (strlen(bi->SSID) == 0) { // terence: fix for hidden SSID
+				ESCAN_SCAN(("Skip hidden SSID %pM\n", &bi->BSSID));
+				goto exit;
+			}
+
+			memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
+			list->version = dtoh32(bi->version);
+			list->buflen += bi_length;
+			list->count++;
+		}
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		escan->escan_state = ESCAN_STATE_IDLE;
+		ESCAN_TRACE(("ESCAN COMPLETED\n"));
+		escan->bss_list = wl_escan_get_buf(escan);
+		ESCAN_TRACE(("SCAN COMPLETED: scanned AP count=%d\n",
+			escan->bss_list->count));
+		wl_escan_inform_bss(escan);
+		wl_notify_escan_complete(escan, false);
+	} else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+		(status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+		(status == WLC_E_STATUS_NEWASSOC)) {
+		/* Handle all cases of scan abort */
+		escan->escan_state = ESCAN_STATE_IDLE;
+		ESCAN_TRACE(("ESCAN ABORT reason: %d\n", status));
+		escan->bss_list = wl_escan_get_buf(escan);
+		ESCAN_TRACE(("SCAN ABORT: scanned AP count=%d\n",
+			escan->bss_list->count));
+		wl_escan_inform_bss(escan);
+		wl_notify_escan_complete(escan, false);
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		ESCAN_ERROR(("WLC_E_STATUS_TIMEOUT\n"));
+		ESCAN_ERROR(("reason[0x%x]\n", e->reason));
+		if (e->reason == 0xFFFFFFFF) {
+			wl_notify_escan_complete(escan, true);
+		}
+		escan->escan_state = ESCAN_STATE_IDLE;
+	} else {
+		ESCAN_ERROR(("unexpected Escan Event %d : abort\n", status));
+		escan->escan_state = ESCAN_STATE_IDLE;
+		escan->bss_list = wl_escan_get_buf(escan);
+		ESCAN_TRACE(("SCAN ABORTED(UNEXPECTED): scanned AP count=%d\n",
+				escan->bss_list->count));
+		wl_escan_inform_bss(escan);
+		wl_notify_escan_complete(escan, false);
+	}
+exit:
+	mutex_unlock(&escan->usr_sync);
+	return err;
+}
+
+static int
+wl_escan_prep(struct wl_escan_info *escan, wl_uint32_list_t *list,
+	wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+	wl_scan_results_t *results;
+	s32 offset;
+	char *ptr;
+	int i = 0, j = 0;
+	wlc_ssid_t ssid_tmp;
+	u32 n_channels = 0;
+	uint channel;
+	chanspec_t chanspec;
+
+	results = wl_escan_get_buf(escan);
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+	escan->escan_state = ESCAN_STATE_SCANING;
+
+	/* Arm scan timeout timer */
+	mod_timer(&escan->scan_timeout, jiffies + msecs_to_jiffies(WL_ESCAN_TIMER_INTERVAL_MS));
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+
+	n_channels = dtoh32(list->count);
+	/* Copy channel array if applicable */
+	ESCAN_SCAN(("### List of channelspecs to scan ###\n"));
+	if (n_channels > 0) {
+		for (i = 0; i < n_channels; i++) {
+			channel = dtoh32(list->element[i]);
+			if (!dhd_conf_match_channel(escan->pub, channel))
+				continue;
+			chanspec = WL_CHANSPEC_BW_20;
+			if (chanspec == INVCHANSPEC) {
+				ESCAN_ERROR(("Invalid chanspec! Skipping channel\n"));
+				continue;
+			}
+			if (channel <= CH_MAX_2G_CHANNEL) {
+				chanspec |= WL_CHANSPEC_BAND_2G;
+			} else {
+				chanspec |= WL_CHANSPEC_BAND_5G;
+			}
+			params->channel_list[j] = channel;
+			params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+			params->channel_list[j] |= chanspec;
+			ESCAN_SCAN(("Chan : %d, Channel spec: %x \n",
+				channel, params->channel_list[j]));
+			params->channel_list[j] = wl_chspec_host_to_driver(escan->ioctl_ver,
+				params->channel_list[j]);
+			j++;
+		}
+	} else {
+		ESCAN_SCAN(("Scanning all channels\n"));
+	}
+
+	if (ssid && ssid->SSID_len) {
+		/* Copy ssid array if applicable */
+		ESCAN_SCAN(("### List of SSIDs to scan ###\n"));
+		offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		ptr = (char*)params + offset;
+
+		ESCAN_SCAN(("0: Broadcast scan\n"));
+		memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
+		ssid_tmp.SSID_len = 0;
+		memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
+		ptr += sizeof(wlc_ssid_t);
+
+		memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
+		ssid_tmp.SSID_len = ssid->SSID_len;
+		memcpy(ssid_tmp.SSID, ssid->SSID, ssid->SSID_len);
+		memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
+		ptr += sizeof(wlc_ssid_t);
+		ESCAN_SCAN(("1: scan for %s size=%d\n", ssid_tmp.SSID, ssid_tmp.SSID_len));
+		/* Adding mask to channel numbers */
+		params->channel_num =
+	        htod32((2 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	               (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+	}
+	else {
+		ESCAN_SCAN(("Broadcast scan\n"));
+	}
+
+	return err;
+}
+
+static int wl_escan_reset(struct wl_escan_info *escan)
+{
+	if (timer_pending(&escan->scan_timeout))
+		del_timer_sync(&escan->scan_timeout);
+	escan->escan_state = ESCAN_STATE_IDLE;
+
+	return 0;
+}
+
+static void wl_escan_timeout(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	unsigned long data
+#endif
+)
+{
+	wl_event_msg_t msg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct wl_escan_info *escan = from_timer(escan, t, scan_timeout);
+#else
+	struct wl_escan_info *escan = (struct wl_escan_info *)data;
+#endif
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;
+	s32 i;
+	u32 channel;
+
+	bss_list = wl_escan_get_buf(escan);
+	if (!bss_list) {
+		ESCAN_ERROR(("bss_list is null. Didn't receive any partial scan results\n"));
+	} else {
+		ESCAN_ERROR(("%s: scanned AP count (%d)\n", __FUNCTION__, bss_list->count));
+		bi = next_bss(bss_list, bi);
+		for_each_bss(bss_list, bi, i) {
+			channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+			ESCAN_ERROR(("SSID :%s  Channel :%d\n", bi->SSID, channel));
+		}
+	}
+
+	if (!escan->dev) {
+		ESCAN_ERROR(("No dev present\n"));
+		return;
+	}
+
+	bzero(&msg, sizeof(wl_event_msg_t));
+	ESCAN_ERROR(("timer expired\n"));
+
+	msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+	msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+	msg.reason = 0xFFFFFFFF;
+	wl_escan_event(escan->dev, &msg, NULL);
+
+	// terence 20130729: workaround to fix out of memory in firmware
+//	if (dhd_conf_get_chip(dhd_get_pub(dev)) == BCM43362_CHIP_ID) {
+//		ESCAN_ERROR(("Send hang event\n"));
+//		net_os_send_hang_message(dev);
+//	}
+}
+
+int
+wl_escan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_escan_info *escan = dhd->escan;
+	s32 err = BCME_OK;
+	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+	wl_escan_params_t *params = NULL;
+	scb_val_t scbval;
+	static int cnt = 0;
+	wlc_ssid_t ssid;
+	u32 n_channels = 0;
+	wl_uint32_list_t *list;
+	u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	s32 val = 0;
+
+	ESCAN_TRACE(("Enter \n"));
+
+	if (!escan) {
+		ESCAN_ERROR(("device is not ready\n"));
+		return -EIO;
+	}
+	if (escan->escan_state == ESCAN_STATE_DOWN) {
+		ESCAN_ERROR(("STATE is down\n"));
+		return -EIO;
+	}
+	mutex_lock(&escan->usr_sync);
+
+	if (!escan->ioctl_ver) {
+		val = 1;
+		if ((err = wldev_ioctl(dev, WLC_GET_VERSION, &val, sizeof(int), false)) < 0) {
+			ESCAN_ERROR(("WLC_GET_VERSION failed, err=%d\n", err));
+			goto exit;
+		}
+		val = dtoh32(val);
+		if (val != WLC_IOCTL_VERSION && val != 1) {
+			ESCAN_ERROR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+				val, WLC_IOCTL_VERSION));
+			goto exit;
+		}
+		escan->ioctl_ver = val;
+		printf("%s: ioctl_ver=%d\n", __FUNCTION__, val);
+	}
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+	if (escan->escan_state == ESCAN_STATE_SCANING) {
+		ESCAN_ERROR(("Scanning already\n"));
+		goto exit;
+	}
+
+	/* if the scan request is not empty, parse the scan request parameters */
+	memset(valid_chan_list, 0, sizeof(valid_chan_list));
+	list = (wl_uint32_list_t *)(void *) valid_chan_list;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = wldev_ioctl(escan->dev, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), false);
+	if (err != 0) {
+		ESCAN_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	n_channels = dtoh32(list->count);
+	/* Allocate space for populating ssids in wl_escan_params_t struct */
+	if (dtoh32(list->count) % 2)
+		/* If n_channels is odd, add a pad of u16 */
+		params_size += sizeof(u16) * (n_channels + 1);
+	else
+		params_size += sizeof(u16) * n_channels;
+	if (ssid.SSID_len) {
+		params_size += sizeof(struct wlc_ssid) * 2;
+	}
+
+	params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		err = -ENOMEM;
+		goto exit;
+	}
+	wl_escan_prep(escan, list, &params->params, &ssid);
+
+	params->version = htod32(ESCAN_REQ_VERSION);
+	params->action =  htod16(WL_SCAN_ACTION_START);
+	wl_escan_set_sync_id(params->sync_id);
+	if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+		ESCAN_ERROR(("ioctl buffer length not sufficient\n"));
+		kfree(params);
+		err = -ENOMEM;
+		goto exit;
+	}
+	params->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	ESCAN_TRACE(("scan_type %d\n", params->params.scan_type));
+
+	err = wldev_iovar_setbuf(dev, "escan", params, params_size,
+		escan->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+	printf("%s: LEGACY_SCAN\n", __FUNCTION__);
+	if (unlikely(err)) {
+		if (err == BCME_EPERM)
+			/* Scan Not permitted at this point of time */
+			ESCAN_TRACE(("Escan not permitted at this time (%d)\n", err));
+		else
+			ESCAN_ERROR(("Escan set error (%d)\n", err));
+		wl_escan_reset(escan);
+	}
+	kfree(params);
+
+exit:
+	if (unlikely(err)) {
+		/* Don't print an error in case of scan suppress */
+		if ((err == BCME_EPERM))
+			ESCAN_TRACE(("Escan failed: Scan Suppressed \n"));
+		else {
+			cnt++;
+			ESCAN_ERROR(("error (%d), cnt=%d\n", err, cnt));
+			// terence 20140111: send disassoc to firmware
+			if (cnt >= 4) {
+				memset(&scbval, 0, sizeof(scb_val_t));
+				wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
+				ESCAN_ERROR(("Send disassoc to break the busy dev=%p\n", dev));
+				cnt = 0;
+			}
+		}
+	} else {
+		cnt = 0;
+	}
+	mutex_unlock(&escan->usr_sync);
+	return err;
+}
+
+int
+wl_escan_merge_scan_results(struct net_device *dev, struct iw_request_info *info,
+	char *extra, wl_bss_info_t *bi, int *len, int max_size)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_escan_info *escan = dhd->escan;
+	s32 err = BCME_OK;
+	struct iw_event	iwe;
+	int j;
+	char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
+	int16 rssi;
+	int channel;
+
+	/* overflow check covers the fields before the WPA IEs */
+	if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+		IW_EV_QUAL_LEN >= end) {
+		err = -E2BIG;
+		goto exit;
+	}
+
+#if defined(RSSIAVG)
+	rssi = wl_get_avg_rssi(&escan->g_rssi_cache_ctrl, &bi->BSSID);
+	if (rssi == RSSI_MINVAL)
+		rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#else
+	// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+	rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#endif
+	channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+	ESCAN_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+		MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+
+	/* First entry must be the BSSID */
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+	event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+	/* SSID */
+	iwe.u.data.length = dtoh32(bi->SSID_len);
+	iwe.cmd = SIOCGIWESSID;
+	iwe.u.data.flags = 1;
+	event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+	/* Mode */
+	if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+		iwe.cmd = SIOCGIWMODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+			iwe.u.mode = IW_MODE_INFRA;
+		else
+			iwe.u.mode = IW_MODE_ADHOC;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+	}
+
+	/* Channel */
+	iwe.cmd = SIOCGIWFREQ;
+#if 1
+	iwe.u.freq.m = wf_channel2mhz(channel, channel <= CH_MAX_2G_CHANNEL ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+#else
+	iwe.u.freq.m = wf_channel2mhz(bi->n_cap ?
+			bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec),
+			CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+#endif
+	iwe.u.freq.e = 6;
+	event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+	/* Channel quality */
+	iwe.cmd = IWEVQUAL;
+	iwe.u.qual.qual = rssi_to_qual(rssi);
+	iwe.u.qual.level = 0x100 + rssi;
+	iwe.u.qual.noise = 0x100 + bi->phy_noise;
+	event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+	wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+	/* Encryption */
+	iwe.cmd = SIOCGIWENCODE;
+	if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+	/* Rates */
+	if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+		if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) {
+			err = -E2BIG;
+			goto exit;
+		}
+		value = event + IW_EV_LCP_LEN;
+		iwe.cmd = SIOCGIWRATE;
+		/* Those two flags are ignored... */
+		iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+		for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+			iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+			value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+				IW_EV_PARAM_LEN);
+		}
+		event = value;
+	}
+	*len = event - extra;
+	if (*len < 0)
+		ESCAN_ERROR(("==> Wrong size\n"));
+
+exit:
+	return err;
+}
+
+int
+wl_escan_get_scan(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *dwrq, char *extra)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_escan_info *escan = dhd->escan;
+	s32 err = BCME_OK;
+	int i = 0;
+	int len_prep = 0, len_ret = 0;
+	wl_bss_info_t *bi = NULL;
+	struct wl_scan_results *bss_list;
+	__u16 buflen_from_user = dwrq->length;
+#if defined(BSSCACHE)
+	wl_bss_cache_t *node;
+#endif
+	char *buf = NULL;
+	struct ether_addr cur_bssid;
+
+	ESCAN_TRACE(("%s: %s SIOCGIWSCAN, len=%d\n", __FUNCTION__, dev->name, dwrq->length));
+
+	if (!extra)
+		return -EINVAL;
+
+	mutex_lock(&escan->usr_sync);
+
+	/* Check for scan in progress */
+	if (escan->escan_state == ESCAN_STATE_SCANING) {
+		ESCAN_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name));
+		err = -EAGAIN;
+		goto exit;
+	}
+	if (!escan->bss_list) {
+		ESCAN_ERROR(("%s: scan not ready\n", dev->name));
+		err = -EAGAIN;
+		goto exit;
+	}
+
+	err = wldev_ioctl(dev, WLC_GET_BSSID, &cur_bssid, sizeof(cur_bssid), false);
+	if (err != BCME_NOTASSOCIATED && memcmp(&ether_null, &cur_bssid, ETHER_ADDR_LEN)) {
+		// merge current connected bss
+		buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_ATOMIC);
+		if (!buf) {
+			ESCAN_ERROR(("buffer alloc failed.\n"));
+			err = BCME_NOMEM;
+			goto exit;
+		}
+		*(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
+		err = wldev_ioctl(dev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX, false);
+		if (unlikely(err)) {
+			ESCAN_ERROR(("Could not get bss info %d\n", err));
+			goto exit;
+		}
+		bi = (struct wl_bss_info *)(buf + 4);
+		len_prep = 0;
+		err = wl_escan_merge_scan_results(dev, info, extra+len_ret, bi,
+			&len_prep, buflen_from_user-len_ret);
+		len_ret += len_prep;
+		if (err)
+			goto exit;
+		bi = NULL;
+	}
+
+#if defined(BSSCACHE)
+	bss_list = &escan->g_bss_cache_ctrl.m_cache_head->results;
+	node = escan->g_bss_cache_ctrl.m_cache_head;
+	for (i=0; node && i<IW_MAX_AP; i++)
+#else
+	bss_list = escan->bss_list;
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i)
+#endif
+	{
+#if defined(BSSCACHE)
+		bi = node->results.bss_info;
+#endif
+		if (!memcmp(&bi->BSSID, &cur_bssid, ETHER_ADDR_LEN)) {
+			ESCAN_SCAN(("skip connected AP %pM\n", &cur_bssid));
+#if defined(BSSCACHE)
+			node = node->next;
+#endif
+			continue;
+		}
+		len_prep = 0;
+		err = wl_escan_merge_scan_results(dev, info, extra+len_ret, bi,
+			&len_prep, buflen_from_user-len_ret);
+		len_ret += len_prep;
+		if (err)
+			goto exit;
+#if defined(BSSCACHE)
+		node = node->next;
+#endif
+	}
+
+	if ((len_ret + WE_ADD_EVENT_FIX) < dwrq->length)
+		dwrq->length = len_ret;
+
+	dwrq->flags = 0;	/* todo */
+exit:
+	kfree(buf);
+	dwrq->length = len_ret;
+	ESCAN_SCAN(("scanned AP count (%d)\n", i));
+	mutex_unlock(&escan->usr_sync);
+	return err;
+}
+
+s32 wl_escan_autochannel(struct net_device *dev, char* command, int total_len)
+{
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	struct wl_escan_info *escan = dhd->escan;
+	int ret = 0;
+	int bytes_written = -1;
+
+	sscanf(command, "%*s %d", &escan->autochannel);
+
+	if (escan->autochannel == 0) {
+		escan->best_2g_ch = 0;
+		escan->best_5g_ch = 0;
+	} else if (escan->autochannel == 2) {
+		bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
+			escan->best_2g_ch, escan->best_5g_ch);
+		ESCAN_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+		ret = bytes_written;
+	}
+
+	return ret;
+}
+
+static s32 wl_create_event_handler(struct wl_escan_info *escan)
+{
+	int ret = 0;
+	ESCAN_TRACE(("Enter \n"));
+
+	/* Do not use DHD in cfg driver */
+	escan->event_tsk.thr_pid = -1;
+
+	PROC_START(wl_escan_event_handler, escan, &escan->event_tsk, 0, "wl_escan_handler");
+	if (escan->event_tsk.thr_pid < 0)
+		ret = -ENOMEM;
+	return ret;
+}
+
+static void wl_destroy_event_handler(struct wl_escan_info *escan)
+{
+	if (escan->event_tsk.thr_pid >= 0)
+		PROC_STOP(&escan->event_tsk);
+}
+
+static void wl_escan_deinit(struct wl_escan_info *escan)
+{
+	printf("%s: Enter\n", __FUNCTION__);
+	if (!escan) {
+		ESCAN_ERROR(("device is not ready\n"));
+		return;
+	}
+	wl_destroy_event_handler(escan);
+	wl_flush_eq(escan);
+	del_timer_sync(&escan->scan_timeout);
+	escan->escan_state = ESCAN_STATE_DOWN;
+
+#if defined(RSSIAVG)
+	wl_free_rssi_cache(&escan->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+	wl_free_bss_cache(&escan->g_bss_cache_ctrl);
+#endif
+	wl_ext_add_remove_pm_enable_work(&escan->conn_info, FALSE);
+}
+
+static s32 wl_escan_init(struct wl_escan_info *escan)
+{
+	int err = 0;
+
+	printf("%s: Enter\n", __FUNCTION__);
+	if (!escan) {
+		ESCAN_ERROR(("device is not ready\n"));
+		return -EIO;
+	}
+
+	/* Init scan_timeout timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	timer_setup(&escan->scan_timeout, wl_escan_timeout, 0);
+#else
+	init_timer(&escan->scan_timeout);
+	escan->scan_timeout.data = (unsigned long) escan;
+	escan->scan_timeout.function = wl_escan_timeout;
+#endif
+
+	if (wl_create_event_handler(escan)) {
+		ESCAN_ERROR(("device is not ready\n"));
+		err = -ENOMEM;
+		goto err;
+	}
+	memset(escan->evt_handler, 0, sizeof(escan->evt_handler));
+
+	escan->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+	escan->escan_state = ESCAN_STATE_IDLE;
+
+	return 0;
+err:
+	wl_escan_deinit(escan);
+	return err;
+}
+
+void wl_escan_down(dhd_pub_t *dhdp)
+{
+	struct wl_escan_info *escan = dhdp->escan;
+
+	wl_escan_deinit(escan);
+}
+
+int wl_escan_up(struct net_device *net, dhd_pub_t *dhdp)
+{
+	struct wl_escan_info *escan = dhdp->escan;
+	int err;
+
+	err = wl_escan_init(escan);
+	if (err) {
+		ESCAN_ERROR(("wl_escan_init err %d\n", err));
+		return err;
+	}
+
+	return 0;
+}
+
+void wl_escan_detach(dhd_pub_t *dhdp)
+{
+	struct wl_escan_info *escan = dhdp->escan;
+
+	printf("%s: Enter\n", __FUNCTION__);
+
+	if (!escan) {
+		ESCAN_ERROR(("device is not ready\n"));
+		return;
+	}
+
+	wl_escan_deinit(escan);
+
+	if (escan->escan_ioctl_buf) {
+		kfree(escan->escan_ioctl_buf);
+		escan->escan_ioctl_buf = NULL;
+	}
+	DHD_OS_PREFREE(dhdp, escan, sizeof(struct wl_escan_info));
+	dhdp->escan = NULL;
+}
+
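+/*
+ * Allocate and initialise the per-device escan context: the preallocated
+ * wl_escan_info, its ioctl buffer, the event queue, the scan timer and
+ * event handler thread, and the PM-enable delayed work.  Undone by
+ * wl_escan_detach().
+ */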
+int
+wl_escan_attach(struct net_device *dev, dhd_pub_t *dhdp)
+{
+	struct wl_escan_info *escan = NULL;
+	int err = 0;
+
+	printf("%s: Enter\n", __FUNCTION__);
+
+	if (!dev)
+		return 0;
+	escan = (wl_escan_info_t *)DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_WL_ESCAN_INFO, sizeof(struct wl_escan_info));
+	if (!escan)
+		return -ENOMEM;
+	dhdp->escan = (void *)escan;
+	memset(escan, 0, sizeof(struct wl_escan_info));
+
+	/* we only care about main interface so save a global here */
+	escan->dev = dev;
+	escan->pub = dhdp;
+	escan->conn_info.dev = dev;
+	escan->conn_info.dhd = dhdp;
+	escan->escan_state = ESCAN_STATE_DOWN;
+
+	escan->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!escan->escan_ioctl_buf)) {
+		ESCAN_ERROR(("Ioctl buf alloc failed\n"));
+		goto err;
+	}
+	wl_init_eq(escan);
+	err = wl_escan_init(escan);
+	if (err) {
+		ESCAN_ERROR(("wl_escan_init err %d\n", err));
+		return err;
+	}
+	mutex_init(&escan->usr_sync);
+	mutex_init(&escan->conn_info.pm_sync);
+	INIT_DELAYED_WORK(&escan->conn_info.pm_enable_work, wl_ext_pm_work_handler);
+
+	return 0;
+err:
+	wl_escan_detach(dhdp);
+	return -ENOMEM;
+}
+
+#endif /* WL_ESCAN */
+
diff --git a/drivers/net/wireless/bcmdhd/wl_escan.h b/drivers/net/wireless/bcmdhd/wl_escan.h
new file mode 100644
index 0000000..86cde62
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_escan.h
@@ -0,0 +1,96 @@
+
+#ifndef _wl_escan_
+#define _wl_escan_
+
+#include <linux/wireless.h>
+#include <wl_iw.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <linux/time.h>
+
+
+#ifdef DHD_MAX_IFS
+#define WL_MAX_IFS DHD_MAX_IFS
+#else
+#define WL_MAX_IFS 16
+#endif
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+#define WL_ESCAN_TIMER_INTERVAL_MS	10000 /* Scan timeout */
+
+/* event queue for cfg80211 main event */
+struct escan_event_q {
+	struct list_head eq_list;
+	u32 etype;
+	wl_event_msg_t emsg;
+	s8 edata[1];
+};
+
+struct pmk_list {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID - 1];
+};
+
+/* dongle escan state */
+enum escan_state {
+	ESCAN_STATE_DOWN,
+	ESCAN_STATE_IDLE,
+	ESCAN_STATE_SCANING
+};
+
+struct wl_escan_info;
+
+typedef s32(*ESCAN_EVENT_HANDLER) (struct wl_escan_info *escan,
+                            const wl_event_msg_t *e, void *data);
+
+typedef struct wl_escan_info {
+	struct net_device *dev;
+	dhd_pub_t *pub;
+	struct timer_list scan_timeout;   /* Timer to catch scan event timeout */
+	int escan_state;
+	int ioctl_ver;
+
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+	u8 escan_buf[ESCAN_BUF_SIZE];
+	struct wl_scan_results *bss_list;
+	struct wl_scan_results *scan_results;
+	struct ether_addr disconnected_bssid;
+	u8 *escan_ioctl_buf;
+	spinlock_t eq_lock;	/* for event queue synchronization */
+	struct list_head eq_list;	/* used for event queue */
+	tsk_ctl_t event_tsk;  		/* task of main event handler thread */
+	ESCAN_EVENT_HANDLER evt_handler[WLC_E_LAST];
+	struct mutex usr_sync;	/* mainly for up/down synchronization */
+	int autochannel;
+	int best_2g_ch;
+	int best_5g_ch;
+#if defined(RSSIAVG)
+	wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
+	wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
+#endif
+#if defined(BSSCACHE)
+	wl_bss_cache_ctrl_t g_bss_cache_ctrl;
+#endif
+	struct pmk_list pmk_list;
+	wl_conn_info_t conn_info;
+} wl_escan_info_t;
+
+void wl_escan_event(struct net_device *dev, const wl_event_msg_t * e, void *data);
+
+int wl_escan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+);
+int wl_escan_get_scan(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *dwrq, char *extra);
+s32 wl_escan_autochannel(struct net_device *dev, char* command, int total_len);
+int wl_escan_attach(struct net_device *dev, dhd_pub_t *dhdp);
+void wl_escan_detach(dhd_pub_t *dhdp);
+int wl_escan_up(struct net_device *net, dhd_pub_t *dhdp);
+void wl_escan_down(dhd_pub_t *dhdp);
+
+#endif /* _wl_escan_ */
+
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c
new file mode 100644
index 0000000..fe97b21
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -0,0 +1,4325 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_iw.c 616333 2016-02-01 05:30:29Z $
+ */
+
+#if defined(USE_IW)
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+#include <wlioctl.h>
+#ifdef WL_NAN
+#include <wlioctl_utils.h>
+#endif
+#include <wl_android.h>
+#ifdef WL_ESCAN
+#include <wl_escan.h>
+#endif
+#include <dhd_config.h>
+
+typedef const struct si_pub	si_t;
+
+/* message levels */
+#define WL_ERROR_LEVEL	0x0001
+#define WL_SCAN_LEVEL	0x0002
+#define WL_ASSOC_LEVEL	0x0004
+#define WL_INFORM_LEVEL	0x0008
+#define WL_WSEC_LEVEL	0x0010
+#define WL_PNO_LEVEL	0x0020
+#define WL_COEX_LEVEL	0x0040
+#define WL_SOFTAP_LEVEL	0x0080
+#define WL_TRACE_LEVEL	0x0100
+
+uint iw_msg_level = WL_ERROR_LEVEL;
+
+#define WL_ERROR(x)		do {if (iw_msg_level & WL_ERROR_LEVEL) printf x;} while (0)
+#define WL_SCAN(x)		do {if (iw_msg_level & WL_SCAN_LEVEL) printf x;} while (0)
+#define WL_ASSOC(x)		do {if (iw_msg_level & WL_ASSOC_LEVEL) printf x;} while (0)
+#define WL_INFORM(x)	do {if (iw_msg_level & WL_INFORM_LEVEL) printf x;} while (0)
+#define WL_WSEC(x)		do {if (iw_msg_level & WL_WSEC_LEVEL) printf x;} while (0)
+#define WL_PNO(x)		do {if (iw_msg_level & WL_PNO_LEVEL) printf x;} while (0)
+#define WL_COEX(x)		do {if (iw_msg_level & WL_COEX_LEVEL) printf x;} while (0)
+#define WL_SOFTAP(x)	do {if (iw_msg_level & WL_SOFTAP_LEVEL) printf x;} while (0)
+#define WL_TRACE(x)		do {if (iw_msg_level & WL_TRACE_LEVEL) printf x;} while (0)
+
+#include <wl_iw.h>
+ 
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h of the Linux system header file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1	0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4	0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */
+#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
+#define IW_AUTH_KEY_MGMT_FT_802_1X 0x04
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_FT_PSK
+#define IW_AUTH_KEY_MGMT_FT_PSK 0x08
+#endif
+
+#ifndef IW_ENC_CAPA_FW_ROAM_ENABLE
+#define IW_ENC_CAPA_FW_ROAM_ENABLE	0x00000020
+#endif
+
+
+/* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest
+ * version 22.
+ */
+#ifndef IW_ENCODE_ALG_PMK
+#define IW_ENCODE_ALG_PMK 4
+#endif
+#ifndef IW_ENC_CAPA_4WAY_HANDSHAKE
+#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010
+#endif
+/* End FC9. */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/rtnetlink.h>
+#endif
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+	uint32 reason, char* stringBuf, uint buflen);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN WLC_IOCTL_MEDLEN
+
+/* IOCTL swapping mode for Big Endian host with Little Endian dongle.  Default to off */
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)
+#endif /* WIRELESS_EXT < 19 */
+
+
+#ifndef WL_ESCAN
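+/*
+ * DAEMONIZE() hides kernel-version differences for the legacy iscan
+ * thread: kernels >= 3.8 only need the fatal signals unblocked, older
+ * kernels still have to call daemonize() themselves.
+ */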
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
+
+#define ISCAN_STATE_IDLE   0
+#define ISCAN_STATE_SCANING 1
+
+/* the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
+#define WLC_IW_ISCAN_MAXLEN   2048
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+struct pmk_list {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID - 1];
+};
+
+typedef struct iscan_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	int    iscan_state;
+	iscan_buf_t * list_hdr;
+	iscan_buf_t * list_cur;
+
+	/* Thread to work on iscan */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	struct task_struct *kthread;
+#endif
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+	struct pmk_list pmk_list;
+	wl_conn_info_t conn_info;
+} iscan_info_t;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+#endif /* WL_ESCAN */
+
+/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */
+typedef struct priv_link {
+	wl_iw_t *wliw;
+} priv_link_t;
+
+/* dev to priv_link */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define WL_DEV_LINK(dev)       (priv_link_t*)(dev->priv)
+#else
+#define WL_DEV_LINK(dev)       (priv_link_t*)netdev_priv(dev)
+#endif
+
+/* dev to wl_iw_t */
+#define IW_DEV_IF(dev)          ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw)
+
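+/* Byte-swap wl_wsec_key_t fields between host and dongle order before/after
+ * the wsec key ioctls.
+ */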
+static void swap_key_from_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
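+/*
+ * Send a WLC ioctl to the dongle by wrapping it in a SIOCDEVPRIVATE
+ * request and calling the net_device's ioctl hook directly, with the
+ * address limit temporarily raised to KERNEL_DS because the wl_ioctl_t
+ * argument lives in kernel space.
+ */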
+static int
+dev_wlc_ioctl(
+	struct net_device *dev,
+	int cmd,
+	void *arg,
+	int len
+)
+{
+	struct ifreq ifr;
+	wl_ioctl_t ioc;
+	mm_segment_t fs;
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+#ifdef CONFIG_COMPAT
+	ioc.cmd = cmd | WLC_SPEC_FLAG;
+#else
+	ioc.cmd = cmd;
+#endif
+	ioc.buf = arg;
+	ioc.len = len;
+
+	strncpy(ifr.ifr_name, dev->name, sizeof(ifr.ifr_name));
+	ifr.ifr_name[sizeof(ifr.ifr_name) - 1] = '\0';
+	ifr.ifr_data = (caddr_t) &ioc;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+#if defined(WL_USE_NETDEV_OPS)
+	ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+	ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+	set_fs(fs);
+
+	return ret;
+}
+
+/*
+set named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_set(dev, "arate", rate)
+*/
+
+static int
+dev_wlc_intvar_set(
+	struct net_device *dev,
+	char *name,
+	int val)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	uint len;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	ASSERT(len);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+#ifndef WL_ESCAN
+static int
+dev_iw_iovar_setbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+#endif
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+	struct net_device *dev,
+	char *name,
+	char *buf, int len)
+{
+	char *ioctlbuf;
+	uint buflen;
+	int error;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+
+	buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(buflen);
+	error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
+
+	kfree(ioctlbuf);
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+/*
+get named driver variable into a caller-provided buffer and return error indication
+calling example: dev_wlc_bufvar_get(dev, "arate", buf, buflen)
+*/
+
+static int
+dev_wlc_bufvar_get(
+	struct net_device *dev,
+	char *name,
+	char *buf, int buflen)
+{
+	char *ioctlbuf;
+	int error;
+
+	uint len;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+	len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(len);
+	BCM_REFERENCE(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	if (!error)
+		bcopy(ioctlbuf, buf, buflen);
+
+	kfree(ioctlbuf);
+	return (error);
+}
+
+/*
+get named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_get(dev, "arate", &rate)
+*/
+
+static int
+dev_wlc_intvar_get(
+	struct net_device *dev,
+	char *name,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	uint data_null;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+
+	return (error);
+}
+
+/* Maintain backward compatibility */
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+	__u16		cmd;		/* Wireless Extension command */
+	__u16		flags;		/* More to come ;-) */
+};
+
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+	void *wrqu, char *extra);
+#endif /* WIRELESS_EXT < 13 */
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_leddc(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int dc = *(int *)extra;
+	int error;
+
+	error = dev_wlc_intvar_set(dev, "leddc", dc);
+	return error;
+}
+
+static int
+wl_iw_set_vlanmode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int mode = *(int *)extra;
+	int error;
+
+	mode = htod32(mode);
+	error = dev_wlc_intvar_set(dev, "vlan_mode", mode);
+	return error;
+}
+
+static int
+wl_iw_set_pm(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int pm = *(int *)extra;
+	int error;
+
+	pm = htod32(pm);
+	error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+	return error;
+}
+#endif /* WIRELESS_EXT > 12 */
+
+static void
+wl_iw_update_connect_status(struct net_device *dev, enum wl_ext_status status)
+{
+#ifndef WL_CFG80211
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	int cur_eapol_status = 0;
+	int wpa_auth = 0;
+	int error = -EINVAL;
+	wl_conn_info_t *conn_info = NULL;
+#ifdef WL_ESCAN
+	wl_escan_info_t *escan;
+	if (dhd && dhd->escan) {
+		escan = (wl_escan_info_t *)dhd->escan;
+		conn_info = &escan->conn_info;
+	}
+#else
+	iscan_info_t *iscan;
+	if (dhd && dhd->iscan) {
+		iscan = (iscan_info_t *)dhd->iscan;
+		conn_info = &iscan->conn_info;
+	}
+#endif
+
+	if (!dhd || !dhd->conf || !conn_info)
+		return;
+
+	cur_eapol_status = dhd->conf->eapol_status;
+
+	if (status == WL_EXT_STATUS_CONNECTING) {
+		wl_ext_add_remove_pm_enable_work(conn_info, TRUE);
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &wpa_auth))) {
+			WL_ERROR(("%s: wpa_auth get error %d\n", __FUNCTION__, error));
+			return;
+		}
+		if (wpa_auth & (WPA_AUTH_PSK|WPA2_AUTH_PSK))
+			dhd->conf->eapol_status = EAPOL_STATUS_WPA_START;
+		else
+			dhd->conf->eapol_status = EAPOL_STATUS_NONE;
+	} else if (status == WL_EXT_STATUS_ADD_KEY) {
+		dhd->conf->eapol_status = EAPOL_STATUS_WPA_END;
+	} else if (status == WL_EXT_STATUS_DISCONNECTING) {
+		wl_ext_add_remove_pm_enable_work(conn_info, FALSE);
+		if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
+				cur_eapol_status < EAPOL_STATUS_WPA_END) {
+			WL_ERROR(("%s: WPA failed at %d\n", __FUNCTION__, cur_eapol_status));
+			dhd->conf->eapol_status = EAPOL_STATUS_NONE;
+		} else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
+				cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
+			WL_ERROR(("%s: WPS failed at %d\n", __FUNCTION__, cur_eapol_status));
+			dhd->conf->eapol_status = EAPOL_STATUS_NONE;
+		}
+	} else if (status == WL_EXT_STATUS_DISCONNECTED) {
+		if (cur_eapol_status >= EAPOL_STATUS_WPA_START &&
+				cur_eapol_status < EAPOL_STATUS_WPA_END) {
+			WL_ERROR(("%s: WPA failed at %d\n", __FUNCTION__, cur_eapol_status));
+			dhd->conf->eapol_status = EAPOL_STATUS_NONE;
+		} else if (cur_eapol_status >= EAPOL_STATUS_WPS_WSC_START &&
+				cur_eapol_status < EAPOL_STATUS_WPS_DONE) {
+			WL_ERROR(("%s: WPS failed at %d\n", __FUNCTION__, cur_eapol_status));
+			dhd->conf->eapol_status = EAPOL_STATUS_NONE;
+		}
+	}
+#endif
+	return;
+}
+
+int
+wl_iw_send_priv_event(
+	struct net_device *dev,
+	char *flag
+)
+{
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd;
+
+	cmd = IWEVCUSTOM;
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) > sizeof(extra))
+		return -1;
+
+	strncpy(extra, flag, sizeof(extra));
+	extra[sizeof(extra) - 1] = '\0';
+	wrqu.data.length = strlen(extra);
+	wireless_send_event(dev, cmd, &wrqu, extra);
+	WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+	return 0;
+}
+
+static int
+wl_iw_config_commit(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *zwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct sockaddr bssid;
+
+	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+		return error;
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	if (!ssid.SSID_len)
+		return 0;
+
+	bzero(&bssid, sizeof(struct sockaddr));
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_name(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *cwrq,
+	char *extra
+)
+{
+	int phytype, err;
+	uint band[3];
+	char cap[5];
+
+	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+	cap[0] = 0;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0)
+		goto done;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0)
+		goto done;
+
+	band[0] = dtoh32(band[0]);
+	switch (phytype) {
+		case WLC_PHY_TYPE_A:
+			strncpy(cap, "a", sizeof(cap));
+			break;
+		case WLC_PHY_TYPE_B:
+			strncpy(cap, "b", sizeof(cap));
+			break;
+		case WLC_PHY_TYPE_G:
+			if (band[0] >= 2)
+				strncpy(cap, "abg", sizeof(cap));
+			else
+				strncpy(cap, "bg", sizeof(cap));
+			break;
+		case WLC_PHY_TYPE_N:
+			if (band[0] >= 2)
+				strncpy(cap, "abgn", sizeof(cap));
+			else
+				strncpy(cap, "bgn", sizeof(cap));
+			break;
+	}
+done:
+	(void)snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+
+	return 0;
+}
+
+static int
+wl_iw_set_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error, chan;
+	uint sf = 0;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	wl_conn_info_t *conn_info = NULL;
+#ifdef WL_ESCAN
+	wl_escan_info_t *escan;
+	if (dhd && dhd->escan) {
+		escan = (wl_escan_info_t *)dhd->escan;
+		conn_info = &escan->conn_info;
+	}
+#else
+	iscan_info_t *iscan;
+	if (dhd && dhd->iscan) {
+		iscan = (iscan_info_t *)dhd->iscan;
+		conn_info = &iscan->conn_info;
+	}
+#endif
+
+	WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+
+	/* Setting by channel number */
+	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+		chan = fwrq->m;
+	}
+
+	/* Setting by frequency */
+	else {
+		/* Convert to MHz as best we can */
+		if (fwrq->e >= 6) {
+			fwrq->e -= 6;
+			while (fwrq->e--)
+				fwrq->m *= 10;
+		} else if (fwrq->e < 6) {
+			while (fwrq->e++ < 6)
+				fwrq->m /= 10;
+		}
+	/* handle 4.9GHz frequencies as Japan 4 GHz based channelization */
+		if (fwrq->m > 4000 && fwrq->m < 5000) {
+			sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */
+		}
+		chan = wf_mhz2channel(fwrq->m, sf);
+	}
+	if (conn_info)
+		conn_info->channel = chan;
+	WL_ERROR(("%s: chan=%d\n", __FUNCTION__, chan));
+	chan = htod32(chan);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) {
+		WL_ERROR(("%s: WLC_SET_CHANNEL failed (%d).\n", __FUNCTION__, error));
+		return error;
+	}
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error;
+	u32 chanspec = 0;
+	int ctl_chan;
+
+	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "chanspec", &chanspec)))
+		return error;
+	ctl_chan = wf_chspec_ctlchan(chanspec);
+
+	/* Return radio channel in channel form */
+	fwrq->m = ctl_chan;
+	fwrq->e = dtoh32(0);
+	return 0;
+}
+
+static int
+wl_iw_set_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int infra = 0, ap = 0, error = 0;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	wl_conn_info_t *conn_info = NULL;
+#ifdef WL_ESCAN
+	wl_escan_info_t *escan;
+	if (dhd && dhd->escan) {
+		escan = (wl_escan_info_t *)dhd->escan;
+		conn_info = &escan->conn_info;
+	}
+#else
+	iscan_info_t *iscan;
+	if (dhd && dhd->iscan) {
+		iscan = (iscan_info_t *)dhd->iscan;
+		conn_info = &iscan->conn_info;
+	}
+#endif
+
+	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+	if (conn_info) {
+		memset(&conn_info->ssid, 0, sizeof(wlc_ssid_t));
+		memset(&conn_info->bssid, 0, sizeof(struct ether_addr));
+		conn_info->channel = 0;
+	}
+
+	switch (*uwrq) {
+	case IW_MODE_MASTER:
+		infra = ap = 1;
+		break;
+	case IW_MODE_ADHOC:
+	case IW_MODE_AUTO:
+		break;
+	case IW_MODE_INFRA:
+		infra = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	infra = htod32(infra);
+	ap = htod32(ap);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+		return error;
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int error, infra = 0, ap = 0;
+
+	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+		return error;
+
+	infra = dtoh32(infra);
+	ap = dtoh32(ap);
+	*uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+	return 0;
+}
+
+static int
+wl_iw_get_range(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	struct iw_range *range = (struct iw_range *) extra;
+	static int channels[MAXCHANNEL+1];
+	wl_uint32_list_t *list = (wl_uint32_list_t *) channels;
+	wl_rateset_t rateset;
+	int error, i, k;
+	uint sf, ch;
+
+	int phytype;
+	int bw_cap = 0, sgi_tx = 0, nmode = 0;
+	channel_info_t ci;
+	uint8 nrate_list2copy = 0;
+	uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+		{14, 29, 43, 58, 87, 116, 130, 144},
+		{27, 54, 81, 108, 162, 216, 243, 270},
+		{30, 60, 90, 120, 180, 240, 270, 300}};
+	int fbt_cap = 0;
+
+	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(*range));
+
+	/* We don't use nwids */
+	range->min_nwid = range->max_nwid = 0;
+
+	/* Set available channels/frequencies */
+	list->count = htod32(MAXCHANNEL);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels))))
+		return error;
+	for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+		range->freq[i].i = dtoh32(list->element[i]);
+
+		ch = dtoh32(list->element[i]);
+		if (ch <= CH_MAX_2G_CHANNEL)
+			sf = WF_CHAN_FACTOR_2_4_G;
+		else
+			sf = WF_CHAN_FACTOR_5_G;
+
+		range->freq[i].m = wf_channel2mhz(ch, sf);
+		range->freq[i].e = 6;
+	}
+	range->num_frequency = range->num_channels = i;
+
+	/* Link quality (use NDIS cutoffs) */
+	range->max_qual.qual = 5;
+	/* Signal level (use RSSI) */
+	range->max_qual.level = 0x100 - 200;	/* -200 dBm */
+	/* Noise level (use noise) */
+	range->max_qual.noise = 0x100 - 200;	/* -200 dBm */
+	/* Signal level threshold range (?) */
+	range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+	/* Link quality (use NDIS cutoffs) */
+	range->avg_qual.qual = 3;
+	/* Signal level (use RSSI) */
+	range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+	/* Noise level (use noise) */
+	range->avg_qual.noise = 0x100 - 75;	/* -75 dBm */
+#endif /* WIRELESS_EXT > 11 */
+
+	/* Set available bitrates */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+	rateset.count = dtoh32(rateset.count);
+	range->num_bitrates = rateset.count;
+	for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+		range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */
+	if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode)))
+		return error;
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
+		return error;
+	if (nmode == 1 && (((phytype == WLC_PHY_TYPE_LCN) ||
+	                    (phytype == WLC_PHY_TYPE_LCN40)))) {
+		if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap)))
+			return error;
+		if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx)))
+			return error;
+		if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t))))
+			return error;
+		ci.hw_channel = dtoh32(ci.hw_channel);
+
+		if (bw_cap == 0 ||
+			(bw_cap == 2 && ci.hw_channel <= 14)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 0;
+			else
+				nrate_list2copy = 1;
+		}
+		if (bw_cap == 1 ||
+			(bw_cap == 2 && ci.hw_channel >= 36)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 2;
+			else
+				nrate_list2copy = 3;
+		}
+		range->num_bitrates += 8;
+		ASSERT(range->num_bitrates < IW_MAX_BITRATES);
+		for (k = 0; i < range->num_bitrates; k++, i++) {
+			/* convert to bps */
+			range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+		}
+	}
+
+	/* Set an indication of the max TCP throughput
+	 * in bit/s that we can expect using this interface.
+	 * May be use for QoS stuff... Jean II
+	 */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i))))
+		return error;
+	i = dtoh32(i);
+	if (i == WLC_PHY_TYPE_A)
+		range->throughput = 24000000;	/* 24 Mbits/s */
+	else
+		range->throughput = 1500000;	/* 1.5 Mbits/s */
+
+	/* RTS and fragmentation thresholds */
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+	range->num_encoding_sizes = 4;
+	range->encoding_size[0] = WEP1_KEY_SIZE;
+	range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+	range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+	range->encoding_size[2] = 0;
+#endif
+	range->encoding_size[3] = AES_KEY_SIZE;
+
+	/* Do not support power micro-management */
+	range->min_pmp = 0;
+	range->max_pmp = 0;
+	range->min_pmt = 0;
+	range->max_pmt = 0;
+	range->pmp_flags = 0;
+	range->pm_capa = 0;
+
+	/* Transmit Power - values are in mW */
+	range->num_txpower = 2;
+	range->txpower[0] = 1;
+	range->txpower[1] = 255;
+	range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 19;
+
+	/* Only support retry limits */
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->r_time_flags = 0;
+	/* SRL and LRL limits */
+	range->min_retry = 1;
+	range->max_retry = 255;
+	/* Retry lifetime limits unsupported */
+	range->min_r_time = 0;
+	range->max_r_time = 0;
+#endif /* WIRELESS_EXT > 10 */
+
+#if WIRELESS_EXT > 17
+	range->enc_capa = IW_ENC_CAPA_WPA;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+	range->enc_capa |= IW_ENC_CAPA_WPA2;
+
+	/* Determine driver FBT capability. */
+	if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+		if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+			/* Tell the host (e.g. wpa_supplicant) to let driver do the handshake */
+			range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE;
+		}
+	}
+
+#ifdef BCMFW_ROAM_ENABLE_WEXT
+	/* Advertise firmware roam capability to the external supplicant */
+	range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE;
+#endif /* BCMFW_ROAM_ENABLE_WEXT */
+
+	/* Event capability (kernel) */
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+	/* Event capability (driver) */
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+
+#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID)
+	/* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */
+	range->scan_capa = IW_SCAN_CAPA_ESSID;
+#endif
+#endif /* WIRELESS_EXT > 17 */
+
+	return 0;
+}
+
+#ifndef WL_ESCAN
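+/* Map an RSSI value in dBm onto the 0..5 WEXT link-quality scale. */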
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		return 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		return 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		return 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		return 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		return 4;
+	else
+		return 5;
+}
+#endif /* WL_ESCAN */
+
+static int
+wl_iw_set_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	int i;
+
+	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+	for (i = 0; i < iw->spy_num; i++)
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+	return 0;
+}
+
+static int
+wl_iw_get_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = iw->spy_num;
+	for (i = 0; i < iw->spy_num; i++) {
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		addr[i].sa_family = AF_UNIX;
+		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+		iw->spy_qual[i].updated = 0;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_set_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	int error = -EINVAL;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	wl_conn_info_t *conn_info = NULL;
+#ifdef WL_ESCAN
+	wl_escan_info_t *escan;
+	if (dhd && dhd->escan) {
+		escan = (wl_escan_info_t *)dhd->escan;
+		conn_info = &escan->conn_info;
+	}
+#else
+	iscan_info_t *iscan;
+	if (dhd && dhd->iscan) {
+		iscan = (iscan_info_t *)dhd->iscan;
+		conn_info = &iscan->conn_info;
+	}
+#endif
+
+	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+	if (awrq->sa_family != ARPHRD_ETHER) {
+		WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	/* Ignore "auto" or "off" */
+	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		WL_ERROR(("%s: WLC_DISASSOC\n", __FUNCTION__));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+			WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+		}
+		wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
+		return 0;
+	}
+	/* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data),
+	 * eabuf)));
+	 */
+	/* Reassociate to the specified AP */
+	if (conn_info)
+		memcpy(&conn_info->bssid, awrq->sa_data, ETHER_ADDR_LEN);
+	if (conn_info && conn_info->ssid.SSID_len) {
+		if ((error = wl_ext_connect(dev, conn_info)))
+			return error;
+	} else {
+		if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+			WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+			return error;
+		}
+		WL_ERROR(("%s: join BSSID="MACSTR"\n", __FUNCTION__, MAC2STR((u8 *)awrq->sa_data)));
+	}
+	wl_iw_update_connect_status(dev, WL_EXT_STATUS_CONNECTING);
+
+	return 0;
+}
+
+static int
+wl_iw_get_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+	awrq->sa_family = ARPHRD_ETHER;
+	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+	/* Ignore error (may be down or disassociated) */
+	(void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	struct iw_mlme *mlme;
+	scb_val_t scbval;
+	int error  = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWMLME\n", dev->name));
+
+	mlme = (struct iw_mlme *)extra;
+	if (mlme == NULL) {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	scbval.val = mlme->reason_code;
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+	if (mlme->cmd == IW_MLME_DISASSOC) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+	}
+	else if (mlme->cmd == IW_MLME_DEAUTH) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+			sizeof(scb_val_t));
+	}
+	else {
+		WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
+		return error;
+	}
+	wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
+
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+#ifndef WL_ESCAN
+static int
+wl_iw_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int error, i;
+	uint buflen = dwrq->length;
+	int16 rssi;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* Infrastructure only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		/* BSSID */
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+		rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+		qual[dwrq->length].qual = rssi_to_qual(rssi);
+		qual[dwrq->length].level = 0x100 + rssi;
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+		dwrq->length++;
+	}
+
+	kfree(list);
+
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	iscan_buf_t * buf;
+	iscan_info_t *iscan = NULL;
+
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int i;
+	int16 rssi;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	if (dhd && dhd->iscan)
+		iscan = (iscan_info_t *)dhd->iscan;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_aplist(dev, info, dwrq, extra);
+	}
+
+	buf = iscan->list_hdr;
+	/* Get scan results (too large to put on the stack) */
+	while (buf) {
+	    list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+	    ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	    bi = NULL;
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			WLC_IW_ISCAN_MAXLEN));
+
+		/* Infrastructure only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		/* BSSID */
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+		rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+		qual[dwrq->length].qual = rssi_to_qual(rssi);
+		qual[dwrq->length].level = 0x100 + rssi;
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+		dwrq->length++;
+	    }
+	    buf = buf->next;
+	}
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+#endif
+
+#if WIRELESS_EXT > 13
+#ifndef WL_ESCAN
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+	/* Ignore error (most likely scan in progress) */
+	(void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	iscan_info_t *iscan = NULL;
+	if (dhd && dhd->iscan)
+		iscan = (iscan_info_t *)dhd->iscan;
+
+	WL_TRACE(("%s: SIOCSIWSCAN iscan=%p\n", dev->name, iscan));
+
+	/* use backup if our thread is not successful */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_set_scan(dev, info, wrqu, extra);
+	}
+	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+		return 0;
+	}
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+
+
+	wl_iw_set_event_mask(dev);
+	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+	iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+	add_timer(&iscan->timer);
+	iscan->timer_on = 1;
+
+	return 0;
+}
+#endif /* WL_ESCAN */
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this tlvs entry a WPA entry? If not, */
+/* update the tlvs buffer pointer/length */
+	uint8 *ie = *wpaie;
+
+	/* If the contents match the WPA_OUI and type=1 */
+	if ((ie[1] >= 6) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this tlvs entry a WPS entry? If not, */
+/* update the tlvs buffer pointer/length */
+	uint8 *ie = *wpsie;
+
+	/* If the contents match the WPA_OUI and type=4 */
+	if ((ie[1] >= 4) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+	size_t len, int uppercase)
+{
+	size_t i;
+	char *pos = buf, *end = buf + buf_size;
+	int ret;
+	if (buf_size == 0)
+		return 0;
+	for (i = 0; i < len; i++) {
+		ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+			data[i]);
+		if (ret < 0 || ret >= end - pos) {
+			end[-1] = '\0';
+			return pos - buf;
+		}
+		pos += ret;
+	}
+	end[-1] = '\0';
+	return pos - buf;
+}
+
+/**
+ * wpa_snprintf_hex - Print data as a hex string into a buffer
+ * @buf: Memory area to use as the output buffer
+ * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
+ * @data: Data to be printed
+ * @len: Length of data in bytes
+ * Returns: Number of bytes written
+ */
+static int
+wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+	return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
+#endif /* BCMWAPI_WPI */
+
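+/*
+ * Copy the OSEN, RSN, MDIE, WPS and WPA IEs (plus WAPI when BCMWAPI_WPI is
+ * set) from a bss_info record into the WEXT scan event stream as
+ * IWEVGENIE/IWEVCUSTOM entries, advancing *event_p past what was written.
+ */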
+#ifndef WL_ESCAN
+static
+#endif
+int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+	struct iw_event	iwe;
+	char *event;
+#ifdef BCMWAPI_WPI
+	char *buf;
+	int custom_event_len;
+#endif
+
+	event = *event_p;
+	if (bi->ie_length) {
+		/* look for wpa/rsn ies in the ie list... */
+		bcm_tlv_t *ie;
+		uint8 *ptr = ((uint8 *)bi) + bi->ie_offset;
+		int ptr_len = bi->ie_length;
+
+		/* OSEN IE */
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) &&
+			ie->len > WFA_OUI_LEN + 1 &&
+			!bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) &&
+			ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + bi->ie_offset;
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + bi->ie_offset;
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + bi->ie_offset;
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			/* look for WPS IE */
+			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		ptr = ((uint8 *)bi) + bi->ie_offset;
+		ptr_len = bi->ie_length;
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+		
+#ifdef BCMWAPI_WPI
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+			WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
+#ifdef WAPI_IE_USE_GENIE
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else /* using CUSTOM event */
+			iwe.cmd = IWEVCUSTOM;
+			custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+			iwe.u.data.length = custom_event_len;
+
+			buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+			if (buf == NULL)
+			{
+				WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+				break;
+			}
+
+			memcpy(buf, "wapi_ie=", 8);
+			wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+			wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+			wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+			kfree(buf);
+#endif /* WAPI_IE_USE_GENIE */
+			break;
+		}
+#endif /* BCMWAPI_WPI */
+		*event_p = event;
+	}
+
+#endif /* WIRELESS_EXT > 17 */
+	return 0;
+}
+
+#ifndef WL_ESCAN
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int error, i, j;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	uint buflen = dwrq->length;
+	int16 rssi;
+	int channel;
+
+	WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check for scan in progress */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+	ci.scan_channel = dtoh32(ci.scan_channel);
+	if (ci.scan_channel)
+		return -EAGAIN;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+		rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+		channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+		WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+		__FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+
+		/* First entry must be the BSSID */
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		/* SSID */
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		/* Mode */
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		/* Channel */
+		iwe.cmd = SIOCGIWFREQ;
+
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			(CHSPEC_IS2G(bi->chanspec)) ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		/* Channel quality */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(rssi);
+		iwe.u.qual.level = 0x100 + rssi;
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		 wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		/* Encryption */
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		/* Rates */
+		if (bi->rateset.count) {
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			/* Those two flags are ignored... */
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	}
+
+	kfree(list);
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int ii, j;
+	int apcnt;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	iscan_buf_t * p_buf;
+	int16 rssi;
+	int channel;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	iscan_info_t *iscan = NULL;
+	if (dhd && dhd->iscan)
+		iscan = (iscan_info_t *)dhd->iscan;
+
+	WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* use backup if our thread is not successful */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_scan(dev, info, dwrq, extra);
+	}
+
+	/* Check for scan in progress */
+	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+		WL_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name));
+		return -EAGAIN;
+	}
+
+	apcnt = 0;
+	p_buf = iscan->list_hdr;
+	/* Get scan results */
+	while (p_buf != iscan->list_cur) {
+		list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+		if (list->version != WL_BSS_INFO_VERSION) {
+			WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
+		}
+
+		bi = NULL;
+		for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+			bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+			ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+				WLC_IW_ISCAN_MAXLEN));
+
+			/* overflow check cover fields before wpa IEs */
+			if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+				IW_EV_QUAL_LEN >= end)
+				return -E2BIG;
+
+			// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+			rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+			channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
+			WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+			__FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+
+			/* First entry must be the BSSID */
+			iwe.cmd = SIOCGIWAP;
+			iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+			memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+			/* SSID */
+			iwe.u.data.length = dtoh32(bi->SSID_len);
+			iwe.cmd = SIOCGIWESSID;
+			iwe.u.data.flags = 1;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+			/* Mode */
+			if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+				iwe.cmd = SIOCGIWMODE;
+				if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+					iwe.u.mode = IW_MODE_INFRA;
+				else
+					iwe.u.mode = IW_MODE_ADHOC;
+				event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+			}
+
+			/* Channel */
+			iwe.cmd = SIOCGIWFREQ;
+			iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+				(CHSPEC_IS2G(bi->chanspec)) ?
+				WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+			iwe.u.freq.e = 6;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+			/* Channel quality */
+			iwe.cmd = IWEVQUAL;
+			iwe.u.qual.qual = rssi_to_qual(rssi);
+			iwe.u.qual.level = 0x100 + rssi;
+			iwe.u.qual.noise = 0x100 + bi->phy_noise;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+			wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+			/* Encryption */
+			iwe.cmd = SIOCGIWENCODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+				iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+			else
+				iwe.u.data.flags = IW_ENCODE_DISABLED;
+			iwe.u.data.length = 0;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+			/* Rates */
+			if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+				if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+					return -E2BIG;
+
+				value = event + IW_EV_LCP_LEN;
+				iwe.cmd = SIOCGIWRATE;
+				/* Those two flags are ignored... */
+				iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+				for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+					iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+					value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+						IW_EV_PARAM_LEN);
+				}
+				event = value;
+			}
+		}
+		p_buf = p_buf->next;
+	} /* while (p_buf) */
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+	WL_SCAN(("%s: apcnt=%d\n", __FUNCTION__, apcnt));
+
+	return 0;
+}
+#endif /* WL_ESCAN */
+#endif /* WIRELESS_EXT > 13 */
+
+
+static int
+wl_iw_set_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	wl_conn_info_t *conn_info = NULL;
+#ifdef WL_ESCAN
+	wl_escan_info_t *escan;
+	if (dhd && dhd->escan) {
+		escan = (wl_escan_info_t *)dhd->escan;
+		conn_info = &escan->conn_info;
+	}
+#else
+	iscan_info_t *iscan;
+	if (dhd && dhd->iscan) {
+		iscan = (iscan_info_t *)dhd->iscan;
+		conn_info = &iscan->conn_info;
+	}
+#endif
+
+	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+	/* default Broadcast SSID */
+	memset(&ssid, 0, sizeof(ssid));
+	if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
+#else
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
+#endif
+		memcpy(ssid.SSID, extra, ssid.SSID_len);
+		ssid.SSID_len = htod32(ssid.SSID_len);
+
+		if (conn_info) {
+			memcpy(conn_info->ssid.SSID, ssid.SSID, ssid.SSID_len);
+			conn_info->ssid.SSID_len = ssid.SSID_len;
+		}
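+		/* If a non-null BSSID has already been recorded in conn_info, join
+		 * through the extended connect path; otherwise issue a plain
+		 * WLC_SET_SSID join.
+		 */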
+		if (conn_info && memcmp(&ether_null, &conn_info->bssid, ETHER_ADDR_LEN)) {
+			if ((error = wl_ext_connect(dev, conn_info)))
+				return error;
+		} else {
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) {
+				WL_ERROR(("%s: WLC_SET_SSID failed (%d).\n", __FUNCTION__, error));
+				return error;
+			}
+			WL_ERROR(("%s: join SSID=%s\n", __FUNCTION__, ssid.SSID));
+		}
+		wl_iw_update_connect_status(dev, WL_EXT_STATUS_CONNECTING);
+	}
+	/* A null ESSID means this is the "iwconfig <interface> essid off" command */
+	else {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		WL_ERROR(("%s: WLC_DISASSOC\n", __FUNCTION__));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+			WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+			return error;
+		}
+		wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
+	}
+	return 0;
+}
+
+static int
+wl_iw_get_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+		WL_ERROR(("Error getting the SSID\n"));
+		return error;
+	}
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	/* Max SSID length check */
+	if (ssid.SSID_len > IW_ESSID_MAX_SIZE) {
+		ssid.SSID_len = IW_ESSID_MAX_SIZE;
+	}
+
+	/* Get the current SSID */
+	memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+	/* NUL-terminate; the extra buffer is IW_ESSID_MAX_SIZE (32) bytes plus a terminator */
+	extra[IW_ESSID_MAX_SIZE] = '\0';
+
+	dwrq->length = ssid.SSID_len;
+
+	dwrq->flags = 1; /* active */
+
+	return 0;
+}
+
+static int
+wl_iw_set_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check the size of the string */
+	if (dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+	memcpy(iw->nickname, extra, dwrq->length);
+	if (dwrq->length)
+		iw->nickname[dwrq->length - 1] = '\0';
+
+	return 0;
+}
+
+static int
+wl_iw_get_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	strcpy(extra, iw->nickname);
+	dwrq->length = strlen(extra) + 1;
+
+	return 0;
+}
+
+static int wl_iw_set_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	wl_rateset_t rateset;
+	int error, rate, i, error_bg, error_a;
+
+	WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+	/* Get current rateset */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+
+	rateset.count = dtoh32(rateset.count);
+
+	if (vwrq->value < 0) {
+		/* Select maximum rate */
+		rate = rateset.rates[rateset.count - 1] & 0x7f;
+	} else if (vwrq->value < rateset.count) {
+		/* Select rate by rateset index */
+		rate = rateset.rates[vwrq->value] & 0x7f;
+	} else {
+		/* Specified rate in bps */
+		rate = vwrq->value / 500000;
+	}
+
+	if (vwrq->fixed) {
+		/*
+			Set rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are enforced.
+		*/
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+	} else {
+		/*
+			Clear rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are cleared.
+		*/
+		/* 0 is for clearing rate override */
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+		/* 0 is for clearing rate override */
+		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+
+		/* Remove rates above selected rate */
+		for (i = 0; i < rateset.count; i++)
+			if ((rateset.rates[i] & 0x7f) > rate)
+				break;
+		rateset.count = htod32(i);
+
+		/* Set current rateset */
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+			return error;
+	}
+
+	return 0;
+}
+
+static int wl_iw_get_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rate;
+
+	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+	/* Report the current tx rate */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+		return error;
+	rate = dtoh32(rate);
+	vwrq->value = rate * 500000;
+
+	return 0;
+}
+
+static int
+wl_iw_set_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+	if (vwrq->disabled)
+		rts = DOT11_DEFAULT_RTS_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+		return -EINVAL;
+	else
+		rts = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+		return error;
+
+	vwrq->value = rts;
+	vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, frag;
+
+	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+	if (vwrq->disabled)
+		frag = DOT11_DEFAULT_FRAG_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+		return -EINVAL;
+	else
+		frag = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, fragthreshold;
+
+	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+		return error;
+
+	vwrq->value = fragthreshold;
+	vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable;
+	uint16 txpwrmw;
+	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+	disable += WL_RADIO_SW_DISABLE << 16;
+
+	disable = htod32(disable);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+		return error;
+
+	/* If Radio is off, nothing more to do */
+	if (disable & WL_RADIO_SW_DISABLE)
+		return 0;
+
+	/* Only handle mW */
+	if (!(vwrq->flags & IW_TXPOW_MWATT))
+		return -EINVAL;
+
+	/* Value < 0 means just "on" or "off" */
+	if (vwrq->value < 0)
+		return 0;
+
+	if (vwrq->value > 0xffff)
+		txpwrmw = 0xffff;
+	else
+		txpwrmw = (uint16)vwrq->value;
+
+
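+	/* qtxpower expects quarter-dBm units; convert the requested mW value */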
+	error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+	return error;
+}
+
+static int
+wl_iw_get_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable, txpwrdbm;
+	uint8 result;
+
+	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+	    (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+		return error;
+
+	disable = dtoh32(disable);
+	result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+	vwrq->value = (int32)bcm_qdbm_to_mw(result);
+	vwrq->fixed = 0;
+	vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+	vwrq->flags = IW_TXPOW_MWATT;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+	/* Do not handle "off" or "lifetime" */
+	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+		return -EINVAL;
+
+	/* Handle "[min|max] limit" */
+	if (vwrq->flags & IW_RETRY_LIMIT) {
+		/* "max limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX) ||
+			!((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN)))
+#else
+		if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN))
+#endif /* WIRELESS_EXT > 20 */
+		{
+			lrl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+				return error;
+		}
+		/* "min limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN) ||
+			!((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX)))
+#else
+		if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX))
+#endif /* WIRELESS_EXT > 20 */
+		{
+			srl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+				return error;
+		}
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+	vwrq->disabled = 0;      /* Can't be disabled */
+
+	/* Do not handle lifetime queries */
+	if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	/* Get retry limits */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+		return error;
+
+	lrl = dtoh32(lrl);
+	srl = dtoh32(srl);
+
+	/* Note : by default, display the min retry number */
+	if (vwrq->flags & IW_RETRY_MAX) {
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+		vwrq->value = lrl;
+	} else {
+		vwrq->flags = IW_RETRY_LIMIT;
+		vwrq->value = srl;
+		if (srl != lrl)
+			vwrq->flags |= IW_RETRY_MIN;
+	}
+
+	return 0;
+}
+#endif /* WIRELESS_EXT > 10 */
+
+static int
+wl_iw_set_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec;
+
+	WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = htod32(key.index);
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+		/* Default to 0 */
+		if (key.index == DOT11_MAX_DEFAULT_KEYS)
+			key.index = 0;
+	} else {
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+		if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+			return -EINVAL;
+	}
+
+	/* Interpret "off" to mean no encryption */
+	wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+	if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+		return error;
+
+	/* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */
+	if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+		/* Just select a new current key */
+		val = htod32(key.index);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+			return error;
+	} else {
+		key.len = dwrq->length;
+
+		if (dwrq->length > sizeof(key.data))
+			return -EINVAL;
+
+		memcpy(key.data, extra, dwrq->length);
+
+		key.flags = WL_PRIMARY_KEY;
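+		/* Infer the cipher from the supplied key length:
+		 * 5/13 bytes -> WEP40/WEP104, 32 bytes -> TKIP, 16 bytes -> AES-CCM.
+		 */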
+		switch (key.len) {
+		case WEP1_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP1;
+			break;
+		case WEP128_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP128;
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+		case TKIP_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_TKIP;
+			break;
+#endif
+		case AES_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* Set the new key/index */
+		swap_key_from_BE(&key);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+			return error;
+	}
+
+	/* Interpret "restricted" to mean shared key authentication */
+	val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+	val = htod32(val);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec, auth;
+
+	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+	/* assure default values of zero for things we don't touch */
+	bzero(&key, sizeof(wl_wsec_key_t));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = key.index;
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+	} else
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+		key.index = 0;
+
+	/* Get info */
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+		return error;
+
+	swap_key_to_BE(&key);
+
+	wsec = dtoh32(wsec);
+	auth = dtoh32(auth);
+	/* Get key length */
+	dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);
+
+	/* Get flags */
+	dwrq->flags = key.index + 1;
+	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+		/* Interpret "off" to mean no encryption */
+		dwrq->flags |= IW_ENCODE_DISABLED;
+	}
+	if (auth) {
+		/* Interpret "restricted" to mean shared key authentication */
+		dwrq->flags |= IW_ENCODE_RESTRICTED;
+	}
+
+	/* Get key */
+	if (dwrq->length && extra)
+		memcpy(extra, key.data, dwrq->length);
+
+	return 0;
+}
+
+static int
+wl_iw_set_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+	pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+	pm = htod32(pm);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+		return error;
+
+	pm = dtoh32(pm);
+	vwrq->disabled = pm ? 0 : 1;
+	vwrq->flags = IW_POWER_ALL_R;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+#if defined(BCMWAPI_WPI)
+	uchar buf[WLC_IOCTL_SMLEN] = {0};
+	uchar *p = buf;
+	int wapi_ie_size;
+
+	WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+	if (extra[0] == DOT11_MNG_WAPI_ID)
+	{
+		wapi_ie_size = iwp->length;
+		memcpy(p, extra, iwp->length);
+		dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+	}
+	else
+#endif
+		dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+	return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+	iwp->length = 64;
+	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+	return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error;
+	struct iw_encode_ext *iwe;
+
+	WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+	iwe = (struct iw_encode_ext *)extra;
+
+	/* disable encryption completely: intentionally a no-op in this driver */
+	if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+	}
+
+	/* get the key index */
+	key.index = 0;
+	if (dwrq->flags & IW_ENCODE_INDEX)
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	key.len = iwe->key_len;
+
+	/* For default WEP keys the driver expects a null ea address instead of the broadcast address */
+	if (!ETHER_ISMULTI(iwe->addr.sa_data))
+		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+	/* check for key index change */
+	if (key.len == 0) {
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the primary key to %d\n", key.index));
+			/* change the key index .... */
+			key.index = htod32(key.index);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+				&key.index, sizeof(key.index));
+			if (error)
+				return error;
+		}
+		/* key delete */
+		else {
+			swap_key_from_BE(&key);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+			if (error)
+				return error;
+		}
+	}
+	/* This case is used to allow an external 802.1x supplicant
+	 * to pass the PMK to the in-driver supplicant for use in
+	 * the 4-way handshake.
+	 */
+	else if (iwe->alg == IW_ENCODE_ALG_PMK) {
+		int j;
+		wsec_pmk_t pmk;
+		char keystring[WSEC_MAX_PSK_LEN + 1];
+		char* charptr = keystring;
+		uint len;
+
+		/* convert the raw PMK bytes into the ASCII hex string expected by WLC_SET_WSEC_PMK */
+		for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+			(void)snprintf(charptr, 3, "%02x", iwe->key[j]);
+			charptr += 2;
+		}
+		len = strlen(keystring);
+		pmk.key_len = htod16(len);
+		bcopy(keystring, pmk.key, len);
+		pmk.flags = htod16(WSEC_PASSPHRASE);
+
+		WL_WSEC(("%s: set key %s\n", __FUNCTION__, keystring));
+		error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+		if (error) {
+			WL_ERROR(("%s: WLC_SET_WSEC_PMK error %d\n", __FUNCTION__, error));
+			return error;
+		}
+	}
+
+	else {
+		if (iwe->key_len > sizeof(key.data))
+			return -EINVAL;
+
+		WL_WSEC(("Setting the key index %d\n", key.index));
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("key is a Primary Key\n"));
+			key.flags = WL_PRIMARY_KEY;
+		}
+
+		bcopy((void *)iwe->key, key.data, iwe->key_len);
+
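+		/* WEXT and the firmware use opposite TKIP TX/RX MIC key ordering,
+		 * so swap the two 8-byte MIC keys within the 32-byte TKIP key.
+		 */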
+		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+			uint8 keybuf[8];
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+
+		/* rx iv */
+		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+			uchar *ivptr;
+			ivptr = (uchar *)iwe->rx_seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = TRUE;
+		}
+
+		switch (iwe->alg) {
+			case IW_ENCODE_ALG_NONE:
+				key.algo = CRYPTO_ALGO_OFF;
+				break;
+			case IW_ENCODE_ALG_WEP:
+				if (iwe->key_len == WEP1_KEY_SIZE)
+					key.algo = CRYPTO_ALGO_WEP1;
+				else
+					key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			case IW_ENCODE_ALG_TKIP:
+				key.algo = CRYPTO_ALGO_TKIP;
+				break;
+			case IW_ENCODE_ALG_CCMP:
+				key.algo = CRYPTO_ALGO_AES_CCM;
+				break;
+#ifdef BCMWAPI_WPI
+			case IW_ENCODE_ALG_SM4:
+				key.algo = CRYPTO_ALGO_SMS4;
+				if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+					key.flags &= ~WL_PRIMARY_KEY;
+				}
+				break;
+#endif
+			default:
+				break;
+		}
+		swap_key_from_BE(&key);
+
+		dhd_wait_pend8021x(dev);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		if (error)
+			return error;
+		wl_iw_update_connect_status(dev, WL_EXT_STATUS_ADD_KEY);
+	}
+	return 0;
+}
+
+/* wpa2 pmk list */
+static int
+wl_iw_set_pmksa(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	struct pmk_list *pmk_list = NULL;
+	struct iw_pmksa *iwpmksa;
+	uint i;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	pmkid_t *pmkid_array = NULL;
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+#ifdef WL_ESCAN
+	wl_escan_info_t *escan;
+	if (dhd && dhd->escan) {
+		escan = (wl_escan_info_t *)dhd->escan;
+		pmk_list = &escan->pmk_list;
+	}
+#else
+	iscan_info_t *iscan;
+	if (dhd && dhd->iscan) {
+		iscan = (iscan_info_t *)dhd->iscan;
+		pmk_list = &iscan->pmk_list;
+	}
+#endif
+
+	WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+	if (!pmk_list)
+		return -EINVAL;
+	pmkid_array = pmk_list->pmkids.pmkid;
+	iwpmksa = (struct iw_pmksa *)extra;
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+		WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+		bzero((char *)pmk_list, sizeof(struct pmk_list));
+	}
+	if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+		pmkid_list_t pmkid, *pmkidptr;
+		pmkidptr = &pmkid;
+		bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+		{
+			uint j;
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
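+		/* Find the matching BSSID, then shift the remaining entries down to
+		 * close the gap before decrementing the PMKID count.
+		 */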
+		for (i = 0; i < pmk_list->pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+		for (; i < pmk_list->pmkids.npmkid; i++) {
+			bcopy(&pmkid_array[i+1].BSSID,
+				&pmkid_array[i].BSSID,
+				ETHER_ADDR_LEN);
+			bcopy(&pmkid_array[i+1].PMKID,
+				&pmkid_array[i].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		pmk_list->pmkids.npmkid--;
+	}
+	if (iwpmksa->cmd == IW_PMKSA_ADD) {
+		bcopy(&iwpmksa->bssid.sa_data[0],
+			&pmkid_array[pmk_list->pmkids.npmkid].BSSID,
+			ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmk_list->pmkids.npmkid].PMKID,
+			WPA2_PMKID_LEN);
+		{
+			uint j;
+			uint k;
+			k = pmk_list->pmkids.npmkid;
+			BCM_REFERENCE(k);
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkid_array[k].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
+		pmk_list->pmkids.npmkid++;
+	}
+	WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmk_list->pmkids.npmkid));
+	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+		uint j;
+		WL_TRACE(("PMKID[%d]: %s = ", i,
+			bcm_ether_ntoa(&pmkid_array[i].BSSID,
+			eabuf)));
+		for (j = 0; j < WPA2_PMKID_LEN; j++)
+			WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+		WL_TRACE(("\n"));
+	}
+	WL_TRACE(("\n"));
+	dev_wlc_bufvar_set(dev, "pmkid_info", (char *)pmk_list, sizeof(struct pmk_list));
+	return 0;
+}
+
+static int
+wl_iw_get_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+	return 0;
+}
+
+static int
+wl_iw_set_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error = 0;
+	int paramid;
+	int paramval;
+	uint32 cipher_combined;
+	int val = 0;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+	paramval = vwrq->value;
+
+	WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+		dev->name, paramid, paramval));
+
+	switch (paramid) {
+
+	case IW_AUTH_WPA_VERSION:
+		/* supported wpa version disabled or wpa or wpa2 */
+		if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+			val = WPA_AUTH_DISABLED;
+		else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+			val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+		else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+			val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#ifdef BCMWAPI_WPI
+		else if (paramval & IW_AUTH_WAPI_VERSION_1)
+			val = WAPI_AUTH_UNSPECIFIED;
+#endif
+		WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP: {
+		int fbt_cap = 0;
+
+		if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+			iw->pwsec = paramval;
+		}
+		else {
+			iw->gwsec = paramval;
+		}
+
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) {
+			WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
+			return error;
+		}
+		WL_WSEC(("%s: get wsec=0x%x\n", __FUNCTION__, val));
+
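+		/* Translate the combined pairwise/group WEXT cipher flags into the
+		 * firmware wsec bitmap, preserving the bits not managed here.
+		 */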
+		cipher_combined = iw->gwsec | iw->pwsec;
+		val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+		if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+			val |= WEP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+			val |= TKIP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+			val |= AES_ENABLED;
+#ifdef BCMWAPI_WPI
+		val &= ~SMS4_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_SMS4)
+			val |= SMS4_ENABLED;
+#endif
+
+		if (iw->privacy_invoked && !val) {
+			WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+			         "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+				WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else if (val) {
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		}
+
+		WL_WSEC(("%s: set wsec=0x%x\n", __FUNCTION__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+			WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
+			return error;
+		}
+
+		/* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+		 * handshake.
+		 */
+		if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+			WL_WSEC(("%s: get fbt_cap=0x%x\n", __FUNCTION__, fbt_cap));
+			if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+				if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1))) {
+						WL_ERROR(("%s: sup_wpa 1 error %d\n", __FUNCTION__, error));
+						return error;
+					}
+				}
+				else if (val == 0) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0))) {
+						WL_ERROR(("%s: sup_wpa 0 error %d\n", __FUNCTION__, error));
+						return error;
+					}
+				}
+			}
+		}
+		break;
+	}
+
+	case IW_AUTH_KEY_MGMT:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) {
+			WL_ERROR(("%s: wpa_auth error %d\n", __FUNCTION__, error));
+			return error;
+		}
+		WL_WSEC(("%s: get wpa_auth to %d\n", __FUNCTION__, val));
+
+		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA_AUTH_PSK;
+			else
+				val = WPA_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA2_AUTH_PSK;
+			else
+				val = WPA2_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+#ifdef BCMWAPI_WPI
+		if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+			val = WAPI_AUTH_UNSPECIFIED;
+#endif
+		WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open shared */
+		WL_ERROR(("Setting the D11auth %d\n", paramval));
+		if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+			val = 0;
+		else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+			val = 1;
+		else
+			error = 1;
+		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (paramval == 0) {
+			val = 0;
+			WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+			error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+			return error;
+		}
+		else {
+			/* If WPA is enabled, wpa_auth is set elsewhere */
+		}
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED: {
+		int wsec;
+
+		if (paramval == 0) {
+			iw->privacy_invoked = FALSE;
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else {
+			iw->privacy_invoked = TRUE;
+			if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+				return error;
+
+			if (!WSEC_ENABLED(wsec)) {
+				/* if privacy is true, but wsec is false, we are a WPS enrollee */
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+					WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			} else {
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+					WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			}
+		}
+		break;
+	}
+
+
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+
+	case IW_AUTH_WAPI_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+			return error;
+		if (paramval) {
+			val |= SMS4_ENABLED;
+			if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+				WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
+					__FUNCTION__, val, error));
+				return error;
+			}
+			if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
+				WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n",
+					__FUNCTION__, WAPI_AUTH_UNSPECIFIED,
+					error));
+				return error;
+			}
+		}
+
+		break;
+
+#endif /* BCMWAPI_WPI */
+
+	default:
+		break;
+	}
+	return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		/* supported wpa version disabled or wpa or wpa2 */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+			paramval = IW_AUTH_WPA_VERSION_DISABLED;
+		else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA;
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA2;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		paramval = iw->pwsec;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		paramval = iw->gwsec;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		/* psk, 1x */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open, shared, leap */
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+
+#endif /* WIRELESS_EXT > 17 */
+	}
+	vwrq->value = paramval;
+	return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static const iw_handler wl_iw_handler[] =
+{
+	(iw_handler) wl_iw_config_commit,	/* SIOCSIWCOMMIT */
+	(iw_handler) wl_iw_get_name,		/* SIOCGIWNAME */
+	(iw_handler) NULL,			/* SIOCSIWNWID */
+	(iw_handler) NULL,			/* SIOCGIWNWID */
+	(iw_handler) wl_iw_set_freq,		/* SIOCSIWFREQ */
+	(iw_handler) wl_iw_get_freq,		/* SIOCGIWFREQ */
+	(iw_handler) wl_iw_set_mode,		/* SIOCSIWMODE */
+	(iw_handler) wl_iw_get_mode,		/* SIOCGIWMODE */
+	(iw_handler) NULL,			/* SIOCSIWSENS */
+	(iw_handler) NULL,			/* SIOCGIWSENS */
+	(iw_handler) NULL,			/* SIOCSIWRANGE */
+	(iw_handler) wl_iw_get_range,		/* SIOCGIWRANGE */
+	(iw_handler) NULL,			/* SIOCSIWPRIV */
+	(iw_handler) NULL,			/* SIOCGIWPRIV */
+	(iw_handler) NULL,			/* SIOCSIWSTATS */
+	(iw_handler) NULL,			/* SIOCGIWSTATS */
+	(iw_handler) wl_iw_set_spy,		/* SIOCSIWSPY */
+	(iw_handler) wl_iw_get_spy,		/* SIOCGIWSPY */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wap,		/* SIOCSIWAP */
+	(iw_handler) wl_iw_get_wap,		/* SIOCGIWAP */
+#if WIRELESS_EXT > 17
+	(iw_handler) wl_iw_mlme,		/* SIOCSIWMLME */
+#else
+	(iw_handler) NULL,			/* -- hole -- */
+#endif
+#ifdef WL_ESCAN
+	(iw_handler) NULL,			/* SIOCGIWAPLIST */
+#else
+	(iw_handler) wl_iw_iscan_get_aplist,	/* SIOCGIWAPLIST */
+#endif
+#if WIRELESS_EXT > 13
+#ifdef WL_ESCAN
+	(iw_handler) wl_escan_set_scan,	/* SIOCSIWSCAN */
+	(iw_handler) wl_escan_get_scan,	/* SIOCGIWSCAN */
+#else
+	(iw_handler) wl_iw_iscan_set_scan,	/* SIOCSIWSCAN */
+	(iw_handler) wl_iw_iscan_get_scan,	/* SIOCGIWSCAN */
+#endif
+#else	/* WIRELESS_EXT > 13 */
+	(iw_handler) NULL,			/* SIOCSIWSCAN */
+	(iw_handler) NULL,			/* SIOCGIWSCAN */
+#endif	/* WIRELESS_EXT > 13 */
+	(iw_handler) wl_iw_set_essid,		/* SIOCSIWESSID */
+	(iw_handler) wl_iw_get_essid,		/* SIOCGIWESSID */
+	(iw_handler) wl_iw_set_nick,		/* SIOCSIWNICKN */
+	(iw_handler) wl_iw_get_nick,		/* SIOCGIWNICKN */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_rate,		/* SIOCSIWRATE */
+	(iw_handler) wl_iw_get_rate,		/* SIOCGIWRATE */
+	(iw_handler) wl_iw_set_rts,		/* SIOCSIWRTS */
+	(iw_handler) wl_iw_get_rts,		/* SIOCGIWRTS */
+	(iw_handler) wl_iw_set_frag,		/* SIOCSIWFRAG */
+	(iw_handler) wl_iw_get_frag,		/* SIOCGIWFRAG */
+	(iw_handler) wl_iw_set_txpow,		/* SIOCSIWTXPOW */
+	(iw_handler) wl_iw_get_txpow,		/* SIOCGIWTXPOW */
+#if WIRELESS_EXT > 10
+	(iw_handler) wl_iw_set_retry,		/* SIOCSIWRETRY */
+	(iw_handler) wl_iw_get_retry,		/* SIOCGIWRETRY */
+#endif /* WIRELESS_EXT > 10 */
+	(iw_handler) wl_iw_set_encode,		/* SIOCSIWENCODE */
+	(iw_handler) wl_iw_get_encode,		/* SIOCGIWENCODE */
+	(iw_handler) wl_iw_set_power,		/* SIOCSIWPOWER */
+	(iw_handler) wl_iw_get_power,		/* SIOCGIWPOWER */
+#if WIRELESS_EXT > 17
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wpaie,		/* SIOCSIWGENIE */
+	(iw_handler) wl_iw_get_wpaie,		/* SIOCGIWGENIE */
+	(iw_handler) wl_iw_set_wpaauth,		/* SIOCSIWAUTH */
+	(iw_handler) wl_iw_get_wpaauth,		/* SIOCGIWAUTH */
+	(iw_handler) wl_iw_set_encodeext,	/* SIOCSIWENCODEEXT */
+	(iw_handler) wl_iw_get_encodeext,	/* SIOCGIWENCODEEXT */
+	(iw_handler) wl_iw_set_pmksa,		/* SIOCSIWPMKSA */
+#endif /* WIRELESS_EXT > 17 */
+};
+
+#if WIRELESS_EXT > 12
+enum {
+	WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
+	WL_IW_SET_VLANMODE,
+	WL_IW_SET_PM,
+	WL_IW_SET_LAST
+};
+
+static iw_handler wl_iw_priv_handler[] = {
+	wl_iw_set_leddc,
+	wl_iw_set_vlanmode,
+	wl_iw_set_pm,
+	NULL
+};
+
+static struct iw_priv_args wl_iw_priv_args[] = {
+	{
+		WL_IW_SET_LEDDC,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_leddc"
+	},
+	{
+		WL_IW_SET_VLANMODE,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_vlanmode"
+	},
+	{
+		WL_IW_SET_PM,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_pm"
+	},
+	{ 0, 0, 0, { 0 } }
+};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+	.num_standard = ARRAYSIZE(wl_iw_handler),
+	.num_private = ARRAY_SIZE(wl_iw_priv_handler),
+	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+	.standard = (const iw_handler *) wl_iw_handler,
+	.private = wl_iw_priv_handler,
+	.private_args = wl_iw_priv_args,
+#if WIRELESS_EXT >= 19
+	.get_wireless_stats = dhd_get_wireless_stats,
+#endif /* WIRELESS_EXT >= 19 */
+};
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_ioctl(
+	struct net_device *dev,
+	struct ifreq *rq,
+	int cmd
+)
+{
+	struct iwreq *wrq = (struct iwreq *) rq;
+	struct iw_request_info info;
+	iw_handler handler;
+	char *extra = NULL;
+	size_t token_size = 1;
+	int max_tokens = 0, ret = 0;
+#ifndef WL_ESCAN
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	iscan_info_t *iscan = NULL;
+	if (dhd && dhd->iscan)
+		iscan = (iscan_info_t *)dhd->iscan;
+#endif
+
+	if (cmd < SIOCIWFIRST ||
+		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		max_tokens = IW_ESSID_MAX_SIZE + 1;
+		break;
+
+	case SIOCSIWENCODE:
+	case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+	case SIOCSIWENCODEEXT:
+	case SIOCGIWENCODEEXT:
+#endif
+		max_tokens = IW_ENCODING_TOKEN_MAX;
+		break;
+
+	case SIOCGIWRANGE:
+		max_tokens = sizeof(struct iw_range);
+		break;
+
+	case SIOCGIWAPLIST:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_AP;
+		break;
+
+#if WIRELESS_EXT > 13
+	case SIOCGIWSCAN:
+#ifndef WL_ESCAN
+	if (iscan)
+		max_tokens = wrq->u.data.length;
+	else
+#endif
+		max_tokens = IW_SCAN_MAX_DATA;
+		break;
+#endif /* WIRELESS_EXT > 13 */
+
+	case SIOCSIWSPY:
+		token_size = sizeof(struct sockaddr);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+	case SIOCGIWSPY:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_SPY;
+		break;
+	default:
+		break;
+	}
+
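+	/* For ioctls that carry a payload, bounce the user buffer through a
+	 * kernel allocation sized from token_size * max_tokens.
+	 */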
+	if (max_tokens && wrq->u.data.pointer) {
+		if (wrq->u.data.length > max_tokens)
+			return -E2BIG;
+
+		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+	}
+
+	info.cmd = cmd;
+	info.flags = 0;
+
+	ret = handler(dev, &info, &wrq->u, extra);
+
+	if (extra) {
+		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		kfree(extra);
+	}
+
+	return ret;
+}
+
+/* Convert a connection status event into a connection status string.
+ * Returns TRUE if a matching connection status string was found.
+ */
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen)
+{
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			/* input: event type to match */
+		uint32 inStatus;		/* input: event status code to match */
+		uint32 inReason;		/* input: event reason code to match */
+		const char* outName;	/* output: failure type */
+		const char* outCause;	/* output: failure cause */
+	} conn_fail_event_map_t;
+
+	/* Map of WLC_E events to connection failure strings */
+#	define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map [] = {
+		/* inEvent           inStatus                inReason         */
+		/* outName outCause                                           */
+		{WLC_E_SET_SSID,     WLC_E_STATUS_SUCCESS,   WL_IW_DONT_CARE,
+		"Conn", "Success"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+		"Conn", "NoNetworks"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ConfigMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_PRUNE_ENCR_MISMATCH,
+		"Conn", "EncrypMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_RSN_MISMATCH,
+		"Conn", "RsnMismatch"},
+		{WLC_E_AUTH,         WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "AuthTimeout"},
+		{WLC_E_AUTH,         WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "AuthFail"},
+		{WLC_E_AUTH,         WLC_E_STATUS_NO_ACK,    WL_IW_DONT_CARE,
+		"Conn", "AuthNoAck"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ReassocFail"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "ReassocTimeout"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_ABORT,     WL_IW_DONT_CARE,
+		"Conn", "ReassocAbort"},
+		{WLC_E_PSK_SUP,      WLC_SUP_KEYED,          WL_IW_DONT_CARE,
+		"Sup", "ConnSuccess"},
+		{WLC_E_PSK_SUP,      WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Sup", "WpaHandshakeFail"},
+		{WLC_E_DEAUTH_IND,   WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Deauth"},
+		{WLC_E_DISASSOC_IND, WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "DisassocInd"},
+		{WLC_E_DISASSOC,     WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Disassoc"}
+	};
+
+	const char* name = "";
+	const char* cause = NULL;
+	int i;
+
+	/* Search the event map table for a matching event */
+	for (i = 0;  i < sizeof(event_map)/sizeof(event_map[0]);  i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			name = row->outName;
+			cause = row->outCause;
+			break;
+		}
+	}
+
+	/* If found, generate a connection failure string and return TRUE */
+	if (cause) {
+		memset(stringBuf, 0, buflen);
+		(void)snprintf(stringBuf, buflen, "%s %s %02d %02d", name, cause, status, reason);
+		WL_TRACE(("Connection status: %s\n", stringBuf));
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+#if (WIRELESS_EXT > 14)
+/* Check if we have received an event that indicates connection failure
+ * If so, generate a connection failure report string.
+ * The caller supplies a buffer to hold the generated string.
+ */
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+	uint32 event = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+
+	if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+#endif /* WIRELESS_EXT > 14 */
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */
+#endif /* IW_CUSTOM_MAX */
+
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd = 0;
+	uint32 event_type = ntoh32(e->event_type);
+	uint16 flags =  ntoh16(e->flags);
+	uint32 datalen = ntoh32(e->datalen);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+	struct dhd_pub *dhd = dhd_get_pub(dev);
+	wl_conn_info_t *conn_info = NULL;
+#ifdef WL_ESCAN
+	wl_escan_info_t *escan;
+	if (dhd && dhd->escan) {
+		escan = (wl_escan_info_t *)dhd->escan;
+		conn_info = &escan->conn_info;
+	}
+#else
+	iscan_info_t *iscan = NULL;
+	if (dhd && dhd->iscan) {
+		iscan = (iscan_info_t *)dhd->iscan;
+		conn_info = &iscan->conn_info;
+	}
+#endif
+
+	memset(&wrqu, 0, sizeof(wrqu));
+	memset(extra, 0, sizeof(extra));
+
+	memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+
+	switch (event_type) {
+	case WLC_E_TXFAIL:
+		cmd = IWEVTXDROP;
+		break;
+#if WIRELESS_EXT > 14
+	case WLC_E_JOIN:
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+		cmd = IWEVREGISTERED;
+		break;
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
+		printf("%s: disconnected with "MACSTR", event %d, reason %d\n",
+			__FUNCTION__, MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
+		break;
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		bzero(&extra, ETHER_ADDR_LEN);
+		wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
+		printf("%s: disconnected with "MACSTR", event %d, reason %d\n",
+			__FUNCTION__, MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
+		break;
+
+	case WLC_E_LINK:
+		cmd = SIOCGIWAP;
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			printf("%s: Link Down with "MACSTR", reason=%d\n", __FUNCTION__,
+				MAC2STR((u8 *)wrqu.addr.sa_data), reason);
+			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+			bzero(&extra, ETHER_ADDR_LEN);
+			wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
+		} else {
+			printf("%s: Link UP with "MACSTR"\n", __FUNCTION__,
+				MAC2STR((u8 *)wrqu.addr.sa_data));
+		}
+		break;
+	case WLC_E_ACTION_FRAME:
+		cmd = IWEVCUSTOM;
+		if (datalen + 1 <= sizeof(extra)) {
+			wrqu.data.length = datalen + 1;
+			extra[0] = WLC_E_ACTION_FRAME;
+			memcpy(&extra[1], data, datalen);
+			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+		}
+		break;
+
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		cmd = IWEVCUSTOM;
+		if (sizeof(status) + 1 <= sizeof(extra)) {
+			wrqu.data.length = sizeof(status) + 1;
+			extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+			memcpy(&extra[1], &status, sizeof(status));
+			WL_TRACE(("wl_iw_event status %d  \n", status));
+		}
+		break;
+#endif /* WIRELESS_EXT > 14 */
+#if WIRELESS_EXT > 17
+	case WLC_E_MIC_ERROR: {
+		struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra;
+		cmd = IWEVMICHAELMICFAILURE;
+		wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+		if (flags & WLC_EVENT_MSG_GROUP)
+			micerrevt->flags |= IW_MICFAILURE_GROUP;
+		else
+			micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+		memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+		break;
+	}
+
+	case WLC_E_ASSOC_REQ_IE:
+		cmd = IWEVASSOCREQIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_ASSOC_RESP_IE:
+		cmd = IWEVASSOCRESPIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_PMKID_CACHE: {
+		struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+		pmkid_cand_list_t *pmkcandlist;
+		pmkid_cand_t	*pmkidcand;
+		int count;
+
+		if (data == NULL)
+			break;
+
+		cmd = IWEVPMKIDCAND;
+		pmkcandlist = data;
+		count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+		wrqu.data.length = sizeof(struct iw_pmkid_cand);
+		pmkidcand = pmkcandlist->pmkid_cand;
+		while (count) {
+			bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+			if (pmkidcand->preauth)
+				iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+			bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+			      ETHER_ADDR_LEN);
+			wireless_send_event(dev, cmd, &wrqu, extra);
+			pmkidcand++;
+			count--;
+		}
+		break;
+	}
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef WL_ESCAN
+	case WLC_E_ESCAN_RESULT:
+		WL_TRACE(("event WLC_E_ESCAN_RESULT\n"));
+		wl_escan_event(dev, e, data);
+		break;
+#else
+
+	case WLC_E_SCAN_COMPLETE:
+#if WIRELESS_EXT > 14
+		cmd = SIOCGIWSCAN;
+#endif
+		WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+		/* terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)" */
+		memset(&wrqu, 0, sizeof(wrqu));
+		if ((iscan) && (iscan->sysioc_pid >= 0) &&
+			(iscan->iscan_state != ISCAN_STATE_IDLE))
+			up(&iscan->sysioc_sem);
+		break;
+#endif
+
+	default:
+		/* Cannot translate event */
+		break;
+	}
+
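+	/* For iscan, SIOCGIWSCAN is reported from the iscan thread once results
+	 * are complete, so only forward it here when iscan is not active.
+	 */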
+	if (cmd) {
+#ifndef WL_ESCAN
+		if (cmd == SIOCGIWSCAN) {
+			if ((!iscan) || (iscan->sysioc_pid < 0)) {
+				wireless_send_event(dev, cmd, &wrqu, NULL);
+			};
+		} else
+#endif
+			wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+
+#if WIRELESS_EXT > 14
+	/* Look for WLC events that indicate a connection failure.
+	 * If found, generate an IWEVCUSTOM event.
+	 */
+	memset(extra, 0, sizeof(extra));
+	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+		cmd = IWEVCUSTOM;
+		wrqu.data.length = strlen(extra);
+		wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+#endif /* WIRELESS_EXT > 14 */
+
+#endif /* WIRELESS_EXT > 13 */
+}
+
+#ifdef WL_NAN
+static int wl_iw_get_wireless_stats_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len)
+{
+	struct iw_statistics *wstats = ctx;
+	int res = BCME_OK;
+
+	switch (type) {
+		case WL_CNT_XTLV_WLC: {
+			wl_cnt_wlc_t *cnt = (wl_cnt_wlc_t *)data;
+			if (len > sizeof(wl_cnt_wlc_t)) {
+				printf("counter structure length invalid! %d > %d\n",
+					len, (int)sizeof(wl_cnt_wlc_t));
+			}
+			wstats->discard.nwid = 0;
+			wstats->discard.code = dtoh32(cnt->rxundec);
+			wstats->discard.fragment = dtoh32(cnt->rxfragerr);
+			wstats->discard.retries = dtoh32(cnt->txfail);
+			wstats->discard.misc = dtoh32(cnt->rxrunt) + dtoh32(cnt->rxgiant);
+			wstats->miss.beacon = 0;
+			WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+				dtoh32(cnt->txframe), dtoh32(cnt->txbyte)));
+			WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n",
+				dtoh32(cnt->rxundec)));
+			WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n",
+				dtoh32(cnt->txfail)));
+			WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n",
+				dtoh32(cnt->rxfragerr)));
+			WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n",
+				dtoh32(cnt->rxrunt)));
+			WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n",
+				dtoh32(cnt->rxgiant)));
+			break;
+		}
+		case WL_CNT_XTLV_CNTV_LE10_UCODE:
+		case WL_CNT_XTLV_LT40_UCODE_V1:
+		case WL_CNT_XTLV_GE40_UCODE_V1:
+		{
+			/* Offsets of rxfrmtoolong and rxbadplcp are the same in
+			 * wl_cnt_v_le10_mcst_t, wl_cnt_lt40mcst_v1_t, and wl_cnt_ge40mcst_v1_t.
+			 * So we can just cast to wl_cnt_v_le10_mcst_t here.
+			 */
+			wl_cnt_v_le10_mcst_t *cnt = (wl_cnt_v_le10_mcst_t *)data;
+			if (len != WL_CNT_MCST_STRUCT_SZ) {
+				printf("counter structure length mismatch! %d != %d\n",
+					len, WL_CNT_MCST_STRUCT_SZ);
+			}
+			WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n",
+				dtoh32(cnt->rxfrmtoolong)));
+			WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n",
+				dtoh32(cnt->rxbadplcp)));
+			BCM_REFERENCE(cnt);
+			break;
+		}
+		default:
+			WL_ERROR(("%s %d: Unsupported type %d\n", __FUNCTION__, __LINE__, type));
+			break;
+	}
+	return res;
+}
+#endif
+
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+	int res = 0;
+	int phy_noise;
+	int rssi;
+	scb_val_t scb_val;
+#if WIRELESS_EXT > 11
+	char *cntbuf = NULL;
+	wl_cnt_info_t *cntinfo;
+	uint16 ver;
+	uint32 corerev = 0;
+#endif /* WIRELESS_EXT > 11 */
+
+	phy_noise = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) {
+		WL_ERROR(("%s: WLC_GET_PHY_NOISE error=%d\n", __FUNCTION__, res));
+		goto done;
+	}
+
+	phy_noise = dtoh32(phy_noise);
+	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise));
+
+	memset(&scb_val, 0, sizeof(scb_val));
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) {
+		WL_ERROR(("%s: WLC_GET_RSSI error=%d\n", __FUNCTION__, res));
+		goto done;
+	}
+
+	rssi = dtoh32(scb_val.val);
+	rssi = MIN(rssi, RSSI_MAXVAL);
+	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
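+	/* Map RSSI into the coarse 0..5 quality scale reported through WEXT */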
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		wstats->qual.qual = 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		wstats->qual.qual = 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		wstats->qual.qual = 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		wstats->qual.qual = 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		wstats->qual.qual = 4;
+	else
+		wstats->qual.qual = 5;
+
+	/* Wraps to 0 if RSSI is 0 */
+	wstats->qual.level = 0x100 + rssi;
+	wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+	wstats->qual.updated |= 7;
+#endif /* WIRELESS_EXT > 18 */
+
+#if WIRELESS_EXT > 11
+	WL_TRACE(("wl_iw_get_wireless_stats counters\n *****"));
+
+	cntbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!cntbuf) {
+		res = BCME_NOMEM;
+		goto done;
+	}
+
+	memset(cntbuf, 0, MAX_WLIW_IOCTL_LEN);
+	res = dev_wlc_bufvar_get(dev, "counters", cntbuf, MAX_WLIW_IOCTL_LEN);
+	if (res)
+	{
+		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
+		goto done;
+	}
+
+	cntinfo = (wl_cnt_info_t *)cntbuf;
+	cntinfo->version = dtoh16(cntinfo->version);
+	cntinfo->datalen = dtoh16(cntinfo->datalen);
+	ver = cntinfo->version;
+#ifdef WL_NAN
+	CHK_CNTBUF_DATALEN(cntbuf, MAX_WLIW_IOCTL_LEN);
+#endif
+	if (ver > WL_CNT_T_VERSION) {
+		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+			WL_CNT_T_VERSION, ver));
+		res = BCME_VERSION;
+		goto done;
+	}
+
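+	/* Counters v11 layouts depend on the d11 core revision, so fetch it
+	 * before converting the counter buffer to XTLV format.
+	 */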
+	if (ver == WL_CNT_VERSION_11) {
+		wlc_rev_info_t revinfo;
+		memset(&revinfo, 0, sizeof(revinfo));
+		res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+		if (res) {
+			WL_ERROR(("%s: WLC_GET_REVINFO failed %d\n", __FUNCTION__, res));
+			goto done;
+		}
+		corerev = dtoh32(revinfo.corerev);
+	}
+
+#ifdef WL_NAN
+	res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, MAX_WLIW_IOCTL_LEN, corerev);
+	if (res) {
+		WL_ERROR(("%s: wl_cntbuf_to_xtlv_format failed %d\n", __FUNCTION__, res));
+		goto done;
+	}
+
+	if ((res = bcm_unpack_xtlv_buf(wstats, cntinfo->data, cntinfo->datalen,
+		BCM_XTLV_OPTION_ALIGN32, wl_iw_get_wireless_stats_cbfn))) {
+		goto done;
+	}
+#endif
+#endif /* WIRELESS_EXT > 11 */
+
+done:
+#if WIRELESS_EXT > 11
+	if (cntbuf) {
+		kfree(cntbuf);
+	}
+#endif /* WIRELESS_EXT > 11 */
+	return res;
+}
+
+#ifndef WL_ESCAN
+static void
+wl_iw_timerfunc(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *t
+#else
+	unsigned long data
+#endif
+)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	iscan_info_t *iscan = from_timer(iscan, t, timer);
+#else
+	iscan_info_t *iscan = (iscan_info_t *)data;
+#endif
+	iscan->timer_on = 0;
+	if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+		WL_TRACE(("timer trigger\n"));
+		up(&iscan->sysioc_sem);
+	}
+}
+
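+/* Enable delivery of WLC_E_SCAN_COMPLETE in the firmware event mask while
+ * preserving the events that are already enabled.
+ */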
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/* Room for "event_msgs" + '\0' + bitvec */
+
+	dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+		iovbuf, sizeof(iovbuf));
+
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+	return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+	int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+	wl_iscan_params_t *params;
+	int err = 0;
+
+	if (ssid && ssid->SSID_len) {
+		params_size += sizeof(wlc_ssid_t);
+	}
+	params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		return -ENOMEM;
+	}
+	memset(params, 0, params_size);
+	ASSERT(params_size < WLC_IOCTL_SMLEN);
+
+	err = wl_iw_iscan_prep(&params->params, ssid);
+
+	if (!err) {
+		params->version = htod32(ISCAN_REQ_VERSION);
+		params->action = htod16(action);
+		params->scan_duration = htod16(0);
+
+		/* params_size += OFFSETOF(wl_iscan_params_t, params); */
+		(void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+			iscan->ioctlbuf, WLC_IOCTL_SMLEN);
+	}
+
+	kfree(params);
+	return err;
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+	iscan_buf_t * buf;
+	iscan_buf_t * ptr;
+	wl_iscan_results_t * list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	uint32 status;
+
+	/* buffers are allocated on demand */
+	if (iscan->list_cur) {
+		buf = iscan->list_cur;
+		iscan->list_cur = buf->next;
+	}
+	else {
+		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+		if (!buf)
+			return WL_SCAN_RESULTS_ABORTED;
+		buf->next = NULL;
+		if (!iscan->list_hdr)
+			iscan->list_hdr = buf;
+		else {
+			ptr = iscan->list_hdr;
+			while (ptr->next) {
+				ptr = ptr->next;
+			}
+			ptr->next = buf;
+		}
+	}
+	memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	(void) dev_iw_iovar_getbuf(
+		iscan->dev,
+		"iscanresults",
+		&list,
+		WL_ISCAN_RESULTS_FIXED_SIZE,
+		buf->iscan_buf,
+		WLC_IW_ISCAN_MAXLEN);
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	results->count = dtoh32(results->count);
+	WL_TRACE(("results->count = %d\n", results->count));
+
+	WL_TRACE(("results->buflen = %d\n", results->buflen));
+	status = dtoh32(list_buf->status);
+	return status;
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+	union iwreq_data wrqu;
+
+	memset(&wrqu, 0, sizeof(wrqu));
+
+	/* wext expects no data for the SIOCGIWSCAN event */
+	wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static int
+_iscan_sysioc_thread(void *data)
+{
+	uint32 status;
+	iscan_info_t *iscan = (iscan_info_t *)data;
+
+	printf("%s: thread Enter\n", __FUNCTION__);
+	DAEMONIZE("iscan_sysioc");
+
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (down_interruptible(&iscan->sysioc_sem) == 0) {
+		if (iscan->timer_on) {
+			del_timer(&iscan->timer);
+			iscan->timer_on = 0;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_lock();
+#endif
+		status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_unlock();
+#endif
+
+		switch (status) {
+			case WL_SCAN_RESULTS_PARTIAL:
+				WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_lock();
+#endif
+				/* make sure our buffer size is big enough before the next round */
+				wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_unlock();
+#endif
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_SUCCESS:
+				WL_TRACE(("iscanresults complete\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			case WL_SCAN_RESULTS_PENDING:
+				WL_TRACE(("iscanresults pending\n"));
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_ABORTED:
+				WL_TRACE(("iscanresults aborted\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			default:
+				WL_TRACE(("iscanresults returned unknown status %d\n", status));
+				break;
+		 }
+	}
+	printf("%s: was terminated\n", __FUNCTION__);
+	complete_and_exit(&iscan->sysioc_exited, 0);
+}
+
+void wl_iw_down(dhd_pub_t *dhdp)
+{
+	iscan_info_t *iscan = dhdp->iscan;
+
+	if (iscan)
+		wl_ext_add_remove_pm_enable_work(&iscan->conn_info, FALSE);
+}
+
+int
+wl_iw_attach(struct net_device *dev, dhd_pub_t *dhdp)
+{
+	iscan_info_t *iscan = NULL;
+
+	printf("%s: Enter\n", __FUNCTION__);
+
+	if (!dev)
+		return 0;
+
+	iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+	if (!iscan)
+		return -ENOMEM;
+	dhdp->iscan = (void *)iscan;
+	memset(iscan, 0, sizeof(iscan_info_t));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = NULL;
+#endif
+	iscan->sysioc_pid = -1;
+	/* we only care about the main interface, so save it here */
+	iscan->dev = dev;
+	iscan->conn_info.dev = dev;
+	iscan->conn_info.dhd = dhdp;
+	iscan->iscan_state = ISCAN_STATE_IDLE;
+
+	/* Set up the timer */
+	iscan->timer_ms    = 2000;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	timer_setup(&iscan->timer, wl_iw_timerfunc, 0);
+#else
+	init_timer(&iscan->timer);
+	iscan->timer.data = (ulong)iscan;
+	iscan->timer.function = wl_iw_timerfunc;
+#endif
+
+	sema_init(&iscan->sysioc_sem, 0);
+	init_completion(&iscan->sysioc_exited);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc");
+	iscan->sysioc_pid = iscan->kthread->pid;
+#else
+	iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+#endif
+	if (iscan->sysioc_pid < 0)
+		return -ENOMEM;
+	mutex_init(&iscan->conn_info.pm_sync);
+	INIT_DELAYED_WORK(&iscan->conn_info.pm_enable_work, wl_ext_pm_work_handler);
+	return 0;
+}
+
+void wl_iw_detach(dhd_pub_t *dhdp)
+{
+	iscan_buf_t  *buf;
+	iscan_info_t *iscan = dhdp->iscan;
+	if (!iscan)
+		return;
+	if (iscan->sysioc_pid >= 0) {
+		KILL_PROC(iscan->sysioc_pid, SIGTERM);
+		wait_for_completion(&iscan->sysioc_exited);
+	}
+	wl_ext_add_remove_pm_enable_work(&iscan->conn_info, FALSE);
+
+	while (iscan->list_hdr) {
+		buf = iscan->list_hdr->next;
+		kfree(iscan->list_hdr);
+		iscan->list_hdr = buf;
+	}
+	kfree(iscan);
+	dhdp->iscan = NULL;
+}
+#endif /* !WL_ESCAN */
+
+#endif /* USE_IW */
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h
new file mode 100644
index 0000000..b00a489
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.h
@@ -0,0 +1,166 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_iw.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <ethernet.h>
+#include <wlioctl.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+#define GET_SSID			"SSID="
+#define GET_CHANNEL			"CH="
+#define GET_NPROBE 			"NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL  	"ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL  	"PASSIVE="
+#define GET_HOME_DWELL  		"HOME="
+#define GET_SCAN_TYPE			"TYPE="
+
+#define BAND_GET_CMD				"GETBAND"
+#define BAND_SET_CMD				"SETBAND"
+#define DTIM_SKIP_GET_CMD			"DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD			"DTIMSKIPSET"
+#define SETSUSPEND_CMD				"SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD			"PNOSSIDCLR"
+/* Lin - Is the extra space needed? */
+#define PNOSETUP_SET_CMD			"PNOSETUP " /* TLV command has extra end space */
+#define PNOENABLE_SET_CMD			"PNOFORCE"
+#define PNODEBUG_SET_CMD			"PNODEBUG"
+#define TXPOWER_SET_CMD			"TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
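+
+/*
+ * Illustrative use of the two macros above (a sketch, not part of the original
+ * driver): they are meant to be used together, e.g.
+ *
+ *	printk("BSSID " MACSTR "\n", MAC2STR(bssid.octet));
+ *
+ * where bssid is assumed to be a struct ether_addr.
+ */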
+
+/* Structure to keep global parameters */
+typedef struct wl_iw_extra_params {
+	int 	target_channel; /* target channel */
+} wl_iw_extra_params_t;
+
+/* ============================================== */
+/* Defines from wlc_pub.h */
+#define	WL_IW_RSSI_MINVAL		-200	/* Low value, e.g. for forcing roam */
+#define	WL_IW_RSSI_NO_SIGNAL	-91	/* NDIS RSSI link quality cutoffs */
+#define	WL_IW_RSSI_VERY_LOW	-80	/* Very low quality cutoffs */
+#define	WL_IW_RSSI_LOW		-70	/* Low quality cutoffs */
+#define	WL_IW_RSSI_GOOD		-68	/* Good quality cutoffs */
+#define	WL_IW_RSSI_VERY_GOOD	-58	/* Very good quality cutoffs */
+#define	WL_IW_RSSI_EXCELLENT	-57	/* Excellent quality cutoffs */
+#define	WL_IW_RSSI_INVALID	 0	/* invalid RSSI value */
+#define MAX_WX_STRING 80
+#define SSID_FMT_BUF_LEN	((4 * 32) + 1)
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN	(SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI			(SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN	(SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED	(SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR	(SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP				(SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START			(SIOCIWFIRSTPRIV+13)
+
+#define G_SCAN_RESULTS		(8 * 1024)
+#define WE_ADD_EVENT_FIX	0x80
+#define G_WLAN_SET_ON		0
+#define G_WLAN_SET_OFF		1
+
+
+typedef struct wl_iw {
+	char nickname[IW_ESSID_MAX_SIZE];
+
+	struct iw_statistics wstats;
+
+	int spy_num;
+	uint32 pwsec;			/* pairwise wsec setting */
+	uint32 gwsec;			/* group wsec setting  */
+	bool privacy_invoked; 		/* IW_AUTH_PRIVACY_INVOKED setting */
+	struct ether_addr spy_addr[IW_MAX_SPY];
+	struct iw_quality spy_qual[IW_MAX_SPY];
+	void  *wlinfo;
+} wl_iw_t;
+
+struct wl_ctrl {
+	struct timer_list *timer;
+	struct net_device *dev;
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+};
+
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+#ifdef WL_ESCAN
+int wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi);
+#else
+int wl_iw_attach(struct net_device *dev, dhd_pub_t *dhdp);
+void wl_iw_detach(dhd_pub_t *dhdp);
+void wl_iw_down(dhd_pub_t *dhdp);
+#endif
+
+#define CSCAN_COMMAND				"CSCAN "
+#define CSCAN_TLV_PREFIX 			'S'
+#define CSCAN_TLV_VERSION			1
+#define CSCAN_TLV_SUBVERSION			0
+#define CSCAN_TLV_TYPE_SSID_IE          'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE   'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE     'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE      'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE    'P'
+#define CSCAN_TLV_TYPE_HOME_IE         'H'
+#define CSCAN_TLV_TYPE_STYPE_IE        'T'
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
+
+#endif /* _wl_iw_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
new file mode 100644
index 0000000..75a417b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
@@ -0,0 +1,406 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_linux_mon.c 576195 2015-08-01 18:21:54Z $
+ */
+
+#include <osl.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+typedef enum monitor_states
+{
+	MONITOR_STATE_DEINIT = 0x0,
+	MONITOR_STATE_INIT = 0x1,
+	MONITOR_STATE_INTERFACE_ADDED = 0x2,
+	MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+int dhd_add_monitor(const char *name, struct net_device **new_ndev);
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#ifndef DHD_MAX_IFS
+#define DHD_MAX_IFS 16
+#endif
+#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+	int radiotap_enabled;
+	struct net_device* real_ndev;	/* The real interface that the monitor is on */
+	struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+	void *dhd_pub;
+	monitor_states_t monitor_state;
+	monitor_interface mon_if[DHD_MAX_IFS];
+	struct mutex lock;		/* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(const char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+	.ndo_open		= dhd_mon_if_open,
+	.ndo_stop		= dhd_mon_if_stop,
+	.ndo_start_xmit		= dhd_mon_if_subif_start_xmit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+#endif
+	.ndo_set_mac_address 	= dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for
+ * "mon.eth0", and "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(const char *name)
+{
+	struct net_device *ndev_found = NULL;
+
+	int i;
+	int len = 0;
+	int last_name_len = 0;
+	struct net_device *ndev;
+
+	/* We need to find the interface "p2p-p2p-0" corresponding to the monitor interface
+	 * "mon-p2p-0". Once the mon iface name reaches IFNAMSIZ, it is reset to "p2p0-0" and
+	 * the corresponding mon iface would be "mon-p2p0-0".
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+		/* Skip "p2p" and look for "-p2p0-x" in the monitor interface name. If it
+		 * matches, then this netdev is the corresponding real_netdev.
+		 */
+		if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+			len = strlen("p2p");
+		} else {
+			/* If "p2p-" is not present, then IFNAMSIZ has been reached and the
+			 * name would have been reset. In this case, look for "p2p0-x" in
+			 * "mon-p2p0-x".
+			 */
+			len = 0;
+		}
+		if (ndev && strstr(name, (ndev->name + len))) {
+			if (strlen(ndev->name) > last_name_len) {
+				ndev_found = ndev;
+				last_name_len = strlen(ndev->name);
+			}
+		}
+	}
+
+	return ndev_found;
+}
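+
+/* Example (illustrative, not from the original source): if a real netdev is
+ * named "p2p-p2p0-0", len is set to strlen("p2p") and strstr() searches the
+ * monitor name for the "-p2p0-0" suffix, so a monitor interface such as
+ * "mon.p2p-p2p0-0" resolves to that netdev; when several netdevs match, the
+ * one with the longest name wins.
+ */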
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev)
+			return &g_monitor.mon_if[i];
+	}
+
+	return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	int ret = 0;
+	int rtap_len;
+	int qos_len = 0;
+	int dot11_hdr_len = 24;
+	int snap_len = 6;
+	unsigned char *pdata;
+	unsigned short frame_ctl;
+	unsigned char src_mac_addr[6];
+	unsigned char dst_mac_addr[6];
+	struct ieee80211_hdr *dot11_hdr;
+	struct ieee80211_radiotap_header *rtap_hdr;
+	monitor_interface* mon_if;
+
+	MON_PRINT("enter\n");
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		goto fail;
+	}
+
+	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+		goto fail;
+
+	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+	if (unlikely(rtap_hdr->it_version))
+		goto fail;
+
+	rtap_len = ieee80211_get_radiotap_len(skb->data);
+	if (unlikely(skb->len < rtap_len))
+		goto fail;
+
+	MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the radiotap header */
+	skb_pull(skb, rtap_len);
+
+	dot11_hdr = (struct ieee80211_hdr *)skb->data;
+	frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Only process data frames */
+	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* Check if this is a Wireless Distribution System (WDS) frame
+		 * which has 4 MAC addresses
+		 */
+		if (dot11_hdr->frame_control & 0x0080)
+			qos_len = 2;
+		if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+			dot11_hdr_len += 6;
+
+		memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+		memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+		/* Skip the 802.11 header, QoS (if any) and SNAP, but leave space
+		 * for the two MAC addresses
+		 */
+		skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+		pdata = (unsigned char*)skb->data;
+		memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+		memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+		PKTSETPRIO(skb, 0);
+
+		MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+		/* Use the real net device to transmit the packet */
+		ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+		return ret;
+	}
+fail:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+	int ret = 0;
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+	return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
+int dhd_add_monitor(const char *name, struct net_device **new_ndev)
+{
+	int i;
+	int idx = -1;
+	int ret = 0;
+	struct net_device* ndev = NULL;
+	dhd_linux_monitor_t **dhd_mon;
+
+	mutex_lock(&g_monitor.lock);
+
+	MON_TRACE("enter, if name: %s\n", name);
+	if (!name || !new_ndev) {
+		MON_PRINT("invalid parameters\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Find a vacancy
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (g_monitor.mon_if[i].mon_ndev == NULL) {
+			idx = i;
+			break;
+		}
+	if (idx == -1) {
+		MON_PRINT("exceeds maximum interfaces\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+	if (!ndev) {
+		MON_PRINT("failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+	strncpy(ndev->name, name, IFNAMSIZ);
+	ndev->name[IFNAMSIZ - 1] = 0;
+	ndev->netdev_ops = &dhd_mon_if_ops;
+
+	ret = register_netdevice(ndev);
+	if (ret) {
+		MON_PRINT(" register_netdevice failed (%d)\n", ret);
+		goto out;
+	}
+
+	*new_ndev = ndev;
+	g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+	g_monitor.mon_if[idx].mon_ndev = ndev;
+	g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+	dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+	*dhd_mon = &g_monitor;
+	g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+	MON_PRINT("net device returned: 0x%p\n", ndev);
+	MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name);
+
+out:
+	if (ret && ndev)
+		free_netdev(ndev);
+
+	mutex_unlock(&g_monitor.lock);
+	return ret;
+
+}
+
+int dhd_del_monitor(struct net_device *ndev)
+{
+	int i;
+	if (!ndev)
+		return -EINVAL;
+	mutex_lock(&g_monitor.lock);
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev ||
+			g_monitor.mon_if[i].real_ndev == ndev) {
+
+			g_monitor.mon_if[i].real_ndev = NULL;
+			unregister_netdevice(g_monitor.mon_if[i].mon_ndev);
+			free_netdev(g_monitor.mon_if[i].mon_ndev);
+			g_monitor.mon_if[i].mon_ndev = NULL;
+			g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+			break;
+		}
+	}
+
+	if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED)
+		MON_PRINT("IF not found in monitor array, is this a monitor IF? 0x%p\n", ndev);
+	mutex_unlock(&g_monitor.lock);
+
+	return 0;
+}
+
+int dhd_monitor_init(void *dhd_pub)
+{
+	if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+		g_monitor.dhd_pub = dhd_pub;
+		mutex_init(&g_monitor.lock);
+		g_monitor.monitor_state = MONITOR_STATE_INIT;
+	}
+	return 0;
+}
+
+int dhd_monitor_uninit(void)
+{
+	int i;
+	struct net_device *ndev;
+	if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+		mutex_lock(&g_monitor.lock);
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			ndev = g_monitor.mon_if[i].mon_ndev;
+			if (ndev) {
+				unregister_netdevice(ndev);
+				free_netdev(ndev);
+				g_monitor.mon_if[i].real_ndev = NULL;
+				g_monitor.mon_if[i].mon_ndev = NULL;
+			}
+		}
+		g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+		mutex_unlock(&g_monitor.lock);
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_roam.c b/drivers/net/wireless/bcmdhd/wl_roam.c
new file mode 100644
index 0000000..ec68d9a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_roam.c
@@ -0,0 +1,28 @@
+/*
+ * Linux roam cache
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_roam.c 662434 2016-09-29 12:21:46Z $
+ */
diff --git a/drivers/net/wireless/bcmdhd/wlc_types.h b/drivers/net/wireless/bcmdhd/wlc_types.h
new file mode 100644
index 0000000..7252cbd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wlc_types.h
@@ -0,0 +1,337 @@
+/*
+ * Forward declarations for commonly used wl driver structs
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlc_types.h 665242 2016-10-17 05:59:26Z $
+ */
+
+#ifndef _wlc_types_h_
+#define _wlc_types_h_
+#include <wlioctl.h>
+
+/* Version of WLC interface to be returned as a part of wl_wlc_version structure.
+ * WLC_API_VERSION_MINOR is currently not in use.
+ */
+#define WLC_API_VERSION_MAJOR      8
+#define WLC_API_VERSION_MINOR      0
+
+/* forward declarations */
+
+typedef struct wlc_info wlc_info_t;
+typedef struct wlcband wlcband_t;
+typedef struct wlc_cmn_info wlc_cmn_info_t;
+typedef struct wlc_assoc_info wlc_assoc_info_t;
+typedef struct wlc_pm_info wlc_pm_info_t;
+
+typedef struct wlc_bsscfg wlc_bsscfg_t;
+typedef struct wlc_mbss_info wlc_mbss_info_t;
+typedef struct wlc_spt wlc_spt_t;
+typedef struct scb scb_t;
+typedef struct scb_iter scb_iter_t;
+typedef struct vndr_ie_listel vndr_ie_listel_t;
+typedef struct wlc_if wlc_if_t;
+typedef struct wl_if wl_if_t;
+typedef struct led_info led_info_t;
+typedef struct bmac_led bmac_led_t;
+typedef struct bmac_led_info bmac_led_info_t;
+typedef struct seq_cmds_info wlc_seq_cmds_info_t;
+typedef struct ota_test_info ota_test_info_t;
+typedef struct wlc_ccx ccx_t;
+typedef struct wlc_ccx_rm ccx_rm_t;
+typedef struct apps_wlc_psinfo apps_wlc_psinfo_t;
+typedef struct scb_module scb_module_t;
+typedef struct ba_info ba_info_t;
+typedef struct wlc_frminfo wlc_frminfo_t;
+typedef struct amsdu_info amsdu_info_t;
+typedef struct txq_info txq_info_t;
+typedef struct txq txq_t;
+typedef struct cram_info cram_info_t;
+typedef struct wlc_extlog_info wlc_extlog_info_t;
+typedef struct wlc_txq_info wlc_txq_info_t;
+typedef struct wlc_hrt_info wlc_hrt_info_t;
+typedef struct wlc_hrt_to wlc_hrt_to_t;
+typedef struct wlc_cac wlc_cac_t;
+typedef struct ampdu_tx_info ampdu_tx_info_t;
+typedef struct ampdu_rx_info ampdu_rx_info_t;
+typedef struct wlc_ratesel_info wlc_ratesel_info_t;
+typedef struct ratesel_info ratesel_info_t;
+typedef struct wlc_ap_info wlc_ap_info_t;
+typedef struct cs_info cs_info_t;
+typedef struct wlc_scan_info wlc_scan_info_t;
+typedef struct wlc_scan_cmn_info wlc_scan_cmn_t;
+typedef struct tdls_info tdls_info_t;
+typedef struct dls_info dls_info_t;
+typedef struct l2_filter_info l2_filter_info_t;
+typedef struct wlc_auth_info wlc_auth_info_t;
+typedef struct wlc_sup_info wlc_sup_info_t;
+typedef struct wlc_fbt_info wlc_fbt_info_t;
+typedef struct wlc_assoc_mgr_info wlc_assoc_mgr_info_t;
+typedef struct wlc_ccxsup_info wlc_ccxsup_info_t;
+typedef struct wlc_psta_info wlc_psta_info_t;
+typedef struct wlc_mcnx_info wlc_mcnx_info_t;
+typedef struct wlc_p2p_info wlc_p2p_info_t;
+typedef struct wlc_cxnoa_info wlc_cxnoa_info_t;
+typedef struct mchan_info mchan_info_t;
+typedef struct wlc_mchan_context wlc_mchan_context_t;
+typedef struct bta_info bta_info_t;
+typedef struct wowl_info wowl_info_t;
+typedef struct wowlpf_info wowlpf_info_t;
+typedef struct wlc_plt_info wlc_plt_pub_t;
+typedef struct antsel_info antsel_info_t;
+typedef struct bmac_pmq bmac_pmq_t;
+typedef struct wmf_info wmf_info_t;
+typedef struct wlc_rrm_info wlc_rrm_info_t;
+typedef struct rm_info rm_info_t;
+
+struct d11init;
+
+typedef struct wlc_dpc_info wlc_dpc_info_t;
+
+typedef struct wlc_11h_info wlc_11h_info_t;
+typedef struct wlc_tpc_info wlc_tpc_info_t;
+typedef struct wlc_csa_info wlc_csa_info_t;
+typedef struct wlc_quiet_info wlc_quiet_info_t;
+typedef struct cca_info cca_info_t;
+typedef struct itfr_info itfr_info_t;
+
+typedef struct wlc_wnm_info wlc_wnm_info_t;
+typedef struct wlc_11d_info wlc_11d_info_t;
+typedef struct wlc_cntry_info wlc_cntry_info_t;
+
+typedef struct wlc_dfs_info wlc_dfs_info_t;
+
+typedef struct bsscfg_module bsscfg_module_t;
+
+typedef struct wlc_prot_info wlc_prot_info_t;
+typedef struct wlc_prot_g_info wlc_prot_g_info_t;
+typedef struct wlc_prot_n_info wlc_prot_n_info_t;
+typedef struct wlc_prot_obss_info wlc_prot_obss_info_t;
+typedef struct wlc_obss_dynbw wlc_obss_dynbw_t;
+typedef struct wlc_11u_info wlc_11u_info_t;
+typedef struct wlc_probresp_info wlc_probresp_info_t;
+typedef struct wlc_wapi_info wlc_wapi_info_t;
+
+typedef struct wlc_tbtt_info wlc_tbtt_info_t;
+typedef struct wlc_nic_info wlc_nic_info_t;
+
+typedef struct wlc_bssload_info wlc_bssload_info_t;
+
+typedef struct wlc_pcb_info wlc_pcb_info_t;
+typedef struct wlc_txc_info wlc_txc_info_t;
+
+typedef struct wlc_trf_mgmt_ctxt    wlc_trf_mgmt_ctxt_t;
+typedef struct wlc_trf_mgmt_info    wlc_trf_mgmt_info_t;
+
+typedef struct wlc_net_detect_ctxt  wlc_net_detect_ctxt_t;
+
+typedef struct wlc_powersel_info wlc_powersel_info_t;
+typedef struct powersel_info powersel_info_t;
+
+typedef struct wlc_lpc_info wlc_lpc_info_t;
+typedef struct lpc_info lpc_info_t;
+typedef struct rate_lcb_info rate_lcb_info_t;
+typedef struct wlc_txbf_info wlc_txbf_info_t;
+typedef struct wlc_murx_info wlc_murx_info_t;
+
+typedef struct wlc_olpc_eng_info_t wlc_olpc_eng_info_t;
+/* used by olpc to register for callbacks from stf */
+typedef void (*wlc_stf_txchain_evt_notify)(wlc_info_t *wlc);
+
+typedef struct wlc_rfc wlc_rfc_t;
+typedef struct wlc_pktc_info wlc_pktc_info_t;
+
+typedef struct wlc_mfp_info wlc_mfp_info_t;
+
+typedef struct wlc_mdns_info wlc_mdns_info_t;
+
+typedef struct wlc_macfltr_info wlc_macfltr_info_t;
+typedef struct wlc_bmon_info wlc_bmon_info_t;
+
+typedef struct wlc_nar_info wlc_nar_info_t;
+typedef struct wlc_bs_data_info wlc_bs_data_info_t;
+
+typedef struct wlc_keymgmt wlc_keymgmt_t;
+typedef struct wlc_key	wlc_key_t;
+typedef struct wlc_key_info wlc_key_info_t;
+
+typedef struct wlc_hw wlc_hw_t;
+typedef struct wlc_hw_info wlc_hw_info_t;
+typedef struct wlc_hwband wlc_hwband_t;
+
+typedef struct wlc_rx_stall_info wlc_rx_stall_info_t;
+
+typedef struct wlc_rmc_info wlc_rmc_info_t;
+
+typedef struct wlc_iem_info wlc_iem_info_t;
+
+typedef struct wlc_ier_info wlc_ier_info_t;
+typedef struct wlc_ier_reg wlc_ier_reg_t;
+
+typedef struct wlc_ht_info wlc_ht_info_t;
+typedef struct wlc_obss_info wlc_obss_info_t;
+typedef struct wlc_vht_info wlc_vht_info_t;
+typedef struct wlc_akm_info wlc_akm_info_t;
+typedef struct wlc_srvsdb_info wlc_srvsdb_info_t;
+
+typedef struct wlc_bss_info wlc_bss_info_t;
+
+typedef struct wlc_hs20_info wlc_hs20_info_t;
+typedef struct wlc_pmkid_info	wlc_pmkid_info_t;
+typedef struct wlc_btc_info wlc_btc_info_t;
+
+typedef struct wlc_txh_info wlc_txh_info_t;
+typedef union wlc_txd wlc_txd_t;
+
+typedef struct wlc_staprio_info wlc_staprio_info_t;
+typedef struct wlc_stamon_info wlc_stamon_info_t;
+typedef struct wlc_monitor_info wlc_monitor_info_t;
+
+typedef struct wlc_debug_crash_info wlc_debug_crash_info_t;
+
+typedef struct wlc_nan_info wlc_nan_info_t;
+typedef struct wlc_tsmap_info wlc_tsmap_info_t;
+
+typedef struct wlc_wds_info wlc_wds_info_t;
+typedef struct okc_info okc_info_t;
+typedef struct wlc_aibss_info wlc_aibss_info_t;
+typedef struct wlc_ipfo_info wlc_ipfo_info_t;
+typedef struct wlc_stats_info wlc_stats_info_t;
+
+typedef struct wlc_pps_info wlc_pps_info_t;
+
+typedef struct duration_info duration_info_t;
+
+typedef struct wlc_pdsvc_info wlc_pdsvc_info_t;
+
+/* For LTE Coex */
+typedef struct wlc_ltecx_info wlc_ltecx_info_t;
+
+typedef struct wlc_probresp_mac_filter_info wlc_probresp_mac_filter_info_t;
+
+typedef struct wlc_ltr_info wlc_ltr_info_t;
+
+typedef struct bwte_info bwte_info_t;
+
+typedef struct tbow_info tbow_info_t;
+
+typedef struct wlc_modesw_info wlc_modesw_info_t;
+
+typedef struct wlc_pm_mute_tx_info wlc_pm_mute_tx_t;
+
+typedef struct wlc_bcntrim_info wlc_bcntrim_info_t;
+
+typedef struct wlc_smfs_info wlc_smfs_info_t;
+typedef struct wlc_misc_info wlc_misc_info_t;
+typedef struct wlc_ulb_info wlc_ulb_info_t;
+
+typedef struct wlc_eventq wlc_eventq_t;
+typedef struct wlc_event wlc_event_t;
+typedef struct wlc_ulp_info wlc_ulp_info_t;
+
+typedef struct wlc_bsscfg_psq_info wlc_bsscfg_psq_info_t;
+typedef struct wlc_bsscfg_viel_info wlc_bsscfg_viel_info_t;
+
+typedef struct wlc_txmod_info wlc_txmod_info_t;
+typedef struct tx_path_node tx_path_node_t;
+
+typedef struct wlc_linkstats_info wlc_linkstats_info_t;
+typedef struct wlc_lq_info wlc_lq_info_t;
+typedef struct chanim_info chanim_info_t;
+
+typedef struct wlc_mesh_info wlc_mesh_info_t;
+typedef struct wlc_wlfc_info wlc_wlfc_info_t;
+
+typedef struct wlc_frag_info wlc_frag_info_t;
+typedef struct wlc_bss_list wlc_bss_list_t;
+
+typedef struct wlc_msch_info wlc_msch_info_t;
+typedef struct wlc_msch_req_handle wlc_msch_req_handle_t;
+
+typedef struct wlc_randmac_info wlc_randmac_info_t;
+
+typedef struct wlc_chanctxt wlc_chanctxt_t;
+typedef struct wlc_chanctxt_info wlc_chanctxt_info_t;
+typedef struct wlc_sta_info wlc_sta_info_t;
+
+typedef struct health_check_info health_check_info_t;
+typedef struct wlc_act_frame_info wlc_act_frame_info_t;
+typedef struct nan_sched_req_handle nan_sched_req_handle_t;
+
+typedef struct wlc_qos_info wlc_qos_info_t;
+
+typedef struct wlc_assoc wlc_assoc_t;
+typedef struct wlc_roam wlc_roam_t;
+typedef struct wlc_pm_st wlc_pm_st_t;
+typedef struct wlc_wme wlc_wme_t;
+
+typedef struct wlc_link_qual wlc_link_qual_t;
+
+typedef struct wlc_rsdb_info wlc_rsdb_info_t;
+
+typedef struct wlc_asdb wlc_asdb_t;
+
+typedef struct rsdb_common_info rsdb_cmn_info_t;
+
+typedef struct wlc_macdbg_info wlc_macdbg_info_t;
+typedef struct wlc_rspec_info wlc_rspec_info_t;
+typedef struct wlc_ndis_info wlc_ndis_info_t;
+
+typedef struct wlc_join_pref wlc_join_pref_t;
+
+typedef struct wlc_scan_utils wlc_scan_utils_t;
+#ifdef ACKSUPR_MAC_FILTER
+typedef struct wlc_addrmatch_info wlc_addrmatch_info_t;
+#endif /* ACKSUPR_MAC_FILTER */
+
+typedef struct cca_ucode_counts cca_ucode_counts_t;
+typedef struct cca_chan_qual cca_chan_qual_t;
+
+typedef struct wlc_perf_utils wlc_perf_utils_t;
+typedef struct wlc_test_info wlc_test_info_t;
+
+typedef struct chanswitch_times chanswitch_times_t;
+typedef struct wlc_dump_info wlc_dump_info_t;
+
+typedef struct wlc_stf wlc_stf_t;
+
+typedef sta_info_v4_t sta_info_t;
+typedef struct wl_roam_prof_band_v2 wl_roam_prof_band_t;
+typedef struct wl_roam_prof_v2 wl_roam_prof_t;
+
+/* Interface version mapping for versioned pfn structures */
+#undef PFN_SCANRESULT_VERSION
+#define PFN_SCANRESULT_VERSION PFN_SCANRESULT_VERSION_V2
+#define PFN_SCANRESULTS_VERSION PFN_SCANRESULTS_VERSION_V2
+#define PFN_LBEST_SCAN_RESULT_VERSION PFN_LBEST_SCAN_RESULT_VERSION_V2
+typedef wl_pfn_subnet_info_v2_t wl_pfn_subnet_info_t;
+typedef wl_pfn_net_info_v2_t wl_pfn_net_info_t;
+typedef wl_pfn_lnet_info_v2_t wl_pfn_lnet_info_t;
+typedef wl_pfn_lscanresults_v2_t wl_pfn_lscanresults_t;
+typedef wl_pfn_scanresults_v2_t wl_pfn_scanresults_t;
+typedef wl_pfn_scanresult_v2_t wl_pfn_scanresult_t;
+
+typedef wl_dfs_ap_move_status_v2_t wl_dfs_ap_move_status_t;
+
+#endif	/* _wlc_types_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c
new file mode 100644
index 0000000..9f77d8d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -0,0 +1,540 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wldev_common.c 699163 2017-05-12 05:18:23Z $
+ */
+
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#include <dhd_config.h>
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#define	WLDEV_ERROR(args)						\
+	do {										\
+		printk(KERN_ERR "WLDEV-ERROR) ");	\
+		printk args;							\
+	} while (0)
+
+#define	WLDEV_INFO(args)						\
+	do {										\
+		printk(KERN_INFO "WLDEV-INFO) ");	\
+		printk args;							\
+	} while (0)
+
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+	s32 ret = 0;
+	struct wl_ioctl ioc;
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
+
+	return ret;
+}
+
+
+/*
+ * SET commands:
+ * cast the const buffer to non-const and call the common wldev_ioctl() with set = 1
+ */
+
+s32 wldev_ioctl_set(
+	struct net_device *dev, u32 cmd, const void *arg, u32 len)
+{
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+	return wldev_ioctl(dev, cmd, (void *)arg, len, 1);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+}
+
+
+s32 wldev_ioctl_get(
+	struct net_device *dev, u32 cmd, void *arg, u32 len)
+{
+	return wldev_ioctl(dev, cmd, (void *)arg, len, 0);
+}
+
+/* Format an iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p.
+ */
+static s32 wldev_mkiovar(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, u32 buflen)
+{
+	s32 iolen = 0;
+
+	iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+	return iolen;
+}
+
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+
+	if (buf && (buflen > 0)) {
+		/* initialize the response buffer */
+		memset(buf, 0, buflen);
+	} else {
+		ret = BCME_BADARG;
+		goto exit;
+	}
+
+	ret = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+
+	if (!ret) {
+		ret = BCME_BUFTOOSHORT;
+		goto exit;
+	}
+	ret = wldev_ioctl_get(dev, WLC_GET_VAR, buf, buflen);
+exit:
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	if (iovar_len > 0)
+		ret = wldev_ioctl_set(dev, WLC_SET_VAR, buf, iovar_len);
+	else
+		ret = BCME_BUFTOOSHORT;
+
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), NULL);
+}
+
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
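+
+/*
+ * Illustrative usage of the two int helpers above (a sketch, not part of the
+ * original driver; "mpc" is only an example iovar name):
+ *
+ *	s32 mpc = 0;
+ *	if (wldev_iovar_getint(dev, "mpc", &mpc) == 0 && mpc != 1)
+ *		(void) wldev_iovar_setint(dev, "mpc", 1);
+ */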
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ *  taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ *  wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+
+	/* initialize buffer */
+	if (!iovar_buf || buflen == 0)
+		return BCME_BADARG;
+	memset(iovar_buf, 0, buflen);
+
+	if (bssidx == 0) {
+		return wldev_mkiovar(iovar_name, param, paramlen,
+			iovar_buf, buflen);
+	}
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
+	iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+	if (buflen < 0 || iolen > (u32)buflen)
+	{
+		WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+		return BCME_BUFTOOSHORT;
+	}
+
+	p = (s8 *)iovar_buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, iovar_name, namelen);
+	p += namelen;
+
+	/* bss config index as first param */
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(u32));
+	p += sizeof(u32);
+
+	/* parameter buffer follows */
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	return iolen;
+
+}
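+
+/*
+ * Resulting buffer layout for a non-zero bssidx (sketch derived from the code
+ * above; the iovar name "ssid" is only an example):
+ *
+ *	"bsscfg:" | "ssid\0" | bssidx as u32 (dongle byte order) | param bytes
+ *
+ * For bssidx == 0 the plain wldev_mkiovar() format is used instead.
+ */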
+
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+
+	wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl_get(dev, WLC_GET_VAR, buf, buflen);
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	if (iovar_len > 0)
+		ret = wldev_ioctl_set(dev, WLC_SET_VAR, buf, iovar_len);
+	else {
+		ret = BCME_BUFTOOSHORT;
+	}
+
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+}
+
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+int wldev_get_link_speed(
+	struct net_device *dev, int *plink_speed)
+{
+	int error;
+
+	if (!plink_speed)
+		return -ENOMEM;
+	*plink_speed = 0;
+	error = wldev_ioctl_get(dev, WLC_GET_RATE, plink_speed, sizeof(int));
+	if (unlikely(error))
+		return error;
+
+	/* Convert from units of 500 Kbps to Kbps */
+	*plink_speed *= 500;
+	return error;
+}
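+
+/*
+ * Worked example (illustrative only): WLC_GET_RATE reports the rate in units
+ * of 500 Kbps, so a raw value of 108 becomes 108 * 500 = 54000 Kbps, i.e. a
+ * 54 Mbps link.
+ */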
+
+int wldev_get_rssi(
+	struct net_device *dev, scb_val_t *scb_val)
+{
+	int error;
+
+	if (!scb_val)
+		return -ENOMEM;
+	memset(scb_val, 0, sizeof(scb_val_t));
+	error = wldev_ioctl_get(dev, WLC_GET_RSSI, scb_val, sizeof(scb_val_t));
+	if (unlikely(error))
+		return error;
+
+	return error;
+}
+
+int wldev_get_ssid(
+	struct net_device *dev, wlc_ssid_t *pssid)
+{
+	int error;
+
+	if (!pssid)
+		return -ENOMEM;
+	memset(pssid, 0, sizeof(wlc_ssid_t));
+	error = wldev_ioctl_get(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t));
+	if (unlikely(error))
+		return error;
+	pssid->SSID_len = dtoh32(pssid->SSID_len);
+	return error;
+}
+
+int wldev_get_band(
+	struct net_device *dev, uint *pband)
+{
+	int error;
+
+	*pband = 0;
+	error = wldev_ioctl_get(dev, WLC_GET_BAND, pband, sizeof(uint));
+	return error;
+}
+
+int wldev_set_band(
+	struct net_device *dev, uint band)
+{
+	int error = -1;
+
+	if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+		error = wldev_ioctl_set(dev, WLC_SET_BAND, &band, sizeof(band));
+		if (!error)
+			dhd_bus_band_set(dev, band);
+	}
+	return error;
+}
+
+int wldev_get_datarate(struct net_device *dev, int *datarate)
+{
+	int error = 0;
+
+	error = wldev_ioctl_get(dev, WLC_GET_RATE, datarate, sizeof(int));
+	if (error) {
+		return -1;
+	} else {
+		*datarate = dtoh32(*datarate);
+	}
+
+	return error;
+}
+
+#ifdef WL_CFG80211
+extern chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec);
+#define WL_EXTRA_BUF_MAX 2048
+int wldev_get_mode(
+	struct net_device *dev, uint8 *cap, uint8 caplen)
+{
+	int error = 0;
+	int chanspec = 0;
+	uint16 band = 0;
+	uint16 bandwidth = 0;
+	wl_bss_info_t *bss = NULL;
+	char* buf = NULL;
+
+	buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (!buf) {
+		WLDEV_ERROR(("%s:ENOMEM\n", __FUNCTION__));
+		return -ENOMEM;
+	}
+
+	*(u32*) buf = htod32(WL_EXTRA_BUF_MAX);
+	error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void*)buf, WL_EXTRA_BUF_MAX);
+	if (error) {
+		WLDEV_ERROR(("%s:failed:%d\n", __FUNCTION__, error));
+		kfree(buf);
+		buf = NULL;
+		return error;
+	}
+	bss = (struct  wl_bss_info *)(buf + 4);
+	chanspec = wl_chspec_driver_to_host(bss->chanspec);
+
+	band = chanspec & WL_CHANSPEC_BAND_MASK;
+	bandwidth = chanspec & WL_CHANSPEC_BW_MASK;
+
+	if (band == WL_CHANSPEC_BAND_2G) {
+		if (bss->n_cap)
+			strncpy(cap, "n", caplen);
+		else
+			strncpy(cap, "bg", caplen);
+	} else if (band == WL_CHANSPEC_BAND_5G) {
+		if (bandwidth == WL_CHANSPEC_BW_80)
+			strncpy(cap, "ac", caplen);
+		else if ((bandwidth == WL_CHANSPEC_BW_40) || (bandwidth == WL_CHANSPEC_BW_20)) {
+			if ((bss->nbss_cap & 0xf00) && (bss->n_cap))
+				strncpy(cap, "n|ac", caplen);
+			else if (bss->n_cap)
+				strncpy(cap, "n", caplen);
+			else if (bss->vht_cap)
+				strncpy(cap, "ac", caplen);
+			else
+				strncpy(cap, "a", caplen);
+		} else {
+			WLDEV_ERROR(("%s:Mode get failed\n", __FUNCTION__));
+			error = BCME_ERROR;
+		}
+
+	}
+	kfree(buf);
+	buf = NULL;
+	return error;
+}
+#endif
+
+int wldev_set_country(
+	struct net_device *dev, char *country_code, bool notify, bool user_enforced, int revinfo)
+{
+	int error = -1;
+	wl_country_t cspec = {{0}, 0, {0}};
+	wl_country_t cur_cspec = {{0}, 0, {0}};	/* current ccode */
+	scb_val_t scbval;
+#ifdef WL_CFG80211
+	struct wireless_dev *wdev = ndev_to_wdev(dev);
+	struct wiphy *wiphy = wdev->wiphy;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#endif
+
+	if (!country_code)
+		return error;
+
+	bzero(&scbval, sizeof(scb_val_t));
+	error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cur_cspec, sizeof(wl_country_t), NULL);
+	if (error < 0) {
+		WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
+		return error;
+	}
+
+	cspec.rev = revinfo;
+	memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+	memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+	error = dhd_conf_map_country_list(dhd_get_pub(dev), &cspec);
+	if (error)
+		dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
+
+	WLDEV_INFO(("%s: Current country %s rev %d\n",
+		__FUNCTION__, cur_cspec.ccode, cur_cspec.rev));
+
+	if ((error < 0) ||
+		dhd_force_country_change(dev) ||
+	    (strncmp(cspec.ccode, cur_cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) {
+
+		if ((user_enforced)
+#ifdef WL_CFG80211
+			&& (wl_get_drv_status(cfg, CONNECTED, dev))
+#endif
+		) {
+			bzero(&scbval, sizeof(scb_val_t));
+			error = wldev_ioctl_set(dev, WLC_DISASSOC,
+			                        &scbval, sizeof(scb_val_t));
+			if (error < 0) {
+				WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n",
+					__FUNCTION__, error));
+				return error;
+			}
+		}
+
+		error = dhd_conf_set_country(dhd_get_pub(dev), &cspec);
+		if (error < 0) {
+			WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
+				__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+			return error;
+		}
+		dhd_conf_fix_country(dhd_get_pub(dev));
+		dhd_conf_get_country(dhd_get_pub(dev), &cspec);
+		dhd_bus_country_set(dev, &cspec, notify);
+		printf("%s: set country for %s as %s rev %d\n",
+			__FUNCTION__, country_code, cspec.ccode, cspec.rev);
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h
new file mode 100644
index 0000000..4aa039e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -0,0 +1,130 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wldev_common.h 699163 2017-05-12 05:18:23Z $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wldev_ioctl - get/set IOCTLs, will call the net_device's do_ioctl (or
+ *  netdev_ops->ndo_do_ioctl in newer kernels)
+ *  @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+s32 wldev_ioctl_get(
+	struct net_device *dev, u32 cmd, void *arg, u32 len);
+
+s32 wldev_ioctl_set(
+	struct net_device *dev, u32 cmd, const void *arg, u32 len);
+
+/** Retrieve named IOVARs. This function calls wldev_ioctl() with the
+ *  WLC_GET_VAR IOCTL code.
+ */
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs. This function calls wldev_ioctl() with the
+ *  WLC_SET_VAR IOCTL code.
+ */
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following function can be implemented if there is a need for bsscfg
+ *  indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg indexed IOVARs. This function calls wldev_ioctl()
+ *  with the WLC_GET_VAR IOCTL code.
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg indexed IOVARs. This function calls wldev_ioctl()
+ *  with the WLC_SET_VAR IOCTL code.
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern int dhd_net_set_fw_path(struct net_device *dev, char *fw);
+extern int dhd_net_bus_suspend(struct net_device *dev);
+extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage);
+extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on,
+	unsigned long delay_msec);
+extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
+extern bool dhd_force_country_change(struct net_device *dev);
+extern void dhd_bus_band_set(struct net_device *dev, uint band);
+extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,
+	bool user_enforced, int revinfo);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int net_os_set_max_dtim_enable(struct net_device *dev, int val);
+extern int wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
+	int max, int *bytes_left);
+
+/* Get the link speed from the dongle; speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, scb_val_t *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+int wldev_get_mode(struct net_device *dev, uint8 *pband, uint8 caplen);
+int wldev_get_datarate(struct net_device *dev, int *datarate);
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2d03104..e7d28b2 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -118,4 +118,6 @@
 
 source "drivers/staging/hikey9xx/Kconfig"
 
+source "drivers/staging/nanohub/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 757a892..6161bd3 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -49,3 +49,4 @@
 obj-$(CONFIG_QLGE)		+= qlge/
 obj-$(CONFIG_WFX)		+= wfx/
 obj-y				+= hikey9xx/
+obj-$(CONFIG_NANOHUB)           += nanohub/
diff --git a/drivers/staging/media/meson/vdec/codec_h264.c b/drivers/staging/media/meson/vdec/codec_h264.c
index c61128f..d53c9a4 100644
--- a/drivers/staging/media/meson/vdec/codec_h264.c
+++ b/drivers/staging/media/meson/vdec/codec_h264.c
@@ -353,7 +353,8 @@
 		frame_width, frame_height, crop_right, crop_bottom);
 
 	codec_h264_set_par(sess);
-	amvdec_src_change(sess, frame_width, frame_height, h264->max_refs + 5);
+	amvdec_src_change(sess, frame_width, frame_height,
+			  h264->max_refs + 5, 8);
 }
 
 /*
diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.c b/drivers/staging/media/meson/vdec/codec_hevc_common.c
index 0315cc0..78fada7 100644
--- a/drivers/staging/media/meson/vdec/codec_hevc_common.c
+++ b/drivers/staging/media/meson/vdec/codec_hevc_common.c
@@ -10,7 +10,6 @@
 #include "vdec_helpers.h"
 #include "hevc_regs.h"
 
-#define MMU_COMPRESS_HEADER_SIZE 0x48000
 #define MMU_MAP_SIZE 0x4800
 
 const u16 vdec_hevc_parser_cmd[] = {
@@ -30,8 +29,11 @@
 void codec_hevc_setup_decode_head(struct amvdec_session *sess, int is_10bit)
 {
 	struct amvdec_core *core = sess->core;
-	u32 body_size = amvdec_am21c_body_size(sess->width, sess->height);
-	u32 head_size = amvdec_am21c_head_size(sess->width, sess->height);
+	u32 use_mmu = codec_hevc_use_mmu(core->platform->revision,
+					 sess->pixfmt_cap, is_10bit);
+	u32 body_size = amvdec_amfbc_body_size(sess->width, sess->height,
+					       is_10bit, use_mmu);
+	u32 head_size = amvdec_amfbc_head_size(sess->width, sess->height);
 
 	if (!codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit)) {
 		/* Enable 2-plane reference read mode */
@@ -39,9 +41,17 @@
 		return;
 	}
 
+	/* enable mem saving mode for 8-bit */
+	if (!is_10bit)
+		amvdec_write_dos_bits(core, HEVC_SAO_CTRL5, BIT(9));
+	else
+		amvdec_clear_dos_bits(core, HEVC_SAO_CTRL5, BIT(9));
+
 	if (codec_hevc_use_mmu(core->platform->revision,
 			       sess->pixfmt_cap, is_10bit))
 		amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(4));
+	else if (!is_10bit)
+		amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, BIT(3));
 	else
 		amvdec_write_dos(core, HEVCD_MPP_DECOMP_CTL1, 0);
 
@@ -73,7 +83,7 @@
 
 		idx = vb->index;
 
-		if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit))
+		if (codec_hevc_use_fbc(sess->pixfmt_cap, is_10bit))
 			buf_y_paddr = comm->fbc_buffer_paddr[idx];
 		else
 			buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
@@ -114,7 +124,6 @@
 {
 	struct amvdec_core *core = sess->core;
 	struct v4l2_m2m_buffer *buf;
-	u32 revision = core->platform->revision;
 	u32 pixfmt_cap = sess->pixfmt_cap;
 	int i;
 
@@ -127,9 +136,7 @@
 		dma_addr_t buf_uv_paddr = 0;
 		u32 idx = vb->index;
 
-		if (codec_hevc_use_mmu(revision, pixfmt_cap, is_10bit))
-			buf_y_paddr = comm->mmu_header_paddr[idx];
-		else if (codec_hevc_use_downsample(pixfmt_cap, is_10bit))
+		if (codec_hevc_use_downsample(pixfmt_cap, is_10bit))
 			buf_y_paddr = comm->fbc_buffer_paddr[idx];
 		else
 			buf_y_paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
@@ -154,7 +161,12 @@
 				 struct codec_hevc_common *comm)
 {
 	struct device *dev = sess->core->dev;
-	u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
+	u32 use_mmu = codec_hevc_use_mmu(sess->core->platform->revision,
+					 sess->pixfmt_cap,
+					 sess->bitdepth == 10 ? 1 : 0);
+	u32 am21_size = amvdec_amfbc_size(sess->width, sess->height,
+					  sess->bitdepth == 10 ? 1 : 0,
+					  use_mmu);
 	int i;
 
 	for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
@@ -165,6 +177,14 @@
 			comm->fbc_buffer_vaddr[i] = NULL;
 		}
 	}
+
+	if (comm->mmu_map_vaddr) {
+		dma_free_coherent(dev, MMU_MAP_SIZE,
+				  comm->mmu_map_vaddr,
+				  comm->mmu_map_paddr);
+		comm->mmu_map_vaddr = NULL;
+	}
+
 }
 EXPORT_SYMBOL_GPL(codec_hevc_free_fbc_buffers);
 
@@ -173,7 +193,12 @@
 {
 	struct device *dev = sess->core->dev;
 	struct v4l2_m2m_buffer *buf;
-	u32 am21_size = amvdec_am21c_size(sess->width, sess->height);
+	u32 use_mmu = codec_hevc_use_mmu(sess->core->platform->revision,
+					 sess->pixfmt_cap,
+					 sess->bitdepth == 10 ? 1 : 0);
+	u32 am21_size = amvdec_amfbc_size(sess->width, sess->height,
+					  sess->bitdepth == 10 ? 1 : 0,
+					  use_mmu);
 
 	v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
 		u32 idx = buf->vb.vb2_buf.index;
@@ -192,79 +217,29 @@
 	return 0;
 }
 
-void codec_hevc_free_mmu_headers(struct amvdec_session *sess,
-				 struct codec_hevc_common *comm)
-{
-	struct device *dev = sess->core->dev;
-	int i;
-
-	for (i = 0; i < MAX_REF_PIC_NUM; ++i) {
-		if (comm->mmu_header_vaddr[i]) {
-			dma_free_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
-					  comm->mmu_header_vaddr[i],
-					  comm->mmu_header_paddr[i]);
-			comm->mmu_header_vaddr[i] = NULL;
-		}
-	}
-
-	if (comm->mmu_map_vaddr) {
-		dma_free_coherent(dev, MMU_MAP_SIZE,
-				  comm->mmu_map_vaddr,
-				  comm->mmu_map_paddr);
-		comm->mmu_map_vaddr = NULL;
-	}
-}
-EXPORT_SYMBOL_GPL(codec_hevc_free_mmu_headers);
-
-static int codec_hevc_alloc_mmu_headers(struct amvdec_session *sess,
-					struct codec_hevc_common *comm)
-{
-	struct device *dev = sess->core->dev;
-	struct v4l2_m2m_buffer *buf;
-
-	comm->mmu_map_vaddr = dma_alloc_coherent(dev, MMU_MAP_SIZE,
-						 &comm->mmu_map_paddr,
-						 GFP_KERNEL);
-	if (!comm->mmu_map_vaddr)
-		return -ENOMEM;
-
-	v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
-		u32 idx = buf->vb.vb2_buf.index;
-		dma_addr_t paddr;
-		void *vaddr = dma_alloc_coherent(dev, MMU_COMPRESS_HEADER_SIZE,
-						 &paddr, GFP_KERNEL);
-		if (!vaddr) {
-			codec_hevc_free_mmu_headers(sess, comm);
-			return -ENOMEM;
-		}
-
-		comm->mmu_header_vaddr[idx] = vaddr;
-		comm->mmu_header_paddr[idx] = paddr;
-	}
-
-	return 0;
-}
-
 int codec_hevc_setup_buffers(struct amvdec_session *sess,
 			     struct codec_hevc_common *comm,
 			     int is_10bit)
 {
 	struct amvdec_core *core = sess->core;
+	struct device *dev = core->dev;
 	int ret;
 
-	if (codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit)) {
-		ret = codec_hevc_alloc_fbc_buffers(sess, comm);
-		if (ret)
-			return ret;
-	}
-
 	if (codec_hevc_use_mmu(core->platform->revision,
 			       sess->pixfmt_cap, is_10bit)) {
-		ret = codec_hevc_alloc_mmu_headers(sess, comm);
-		if (ret) {
-			codec_hevc_free_fbc_buffers(sess, comm);
+		comm->mmu_map_vaddr = dma_alloc_coherent(dev, MMU_MAP_SIZE,
+							 &comm->mmu_map_paddr,
+							 GFP_KERNEL);
+		if (!comm->mmu_map_vaddr)
+			return -ENOMEM;
+	}
+
+	if (codec_hevc_use_mmu(core->platform->revision,
+			       sess->pixfmt_cap, is_10bit) ||
+	    codec_hevc_use_downsample(sess->pixfmt_cap, is_10bit)) {
+		ret = codec_hevc_alloc_fbc_buffers(sess, comm);
+		if (ret)
 			return ret;
-		}
 	}
 
 	if (core->platform->revision == VDEC_REVISION_GXBB)
@@ -278,19 +253,19 @@
 
 void codec_hevc_fill_mmu_map(struct amvdec_session *sess,
 			     struct codec_hevc_common *comm,
-			     struct vb2_buffer *vb)
+			     struct vb2_buffer *vb,
+			     u32 is_10bit)
 {
-	u32 size = amvdec_am21c_size(sess->width, sess->height);
+	u32 use_mmu = codec_hevc_use_mmu(sess->core->platform->revision,
+					 sess->pixfmt_cap, is_10bit);
+	u32 size = amvdec_amfbc_size(sess->width, sess->height, is_10bit,
+				     use_mmu);
 	u32 nb_pages = size / PAGE_SIZE;
 	u32 *mmu_map = comm->mmu_map_vaddr;
 	u32 first_page;
 	u32 i;
 
-	if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
-		first_page = comm->fbc_buffer_paddr[vb->index] >> PAGE_SHIFT;
-	else
-		first_page = vb2_dma_contig_plane_dma_addr(vb, 0) >> PAGE_SHIFT;
-
+	first_page = comm->fbc_buffer_paddr[vb->index] >> PAGE_SHIFT;
 	for (i = 0; i < nb_pages; ++i)
 		mmu_map[i] = first_page + i;
 }
diff --git a/drivers/staging/media/meson/vdec/codec_hevc_common.h b/drivers/staging/media/meson/vdec/codec_hevc_common.h
index 88e4379..5a3c652 100644
--- a/drivers/staging/media/meson/vdec/codec_hevc_common.h
+++ b/drivers/staging/media/meson/vdec/codec_hevc_common.h
@@ -22,9 +22,6 @@
 	void      *fbc_buffer_vaddr[MAX_REF_PIC_NUM];
 	dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM];
 
-	void      *mmu_header_vaddr[MAX_REF_PIC_NUM];
-	dma_addr_t mmu_header_paddr[MAX_REF_PIC_NUM];
-
 	void      *mmu_map_vaddr;
 	dma_addr_t mmu_map_paddr;
 };
@@ -32,14 +29,15 @@
 /* Returns 1 if we must use framebuffer compression */
 static inline int codec_hevc_use_fbc(u32 pixfmt, int is_10bit)
 {
-	/* TOFIX: Handle Amlogic Compressed buffer for 8bit also */
-	return is_10bit;
+	return pixfmt == V4L2_PIX_FMT_YUV420_8BIT ||
+	       pixfmt == V4L2_PIX_FMT_YUV420_10BIT ||
+	       is_10bit;
 }
 
 /* Returns 1 if we are decoding 10-bit but outputting 8-bit NV12 */
 static inline int codec_hevc_use_downsample(u32 pixfmt, int is_10bit)
 {
-	return is_10bit;
+	return pixfmt == V4L2_PIX_FMT_NV12M && is_10bit;
 }
 
 /* Returns 1 if we are decoding using the IOMMU */
@@ -66,6 +64,7 @@
 
 void codec_hevc_fill_mmu_map(struct amvdec_session *sess,
 			     struct codec_hevc_common *comm,
-			     struct vb2_buffer *vb);
+			     struct vb2_buffer *vb,
+			     u32 is_10bit);
 
 #endif
diff --git a/drivers/staging/media/meson/vdec/codec_vp9.c b/drivers/staging/media/meson/vdec/codec_vp9.c
index 897f5d7..e193a2f 100644
--- a/drivers/staging/media/meson/vdec/codec_vp9.c
+++ b/drivers/staging/media/meson/vdec/codec_vp9.c
@@ -458,12 +458,6 @@
 	struct list_head ref_frames_list;
 	u32 frames_num;
 
-	/* In case of downsampling (decoding with FBC but outputting in NV12M),
-	 * we need to allocate additional buffers for FBC.
-	 */
-	void      *fbc_buffer_vaddr[MAX_REF_PIC_NUM];
-	dma_addr_t fbc_buffer_paddr[MAX_REF_PIC_NUM];
-
 	int ref_frame_map[REF_FRAMES];
 	int next_ref_frame_map[REF_FRAMES];
 	struct vp9_frame *frame_refs[REFS_PER_FRAME];
@@ -901,11 +895,8 @@
 		buf_y_paddr =
 		       vb2_dma_contig_plane_dma_addr(vb, 0);
 
-	if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit)) {
-		val = amvdec_read_dos(core, HEVC_SAO_CTRL5) & ~0xff0200;
-		amvdec_write_dos(core, HEVC_SAO_CTRL5, val);
+	if (codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
 		amvdec_write_dos(core, HEVC_CM_BODY_START_ADDR, buf_y_paddr);
-	}
 
 	if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M) {
 		buf_y_paddr =
@@ -921,7 +912,7 @@
 	if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
 			       vp9->is_10bit)) {
 		amvdec_write_dos(core, HEVC_CM_HEADER_START_ADDR,
-				 vp9->common.mmu_header_paddr[vb->index]);
+				 vb2_dma_contig_plane_dma_addr(vb, 0));
 		/* use HEVC_CM_HEADER_START_ADDR */
 		amvdec_write_dos_bits(core, HEVC_SAO_CTRL5, BIT(10));
 	}
@@ -956,7 +947,8 @@
 		val &= ~0x3;
 		if (!codec_hevc_use_fbc(sess->pixfmt_cap, vp9->is_10bit))
 			val |= BIT(0); /* disable cm compression */
-		/* TOFIX: Handle Amlogic Framebuffer compression */
+		else if (amvdec_is_dst_fbc(sess))
+			val |= BIT(1); /* Disable double write */
 	}
 
 	amvdec_write_dos(core, HEVC_SAO_CTRL1, val);
@@ -1147,6 +1139,8 @@
 			     struct codec_vp9 *vp9)
 {
 	struct amvdec_core *core = sess->core;
+	u32 use_mmu = codec_hevc_use_mmu(core->platform->revision,
+					 sess->pixfmt_cap, vp9->is_10bit);
 	u32 scale = 0;
 	u32 sz;
 	int i;
@@ -1166,8 +1160,9 @@
 		    vp9->frame_refs[i]->height != vp9->height)
 			scale = 1;
 
-		sz = amvdec_am21c_body_size(vp9->frame_refs[i]->width,
-					    vp9->frame_refs[i]->height);
+		sz = amvdec_amfbc_body_size(vp9->frame_refs[i]->width,
+					    vp9->frame_refs[i]->height,
+					    vp9->is_10bit, use_mmu);
 
 		amvdec_write_dos(core, VP9D_MPP_REFINFO_DATA,
 				 vp9->frame_refs[i]->width);
@@ -1283,7 +1278,8 @@
 	if (codec_hevc_use_mmu(core->platform->revision, sess->pixfmt_cap,
 			       vp9->is_10bit))
 		codec_hevc_fill_mmu_map(sess, &vp9->common,
-					&vp9->cur_frame->vbuf->vb2_buf);
+					&vp9->cur_frame->vbuf->vb2_buf,
+					vp9->is_10bit);
 
 	intra_only = param->p.show_frame ? 0 : param->p.intra_only;
 
@@ -2132,7 +2128,8 @@
 
 	codec_vp9_fetch_rpm(sess);
 	if (codec_vp9_process_rpm(vp9)) {
-		amvdec_src_change(sess, vp9->width, vp9->height, 16);
+		amvdec_src_change(sess, vp9->width, vp9->height, 16,
+				  vp9->is_10bit ? 10 : 8);
 
 		/* No frame is actually processed */
 		vp9->cur_frame = NULL;
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 7a818ca..a039d3290 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -192,6 +192,7 @@
 {
 	struct amvdec_session *sess = vb2_get_drv_priv(q);
 	u32 output_size = amvdec_get_output_size(sess);
+	u32 revision = sess->core->platform->revision;
 
 	if (*num_planes) {
 		switch (q->type) {
@@ -215,6 +216,12 @@
 				    sizes[2] < output_size / 4)
 					return -EINVAL;
 				break;
+			case V4L2_PIX_FMT_YUV420_8BIT:
+			case V4L2_PIX_FMT_YUV420_10BIT:
+				if (*num_planes != 1 ||
+				    sizes[0] < MMU_COMPRESS_HEADER_SIZE)
+					return -EINVAL;
+				break;
 			default:
 				return -EINVAL;
 			}
@@ -244,6 +251,24 @@
 			sizes[2] = output_size / 4;
 			*num_planes = 3;
 			break;
+		case V4L2_PIX_FMT_YUV420_8BIT:
+			if (revision >= VDEC_REVISION_G12A)
+				sizes[0] = MMU_COMPRESS_HEADER_SIZE;
+			else
+				sizes[0] = amvdec_amfbc_size(sess->width,
+							     sess->height,
+							     0, 0);
+			*num_planes = 1;
+			break;
+		case V4L2_PIX_FMT_YUV420_10BIT:
+			if (revision >= VDEC_REVISION_G12A)
+				sizes[0] = MMU_COMPRESS_HEADER_SIZE;
+			else
+				sizes[0] = amvdec_amfbc_size(sess->width,
+							     sess->height,
+							     1, 0);
+			*num_planes = 1;
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -496,6 +521,7 @@
 	struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
 	const struct amvdec_format *fmts = sess->core->platform->formats;
 	const struct amvdec_format *fmt_out = NULL;
+	u32 revision = sess->core->platform->revision;
 	u32 output_size = 0;
 
 	memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
@@ -548,6 +574,26 @@
 			pfmt[2].sizeimage = output_size / 2;
 			pfmt[2].bytesperline = ALIGN(pixmp->width, 32) / 2;
 			pixmp->num_planes = 3;
+		} else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420_8BIT) {
+			if (revision >= VDEC_REVISION_G12A) {
+				pfmt[0].sizeimage = MMU_COMPRESS_HEADER_SIZE;
+			} else {
+				pfmt[0].sizeimage =
+					amvdec_amfbc_size(pixmp->width,
+							  pixmp->height, 0, 0);
+				pfmt[0].bytesperline = pixmp->width;
+			}
+			pixmp->num_planes = 1;
+		} else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420_10BIT) {
+			if (revision >= VDEC_REVISION_G12A) {
+				pfmt[0].sizeimage = MMU_COMPRESS_HEADER_SIZE;
+			} else {
+				pfmt[0].sizeimage =
+					amvdec_amfbc_size(pixmp->width,
+							  pixmp->height, 1, 0);
+				pfmt[0].bytesperline = pixmp->width;
+			}
+			pixmp->num_planes = 1;
 		}
 	}
 
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index f95445a..1412054 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -17,6 +17,9 @@
 
 #include "vdec_platform.h"
 
+/* MMU header size for codecs using the IOMMU + FBC */
+#define MMU_COMPRESS_HEADER_SIZE 0x48000
+
 /* 32 buffers in 3-plane YUV420 */
 #define MAX_CANVAS (32 * 3)
 
@@ -234,6 +237,7 @@
 	u32 width;
 	u32 height;
 	u32 colorspace;
+	u32 bitdepth;
 	u8 ycbcr_enc;
 	u8 quantization;
 	u8 xfer_func;
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
index db4a854..03dda5a 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.c
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -50,32 +50,47 @@
 }
 EXPORT_SYMBOL_GPL(amvdec_write_parser);
 
-/* 4 KiB per 64x32 block */
-u32 amvdec_am21c_body_size(u32 width, u32 height)
+/* The AMFBC body is made up of 64x32 blocks; the per-block size depends on
+ * bit depth and on whether the IOMMU is used.
+ */
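+/* Illustrative example: a 1920x1080 10-bit frame spans
+ * ALIGN(1920, 64) / 64 = 30 by ALIGN(1080, 32) / 32 = 34 blocks,
+ * i.e. 30 * 34 * 4096 bytes (~4 MiB) of body data.
+ */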
+u32 amvdec_amfbc_body_size(u32 width, u32 height, u32 is_10bit, u32 use_mmu)
 {
 	u32 width_64 = ALIGN(width, 64) / 64;
 	u32 height_32 = ALIGN(height, 32) / 32;
+	u32 blk_size = 4096;
 
-	return SZ_4K * width_64 * height_32;
+	if (!is_10bit) {
+		if (use_mmu)
+			blk_size = 3200;
+		else
+			blk_size = 3072;
+	}
+
+	return blk_size * width_64 * height_32;
 }
-EXPORT_SYMBOL_GPL(amvdec_am21c_body_size);
+EXPORT_SYMBOL_GPL(amvdec_amfbc_body_size);
 
 /* 32 bytes per 128x64 block */
-u32 amvdec_am21c_head_size(u32 width, u32 height)
+u32 amvdec_amfbc_head_size(u32 width, u32 height)
 {
 	u32 width_128 = ALIGN(width, 128) / 128;
 	u32 height_64 = ALIGN(height, 64) / 64;
 
 	return 32 * width_128 * height_64;
 }
-EXPORT_SYMBOL_GPL(amvdec_am21c_head_size);
+EXPORT_SYMBOL_GPL(amvdec_amfbc_head_size);
 
-u32 amvdec_am21c_size(u32 width, u32 height)
+u32 amvdec_amfbc_size(u32 width, u32 height, u32 is_10bit, u32 use_mmu)
 {
-	return ALIGN(amvdec_am21c_body_size(width, height) +
-		     amvdec_am21c_head_size(width, height), SZ_64K);
+	return ALIGN(amvdec_amfbc_body_size(width, height, is_10bit, use_mmu) +
+		     amvdec_amfbc_head_size(width, height), SZ_64K);
 }
-EXPORT_SYMBOL_GPL(amvdec_am21c_size);
+EXPORT_SYMBOL_GPL(amvdec_amfbc_size);
+
+u32 amvdec_is_dst_fbc(struct amvdec_session *sess)
+{
+	return sess->pixfmt_cap == V4L2_PIX_FMT_YUV420_8BIT ||
+	       sess->pixfmt_cap == V4L2_PIX_FMT_YUV420_10BIT;
+}
+EXPORT_SYMBOL_GPL(amvdec_is_dst_fbc);
 
 static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id)
 {
@@ -288,6 +303,22 @@
 		vbuf->vb2_buf.planes[1].bytesused = output_size / 4;
 		vbuf->vb2_buf.planes[2].bytesused = output_size / 4;
 		break;
+	case V4L2_PIX_FMT_YUV420_8BIT:
+		if (sess->core->platform->revision >= VDEC_REVISION_G12A)
+			vbuf->vb2_buf.planes[0].bytesused =
+				MMU_COMPRESS_HEADER_SIZE;
+		else
+			vbuf->vb2_buf.planes[0].bytesused =
+			   amvdec_amfbc_size(sess->width, sess->height, 0, 0);
+		break;
+	case V4L2_PIX_FMT_YUV420_10BIT:
+		if (sess->core->platform->revision >= VDEC_REVISION_G12A)
+			vbuf->vb2_buf.planes[0].bytesused =
+				MMU_COMPRESS_HEADER_SIZE;
+		else
+			vbuf->vb2_buf.planes[0].bytesused =
+			   amvdec_amfbc_size(sess->width, sess->height, 1, 0);
+		break;
 	}
 
 	vbuf->vb2_buf.timestamp = timestamp;
@@ -440,7 +471,7 @@
 EXPORT_SYMBOL_GPL(amvdec_set_par_from_dar);
 
 void amvdec_src_change(struct amvdec_session *sess, u32 width,
-		       u32 height, u32 dpb_size)
+		       u32 height, u32 dpb_size, u32 bitdepth)
 {
 	static const struct v4l2_event ev = {
 		.type = V4L2_EVENT_SOURCE_CHANGE,
@@ -455,7 +486,8 @@
 	if (sess->streamon_cap &&
 	    sess->width == width &&
 	    sess->height == height &&
-	    dpb_size <= sess->num_dst_bufs) {
+	    dpb_size <= sess->num_dst_bufs &&
+	    sess->bitdepth == bitdepth) {
 		sess->fmt_out->codec_ops->resume(sess);
 		return;
 	}
@@ -464,9 +496,17 @@
 	sess->width = width;
 	sess->height = height;
 	sess->status = STATUS_NEEDS_RESUME;
+	sess->bitdepth = bitdepth;
 
-	dev_dbg(sess->core->dev, "Res. changed (%ux%u), DPB size %u\n",
-		width, height, dpb_size);
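+	/*
+	 * Keep the FBC capture format in sync with the detected bit depth so
+	 * userspace sees the matching 8-bit/10-bit compressed pixel format.
+	 */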
+	if (sess->pixfmt_cap == V4L2_PIX_FMT_YUV420_8BIT &&
+	    bitdepth == 10)
+		sess->pixfmt_cap = V4L2_PIX_FMT_YUV420_10BIT;
+	else if (sess->pixfmt_cap == V4L2_PIX_FMT_YUV420_10BIT &&
+		 bitdepth == 8)
+		sess->pixfmt_cap = V4L2_PIX_FMT_YUV420_8BIT;
+
+	dev_dbg(sess->core->dev, "Res. changed (%ux%u), DPB %u, bitdepth %u\n",
+		width, height, dpb_size, bitdepth);
 	v4l2_event_queue_fh(&sess->fh, &ev);
 }
 EXPORT_SYMBOL_GPL(amvdec_src_change);
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
index 798e5a8..fa3e042 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.h
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -27,9 +27,11 @@
 u32 amvdec_read_parser(struct amvdec_core *core, u32 reg);
 void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val);
 
-u32 amvdec_am21c_body_size(u32 width, u32 height);
-u32 amvdec_am21c_head_size(u32 width, u32 height);
-u32 amvdec_am21c_size(u32 width, u32 height);
+/* Helpers for the Amlogic compressed framebuffer format */
+u32 amvdec_amfbc_body_size(u32 width, u32 height, u32 is_10bit, u32 use_mmu);
+u32 amvdec_amfbc_head_size(u32 width, u32 height);
+u32 amvdec_amfbc_size(u32 width, u32 height, u32 is_10bit, u32 use_mmu);
+u32 amvdec_is_dst_fbc(struct amvdec_session *sess);
 
 /**
  * amvdec_dst_buf_done_idx() - Signal that a buffer is done decoding
@@ -76,9 +78,10 @@
  * @width: picture width detected by the hardware
  * @height: picture height detected by the hardware
  * @dpb_size: Decoded Picture Buffer size (= amount of buffers for decoding)
+ * @bitdepth: Bit depth (usually 10 or 8) of the coded content
  */
 void amvdec_src_change(struct amvdec_session *sess, u32 width,
-		       u32 height, u32 dpb_size);
+		       u32 height, u32 dpb_size, u32 bitdepth);
 
 /**
  * amvdec_abort() - Abort the current decoding session
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index eabbeba..efc090d 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -61,7 +61,8 @@
 		.vdec_ops = &vdec_hevc_ops,
 		.codec_ops = &codec_vp9_ops,
 		.firmware_path = "meson/vdec/gxl_vp9.bin",
-		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420_8BIT,
+				 V4L2_PIX_FMT_YUV420_10BIT, 0 },
 		.flags = V4L2_FMT_FLAG_COMPRESSED |
 			 V4L2_FMT_FLAG_DYN_RESOLUTION,
 	}, {
@@ -149,7 +150,8 @@
 		.vdec_ops = &vdec_hevc_ops,
 		.codec_ops = &codec_vp9_ops,
 		.firmware_path = "meson/vdec/g12a_vp9.bin",
-		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420_8BIT,
+				 V4L2_PIX_FMT_YUV420_10BIT, 0 },
 		.flags = V4L2_FMT_FLAG_COMPRESSED |
 			 V4L2_FMT_FLAG_DYN_RESOLUTION,
 	}, {
@@ -199,7 +201,8 @@
 		.vdec_ops = &vdec_hevc_ops,
 		.codec_ops = &codec_vp9_ops,
 		.firmware_path = "meson/vdec/sm1_vp9_mmu.bin",
-		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+		.pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420_8BIT,
+				 V4L2_PIX_FMT_YUV420_10BIT, 0 },
 		.flags = V4L2_FMT_FLAG_COMPRESSED |
 			 V4L2_FMT_FLAG_DYN_RESOLUTION,
 	}, {
diff --git a/drivers/staging/nanohub/Kconfig b/drivers/staging/nanohub/Kconfig
new file mode 100644
index 0000000..8115064
--- /dev/null
+++ b/drivers/staging/nanohub/Kconfig
@@ -0,0 +1,22 @@
+config NANOHUB
+	tristate "Nanohub"
+	select IIO
+	default n
+	help
+	  Enable support for the nanohub sensorhub driver.
+
+	  This driver supports the Android nanohub sensorhub.
+
+	  If in doubt, say N here.
+
+if NANOHUB
+
+config NANOHUB_SPI
+	bool "Nanohub SPI"
+	default y
+	help
+	  Enable nanohub SPI support.
+
+	  If in doubt, say Y here.
+
+endif # NANOHUB
diff --git a/drivers/staging/nanohub/Makefile b/drivers/staging/nanohub/Makefile
new file mode 100644
index 0000000..950cae2
--- /dev/null
+++ b/drivers/staging/nanohub/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for nanohub
+#
+
+obj-$(CONFIG_NANOHUB) += nanohub.o
+nanohub-y := main.o bl.o comms.o
+nanohub-$(CONFIG_NANOHUB_SPI) += spi.o
diff --git a/drivers/staging/nanohub/bl.c b/drivers/staging/nanohub/bl.c
new file mode 100644
index 0000000..9a657f4
--- /dev/null
+++ b/drivers/staging/nanohub/bl.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_data/nanohub.h>
+#include <linux/vmalloc.h>
+
+#include "main.h"
+#include "bl.h"
+
+#define MAX_BUFFER_SIZE		1024
+#define MAX_FLASH_BANKS		16
+#define READ_ACK_TIMEOUT	100000
+
+static uint8_t write_len(struct nanohub_data *data, int len)
+{
+	uint8_t buffer[sizeof(uint8_t) + 1];
+
+	buffer[0] = len - 1;
+
+	return data->bl.write_data(data, buffer, sizeof(uint8_t));
+}
+
+static uint8_t write_cnt(struct nanohub_data *data, uint16_t cnt)
+{
+	uint8_t buffer[sizeof(uint16_t) + 1];
+
+	buffer[0] = (cnt >> 8) & 0xFF;
+	buffer[1] = (cnt >> 0) & 0xFF;
+
+	return data->bl.write_data(data, buffer, sizeof(uint16_t));
+}
+
+static uint8_t write_addr(struct nanohub_data *data, uint32_t addr)
+{
+	uint8_t buffer[sizeof(uint32_t) + 1];
+
+	buffer[0] = (addr >> 24) & 0xFF;
+	buffer[1] = (addr >> 16) & 0xFF;
+	buffer[2] = (addr >> 8) & 0xFF;
+	buffer[3] = addr & 0xFF;
+
+	return data->bl.write_data(data, buffer, sizeof(uint32_t));
+}
+
+/* write length followed by the data */
+static uint8_t write_len_data(struct nanohub_data *data, int len,
+			      const uint8_t *buf)
+{
+	uint8_t buffer[sizeof(uint8_t) + 256 + sizeof(uint8_t)];
+
+	buffer[0] = len - 1;
+
+	memcpy(&buffer[1], buf, len);
+
+	return data->bl.write_data(data, buffer, sizeof(uint8_t) + len);
+}
+
+/* keep checking for an ACK until we receive an ACK or a NACK */
+static uint8_t read_ack_loop(struct nanohub_data *data)
+{
+	uint8_t ret;
+	int32_t timeout = READ_ACK_TIMEOUT;
+
+	do {
+		ret = data->bl.read_ack(data);
+		if (ret != CMD_ACK && ret != CMD_NACK)
+			schedule();
+	} while (ret != CMD_ACK && ret != CMD_NACK && timeout-- > 0);
+
+	return ret;
+}
+
+uint8_t nanohub_bl_sync(struct nanohub_data *data)
+{
+	return data->bl.sync(data);
+}
+
+int nanohub_bl_open(struct nanohub_data *data)
+{
+	int ret = -1;
+
+	data->bl.tx_buffer = kmalloc(MAX_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!data->bl.tx_buffer)
+		goto out;
+
+	data->bl.rx_buffer = kmalloc(MAX_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!data->bl.rx_buffer)
+		goto free_tx;
+
+	ret = data->bl.open(data);
+	if (!ret)
+		goto out;
+
+	kfree(data->bl.rx_buffer);
+free_tx:
+	kfree(data->bl.tx_buffer);
+out:
+	return ret;
+}
+
+void nanohub_bl_close(struct nanohub_data *data)
+{
+	data->bl.close(data);
+	kfree(data->bl.tx_buffer);
+	kfree(data->bl.rx_buffer);
+}
+
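+/*
+ * Write the part of @buf that falls inside flash bank @bank, clamping the
+ * write to the bank boundaries; @addr is the absolute address of buf[0].
+ */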
+static uint8_t write_bank(struct nanohub_data *data, int bank, uint32_t addr,
+			  const uint8_t *buf, size_t length)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+	uint8_t status = CMD_ACK;
+	uint32_t offset;
+
+	if (addr <= pdata->flash_banks[bank].address) {
+		offset = pdata->flash_banks[bank].address - addr;
+		if (addr + length >
+		    pdata->flash_banks[bank].address +
+		    pdata->flash_banks[bank].length)
+			status =
+			    nanohub_bl_write_memory(data,
+						    pdata->flash_banks[bank].
+						    address,
+						    pdata->flash_banks[bank].
+						    length, buf + offset);
+		else
+			status =
+			    nanohub_bl_write_memory(data,
+						    pdata->flash_banks[bank].
+						    address, length - offset,
+						    buf + offset);
+	} else {
+		if (addr + length >
+		    pdata->flash_banks[bank].address +
+		    pdata->flash_banks[bank].length)
+			status =
+			    nanohub_bl_write_memory(data, addr,
+						    pdata->flash_banks[bank].
+						    address +
+						    pdata->flash_banks[bank].
+						    length - addr, buf);
+		else
+			status =
+			    nanohub_bl_write_memory(data, addr, length, buf);
+	}
+
+	return status;
+}
+
+uint8_t nanohub_bl_download(struct nanohub_data *data, uint32_t addr,
+			    const uint8_t *image, size_t length)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+	uint8_t *ptr;
+	int i, j;
+	uint8_t status;
+	uint32_t offset;
+	uint8_t erase_mask[MAX_FLASH_BANKS] = { 0 };
+	uint8_t erase_write_mask[MAX_FLASH_BANKS] = { 0 };
+	uint8_t write_mask[MAX_FLASH_BANKS] = { 0 };
+
+	if (pdata->num_flash_banks > MAX_FLASH_BANKS) {
+		status = CMD_NACK;
+		goto out;
+	}
+
+	status = nanohub_bl_sync(data);
+
+	if (status != CMD_ACK) {
+		pr_err("nanohub_bl_download: sync=%02x\n", status);
+		goto out;
+	}
+
+	ptr = vmalloc(length);
+	if (!ptr) {
+		status = CMD_NACK;
+		goto out;
+	}
+
+	status = nanohub_bl_read_memory(data, addr, length, ptr);
+	pr_info(
+	    "nanohub: nanohub_bl_read_memory: status=%02x, addr=%08x, length=%zd\n",
+	    status, addr, length);
+
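+	/* find the flash bank that contains the download start address */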
+	for (i = 0; i < pdata->num_flash_banks; i++) {
+		if (addr >= pdata->flash_banks[i].address &&
+		    addr <
+		    pdata->flash_banks[i].address +
+		    pdata->flash_banks[i].length) {
+			break;
+		}
+	}
+
+	offset = (uint32_t) (addr - pdata->flash_banks[i].address);
+	j = 0;
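+	/*
+	 * Compare the image against the current flash contents bank by bank
+	 * and note, per bank, whether it needs an erase, an erase followed by
+	 * a write, or just a write (1 bits can be programmed to 0 without
+	 * erasing).
+	 */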
+	while (j < length && i < pdata->num_flash_banks) {
+		if (image[j] != 0xFF)
+			erase_write_mask[i] = true;
+
+		if ((ptr[j] & image[j]) != image[j]) {
+			erase_mask[i] = true;
+			if (erase_write_mask[i]) {
+				j += pdata->flash_banks[i].length - offset;
+				offset = pdata->flash_banks[i].length;
+			} else {
+				j++;
+				offset++;
+			}
+		} else {
+			if (ptr[j] != image[j])
+				write_mask[i] = true;
+			j++;
+			offset++;
+		}
+
+		if (offset == pdata->flash_banks[i].length) {
+			i++;
+			offset = 0;
+			if (i < pdata->num_flash_banks)
+				j += (pdata->flash_banks[i].address -
+				      pdata->flash_banks[i - 1].address -
+				      pdata->flash_banks[i - 1].length);
+			else
+				j = length;
+		}
+	}
+
+	for (i = 0; status == CMD_ACK && i < pdata->num_flash_banks; i++) {
+		pr_info("nanohub: i=%d, erase=%d, erase_write=%d, write=%d\n",
+			i, erase_mask[i], erase_write_mask[i], write_mask[i]);
+		if (erase_mask[i]) {
+			status =
+			    nanohub_bl_erase_sector(data,
+						    pdata->flash_banks[i].bank);
+			if (status == CMD_ACK && erase_write_mask[i])
+				status =
+				    write_bank(data, i, addr, image, length);
+		} else if (write_mask[i]) {
+			status = write_bank(data, i, addr, image, length);
+		}
+	}
+
+	vfree(ptr);
+out:
+	return status;
+}
+
+uint8_t nanohub_bl_erase_shared(struct nanohub_data *data)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+	int i;
+	uint8_t status;
+
+	if (pdata->num_shared_flash_banks > MAX_FLASH_BANKS) {
+		status = CMD_NACK;
+		goto out;
+	}
+
+	status = nanohub_bl_sync(data);
+
+	if (status != CMD_ACK) {
+		pr_err("nanohub_bl_erase_shared: sync=%02x\n", status);
+		goto out;
+	}
+
+	for (i = 0;
+	     status == CMD_ACK && i < pdata->num_shared_flash_banks;
+	     i++) {
+		status = nanohub_bl_erase_sector(data,
+		    pdata->shared_flash_banks[i].bank);
+	}
+out:
+	return status;
+}
+
+uint8_t nanohub_bl_erase_shared_bl(struct nanohub_data *data)
+{
+	uint8_t status;
+
+	status = nanohub_bl_sync(data);
+
+	if (status != CMD_ACK) {
+		pr_err("nanohub_bl_erase_shared_bl: sync=%02x\n", status);
+		goto out;
+	}
+
+	status = nanohub_bl_erase_special(data, 0xFFF0);
+out:
+	return status;
+}
+
+/* erase a single sector */
+uint8_t nanohub_bl_erase_sector(struct nanohub_data *data, uint16_t sector)
+{
+	uint8_t ret;
+
+	data->bl.write_cmd(data, data->bl.cmd_erase);
+	ret = data->bl.read_ack(data);
+	if (ret == CMD_ACK)
+		ret = write_cnt(data, 0x0000);
+	if (ret != CMD_NACK)
+		ret = read_ack_loop(data);
+	if (ret == CMD_ACK)
+		ret = write_cnt(data, sector);
+	if (ret != CMD_NACK)
+		ret = read_ack_loop(data);
+
+	return ret;
+}
+
+/* erase special */
+uint8_t nanohub_bl_erase_special(struct nanohub_data *data, uint16_t special)
+{
+	uint8_t ret;
+
+	data->bl.write_cmd(data, data->bl.cmd_erase);
+	ret = data->bl.read_ack(data);
+	if (ret == CMD_ACK)
+		ret = write_cnt(data, special);
+	if (ret != CMD_NACK)
+		ret = read_ack_loop(data);
+
+	return ret;
+}
+
+/* read memory - this will chop the request into 256-byte reads */
+uint8_t nanohub_bl_read_memory(struct nanohub_data *data, uint32_t addr,
+			       uint32_t length, uint8_t *buffer)
+{
+	uint8_t ret = CMD_ACK;
+	uint32_t offset = 0;
+
+	while (ret == CMD_ACK && length > offset) {
+		data->bl.write_cmd(data, data->bl.cmd_read_memory);
+		ret = data->bl.read_ack(data);
+		if (ret == CMD_ACK) {
+			write_addr(data, addr + offset);
+			ret = read_ack_loop(data);
+			if (ret == CMD_ACK) {
+				if (length - offset >= 256) {
+					write_len(data, 256);
+					ret = read_ack_loop(data);
+					if (ret == CMD_ACK) {
+						data->bl.read_data(data,
+								   &buffer
+								   [offset],
+								   256);
+						offset += 256;
+					}
+				} else {
+					write_len(data, length - offset);
+					ret = read_ack_loop(data);
+					if (ret == CMD_ACK) {
+						data->bl.read_data(data,
+								   &buffer
+								   [offset],
+								   length -
+								   offset);
+						offset = length;
+					}
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+/* write memory - this will chop the request into 256-byte writes */
+uint8_t nanohub_bl_write_memory(struct nanohub_data *data, uint32_t addr,
+				uint32_t length, const uint8_t *buffer)
+{
+	uint8_t ret = CMD_ACK;
+	uint32_t offset = 0;
+
+	while (ret == CMD_ACK && length > offset) {
+		data->bl.write_cmd(data, data->bl.cmd_write_memory);
+		ret = data->bl.read_ack(data);
+		if (ret == CMD_ACK) {
+			write_addr(data, addr + offset);
+			ret = read_ack_loop(data);
+			if (ret == CMD_ACK) {
+				if (length - offset >= 256) {
+					write_len_data(data, 256,
+						       &buffer[offset]);
+					offset += 256;
+				} else {
+					write_len_data(data, length - offset,
+						       &buffer[offset]);
+					offset = length;
+				}
+				ret = read_ack_loop(data);
+			}
+		}
+	}
+
+	return ret;
+}
+
+uint8_t nanohub_bl_get_version(struct nanohub_data *data, uint8_t *version)
+{
+	uint8_t status;
+
+	status = nanohub_bl_sync(data);
+	if (status != CMD_ACK) {
+		pr_err("nanohub_bl_get_version: sync=%02x\n", status);
+		goto out;
+	}
+
+	data->bl.write_cmd(data, data->bl.cmd_get_version);
+	status = data->bl.read_ack(data);
+	if (status == CMD_ACK)
+		data->bl.read_data(data, version, 1);
+	status = data->bl.read_ack(data);
+out:
+	return status;
+}
+
+uint8_t nanohub_bl_get_id(struct nanohub_data *data, uint16_t *id)
+{
+	uint8_t status;
+	uint8_t len;
+	uint8_t buffer[256];
+
+	status = nanohub_bl_sync(data);
+	if (status != CMD_ACK) {
+		pr_err("nanohub_bl_get_id: sync=%02x\n", status);
+		goto out;
+	}
+
+	data->bl.write_cmd(data, data->bl.cmd_get_id);
+	status = data->bl.read_ack(data);
+	if (status == CMD_ACK) {
+		data->bl.read_data(data, &len, 1);
+		data->bl.read_data(data, buffer, len+1);
+		*id = (buffer[0] << 8) | buffer[1];
+	}
+	status = data->bl.read_ack(data);
+out:
+	return status;
+}
+
+uint8_t nanohub_bl_lock(struct nanohub_data *data)
+{
+	uint8_t status;
+
+	status = nanohub_bl_sync(data);
+
+	if (status != CMD_ACK) {
+		pr_err("nanohub_bl_lock: sync=%02x\n", status);
+		goto out;
+	}
+
+	data->bl.write_cmd(data, data->bl.cmd_readout_protect);
+	status = data->bl.read_ack(data);
+	if (status == CMD_ACK)
+		status = read_ack_loop(data);
+out:
+	return status;
+}
+
+uint8_t nanohub_bl_unlock(struct nanohub_data *data)
+{
+	uint8_t status;
+
+	status = nanohub_bl_sync(data);
+
+	if (status != CMD_ACK) {
+		pr_err("nanohub_bl_unlock: sync=%02x\n", status);
+		goto out;
+	}
+
+	data->bl.write_cmd(data, data->bl.cmd_readout_unprotect);
+	status = data->bl.read_ack(data);
+	if (status == CMD_ACK)
+		status = read_ack_loop(data);
+out:
+	return status;
+}
+
+uint8_t nanohub_bl_update_finished(struct nanohub_data *data)
+{
+	uint8_t ret;
+
+	data->bl.write_cmd(data, data->bl.cmd_update_finished);
+	ret = read_ack_loop(data);
+
+	return ret;
+}
diff --git a/drivers/staging/nanohub/bl.h b/drivers/staging/nanohub/bl.h
new file mode 100644
index 0000000..9ed8c40
--- /dev/null
+++ b/drivers/staging/nanohub/bl.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_BL_H
+#define _NANOHUB_BL_H
+
+#include <linux/platform_data/nanohub.h>
+#include <linux/spi/spi.h>
+
+struct nanohub_data;
+
+struct nanohub_bl {
+	uint8_t cmd_erase;
+	uint8_t cmd_read_memory;
+	uint8_t cmd_write_memory;
+	uint8_t cmd_get_version;
+	uint8_t cmd_get_id;
+	uint8_t cmd_readout_protect;
+	uint8_t cmd_readout_unprotect;
+	uint8_t cmd_update_finished;
+
+	int (*open)(const void *);
+	void (*close)(const void *);
+	uint8_t (*sync)(const void *);
+	uint8_t (*write_data)(const void *, uint8_t *, int);
+	uint8_t (*write_cmd)(const void *, uint8_t);
+	uint8_t (*read_data)(const void *, uint8_t *, int);
+	uint8_t (*read_ack)(const void *);
+
+	uint8_t *tx_buffer;
+	uint8_t *rx_buffer;
+};
+
+int nanohub_bl_open(struct nanohub_data *);
+uint8_t nanohub_bl_sync(struct nanohub_data *);
+void nanohub_bl_close(struct nanohub_data *);
+uint8_t nanohub_bl_download(struct nanohub_data *, uint32_t addr,
+			    const uint8_t *data, size_t length);
+uint8_t nanohub_bl_erase_shared(struct nanohub_data *);
+uint8_t nanohub_bl_erase_shared_bl(struct nanohub_data *);
+uint8_t nanohub_bl_erase_sector(struct nanohub_data *, uint16_t);
+uint8_t nanohub_bl_erase_special(struct nanohub_data *, uint16_t);
+uint8_t nanohub_bl_read_memory(struct nanohub_data *, uint32_t, uint32_t,
+			       uint8_t *);
+uint8_t nanohub_bl_write_memory(struct nanohub_data *, uint32_t, uint32_t,
+				const uint8_t *);
+uint8_t nanohub_bl_get_version(struct nanohub_data *, uint8_t *);
+uint8_t nanohub_bl_get_id(struct nanohub_data *, uint16_t *);
+uint8_t nanohub_bl_lock(struct nanohub_data *);
+uint8_t nanohub_bl_unlock(struct nanohub_data *);
+uint8_t nanohub_bl_update_finished(struct nanohub_data *);
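+
+/*
+ * Illustrative flashing sequence built from the helpers above (error
+ * handling, reset/boot0 GPIO handling and locking omitted; this is a sketch,
+ * not the exact sequence used by the driver):
+ *
+ *	if (!nanohub_bl_open(data)) {
+ *		nanohub_bl_download(data, addr, image, length);
+ *		nanohub_bl_update_finished(data);
+ *		nanohub_bl_close(data);
+ *	}
+ */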
+
+/*
+ * Bootloader commands
+ * The _NS versions are "no-stretch" (only valid on I2C): they return
+ * CMD_BUSY instead of stretching the clock.
+ */
+
+#define CMD_GET				0x00
+#define CMD_GET_VERSION			0x01
+#define CMD_GET_ID			0x02
+#define CMD_READ_MEMORY			0x11
+#define CMD_NACK			0x1F
+#define CMD_GO				0x21
+#define CMD_WRITE_MEMORY		0x31
+#define CMD_WRITE_MEMORY_NS		0x32
+#define CMD_ERASE			0x44
+#define CMD_ERASE_NS			0x45
+#define CMD_SOF				0x5A
+#define CMD_WRITE_PROTECT		0x63
+#define CMD_WRITE_PROTECT_NS		0x64
+#define CMD_WRITE_UNPROTECT		0x73
+#define CMD_WRITE_UNPROTECT_NS		0x74
+#define CMD_BUSY			0x76
+#define CMD_ACK				0x79
+#define CMD_READOUT_PROTECT		0x82
+#define CMD_READOUT_PROTECT_NS		0x83
+#define CMD_READOUT_UNPROTECT		0x92
+#define CMD_READOUT_UNPROTECT_NS	0x93
+#define CMD_SOF_ACK			0xA5
+#define CMD_GET_SIZES			0xEE
+#define CMD_UPDATE_FINISHED		0xEF
+
+#endif
diff --git a/drivers/staging/nanohub/comms.c b/drivers/staging/nanohub/comms.c
new file mode 100644
index 0000000..6da7fe3
--- /dev/null
+++ b/drivers/staging/nanohub/comms.c
@@ -0,0 +1,580 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <linux/gpio.h>
+
+#include "main.h"
+#include "comms.h"
+
+#define READ_ACK_TIMEOUT_MS	10
+#define READ_MSG_TIMEOUT_MS	70
+
+#define RESEND_SHORT_DELAY_US   500     /* 500us - 1ms */
+#define RESEND_LONG_DELAY_US    100000  /* 100ms - 200ms */
+
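+/*
+ * MSB-first CRC-32 over the 0x04C11DB7 polynomial, computed a nibble at a
+ * time from this 16-entry table; callers pass ~0 as the initial value and no
+ * final XOR is applied.
+ */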
+static const uint32_t crc_table[] = {
+	0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9,
+	0x130476DC, 0x17C56B6B, 0x1A864DB2, 0x1E475005,
+	0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61,
+	0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD
+};
+
+static uint32_t crc32_word(uint32_t crc, uint32_t data, int cnt)
+{
+	int i;
+
+	crc = crc ^ data;
+
+	for (i = 0; i < cnt; i++)
+		crc = (crc << 4) ^ crc_table[crc >> 28];
+
+	return crc;
+}
+
+uint32_t crc32(const uint8_t *buffer, int length, uint32_t crc)
+{
+	const uint32_t *data = (const uint32_t *)buffer;
+	uint32_t word;
+	int i;
+
+	/* word by word crc32 */
+	for (i = 0; i < (length >> 2); i++)
+		crc = crc32_word(crc, data[i], 8);
+
+	/* zero pad last word if required */
+	if (length & 0x3) {
+		for (i *= 4, word = 0; i < length; i++)
+			word |= buffer[i] << ((i & 0x3) * 8);
+		crc = crc32_word(crc, word, 8);
+	}
+
+	return crc;
+}
+
+static inline size_t pad(size_t length)
+{
+	return (length + 3) & ~3;
+}
+
+static inline size_t tot_len(size_t length)
+{
+	/* [TYPE:1] [LENGTH:3] [DATA] [PAD:0-3] [CRC:4] */
+	return sizeof(uint32_t) + pad(length) + sizeof(uint32_t);
+}
+
+static struct nanohub_packet_pad *packet_alloc(int flags)
+{
+	int len =
+	    sizeof(struct nanohub_packet_pad) + MAX_UINT8 +
+	    sizeof(struct nanohub_packet_crc);
+	uint8_t *packet = kmalloc(len, flags);
+
+	if (!packet)
+		return NULL;
+
+	memset(packet, 0xFF, len);
+	return (struct nanohub_packet_pad *)packet;
+}
+
+static int packet_create(struct nanohub_packet *packet, uint32_t seq,
+			 uint32_t reason, uint8_t len, const uint8_t *data,
+			 bool user)
+{
+	struct nanohub_packet_crc crc;
+	int ret = sizeof(struct nanohub_packet) + len +
+	    sizeof(struct nanohub_packet_crc);
+
+	if (packet) {
+		packet->sync = COMMS_SYNC;
+		packet->seq = seq;
+		packet->reason = reason;
+		packet->len = len;
+		if (len > 0) {
+			if (user) {
+				if (copy_from_user(packet->data, data, len) !=
+				    0)
+					ret = ERROR_NACK;
+			} else {
+				memcpy(packet->data, data, len);
+			}
+		}
+		crc.crc =
+		    crc32((uint8_t *) packet,
+			  sizeof(struct nanohub_packet) + len, ~0);
+		memcpy(&packet->data[len], &crc.crc,
+		       sizeof(struct nanohub_packet_crc));
+	} else {
+		ret = ERROR_NACK;
+	}
+
+	return ret;
+}
+
+static int packet_verify(struct nanohub_packet *packet)
+{
+	struct nanohub_packet_crc crc;
+	int cmp;
+
+	crc.crc =
+	    crc32((uint8_t *) packet,
+		  sizeof(struct nanohub_packet) + packet->len, ~0);
+
+	cmp =
+	    memcmp(&crc.crc, &packet->data[packet->len],
+		   sizeof(struct nanohub_packet_crc));
+
+	if (cmp != 0) {
+		uint8_t *ptr = (uint8_t *)packet;
+		pr_debug("nanohub: gen crc: %08x, got crc: %08x\n", crc.crc,
+			 *(uint32_t *)&packet->data[packet->len]);
+		pr_debug(
+		    "nanohub: %02x [%02x %02x %02x %02x] [%02x %02x %02x %02x] [%02x] [%02x %02x %02x %02x]\n",
+		    ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6],
+		    ptr[7], ptr[8], ptr[9], ptr[10], ptr[11], ptr[12],
+		    ptr[13]);
+	}
+
+	return cmp;
+}
+
+static void packet_free(struct nanohub_packet_pad *packet)
+{
+	kfree(packet);
+}
+
+static int read_ack(struct nanohub_data *data, struct nanohub_packet *response,
+		    int timeout)
+{
+	int ret, i;
+	const int max_size = sizeof(struct nanohub_packet) + MAX_UINT8 +
+	    sizeof(struct nanohub_packet_crc);
+	unsigned long end = jiffies + msecs_to_jiffies(READ_ACK_TIMEOUT_MS);
+
+	for (i = 0; time_before_eq(jiffies, end); i++) {
+		ret =
+		    data->comms.read(data, (uint8_t *) response, max_size,
+				     timeout);
+
+		if (ret == 0) {
+			pr_debug("nanohub: read_ack: %d: empty packet\n", i);
+			ret = ERROR_NACK;
+			continue;
+		} else if (ret < sizeof(struct nanohub_packet)) {
+			pr_debug("nanohub: read_ack: %d: too small\n", i);
+			ret = ERROR_NACK;
+			continue;
+		} else if (ret <
+			   sizeof(struct nanohub_packet) + response->len +
+			   sizeof(struct nanohub_packet_crc)) {
+			pr_debug("nanohub: read_ack: %d: too small length\n",
+				 i);
+			ret = ERROR_NACK;
+			continue;
+		} else if (ret !=
+			   sizeof(struct nanohub_packet) + response->len +
+			   sizeof(struct nanohub_packet_crc)) {
+			pr_debug("nanohub: read_ack: %d: wrong length\n", i);
+			ret = ERROR_NACK;
+			break;
+		} else if (packet_verify(response) != 0) {
+			pr_debug("nanohub: read_ack: %d: invalid crc\n", i);
+			ret = ERROR_NACK;
+			break;
+		} else {
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int read_msg(struct nanohub_data *data, struct nanohub_packet *response,
+		    int timeout)
+{
+	int ret, i;
+	const int max_size = sizeof(struct nanohub_packet) + MAX_UINT8 +
+	    sizeof(struct nanohub_packet_crc);
+	unsigned long end = jiffies + msecs_to_jiffies(READ_MSG_TIMEOUT_MS);
+
+	for (i = 0; time_before_eq(jiffies, end); i++) {
+		ret =
+		    data->comms.read(data, (uint8_t *) response, max_size,
+				     timeout);
+
+		if (ret == 0) {
+			pr_debug("nanohub: read_msg: %d: empty packet\n", i);
+			ret = ERROR_NACK;
+			continue;
+		} else if (ret < sizeof(struct nanohub_packet)) {
+			pr_debug("nanohub: read_msg: %d: too small\n", i);
+			ret = ERROR_NACK;
+			continue;
+		} else if (ret <
+			   sizeof(struct nanohub_packet) + response->len +
+			   sizeof(struct nanohub_packet_crc)) {
+			pr_debug("nanohub: read_msg: %d: too small length\n",
+				 i);
+			ret = ERROR_NACK;
+			continue;
+		} else if (ret !=
+			   sizeof(struct nanohub_packet) + response->len +
+			   sizeof(struct nanohub_packet_crc)) {
+			pr_debug("nanohub: read_msg: %d: wrong length\n", i);
+			ret = ERROR_NACK;
+			break;
+		} else if (packet_verify(response) != 0) {
+			pr_debug("nanohub: read_msg: %d: invalid crc\n", i);
+			ret = ERROR_NACK;
+			break;
+		} else {
+			break;
+		}
+	}
+
+	return ret;
+}
+
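+/*
+ * Read the ACK for @seq and, if the hub ACKed, the reply message that
+ * follows; the ACK payload also carries the hub's current interrupt state.
+ */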
+static int get_reply(struct nanohub_data *data, struct nanohub_packet *response,
+		     uint32_t seq)
+{
+	int ret;
+
+	ret = read_ack(data, response, data->comms.timeout_ack);
+
+	if (ret >= 0 && response->seq == seq) {
+		if (response->reason == CMD_COMMS_ACK) {
+			if (response->len == sizeof(data->interrupts))
+				memcpy(data->interrupts, response->data,
+				       response->len);
+			ret =
+			    read_msg(data, response, data->comms.timeout_reply);
+			if (ret < 0)
+				ret = ERROR_NACK;
+		} else {
+			int i;
+			uint8_t *b = (uint8_t *) response;
+			for (i = 0; i < ret; i += 25)
+				pr_debug(
+				    "nanohub: %d: %d: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+				    ret, i, b[i], b[i + 1], b[i + 2], b[i + 3],
+				    b[i + 4], b[i + 5], b[i + 6], b[i + 7],
+				    b[i + 8], b[i + 9], b[i + 10], b[i + 11],
+				    b[i + 12], b[i + 13], b[i + 14], b[i + 15],
+				    b[i + 16], b[i + 17], b[i + 18], b[i + 19],
+				    b[i + 20], b[i + 21], b[i + 22], b[i + 23],
+				    b[i + 24]);
+			if (response->reason == CMD_COMMS_NACK)
+				ret = ERROR_NACK;
+			else if (response->reason == CMD_COMMS_BUSY)
+				ret = ERROR_BUSY;
+		}
+
+		if (response->seq != seq)
+			ret = ERROR_NACK;
+	} else {
+		if (ret >= 0) {
+			int i;
+			uint8_t *b = (uint8_t *) response;
+			for (i = 0; i < ret; i += 25)
+				pr_debug(
+				    "nanohub: %d: %d: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+				    ret, i, b[i], b[i + 1], b[i + 2], b[i + 3],
+				    b[i + 4], b[i + 5], b[i + 6], b[i + 7],
+				    b[i + 8], b[i + 9], b[i + 10], b[i + 11],
+				    b[i + 12], b[i + 13], b[i + 14], b[i + 15],
+				    b[i + 16], b[i + 17], b[i + 18], b[i + 19],
+				    b[i + 20], b[i + 21], b[i + 22], b[i + 23],
+				    b[i + 24]);
+		}
+		ret = ERROR_NACK;
+	}
+
+	return ret;
+}
+
+static int nanohub_comms_tx_rx(struct nanohub_data *data,
+			       struct nanohub_packet_pad *pad, int packet_size,
+			       uint32_t seq, uint8_t *rx, size_t rx_len)
+{
+	int ret;
+
+	ret = data->comms.write(data, (uint8_t *)&pad->packet, packet_size,
+				data->comms.timeout_write);
+
+	if (ret == packet_size) {
+		ret = get_reply(data, &pad->packet, seq);
+
+		if (ret >= 0) {
+			if (pad->packet.len > 0) {
+				if (pad->packet.len > rx_len) {
+					memcpy(rx, pad->packet.data, rx_len);
+					ret = rx_len;
+				} else {
+					memcpy(rx, pad->packet.data,
+					       pad->packet.len);
+					ret = pad->packet.len;
+				}
+			} else {
+				ret = 0;
+			}
+		}
+	} else {
+		ret = ERROR_NACK;
+	}
+
+	return ret;
+}
+
+int nanohub_comms_rx_retrans_boottime(struct nanohub_data *data, uint32_t cmd,
+				      uint8_t *rx, size_t rx_len,
+				      int retrans_cnt, int retrans_delay)
+{
+	int packet_size = 0;
+	struct nanohub_packet_pad *pad = packet_alloc(GFP_KERNEL);
+	int delay = 0;
+	int ret;
+	uint32_t seq;
+	s64 boottime;
+
+	if (pad == NULL)
+		return ERROR_NACK;
+
+	seq = data->comms.seq++;
+
+	do {
+		data->comms.open(data);
+		boottime = ktime_get_boottime_ns();
+		packet_size =
+		    packet_create(&pad->packet, seq, cmd, sizeof(boottime),
+				  (uint8_t *)&boottime, false);
+
+		ret =
+		    nanohub_comms_tx_rx(data, pad, packet_size, seq, rx,
+					rx_len);
+
+		if (nanohub_wakeup_eom(data,
+				       (ret == ERROR_BUSY) ||
+				       (ret == ERROR_NACK && retrans_cnt >= 0)))
+			ret = -EFAULT;
+
+		data->comms.close(data);
+
+		if (ret == ERROR_NACK) {
+			retrans_cnt--;
+			delay += retrans_delay;
+			if (retrans_cnt >= 0)
+				udelay(retrans_delay);
+		} else if (ret == ERROR_BUSY) {
+			usleep_range(RESEND_LONG_DELAY_US,
+				     RESEND_LONG_DELAY_US * 2);
+		}
+	} while ((ret == ERROR_BUSY)
+		 || (ret == ERROR_NACK && retrans_cnt >= 0));
+
+	packet_free(pad);
+
+	return ret;
+}
+
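+/*
+ * Send one command packet and collect its reply, retransmitting up to
+ * @retrans_cnt times on NACK (waiting @retrans_delay microseconds between
+ * attempts) and retrying for as long as the hub reports BUSY.
+ */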
+int nanohub_comms_tx_rx_retrans(struct nanohub_data *data, uint32_t cmd,
+				const uint8_t *tx, uint8_t tx_len,
+				uint8_t *rx, size_t rx_len, bool user,
+				int retrans_cnt, int retrans_delay)
+{
+	int packet_size = 0;
+	struct nanohub_packet_pad *pad = packet_alloc(GFP_KERNEL);
+	int delay = 0;
+	int ret;
+	uint32_t seq;
+
+	if (pad == NULL)
+		return ERROR_NACK;
+
+	seq = data->comms.seq++;
+
+	do {
+		packet_size =
+		    packet_create(&pad->packet, seq, cmd, tx_len, tx, user);
+
+		data->comms.open(data);
+		ret =
+		    nanohub_comms_tx_rx(data, pad, packet_size, seq, rx,
+					rx_len);
+
+		if (nanohub_wakeup_eom(data,
+				       (ret == ERROR_BUSY) ||
+				       (ret == ERROR_NACK && retrans_cnt >= 0)))
+			ret = -EFAULT;
+
+		data->comms.close(data);
+
+		if (ret == ERROR_NACK) {
+			retrans_cnt--;
+			delay += retrans_delay;
+			if (retrans_cnt >= 0)
+				udelay(retrans_delay);
+		} else if (ret == ERROR_BUSY) {
+			usleep_range(RESEND_LONG_DELAY_US,
+				     RESEND_LONG_DELAY_US * 2);
+		}
+	} while ((ret == ERROR_BUSY)
+		 || (ret == ERROR_NACK && retrans_cnt >= 0));
+
+	packet_free(pad);
+
+	return ret;
+}
+
+struct firmware_header {
+	uint32_t size;
+	uint32_t crc;
+	uint8_t type;
+} __packed;
+
+struct firmware_chunk {
+	uint32_t offset;
+	uint8_t data[128];
+} __packed;
+
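+/*
+ * Upload @image to the hub: announce the transfer with a header carrying the
+ * length, inverted CRC32 and @type, stream it in 128-byte chunks while
+ * honouring the hub's WAIT/RESEND/RESTART/CANCEL replies, then poll the
+ * finish command until the hub stops reporting UPLOAD_REPLY_PROCESSING.
+ */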
+static int nanohub_comms_download(struct nanohub_data *data,
+				  const uint8_t *image, size_t length,
+				  uint8_t type)
+{
+	uint8_t accepted;
+	struct firmware_header header;
+	struct firmware_chunk chunk;
+	int max_chunk_size = sizeof(chunk.data);
+	int chunk_size;
+	uint32_t offset = 0;
+	int ret;
+	uint8_t chunk_reply, upload_reply = 0, last_reply = 0;
+	uint32_t clear_interrupts[8] = { 0x00000008 };
+	uint32_t delay;
+
+	header.type = type;
+	header.size = cpu_to_le32(length);
+	header.crc = cpu_to_le32(~crc32(image, length, ~0));
+
+	if (request_wakeup(data))
+		return -ERESTARTSYS;
+	ret = nanohub_comms_tx_rx_retrans(data, CMD_COMMS_START_KERNEL_UPLOAD,
+					  (const uint8_t *)&header,
+					  sizeof(header), &accepted,
+					  sizeof(accepted), false, 10, 10);
+	release_wakeup(data);
+
+	if (ret == 1 && accepted == 1) {
+		do {
+			if (request_wakeup(data))
+				continue;
+
+			delay = 0;
+			chunk.offset = cpu_to_le32(offset);
+			if (offset + max_chunk_size > length)
+				chunk_size = length - offset;
+			else
+				chunk_size = max_chunk_size;
+			memcpy(chunk.data, image + offset, chunk_size);
+
+			ret =
+			    nanohub_comms_tx_rx_retrans(data,
+							CMD_COMMS_KERNEL_CHUNK,
+							(const uint8_t *)&chunk,
+							sizeof(uint32_t) +
+							chunk_size,
+							&chunk_reply,
+							sizeof(chunk_reply),
+							false, 10, 10);
+
+			pr_debug("nanohub: ret=%d, chunk_reply=%d, offset=%d\n",
+				ret, chunk_reply, offset);
+			if (ret == sizeof(chunk_reply)) {
+				if (chunk_reply == CHUNK_REPLY_ACCEPTED) {
+					offset += chunk_size;
+				} else if (chunk_reply == CHUNK_REPLY_WAIT) {
+					ret = nanohub_wait_for_interrupt(data);
+					if (ret < 0) {
+						release_wakeup(data);
+						continue;
+					}
+					nanohub_comms_tx_rx_retrans(data,
+					    CMD_COMMS_CLR_GET_INTR,
+					    (uint8_t *)clear_interrupts,
+					    sizeof(clear_interrupts),
+					    (uint8_t *)data->interrupts,
+					    sizeof(data->interrupts),
+					    false, 10, 0);
+				} else if (chunk_reply == CHUNK_REPLY_RESEND) {
+					if (last_reply == CHUNK_REPLY_RESEND)
+						delay = RESEND_LONG_DELAY_US;
+					else
+						delay = RESEND_SHORT_DELAY_US;
+				} else if (chunk_reply == CHUNK_REPLY_RESTART)
+					offset = 0;
+				else if (chunk_reply == CHUNK_REPLY_CANCEL ||
+				    chunk_reply ==
+				    CHUNK_REPLY_CANCEL_NO_RETRY) {
+					release_wakeup(data);
+					break;
+				}
+				last_reply = chunk_reply;
+			} else if (ret <= 0) {
+				release_wakeup(data);
+				break;
+			}
+			release_wakeup(data);
+			if (delay > 0)
+				usleep_range(delay, delay * 2);
+		} while (offset < length);
+	}
+
+	do {
+		if (upload_reply == UPLOAD_REPLY_PROCESSING)
+			usleep_range(RESEND_LONG_DELAY_US,
+				     RESEND_LONG_DELAY_US * 2);
+
+		if (request_wakeup(data)) {
+			ret = sizeof(upload_reply);
+			upload_reply = UPLOAD_REPLY_PROCESSING;
+			continue;
+		}
+		ret = nanohub_comms_tx_rx_retrans(data,
+					CMD_COMMS_FINISH_KERNEL_UPLOAD,
+					NULL, 0,
+					&upload_reply, sizeof(upload_reply),
+					false, 10, 10);
+		release_wakeup(data);
+	} while (ret == sizeof(upload_reply) &&
+		 upload_reply == UPLOAD_REPLY_PROCESSING);
+
+	pr_info("nanohub: nanohub_comms_download: ret=%d, upload_reply=%d\n",
+		ret, upload_reply);
+
+	return 0;
+}
+
+int nanohub_comms_kernel_download(struct nanohub_data *data,
+				  const uint8_t *image, size_t length)
+{
+	return nanohub_comms_download(data, image, length,
+				      COMMS_FLASH_KERNEL_ID);
+}
+
+int nanohub_comms_app_download(struct nanohub_data *data, const uint8_t *image,
+			       size_t length)
+{
+	return nanohub_comms_download(data, image, length, COMMS_FLASH_APP_ID);
+}
diff --git a/drivers/staging/nanohub/comms.h b/drivers/staging/nanohub/comms.h
new file mode 100644
index 0000000..b22e55b
--- /dev/null
+++ b/drivers/staging/nanohub/comms.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_COMMS_H
+#define _NANOHUB_COMMS_H
+
+struct __attribute__ ((__packed__)) nanohub_packet {
+	uint8_t sync;
+	uint32_t seq;
+	uint32_t reason;
+	uint8_t len;
+	uint8_t data[];
+};
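+
+/*
+ * On the wire a transaction is [SYNC:1][SEQ:4][REASON:4][LEN:1][DATA:LEN][CRC:4]
+ * (all fields packed); the CRC covers everything up to and including DATA.
+ */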
+
+struct __attribute__ ((__packed__)) nanohub_packet_pad {
+	uint8_t pad[3];
+	struct nanohub_packet packet;
+};
+
+struct __attribute__ ((__packed__)) nanohub_packet_crc {
+	uint32_t crc;
+};
+
+struct nanohub_data;
+
+struct nanohub_comms {
+	uint32_t seq;
+	int timeout_write;
+	int timeout_ack;
+	int timeout_reply;
+	int (*open)(void *);
+	void (*close)(void *);
+	int (*write)(void *, uint8_t *, int, int);
+	int (*read)(void *, uint8_t *, int, int);
+
+	uint8_t *tx_buffer;
+	uint8_t *rx_buffer;
+};
+
+int nanohub_comms_kernel_download(struct nanohub_data *, const uint8_t *,
+				  size_t);
+int nanohub_comms_app_download(struct nanohub_data *, const uint8_t *, size_t);
+int nanohub_comms_rx_retrans_boottime(struct nanohub_data *, uint32_t,
+				      uint8_t *, size_t, int, int);
+int nanohub_comms_tx_rx_retrans(struct nanohub_data *, uint32_t,
+				const uint8_t *, uint8_t, uint8_t *, size_t,
+				bool, int, int);
+
+#define ERROR_NACK			-1
+#define ERROR_BUSY			-2
+
+#define MAX_UINT8			((1 << (8*sizeof(uint8_t))) - 1)
+
+#define COMMS_SYNC			0x31
+#define COMMS_FLASH_KERNEL_ID		0x1
+#define COMMS_FLASH_EEDATA_ID		0x2
+#define COMMS_FLASH_APP_ID		0x4
+
+#define CMD_COMMS_ACK			0x00000000
+#define CMD_COMMS_NACK			0x00000001
+#define CMD_COMMS_BUSY			0x00000002
+
+#define CMD_COMMS_GET_OS_HW_VERSIONS	0x00001000
+#define CMD_COMMS_GET_APP_VERSIONS	0x00001001
+#define CMD_COMMS_QUERY_APP_INFO	0x00001002
+
+#define CMD_COMMS_START_KERNEL_UPLOAD	0x00001040
+#define CMD_COMMS_KERNEL_CHUNK		0x00001041
+#define CMD_COMMS_FINISH_KERNEL_UPLOAD	0x00001042
+
+#define CMD_COMMS_START_APP_UPLOAD	0x00001050
+#define CMD_COMMS_APP_CHUNK		0x00001051
+
+#define CMD_COMMS_CLR_GET_INTR		0x00001080
+#define CMD_COMMS_MASK_INTR		0x00001081
+#define CMD_COMMS_UNMASK_INTR		0x00001082
+#define CMD_COMMS_READ			0x00001090
+#define CMD_COMMS_WRITE			0x00001091
+
+#define CHUNK_REPLY_ACCEPTED		0
+#define CHUNK_REPLY_WAIT		1
+#define CHUNK_REPLY_RESEND		2
+#define CHUNK_REPLY_RESTART		3
+#define CHUNK_REPLY_CANCEL		4
+#define CHUNK_REPLY_CANCEL_NO_RETRY	5
+
+#define UPLOAD_REPLY_SUCCESS			0
+#define UPLOAD_REPLY_PROCESSING			1
+#define UPLOAD_REPLY_WAITING_FOR_DATA		2
+#define UPLOAD_REPLY_APP_SEC_KEY_NOT_FOUND	3
+#define UPLOAD_REPLY_APP_SEC_HEADER_ERROR	4
+#define UPLOAD_REPLY_APP_SEC_TOO_MUCH_DATA	5
+#define UPLOAD_REPLY_APP_SEC_TOO_LITTLE_DATA	6
+#define UPLOAD_REPLY_APP_SEC_SIG_VERIFY_FAIL	7
+#define UPLOAD_REPLY_APP_SEC_SIG_DECODE_FAIL	8
+#define UPLOAD_REPLY_APP_SEC_SIG_ROOT_UNKNOWN	9
+#define UPLOAD_REPLY_APP_SEC_MEMORY_ERROR	10
+#define UPLOAD_REPLY_APP_SEC_INVALID_DATA	11
+#define UPLOAD_REPLY_APP_SEC_BAD		12
+
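+/* returns buffer_len if the hub accepts the write, 0 if it refuses it,
+ * or ERROR_NACK if the transaction itself fails
+ */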
+static inline int nanohub_comms_write(struct nanohub_data *data,
+				      const uint8_t *buffer, size_t buffer_len)
+{
+	uint8_t ret;
+	if (nanohub_comms_tx_rx_retrans
+	    (data, CMD_COMMS_WRITE, buffer, buffer_len, &ret, sizeof(ret), true,
+	     10, 10) == sizeof(ret)) {
+		if (ret)
+			return buffer_len;
+		else
+			return 0;
+	} else {
+		return ERROR_NACK;
+	}
+}
+
+#endif
diff --git a/drivers/staging/nanohub/main.c b/drivers/staging/nanohub/main.c
new file mode 100644
index 0000000..7479d18
--- /dev/null
+++ b/drivers/staging/nanohub/main.c
@@ -0,0 +1,1818 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/types.h>
+#include <linux/time.h>
+#include <linux/platform_data/nanohub.h>
+#include <uapi/linux/sched/types.h>
+
+#include "main.h"
+#include "comms.h"
+#include "bl.h"
+#include "spi.h"
+
+#define READ_QUEUE_DEPTH	10
+#define APP_FROM_HOST_EVENTID	0x000000F8
+#define FIRST_SENSOR_EVENTID	0x00000200
+#define LAST_SENSOR_EVENTID	0x000002FF
+#define APP_TO_HOST_EVENTID	0x00000401
+#define OS_LOG_EVENTID		0x3B474F4C
+#define WAKEUP_INTERRUPT	1
+#define WAKEUP_TIMEOUT_MS	1000
+#define SUSPEND_TIMEOUT_MS	100
+#define KTHREAD_ERR_TIME_NS	(60LL * NSEC_PER_SEC)
+#define KTHREAD_ERR_CNT		70
+#define KTHREAD_WARN_CNT	10
+#define WAKEUP_ERR_TIME_NS	(60LL * NSEC_PER_SEC)
+#define WAKEUP_ERR_CNT		4
+#define BL_MAX_SPEED_HZ		1000000
+
+/**
+ * struct gpio_config - this is a binding between platform data and driver data
+ * @label:     for diagnostics
+ * @flags:     to pass to gpio_request_one()
+ * @options:   one or more of GPIO_OPT_* flags, below
+ * @pdata_off: offset of u32 field in platform data with gpio #
+ * @data_off:  offset of int field in driver data with irq # (optional)
+ */
+struct gpio_config {
+	const char *label;
+	u16 flags;
+	u16 options;
+	u16 pdata_off;
+	u16 data_off;
+};
+
+#define GPIO_OPT_HAS_IRQ	0x0001
+#define GPIO_OPT_OPTIONAL	0x8000
+
+#define PLAT_GPIO_DEF(name, _flags) \
+	.pdata_off = offsetof(struct nanohub_platform_data, name ## _gpio), \
+	.label = "nanohub_" #name, \
+	.flags = _flags \
+
+#define PLAT_GPIO_DEF_IRQ(name, _flags, _opts) \
+	PLAT_GPIO_DEF(name, _flags), \
+	.data_off = offsetof(struct nanohub_data, name), \
+	.options = GPIO_OPT_HAS_IRQ | (_opts) \
+
+static int nanohub_open(struct inode *, struct file *);
+static ssize_t nanohub_read(struct file *, char *, size_t, loff_t *);
+static ssize_t nanohub_write(struct file *, const char *, size_t, loff_t *);
+static unsigned int nanohub_poll(struct file *, poll_table *);
+static int nanohub_release(struct inode *, struct file *);
+static int nanohub_hw_reset(struct nanohub_data *data);
+
+static struct class *sensor_class;
+static int major;
+
+static const struct gpio_config gconf[] = {
+	{ PLAT_GPIO_DEF(nreset, GPIOF_OUT_INIT_HIGH) },
+	{ PLAT_GPIO_DEF(wakeup, GPIOF_OUT_INIT_HIGH) },
+	{ PLAT_GPIO_DEF(boot0, GPIOF_OUT_INIT_LOW) },
+	{ PLAT_GPIO_DEF_IRQ(irq1, GPIOF_DIR_IN, 0) },
+	{ PLAT_GPIO_DEF_IRQ(irq2, GPIOF_DIR_IN, GPIO_OPT_OPTIONAL) },
+};
+
+static const struct iio_info nanohub_iio_info = {
+};
+
+static const struct file_operations nanohub_fileops = {
+	.owner = THIS_MODULE,
+	.open = nanohub_open,
+	.read = nanohub_read,
+	.write = nanohub_write,
+	.poll = nanohub_poll,
+	.release = nanohub_release,
+};
+
+enum {
+	ST_RESET,
+	ST_IDLE,
+	ST_ERROR,
+	ST_RUNNING
+};
+
+static inline bool gpio_is_optional(const struct gpio_config *_cfg)
+{
+	return _cfg->options & GPIO_OPT_OPTIONAL;
+}
+
+static inline bool gpio_has_irq(const struct gpio_config *_cfg)
+{
+	return _cfg->options & GPIO_OPT_HAS_IRQ;
+}
+
+static inline bool nanohub_has_priority_lock_locked(struct nanohub_data *data)
+{
+	return  atomic_read(&data->wakeup_lock_cnt) >
+		atomic_read(&data->wakeup_cnt);
+}
+
+static inline void nanohub_notify_thread(struct nanohub_data *data)
+{
+	atomic_set(&data->kthread_run, 1);
+	/* wake_up implementation works as memory barrier */
+	wake_up_interruptible_sync(&data->kthread_wait);
+}
+
+static inline void nanohub_io_init(struct nanohub_io *io,
+				   struct nanohub_data *data,
+				   struct device *dev)
+{
+	init_waitqueue_head(&io->buf_wait);
+	INIT_LIST_HEAD(&io->buf_list);
+	io->data = data;
+	io->dev = dev;
+}
+
+static inline bool nanohub_io_has_buf(struct nanohub_io *io)
+{
+	return !list_empty(&io->buf_list);
+}
+
+static struct nanohub_buf *nanohub_io_get_buf(struct nanohub_io *io,
+					      bool wait)
+{
+	struct nanohub_buf *buf = NULL;
+	int ret;
+
+	spin_lock(&io->buf_wait.lock);
+	if (wait) {
+		ret = wait_event_interruptible_locked(io->buf_wait,
+						      nanohub_io_has_buf(io));
+		if (ret < 0) {
+			spin_unlock(&io->buf_wait.lock);
+			return ERR_PTR(ret);
+		}
+	}
+
+	if (nanohub_io_has_buf(io)) {
+		buf = list_first_entry(&io->buf_list, struct nanohub_buf, list);
+		list_del(&buf->list);
+	}
+	spin_unlock(&io->buf_wait.lock);
+
+	return buf;
+}
+
+static void nanohub_io_put_buf(struct nanohub_io *io,
+			       struct nanohub_buf *buf)
+{
+	bool was_empty;
+
+	spin_lock(&io->buf_wait.lock);
+	was_empty = !nanohub_io_has_buf(io);
+	list_add_tail(&buf->list, &io->buf_list);
+	spin_unlock(&io->buf_wait.lock);
+
+	if (was_empty) {
+		if (&io->data->free_pool == io)
+			nanohub_notify_thread(io->data);
+		else
+			wake_up_interruptible(&io->buf_wait);
+	}
+}
+
+static inline int plat_gpio_get(struct nanohub_data *data,
+				const struct gpio_config *_cfg)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	return *(u32 *)(((char *)pdata) + (_cfg)->pdata_off);
+}
+
+static inline void nanohub_set_irq_data(struct nanohub_data *data,
+					const struct gpio_config *_cfg, int val)
+{
+	int *data_addr = ((int *)(((char *)data) + _cfg->data_off));
+
+	if ((void *)data_addr > (void *)data &&
+	    (void *)data_addr < (void *)(data + 1))
+		*data_addr = val;
+	else
+		WARN(1, "No data binding defined for %s", _cfg->label);
+}
+
+static inline void mcu_wakeup_gpio_set_value(struct nanohub_data *data,
+					     int val)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	gpio_set_value(pdata->wakeup_gpio, val);
+}
+
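+/*
+ * Wakeup line reference counting: wakeup_lock_cnt counts every holder
+ * (normal and priority), wakeup_cnt counts only normal holders.  The line
+ * is asserted (driven low) when the first normal holder arrives with no
+ * priority lock pending, and released once the last holder is gone.
+ */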
+static inline void mcu_wakeup_gpio_get_locked(struct nanohub_data *data,
+					      int priority_lock)
+{
+	atomic_inc(&data->wakeup_lock_cnt);
+	if (!priority_lock && atomic_inc_return(&data->wakeup_cnt) == 1 &&
+	    !nanohub_has_priority_lock_locked(data))
+		mcu_wakeup_gpio_set_value(data, 0);
+}
+
+static inline bool mcu_wakeup_gpio_put_locked(struct nanohub_data *data,
+					      int priority_lock)
+{
+	bool gpio_done = priority_lock ?
+			 atomic_read(&data->wakeup_cnt) == 0 :
+			 atomic_dec_and_test(&data->wakeup_cnt);
+	bool done = atomic_dec_and_test(&data->wakeup_lock_cnt);
+
+	if (!nanohub_has_priority_lock_locked(data))
+		mcu_wakeup_gpio_set_value(data, gpio_done ? 1 : 0);
+
+	return done;
+}
+
+static inline bool mcu_wakeup_gpio_is_locked(struct nanohub_data *data)
+{
+	return atomic_read(&data->wakeup_lock_cnt) != 0;
+}
+
+static inline void nanohub_handle_irq1(struct nanohub_data *data)
+{
+	bool locked;
+
+	spin_lock(&data->wakeup_wait.lock);
+	locked = mcu_wakeup_gpio_is_locked(data);
+	spin_unlock(&data->wakeup_wait.lock);
+	if (!locked)
+		nanohub_notify_thread(data);
+	else
+		wake_up_interruptible_sync(&data->wakeup_wait);
+}
+
+static inline void nanohub_handle_irq2(struct nanohub_data *data)
+{
+	nanohub_notify_thread(data);
+}
+
+static inline bool mcu_wakeup_try_lock(struct nanohub_data *data, int key)
+{
+	/* implementation contains memory barrier */
+	return atomic_cmpxchg(&data->wakeup_acquired, 0, key) == 0;
+}
+
+static inline void mcu_wakeup_unlock(struct nanohub_data *data, int key)
+{
+	WARN(atomic_cmpxchg(&data->wakeup_acquired, key, 0) != key,
+	     "%s: failed to unlock with key %d; current state: %d",
+	     __func__, key, atomic_read(&data->wakeup_acquired));
+}
+
+static inline void nanohub_set_state(struct nanohub_data *data, int state)
+{
+	atomic_set(&data->thread_state, state);
+	smp_mb__after_atomic(); /* updated thread state is now visible */
+}
+
+static inline int nanohub_get_state(struct nanohub_data *data)
+{
+	smp_mb__before_atomic(); /* wait for all updates to finish */
+	return atomic_read(&data->thread_state);
+}
+
+static inline void nanohub_clear_err_cnt(struct nanohub_data *data)
+{
+	data->kthread_err_cnt = data->wakeup_err_cnt = 0;
+}
+
+static long do_wait_intr_timeout(wait_queue_head_t *wq,
+			wait_queue_entry_t *wait, long tmo)
+{
+	long ret;
+
+	if (likely(list_empty(&wait->entry)))
+		__add_wait_queue_entry_tail(wq, wait);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock(&wq->lock);
+	ret = schedule_timeout(tmo);
+	spin_lock(&wq->lock);
+	return ret;
+}
+
+/* the following fragment is based on wait_event_* code from wait.h */
+#define wait_event_interruptible_timeout_locked(wq, cond, tmo)		\
+({									\
+	long __ret = (tmo);						\
+	DEFINE_WAIT(__wait);						\
+	if (!(cond)) {							\
+		do {							\
+			__ret = do_wait_intr_timeout(&(wq), &__wait, __ret); \
+			if (!__ret) {					\
+				if ((cond))				\
+					__ret = 1;			\
+				break;					\
+			} else if (__ret < 0)				\
+				break;					\
+		} while (!(cond));					\
+		__remove_wait_queue(&(wq), &__wait);			\
+		__set_current_state(TASK_RUNNING);			\
+	} else if (__ret == 0) {					\
+		__ret = 1;						\
+	}								\
+	__ret;								\
+})
+
+int request_wakeup_ex(struct nanohub_data *data, long timeout_ms,
+		      int key, int lock_mode)
+{
+	long timeout;
+	bool priority_lock = lock_mode > LOCK_MODE_NORMAL;
+	struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+	int ret;
+	ktime_t ktime_delta;
+	ktime_t wakeup_ktime;
+
+	spin_lock(&data->wakeup_wait.lock);
+	mcu_wakeup_gpio_get_locked(data, priority_lock);
+	timeout = (timeout_ms != MAX_SCHEDULE_TIMEOUT) ?
+		   msecs_to_jiffies(timeout_ms) :
+		   MAX_SCHEDULE_TIMEOUT;
+
+	if (!priority_lock && !data->wakeup_err_cnt)
+		wakeup_ktime = ktime_get_boottime();
+	timeout = wait_event_interruptible_timeout_locked(
+			data->wakeup_wait,
+			((priority_lock || nanohub_irq1_fired(data)) &&
+			 mcu_wakeup_try_lock(data, key)),
+			timeout
+		  );
+
+	if (timeout <= 0) {
+		if (!timeout && !priority_lock) {
+			if (!data->wakeup_err_cnt)
+				data->wakeup_err_ktime = wakeup_ktime;
+			ktime_delta = ktime_sub(ktime_get_boottime(),
+						data->wakeup_err_ktime);
+			data->wakeup_err_cnt++;
+			if (ktime_to_ns(ktime_delta) > WAKEUP_ERR_TIME_NS
+				&& data->wakeup_err_cnt > WAKEUP_ERR_CNT) {
+				mcu_wakeup_gpio_put_locked(data, priority_lock);
+				spin_unlock(&data->wakeup_wait.lock);
+				dev_info(sensor_dev,
+					"wakeup: hard reset due to consistent error\n");
+				ret = nanohub_hw_reset(data);
+				if (ret) {
+					dev_info(sensor_dev,
+						"%s: failed to reset nanohub: ret=%d\n",
+						__func__, ret);
+				}
+				return -ETIME;
+			}
+		}
+		mcu_wakeup_gpio_put_locked(data, priority_lock);
+
+		if (timeout == 0)
+			timeout = -ETIME;
+	} else {
+		data->wakeup_err_cnt = 0;
+		timeout = 0;
+	}
+	spin_unlock(&data->wakeup_wait.lock);
+
+	return timeout;
+}
+
+void release_wakeup_ex(struct nanohub_data *data, int key, int lock_mode)
+{
+	bool done;
+	bool priority_lock = lock_mode > LOCK_MODE_NORMAL;
+
+	spin_lock(&data->wakeup_wait.lock);
+	done = mcu_wakeup_gpio_put_locked(data, priority_lock);
+	mcu_wakeup_unlock(data, key);
+	spin_unlock(&data->wakeup_wait.lock);
+
+	if (!done)
+		wake_up_interruptible_sync(&data->wakeup_wait);
+	else if (nanohub_irq1_fired(data) || nanohub_irq2_fired(data))
+		nanohub_notify_thread(data);
+}
+
+int nanohub_wait_for_interrupt(struct nanohub_data *data)
+{
+	int ret = -EFAULT;
+
+	/* release the wakeup line, and wait for nanohub to send
+	 * us an interrupt indicating the transaction completed.
+	 */
+	spin_lock(&data->wakeup_wait.lock);
+	if (mcu_wakeup_gpio_is_locked(data)) {
+		mcu_wakeup_gpio_set_value(data, 1);
+		ret = wait_event_interruptible_locked(data->wakeup_wait,
+						      nanohub_irq1_fired(data));
+		mcu_wakeup_gpio_set_value(data, 0);
+	}
+	spin_unlock(&data->wakeup_wait.lock);
+
+	return ret;
+}
+
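+/*
+ * Signal end-of-message: briefly deassert the wakeup line (and reassert it
+ * if another transaction follows) while the wakeup lock is still held.
+ */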
+int nanohub_wakeup_eom(struct nanohub_data *data, bool repeat)
+{
+	int ret = -EFAULT;
+
+	spin_lock(&data->wakeup_wait.lock);
+	if (mcu_wakeup_gpio_is_locked(data)) {
+		mcu_wakeup_gpio_set_value(data, 1);
+		if (repeat)
+			mcu_wakeup_gpio_set_value(data, 0);
+		ret = 0;
+	}
+	spin_unlock(&data->wakeup_wait.lock);
+
+	return ret;
+}
+
+static void __nanohub_interrupt_cfg(struct nanohub_data *data,
+				    u8 interrupt, bool mask)
+{
+	int ret;
+	uint8_t mask_ret;
+	int cnt = 10;
+	struct device *dev = data->io[ID_NANOHUB_SENSOR].dev;
+	int cmd = mask ? CMD_COMMS_MASK_INTR : CMD_COMMS_UNMASK_INTR;
+
+	do {
+		ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+		if (ret) {
+			dev_err(dev,
+				"%s: interrupt %d %smask failed: ret=%d\n",
+				__func__, interrupt, mask ? "" : "un", ret);
+			return;
+		}
+
+		ret =
+		    nanohub_comms_tx_rx_retrans(data, cmd,
+						&interrupt, sizeof(interrupt),
+						&mask_ret, sizeof(mask_ret),
+						false, 10, 0);
+		release_wakeup(data);
+		dev_dbg(dev,
+			"%smasking interrupt %d, ret=%d, mask_ret=%d\n",
+			mask ? "" : "un",
+			interrupt, ret, mask_ret);
+	} while ((ret != 1 || mask_ret != 1) && --cnt > 0);
+}
+
+static inline void nanohub_mask_interrupt(struct nanohub_data *data,
+					  u8 interrupt)
+{
+	__nanohub_interrupt_cfg(data, interrupt, true);
+}
+
+static inline void nanohub_unmask_interrupt(struct nanohub_data *data,
+					    u8 interrupt)
+{
+	__nanohub_interrupt_cfg(data, interrupt, false);
+}
+
+static ssize_t nanohub_wakeup_query(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	nanohub_clear_err_cnt(data);
+	if (nanohub_irq1_fired(data) || nanohub_irq2_fired(data))
+		wake_up_interruptible(&data->wakeup_wait);
+
+	return scnprintf(buf, PAGE_SIZE, "WAKEUP: %d INT1: %d INT2: %d\n",
+			 gpio_get_value(pdata->wakeup_gpio),
+			 gpio_get_value(pdata->irq1_gpio),
+			 data->irq2 ? gpio_get_value(pdata->irq2_gpio) : -1);
+}
+
+static ssize_t nanohub_app_info(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	struct {
+		uint64_t appId;
+		uint32_t appVer;
+		uint32_t appSize;
+	} __packed buffer;
+	uint32_t i = 0;
+	int ret;
+	ssize_t len = 0;
+
+	do {
+		if (request_wakeup(data))
+			return -ERESTARTSYS;
+
+		if (nanohub_comms_tx_rx_retrans
+		    (data, CMD_COMMS_QUERY_APP_INFO, (uint8_t *)&i,
+		     sizeof(i), (u8 *)&buffer, sizeof(buffer),
+		     false, 10, 10) == sizeof(buffer)) {
+			ret =
+			    scnprintf(buf + len, PAGE_SIZE - len,
+				      "app: %d id: %016llx ver: %08x size: %08x\n",
+				      i, buffer.appId, buffer.appVer,
+				      buffer.appSize);
+			if (ret > 0) {
+				len += ret;
+				i++;
+			}
+		} else {
+			ret = -1;
+		}
+
+		release_wakeup(data);
+	} while (ret > 0);
+
+	return len;
+}
+
+static ssize_t nanohub_firmware_query(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	uint16_t buffer[6];
+
+	if (request_wakeup(data))
+		return -ERESTARTSYS;
+
+	if (nanohub_comms_tx_rx_retrans
+	    (data, CMD_COMMS_GET_OS_HW_VERSIONS, NULL, 0, (uint8_t *)&buffer,
+	     sizeof(buffer), false, 10, 10) == sizeof(buffer)) {
+		release_wakeup(data);
+		return scnprintf(buf, PAGE_SIZE,
+				 "hw type: %04x hw ver: %04x bl ver: %04x os ver: %04x variant ver: %08x\n",
+				 buffer[0], buffer[1], buffer[2], buffer[3],
+				 buffer[5] << 16 | buffer[4]);
+	} else {
+		release_wakeup(data);
+		return 0;
+	}
+}
+
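+/*
+ * Take exclusive ownership of the hub for the given lock mode: interrupt 2
+ * is disabled (or masked), the wakeup line is acquired with KEY_WAKEUP_LOCK,
+ * the bootloader interface is opened for the IO modes, and irq1 is disabled
+ * except in suspend/resume mode.
+ */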
+static inline int nanohub_wakeup_lock(struct nanohub_data *data, int mode)
+{
+	int ret;
+
+	if (data->irq2)
+		disable_irq(data->irq2);
+	else
+		nanohub_mask_interrupt(data, 2);
+
+	ret = request_wakeup_ex(data,
+				mode == LOCK_MODE_SUSPEND_RESUME ?
+				SUSPEND_TIMEOUT_MS : WAKEUP_TIMEOUT_MS,
+				KEY_WAKEUP_LOCK, mode);
+	if (ret < 0) {
+		if (data->irq2)
+			enable_irq(data->irq2);
+		else
+			nanohub_unmask_interrupt(data, 2);
+		return ret;
+	}
+
+	if (mode == LOCK_MODE_IO || mode == LOCK_MODE_IO_BL)
+		ret = nanohub_bl_open(data);
+	if (ret < 0) {
+		release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
+		return ret;
+	}
+	if (mode != LOCK_MODE_SUSPEND_RESUME)
+		disable_irq(data->irq1);
+
+	atomic_set(&data->lock_mode, mode);
+	mcu_wakeup_gpio_set_value(data, mode != LOCK_MODE_IO_BL);
+
+	return 0;
+}
+
+/* returns lock mode used to perform this lock */
+static inline int nanohub_wakeup_unlock(struct nanohub_data *data)
+{
+	int mode = atomic_read(&data->lock_mode);
+
+	atomic_set(&data->lock_mode, LOCK_MODE_NONE);
+	if (mode != LOCK_MODE_SUSPEND_RESUME)
+		enable_irq(data->irq1);
+	if (mode == LOCK_MODE_IO || mode == LOCK_MODE_IO_BL)
+		nanohub_bl_close(data);
+	if (data->irq2)
+		enable_irq(data->irq2);
+	release_wakeup_ex(data, KEY_WAKEUP_LOCK, mode);
+	if (!data->irq2)
+		nanohub_unmask_interrupt(data, 2);
+	nanohub_notify_thread(data);
+
+	return mode;
+}
+
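+/*
+ * boot0 > 0: reset into the stm32f bootloader (BOOT0 high, short settle);
+ * boot0 == 0: reset into the nanohub kernel (BOOT0 low, long settle);
+ * boot0 < 0: reset with BOOT0 held low and no settle delay.
+ */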
+static void __nanohub_hw_reset(struct nanohub_data *data, int boot0)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	gpio_set_value(pdata->nreset_gpio, 0);
+	gpio_set_value(pdata->boot0_gpio, boot0 > 0);
+	usleep_range(30, 40);
+	gpio_set_value(pdata->nreset_gpio, 1);
+	if (boot0 > 0)
+		usleep_range(70000, 75000);
+	else if (!boot0)
+		usleep_range(750000, 800000);
+	nanohub_clear_err_cnt(data);
+}
+
+static int nanohub_hw_reset(struct nanohub_data *data)
+{
+	int ret;
+	ret = nanohub_wakeup_lock(data, LOCK_MODE_RESET);
+
+	if (!ret) {
+		__nanohub_hw_reset(data, 0);
+		nanohub_wakeup_unlock(data);
+	}
+
+	return ret;
+}
+
+static ssize_t nanohub_try_hw_reset(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	int ret;
+
+	ret = nanohub_hw_reset(data);
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_erase_shared(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	uint8_t status = CMD_ACK;
+	int ret;
+
+	ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+	if (ret < 0)
+		return ret;
+
+	__nanohub_hw_reset(data, 1);
+
+	status = nanohub_bl_erase_shared(data);
+	dev_info(dev, "nanohub_bl_erase_shared: status=%02x\n",
+		 status);
+
+	__nanohub_hw_reset(data, 0);
+	nanohub_wakeup_unlock(data);
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_erase_shared_bl(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	uint8_t status = CMD_ACK;
+	int ret;
+
+	ret = nanohub_wakeup_lock(data, LOCK_MODE_IO_BL);
+	if (ret < 0)
+		return ret;
+
+	__nanohub_hw_reset(data, -1);
+
+	status = nanohub_bl_erase_shared_bl(data);
+	dev_info(dev, "%s: status=%02x\n", __func__, status);
+
+	__nanohub_hw_reset(data, 0);
+	nanohub_wakeup_unlock(data);
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_download_bl(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	const struct nanohub_platform_data *pdata = data->pdata;
+	const struct firmware *fw_entry;
+	int ret;
+	uint8_t status = CMD_ACK;
+
+	ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+	if (ret < 0)
+		return ret;
+
+	__nanohub_hw_reset(data, 1);
+
+	ret = request_firmware(&fw_entry, "nanohub.full.bin", dev);
+	if (ret) {
+		dev_err(dev, "%s: err=%d\n", __func__, ret);
+	} else {
+		status = nanohub_bl_download(data, pdata->bl_addr,
+					     fw_entry->data, fw_entry->size);
+		dev_info(dev, "%s: status=%02x\n", __func__, status);
+		release_firmware(fw_entry);
+	}
+
+	__nanohub_hw_reset(data, 0);
+	nanohub_wakeup_unlock(data);
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_download_kernel(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	const struct firmware *fw_entry;
+	int ret;
+
+	ret = request_firmware(&fw_entry, "nanohub.update.bin", dev);
+	if (ret) {
+		dev_err(dev, "nanohub_download_kernel: err=%d\n", ret);
+		return -EIO;
+	}
+
+	ret = nanohub_comms_kernel_download(data, fw_entry->data,
+					    fw_entry->size);
+
+	release_firmware(fw_entry);
+
+	return count;
+}
+
+static ssize_t nanohub_download_kernel_bl(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	const struct firmware *fw_entry;
+	int ret;
+	uint8_t status = CMD_ACK;
+
+	ret = request_firmware(&fw_entry, "nanohub.kernel.signed", dev);
+	if (ret) {
+		dev_err(dev, "%s: err=%d\n", __func__, ret);
+	} else {
+		ret = nanohub_wakeup_lock(data, LOCK_MODE_IO_BL);
+		if (ret < 0)
+			return ret;
+
+		__nanohub_hw_reset(data, -1);
+
+		status = nanohub_bl_erase_shared_bl(data);
+		dev_info(dev, "%s: (erase) status=%02x\n", __func__, status);
+		if (status == CMD_ACK) {
+			status = nanohub_bl_write_memory(data, 0x50000000,
+							 fw_entry->size,
+							 fw_entry->data);
+			mcu_wakeup_gpio_set_value(data, 1);
+			dev_info(dev, "%s: (write) status=%02x\n", __func__, status);
+			if (status == CMD_ACK) {
+				status = nanohub_bl_update_finished(data);
+				dev_info(dev, "%s: (finish) status=%02x\n", __func__, status);
+			}
+		} else {
+			mcu_wakeup_gpio_set_value(data, 1);
+		}
+
+		__nanohub_hw_reset(data, 0);
+		nanohub_wakeup_unlock(data);
+
+		release_firmware(fw_entry);
+	}
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_download_app(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	const struct firmware *fw_entry;
+	char buffer[70];
+	int i, ret, ret1, ret2, file_len = 0, appid_len = 0, ver_len = 0;
+	const char *appid = NULL, *ver = NULL;
+	unsigned long version;
+	uint64_t id;
+	uint32_t cur_version;
+	bool update = true;
+
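+	/*
+	 * The written string is "<file> [<appid> <ver>]": the firmware name
+	 * (".napp" is appended), optionally followed by a hex app id and hex
+	 * version used to skip the download when already current.
+	 */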
+	for (i = 0; i < count; i++) {
+		if (buf[i] == ' ') {
+			if (i + 1 == count) {
+				break;
+			} else {
+				if (appid == NULL)
+					appid = buf + i + 1;
+				else if (ver == NULL)
+					ver = buf + i + 1;
+				else
+					break;
+			}
+		} else if (buf[i] == '\n' || buf[i] == '\r') {
+			break;
+		} else {
+			if (ver)
+				ver_len++;
+			else if (appid)
+				appid_len++;
+			else
+				file_len++;
+		}
+	}
+
+	if (file_len > 64 || appid_len > 16 || ver_len > 8 || file_len < 1)
+		return -EIO;
+
+	memcpy(buffer, buf, file_len);
+	memcpy(buffer + file_len, ".napp", 5);
+	buffer[file_len + 5] = '\0';
+
+	ret = request_firmware(&fw_entry, buffer, dev);
+	if (ret) {
+		dev_err(dev, "nanohub_download_app(%s): err=%d\n",
+			buffer, ret);
+		return -EIO;
+	}
+	if (appid_len > 0 && ver_len > 0) {
+		memcpy(buffer, appid, appid_len);
+		buffer[appid_len] = '\0';
+
+		ret1 = kstrtoull(buffer, 16, &id);
+
+		memcpy(buffer, ver, ver_len);
+		buffer[ver_len] = '\0';
+
+		ret2 = kstrtoul(buffer, 16, &version);
+
+		if (ret1 == 0 && ret2 == 0) {
+			if (request_wakeup(data))
+				return -ERESTARTSYS;
+			if (nanohub_comms_tx_rx_retrans
+			    (data, CMD_COMMS_GET_APP_VERSIONS,
+			     (uint8_t *)&id, sizeof(id),
+			     (uint8_t *)&cur_version,
+			     sizeof(cur_version), false, 10,
+			     10) == sizeof(cur_version)) {
+				if (cur_version == version)
+					update = false;
+			}
+			release_wakeup(data);
+		}
+	}
+
+	if (update)
+		ret =
+		    nanohub_comms_app_download(data, fw_entry->data,
+					       fw_entry->size);
+
+	release_firmware(fw_entry);
+
+	return count;
+}
+
+static ssize_t nanohub_lock_bl(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	int ret;
+	uint8_t status = CMD_ACK;
+
+	ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+	if (ret < 0)
+		return ret;
+
+	__nanohub_hw_reset(data, 1);
+
+	gpio_set_value(data->pdata->boot0_gpio, 0);
+	/* this command reboots itself */
+	status = nanohub_bl_lock(data);
+	dev_info(dev, "%s: status=%02x\n", __func__, status);
+	msleep(350);
+
+	nanohub_wakeup_unlock(data);
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t nanohub_unlock_bl(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct nanohub_data *data = dev_get_nanohub_data(dev);
+	int ret;
+	uint8_t status = CMD_ACK;
+
+	ret = nanohub_wakeup_lock(data, LOCK_MODE_IO);
+	if (ret < 0)
+		return ret;
+
+	__nanohub_hw_reset(data, 1);
+
+	gpio_set_value(data->pdata->boot0_gpio, 0);
+	/* this command reboots itself (erasing the flash) */
+	status = nanohub_bl_unlock(data);
+	dev_info(dev, "%s: status=%02x\n", __func__, status);
+	msleep(20);
+
+	nanohub_wakeup_unlock(data);
+
+	return ret < 0 ? ret : count;
+}
+
+static struct device_attribute attributes[] = {
+	__ATTR(wakeup, 0440, nanohub_wakeup_query, NULL),
+	__ATTR(app_info, 0440, nanohub_app_info, NULL),
+	__ATTR(firmware_version, 0440, nanohub_firmware_query, NULL),
+	__ATTR(download_bl, 0220, NULL, nanohub_download_bl),
+	__ATTR(download_kernel, 0220, NULL, nanohub_download_kernel),
+	__ATTR(download_kernel_bl, 0220, NULL, nanohub_download_kernel_bl),
+	__ATTR(download_app, 0220, NULL, nanohub_download_app),
+	__ATTR(erase_shared, 0220, NULL, nanohub_erase_shared),
+	__ATTR(erase_shared_bl, 0220, NULL, nanohub_erase_shared_bl),
+	__ATTR(reset, 0220, NULL, nanohub_try_hw_reset),
+	__ATTR(lock, 0220, NULL, nanohub_lock_bl),
+	__ATTR(unlock, 0220, NULL, nanohub_unlock_bl),
+};
+
+static inline int nanohub_create_sensor(struct nanohub_data *data)
+{
+	int i, ret;
+	struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+
+	for (i = 0, ret = 0; i < ARRAY_SIZE(attributes); i++) {
+		ret = device_create_file(sensor_dev, &attributes[i]);
+		if (ret) {
+			dev_err(sensor_dev,
+				"create sysfs attr %d [%s] failed; err=%d\n",
+				i, attributes[i].attr.name, ret);
+			goto fail_attr;
+		}
+	}
+
+	ret = sysfs_create_link(&sensor_dev->kobj,
+				&data->iio_dev->dev.kobj, "iio");
+	if (ret) {
+		dev_err(sensor_dev,
+			"sysfs_create_link failed; err=%d\n", ret);
+		goto fail_attr;
+	}
+	goto done;
+
+fail_attr:
+	for (i--; i >= 0; i--)
+		device_remove_file(sensor_dev, &attributes[i]);
+done:
+	return ret;
+}
+
+static int nanohub_create_devices(struct nanohub_data *data)
+{
+	int i, ret;
+	static const char *names[ID_NANOHUB_MAX] = {
+			"nanohub", "nanohub_comms"
+	};
+
+	for (i = 0; i < ID_NANOHUB_MAX; ++i) {
+		struct nanohub_io *io = &data->io[i];
+
+		nanohub_io_init(io, data, device_create(sensor_class, NULL,
+							MKDEV(major, i),
+							io, names[i]));
+		if (IS_ERR(io->dev)) {
+			ret = PTR_ERR(io->dev);
+			pr_err("nanohub: device_create failed for %s; err=%d\n",
+			       names[i], ret);
+			goto fail_dev;
+		}
+	}
+
+	ret = nanohub_create_sensor(data);
+	if (!ret)
+		goto done;
+
+fail_dev:
+	for (--i; i >= 0; --i)
+		device_destroy(sensor_class, MKDEV(major, i));
+done:
+	return ret;
+}
+
+static int nanohub_match_devt(struct device *dev, const void *data)
+{
+	const dev_t *devt = data;
+
+	return dev->devt == *devt;
+}
+
+static int nanohub_open(struct inode *inode, struct file *file)
+{
+	dev_t devt = inode->i_rdev;
+	struct device *dev;
+
+	dev = class_find_device(sensor_class, NULL, &devt, nanohub_match_devt);
+	if (dev) {
+		file->private_data = dev_get_drvdata(dev);
+		nonseekable_open(inode, file);
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static ssize_t nanohub_read(struct file *file, char *buffer, size_t length,
+			    loff_t *offset)
+{
+	struct nanohub_io *io = file->private_data;
+	struct nanohub_data *data = io->data;
+	struct nanohub_buf *buf;
+	int ret;
+
+	if (!nanohub_io_has_buf(io) && (file->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	buf = nanohub_io_get_buf(io, true);
+	if (IS_ERR_OR_NULL(buf))
+		return PTR_ERR(buf);
+
+	ret = copy_to_user(buffer, buf->buffer, buf->length);
+	if (ret != 0)
+		ret = -EFAULT;
+	else
+		ret = buf->length;
+
+	nanohub_io_put_buf(&data->free_pool, buf);
+
+	return ret;
+}
+
+static ssize_t nanohub_write(struct file *file, const char *buffer,
+			     size_t length, loff_t *offset)
+{
+	struct nanohub_io *io = file->private_data;
+	struct nanohub_data *data = io->data;
+	int ret;
+
+	ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+	if (ret)
+		return ret;
+
+	ret = nanohub_comms_write(data, buffer, length);
+
+	release_wakeup(data);
+
+	return ret;
+}
+
+static unsigned int nanohub_poll(struct file *file, poll_table *wait)
+{
+	struct nanohub_io *io = file->private_data;
+	unsigned int mask = POLLOUT | POLLWRNORM;
+
+	poll_wait(file, &io->buf_wait, wait);
+
+	if (nanohub_io_has_buf(io))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+static int nanohub_release(struct inode *inode, struct file *file)
+{
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static void nanohub_destroy_devices(struct nanohub_data *data)
+{
+	int i;
+	struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+
+	sysfs_remove_link(&sensor_dev->kobj, "iio");
+	for (i = 0; i < ARRAY_SIZE(attributes); i++)
+		device_remove_file(sensor_dev, &attributes[i]);
+	for (i = 0; i < ID_NANOHUB_MAX; ++i)
+		device_destroy(sensor_class, MKDEV(major, i));
+}
+
+static irqreturn_t nanohub_irq1(int irq, void *dev_id)
+{
+	struct nanohub_data *data = (struct nanohub_data *)dev_id;
+
+	nanohub_handle_irq1(data);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nanohub_irq2(int irq, void *dev_id)
+{
+	struct nanohub_data *data = (struct nanohub_data *)dev_id;
+
+	nanohub_handle_irq2(data);
+
+	return IRQ_HANDLED;
+}
+
+static bool nanohub_os_log(char *buffer, int len)
+{
+	if (le32_to_cpu((((uint32_t *)buffer)[0]) & 0x7FFFFFFF) ==
+	    OS_LOG_EVENTID) {
+		char *mtype, *mdata = &buffer[5];
+
+		buffer[len] = 0x00;
+
+		switch (buffer[4]) {
+		case 'E':
+			mtype = KERN_ERR;
+			break;
+		case 'W':
+			mtype = KERN_WARNING;
+			break;
+		case 'I':
+			mtype = KERN_INFO;
+			break;
+		case 'D':
+			mtype = KERN_DEBUG;
+			break;
+		default:
+			mtype = KERN_DEFAULT;
+			mdata--;
+			break;
+		}
+		printk("%snanohub: %s", mtype, mdata);
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static void nanohub_process_buffer(struct nanohub_data *data,
+				   struct nanohub_buf **buf,
+				   int ret)
+{
+	uint32_t event_id;
+	uint8_t interrupt;
+	bool wakeup = false;
+	struct nanohub_io *io = &data->io[ID_NANOHUB_SENSOR];
+
+	data->kthread_err_cnt = 0;
+	if (ret < 4 || nanohub_os_log((*buf)->buffer, ret)) {
+		release_wakeup(data);
+		return;
+	}
+
+	(*buf)->length = ret;
+
+	event_id = le32_to_cpu((((uint32_t *)(*buf)->buffer)[0]) & 0x7FFFFFFF);
+	if (ret >= sizeof(uint32_t) + sizeof(uint64_t) + sizeof(uint32_t) &&
+	    event_id > FIRST_SENSOR_EVENTID &&
+	    event_id <= LAST_SENSOR_EVENTID) {
+		interrupt = (*buf)->buffer[sizeof(uint32_t) +
+					   sizeof(uint64_t) + 3];
+		if (interrupt == WAKEUP_INTERRUPT)
+			wakeup = true;
+	}
+	if (event_id == APP_TO_HOST_EVENTID) {
+		wakeup = true;
+		io = &data->io[ID_NANOHUB_COMMS];
+	}
+
+	nanohub_io_put_buf(io, *buf);
+
+	*buf = NULL;
+	/*
+	 * for wakeup interrupts: hold a wake lock for 250ms so the sensor
+	 * HAL has time to grab its own wake lock
+	 */
+	if (wakeup)
+		__pm_wakeup_event(&data->wakesrc_read, 250);
+	release_wakeup(data);
+}
+
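+/*
+ * Worker thread that drains the hub's event queue into client buffers.
+ * States: ST_RESET (reset hub, then idle), ST_IDLE (sleep until notified),
+ * ST_RUNNING (read until the queue is empty), ST_ERROR (back off after
+ * repeated read failures; a long error streak triggers a hard reset).
+ */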
+static int nanohub_kthread(void *arg)
+{
+	struct nanohub_data *data = (struct nanohub_data *)arg;
+	struct nanohub_buf *buf = NULL;
+	int ret;
+	ktime_t ktime_delta;
+	uint32_t clear_interrupts[8] = { 0x00000006 };
+	struct device *sensor_dev = data->io[ID_NANOHUB_SENSOR].dev;
+	static const struct sched_param param = {
+		.sched_priority = (MAX_RT_PRIO/2)-1,
+	};
+
+	data->kthread_err_cnt = 0;
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	nanohub_set_state(data, ST_RESET);
+
+	while (!kthread_should_stop()) {
+		switch (nanohub_get_state(data)) {
+		case ST_RESET:
+			nanohub_reset(data);
+			nanohub_wakeup_unlock(data);
+			nanohub_set_state(data, ST_IDLE);
+			break;
+		case ST_IDLE:
+			wait_event_interruptible(data->kthread_wait,
+						 atomic_read(&data->kthread_run)
+						 );
+			nanohub_set_state(data, ST_RUNNING);
+			break;
+		case ST_ERROR:
+			ktime_delta = ktime_sub(ktime_get_boottime(),
+						data->kthread_err_ktime);
+			if (ktime_to_ns(ktime_delta) > KTHREAD_ERR_TIME_NS
+				&& data->kthread_err_cnt > KTHREAD_ERR_CNT) {
+				dev_info(sensor_dev,
+					"kthread: hard reset due to consistent error\n");
+				ret = nanohub_hw_reset(data);
+				if (ret) {
+					dev_info(sensor_dev,
+						"%s: failed to reset nanohub: ret=%d\n",
+						__func__, ret);
+				}
+			}
+			msleep_interruptible(WAKEUP_TIMEOUT_MS);
+			nanohub_set_state(data, ST_RUNNING);
+			break;
+		case ST_RUNNING:
+			break;
+		}
+		atomic_set(&data->kthread_run, 0);
+		if (!buf)
+			buf = nanohub_io_get_buf(&data->free_pool,
+						 false);
+		if (buf) {
+			ret = request_wakeup_timeout(data, WAKEUP_TIMEOUT_MS);
+			if (ret) {
+				dev_info(sensor_dev,
+					 "%s: request_wakeup_timeout: ret=%d\n",
+					 __func__, ret);
+				continue;
+			}
+
+			ret = nanohub_comms_rx_retrans_boottime(
+			    data, CMD_COMMS_READ, buf->buffer,
+			    sizeof(buf->buffer), 10, 0);
+
+			if (ret > 0) {
+				nanohub_process_buffer(data, &buf, ret);
+				if (!nanohub_irq1_fired(data) &&
+				    !nanohub_irq2_fired(data)) {
+					nanohub_set_state(data, ST_IDLE);
+					continue;
+				}
+			} else if (ret == 0) {
+				/* queue empty, go to sleep */
+				data->kthread_err_cnt = 0;
+				data->interrupts[0] &= ~0x00000006;
+				release_wakeup(data);
+				nanohub_set_state(data, ST_IDLE);
+				continue;
+			} else {
+				release_wakeup(data);
+				if (data->kthread_err_cnt == 0)
+					data->kthread_err_ktime =
+						ktime_get_boottime();
+
+				data->kthread_err_cnt++;
+				if (data->kthread_err_cnt >= KTHREAD_WARN_CNT) {
+					dev_err(sensor_dev,
+						"%s: kthread_err_cnt=%d\n",
+						__func__,
+						data->kthread_err_cnt);
+					nanohub_set_state(data, ST_ERROR);
+					continue;
+				}
+			}
+		} else {
+			if (!nanohub_irq1_fired(data) &&
+			    !nanohub_irq2_fired(data)) {
+				nanohub_set_state(data, ST_IDLE);
+				continue;
+			}
+			/* pending interrupt, but no room to read data -
+			 * clear interrupts */
+			if (request_wakeup(data))
+				continue;
+			nanohub_comms_tx_rx_retrans(data,
+						    CMD_COMMS_CLR_GET_INTR,
+						    (uint8_t *)clear_interrupts,
+						    sizeof(clear_interrupts),
+						    (uint8_t *)data->interrupts,
+						    sizeof(data->interrupts),
+						    false, 10, 0);
+			release_wakeup(data);
+			nanohub_set_state(data, ST_IDLE);
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static struct nanohub_platform_data *nanohub_parse_dt(struct device *dev)
+{
+	struct nanohub_platform_data *pdata;
+	struct device_node *dt = dev->of_node;
+	const uint32_t *tmp;
+	struct property *prop;
+	uint32_t u, i;
+	int ret;
+
+	if (!dt)
+		return ERR_PTR(-ENODEV);
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_property_read_bool(dt, "spi-cpha"))
+		pdata->spi_mode |= SPI_CPHA;
+	if (of_property_read_bool(dt, "spi-cpol"))
+		pdata->spi_mode |= SPI_CPOL;
+
+	ret = pdata->irq1_gpio =
+	    of_get_named_gpio(dt, "sensorhub,irq1-gpio", 0);
+	if (ret < 0) {
+		pr_err("nanohub: missing sensorhub,irq1-gpio in device tree\n");
+		goto free_pdata;
+	}
+
+	/* optional (strongly recommended) */
+	pdata->irq2_gpio = of_get_named_gpio(dt, "sensorhub,irq2-gpio", 0);
+
+	ret = pdata->wakeup_gpio =
+	    of_get_named_gpio(dt, "sensorhub,wakeup-gpio", 0);
+	if (ret < 0) {
+		pr_err
+		    ("nanohub: missing sensorhub,wakeup-gpio in device tree\n");
+		goto free_pdata;
+	}
+
+	ret = pdata->nreset_gpio =
+	    of_get_named_gpio(dt, "sensorhub,nreset-gpio", 0);
+	if (ret < 0) {
+		pr_err
+		    ("nanohub: missing sensorhub,nreset-gpio in device tree\n");
+		goto free_pdata;
+	}
+
+	/* optional (stm32f bootloader) */
+	pdata->boot0_gpio = of_get_named_gpio(dt, "sensorhub,boot0-gpio", 0);
+
+	/* optional (spi) */
+	pdata->spi_cs_gpio = of_get_named_gpio(dt, "sensorhub,spi-cs-gpio", 0);
+
+	/* optional (bl-max-frequency) */
+	pdata->bl_max_speed_hz = BL_MAX_SPEED_HZ;
+	ret = of_property_read_u32(dt, "sensorhub,bl-max-frequency", &u);
+	if (!ret)
+		pdata->bl_max_speed_hz = u;
+
+	/* optional (stm32f bootloader) */
+	of_property_read_u32(dt, "sensorhub,bl-addr", &pdata->bl_addr);
+
+	/* optional (stm32f bootloader) */
+	tmp = of_get_property(dt, "sensorhub,num-flash-banks", NULL);
+	if (tmp) {
+		pdata->num_flash_banks = be32_to_cpup(tmp);
+		pdata->flash_banks =
+		    devm_kzalloc(dev,
+				 sizeof(struct nanohub_flash_bank) *
+				 pdata->num_flash_banks, GFP_KERNEL);
+		if (!pdata->flash_banks)
+			goto no_mem;
+
+		/* TODO: investigate replacing with of_property_read_u32_array
+		 */
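+		/* each flash-banks entry is a <bank address length> triple */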
+		i = 0;
+		of_property_for_each_u32(dt, "sensorhub,flash-banks", prop, tmp,
+					 u) {
+			if (i / 3 >= pdata->num_flash_banks)
+				break;
+			switch (i % 3) {
+			case 0:
+				pdata->flash_banks[i / 3].bank = u;
+				break;
+			case 1:
+				pdata->flash_banks[i / 3].address = u;
+				break;
+			case 2:
+				pdata->flash_banks[i / 3].length = u;
+				break;
+			}
+			i++;
+		}
+	}
+
+	/* optional (stm32f bootloader) */
+	tmp = of_get_property(dt, "sensorhub,num-shared-flash-banks", NULL);
+	if (tmp) {
+		pdata->num_shared_flash_banks = be32_to_cpup(tmp);
+		pdata->shared_flash_banks =
+		    devm_kzalloc(dev,
+				 sizeof(struct nanohub_flash_bank) *
+				 pdata->num_shared_flash_banks, GFP_KERNEL);
+		if (!pdata->shared_flash_banks)
+			goto no_mem_shared;
+
+		/* TODO: investigate replacing with of_property_read_u32_array
+		 */
+		i = 0;
+		of_property_for_each_u32(dt, "sensorhub,shared-flash-banks",
+					 prop, tmp, u) {
+			if (i / 3 >= pdata->num_shared_flash_banks)
+				break;
+			switch (i % 3) {
+			case 0:
+				pdata->shared_flash_banks[i / 3].bank = u;
+				break;
+			case 1:
+				pdata->shared_flash_banks[i / 3].address = u;
+				break;
+			case 2:
+				pdata->shared_flash_banks[i / 3].length = u;
+				break;
+			}
+			i++;
+		}
+	}
+
+	return pdata;
+
+no_mem_shared:
+	devm_kfree(dev, pdata->flash_banks);
+no_mem:
+	ret = -ENOMEM;
+free_pdata:
+	devm_kfree(dev, pdata);
+	return ERR_PTR(ret);
+}
+#else
+static struct nanohub_platform_data *nanohub_parse_dt(struct device *dev)
+{
+	struct nanohub_platform_data *pdata;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->bl_max_speed_hz = BL_MAX_SPEED_HZ;
+	return pdata;
+}
+#endif
+
+static int nanohub_request_irqs(struct nanohub_data *data)
+{
+	int ret;
+
+	ret = request_threaded_irq(data->irq1, NULL, nanohub_irq1,
+				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				   "nanohub-irq1", data);
+	if (ret < 0)
+		data->irq1 = 0;
+	else
+		disable_irq(data->irq1);
+	if (data->irq2 <= 0 || ret < 0) {
+		data->irq2 = 0;
+		return ret;
+	}
+
+	ret = request_threaded_irq(data->irq2, NULL, nanohub_irq2,
+				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				   "nanohub-irq2", data);
+	if (ret < 0) {
+		WARN(1, "failed to request optional IRQ %d; err=%d",
+		     data->irq2, ret);
+		data->irq2 = 0;
+	} else {
+		disable_irq(data->irq2);
+	}
+
+	/* if the second request fails, hide it; the IRQ is optional and its
+	 * failure should not interrupt the driver init sequence.
+	 */
+	return 0;
+}
+
+static int nanohub_request_gpios(struct nanohub_data *data)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(gconf); ++i) {
+		const struct gpio_config *cfg = &gconf[i];
+		unsigned int gpio = plat_gpio_get(data, cfg);
+		const char *label;
+		bool optional = gpio_is_optional(cfg);
+
+		ret = 0; /* clear errors on optional pins, if any */
+
+		if (!gpio_is_valid(gpio) && optional)
+			continue;
+
+		label = cfg->label;
+		ret = gpio_request_one(gpio, cfg->flags, label);
+		if (ret && !optional) {
+			pr_err("nanohub: gpio %d[%s] request failed;err=%d\n",
+			       gpio, label, ret);
+			break;
+		}
+		if (gpio_has_irq(cfg)) {
+			int irq = gpio_to_irq(gpio);
+			if (irq <= 0) {
+				if (!optional)
+					irq = irq_of_parse_and_map(data->iio_dev->dev.parent->of_node, 0);
+				else
+					continue;
+				if (irq <= 0) {
+					ret = -EINVAL;
+					pr_err("nanohub: no irq; gpio %d[%s];err=%d\n",
+					       gpio, label, irq);
+					break;
+				}
+			}
+			nanohub_set_irq_data(data, cfg, irq);
+		}
+	}
+	if (i < ARRAY_SIZE(gconf)) {
+		for (--i; i >= 0; --i)
+			gpio_free(plat_gpio_get(data, &gconf[i]));
+	}
+
+	return ret;
+}
+
+static void nanohub_release_gpios_irqs(struct nanohub_data *data)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	if (data->irq2)
+		free_irq(data->irq2, data);
+	if (data->irq1)
+		free_irq(data->irq1, data);
+	if (gpio_is_valid(pdata->irq2_gpio))
+		gpio_free(pdata->irq2_gpio);
+	gpio_free(pdata->irq1_gpio);
+	gpio_set_value(pdata->nreset_gpio, 0);
+	gpio_free(pdata->nreset_gpio);
+	mcu_wakeup_gpio_set_value(data, 1);
+	gpio_free(pdata->wakeup_gpio);
+	gpio_set_value(pdata->boot0_gpio, 0);
+	gpio_free(pdata->boot0_gpio);
+}
+
+struct iio_dev *nanohub_probe(struct device *dev, struct iio_dev *iio_dev)
+{
+	int ret, i;
+	const struct nanohub_platform_data *pdata;
+	struct nanohub_data *data;
+	struct nanohub_buf *buf;
+	bool own_iio_dev = !iio_dev;
+
+	pdata = dev_get_platdata(dev);
+	if (!pdata) {
+		pdata = nanohub_parse_dt(dev);
+		if (IS_ERR(pdata))
+			return ERR_CAST(pdata);
+	}
+
+	if (own_iio_dev) {
+		iio_dev = iio_device_alloc(dev, sizeof(struct nanohub_data));
+		if (!iio_dev)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	iio_dev->name = "nanohub";
+	iio_dev->dev.parent = dev;
+	iio_dev->info = &nanohub_iio_info;
+	iio_dev->channels = NULL;
+	iio_dev->num_channels = 0;
+
+	data = iio_priv(iio_dev);
+	data->iio_dev = iio_dev;
+	data->pdata = pdata;
+
+	init_waitqueue_head(&data->kthread_wait);
+
+	nanohub_io_init(&data->free_pool, data, dev);
+
+	buf = vmalloc(sizeof(*buf) * READ_QUEUE_DEPTH);
+	data->vbuf = buf;
+	if (!buf) {
+		ret = -ENOMEM;
+		goto fail_vma;
+	}
+
+	for (i = 0; i < READ_QUEUE_DEPTH; i++)
+		nanohub_io_put_buf(&data->free_pool, &buf[i]);
+	atomic_set(&data->kthread_run, 0);
+	wakeup_source_add(&data->wakesrc_read);
+
+	/* hold lock until reset completes */
+	atomic_set(&data->lock_mode, LOCK_MODE_RESET);
+	atomic_set(&data->wakeup_cnt, 0);
+	atomic_set(&data->wakeup_lock_cnt, 1);
+	atomic_set(&data->wakeup_acquired, KEY_WAKEUP_LOCK);
+	init_waitqueue_head(&data->wakeup_wait);
+
+	ret = nanohub_request_gpios(data);
+	if (ret)
+		goto fail_gpio;
+
+	ret = nanohub_request_irqs(data);
+	if (ret)
+		goto fail_irq;
+
+	ret = iio_device_register(iio_dev);
+	if (ret) {
+		pr_err("nanohub: iio_device_register failed\n");
+		goto fail_irq;
+	}
+
+	ret = nanohub_create_devices(data);
+	if (ret)
+		goto fail_dev;
+
+	data->thread = kthread_create(nanohub_kthread, data, "nanohub");
+	if (!IS_ERR(data->thread))
+		return iio_dev;
+
+fail_dev:
+	iio_device_unregister(iio_dev);
+fail_irq:
+	nanohub_release_gpios_irqs(data);
+fail_gpio:
+	wakeup_source_remove(&data->wakesrc_read);
+	vfree(buf);
+fail_vma:
+	if (own_iio_dev)
+		iio_device_free(iio_dev);
+
+	return ERR_PTR(ret);
+}
+
+void nanohub_start(struct nanohub_data *data)
+{
+	wake_up_process(data->thread);
+}
+
+int nanohub_reset(struct nanohub_data *data)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	gpio_set_value(pdata->nreset_gpio, 1);
+	usleep_range(650000, 700000);
+	return 0;
+}
+
+int nanohub_remove(struct iio_dev *iio_dev)
+{
+	struct nanohub_data *data = iio_priv(iio_dev);
+
+	nanohub_notify_thread(data);
+	kthread_stop(data->thread);
+
+	nanohub_destroy_devices(data);
+	iio_device_unregister(iio_dev);
+	nanohub_release_gpios_irqs(data);
+	wakeup_source_remove(&data->wakesrc_read);
+	vfree(data->vbuf);
+	iio_device_free(iio_dev);
+
+	return 0;
+}
+
+int nanohub_suspend(struct iio_dev *iio_dev)
+{
+	struct nanohub_data *data = iio_priv(iio_dev);
+	int ret;
+
+	ret = nanohub_wakeup_lock(data, LOCK_MODE_SUSPEND_RESUME);
+	if (!ret) {
+		int cnt;
+		const int max_cnt = 10;
+
+		for (cnt = 0; cnt < max_cnt; ++cnt) {
+			if (!nanohub_irq1_fired(data))
+				break;
+			usleep_range(10, 15);
+		}
+		if (cnt < max_cnt) {
+			dev_dbg(&iio_dev->dev, "%s: cnt=%d\n", __func__, cnt);
+			enable_irq_wake(data->irq1);
+			return 0;
+		}
+		ret = -EBUSY;
+		dev_info(&iio_dev->dev,
+			 "%s: failed to suspend: IRQ1=%d, state=%d\n",
+			 __func__, nanohub_irq1_fired(data),
+			 nanohub_get_state(data));
+		nanohub_wakeup_unlock(data);
+	} else {
+		dev_info(&iio_dev->dev, "%s: could not take wakeup lock\n",
+			 __func__);
+	}
+
+	return ret;
+}
+
+int nanohub_resume(struct iio_dev *iio_dev)
+{
+	struct nanohub_data *data = iio_priv(iio_dev);
+
+	disable_irq_wake(data->irq1);
+	nanohub_wakeup_unlock(data);
+
+	return 0;
+}
+
+static int __init nanohub_init(void)
+{
+	int ret = 0;
+
+	sensor_class = class_create(THIS_MODULE, "nanohub");
+	if (IS_ERR(sensor_class)) {
+		ret = PTR_ERR(sensor_class);
+		pr_err("nanohub: class_create failed; err=%d\n", ret);
+	}
+	if (!ret)
+		major = __register_chrdev(0, 0, ID_NANOHUB_MAX, "nanohub",
+					  &nanohub_fileops);
+
+	if (major < 0) {
+		ret = major;
+		major = 0;
+		pr_err("nanohub: can't register; err=%d\n", ret);
+	}
+
+#ifdef CONFIG_NANOHUB_SPI
+	if (ret == 0)
+		ret = nanohub_spi_init();
+#endif
+	pr_info("nanohub: loaded; ret=%d\n", ret);
+	return ret;
+}
+
+static void __exit nanohub_cleanup(void)
+{
+#ifdef CONFIG_NANOHUB_SPI
+	nanohub_spi_cleanup();
+#endif
+	__unregister_chrdev(major, 0, ID_NANOHUB_MAX, "nanohub");
+	class_destroy(sensor_class);
+	major = 0;
+	sensor_class = NULL;
+}
+
+module_init(nanohub_init);
+module_exit(nanohub_cleanup);
+
+MODULE_AUTHOR("Ben Fennema");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/nanohub/main.h b/drivers/staging/nanohub/main.h
new file mode 100644
index 0000000..bbf638b
--- /dev/null
+++ b/drivers/staging/nanohub/main.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_MAIN_H
+#define _NANOHUB_MAIN_H
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+
+#include "comms.h"
+#include "bl.h"
+
+#define NANOHUB_NAME "nanohub"
+
+struct nanohub_buf {
+	struct list_head list;
+	uint8_t buffer[255];
+	uint8_t length;
+};
+
+struct nanohub_data;
+
+struct nanohub_io {
+	struct device *dev;
+	struct nanohub_data *data;
+	wait_queue_head_t buf_wait;
+	struct list_head buf_list;
+};
+
+static inline struct nanohub_data *dev_get_nanohub_data(struct device *dev)
+{
+	struct nanohub_io *io = dev_get_drvdata(dev);
+
+	return io->data;
+}
+
+struct nanohub_data {
+	/* indices for io[] array */
+	#define ID_NANOHUB_SENSOR 0
+	#define ID_NANOHUB_COMMS 1
+	#define ID_NANOHUB_MAX 2
+
+	struct iio_dev *iio_dev;
+	struct nanohub_io io[ID_NANOHUB_MAX];
+
+	struct nanohub_comms comms;
+	struct nanohub_bl bl;
+	const struct nanohub_platform_data *pdata;
+	int irq1;
+	int irq2;
+	uint32_t max_speed_hz;
+
+	atomic_t kthread_run;
+	atomic_t thread_state;
+	wait_queue_head_t kthread_wait;
+
+	struct wakeup_source wakesrc_read;
+
+	struct nanohub_io free_pool;
+
+	atomic_t lock_mode;
+	/* these 3 vars should be accessed only with wakeup_wait.lock held */
+	atomic_t wakeup_cnt;
+	atomic_t wakeup_lock_cnt;
+	atomic_t wakeup_acquired;
+	wait_queue_head_t wakeup_wait;
+
+	uint32_t interrupts[8];
+
+	ktime_t wakeup_err_ktime;
+	int wakeup_err_cnt;
+
+	ktime_t kthread_err_ktime;
+	int kthread_err_cnt;
+
+	void *vbuf;
+	struct task_struct *thread;
+};
+
+enum {
+	KEY_WAKEUP_NONE,
+	KEY_WAKEUP,
+	KEY_WAKEUP_LOCK,
+};
+
+enum {
+	LOCK_MODE_NONE,
+	LOCK_MODE_NORMAL,
+	LOCK_MODE_IO,
+	LOCK_MODE_IO_BL,
+	LOCK_MODE_RESET,
+	LOCK_MODE_SUSPEND_RESUME,
+};
+
+int request_wakeup_ex(struct nanohub_data *data, long timeout,
+		      int key, int lock_mode);
+void release_wakeup_ex(struct nanohub_data *data, int key, int lock_mode);
+int nanohub_wait_for_interrupt(struct nanohub_data *data);
+int nanohub_wakeup_eom(struct nanohub_data *data, bool repeat);
+struct iio_dev *nanohub_probe(struct device *dev, struct iio_dev *iio_dev);
+void nanohub_start(struct nanohub_data *data);
+int nanohub_reset(struct nanohub_data *data);
+int nanohub_remove(struct iio_dev *iio_dev);
+int nanohub_suspend(struct iio_dev *iio_dev);
+int nanohub_resume(struct iio_dev *iio_dev);
+
+static inline int nanohub_irq1_fired(struct nanohub_data *data)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	return !gpio_get_value(pdata->irq1_gpio);
+}
+
+static inline int nanohub_irq2_fired(struct nanohub_data *data)
+{
+	const struct nanohub_platform_data *pdata = data->pdata;
+
+	return data->irq2 && !gpio_get_value(pdata->irq2_gpio);
+}
+
+static inline int request_wakeup_timeout(struct nanohub_data *data, int timeout)
+{
+	return request_wakeup_ex(data, timeout, KEY_WAKEUP, LOCK_MODE_NORMAL);
+}
+
+static inline int request_wakeup(struct nanohub_data *data)
+{
+	return request_wakeup_ex(data, MAX_SCHEDULE_TIMEOUT, KEY_WAKEUP,
+				 LOCK_MODE_NORMAL);
+}
+
+static inline void release_wakeup(struct nanohub_data *data)
+{
+	release_wakeup_ex(data, KEY_WAKEUP, LOCK_MODE_NORMAL);
+}
+
+#endif
diff --git a/drivers/staging/nanohub/spi.c b/drivers/staging/nanohub/spi.c
new file mode 100644
index 0000000..b5a9904
--- /dev/null
+++ b/drivers/staging/nanohub/spi.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+
+#include "main.h"
+#include "bl.h"
+#include "comms.h"
+
+#define SPI_TIMEOUT		65535
+#define SPI_MIN_DMA		48
+#define SPI_MAX_SPEED_HZ	10000000
+
+struct nanohub_spi_data {
+	struct nanohub_data data;
+	struct spi_device *device;
+	struct semaphore spi_sem;
+	int cs;
+	uint16_t rx_length;
+	uint16_t rx_offset;
+};
+
+static uint8_t bl_checksum(const uint8_t *bytes, int length)
+{
+	int i;
+	uint8_t csum;
+
+	if (length == 1) {
+		csum = ~bytes[0];
+	} else if (length > 1) {
+		for (csum = 0, i = 0; i < length; i++)
+			csum ^= bytes[i];
+	} else {
+		csum = 0xFF;
+	}
+
+	return csum;
+}
+
+static uint8_t spi_bl_write_data(const void *data, uint8_t *tx, int length)
+{
+	const struct nanohub_spi_data *spi_data = data;
+	const struct nanohub_bl *bl = &spi_data->data.bl;
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.len = length + 1,
+		.tx_buf = bl->tx_buffer,
+		.rx_buf = bl->rx_buffer,
+		.cs_change = 1,
+	};
+
+	tx[length] = bl_checksum(tx, length);
+	memcpy(bl->tx_buffer, tx, length + 1);
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+
+	if (spi_sync_locked(spi_data->device, &msg) == 0)
+		return bl->rx_buffer[length];
+	else
+		return CMD_NACK;
+}
+
+static uint8_t spi_bl_write_cmd(const void *data, uint8_t cmd)
+{
+	const struct nanohub_spi_data *spi_data = data;
+	const struct nanohub_bl *bl = &spi_data->data.bl;
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.len = 3,
+		.tx_buf = bl->tx_buffer,
+		.rx_buf = bl->rx_buffer,
+		.cs_change = 1,
+	};
+	bl->tx_buffer[0] = CMD_SOF;
+	bl->tx_buffer[1] = cmd;
+	bl->tx_buffer[2] = ~cmd;
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+
+	if (spi_sync_locked(spi_data->device, &msg) == 0)
+		return CMD_ACK;
+	else
+		return CMD_NACK;
+}
+
+static uint8_t spi_bl_read_data(const void *data, uint8_t *rx, int length)
+{
+	const struct nanohub_spi_data *spi_data = data;
+	const struct nanohub_bl *bl = &spi_data->data.bl;
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.len = length + 1,
+		.tx_buf = bl->tx_buffer,
+		.rx_buf = bl->rx_buffer,
+		.cs_change = 1,
+	};
+	memset(&bl->tx_buffer[0], 0x00, length + 1);
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+
+	if (spi_sync_locked(spi_data->device, &msg) == 0) {
+		memcpy(rx, &bl->rx_buffer[1], length);
+		return CMD_ACK;
+	} else {
+		return CMD_NACK;
+	}
+}
+
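+/* poll until the bootloader answers ACK or NACK (or the poll budget runs
+ * out), then clock out an ACK byte to finish the exchange
+ */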
+static uint8_t spi_bl_read_ack(const void *data)
+{
+	const struct nanohub_spi_data *spi_data = data;
+	const struct nanohub_bl *bl = &spi_data->data.bl;
+	int32_t timeout = SPI_TIMEOUT;
+	uint8_t ret;
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.len = 1,
+		.tx_buf = bl->tx_buffer,
+		.rx_buf = bl->rx_buffer,
+		.cs_change = 1,
+	};
+	bl->tx_buffer[0] = 0x00;
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+
+	if (spi_sync_locked(spi_data->device, &msg) == 0) {
+		do {
+			spi_sync_locked(spi_data->device, &msg);
+			timeout--;
+			if (bl->rx_buffer[0] != CMD_ACK
+			    && bl->rx_buffer[0] != CMD_NACK
+			    && timeout % 256 == 0)
+				schedule();
+		} while (bl->rx_buffer[0] != CMD_ACK
+			 && bl->rx_buffer[0] != CMD_NACK && timeout > 0);
+
+		if (bl->rx_buffer[0] != CMD_ACK && bl->rx_buffer[0] != CMD_NACK
+		    && timeout == 0)
+			ret = CMD_NACK;
+		else
+			ret = bl->rx_buffer[0];
+
+		bl->tx_buffer[0] = CMD_ACK;
+		spi_sync_locked(spi_data->device, &msg);
+		return ret;
+	} else {
+		return CMD_NACK;
+	}
+}
+
+static int spi_bl_open(const void *data)
+{
+	const struct nanohub_spi_data *spi_data = data;
+	int ret;
+
+	spi_bus_lock(spi_data->device->master);
+	spi_data->device->max_speed_hz = spi_data->data.pdata->bl_max_speed_hz;
+	spi_data->device->mode = spi_data->data.pdata->spi_mode;
+	spi_data->device->bits_per_word = 8;
+	ret = spi_setup(spi_data->device);
+	if (!ret)
+		gpio_set_value(spi_data->cs, 0);
+
+	return ret;
+}
+
+static void spi_bl_close(const void *data)
+{
+	const struct nanohub_spi_data *spi_data = data;
+
+	gpio_set_value(spi_data->cs, 1);
+	spi_bus_unlock(spi_data->device->master);
+}
+
+static uint8_t spi_bl_sync(const void *data)
+{
+	const struct nanohub_spi_data *spi_data = data;
+	const struct nanohub_bl *bl = &spi_data->data.bl;
+	int32_t timeout = SPI_TIMEOUT;
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.len = 1,
+		.tx_buf = bl->tx_buffer,
+		.rx_buf = bl->rx_buffer,
+		.cs_change = 1,
+	};
+	bl->tx_buffer[0] = CMD_SOF;
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+
+	do {
+		if (spi_sync_locked(spi_data->device, &msg) != 0)
+			return CMD_NACK;
+		timeout--;
+		if (bl->rx_buffer[0] != CMD_SOF_ACK && timeout % 256 == 0)
+			schedule();
+	} while (bl->rx_buffer[0] != CMD_SOF_ACK && timeout > 0);
+
+	if (bl->rx_buffer[0] == CMD_SOF_ACK)
+		return bl->read_ack(data);
+	else
+		return CMD_NACK;
+}
+
+void nanohub_spi_bl_init(struct nanohub_spi_data *spi_data)
+{
+	struct nanohub_bl *bl = &spi_data->data.bl;
+
+	bl->open = spi_bl_open;
+	bl->sync = spi_bl_sync;
+	bl->write_data = spi_bl_write_data;
+	bl->write_cmd = spi_bl_write_cmd;
+	bl->read_data = spi_bl_read_data;
+	bl->read_ack = spi_bl_read_ack;
+	bl->close = spi_bl_close;
+}
+
+int nanohub_spi_write(void *data, uint8_t *tx, int length, int timeout)
+{
+	struct nanohub_spi_data *spi_data = data;
+	const struct nanohub_comms *comms = &spi_data->data.comms;
+	int max_len = sizeof(struct nanohub_packet) + MAX_UINT8 +
+		      sizeof(struct nanohub_packet_crc);
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.len = max_len + timeout,
+		.tx_buf = comms->tx_buffer,
+		.rx_buf = comms->rx_buffer,
+		.cs_change = 1,
+	};
+	spi_data->rx_offset = max_len;
+	spi_data->rx_length = max_len + timeout;
+	memcpy(comms->tx_buffer, tx, length);
+	memset(comms->tx_buffer + length, 0xFF, max_len + timeout - length);
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+
+	if (spi_sync_locked(spi_data->device, &msg) == 0)
+		return length;
+	else
+		return ERROR_NACK;
+}
+
+int nanohub_spi_read(void *data, uint8_t *rx, int max_length, int timeout)
+{
+	struct nanohub_spi_data *spi_data = data;
+	struct nanohub_comms *comms = &spi_data->data.comms;
+	const int min_size = sizeof(struct nanohub_packet) +
+	    sizeof(struct nanohub_packet_crc);
+	int i, ret;
+	int offset = 0;
+	struct nanohub_packet *packet = NULL;
+	struct spi_message msg;
+	struct spi_transfer xfer = {
+		.len = timeout,
+		.tx_buf = comms->tx_buffer,
+		.rx_buf = comms->rx_buffer,
+		.cs_change = 1,
+	};
+
+	if (max_length < min_size)
+		return ERROR_NACK;
+
+	/* consume leftover bytes, if any */
+	if (spi_data->rx_offset < spi_data->rx_length) {
+		for (i = spi_data->rx_offset; i < spi_data->rx_length; i++) {
+			if (comms->rx_buffer[i] != 0xFF) {
+				offset = spi_data->rx_length - i;
+
+				if (offset <
+				    offsetof(struct nanohub_packet,
+					     len) + sizeof(packet->len)) {
+					memcpy(rx, &comms->rx_buffer[i],
+					       offset);
+					xfer.len =
+					    min_size + MAX_UINT8 - offset;
+					break;
+				} else {
+					packet =
+					    (struct nanohub_packet *)&comms->
+					    rx_buffer[i];
+					if (offset < min_size + packet->len) {
+						memcpy(rx, packet, offset);
+						xfer.len =
+						    min_size + packet->len -
+						    offset;
+						break;
+					} else {
+						memcpy(rx, packet,
+						       min_size + packet->len);
+						spi_data->rx_offset = i +
+						     min_size + packet->len;
+						return min_size + packet->len;
+					}
+				}
+			}
+		}
+	}
+
+	if (xfer.len != 1 && xfer.len < SPI_MIN_DMA)
+		xfer.len = SPI_MIN_DMA;
+	memset(comms->tx_buffer, 0xFF, xfer.len);
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+
+	ret = spi_sync_locked(spi_data->device, &msg);
+	if (ret == 0) {
+		if (offset > 0) {
+			packet = (struct nanohub_packet *)rx;
+			if (offset + xfer.len > max_length)
+				memcpy(&rx[offset], comms->rx_buffer,
+					max_length - offset);
+			else
+				memcpy(&rx[offset], comms->rx_buffer, xfer.len);
+			spi_data->rx_length = xfer.len;
+			spi_data->rx_offset = min_size + packet->len - offset;
+		} else {
+			for (i = 0; i < xfer.len; i++) {
+				if (comms->rx_buffer[i] != 0xFF) {
+					spi_data->rx_length = xfer.len;
+
+					if (xfer.len - i < min_size) {
+						spi_data->rx_offset = i;
+						break;
+					} else {
+						packet =
+						    (struct nanohub_packet *)
+						    &comms->rx_buffer[i];
+						if (xfer.len - i <
+						    min_size + packet->len) {
+							packet = NULL;
+							spi_data->rx_offset = i;
+						} else {
+							memcpy(rx, packet,
+							       min_size +
+							       packet->len);
+							spi_data->rx_offset =
+							    i + min_size +
+							    packet->len;
+						}
+					}
+					break;
+				}
+			}
+		}
+	}
+
+	if (ret < 0)
+		return ret;
+	else if (!packet)
+		return 0;
+	else
+		return min_size + packet->len;
+}
+
+static int nanohub_spi_open(void *data)
+{
+	struct nanohub_spi_data *spi_data = data;
+	int ret;
+
+	down(&spi_data->spi_sem);
+	spi_bus_lock(spi_data->device->master);
+	spi_data->device->max_speed_hz = spi_data->data.max_speed_hz;
+	spi_data->device->mode = spi_data->data.pdata->spi_mode;
+	spi_data->device->bits_per_word = 8;
+	ret = spi_setup(spi_data->device);
+	if (!ret) {
+		udelay(40);
+		gpio_set_value(spi_data->cs, 0);
+		udelay(30);
+	}
+	return ret;
+}
+
+static void nanohub_spi_close(void *data)
+{
+	struct nanohub_spi_data *spi_data = data;
+
+	gpio_set_value(spi_data->cs, 1);
+	spi_bus_unlock(spi_data->device->master);
+	up(&spi_data->spi_sem);
+	udelay(60);
+}
+
+void nanohub_spi_comms_init(struct nanohub_spi_data *spi_data)
+{
+	struct nanohub_comms *comms = &spi_data->data.comms;
+	int max_len = sizeof(struct nanohub_packet) + MAX_UINT8 +
+		      sizeof(struct nanohub_packet_crc);
+
+	comms->seq = 1;
+	comms->timeout_write = 544;
+	comms->timeout_ack = 272;
+	comms->timeout_reply = 512;
+	comms->open = nanohub_spi_open;
+	comms->close = nanohub_spi_close;
+	comms->write = nanohub_spi_write;
+	comms->read = nanohub_spi_read;
+
+	max_len += comms->timeout_write;
+	max_len = max(max_len, comms->timeout_ack);
+	max_len = max(max_len, comms->timeout_reply);
+	comms->tx_buffer = kmalloc(max_len, GFP_KERNEL | GFP_DMA);
+	comms->rx_buffer = kmalloc(max_len, GFP_KERNEL | GFP_DMA);
+
+	spi_data->rx_length = 0;
+	spi_data->rx_offset = 0;
+
+	sema_init(&spi_data->spi_sem, 1);
+}
+
+static int nanohub_spi_probe(struct spi_device *spi)
+{
+	struct nanohub_spi_data *spi_data;
+	struct iio_dev *iio_dev;
+	int error;
+
+	iio_dev = iio_device_alloc(&spi->dev, sizeof(struct nanohub_spi_data));
+
+	iio_dev = nanohub_probe(&spi->dev, iio_dev);
+
+	if (IS_ERR(iio_dev))
+		return PTR_ERR(iio_dev);
+
+	spi_data = iio_priv(iio_dev);
+
+	spi_set_drvdata(spi, iio_dev);
+
+	if (gpio_is_valid(spi_data->data.pdata->spi_cs_gpio)) {
+		error =
+		    gpio_request(spi_data->data.pdata->spi_cs_gpio,
+				 "nanohub_spi_cs");
+		if (error) {
+			pr_err("nanohub: spi_cs_gpio request failed\n");
+		} else {
+			spi_data->cs = spi_data->data.pdata->spi_cs_gpio;
+			gpio_direction_output(spi_data->cs, 1);
+		}
+	} else {
+		pr_err("nanohub: spi_cs_gpio is not valid\n");
+	}
+
+	spi_data->device = spi;
+	spi_data->data.max_speed_hz = spi->max_speed_hz ? : SPI_MAX_SPEED_HZ;
+	nanohub_spi_comms_init(spi_data);
+
+	spi_data->data.bl.cmd_erase = CMD_ERASE;
+	spi_data->data.bl.cmd_read_memory = CMD_READ_MEMORY;
+	spi_data->data.bl.cmd_write_memory = CMD_WRITE_MEMORY;
+	spi_data->data.bl.cmd_get_version = CMD_GET_VERSION;
+	spi_data->data.bl.cmd_get_id = CMD_GET_ID;
+	spi_data->data.bl.cmd_readout_protect = CMD_READOUT_PROTECT;
+	spi_data->data.bl.cmd_readout_unprotect = CMD_READOUT_UNPROTECT;
+	spi_data->data.bl.cmd_update_finished = CMD_UPDATE_FINISHED;
+	nanohub_spi_bl_init(spi_data);
+
+	nanohub_start(&spi_data->data);
+
+	return 0;
+}
+
+static int nanohub_spi_remove(struct spi_device *spi)
+{
+	struct nanohub_spi_data *spi_data;
+	struct iio_dev *iio_dev;
+
+	iio_dev = spi_get_drvdata(spi);
+	spi_data = iio_priv(iio_dev);
+
+	if (gpio_is_valid(spi_data->cs)) {
+		gpio_direction_output(spi_data->cs, 1);
+		gpio_free(spi_data->cs);
+	}
+
+	return nanohub_remove(iio_dev);
+}
+
+static int nanohub_spi_suspend(struct device *dev)
+{
+	struct iio_dev *iio_dev = spi_get_drvdata(to_spi_device(dev));
+	struct nanohub_spi_data *spi_data = iio_priv(iio_dev);
+	int ret;
+
+	ret = nanohub_suspend(iio_dev);
+
+	if (!ret) {
+		ret = down_interruptible(&spi_data->spi_sem);
+		if (ret)
+			up(&spi_data->spi_sem);
+	}
+
+	return ret;
+}
+
+static int nanohub_spi_resume(struct device *dev)
+{
+	struct iio_dev *iio_dev = spi_get_drvdata(to_spi_device(dev));
+	struct nanohub_spi_data *spi_data = iio_priv(iio_dev);
+
+	up(&spi_data->spi_sem);
+
+	return nanohub_resume(iio_dev);
+}
+
+static const struct spi_device_id nanohub_spi_id[] = {
+	{NANOHUB_NAME, 0},
+	{},
+};
+
+static const struct dev_pm_ops nanohub_spi_pm_ops = {
+	.suspend = nanohub_spi_suspend,
+	.resume = nanohub_spi_resume,
+};
+
+static struct spi_driver nanohub_spi_driver = {
+	.driver = {
+		   .name = NANOHUB_NAME,
+		   .owner = THIS_MODULE,
+		   .pm = &nanohub_spi_pm_ops,
+		   },
+	.probe = nanohub_spi_probe,
+	.remove = nanohub_spi_remove,
+	.id_table = nanohub_spi_id,
+};
+
+int __init nanohub_spi_init(void)
+{
+	return spi_register_driver(&nanohub_spi_driver);
+}
+
+void nanohub_spi_cleanup(void)
+{
+	spi_unregister_driver(&nanohub_spi_driver);
+}
+
+MODULE_DEVICE_TABLE(spi, nanohub_spi_id);
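For readers skimming the bootloader path above: a standalone, illustrative sketch of the framing that bl_checksum() implies. spi_bl_write_data() appends exactly this trailing byte before shifting the buffer out; the payload values here are made up, not taken from the driver.

/*
 * Illustrative only: the byte appended by spi_bl_write_data() is the XOR of
 * the payload bytes (length > 1) or the complement of the single byte
 * (length == 1), per bl_checksum() above.  Payload values are placeholders.
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint8_t frame[3] = { 0xAA, 0x55, 0x00 };

	frame[2] = (uint8_t)(frame[0] ^ frame[1]);	/* == bl_checksum(frame, 2) == 0xFF */
	assert(frame[2] == 0xFF);

	assert((uint8_t)~0x11 == 0xEE);			/* == bl_checksum() of the 1-byte payload 0x11 */
	return 0;
}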
diff --git a/drivers/staging/nanohub/spi.h b/drivers/staging/nanohub/spi.h
new file mode 100644
index 0000000..0155c81
--- /dev/null
+++ b/drivers/staging/nanohub/spi.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NANOHUB_SPI_H
+#define _NANOHUB_SPI_H
+
+int __init nanohub_spi_init(void);
+void nanohub_spi_cleanup(void);
+
+#endif
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c333d13..797aadc 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -244,6 +244,12 @@
 	  state changes. The gadget can be in any of the following
 	  three states: "CONNECTED/DISCONNECTED/CONFIGURED"
 
+config USB_CONFIGFS_WAKELOCK
+	bool "Keep wakelock in CONNECTED state"
+	depends on USB_CONFIGFS_UEVENT
+	help
+	  Hold a wakeup source while the USB gadget is in the CONNECTED state.
+
 config USB_CONFIGFS_SERIAL
 	bool "Generic serial bulk in/out"
 	depends on USB_CONFIGFS
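Usage note (not part of the patch): since the new option depends on USB_CONFIGFS_UEVENT, a defconfig that enables it would look roughly like this hedged fragment.

CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_WAKELOCK=y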
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 21dd268..1a51db8 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -82,6 +82,9 @@
 	bool sw_connected;
 	struct work_struct work;
 	struct device *dev;
+#ifdef CONFIG_USB_CONFIGFS_WAKELOCK
+	struct wakeup_source *usb_ws;
+#endif
 #endif
 };
 
@@ -1485,6 +1488,9 @@
 	spin_unlock_irqrestore(&cdev->lock, flags);
 
 	if (status[0]) {
+#ifdef CONFIG_USB_CONFIGFS_WAKELOCK
+		__pm_stay_awake(gi->usb_ws);
+#endif
 		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, connected);
 		pr_info("%s: sent uevent %s\n", __func__, connected[0]);
 		uevent_sent = true;
@@ -1500,6 +1506,9 @@
 		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, disconnected);
 		pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
 		uevent_sent = true;
+#ifdef CONFIG_USB_CONFIGFS_WAKELOCK
+		__pm_relax(gi->usb_ws);
+#endif
 	}
 
 	if (!uevent_sent) {
@@ -1777,6 +1786,9 @@
 	struct device_attribute **attrs;
 	struct device_attribute *attr;
 
+#ifdef CONFIG_USB_CONFIGFS_WAKELOCK
+	gi->usb_ws = wakeup_source_register(gi->dev, "android_usb");
+#endif
 	INIT_WORK(&gi->work, android_work);
 	gi->dev = device_create(android_class, NULL,
 			MKDEV(0, 0), NULL, "android%d", gadget_index++);
@@ -1802,8 +1814,9 @@
 	return 0;
 }
 
-static void android_device_destroy(struct gadget_info *gi)
+static void android_device_destroy(struct config_item *item)
 {
+	struct gadget_info *gi = to_gadget_info(item);
 	struct device_attribute **attrs;
 	struct device_attribute *attr;
 
@@ -1811,6 +1824,9 @@
 	while ((attr = *attrs++))
 		device_remove_file(gi->dev, attr);
 	device_destroy(gi->dev->class, gi->dev->devt);
+#ifdef CONFIG_USB_CONFIGFS_WAKELOCK
+	wakeup_source_unregister(gi->usb_ws);
+#endif
 }
 #else
 static inline int android_device_create(struct gadget_info *gi)
@@ -1818,7 +1834,7 @@
 	return 0;
 }
 
-static inline void android_device_destroy(struct gadget_info *gi)
+static inline void android_device_destroy(struct config_item *item)
 {
 }
 #endif
@@ -1891,7 +1907,7 @@
 
 	gi = container_of(to_config_group(item), struct gadget_info, group);
 	config_item_put(item);
-	android_device_destroy(gi);
+	android_device_destroy(item);
 }
 
 static struct configfs_group_operations gadgets_ops = {
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 427a993..1643d59 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -14,6 +14,8 @@
 
 source "drivers/gpu/vga/Kconfig"
 
+source "drivers/gpu/arm/Kconfig"
+
 source "drivers/gpu/host1x/Kconfig"
 source "drivers/gpu/ipu-v3/Kconfig"
 
diff --git a/include/linux/platform_data/nanohub.h b/include/linux/platform_data/nanohub.h
new file mode 100644
index 0000000..07a00d7
--- /dev/null
+++ b/include/linux/platform_data/nanohub.h
@@ -0,0 +1,28 @@
+#ifndef __LINUX_PLATFORM_DATA_NANOHUB_H
+#define __LINUX_PLATFORM_DATA_NANOHUB_H
+
+#include <linux/types.h>
+
+struct nanohub_flash_bank {
+	int bank;
+	u32 address;
+	size_t length;
+};
+
+struct nanohub_platform_data {
+	u32 wakeup_gpio;
+	u32 nreset_gpio;
+	u32 boot0_gpio;
+	u32 irq1_gpio;
+	u32 irq2_gpio;
+	u32 spi_cs_gpio;
+	u32 bl_max_speed_hz;
+	u32 bl_addr;
+	u32 num_flash_banks;
+	struct nanohub_flash_bank *flash_banks;
+	u32 num_shared_flash_banks;
+	struct nanohub_flash_bank *shared_flash_banks;
+	u16 spi_mode;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_NANOHUB_H */
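A minimal sketch of how board code might fill this structure when probing without devicetree; every numeric value below (GPIO numbers, addresses, bank sizes) is a placeholder, and SPI_MODE_3 is only an assumed mode.

/* Hypothetical board-file fragment; all values are placeholders. */
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/nanohub.h>

static struct nanohub_flash_bank example_flash_banks[] = {
	{ .bank = 0, .address = 0x08000000, .length = 0x4000 },
	{ .bank = 1, .address = 0x08004000, .length = 0x4000 },
};

static struct nanohub_platform_data example_nanohub_pdata = {
	.wakeup_gpio		= 10,	/* placeholder GPIO numbers */
	.nreset_gpio		= 11,
	.boot0_gpio		= 12,
	.irq1_gpio		= 13,
	.spi_cs_gpio		= 14,
	.bl_max_speed_hz	= 1000000,
	.bl_addr		= 0x08000000,
	.flash_banks		= example_flash_banks,
	.num_flash_banks	= ARRAY_SIZE(example_flash_banks),
	.spi_mode		= SPI_MODE_3,	/* assumed; must match the sensor hub */
};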
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 069241b..11a60c1 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -706,6 +706,15 @@
 #define V4L2_PIX_FMT_FWHT     v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
 #define V4L2_PIX_FMT_FWHT_STATELESS     v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
 
+/*
+ * Compressed Luminance+Chrominance meta-formats
+ * In these formats, the component ordering is specified (Y, followed by U
+ * then V), but the exact Linear layout is undefined.
+ * These formats can only be used with a non-Linear modifier.
+ */
+#define V4L2_PIX_FMT_YUV420_8BIT	v4l2_fourcc('Y', 'U', '0', '8') /* 1-plane YUV 4:2:0 8-bit */
+#define V4L2_PIX_FMT_YUV420_10BIT	v4l2_fourcc('Y', 'U', '1', '0') /* 1-plane YUV 4:2:0 10-bit */
+
 /*  Vendor-specific formats   */
 #define V4L2_PIX_FMT_CPIA1    v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
 #define V4L2_PIX_FMT_WNVA     v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
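A hedged userspace sketch of requesting the new 1-plane 8-bit format on a multi-planar capture queue; whether a given driver accepts it, and with which non-Linear modifier, is device-specific and outside this header.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* fd is an open V4L2 video device; returns the VIDIOC_S_FMT ioctl result. */
static int request_yuv420_8bit(int fd, unsigned int width, unsigned int height)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	fmt.fmt.pix_mp.width = width;
	fmt.fmt.pix_mp.height = height;
	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_YUV420_8BIT;
	fmt.fmt.pix_mp.num_planes = 1;	/* 1-plane meta-format */

	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}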